max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
rpython/memory/test/test_generational_gc.py | nanjekyejoannah/pypy | 381 | 12695205 | <reponame>nanjekyejoannah/pypy
from rpython.memory.test import test_semispace_gc
class TestGenerationalGC(test_semispace_gc.TestSemiSpaceGC):
from rpython.memory.gc.generation import GenerationGC as GCClass
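# The base TestSemiSpaceGC suite is parameterized by the GCClass attribute, so overriding it here
# re-runs the same GC tests against the generational collector.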
|
tools/print_system_info.py | BenjaminWegener/tensorflow-directml | 351 | 12695219 | <filename>tools/print_system_info.py
import sys
import subprocess
import tempfile
import os
import platform
import pkg_resources
import xml.etree.ElementTree as ET
# Determine version of tensorflow-directml
installed_packages = pkg_resources.working_set
tfdml_version = [p.version for p in installed_packages if p.key == "tensorflow-directml"]
if tfdml_version:
tfdml_version = tfdml_version[0]
else:
tfdml_version = "Not Installed"
# Collect info from dxdiag.exe in Windows.
# NOTE: NamedTemporaryFile in a 'with' statement leaves the file open, which prevents dxdiag.exe
# from opening it a second time for writing on Windows. We must manually delete it without leaving
# the file open.
dxdiag_path = tempfile.NamedTemporaryFile(suffix=".xml", delete=False).name
try:
if os.name == "nt":
subprocess.run(['dxdiag.exe', '/x', dxdiag_path], check=True)
else:
dxdiag_path_windows = subprocess.run(
"wslpath -w {}".format(dxdiag_path),
shell=True,
check=True,
capture_output=True,
text=True).stdout.rstrip().replace('\\','\\\\')
subprocess.run('dxdiag.exe /x {}'.format(dxdiag_path_windows), shell=True, check=True)
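# NOTE: dxdiag.exe is a Windows binary, so under WSL the temp-file path is first translated to a
# Windows path with 'wslpath -w'; the backslashes are doubled so they survive shell interpretation
# when the command string is formatted.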
with open(dxdiag_path, "r") as dxdiag_log:
dxdiag = ET.parse(dxdiag_log).getroot()
finally:
if os.path.exists(dxdiag_path):
os.remove(dxdiag_path)
print("Host System\n{}".format('-'*80))
print("Windows 10 Version : {}".format(dxdiag.find("./SystemInformation/OperatingSystem").text))
print("Processor : {}".format(dxdiag.find("./SystemInformation/Processor").text))
print("Memory : {}".format(dxdiag.find("./SystemInformation/Memory").text))
print("DirectX Version : {}".format(dxdiag.find("./SystemInformation/DirectXVersion").text))
if os.name != "nt":
import distro
print("\nWindows Subsystem for Linux\n{}".format('-'*80))
print("WSL Name : {}".format(os.environ["WSL_DISTRO_NAME"]))
print("WSL Distribution : {}".format(" ".join(distro.linux_distribution())))
print("WSL Kernel : {}".format(platform.release()))
print("\nPython Environment\n{}".format('-'*80))
print("Python Version : {}".format(platform.python_version()))
print("TensorFlow-DirectML : {}".format(tfdml_version))
for device in dxdiag.findall("./DisplayDevices/DisplayDevice"):
print("\nDirectX Device\n{}".format('-'*80))
print("Description : {}".format(device.find("./CardName").text))
print("Manufacturer : {}".format(device.find("./Manufacturer").text))
print("Chip Type : {}".format(device.find("./ChipType").text))
print("Dedicated Memory : {}".format(device.find("./DedicatedMemory").text))
print("Driver Version : {}".format(device.find("./DriverVersion").text))
print("Driver Model : {}".format(device.find("./DriverModel").text))
print("Driver Date : {}".format(device.find("./DriverDate").text))
print("Feature Levels : {}".format(device.find("./FeatureLevels").text))
|
wouso/interface/apps/pages/__init__.py | AlexandruGhergut/wouso | 117 | 12695263 | __author__ = 'alex'
|
model/transformers/blocks.py | ishine/Comprehensive-Transformer-TTS | 147 | 12695268 | import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
import math
from utils.tools import make_positions
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
""" Sinusoid position encoding table """
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array(
[get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.0
return torch.FloatTensor(sinusoid_table)
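# Rough usage sketch: get_sinusoid_encoding_table(n_position=100, d_hid=256) returns a
# FloatTensor of shape [100, 256] with sin values in the even columns and cos in the odd ones
# (the 100/256 sizes here are arbitrary example values).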
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
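# Unlike the interleaved sin/cos layout in get_sinusoid_encoding_table above, this builds the
# [sin | cos] halves concatenated along the feature dimension (the tensor2tensor convention
# mentioned in the docstring), with the padding row zeroed out.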
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(input, self.padding_idx) if positions is None else positions
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
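# .detach() keeps the fixed sinusoidal table out of the autograd graph; these embeddings are never trained.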
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
class Swish(nn.Module):
"""
Swish is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks applied
to a variety of challenging domains such as image classification and machine translation.
"""
def __init__(self):
super(Swish, self).__init__()
def forward(self, inputs):
return inputs * inputs.sigmoid()
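# Equivalent to x * sigmoid(x), i.e. the SiLU activation (available as torch.nn.SiLU in recent PyTorch).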
class GLU(nn.Module):
"""
The gating mechanism is called Gated Linear Units (GLU), which was first introduced for natural language processing
in the paper “Language Modeling with Gated Convolutional Networks”
"""
def __init__(self, dim: int) -> None:
super(GLU, self).__init__()
self.dim = dim
def forward(self, inputs):
outputs, gate = inputs.chunk(2, dim=self.dim)
return outputs * gate.sigmoid()
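# Splits the input in half along `dim` and gates one half with the sigmoid of the other,
# so the output has half as many features along that dimension.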
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=1e-12)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
class LinearNorm(nn.Module):
""" LinearNorm Projection """
def __init__(self, in_features, out_features, bias=False):
super(LinearNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(self.linear.weight)
if bias:
nn.init.constant_(self.linear.bias, 0.0)
def forward(self, x):
x = self.linear(x)
return x
class ConvBlock(nn.Module):
""" 1D Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, dropout=None, normalization=nn.BatchNorm1d, activation=nn.ReLU, transpose=False):
super(ConvBlock, self).__init__()
self.conv_layer = nn.Sequential(
ConvNorm(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
transpose=transpose
),
normalization(out_channels),
activation(),
)
self.dropout = dropout if dropout is not None else None
self.transpose = transpose
def forward(self, enc_input, mask=None):
if not self.transpose:
enc_input = enc_input.contiguous().transpose(1, 2)
enc_output = self.conv_layer(enc_input)
if self.dropout is not None:
enc_output = F.dropout(enc_output, self.dropout, training=True) # self.training)
if not self.transpose:
enc_output = enc_output.contiguous().transpose(1, 2)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output
class ConvBlock2D(nn.Module):
""" 2D Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, dropout=None, normalization=nn.BatchNorm2d, activation=nn.ReLU, transpose=False):
super(ConvBlock2D, self).__init__()
self.conv_layer = nn.Sequential(
ConvNorm2D(
in_channels,
out_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, int((kernel_size - 1) / 2)),
bias=False,
w_init_gain="tanh",
transpose=transpose,
),
normalization(out_channels),
activation(),
)
self.dropout = dropout if dropout is not None else None
self.transpose = transpose
def forward(self, enc_input, mask=None):
"""
enc_input -- [B, H, W, C_in]
mask -- [B, H]
"""
if not self.transpose:
enc_input = enc_input.contiguous().permute(0, 3, 1, 2) # [B, C_in, H, W]
enc_output = self.conv_layer(enc_input)
if self.dropout is not None:
enc_output = F.dropout(enc_output, self.dropout, self.training)
if not self.transpose:
enc_output = enc_output.contiguous().permute(0, 2, 3, 1) # [B, H, W, C_out]
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1).unsqueeze(-1), 0)
return enc_output
class ConvNorm(nn.Module):
""" 1D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
transpose=False,
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
self.transpose = transpose
def forward(self, x):
if self.transpose:
x = x.contiguous().transpose(1, 2)
x = self.conv(x)
if self.transpose:
x = x.contiguous().transpose(1, 2)
return x
class ConvNorm2D(nn.Module):
""" 2D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
transpose=False,
):
super(ConvNorm2D, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
self.transpose = transpose
def forward(self, x):
"""
x -- [B, H, W, C] or [B, C, H, W]
"""
if self.transpose:
x = x.contiguous().permute(0, 3, 1, 2) # [B, C, H, W]
x = self.conv(x)
if self.transpose:
x = x.contiguous().permute(0, 2, 3, 1) # [B, H, W, C]
return x
|
04. Chapter_4/timing_hash_function.py | Mikma03/High-performance-Python | 223 | 12695288 | <filename>04. Chapter_4/timing_hash_function.py
import string
import timeit
class BadHash(str):
def __hash__(self):
return 42
class GoodHash(str):
def __hash__(self):
"""
This is a slightly optimized version of twoletter_hash
"""
return ord(self[1]) + 26 * ord(self[0]) - 2619
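# 2619 == 27 * ord('a'), so the two-letter keys 'aa'..'zz' map to the distinct integers
# 0..675 and spread across hash buckets, whereas BadHash forces every key into the same bucket.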
if __name__ == "__main__":
baddict = set()
gooddict = set()
for i in string.ascii_lowercase:
for j in string.ascii_lowercase:
key = i + j
baddict.add(BadHash(key))
gooddict.add(GoodHash(key))
badtime = timeit.repeat(
"key in baddict",
setup="from __main__ import baddict, BadHash; key = BadHash('zz')",
repeat=3,
number=100_000,
)
goodtime = timeit.repeat(
"key in gooddict",
setup="from __main__ import gooddict, GoodHash; key = GoodHash('zz')",
repeat=3,
number=100_000,
)
print(f"Min lookup time for baddict: {min(badtime)}")
print(f"Min lookup time for gooddict: {min(goodtime)}")
# Results:
# Min lookup time for baddict: 17.719061855008476
# Min lookup time for gooddict: 0.42408075400453527
|
tests/test_parallelism.py | colindean/peru | 525 | 12695323 | <reponame>colindean/peru
from textwrap import dedent
from peru import plugin
import shared
def assert_parallel(n):
# The plugin module keeps a global counter of all the jobs that run in
# parallel, so that we can write these tests.
if plugin.DEBUG_PARALLEL_MAX != n:
raise AssertionError('Expected {} parallel {}. Counted {}.'.format(
n, 'job' if n == 1 else 'jobs', plugin.DEBUG_PARALLEL_MAX))
class ParallelismTest(shared.PeruTest):
def setUp(self):
# Make sure nothing is fishy with the jobs counter, and reset the max.
plugin.debug_assert_clean_parallel_count()
plugin.DEBUG_PARALLEL_MAX = 0
def tearDown(self):
# Make sure nothing is fishy with the jobs counter. No sense in
# resetting the max here, because the rest of our tests don't know to
# reset it anyway.
plugin.debug_assert_clean_parallel_count()
def test_two_jobs_in_parallel(self):
# This just checks that two different modules can actually be fetched
# in parallel.
foo = shared.create_dir()
bar = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo: ./
bar: ./
cp module foo:
path: {}
cp module bar:
path: {}
'''.format(foo, bar))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync'], test_dir)
assert_parallel(2)
def test_jobs_flag(self):
# This checks that the --jobs flag is respected, even when two modules
# could have been fetched in parallel.
foo = shared.create_dir()
bar = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo: ./
bar: ./
cp module foo:
path: {}
cp module bar:
path: {}
'''.format(foo, bar))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync', '-j1'], test_dir)
assert_parallel(1)
def test_identical_fields(self):
# This checks that modules with identical fields are not fetched in
# parallel. This is the same logic that protects us from fetching a
# given module twice, like when it's imported with two different named
# rules.
foo = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo1: ./
foo2: ./
cp module foo1:
path: {}
cp module foo2:
path: {}
'''.format(foo, foo))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync'], test_dir)
assert_parallel(1)
def test_identical_plugin_cache_fields(self):
# Plugins that use caching also need to avoid running in parallel, if
# their cache directories are the same. The noop_cache plugin (created
# for this test) uses the path field (but not the nonce field) in its
# plugin cache key. Check that these two modules are not fetched in
# parallel, even though their module fields aren't exactly the same.
foo = shared.create_dir()
peru_yaml = dedent('''\
imports:
foo1: ./
foo2: ./
noop_cache module foo1:
path: {}
# nonce is ignored, but it makes foo1 different from foo2 as
# far as the module cache is concerned
nonce: '1'
noop_cache module foo2:
path: {}
nonce: '2'
'''.format(foo, foo))
test_dir = shared.create_dir({'peru.yaml': peru_yaml})
shared.run_peru_command(['sync'], test_dir)
assert_parallel(1)
|
Ryven/packages/auto_generated/doctest/nodes.py | tfroehlich82/Ryven | 2,872 | 12695354 | <filename>Ryven/packages/auto_generated/doctest/nodes.py
from NENV import *
import doctest
class NodeBase(Node):
pass
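# Each generated node below wraps a single function from the doctest module: init_inputs mirror
# the function's parameters, and update_event() calls the function with the current input values
# and writes the result to output 0.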
class Docfilesuite_Node(NodeBase):
"""
A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
title = 'DocFileSuite'
type_ = 'doctest'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.DocFileSuite())
class Docfiletest_Node(NodeBase):
"""
"""
title = 'DocFileTest'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='path'),
NodeInputBP(label='module_relative', dtype=dtypes.Data(default=True, size='s')),
NodeInputBP(label='package', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='globs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='parser', dtype=dtypes.Data(default=doctest.DocTestParser(), size='s')),
NodeInputBP(label='encoding', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.DocFileTest(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4), self.input(5)))
class Doctestsuite_Node(NodeBase):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
title = 'DocTestSuite'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='module', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='globs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='extraglobs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='test_finder', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.DocTestSuite(self.input(0), self.input(1), self.input(2), self.input(3)))
class _Comment_Line_Node(NodeBase):
"""
Return a commented form of the given line"""
title = '_comment_line'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='line'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._comment_line(self.input(0)))
class _Ellipsis_Match_Node(NodeBase):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
title = '_ellipsis_match'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='want'),
NodeInputBP(label='got'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._ellipsis_match(self.input(0), self.input(1)))
class _Exception_Traceback_Node(NodeBase):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
title = '_exception_traceback'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='exc_info'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._exception_traceback(self.input(0)))
class _Extract_Future_Flags_Node(NodeBase):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
title = '_extract_future_flags'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='globs'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._extract_future_flags(self.input(0)))
class _Indent_Node(NodeBase):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
"""
title = '_indent'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='s'),
NodeInputBP(label='indent', dtype=dtypes.Data(default=4, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._indent(self.input(0), self.input(1)))
class _Load_Testfile_Node(NodeBase):
"""
"""
title = '_load_testfile'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='filename'),
NodeInputBP(label='package'),
NodeInputBP(label='module_relative'),
NodeInputBP(label='encoding'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._load_testfile(self.input(0), self.input(1), self.input(2), self.input(3)))
class _Module_Relative_Path_Node(NodeBase):
"""
"""
title = '_module_relative_path'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='module'),
NodeInputBP(label='test_path'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._module_relative_path(self.input(0), self.input(1)))
class _Newline_Convert_Node(NodeBase):
"""
"""
title = '_newline_convert'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._newline_convert(self.input(0)))
class _Normalize_Module_Node(NodeBase):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
title = '_normalize_module'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='module'),
NodeInputBP(label='depth', dtype=dtypes.Data(default=2, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._normalize_module(self.input(0), self.input(1)))
class _Strip_Exception_Details_Node(NodeBase):
"""
"""
title = '_strip_exception_details'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='msg'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._strip_exception_details(self.input(0)))
class _Test_Node(NodeBase):
"""
"""
title = '_test'
type_ = 'doctest'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest._test())
class Debug_Node(NodeBase):
"""
Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
title = 'debug'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='module'),
NodeInputBP(label='name'),
NodeInputBP(label='pm', dtype=dtypes.Data(default=False, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.debug(self.input(0), self.input(1), self.input(2)))
class Debug_Script_Node(NodeBase):
"""
Debug a test script. `src` is the script, as a string."""
title = 'debug_script'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='src'),
NodeInputBP(label='pm', dtype=dtypes.Data(default=False, size='s')),
NodeInputBP(label='globs', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.debug_script(self.input(0), self.input(1), self.input(2)))
class Debug_Src_Node(NodeBase):
"""
Debug a single doctest docstring, in argument `src`."""
title = 'debug_src'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='src'),
NodeInputBP(label='pm', dtype=dtypes.Data(default=False, size='s')),
NodeInputBP(label='globs', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.debug_src(self.input(0), self.input(1), self.input(2)))
class Namedtuple_Node(NodeBase):
"""
Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
title = 'namedtuple'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='typename'),
NodeInputBP(label='field_names'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.namedtuple(self.input(0), self.input(1)))
class Register_Optionflag_Node(NodeBase):
"""
"""
title = 'register_optionflag'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='name'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.register_optionflag(self.input(0)))
class Run_Docstring_Examples_Node(NodeBase):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
title = 'run_docstring_examples'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='f'),
NodeInputBP(label='globs'),
NodeInputBP(label='verbose', dtype=dtypes.Data(default=False, size='s')),
NodeInputBP(label='name', dtype=dtypes.Data(default='NoName', size='s')),
NodeInputBP(label='compileflags', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='optionflags', dtype=dtypes.Data(default=0, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.run_docstring_examples(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4), self.input(5)))
class Script_From_Examples_Node(NodeBase):
"""
Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print(script_from_examples(text))
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
title = 'script_from_examples'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='s'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.script_from_examples(self.input(0)))
class Set_Unittest_Reportflags_Node(NodeBase):
"""
Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
title = 'set_unittest_reportflags'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='flags'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.set_unittest_reportflags(self.input(0)))
class Testfile_Node(NodeBase):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
title = 'testfile'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='filename'),
NodeInputBP(label='module_relative', dtype=dtypes.Data(default=True, size='s')),
NodeInputBP(label='name', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='package', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='globs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='verbose', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='report', dtype=dtypes.Data(default=True, size='s')),
NodeInputBP(label='optionflags', dtype=dtypes.Data(default=0, size='s')),
NodeInputBP(label='extraglobs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='raise_on_error', dtype=dtypes.Data(default=False, size='s')),
NodeInputBP(label='parser', dtype=dtypes.Data(default=doctest.DocTestParser(), size='s')),
NodeInputBP(label='encoding', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.testfile(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4), self.input(5), self.input(6), self.input(7), self.input(8), self.input(9), self.input(10), self.input(11)))
class Testmod_Node(NodeBase):
"""
m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See help(doctest) for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
title = 'testmod'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='m', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='name', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='globs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='verbose', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='report', dtype=dtypes.Data(default=True, size='s')),
NodeInputBP(label='optionflags', dtype=dtypes.Data(default=0, size='s')),
NodeInputBP(label='extraglobs', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='raise_on_error', dtype=dtypes.Data(default=False, size='s')),
NodeInputBP(label='exclude_empty', dtype=dtypes.Data(default=False, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.testmod(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4), self.input(5), self.input(6), self.input(7), self.input(8)))
class Testsource_Node(NodeBase):
"""
Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
title = 'testsource'
type_ = 'doctest'
init_inputs = [
NodeInputBP(label='module'),
NodeInputBP(label='name'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, doctest.testsource(self.input(0), self.input(1)))
export_nodes(
Docfilesuite_Node,
Docfiletest_Node,
Doctestsuite_Node,
_Comment_Line_Node,
_Ellipsis_Match_Node,
_Exception_Traceback_Node,
_Extract_Future_Flags_Node,
_Indent_Node,
_Load_Testfile_Node,
_Module_Relative_Path_Node,
_Newline_Convert_Node,
_Normalize_Module_Node,
_Strip_Exception_Details_Node,
_Test_Node,
Debug_Node,
Debug_Script_Node,
Debug_Src_Node,
Namedtuple_Node,
Register_Optionflag_Node,
Run_Docstring_Examples_Node,
Script_From_Examples_Node,
Set_Unittest_Reportflags_Node,
Testfile_Node,
Testmod_Node,
Testsource_Node,
)
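# export_nodes() registers the classes above with the Ryven node environment so they show up as
# usable nodes in the editor.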
|
data/server/king_phisher/alembic/env.py | chachabooboo/king-phisher | 1,143 | 12695419 | <filename>data/server/king_phisher/alembic/env.py
from __future__ import with_statement
from alembic import context
from sqlalchemy import create_engine, pool
from logging.config import fileConfig
import os
import sys
kp_path = os.path.dirname(os.path.abspath(__file__))
kp_path = os.path.normpath(os.path.join(kp_path, '..', '..', '..', '..'))
sys.path.insert(1, kp_path)
from king_phisher.server.database import manager
from king_phisher.server.database import models
import yaml
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if not config.get_main_option('skip_logger_config'):
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
database_connection_url = config.get_main_option('sqlalchemy.url')
if not database_connection_url:
# consume the x arguments provided on the command line
x_arguments = context.get_x_argument(as_dictionary=True)
if 'config' in x_arguments:
server_config = yaml.safe_load(open(x_arguments['config']))
database_connection_url = server_config['server']['database']
elif 'database' in x_arguments:
database_connection_url = x_arguments['database']
else:
print('[-] the database connection string has not been specified, either')
print('[-] \'config\' or \'database\' must be specified via the -x option')
print('[-] for example:')
print(' -x database=driver://user:pass@localhost/dbname')
print(' -x config=/path/to/server/config/file')
os._exit(os.EX_USAGE)
database_connection_url = manager.normalize_connection_url(database_connection_url)
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=database_connection_url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(
database_connection_url,
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
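# Typical invocation (sketch, paths are placeholders):
#   alembic -x config=/path/to/server/config/file upgrade head
#   alembic -x database=driver://user:pass@localhost/dbname upgrade head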
|
source_code_in_theano/news_group_data.py | wasiahmad/paraphrase_identification | 126 | 12695426 | import numpy as np
from nltk import wordpunct_tokenize
import nltk
import itertools
import operator
import sklearn
import re, string
import math
SENTENCE_START_TOKEN = "sentence_<PASSWORD>"
SENTENCE_END_TOKEN = "sentence_<PASSWORD>"
UNKNOWN_TOKEN = "<PASSWORD>"
def load_data(loc='./data/'):
trainloc = loc + '20_news_group_sentences.txt'
sentences = []
with open(trainloc, 'r', encoding='utf8') as f:
for line in f:
sentences.append("%s %s %s" % (SENTENCE_START_TOKEN, line, SENTENCE_END_TOKEN))
return sentences
def build_dictionary(loc='./data/', vocabulary_size=-1):
trainloc = loc + '20_news_group_sentences.txt'
document_frequency = {}
total_document = 0
with open(trainloc, 'r', encoding='utf8') as f:
for line in f:
sentence = my_tokenizer(line)
for token in set(sentence):
if token in document_frequency:
document_frequency[token] += 1
else:
document_frequency[token] = 1
total_document += 1
for key, value in document_frequency.items():
document_frequency[key] = math.log(total_document / document_frequency[key])
vocab = sorted(document_frequency.items(), key=operator.itemgetter(1), reverse=True)
word_to_index = {}
index_to_word = {}
word_to_index[SENTENCE_START_TOKEN] = 0
word_to_index[SENTENCE_END_TOKEN] = 1
word_to_index[UNKNOWN_TOKEN] = 2
index_to_word[0] = SENTENCE_START_TOKEN
index_to_word[1] = SENTENCE_END_TOKEN
index_to_word[2] = UNKNOWN_TOKEN
counter = 3
for key, value in vocab:
if len(key) < 4:
continue
elif counter == vocabulary_size:
break
word_to_index[key] = counter
index_to_word[counter] = key
counter += 1
return word_to_index, index_to_word
def my_tokenizer(input):
token_list = []
tokens = wordpunct_tokenize(input.lower())
token_list.extend([x for x in tokens if not re.fullmatch('[' + string.punctuation + ']+', x)])
return token_list
def get_train_data(vocabulary_size):
word_to_index, index_to_word = build_dictionary(vocabulary_size=vocabulary_size)
sentences = load_data()
sentences_tokenized = [my_tokenizer(sent) for sent in sentences]
for i, sent in enumerate(sentences_tokenized):
sentences_tokenized[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
sentences_indices = []
for sentence in sentences_tokenized:
sentences_indices.append([word_to_index[word] for word in sentence])
return sentences_indices, word_to_index, index_to_word
def get_train_data_reversed(vocabulary_size):
sentences_indices, word_to_index, index_to_word = get_train_data(vocabulary_size)
sentences_indices_reversed = []
for index_list in sentences_indices:
temp = []
temp.extend(index_list)
temp.reverse()
sentences_indices_reversed.append(temp)
return sentences_indices_reversed, word_to_index, index_to_word
def get_train_sentences(vocabulary_size):
sentences_indices, word_to_index, index_to_word = get_train_data(vocabulary_size)
all_sentences = []
all_sentences.extend(sentences_indices)
x_train = np.asarray([[w for w in sentence[:-1]] for sentence in all_sentences])
y_train = np.asarray([[w for w in sentence[1:]] for sentence in all_sentences])
return x_train, y_train, word_to_index, index_to_word
def get_train_sentences_reversed(vocabulary_size):
sentences_indices_reversed, word_to_index, index_to_word = get_train_data_reversed(vocabulary_size)
all_sentences = []
all_sentences.extend(sentences_indices_reversed)
x_train = np.asarray([[w for w in sentence[:-1]] for sentence in all_sentences])
y_train = np.asarray([[w for w in sentence[1:]] for sentence in all_sentences])
return x_train, y_train, word_to_index, index_to_word
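# Usage sketch (the vocabulary size is an arbitrary example value):
#   x_train, y_train, word_to_index, index_to_word = get_train_sentences(vocabulary_size=8000)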
|
Algo and DSA/LeetCode-Solutions-master/Python/knight-probability-in-chessboard.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12695453 | # Time: O(k * n^2)
# Space: O(n^2)
class Solution(object):
def knightProbability(self, N, K, r, c):
"""
:type N: int
:type K: int
:type r: int
:type c: int
:rtype: float
"""
directions = \
[[ 1, 2], [ 1, -2], [ 2, 1], [ 2, -1], \
[-1, 2], [-1, -2], [-2, 1], [-2, -1]]
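# dp[k][i][j] = probability that a knight starting at (i, j) stays on the board for k moves.
# Each of the 8 moves is taken with probability 1/8 (the 0.125 factor below), and only the two
# most recent layers are kept via the rolling step % 2 index.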
dp = [[[1 for _ in xrange(N)] for _ in xrange(N)] for _ in xrange(2)]
for step in xrange(1, K+1):
for i in xrange(N):
for j in xrange(N):
dp[step%2][i][j] = 0
for direction in directions:
rr, cc = i+direction[0], j+direction[1]
if 0 <= cc < N and 0 <= rr < N:
dp[step%2][i][j] += 0.125 * dp[(step-1)%2][rr][cc]
return dp[K%2][r][c]
|
pytorch_tutorials/dqn/visualize.py | paulrschrater/tutorials | 118 | 12695470 | <filename>pytorch_tutorials/dqn/visualize.py
import io
import time
import cv2
import numpy as np
import pyglet
import tensorflow as tf
import tensorflow.keras.backend as K
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from pyglet.gl import *
from sklearn.decomposition import PCA
from config import (BATCH_SIZE, CLIP_REWARD, DISCOUNT_FACTOR, ENV_NAME,
EVAL_LENGTH, FRAMES_BETWEEN_EVAL, INPUT_SHAPE,
LEARNING_RATE, LOAD_FROM, MAX_EPISODE_LENGTH,
MAX_NOOP_STEPS, MEM_SIZE, MIN_REPLAY_BUFFER_SIZE,
PRIORITY_SCALE, SAVE_PATH, TOTAL_FRAMES, UPDATE_FREQ,
WRITE_TENSORBOARD)
from train_dqn import (Agent, GameWrapper, ReplayBuffer, build_q_network,
process_frame)
# My installations require I run this to avoid errors with cuDNN.
# You can remove it if your system doesn't require it.
# (it shouldn't mess anything up if you keep it in)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
# Change this to the path of the model you would like to visualize
RESTORE_PATH = None
if RESTORE_PATH is None:
raise UserWarning('Please change the variable `RESTORE_PATH` to where you would like to load the model from. If you haven\'t trained a model, try \'example-save\'')
ENV_NAME = 'BreakoutDeterministic-v4'
DISPLAY_FPS = False
DISPLAY_HUMAN_RENDERED = True
DISPLAY_MACHINE_RENDERED = True
DISPLAY_Q_VALUES = True
DISPLAY_VAL_CHART = True
DISPLAY_HEATMAP = True
# Create environment
game_wrapper = GameWrapper(ENV_NAME, MAX_NOOP_STEPS)
print("The environment has the following {} actions: {}".format(game_wrapper.env.action_space.n, game_wrapper.env.unwrapped.get_action_meanings()))
# Create agent
MAIN_DQN = build_q_network(game_wrapper.env.action_space.n, LEARNING_RATE, input_shape=INPUT_SHAPE)
TARGET_DQN = build_q_network(game_wrapper.env.action_space.n, input_shape=INPUT_SHAPE)
replay_buffer = ReplayBuffer(size=MEM_SIZE, input_shape=INPUT_SHAPE)
agent = Agent(MAIN_DQN, TARGET_DQN, replay_buffer, game_wrapper.env.action_space.n, input_shape=INPUT_SHAPE)
print('Loading agent...')
agent.load(RESTORE_PATH)
def display_nparray(arr, maxwidth=500):
assert len(arr.shape) == 3
height, width, _channels = arr.shape
if width > maxwidth:
scale = maxwidth / width
width = int(scale * width)
height = int(scale * height)
image = pyglet.image.ImageData(arr.shape[1], arr.shape[0], 'RGB', arr.tobytes(), pitch=arr.shape[1]*-3)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
texture = image.get_texture()
texture.width = width
texture.height = height
return texture
def generate_heatmap(frame, model):
with tf.GradientTape() as tape:
last_conv_layer = model.get_layer('conv2d_2')
iterate = tf.keras.models.Model([model.inputs], [model.output, last_conv_layer.output])
model_out, last_conv_layer = iterate(frame[np.newaxis, :, :, :])
class_out = model_out[:, np.argmax(model_out[0])]
grads = tape.gradient(class_out, last_conv_layer)
pooled_grads = K.mean(grads, axis=(0, 1, 2))
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer), axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
heatmap = heatmap.reshape((7, 7))
heatmap = cv2.resize(heatmap, (frame.shape[1], frame.shape[0]))
heatmap = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET) / 255
return heatmap
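# Grad-CAM-style saliency: gradients of the argmax Q-value w.r.t. the last conv layer are
# channel-averaged into weights, the weighted activations are ReLU'd and normalized, and the
# result is resized and colour-mapped for overlaying on the game frame.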
class VisWindow(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_minimum_size(400, 300)
self.frame_rate = 1/60
self.max_q_val = 0.1
self.min_q_val = -0.1
self.fps_display = pyglet.window.FPSDisplay(self)
self.fps_display.label.x = self.width-100
self.fps_display.label.y = self.height-50
# For drawing screens
self.game_image = np.ones((210, 160, 3))
self.state_image = np.ones((84, 84, 4))
# For keeping simulating the game
self.terminal = True
self.eval_rewards = []
self.evaluate_frame_number = 0
self.episode_reward_sum = 0
self.life_lost = True
self.q_vals = [0]*game_wrapper.env.action_space.n
self.values = []
# Text
self.human_title = pyglet.text.Label('Human-Rendered Game Screen', font_size=20, color=(0, 0, 0, 255), x=10, y=self.height-20, anchor_y='center')
self.q_val_title = pyglet.text.Label('Q-Values', font_size=20, color=(0, 0, 0, 255), x=500, y=self.height-20, anchor_y='center')
self.agent_title = pyglet.text.Label('Agent-Rendered Game Screen', font_size=20, color=(0, 0, 0, 255), x=10, y=235, anchor_y='center')
self.heatmap_title = pyglet.text.Label('Attention Heatmap', font_size=20, color=(0, 0, 0, 255), x=1000, y=self.height-140, anchor_y='center')
self.action_titles = []
for i, action in enumerate(game_wrapper.env.unwrapped.get_action_meanings()):
self.action_titles.append(pyglet.text.Label(action, font_size=20, color=(0, 0, 0, 255), x=0, y=0, anchor_x='center'))
def on_draw(self):
self.clear()
glClearColor(1., 1., 1., 1.)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
self.switch_to()
self.dispatch_events()
# Draw FPS counter
if DISPLAY_FPS:
self.fps_display.draw()
# Display RGB "human" version of the game state
if DISPLAY_HUMAN_RENDERED:
self.human_title.draw()
base_dimensions = (210, 160)
scale = 2
display_nparray(cv2.resize(
self.game_image,
dsize=(int(base_dimensions[1]*scale), int(base_dimensions[0]*scale)),
interpolation=cv2.INTER_CUBIC))\
.blit(50, self.height-base_dimensions[0]*scale-50)
# Display grayscale "machine" version of the game state (this is what the agent really sees)
if DISPLAY_MACHINE_RENDERED:
self.agent_title.draw()
base_dimensions = (84, 84)
scale = 2.5
# For some strange reason, we must render this in RGB mode (not 'L' mode as implied in the pyglet docs)
# Because of this, we must repeat each frame 3 times to simulate RGB, despite being grayscale
state_images = [np.repeat(self.state_image[:, :, i, np.newaxis], 3, axis=2) for i in range(self.state_image.shape[-1])]
for i, state_image in enumerate(state_images):
display_nparray(cv2.resize(state_image,
dsize=(int(base_dimensions[1]*scale), int(base_dimensions[0]*scale)),
interpolation=cv2.INTER_CUBIC))\
.blit(10+i*(84*scale+5), 10)
# Display q-values
if DISPLAY_Q_VALUES:
self.q_val_title.draw()
LENGTH = 80
STARTING_X = 400
for i, (q_val, label) in enumerate(zip(self.q_vals[::-1], self.action_titles[::-1])):
if q_val > self.max_q_val:
self.max_q_val = q_val
elif q_val < self.min_q_val:
self.min_q_val = q_val
# Draw square represention q-val
x_value = STARTING_X + i*(LENGTH+10) # x-coordinate to draw square
color = (150**(q_val*2)) / (sum([150**(q*2) for q in self.q_vals]) + 0.0001)
pyglet.graphics.draw(4, GL_QUADS,
('v2f', (x_value,self.height-50, x_value+LENGTH,self.height-50, x_value+LENGTH,self.height-LENGTH-50, x_value,self.height-LENGTH-50)),
('c3f', (color,color,color, color,color,color, color,color,color, color,color,color)))
# Draw action label
glTranslatef(x_value+LENGTH/2, self.height-100-LENGTH, 0.0)
glRotatef(-90.0, 0.0, 0.0, 1.0)
label.draw()
glRotatef(90.0, 0.0, 0.0, 1.0)
glTranslatef(-(x_value+LENGTH/2), -(self.height-100-LENGTH), 0.0)
# Display value history (adapted from https://learning.oreilly.com/library/view/matplotlib-plotting-cookbook/9781849513265/ch08s06.html)
if DISPLAY_VAL_CHART:
dpi_res = min(self.width, self.height) / 10
fig = Figure((500 / dpi_res, 230 / dpi_res), dpi=dpi_res)
ax = fig.add_subplot(111)
# Set up plot
ax.set_title('Estimated Value over Time', fontsize=20)
ax.set_xticklabels([])
ax.set_ylabel('V(s)')
ax.plot(self.values[max(len(self.values)-200, 0):]) # plot values
w, h = fig.get_size_inches()
dpi_res = fig.get_dpi()
w, h = int(np.ceil(w * dpi_res)), int(np.ceil(h * dpi_res))
canvas = FigureCanvasAgg(fig)
pic_data = io.BytesIO()
canvas.print_raw(pic_data, dpi=dpi_res)
img = pyglet.image.ImageData(w, h, 'RGBA', pic_data.getvalue(), -4 * w)
img.blit(375, 265)
# Display heatmap
if DISPLAY_HEATMAP and self.evaluate_frame_number > 1:
self.heatmap_title.draw()
base_dimensions = (84, 84)
INTENSITY = 0.1
scale = 10
processed_frame = np.repeat(self.state_image[:, :, 3, np.newaxis], 3, axis=2)
heatmap = generate_heatmap(game_wrapper.state, agent.DQN)
img = (heatmap*255 * INTENSITY + processed_frame * 0.8).astype(np.uint8)
display_nparray(cv2.resize(img + (heatmap*255*INTENSITY).astype(np.uint8),
dsize=(int(base_dimensions[1]*scale), int(base_dimensions[0]*scale)),
interpolation=cv2.INTER_CUBIC)).blit(880, 60)
self.flip()
def update(self, dt):
if self.terminal:
game_wrapper.reset(evaluation=True)
self.life_lost = True
self.episode_reward_sum = 0
self.terminal = False
self.q_vals, value = agent.get_intermediate_representation(game_wrapper.state, ['add', 'dense'], stack_state=False)
self.q_vals, value = self.q_vals[0], value[0]
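        # After a lost life, force action 1 (typically FIRE in Atari wrappers) to restart play; otherwise act greedily on the Q-values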
action = 1 if self.life_lost else self.q_vals.argmax()
self.values.append(value)
_, reward, self.terminal, self.life_lost, self.game_image = game_wrapper.step(action, render_mode='rgb_array')
self.evaluate_frame_number += 1
self.episode_reward_sum += reward
self.state_image = game_wrapper.state
if self.terminal:
self.eval_rewards.append(self.episode_reward_sum)
self.values = []
if __name__ == "__main__":
print('Finished setup. Visualizing...')
window = VisWindow(1400, 720, "RL Visualizer", resizable=True)
pyglet.clock.schedule_interval(window.update, window.frame_rate)
pyglet.app.run()
|
questions/binary-tree-tilt/Solution.py | marcus-aurelianus/leetcode-solutions | 141 | 12695472 | """
Given the root of a binary tree, return the sum of every tree node's tilt.
The tilt of a tree node is the absolute difference between the sum of all left subtree node values and all right subtree node values. If a node does not have a left child, then the sum of the left subtree node values is treated as 0. The rule is similar if the node does not have a right child.
Example 1:
Input: root = [1,2,3]
Output: 1
Explanation:
Tilt of node 2 : |0-0| = 0 (no children)
Tilt of node 3 : |0-0| = 0 (no children)
Tilt of node 1 : |2-3| = 1 (left subtree is just left child, so sum is 2; right subtree is just right child, so sum is 3)
Sum of every tilt : 0 + 0 + 1 = 1
Example 2:
Input: root = [4,2,9,3,5,null,7]
Output: 15
Explanation:
Tilt of node 3 : |0-0| = 0 (no children)
Tilt of node 5 : |0-0| = 0 (no children)
Tilt of node 7 : |0-0| = 0 (no children)
Tilt of node 2 : |3-5| = 2 (left subtree is just left child, so sum is 3; right subtree is just right child, so sum is 5)
Tilt of node 9 : |0-7| = 7 (no left child, so sum is 0; right subtree is just right child, so sum is 7)
Tilt of node 4 : |(3+5+2)-(9+7)| = |10-16| = 6 (left subtree values are 3, 5, and 2, which sum to 10; right subtree values are 9 and 7, which sum to 16)
Sum of every tilt : 0 + 0 + 0 + 2 + 7 + 6 = 15
Example 3:
Input: root = [21,7,14,1,1,2,2,3,3]
Output: 9
Constraints:
The number of nodes in the tree is in the range [0, 10^4].
-1000 <= Node.val <= 1000
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def find_tilt(root):
if root is None:
return 0, 0
lstilt, ls = find_tilt(root.left)
rstilt, rs = find_tilt(root.right)
return abs(ls - rs) + lstilt + rstilt, ls + rs + root.val
stilt, s = find_tilt(root)
return stilt |
cle/backends/static_archive.py | Atipriya/cle | 317 | 12695474 | import logging
from . import Backend, register_backend
from ..errors import CLEError
try:
import arpy
except ImportError:
arpy = None
l = logging.getLogger(__name__)
class StaticArchive(Backend):
@classmethod
def is_compatible(cls, stream):
stream.seek(0)
return stream.read(8) == b'!<arch>\n'
is_default = True
def __init__(self, *args, **kwargs):
if arpy is None:
raise CLEError("run `pip install arpy==1.1.1` to load archive files")
super().__init__(*args, **kwargs)
# hack: we are using a loader internal method in a non-kosher way which will cause our children to be
# marked as the main binary if we are also the main binary
        # work around this by setting ourselves here:
if self.loader.main_object is None:
self.loader.main_object = self
ar = arpy.Archive(fileobj=self._binary_stream)
ar.read_all_headers()
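        # Load each archive member as its own child object, named after the member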
for name, stream in ar.archived_files.items():
child = self.loader._load_object_isolated(stream)
child.binary = child.binary_basename = name.decode()
child.parent_object = self
self.child_objects.append(child)
if self.child_objects:
self.arch = self.child_objects[0].arch
else:
l.warning("Loaded empty static archive?")
self.has_memory = False
self.pic = True
# hack pt. 2
if self.loader.main_object is self:
self.loader.main_object = None
register_backend('AR', StaticArchive)
|
recipes/sota/2019/lm_corpus_and_PL_generation/generate_uniq.py | Zilv1128/test1 | 5,921 | 12695489 | <filename>recipes/sota/2019/lm_corpus_and_PL_generation/generate_uniq.py
import sys
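# Deduplicate the lines of the input file (argv[1]) and write the unique set to "<input>.unique"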
pl_data = []
with open(sys.argv[1], "r") as f:
for line in f:
pl_data.append(line.strip())
pl_data = set(pl_data)
with open(sys.argv[1] + ".unique", "w") as f:
for elem in pl_data:
f.write(elem + "\n")
|
elastichq/common/JobPool.py | billboggs/elasticsearch-HQ | 2,026 | 12695498 | from elastichq.globals import scheduler
# TODO: rename this to Metrics Service and move to service package
class JobPool():
app = None
def init_app(self, app):
self.app = app
return self
def blah(self):
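        # Register an interval job that runs do_task every 3 seconds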
JOB = {
'trigger': 'interval',
'seconds': 3 # ,
# 'args': (app, 'in')
}
scheduler.add_job('job1', self.do_task, **JOB)
def do_task(self):
from elastichq.service import ClusterService
clusters = ClusterService().get_clusters(create_if_missing=False)
return clusters
|
01_BlueBorne/l2cap_infra/l2cap_infra.py | Charmve/BLE-Security-Att-Def | 149 | 12695499 | <gh_stars>100-1000
import sys
from scapy.layers.bluetooth import *
import binascii
from traced_bt_user_sock import BluetoothUserSocket_WithTrace
# TODO: Allocate scid dynamically (currently it is hard coded to OUR_LOCAL_SCID)
OUR_LOCAL_SCID = 0x40
def hci_devid(dev):
    # Replacement for bluez's hci_devid because we don't care if the interface is
# down
if not dev.startswith('hci'):
raise ValueError()
if not dev[3:].isdigit():
raise ValueError()
return int(dev[3:])
# Hack around bad definitions in scapy
def unbind_layers(lower, upper):
lower.payload_guess = [(fval, pay) for (fval, pay) in lower.payload_guess if
pay is not upper]
lower.payload_guess.append((fval, upper))
unbind_layers(HCI_Event_Hdr, HCI_Event_Number_Of_Completed_Packets)
def to_opcode(ogf, ocf):
return (ogf << 10) | ocf
class HCI_Cmd_Create_Connection(Packet):
name = "Create Connection"
fields_desc = [ LEMACField("bd_addr", None),
LEShortField("packet_type", 0xcc18),
ByteEnumField("page_scan_repetition_mode", 2,
{0: "R0", 1: "R1", 2: "R2"}),
ByteField("reserved", 0),
LEShortField("clock_offset", 0),
ByteEnumField("allow_role_switch", 0,
{0: "no", 1: "yes"}), ]
class HCI_Event_Connection_Complete(Packet):
name = "Connection Complete"
fields_desc = [ ByteEnumField("status", 0, {0:"success"}),
XLEShortField("connection_handle", 0),
LEMACField("bd_addr", None),
ByteEnumField("link_type", 1,
{0: "sco", 1: "acl"}),
ByteEnumField("encryption_enabled", 0,
{0: "disabled", 1: "enabled"}), ]
class HCI_Cmd_Read_Remote_Supported_Features(Packet):
name = "Read Remote Supported Features"
fields_desc = [ XLEShortField("connection_handle", 0), ]
class HCI_Event_Read_Remote_Supported_Features_Complete(Packet):
name = "Read Remote Supported Features Complete"
fields_desc = [ ByteEnumField("status", 0, {0: "success"}),
XLEShortField("connection_handle", 0),
StrFixedLenField("lmp_features", '\x00' * 8, 8), ]
class HCI_Event_Number_Of_Completed_Packets(Packet):
name = "Number Of Completed Packets"
fields_desc = [ ByteField("number_of_handles", 0),
FieldListField("connection_handle", [],
XLEShortField("", 0),
count_from=lambda pkt: pkt.number_of_handles),
FieldListField("hc_num_of_completed_packets", [],
XLEShortField("", 0),
count_from=lambda pkt: pkt.number_of_handles),
]
bind_layers(HCI_Command_Hdr, HCI_Cmd_Create_Connection,
opcode=to_opcode(0x01, 0x0005))
bind_layers(HCI_Event_Hdr, HCI_Event_Connection_Complete, code=0x03)
bind_layers(HCI_Command_Hdr, HCI_Cmd_Read_Remote_Supported_Features,
opcode=to_opcode(0x01, 0x001b))
bind_layers(HCI_Event_Hdr, HCI_Event_Read_Remote_Supported_Features_Complete,
code=0x0b)
bind_layers(HCI_Event_Hdr, HCI_Event_Number_Of_Completed_Packets, code=0x13)
class Loop(object):
def __init__(self, sock):
self._sock = sock
self._waiters = {}
def on(self, condition, handler):
if handler is None:
handler = lambda loop, packet: None
if condition not in self._waiters:
self._waiters[condition] = []
self._waiters[condition].append(handler)
def on_pkt(self, layer, handler):
self.on(lambda packet: packet is not None and layer in packet, handler)
def ignore(self, condition):
self.on(condition, None)
def ignore_pkt(self, layer):
return self.on_pkt(layer, None)
def _build_queue(self, packet):
result = []
for condition in self._waiters.keys():
if not condition(packet):
continue
result.extend(self._waiters[condition])
del self._waiters[condition]
return result
def _iterate_with_packet(self, packet):
queue = self._build_queue(packet)
if len(queue) == 0:
print 'WARNING: ignored packet %s' % (repr(packet), )
return []
results = []
while len(queue) != 0:
results.extend([handler(self, packet) for handler in queue])
queue = self._build_queue(None)
return filter(lambda x: x is not None, results)
def iterate(self):
packet = self._sock.recv()
# print('<< %s' % (repr(packet), ))
return self._iterate_with_packet(packet)
def is_waiting(self):
return len(self._waiters) != 0
def cont(self):
while self.is_waiting():
results = self.iterate()
if len(results) != 0:
return results
return []
def finish(self):
while self.is_waiting():
self.iterate()
def send(self, packet):
# print('>> %s' % (repr(packet), ))
self._sock.send(packet)
L2CAP_DEFAULT_MTU = 672
class L2CAP(object):
def __init__(self, loop, handle):
self._loop = loop
self._total_length = None
self._data = ''
self._handle = handle
self._queue = []
self._call_on_data()
self.drop_acl_mode = False
def _unpack_packet_handle_and_flags(self, packet):
assert HCI_ACL_Hdr in packet
# HCI_ACL_Hdr definition in scapy is wrong; don't have time to fix it
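        # On the wire the 12-bit connection handle and the 4-bit PB/BC flags share one 16-bit little-endian field; reassemble them from scapy's two byte-sized fields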
packet_handle = (packet[HCI_ACL_Hdr].flags & 0x0f) << 8
packet_handle |= packet[HCI_ACL_Hdr].handle
packet_flags = packet[HCI_ACL_Hdr].flags >> 4
return packet_handle, packet_flags
def _is_relevant(self, packet):
if packet is None:
return False
if HCI_ACL_Hdr not in packet:
return False
if self.drop_acl_mode:
return False
packet_handle, packet_flags = self._unpack_packet_handle_and_flags(packet)
return self._handle == packet_handle
def _flush(self):
data = self._data
assert len(data) == self._total_length
self._data = ''
self._total_length = None
self._queue.append(L2CAP_Hdr(data))
def _handle_acl(self, loop, packet):
assert not self.drop_acl_mode
self._call_on_data()
packet_handle, packet_flags = self._unpack_packet_handle_and_flags(packet)
if self._total_length is None:
self._total_length = packet[HCI_ACL_Hdr].len
else:
assert packet_flags & 0x02 == 0x02, "Expected continuation packet"
self._data += str(packet[HCI_ACL_Hdr].payload)
if len(self._data) < self._total_length:
return None
self._flush()
return True
def _call_on_data(self):
self._loop.on(self._is_relevant, self._handle_acl)
def recv(self):
while len(self._queue) == 0:
assert self._loop.cont() == [True]
return self._queue.pop(0)
def _verify_sent(self, _, packet):
index = packet[HCI_Event_Number_Of_Completed_Packets].connection_handle.index(self._handle)
return ('send_ack', packet[HCI_Event_Number_Of_Completed_Packets].hc_num_of_completed_packets[index])
def send(self, l2cap):
# Here we perform ACL fragmentation.
        # For simplicity we chose to split the fragments based on L2CAP_DEFAULT_MTU.
        # However, the correct way to do it is to use the specific controller's MTU limitations
        # and/or the currently negotiated MTU of the connection.
for i in range(0, len(str(l2cap)), L2CAP_DEFAULT_MTU):
self.send_fragment(Raw(str(l2cap)[i:i+L2CAP_DEFAULT_MTU]), i == 0)
def send_fragment(self, frag, is_first):
flags = 0
if not is_first:
flags |= 1
# HCI_ACL_Hdr is a piece of shit, also see rant above
scapy_handle = self._handle & 0xff
scapy_flags = self._handle >> 8 | ((flags & 0x0f) << 4)
hci = HCI_Hdr() / HCI_ACL_Hdr(handle=scapy_handle, flags=scapy_flags) / frag
self._loop.on(lambda pkt: (pkt is not None and
HCI_Event_Number_Of_Completed_Packets in pkt and
self._handle in pkt.connection_handle),
self._verify_sent)
self._loop.send(hci)
while True:
result = self._loop.cont()
if result == [True]:
continue
break
assert result == [('send_ack', True)]
def is_complete_evt_for_cmd(cmd, event):
if event is None:
return False
if HCI_Event_Command_Complete not in event:
return False
if event.code != 0x0e:
return False
if event.opcode != cmd.opcode:
return False
return True
def is_pending_evt_for_cmd(cmd, event):
if event is None:
return False
if HCI_Event_Command_Status not in event:
return False
if event.code != 0x0f:
return False
if event.opcode != cmd.opcode:
return False
return event.status == 0
def reset(loop):
cmd = HCI_Hdr() / HCI_Command_Hdr() / HCI_Cmd_Reset()
loop.on(lambda evt: is_complete_evt_for_cmd(cmd, evt),
lambda loop, evt: evt.status == 0)
loop.send(cmd)
assert loop.cont() == [True]
def acl_connect(loop, addr):
cmd = HCI_Hdr() / HCI_Command_Hdr() / HCI_Cmd_Create_Connection(bd_addr=addr)
loop.ignore(lambda evt: is_pending_evt_for_cmd(cmd, evt))
loop.on_pkt(HCI_Event_Connection_Complete,
lambda loop, evt: evt[HCI_Event_Connection_Complete])
loop.send(cmd)
result = loop.cont()
assert len(result) == 1
result = result[0]
return result.status == 0, result.connection_handle
def ignore_evt(loop, code):
loop.ignore(lambda evt: evt is not None and HCI_Event_Hdr in evt and evt.code == code)
def read_remote_supported_features(loop, handle):
cmd = (HCI_Hdr() / HCI_Command_Hdr() /
HCI_Cmd_Read_Remote_Supported_Features(connection_handle=handle))
loop.ignore(lambda evt: is_pending_evt_for_cmd(cmd, evt))
loop.ignore_pkt(HCI_Event_Read_Remote_Supported_Features_Complete)
loop.send(cmd)
def is_info_req(info_req):
return info_req is not None and L2CAP_InfoReq in info_req
def is_info_req_features(info_req):
return is_info_req(info_req) and info_req.type == 2
def is_info_rsp(pkt):
return pkt is not None and L2CAP_InfoResp in pkt
def is_info_rsp_features(pkt):
return is_info_rsp(pkt) and pkt.id == 1 and pkt.type == 2
def is_info_rsp_fixed_channels(pkt):
return is_info_rsp(pkt) and pkt.id == 1 and pkt.type == 3
def is_info_req_fixed_channels(info_req):
return is_info_req(info_req) and info_req.type == 3
def reply_to_info_req_features(loop, info_req):
features = binascii.unhexlify('b8020000')
resp = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=info_req.id) /
L2CAP_InfoResp(type=2, result=0, data=features))
loop.send(resp)
return True
def reply_to_info_req_fixed_channels(loop, info_req):
features = binascii.unhexlify('0600000000000000')
resp = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=info_req.id) /
L2CAP_InfoResp(type=3, result=0, data=features))
loop.send(resp)
return True
def send_info_req_features(loop):
info_req = L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) / L2CAP_InfoReq(type=2)
loop.on(lambda pkt: pkt is not None and
L2CAP_InfoResp in pkt and
pkt.id == 1 and
pkt.type == 2,
lambda loop, pkt: True)
loop.send(info_req)
assert loop.cont() == [True]
def send_info_req_fixed_channels(loop):
info_req = L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) / L2CAP_InfoReq(type=3)
loop.on(lambda pkt: pkt is not None and
L2CAP_InfoResp in pkt and
pkt.id == 1 and
pkt.type == 3,
lambda loop, pkt: True)
loop.send(info_req)
assert loop.cont() == [True]
def l2cap_connect(loop, psm='SDP', scid=OUR_LOCAL_SCID):
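    # CID 1 is the L2CAP signaling channel: send the Connection Request there and wait for the matching Connection Response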
connect_req = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) /
L2CAP_ConnReq(psm=psm, scid=scid))
loop.on_pkt(L2CAP_ConnResp,
lambda loop, pkt: pkt)
loop.send(connect_req)
result = loop.cont()
assert len(result) == 1
connect_resp = result[0]
assert L2CAP_ConnResp in connect_resp
assert connect_resp.id == 1
assert connect_resp.scid == scid
assert connect_resp.status == 0
return connect_resp.dcid
def reply_to_conf_req_unaccept(loop, scid, dcid):
loop.on(lambda conf_req: conf_req is not None and
L2CAP_ConfReq in conf_req and
conf_req.dcid == scid,
lambda loop, conf_req: loop.send(L2CAP_Hdr(cid=1) /
L2CAP_CmdHdr(id=conf_req.id) /
L2CAP_ConfResp(scid=dcid, flags=0, result='unaccept') /
Raw(binascii.unhexlify('01020002'))) or True)
assert loop.cont() == [True]
def reply_to_conf_req_accept(loop, scid, dcid):
# We agree to any configuration requested by the other peer.
loop.on(lambda conf_req: conf_req is not None and
L2CAP_ConfReq in conf_req and
conf_req.dcid == scid,
lambda loop, conf_req: loop.send(L2CAP_Hdr(cid=1) /
L2CAP_CmdHdr(id=conf_req.id) /
L2CAP_ConfResp(scid=dcid, flags=0, result='success')))
# Do the lockstep configuration process (with EFS) - only with targets which support this.
def lockstep_efs_conf_process(loop, scid, dcid):
# Note that stype == L2CAP_SERV_NOTRAFIC (0) which is important
efs = binascii.unhexlify('0610') + (binascii.unhexlify('00') * 0x10)
conf_req = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) /
L2CAP_ConfReq(dcid=dcid, flags=0) /
Raw(binascii.unhexlify('0409000000000000000000') + efs))
loop.on(lambda conf_resp: conf_resp is not None and
conf_resp.id == 1 and
L2CAP_ConfResp in conf_resp and
conf_resp.scid == scid and
conf_resp.result == 4, # pending
lambda loop, conf_resp: conf_resp)
loop.send(conf_req)
conf_resp = loop.cont()[0]
resp = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=conf_req.id) /
L2CAP_ConfResp(scid=dcid, flags=0, result=4) /
Raw(binascii.unhexlify('01020004')))
loop.on(lambda conf_resp: conf_resp is not None and
conf_resp.id == 1 and
L2CAP_ConfResp in conf_resp and
conf_resp.scid == scid and
conf_resp.result == 0,
lambda loop, conf_resp: conf_resp)
loop.send(resp)
conf_resp = loop.cont()[0]
resp = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=conf_req.id) /
L2CAP_ConfResp(scid=dcid, flags=0, result=0))
loop.send(resp)
# Do the standard configuration process
def standard_conf_process(loop, scid, dcid):
conf_req = (L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) /
L2CAP_ConfReq(dcid=dcid, flags=0) /
Raw(binascii.unhexlify('0102a002')))
loop.on(lambda conf_resp: conf_resp is not None and
conf_resp.id == 1 and
L2CAP_ConfResp in conf_resp and
conf_resp.scid == scid and
conf_resp.result == 0, # success
lambda loop, conf_resp: conf_resp)
loop.send(conf_req)
loop.cont()
def handle_information_negotiation_process(l2cap_loop):
# There is an inherent race that might exist in the information negotiation process.
# If both sides of the connection are waiting for the other side to send the first info req
# the connection will be deadlocked. So we start by sending are own info request.
info_req = L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) / L2CAP_InfoReq(type=2)
l2cap_loop.send(info_req)
l2cap_loop.on(is_info_req_features, reply_to_info_req_features)
l2cap_loop.on(is_info_rsp_features, lambda loop, pkt: True)
# We wait for two events to be handled:
# 1. An info request was received, and we have replied with 'info_rsp'
# (reply_to_info_req_features returned True)
# 2. An info rsp message was returned (in response to the info_req we initially sent).
# The order of the two events is not important, so we just wait for two 'True' returns.
assert l2cap_loop.cont() == [True]
assert l2cap_loop.cont() == [True]
    # The same practice as above, only for the "fixed channels" info request/response.
info_req = L2CAP_Hdr(cid=1) / L2CAP_CmdHdr(id=1) / L2CAP_InfoReq(type=3)
l2cap_loop.send(info_req)
l2cap_loop.on(is_info_req_fixed_channels, reply_to_info_req_fixed_channels)
l2cap_loop.on(is_info_rsp_fixed_channels, lambda loop, pkt: True)
assert l2cap_loop.cont() == [True]
assert l2cap_loop.cont() == [True]
def create_l2cap_connection(interface, target, psm='SDP', with_mutual_config=True, pcap_path=None):
os.system("hciconfig %s down" % interface)
if pcap_path:
user_socket = BluetoothUserSocket_WithTrace(pcap_path, hci_devid(interface))
else:
user_socket = BluetoothUserSocket(hci_devid(interface))
loop = Loop(user_socket)
reset(loop)
is_connected, handle = acl_connect(loop, target)
if not is_connected:
print("Unable to connect target via Bluetooth")
sys.exit(1)
print('Handle = %04x' % (handle, ))
# Configure connection and initiate config handshake
ignore_evt(loop, 0x20) # Page scan repetition mode
ignore_evt(loop, 0x1b) # Max slots change
read_remote_supported_features(loop, handle)
l2cap_loop = Loop(L2CAP(loop, handle))
########################################
    # This is the 'naive' way to handle the information request/response:
    # Wait for the peer to send its requests, and respond to them,
    # and then send our own info requests.
########################################
# l2cap_loop.on(is_info_req_features, reply_to_info_req_features)
# l2cap_loop.on(is_info_req_fixed_channels, reply_to_info_req_fixed_channels)
# send_info_req_features(l2cap_loop)
# send_info_req_fixed_channels(l2cap_loop)
    # The above code tends to deadlock under certain conditions (some race condition),
    # so the following function works better:
handle_information_negotiation_process(l2cap_loop)
    # An ACL connection is established, create an L2CAP connection over it.
dcid = l2cap_connect(l2cap_loop, psm=psm)
print('DCID = %x' % (dcid, ))
if with_mutual_config:
l2cap_mutual_configration(l2cap_loop, dcid)
return l2cap_loop, dcid
def l2cap_mutual_configration(l2cap_loop, dcid):
# Register handler to accept any configuration request coming from the other peer.
reply_to_conf_req_accept(l2cap_loop, OUR_LOCAL_SCID, dcid)
    # Negotiate our own configuration parameters, using the standard procedure (lockstep_efs_conf_process is the EFS/pending-state variant)
standard_conf_process(l2cap_loop, OUR_LOCAL_SCID, dcid)
# Reaching this phase, the connection is in CONNECTED state.
def main(src_hci, dst_bdaddr, pcap_path=None):
l2cap_loop, _ = create_l2cap_connection(src_hci, dst_bdaddr, pcap_path=pcap_path)
    # Sending 'test' to the established l2cap connection
print("Sending 'test' in l2cap connection")
l2cap_loop.send(L2CAP_Hdr(cid=OUR_LOCAL_SCID) / Raw('test'))
l2cap_loop.on(lambda pkt: True,
lambda loop, pkt: pkt)
# And printing the returned data.
print(repr(l2cap_loop.cont()))
l2cap_loop.finish()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: l2cap_infra.py <src-hci> <dst-bdaddr> (<pcap_path>)")
else:
main(*sys.argv[1:])
|
mindsdb/api/mysql/mysql_proxy/datahub/__init__.py | yarenty/mindsdb | 261 | 12695500 | <reponame>yarenty/mindsdb
from mindsdb.api.mysql.mysql_proxy.datahub.datahub import init_datahub |
neural_sp/models/modules/mocha/mocha_test.py | ishine/neural_sp | 577 | 12695506 | <gh_stars>100-1000
# Copyright 2021 Kyoto University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Chunkwise attention in MoChA at test time."""
import logging
import numpy as np
import torch
logger = logging.getLogger(__name__)
def hard_chunkwise_attention(alpha, u, mask, chunk_size, H_ca,
sharpening_factor, share_chunkwise_attention):
"""Chunkwise attention in MoChA at test time.
Args:
alpha (FloatTensor): `[B, H_ma, qlen, klen]`
u (FloatTensor): `[B, (H_ma*)H_ca, qlen, klen]`
mask (ByteTensor): `[B, qlen, klen]`
chunk_size (int): window size for chunkwise attention
H_ca (int): number of chunkwise attention heads
        sharpening_factor (float): sharpening factor for beta calculation
share_chunkwise_attention (int): share CA heads among MA heads
Returns:
beta (FloatTensor): `[B, H_ma * H_ca, qlen, klen]`
"""
bs, H_ma, qlen, klen = alpha.size()
assert (u.size(2) == qlen) and (u.size(3) == klen), (u.size(), alpha.size())
alpha = alpha.unsqueeze(2) # `[B, H_ma, 1, qlen, klen]`
u = u.unsqueeze(1) # `[B, 1, (H_ma*)H_ca, qlen, klen]`
if H_ca > 1:
alpha = alpha.repeat([1, 1, H_ca, 1, 1])
if H_ma > 1:
if share_chunkwise_attention:
u = u.repeat([1, H_ma, 1, 1, 1])
else:
u = u.view(bs, H_ma, H_ca, qlen, klen)
mask = alpha.clone().byte() # `[B, H_ma, H_ca, qlen, klen]`
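    # Build a per-head mask that is nonzero only inside the chunk window ending at each head's hard-attention boundary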
for b in range(bs):
for h in range(H_ma):
if alpha[b, h, 0, 0].sum() > 0:
boundary = alpha[b, h, 0, 0].nonzero()[:, -1].min().item()
if chunk_size == -1:
# infinite lookback attention
mask[b, h, :, 0, 0:boundary + 1] = 1
else:
mask[b, h, :, 0, max(0, boundary - chunk_size + 1):boundary + 1] = 1
NEG_INF = float(np.finfo(torch.tensor(0, dtype=u.dtype).numpy().dtype).min)
u = u.masked_fill(mask == 0, NEG_INF)
beta = torch.softmax(u, dim=-1)
return beta.view(bs, -1, qlen, klen)
|
tools/device_file_generator/dfg/avr/avr_writer.py | roboterclubaachen/xpcc | 161 | 12695516 | <reponame>roboterclubaachen/xpcc
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import itertools
from logger import Logger
from ..writer import XMLDeviceWriter
from . import avr_io
class AVRDeviceWriter(XMLDeviceWriter):
""" AVRDeviceWriter
    Translates the Device to an XPCC-specific format.
"""
def __init__(self, device, logger=None):
XMLDeviceWriter.__init__(self, device, logger)
self.root.removeAttribute('size_id')
self.log.info(("Generating Device File for '%s'." % self.device.ids.string))
self.types = self.device.ids.getAttribute('type')
self.pin_ids = self.device.ids.getAttribute('pin_id')
self.names = self.device.ids.getAttribute('name')
self.family = self.device.ids.intersection.family
# search the io dictionary for this device
# we only need one pin name to identify the device group
pin_name = self.device.getProperty('pin-name').values[0].value
self.io = [a for a in avr_io.pins if pin_name in a['devices']]
if len(self.io) > 0:
self.io = self.io[0]
else:
self.io = {}
if self.device.id.family != 'xmega':
self.log.warn("AvrWriter: IO not found for device '%s' with pin-name: '%s'" % (self.device.id.string, pin_name))
self.addDeviceAttributesToNode(self.root, 'flash')
self.addDeviceAttributesToNode(self.root, 'ram')
self.addDeviceAttributesToNode(self.root, 'eeprom')
self.addDeviceAttributesToNode(self.root, 'core')
self.addDeviceAttributesToNode(self.root, 'mcu')
pin_count_child = self.root.addChild('pin-count')
if self.family == 'xmega':
# the int in the type is the package device_id
# ie. A1, B1 = 100 pins, A3, C3 = 64 pins, etc...
pins = [0, 100, 0, 64, 44, 32]
pin_count_child.setValue(pins[int(self.types[0][1:])])
else:
# the AT90, ATtiny and ATmega have very weird pin counts, with so many different packages
pin_count_child.setValue(0)
for header in ['avr/io.h', 'avr/interrupt.h']:
header_child = self.root.addChild('header')
header_child.setValue(header)
# self.addDeviceAttributesToNode(self.root, 'define')
core_child = self.root.addChild('driver')
core_child.setAttributes({'type': 'core', 'name': 'avr'})
ram_sizes = self.device.getProperty('ram')
for ram_size in ram_sizes.values:
size = ram_size.value
# for large RAM sizes, reserve 1kB for stack
# for small RAM sizes, reserve half of entire size for stack
if size > 2048:
size -= 1024
else:
size /= 2
for device_id in ram_size.ids.differenceFromIds(self.device.ids):
attr = self._getAttributeDictionaryFromId(device_id)
attr['name'] = 'ram_length'
ram_size_child = core_child.addChild('parameter')
ram_size_child.setAttributes(attr)
ram_size_child.setValue(size)
attr2 = self._getAttributeDictionaryFromId(device_id)
attr2['name'] = 'ram_block_length'
block_size = 4
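                # double the block size until the reserved RAM fits into at most 127 blocks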
while (size / block_size > 127):
block_size *= 2
ram_block_child = core_child.addChild('parameter')
ram_block_child.setAttributes(attr2)
ram_block_child.setValue(block_size)
# ADC
self.addAdcToNode(self.root)
# Clock
clock_child = self.root.addChild('driver')
clock_child.setAttributes({'type': 'clock', 'name': 'avr'})
# DAC
self.addDacToNode(self.root)
# I2C aka TWI
self.addI2cToNode(self.root)
# SPI
self.addSpiToNode(self.root)
# Timer
self.addTimerToNode(self.root)
# UART
self.addUartToNode(self.root)
        # USI can be used to emulate UART, SPI and I2C, so there should not be a separate driver for it.
# self.addUsiToNode(self.root)
# GPIO
self.addGpioToNode(self.root)
def addDeviceAttributesToNode(self, node, name):
properties = self.device.getProperty(name)
if properties == None:
return
for prop in properties.values:
for device_id in prop.ids.differenceFromIds(self.device.ids):
attr = self._getAttributeDictionaryFromId(device_id)
child = node.addChild(name)
child.setAttributes(attr)
child.setValue(prop.value)
def addModuleAttributesToNode(self, node, peripheral, name, family=None):
if family == None:
family = self.family
modules = self.device.getProperty('modules')
for prop in modules.values:
if any(m for m in prop.value if m.startswith(peripheral)):
for device_id in prop.ids.differenceFromIds(self.device.ids):
attr = self._getAttributeDictionaryFromId(device_id)
driver = node.addChild('driver')
driver.setAttributes(attr)
driver.setAttributes({'type': name, 'name': family})
def addModuleInstancesAttributesToNode(self, node, peripheral, name, family=None):
if family == None:
family = self.family
modules = self.device.getProperty('modules')
for prop in modules.values:
instances = []
for module in [m for m in prop.value if m.startswith(peripheral)]:
instances.append(module[len(peripheral):])
if len(instances) == 0:
continue
instances.sort()
for device_id in prop.ids.differenceFromIds(self.device.ids):
attr = self._getAttributeDictionaryFromId(device_id)
driver = node.addChild('driver')
driver.setAttributes(attr)
driver.setAttributes({'type': name, 'name': family})
if len(instances) > 0:
driver.setAttribute('instances', ",".join(instances))
if name in self.io:
for io in self.io[name]:
ch = driver.addChild('gpio')
ch.setAttributes(io)
def addI2cToNode(self, node):
family = 'at90_tiny_mega' if (self.family in ['at90', 'attiny', 'atmega']) else self.family
if self.family == 'xmega':
self.addModuleInstancesAttributesToNode(node, 'TWI', 'i2c', family)
else:
self.addModuleAttributesToNode(node, 'TWI', 'i2c', family)
def addSpiToNode(self, node):
family = 'at90_tiny_mega' if (self.family in ['at90', 'attiny', 'atmega']) else self.family
if self.family == 'xmega':
self.addModuleInstancesAttributesToNode(node, 'SPI', 'spi', family)
else:
self.addModuleAttributesToNode(node, 'SPI', 'spi', family)
def addAdcToNode(self, node):
if self.family == 'at90' and self.types[0] in ['usb', 'can', 'pwm']:
family = 'at90'
else:
family = 'at90_tiny_mega' if (self.family in ['at90', 'attiny', 'atmega']) else self.family
if self.family == 'xmega':
self.addModuleInstancesAttributesToNode(node, 'ADC', 'adc', family)
else:
self.addModuleAttributesToNode(node, 'AD_CONVERTER', 'adc', family)
def addDacToNode(self, node):
if self.family == 'xmega':
self.addModuleInstancesAttributesToNode(node, 'DAC', 'dac')
else:
self.addModuleAttributesToNode(node, 'DA_CONVERTER', 'dac')
def addUsiToNode(self, node):
if self.family != 'xmega':
family = 'at90_tiny_mega' if (self.family in ['at90', 'attiny', 'atmega']) else self.family
self.addModuleAttributesToNode(node, 'USI', 'usi', family)
def addTimerToNode(self, node):
if self.family == 'xmega':
self.addModuleInstancesAttributesToNode(node, 'TC', 'timer')
else:
self.addModuleInstancesAttributesToNode(node, 'TIMER_COUNTER_', 'timer')
def addUartToNode(self, node):
family = 'at90_tiny_mega' if (self.family in ['at90', 'attiny', 'atmega']) else self.family
# this is special, some AT90_Tiny_Megas can put their USART into SPI mode
# we have to parse this specially.
uartSpi = 'uartspi' in self.io or self.family == 'xmega'
modules = self.device.getProperty('modules')
for prop in modules.values:
instances = []
for module in [m for m in prop.value if m.startswith('USART')]:
if self.family == 'xmega':
instances.append(module[5:7])
else:
                    # some devices only have a 'USART', but we want 'USART0'
mod = module + '0'
instances.append(mod[5:6])
if instances != []:
instances = list(set(instances))
instances.sort()
for device_id in prop.ids.differenceFromIds(self.device.ids):
attr = self._getAttributeDictionaryFromId(device_id)
driver = node.addChild('driver')
driver.setAttributes(attr)
driver.setAttributes({'type': 'uart', 'name': family})
if uartSpi:
spiDriver = node.addChild('driver')
spiDriver.setAttributes(attr)
spiDriver.setAttributes({'type': 'spi', 'name': family + "_uart"})
driver.setAttribute('instances', ",".join(instances))
if uartSpi:
spiDriver.setAttribute('instances', ",".join(instances))
ram_sizes = self.device.getProperty('ram')
for ram_size in ram_sizes.values:
size = ram_size.value
                        # for small RAM sizes, reserve only 16 bytes for the tx buffer; for large ones, 250 bytes
if size < 1024 or size > 1024 * 4:
for ram_id in ram_size.ids.differenceFromIds(self.device.ids):
attr = self._getAttributeDictionaryFromId(ram_id)
attr['name'] = 'tx_buffer'
ram_size_child = driver.addChild('parameter')
ram_size_child.setAttributes(attr)
ram_size_child.setValue(16 if size < 1024 else 250)
def addGpioToNode(self, node):
family = 'at90_tiny_mega' if (self.family in ['at90', 'attiny', 'atmega']) else self.family
props = self.device.getProperty('gpios')
driver = node.addChild('driver')
driver.setAttributes({'type': 'gpio', 'name': family})
for prop in props.values:
gpios = prop.value
gpios.sort(key=lambda k: (k['port'], k['id']))
for device_id in prop.ids.differenceFromIds(self.device.ids):
device_dict = self._getAttributeDictionaryFromId(device_id)
for gpio in gpios:
gpio_child = driver.addChild('gpio')
gpio_child.setAttributes(device_dict)
for name in ['port', 'id', 'pcint', 'extint']:
if name in gpio:
gpio_child.setAttribute(name, gpio[name])
for af in gpio['af']:
af_child = gpio_child.addChild('af')
af_child.setAttributes(af)
def _getAttributeDictionaryFromId(self, device_id):
target = device_id.properties
device_dict = {}
for attr in target:
if target[attr] != None:
if attr == 'type':
device_dict['device-type'] = target[attr]
if attr == 'name':
device_dict['device-name'] = target[attr]
if attr == 'pin_id':
device_dict['device-pin-id'] = target[attr]
return device_dict
def _addNamingSchema(self):
if self.family == 'xmega':
naming_schema = 'at{{ family }}{{ name }}{{ type }}{{ pin_id }}'
identifiers = list(itertools.product(("at",),
(self.family,),
self.names,
self.types,
self.pin_ids))
devices = ['at' + d.string.replace('none', '') for d in self.device.ids]
elif self.family == 'at90':
naming_schema = '{{ family }}{{ type }}{{ name }}'
identifiers = list(itertools.product((self.family,),
self.types,
self.names))
devices = [d.string.replace('none', '') for d in self.device.ids]
else:
naming_schema = '{{ family }}{{ name }}{{ type }}'
identifiers = list(itertools.product((self.family,),
self.names,
self.types))
devices = [d.string.replace('none', '') for d in self.device.ids]
for identifier_parts in identifiers:
identifier = ''.join(identifier_parts).replace('none', '')
if identifier not in devices:
child = self.root.prependChild('invalid-device')
child.setValue(identifier)
else:
devices.remove(identifier)
for device in devices:
self.log.error("Found device not matching naming schema: '{}'".format(device))
child = self.root.prependChild('naming-schema')
child.setValue(naming_schema)
def write(self, folder):
self._addNamingSchema()
names = self.names
names.sort(key=int)
types = self.types
name = self.family + "-".join(["_".join(names), "_".join(types)]) + ".xml"
if self.family == 'xmega':
name = name[:-4] + "-" + "_".join(self.pin_ids) + ".xml"
self.writeToFolder(folder, name)
def __repr__(self):
return self.__str__()
def __str__(self):
return "AVRDeviceWriter(\n" + self.toString() + ")"
|
problems/euler/45/pentagonal.py | vidyadeepa/the-coding-interview | 1,571 | 12695523 | from itertools import takewhile, combinations
def triangle_generator(start):
n = 1
while True:
num = n*(n+1)/2
if num >= start:
yield num
n = n + 1
def pentagonal_generator(start):
n = 1
while True:
num = n*(3*n-1)/2
if num >= start:
yield num
n = n + 1
def hexagonal_generator(start):
n = 1
while True:
num = n*(2*n-1)
if num >= start:
yield num
n = n + 1
start = 40756
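# 40755 = T(285) = P(165) = H(143); search upward from here for the next number that is triangular, pentagonal and hexagonal at once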
tg = triangle_generator(start)
pg = pentagonal_generator(start)
hg = hexagonal_generator(start)
p = pg.next()
t = tg.next()
for h in hg:
while p < h:
p = pg.next()
if p != h:
continue
while t < h:
t = tg.next()
if t == h:
print h
|
quokka/admin/forms.py | songshansitulv/quokka | 1,141 | 12695529 | # coding: utf-8
from flask_admin.babel import Translations
from flask_admin.form import rules # noqa
from flask_admin.form.fields import (DateTimeField, JSONField, Select2Field,
Select2TagsField, TimeField)
from flask_admin.form.widgets import Select2TagsWidget
from flask_admin.model.fields import InlineFieldList, InlineFormField
from flask_wtf import FlaskForm
from quokka.admin.fields import SmartSelect2Field
from quokka.admin.wtforms_html5 import AutoAttrMeta
from wtforms import fields as _fields
from wtforms import widgets as _widgets
from wtforms import validators # noqa
from wtforms.validators import ValidationError
# from wtforms_components import read_only # noqa
# from wtforms_components import ReadOnlyWidgetProxy # noqa
class PassiveField(object):
"""
Passive field that does not populate obj values.
"""
def populate_obj(self, obj, name):
pass
class PassiveHiddenField(PassiveField, _fields.HiddenField):
pass
class PassiveStringField(PassiveField, _fields.StringField):
pass
fields = _fields # noqa
fields.SmartSelect2Field = SmartSelect2Field
fields.DateTimeField = DateTimeField
fields.TimeField = TimeField
fields.Select2Field = Select2Field
fields.Select2TagsField = Select2TagsField
fields.JSONField = JSONField
fields.InlineFieldList = InlineFieldList
fields.InlineFormField = InlineFormField
fields.PassiveHiddenField = PassiveHiddenField
fields.PassiveStringField = PassiveStringField
widgets = _widgets
widgets.Select2TagsWidget = Select2TagsWidget
READ_ONLY = {'readonly': True}
class Form(FlaskForm):
"""Base class to customize wtforms"""
_translations = Translations()
Meta = AutoAttrMeta
def _get_translations(self):
return self._translations
class CallableValidator(object):
"""
Takes a callable and validates using it
"""
def __init__(self, function, message=None):
self.function = function
self.message = message
def __call__(self, form, field):
validation = self.function(form, field)
if validation is not None:
raise ValidationError(self.message or validation)
validators.CallableValidator = CallableValidator
rules.csrf_token = rules.Field(
'csrf_token',
render_field='quokka_macros.render_hidden_field'
)
|
pinion/common.py | dzarda/Pinion | 233 | 12695553 | <filename>pinion/common.py
import os
PKG_BASE = os.path.dirname(__file__)
RESOURCES = os.path.join(PKG_BASE, "resources")
|
pose_test.py | WestCityInstitute/DeepSFM | 235 | 12695614 | import argparse
import os.path as Path
import warnings
import custom_transforms
import time
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
from logger import AverageMeter
from transforms3d.axangles import mat2axangle
from convert import *
from demon_metrics import compute_motion_errors
from models import PoseNet
from pose_sequence_folders import SequenceFolder
parser = argparse.ArgumentParser(description='DeepSFM pose subnet test script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers')
parser.add_argument('--sequence-length', type=int, metavar='N', help='sequence length for training', default=2)
parser.add_argument('-b', '--batch-size', default=1, type=int, # 1
metavar='N', help='mini-batch size')
parser.add_argument('--geo', '--geo-cost', default=True, type=bool,
metavar='GC', help='whether add geometry cost')
parser.add_argument('--pretrained-dps', dest='pretrained_dps',
default='pose_checkpoint.pth.tar',
metavar='PATH',
help='path to pre-trained model')
parser.add_argument('--seed', default=0, type=int, help='seed for random functions, and network initialization')
parser.add_argument('--save', default="I0", type=str, help='save prefix')
parser.add_argument('--ttype', default='test.txt', type=str, help='Text file that indicates the input data')
parser.add_argument('-f', '--training-output-freq', type=int,
                    help='frequency for outputting dispnet outputs and warped imgs at training for all scales; if 0, will not output',
metavar='N', default=100)
parser.add_argument('--nlabel', type=int, default=10, help='number of labels')
parser.add_argument('--std_tr', type=float, default=0.27, help='translation')
parser.add_argument('--std_rot', type=float, default=0.12, help='rotation')
parser.add_argument('--pose_init', default='demon', help='path to init pose')
parser.add_argument('--depth_init', default='demon', help='path to init depth')
n_iter = 0
# NOTE: test set for testing
def main():
global n_iter
args = parser.parse_args()
# Data loading code
normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
train_transform = custom_transforms.Compose([
# custom_transforms.RandomScaleCrop(),
custom_transforms.ArrayToTensor(),
normalize
])
print("=> fetching scenes in '{}'".format(args.data))
train_set = SequenceFolder(
args.data,
transform=train_transform,
seed=args.seed,
ttype=args.ttype,
add_geo=args.geo,
depth_source=args.depth_init,
sequence_length=args.sequence_length,
gt_source='g',
std=args.std_tr,
pose_init=args.pose_init,
dataset="",
get_path=True
)
print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
val_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# create model
print("=> creating model")
pose_net = PoseNet(args.nlabel, args.std_tr, args.std_rot, add_geo_cost=args.geo, depth_augment=False).cuda()
if args.pretrained_dps:
# freeze feature extra layers
# for param in pose_net.feature_extraction.parameters():
# param.requires_grad = False
print("=> using pre-trained weights for DPSNet")
model_dict = pose_net.state_dict()
weights = torch.load(args.pretrained_dps)['state_dict']
pretrained_dict = {k: v for k, v in weights.items() if
k in model_dict and weights[k].shape == model_dict[k].shape}
model_dict.update(pretrained_dict)
pose_net.load_state_dict(model_dict)
else:
pose_net.init_weights()
cudnn.benchmark = True
pose_net = torch.nn.DataParallel(pose_net)
global n_iter
data_time = AverageMeter()
pose_net.eval()
end = time.time()
errors = np.zeros((2, 2, int(np.ceil(len(val_loader)))), np.float32)
with torch.no_grad():
for i, (tgt_img, ref_imgs, ref_poses, intrinsics, intrinsics_inv, tgt_depth, ref_depths,
ref_noise_poses, initial_pose, tgt_path, ref_paths) in enumerate(val_loader):
data_time.update(time.time() - end)
tgt_img_var = Variable(tgt_img.cuda())
ref_imgs_var = [Variable(img.cuda()) for img in ref_imgs]
ref_poses_var = [Variable(pose.cuda()) for pose in ref_poses]
ref_noise_poses_var = [Variable(pose.cuda()) for pose in ref_noise_poses]
initial_pose_var = Variable(initial_pose.cuda())
ref_depths_var = [Variable(dep.cuda()) for dep in ref_depths]
intrinsics_var = Variable(intrinsics.cuda())
intrinsics_inv_var = Variable(intrinsics_inv.cuda())
tgt_depth_var = Variable(tgt_depth.cuda())
pose = torch.cat(ref_poses_var, 1)
noise_pose = torch.cat(ref_noise_poses_var, 1)
pose_norm = torch.norm(noise_pose[:, :, :3, 3], dim=-1, keepdim=True) # b * n* 1
p_angle, p_trans, rot_c, trans_c = pose_net(tgt_img_var, ref_imgs_var, initial_pose_var, noise_pose,
intrinsics_var,
intrinsics_inv_var,
tgt_depth_var,
ref_depths_var, trans_norm=pose_norm)
batch_size = p_angle.shape[0]
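            # Softmax-weighted expectation over the discrete rotation/translation hypotheses gives the predicted pose correction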
p_angle_v = torch.sum(F.softmax(p_angle, dim=1).view(batch_size, -1, 1) * rot_c, dim=1)
p_trans_v = torch.sum(F.softmax(p_trans, dim=1).view(batch_size, -1, 1) * trans_c, dim=1)
p_matrix = Variable(torch.zeros((batch_size, 4, 4)).float()).cuda()
p_matrix[:, 3, 3] = 1
p_matrix[:, :3, :] = torch.cat([angle2matrix(p_angle_v), p_trans_v.unsqueeze(-1)], dim=-1) # 2*3*4
p_rel_pose = torch.ones_like(noise_pose)
for bat in range(batch_size):
path = tgt_path[bat]
dirname = Path.dirname(path)
orig_poses = np.genfromtxt(Path.join(dirname, args.pose_init + "_poses.txt"))
for j in range(len(ref_imgs)):
p_rel_pose[:, j] = torch.matmul(noise_pose[:, j], inv(p_matrix))
seq_num = int(Path.basename(ref_paths[bat][j])[:-4])
orig_poses[seq_num] = p_rel_pose[bat, j, :3, :].data.cpu().numpy().reshape(12, )
p_aa = mat2axangle(p_rel_pose[bat, j, :3, :3].data.cpu().numpy())
gt_aa = mat2axangle(pose[bat, j, :3, :3].data.cpu().numpy(), unit_thresh=1e-2)
n_aa = mat2axangle(noise_pose[bat, j, :3, :3].data.cpu().numpy(), unit_thresh=1e-2)
p_t = p_rel_pose[bat, j, :3, 3].data.cpu().numpy()
gt_t = pose[bat, j, :3, 3].data.cpu().numpy()
n_t = noise_pose[bat, j, :3, 3].data.cpu().numpy()
p_aa = p_aa[0] * p_aa[1]
n_aa = n_aa[0] * n_aa[1]
gt_aa = gt_aa[0] * gt_aa[1]
error = compute_motion_errors(np.concatenate([n_aa, n_t]), np.concatenate([gt_aa, gt_t]), True)
error_p = compute_motion_errors(np.concatenate([p_aa, p_t]), np.concatenate([gt_aa, gt_t]), True)
print("%d n r%.6f, t%.6f" % (i, error[0], error[2]))
print("%d p r%.6f, t%.6f" % (i, error_p[0], error_p[2]))
errors[0, 0, i] += error[0]
errors[0, 1, i] += error[2]
errors[1, 0, i] += error_p[0]
errors[1, 1, i] += error_p[2]
errors[:, :, i] /= len(ref_imgs)
if args.save and not Path.exists(Path.join(dirname, args.save + "_poses.txt")):
np.savetxt(Path.join(dirname, args.save + "_poses.txt"), orig_poses)
mean_error = errors.mean(2)
error_names = ['rot', 'trans']
print("%s Results : " % args.pose_init)
print(
"{:>10}, {:>10}".format(
*error_names))
print("{:10.4f}, {:10.4f}".format(*mean_error[0]))
print("new Results : ")
print(
"{:>10}, {:>10}".format(
*error_names))
print("{:10.4f}, {:10.4f}".format(*mean_error[1]))
if __name__ == '__main__':
main()
|
src/pyhf/__init__.py | Saransh-cpp/pyhf | 188 | 12695626 | from pyhf.tensor import BackendRetriever as tensor
from pyhf.optimize import OptimizerRetriever as optimize
from pyhf._version import version as __version__
from pyhf.exceptions import InvalidBackend, InvalidOptimizer, Unsupported
from pyhf import events
tensorlib = None
optimizer = None
def get_backend():
"""
Get the current backend and the associated optimizer
Example:
>>> import pyhf
>>> backend, optimizer = pyhf.get_backend()
>>> backend
<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>
>>> optimizer
<pyhf.optimize.scipy_optimizer object at 0x...>
Returns:
backend, optimizer
"""
global tensorlib
global optimizer
return tensorlib, optimizer
tensorlib = tensor.numpy_backend()
default_backend = tensorlib
optimizer = optimize.scipy_optimizer()
default_optimizer = optimizer
@events.register('change_backend')
def set_backend(backend, custom_optimizer=None, precision=None):
"""
Set the backend and the associated optimizer
Example:
>>> import pyhf
>>> pyhf.set_backend("tensorflow")
>>> pyhf.tensorlib.name
'tensorflow'
>>> pyhf.tensorlib.precision
'64b'
>>> pyhf.set_backend(b"pytorch", precision="32b")
>>> pyhf.tensorlib.name
'pytorch'
>>> pyhf.tensorlib.precision
'32b'
>>> pyhf.set_backend(pyhf.tensor.numpy_backend())
>>> pyhf.tensorlib.name
'numpy'
>>> pyhf.tensorlib.precision
'64b'
Args:
backend (:obj:`str` or `pyhf.tensor` backend): One of the supported pyhf backends: NumPy, TensorFlow, PyTorch, and JAX
custom_optimizer (`pyhf.optimize` optimizer): Optional custom optimizer defined by the user
precision (:obj:`str`): Floating point precision to use in the backend: ``64b`` or ``32b``. Default is backend dependent.
Returns:
None
"""
global tensorlib
global optimizer
_supported_precisions = ["32b", "64b"]
backend_kwargs = {}
if isinstance(precision, (str, bytes)):
if isinstance(precision, bytes):
precision = precision.decode("utf-8")
precision = precision.lower()
if isinstance(backend, (str, bytes)):
if isinstance(backend, bytes):
backend = backend.decode("utf-8")
backend = backend.lower()
if precision is not None:
backend_kwargs["precision"] = precision
try:
backend = getattr(tensor, f"{backend:s}_backend")(**backend_kwargs)
except TypeError:
raise InvalidBackend(
f"The backend provided is not supported: {backend:s}. Select from one of the supported backends: numpy, tensorflow, pytorch"
)
_name_supported = getattr(tensor, f"{backend.name:s}_backend")
if _name_supported:
if not isinstance(backend, _name_supported):
raise AttributeError(
f"'{backend.name:s}' is not a valid name attribute for backend type {type(backend)}\n Custom backends must have names unique from supported backends"
)
if backend.precision not in _supported_precisions:
raise Unsupported(
f"The backend precision provided is not supported: {backend.precision:s}. Select from one of the supported precisions: {', '.join([str(v) for v in _supported_precisions])}"
)
# If "precision" arg passed, it should always win
# If no "precision" arg, defer to tensor backend object API if set there
if precision is not None:
if backend.precision != precision:
backend_kwargs["precision"] = precision
backend = getattr(tensor, f"{backend.name:s}_backend")(**backend_kwargs)
# need to determine if the tensorlib changed or the optimizer changed for events
tensorlib_changed = bool(
(backend.name != tensorlib.name) | (backend.precision != tensorlib.precision)
)
optimizer_changed = False
if custom_optimizer:
if isinstance(custom_optimizer, (str, bytes)):
if isinstance(custom_optimizer, bytes):
custom_optimizer = custom_optimizer.decode("utf-8")
try:
new_optimizer = getattr(
optimize, f"{custom_optimizer.lower()}_optimizer"
)()
except TypeError:
raise InvalidOptimizer(
f"The optimizer provided is not supported: {custom_optimizer}. Select from one of the supported optimizers: scipy, minuit"
)
else:
_name_supported = getattr(optimize, f"{custom_optimizer.name:s}_optimizer")
if _name_supported:
if not isinstance(custom_optimizer, _name_supported):
raise AttributeError(
f"'{custom_optimizer.name}' is not a valid name attribute for optimizer type {type(custom_optimizer)}\n Custom optimizers must have names unique from supported optimizers"
)
new_optimizer = custom_optimizer
else:
new_optimizer = optimize.scipy_optimizer()
optimizer_changed = bool(optimizer != new_optimizer)
# set new backend
tensorlib = backend
optimizer = new_optimizer
# trigger events
if tensorlib_changed:
events.trigger("tensorlib_changed")()
if optimizer_changed:
events.trigger("optimizer_changed")()
# set up any other globals for backend
tensorlib._setup()
from pyhf.pdf import Model
from pyhf.workspace import Workspace
from pyhf import simplemodels
from pyhf import infer
from pyhf import compat
from pyhf.patchset import PatchSet
__all__ = [
"Model",
"PatchSet",
"Workspace",
"__version__",
"compat",
"exceptions",
"get_backend",
"infer",
"interpolators",
"modifiers",
"optimizer",
"parameters",
"patchset",
"pdf",
"probability",
"set_backend",
"simplemodels",
"tensor",
"tensorlib",
"utils",
"workspace",
]
def __dir__():
return __all__
|
src/oci/cloud_guard/models/recommendation_summary.py | Manny27nyc/oci-python-sdk | 249 | 12695631 | <filename>src/oci/cloud_guard/models/recommendation_summary.py<gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class RecommendationSummary(object):
"""
Recommendation Definition.
"""
#: A constant which can be used with the type property of a RecommendationSummary.
#: This constant has a value of "DETECTOR_PROBLEMS"
TYPE_DETECTOR_PROBLEMS = "DETECTOR_PROBLEMS"
#: A constant which can be used with the type property of a RecommendationSummary.
#: This constant has a value of "RESOLVED_PROBLEMS"
TYPE_RESOLVED_PROBLEMS = "RESOLVED_PROBLEMS"
#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "CRITICAL"
RISK_LEVEL_CRITICAL = "CRITICAL"
#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "HIGH"
RISK_LEVEL_HIGH = "HIGH"
#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "MEDIUM"
RISK_LEVEL_MEDIUM = "MEDIUM"
#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "LOW"
RISK_LEVEL_LOW = "LOW"
#: A constant which can be used with the risk_level property of a RecommendationSummary.
#: This constant has a value of "MINOR"
RISK_LEVEL_MINOR = "MINOR"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a RecommendationSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_detail property of a RecommendationSummary.
#: This constant has a value of "OPEN"
LIFECYCLE_DETAIL_OPEN = "OPEN"
#: A constant which can be used with the lifecycle_detail property of a RecommendationSummary.
#: This constant has a value of "RESOLVED"
LIFECYCLE_DETAIL_RESOLVED = "RESOLVED"
#: A constant which can be used with the lifecycle_detail property of a RecommendationSummary.
#: This constant has a value of "DISMISSED"
LIFECYCLE_DETAIL_DISMISSED = "DISMISSED"
def __init__(self, **kwargs):
"""
Initializes a new RecommendationSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this RecommendationSummary.
:type id: str
:param type:
The value to assign to the type property of this RecommendationSummary.
Allowed values for this property are: "DETECTOR_PROBLEMS", "RESOLVED_PROBLEMS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type type: str
:param tenant_id:
The value to assign to the tenant_id property of this RecommendationSummary.
:type tenant_id: str
:param compartment_id:
The value to assign to the compartment_id property of this RecommendationSummary.
:type compartment_id: str
:param target_id:
The value to assign to the target_id property of this RecommendationSummary.
:type target_id: str
:param details:
The value to assign to the details property of this RecommendationSummary.
:type details: dict(str, str)
:param risk_level:
The value to assign to the risk_level property of this RecommendationSummary.
Allowed values for this property are: "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type risk_level: str
:param problem_count:
The value to assign to the problem_count property of this RecommendationSummary.
:type problem_count: int
:param lifecycle_state:
The value to assign to the lifecycle_state property of this RecommendationSummary.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_detail:
The value to assign to the lifecycle_detail property of this RecommendationSummary.
Allowed values for this property are: "OPEN", "RESOLVED", "DISMISSED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_detail: str
:param time_created:
The value to assign to the time_created property of this RecommendationSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this RecommendationSummary.
:type time_updated: datetime
:param name:
The value to assign to the name property of this RecommendationSummary.
:type name: str
:param description:
The value to assign to the description property of this RecommendationSummary.
:type description: str
"""
self.swagger_types = {
'id': 'str',
'type': 'str',
'tenant_id': 'str',
'compartment_id': 'str',
'target_id': 'str',
'details': 'dict(str, str)',
'risk_level': 'str',
'problem_count': 'int',
'lifecycle_state': 'str',
'lifecycle_detail': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'name': 'str',
'description': 'str'
}
self.attribute_map = {
'id': 'id',
'type': 'type',
'tenant_id': 'tenantId',
'compartment_id': 'compartmentId',
'target_id': 'targetId',
'details': 'details',
'risk_level': 'riskLevel',
'problem_count': 'problemCount',
'lifecycle_state': 'lifecycleState',
'lifecycle_detail': 'lifecycleDetail',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'name': 'name',
'description': 'description'
}
self._id = None
self._type = None
self._tenant_id = None
self._compartment_id = None
self._target_id = None
self._details = None
self._risk_level = None
self._problem_count = None
self._lifecycle_state = None
self._lifecycle_detail = None
self._time_created = None
self._time_updated = None
self._name = None
self._description = None
@property
def id(self):
"""
**[Required]** Gets the id of this RecommendationSummary.
Unique identifier for Recommendation
:return: The id of this RecommendationSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this RecommendationSummary.
Unique identifier for Recommendation
:param id: The id of this RecommendationSummary.
:type: str
"""
self._id = id
@property
def type(self):
"""
Gets the type of this RecommendationSummary.
Recommendation type
Allowed values for this property are: "DETECTOR_PROBLEMS", "RESOLVED_PROBLEMS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The type of this RecommendationSummary.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this RecommendationSummary.
Recommendation type
:param type: The type of this RecommendationSummary.
:type: str
"""
allowed_values = ["DETECTOR_PROBLEMS", "RESOLVED_PROBLEMS"]
if not value_allowed_none_or_none_sentinel(type, allowed_values):
type = 'UNKNOWN_ENUM_VALUE'
self._type = type
@property
def tenant_id(self):
"""
Gets the tenant_id of this RecommendationSummary.
Tenant Identifier
:return: The tenant_id of this RecommendationSummary.
:rtype: str
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
"""
Sets the tenant_id of this RecommendationSummary.
Tenant Identifier
:param tenant_id: The tenant_id of this RecommendationSummary.
:type: str
"""
self._tenant_id = tenant_id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this RecommendationSummary.
Compartment Identifier
:return: The compartment_id of this RecommendationSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this RecommendationSummary.
Compartment Identifier
:param compartment_id: The compartment_id of this RecommendationSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def target_id(self):
"""
**[Required]** Gets the target_id of this RecommendationSummary.
targetId associated with the problem
:return: The target_id of this RecommendationSummary.
:rtype: str
"""
return self._target_id
@target_id.setter
def target_id(self, target_id):
"""
Sets the target_id of this RecommendationSummary.
targetId associated with the problem
:param target_id: The target_id of this RecommendationSummary.
:type: str
"""
self._target_id = target_id
@property
def details(self):
"""
**[Required]** Gets the details of this RecommendationSummary.
Recommendation details
:return: The details of this RecommendationSummary.
:rtype: dict(str, str)
"""
return self._details
@details.setter
def details(self, details):
"""
Sets the details of this RecommendationSummary.
Recommendation details
:param details: The details of this RecommendationSummary.
:type: dict(str, str)
"""
self._details = details
@property
def risk_level(self):
"""
Gets the risk_level of this RecommendationSummary.
The Risk Level
Allowed values for this property are: "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The risk_level of this RecommendationSummary.
:rtype: str
"""
return self._risk_level
@risk_level.setter
def risk_level(self, risk_level):
"""
Sets the risk_level of this RecommendationSummary.
The Risk Level
:param risk_level: The risk_level of this RecommendationSummary.
:type: str
"""
allowed_values = ["CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR"]
if not value_allowed_none_or_none_sentinel(risk_level, allowed_values):
risk_level = 'UNKNOWN_ENUM_VALUE'
self._risk_level = risk_level
@property
def problem_count(self):
"""
**[Required]** Gets the problem_count of this RecommendationSummary.
Count number of the problem
:return: The problem_count of this RecommendationSummary.
:rtype: int
"""
return self._problem_count
@problem_count.setter
def problem_count(self, problem_count):
"""
Sets the problem_count of this RecommendationSummary.
Count number of the problem
:param problem_count: The problem_count of this RecommendationSummary.
:type: int
"""
self._problem_count = problem_count
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this RecommendationSummary.
The current state of the Recommendation.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this RecommendationSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this RecommendationSummary.
The current state of the Recommendation.
:param lifecycle_state: The lifecycle_state of this RecommendationSummary.
:type: str
"""
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_detail(self):
"""
**[Required]** Gets the lifecycle_detail of this RecommendationSummary.
The lifecycleDetail will give more detail on the substate of the lifecycleState.
Allowed values for this property are: "OPEN", "RESOLVED", "DISMISSED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_detail of this RecommendationSummary.
:rtype: str
"""
return self._lifecycle_detail
@lifecycle_detail.setter
def lifecycle_detail(self, lifecycle_detail):
"""
Sets the lifecycle_detail of this RecommendationSummary.
The lifecycleDetail will give more detail on the substate of the lifecycleState.
:param lifecycle_detail: The lifecycle_detail of this RecommendationSummary.
:type: str
"""
allowed_values = ["OPEN", "RESOLVED", "DISMISSED"]
if not value_allowed_none_or_none_sentinel(lifecycle_detail, allowed_values):
lifecycle_detail = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_detail = lifecycle_detail
@property
def time_created(self):
"""
Gets the time_created of this RecommendationSummary.
problem creating time
:return: The time_created of this RecommendationSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this RecommendationSummary.
problem creating time
:param time_created: The time_created of this RecommendationSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this RecommendationSummary.
problem updating time
:return: The time_updated of this RecommendationSummary.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this RecommendationSummary.
problem updating time
:param time_updated: The time_updated of this RecommendationSummary.
:type: datetime
"""
self._time_updated = time_updated
@property
def name(self):
"""
**[Required]** Gets the name of this RecommendationSummary.
recommendation string showing on UX
:return: The name of this RecommendationSummary.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this RecommendationSummary.
recommendation string showing on UX
:param name: The name of this RecommendationSummary.
:type: str
"""
self._name = name
@property
def description(self):
"""
**[Required]** Gets the description of this RecommendationSummary.
description of the recommendation
:return: The description of this RecommendationSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this RecommendationSummary.
description of the recommendation
:param description: The description of this RecommendationSummary.
:type: str
"""
self._description = description
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
radon/tests/test_cli_colors.py | DolajoCZ/radon | 943 | 12695661 | import radon.cli.colors as colors
def test_color_enabled_yes(monkeypatch):
monkeypatch.setenv("COLOR", "yes")
assert colors.color_enabled()
def test_color_enabled_no(monkeypatch):
monkeypatch.setenv("COLOR", "no")
assert not colors.color_enabled()
def test_color_enabled_auto(monkeypatch, mocker):
monkeypatch.setenv("COLOR", "auto")
isatty_mock = mocker.patch('sys.stdout.isatty')
isatty_mock.return_value = True
assert colors.color_enabled()
isatty_mock.return_value = False
assert not colors.color_enabled()
|
examples/spark_dataset_converter/utils.py | rizalgowandy/petastorm | 1,393 | 12695695 | <reponame>rizalgowandy/petastorm<gh_stars>1000+
import os
import tempfile
import requests
def download_mnist_libsvm(mnist_data_dir):
mnist_data_path = os.path.join(mnist_data_dir, "mnist.bz2")
data_url = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/mnist.bz2"
r = requests.get(data_url)
with open(mnist_data_path, "wb") as f:
f.write(r.content)
def get_mnist_dir():
# This folder is baked into the docker image
MNIST_DATA_DIR = "/data/mnist/"
if os.path.isdir(MNIST_DATA_DIR) and os.path.isfile(os.path.join(MNIST_DATA_DIR, 'mnist.bz2')):
return MNIST_DATA_DIR
mnist_dir = tempfile.mkdtemp('_mnist_data')
download_mnist_libsvm(mnist_dir)
return mnist_dir
|
pymdp/envs/visual_foraging.py | spetey/pymdp | 108 | 12695708 | <reponame>spetey/pymdp
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Visual Foraging Environment
__author__: <NAME>, <NAME>, <NAME>
"""
from pymdp.envs import Env
import numpy as np
# NOTE: `Categorical` is used throughout this module but is not imported in the
# original file; the import path below is an assumption based on older pymdp releases.
from pymdp.distributions import Categorical
LOCATION_ID = 0
SCENE_ID = 1
class VisualForagingEnv(Env):
def __init__(self, scenes=None, n_features=2):
if scenes is None:
self.scenes = self._construct_default_scenes()
else:
self.scenes = scenes
self.n_scenes = len(self.scenes)
self.n_features = n_features + 1
self.n_states = [np.prod(self.scenes[0].shape) + 1, self.scenes.shape[0]]
self.n_locations = self.n_states[LOCATION_ID]
self.n_control = [self.n_locations, 1]
self.n_observations = [self.n_locations, self.n_features]
self.n_factors = len(self.n_states)
self.n_modalities = len(self.n_observations)
self._transition_dist = self._construct_transition_dist()
self._likelihood_dist = self._construct_likelihood_dist()
self._true_scene = None
self._state = None
def reset(self, state=None):
if state is None:
loc_state = np.zeros(self.n_locations)
loc_state[0] = 1.0
scene_state = np.zeros(self.n_scenes)
self._true_scene = np.random.randint(self.n_scenes)
scene_state[self._true_scene] = 1.0
full_state = np.empty(self.n_factors, dtype=object)
full_state[LOCATION_ID] = loc_state
full_state[SCENE_ID] = scene_state
self._state = Categorical(values=full_state)
else:
self._state = Categorical(values=state)
return self._get_observation()
def step(self, actions):
prob_states = np.empty(self.n_factors, dtype=object)
for f in range(self.n_factors):
prob_states[f] = (
self._transition_dist[f][:, :, actions[f]]
.dot(self._state[f], return_numpy=True)
.flatten()
)
state = Categorical(values=prob_states).sample()
self._state = self._construct_state(state)
return self._get_observation()
def render(self):
pass
def sample_action(self):
return [np.random.randint(self.n_control[i]) for i in range(self.n_factors)]
def get_likelihood_dist(self):
return self._likelihood_dist.copy()
def get_transition_dist(self):
return self._transition_dist.copy()
def get_uniform_posterior(self):
values = np.array(
[
np.ones(self.n_states[f]) / self.n_states[f]
for f in range(self.n_factors)
]
)
return Categorical(values=values)
def get_rand_likelihood_dist(self):
pass
def get_rand_transition_dist(self):
pass
def _get_observation(self):
prob_obs = self._likelihood_dist.dot(self._state)
return prob_obs.sample()
def _construct_transition_dist(self):
B_locs = np.eye(self.n_locations)
B_locs = B_locs.reshape(self.n_locations, self.n_locations, 1)
B_locs = np.tile(B_locs, (1, 1, self.n_locations))
B_locs = B_locs.transpose(1, 2, 0)
B = np.empty(self.n_factors, dtype=object)
B[LOCATION_ID] = B_locs
B[SCENE_ID] = np.eye(self.n_scenes).reshape(self.n_scenes, self.n_scenes, 1)
return Categorical(values=B)
def _construct_likelihood_dist(self):
A = np.empty(self.n_modalities, dtype=object)
for g in range(self.n_modalities):
A[g] = np.zeros([self.n_observations[g]] + self.n_states)
for loc in range(self.n_states[LOCATION_ID]):
for scene_id in range(self.n_states[SCENE_ID]):
scene = self.scenes[scene_id]
feat_loc_ids = np.ravel_multi_index(np.where(scene), scene.shape)
if loc in feat_loc_ids + 1:
feat_ids = np.unravel_index(
feat_loc_ids[loc == (feat_loc_ids + 1)], scene.shape
)
feats = scene[feat_ids]
A[SCENE_ID][int(feats), loc, scene_id] = 1.0
else:
A[SCENE_ID][0, loc, scene_id] = 1.0
A[LOCATION_ID][loc, loc, scene_id] = 1.0
return Categorical(values=A)
def _construct_default_scenes(self):
scene_one = [[2, 2], [2, 2]]
scene_two = [[1, 1], [1, 1]]
scenes = np.array([scene_one, scene_two])
return scenes
def _construct_state(self, state_tuple):
state = np.empty(self.n_factors, dtype=object)
for f in range(self.n_factors):
state[f] = np.eye(self.n_states[f])[state_tuple[f]]
return Categorical(values=state)
@property
def state(self):
return self._state
@property
def true_scene(self):
return self._true_scene
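# --- Usage sketch (not part of the original module) ---
# Minimal interaction loop with VisualForagingEnv. It assumes the Categorical
# import noted above resolves; actions are sampled uniformly at random.
if __name__ == "__main__":
    env = VisualForagingEnv(n_features=2)
    obs = env.reset()
    for _ in range(5):
        action = env.sample_action()
        obs = env.step(action)
        print("sampled action:", action, "observation:", obs)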
|
lib/zuora/views.py | goztrk/django-htk | 206 | 12695735 | <filename>lib/zuora/views.py
# Python Standard Library Imports
import json
# Django Imports
from django.http import Http404
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
# HTK Imports
from htk.lib.zuora.utils import get_event_handler
@require_POST
@csrf_exempt
def zuora_webhook_view(request):
payload = json.loads(request.body)
event_type = request.GET.get('event')
if event_type is None:
raise Http404
event_handler = get_event_handler(event_type)
if event_handler:
event_handler(event_type, payload)
response = HttpResponse(status=200)
return response
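# --- Usage sketch (not part of the original module) ---
# The view expects a POST with a JSON body and an `event` query parameter; the
# URL and payload below are assumptions, since routing lives elsewhere in the project.
#
#   curl -X POST "https://example.com/webhooks/zuora/?event=payment.succeeded" \
#        -H "Content-Type: application/json" \
#        -d '{"accountId": "A-00000001"}'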
|
runners/__init__.py | ShenYujun/genforce | 827 | 12695743 | # python3.7
"""Collects all runners."""
from .stylegan_runner import StyleGANRunner
from .encoder_runner import EncoderRunner
__all__ = ['StyleGANRunner', 'EncoderRunner']
|
src/utils/logs.py | MrRobertYuan/docklet | 273 | 12695784 | #!/usr/bin/python3
from utils import env
import json, os
from utils.log import logger
from werkzeug.utils import secure_filename
logsPath = env.getenv('FS_PREFIX') + '/local/log/'
class logsClass:
setting = {}
def list(*args, **kwargs):
if ( ('user_group' in kwargs) == False):
return {"success":'false', "reason":"Cannot get user_group"}
user_group = kwargs['user_group']
if (not ((user_group == 'admin') or (user_group == 'root'))):
return {"success": 'false', "reason": 'Unauthorized Action'}
s = os.listdir(logsPath)
r = []
for i in s:
if ('log' in i):
r.append(i)
return {'success': 'true', 'result': r}
def get(*args, **kwargs):
if ( ('user_group' in kwargs) == False):
return {"success":'false', "reason":"Cannot get user_group"}
user_group = kwargs['user_group']
if (not ((user_group == 'admin') or (user_group == 'root'))):
return {"success": 'false', "reason": 'Unauthorized Action'}
filepath = logsPath + secure_filename(kwargs['filename'])
try:
if not os.path.exists(filepath):
return {"success": 'false', "reason": 'file not exist'}
logfile = open(filepath, 'r')
logtext = logfile.read()
logfile.close()
return {'success': 'true', 'result': logtext}
except:
return {'success': 'false', 'reason': 'file read error'}
logs = logsClass()
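# --- Usage sketch (not part of the original module) ---
# Both calls require an authorized user_group; the filename below is a placeholder.
#
#   logs.list(user_group='root')                          # -> {'success': 'true', 'result': [...]}
#   logs.get(user_group='root', filename='docklet.log')   # -> {'success': 'true', 'result': '<file text>'}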
|
numpy_native_slower_than_translated.py | pmolfese/AppleSiliconForNeuroimaging | 188 | 12695801 | <filename>numpy_native_slower_than_translated.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import measure
# Example where translated code (Python 3.8.3, NumPy 1.19.4) runs ~10x faster than native code (Python 3.9.1rc1, NumPy 1.19.4)
rng = np.random.RandomState(20111001)
img_shape = (128, 128, 64, 10)
repeat = 100
arr = rng.normal(size=img_shape)
mtime = measure('np.max(arr)', repeat)
print('%30s %6.2f' % ('max all finite', mtime))
mtime = measure('np.min(arr)', repeat)
print('%30s %6.2f' % ('min all finite', mtime))
arr[:, :, :, 1] = np.nan
mtime = measure('np.max(arr)', repeat)
print('%30s %6.2f' % ('max all nan', mtime))
mtime = measure('np.min(arr)', repeat)
print('%30s %6.2f' % ('min all nan', mtime))
|
sacredboard/app/data/pymongo/genericdao.py | emited/sacredboard | 188 | 12695803 | <filename>sacredboard/app/data/pymongo/genericdao.py<gh_stars>100-1000
"""
Generic DAO object for safe access to the MongoDB.
Issue: https://github.com/chovanecm/sacredboard/issues/61
"""
import pymongo
from pymongo.errors import InvalidName
from sacredboard.app.data import DataSourceError
from .mongocursor import MongoDbCursor
class GenericDAO:
"""
Generic DAO object for safe access to the MongoDB.
Issue: https://github.com/chovanecm/sacredboard/issues/61
"""
def __init__(self, pymongo_client, database_name):
"""
Create a new GenericDAO object that will work on the given database.
:param pymongo_client: PyMongo client that is connected to MongoDB.
:param database_name: Name of the database this GenericDAO works with.
:raise DataSourceError
"""
self._client = pymongo_client
self._database = self._get_database(database_name)
def find_record(self, collection_name, query):
"""
        Return the first record matching the given Mongo query.
:param collection_name: Name of the collection to search in.
:param query: MongoDB Query, e.g. {_id: 123}
:return: A single MongoDB record or None if not found.
:raise DataSourceError
"""
cursor = self._get_collection(collection_name).find(query)
for record in cursor:
# Return the first record found.
return record
# Return None if nothing found.
return None
def find_records(self, collection_name, query={}, sort_by=None,
sort_direction=None, start=0, limit=None):
"""
Return a cursor of records from the given MongoDB collection.
:param collection_name: Name of the MongoDB collection to query.
:param query: Standard MongoDB query. By default no restriction.
:param sort_by: Name of a single field to sort by.
:param sort_direction: The direction to sort, "asc" or "desc".
:param start: Skip first n results.
:param limit: The maximum number of results to return.
:return: Cursor -- An iterable with results.
:raise DataSourceError
"""
cursor = self._get_collection(collection_name).find(query)
if sort_by is not None:
cursor = self._apply_sort(cursor, sort_by, sort_direction)
cursor = cursor.skip(start)
if limit is not None:
cursor = cursor.limit(limit)
return MongoDbCursor(cursor)
def delete_record(self, collection_name, query):
"""Delete record matching the given MongoDB query."""
return self._get_collection(collection_name).remove(query)
def _get_database(self, database_name):
"""
Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError
"""
try:
return self._client[database_name]
except InvalidName as ex:
raise DataSourceError("Cannot connect to database %s!"
% self._database) from ex
def _get_collection(self, collection_name):
"""
Get PyMongo client pointing to the current DB and the given collection.
:return: MongoDB client of the current database and given collection.
:raise DataSourceError
"""
try:
return self._database[collection_name]
except InvalidName as ex:
raise DataSourceError("Cannot access MongoDB collection %s!"
% collection_name) from ex
except Exception as ex:
raise DataSourceError("Unexpected error when accessing MongoDB"
"collection %s!"
% collection_name) from ex
def _apply_sort(self, cursor, sort_by, sort_direction):
"""
Apply sort to a cursor.
:param cursor: The cursor to apply sort on.
:param sort_by: The field name to sort by.
:param sort_direction: The direction to sort, "asc" or "desc".
:return:
"""
if sort_direction is not None and sort_direction.lower() == "desc":
sort = pymongo.DESCENDING
else:
sort = pymongo.ASCENDING
return cursor.sort(sort_by, sort)
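# --- Usage sketch (not part of the original module) ---
# Wires GenericDAO to a local MongoDB instance; the host, database name,
# collection name, field names and query below are assumptions for illustration only.
if __name__ == "__main__":
    client = pymongo.MongoClient("mongodb://localhost:27017")
    dao = GenericDAO(client, "sacred")
    # Look up a single run; returns None when nothing matches the query.
    print("run 1:", dao.find_record("runs", {"_id": 1}))
    # Build a cursor over the ten most recently started runs (wraps a pymongo cursor).
    recent = dao.find_records("runs", {}, sort_by="start_time",
                              sort_direction="desc", start=0, limit=10)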
|
tests/test_summary.py | deeplearningforfun/torch-tools | 353 | 12695810 | <filename>tests/test_summary.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @Author : DevinYang(<EMAIL>)
import torch
from torchtoolbox.tools import summary
from torchvision.models.resnet import resnet50
from torchvision.models.mobilenet import mobilenet_v2
model1 = resnet50()
model2 = mobilenet_v2()
def test_summary():
summary(model1, torch.rand((1, 3, 224, 224)), True)
print(summary(model2, torch.rand((1, 3, 224, 224))))
|
tests/integration-tests/tests/common/osu_common.py | enrico-usai/cfncluster | 279 | 12695818 | <filename>tests/integration-tests/tests/common/osu_common.py<gh_stars>100-1000
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import pathlib
import re
from datetime import datetime
from shutil import copyfile
from constants import OSU_BENCHMARK_VERSION
from utils import render_jinja_template
OSU_COMMON_DATADIR = pathlib.Path(__file__).parent / "data/osu/"
SUPPORTED_MPIS = ["openmpi", "intelmpi"]
def compile_osu(mpi_variant, remote_command_executor):
init_script = render_jinja_template(
template_file_path=OSU_COMMON_DATADIR / "init_osu_benchmarks.sh", osu_benchmark_version=OSU_BENCHMARK_VERSION
)
remote_command_executor.run_remote_script(
str(init_script),
args=[mpi_variant],
hide=True,
additional_files=[
str(OSU_COMMON_DATADIR / f"osu-micro-benchmarks-{OSU_BENCHMARK_VERSION}.tgz"),
str(OSU_COMMON_DATADIR / "config.guess"),
str(OSU_COMMON_DATADIR / "config.sub"),
],
)
def run_individual_osu_benchmark(
mpi_version,
benchmark_group,
benchmark_name,
partition,
remote_command_executor,
scheduler_commands,
num_instances,
slots_per_instance,
test_datadir,
submission_script_template_path=None,
rendered_template_path=None,
timeout=None,
):
"""
Run the given OSU benchmark.
:param mpi_version: string, should be one of SUPPORTED_MPIS
:param benchmark_group: string, which of the MPI benchmarks to run. As of 5.7.1 this includes collective, one-sided,
pt2pt, and startup
:param benchmark_name: string, name of the benchmark to run from the given group
    :param partition: string, partition on which to run the benchmark job (assumes the use of the Slurm scheduler)
    :param remote_command_executor: RemoteCommandExecutor instance, used to submit jobs
    :param scheduler_commands: SchedulerCommands instance, used to submit jobs
:param num_instances: int, number of instances to run benchmark across
:param slots_per_instance: int, number of processes to run on each node
:param test_datadir: Path, used to construct default output path when rendering submission script template
:param submission_script_template_path: string, override default path for source submission script template
:param rendered_template_path: string, override destination path when rendering submission script template
:param timeout: int, maximum number of minutes to wait for job to complete
:return: string, stdout of the benchmark job
"""
logging.info(f"Running OSU benchmark {OSU_BENCHMARK_VERSION}: {benchmark_name} for {mpi_version}")
if mpi_version not in SUPPORTED_MPIS:
raise Exception(f"Unsupported MPI: '{mpi_version}'. Must be one of {' '.join(SUPPORTED_MPIS)}")
compile_osu(mpi_version, remote_command_executor)
# Prepare submission script and pass to the scheduler for the job submission
if not submission_script_template_path:
submission_script_template_path = OSU_COMMON_DATADIR / f"osu_{benchmark_group}_submit_{mpi_version}.sh"
if not rendered_template_path:
rendered_template_path = test_datadir / f"osu_{benchmark_group}_submit_{mpi_version}_{benchmark_name}.sh"
copyfile(submission_script_template_path, rendered_template_path)
slots = num_instances * slots_per_instance
submission_script = render_jinja_template(
template_file_path=rendered_template_path,
benchmark_name=benchmark_name,
osu_benchmark_version=OSU_BENCHMARK_VERSION,
num_of_processes=slots,
)
if partition:
result = scheduler_commands.submit_script(
str(submission_script), slots=slots, partition=partition, nodes=num_instances
)
else:
result = scheduler_commands.submit_script(str(submission_script), slots=slots, nodes=num_instances)
job_id = scheduler_commands.assert_job_submitted(result.stdout)
scheduler_commands.wait_job_completed(job_id, timeout=timeout)
scheduler_commands.assert_job_succeeded(job_id)
output = remote_command_executor.run_remote_command(f"cat /shared/{benchmark_name}.out").stdout
return job_id, output
def run_osu_benchmarks(
osu_benchmarks,
mpi_variant,
partition,
remote_command_executor,
scheduler_commands,
num_instances,
slots_per_instance,
region,
instance,
test_datadir,
dimensions,
):
for osu_benchmark_group, osu_benchmark_names in osu_benchmarks.items():
for osu_benchmark_name in osu_benchmark_names:
dimensions_copy = dimensions.copy()
logging.info("Running benchmark %s", osu_benchmark_name)
job_id, output = run_individual_osu_benchmark(
mpi_version=mpi_variant,
benchmark_group=osu_benchmark_group,
benchmark_name=osu_benchmark_name,
partition=partition,
remote_command_executor=remote_command_executor,
scheduler_commands=scheduler_commands,
num_instances=num_instances,
slots_per_instance=slots_per_instance,
test_datadir=test_datadir,
timeout=40,
)
logging.info("Preparing benchmarks %s metrics", osu_benchmark_name)
metric_data = []
submit_time = datetime.strptime(scheduler_commands.get_job_submit_time(job_id), "%Y-%m-%dT%H:%M:%S")
start_time = datetime.strptime(scheduler_commands.get_job_start_time(job_id), "%Y-%m-%dT%H:%M:%S")
wait_seconds = (start_time - submit_time).total_seconds()
if wait_seconds >= 15:
# After submission, if job waited more than 15 seconds before running, the job was probably
# waiting for compute nodes to be launched. Therefore, the wait time is pushed to CloudWatch
# as an indicator of how fast the compute nodes were launched.
metric_data.append(
{
"MetricName": "JobWaitTime",
"Dimensions": [{"Name": name, "Value": str(value)} for name, value in dimensions_copy.items()],
"Value": wait_seconds,
"Unit": "Seconds",
}
)
for packet_size, latency in re.findall(r"(\d+)\s+(\d+)\.", output):
dimensions_copy.update(
{
"OsuBenchmarkGroup": osu_benchmark_group,
"OsuBenchmarkName": osu_benchmark_name,
"PacketSize": packet_size,
}
)
metric_data.append(
{
"MetricName": "Latency",
"Dimensions": [{"Name": name, "Value": str(value)} for name, value in dimensions_copy.items()],
"Value": int(latency),
"Unit": "Microseconds",
}
)
yield metric_data
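# --- Usage sketch (not part of the original module) ---
# run_individual_osu_benchmark is normally driven from an integration test that
# already holds the cluster fixtures; the values below are placeholders.
#
#   job_id, output = run_individual_osu_benchmark(
#       mpi_version="openmpi",
#       benchmark_group="pt2pt",
#       benchmark_name="osu_latency",
#       partition=None,
#       remote_command_executor=remote_command_executor,  # test fixture
#       scheduler_commands=scheduler_commands,            # test fixture
#       num_instances=2,
#       slots_per_instance=1,
#       test_datadir=test_datadir,                        # test fixture
#       timeout=40,
#   )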
|
tests/shared/core/training_data/test_visualization.py | Next-Trends/rasa | 3,603 | 12695822 | <filename>tests/shared/core/training_data/test_visualization.py
from pathlib import Path
from typing import Text
import rasa.shared.utils.io
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import ActionExecuted, SlotSet, UserUttered
from rasa.shared.core.training_data import visualization
import rasa.utils.io
from rasa.shared.nlu.constants import TEXT, INTENT
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.training_data.training_data import TrainingData
def test_style_transfer():
r = visualization._transfer_style({"class": "dashed great"}, {"class": "myclass"})
assert r["class"] == "myclass dashed"
def test_style_transfer_empty():
r = visualization._transfer_style({"class": "dashed great"}, {"something": "else"})
assert r["class"] == "dashed"
def test_common_action_prefix():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
# until this point they are the same
SlotSet("my_slot", "a"),
ActionExecuted("a"),
ActionExecuted("after_a"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
# until this point they are the same
SlotSet("my_slot", "b"),
ActionExecuted("b"),
ActionExecuted("after_b"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_equal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_unequal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
]
other = [
ActionExecuted("greet"),
ActionExecuted("action_listen"),
UserUttered("hey"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 0
def test_graph_persistence(domain: Domain, tmp_path: Path):
from os.path import isfile
from networkx.drawing import nx_pydot
import rasa.shared.core.training_data.loading as core_loading
story_steps = core_loading.load_data_from_resource(
"data/test_yaml_stories/stories.yml", domain
)
out_file = str(tmp_path / "graph.html")
generated_graph = visualization.visualize_stories(
story_steps,
domain,
output_file=out_file,
max_history=3,
should_merge_nodes=False,
)
generated_graph = nx_pydot.to_pydot(generated_graph)
assert isfile(out_file)
content = rasa.shared.utils.io.read_file(out_file)
assert "isClient = true" in content
assert "graph = `{}`".format(generated_graph.to_string()) in content
def test_merge_nodes(domain: Domain, tmp_path: Path):
from os.path import isfile
import rasa.shared.core.training_data.loading as core_loading
story_steps = core_loading.load_data_from_resource(
"data/test_yaml_stories/stories.yml", domain
)
out_file = str(tmp_path / "graph.html")
visualization.visualize_stories(
story_steps,
domain,
output_file=out_file,
max_history=3,
should_merge_nodes=True,
)
assert isfile(out_file)
def test_story_visualization(domain: Domain, tmp_path: Path):
import rasa.shared.core.training_data.loading as core_loading
story_steps = core_loading.load_data_from_resource(
"data/test_yaml_stories/stories.yml", domain
)
out_file = tmp_path / "graph.html"
generated_graph = visualization.visualize_stories(
story_steps,
domain,
output_file=str(out_file),
max_history=3,
should_merge_nodes=False,
)
assert str(None) not in out_file.read_text()
assert "/affirm" in out_file.read_text()
assert len(generated_graph.nodes()) == 51
assert len(generated_graph.edges()) == 56
def test_story_visualization_with_training_data(
domain: Domain, tmp_path: Path, nlu_data_path: Text
):
import rasa.shared.core.training_data.loading as core_loading
story_steps = core_loading.load_data_from_resource(
"data/test_yaml_stories/stories.yml", domain
)
out_file = tmp_path / "graph.html"
test_text = "test text"
test_intent = "affirm"
generated_graph = visualization.visualize_stories(
story_steps,
domain,
output_file=str(out_file),
max_history=3,
should_merge_nodes=False,
nlu_training_data=TrainingData(
[Message({TEXT: test_text, INTENT: test_intent})]
),
)
assert test_text in out_file.read_text()
assert test_intent not in out_file.read_text()
assert len(generated_graph.nodes()) == 51
assert len(generated_graph.edges()) == 56
def test_story_visualization_with_merging(domain: Domain):
import rasa.shared.core.training_data.loading as core_loading
story_steps = core_loading.load_data_from_resource(
"data/test_yaml_stories/stories.yml", domain
)
generated_graph = visualization.visualize_stories(
story_steps, domain, output_file=None, max_history=3, should_merge_nodes=True
)
assert 15 < len(generated_graph.nodes()) < 33
assert 20 < len(generated_graph.edges()) < 33
|
machina/apps/forum_conversation/forum_polls/__init__.py | BrendaH/django-machina | 572 | 12695860 | default_app_config = 'machina.apps.forum_conversation.forum_polls.apps.ForumPollsAppConfig'
|
src/zero_config.py | scy6500/large-scale-lm-tutorials | 128 | 12695872 | <reponame>scy6500/large-scale-lm-tutorials
"""
src/zero_config.py
"""
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import deepspeed
import torch.distributed as dist
model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
optimizer = Adam(model.parameters(), lr=3e-5, weight_decay=3e-7)
engine, optimizer, _, scheduler = deepspeed.initialize(
optimizer=optimizer,
model=model,
config={
"train_batch_size": 16,
"gradient_accumulation_steps": 1,
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"total_num_steps": 300,
"warmup_min_lr": 0,
"warmup_max_lr": 3e-5,
"warmup_num_steps": 30,
},
},
"fp16": {
"enabled": True,
"initial_scale_power": 32,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1,
},
"zero_optimization": {
"stage": 1,
"allgather_partitions": True,
"allgather_bucket_size": 5e8,
"overlap_comm": False,
"reduce_scatter": True,
"reduce_bucket_size": 5e8,
"contiguous_gradients": True,
},
"zero_allow_untested_optimizer": True,
"wall_clock_breakdown": False,
"steps_per_print": 9999999999,
},
)
datasets = load_dataset("squad").data["train"]["context"]
datasets = [str(sample) for sample in datasets]
data_loader = DataLoader(datasets, batch_size=8, num_workers=8)
for i, data in enumerate(data_loader):
tokens = tokenizer(
data,
return_tensors="pt",
truncation=True,
padding=True,
max_length=1024,
)
loss = engine(
input_ids=tokens.input_ids.cuda(),
attention_mask=tokens.attention_mask.cuda(),
labels=tokens.input_ids.cuda(),
).loss
engine.backward(loss)
engine.step()
if i % 10 == 0 and dist.get_rank() == 0:
print(f"step:{i}, loss:{loss}")
if i >= 300:
break
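# --- Launch sketch (not part of the original script) ---
# The script is meant to be started through the DeepSpeed launcher; the GPU
# count below is an assumption.
#
#   deepspeed --num_gpus=4 src/zero_config.py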
|
alipay/aop/api/domain/KbAdvertIdentifyResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12695873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbAdvertIdentifyResponse(object):
def __init__(self):
self._benefit_ids = None
self._benefit_type = None
self._code = None
self._ext_info = None
self._identify = None
self._identify_type = None
@property
def benefit_ids(self):
return self._benefit_ids
@benefit_ids.setter
def benefit_ids(self, value):
if isinstance(value, list):
self._benefit_ids = list()
for i in value:
self._benefit_ids.append(i)
@property
def benefit_type(self):
return self._benefit_type
@benefit_type.setter
def benefit_type(self, value):
self._benefit_type = value
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def identify(self):
return self._identify
@identify.setter
def identify(self, value):
self._identify = value
@property
def identify_type(self):
return self._identify_type
@identify_type.setter
def identify_type(self, value):
self._identify_type = value
def to_alipay_dict(self):
params = dict()
if self.benefit_ids:
if isinstance(self.benefit_ids, list):
for i in range(0, len(self.benefit_ids)):
element = self.benefit_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.benefit_ids[i] = element.to_alipay_dict()
if hasattr(self.benefit_ids, 'to_alipay_dict'):
params['benefit_ids'] = self.benefit_ids.to_alipay_dict()
else:
params['benefit_ids'] = self.benefit_ids
if self.benefit_type:
if hasattr(self.benefit_type, 'to_alipay_dict'):
params['benefit_type'] = self.benefit_type.to_alipay_dict()
else:
params['benefit_type'] = self.benefit_type
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.identify:
if hasattr(self.identify, 'to_alipay_dict'):
params['identify'] = self.identify.to_alipay_dict()
else:
params['identify'] = self.identify
if self.identify_type:
if hasattr(self.identify_type, 'to_alipay_dict'):
params['identify_type'] = self.identify_type.to_alipay_dict()
else:
params['identify_type'] = self.identify_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertIdentifyResponse()
if 'benefit_ids' in d:
o.benefit_ids = d['benefit_ids']
if 'benefit_type' in d:
o.benefit_type = d['benefit_type']
if 'code' in d:
o.code = d['code']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'identify' in d:
o.identify = d['identify']
if 'identify_type' in d:
o.identify_type = d['identify_type']
return o
|
examples/files/make_example_files.py | shawnbrown/dbfread | 179 | 12695880 | #!/usr/bin/env python2
"""
This creates the example file people.dbf.
You need the dbfpy library to run this.
"""
from __future__ import print_function
from dbfpy import dbf
def make_example_file(filename, fields, records, delete_last_record=False):
field_names = [field[0] for field in fields]
print('Creating', filename)
print(' Fields:', ', '.join(field_names))
print(' ', len(records), 'records')
db = dbf.Dbf(filename, new=True)
db.addField(*fields)
for data in records:
record = db.newRecord()
for name, value in zip(field_names, data):
record[name] = value
record.store()
if delete_last_record:
# Delete the last one of those records.
record.delete()
record.store()
try:
db.close()
except AttributeError:
# This ignores the following error:
# self.memo.flush()
# AttributeError: 'NoneType' object has no attribute 'flush'
pass
make_example_file('people.dbf',
[('NAME', 'C', 16),
('BIRTHDATE', 'D')],
[('Alice', (1987, 3, 1)),
('Bob', (1980, 11, 12)),
('Deleted Guy', (1979, 12, 22))],
delete_last_record=True)
make_example_file('../../testcases/memotest.dbf',
[('NAME', 'C', 16),
('BIRTHDATE', 'D'),
('MEMO', 'M')],
[('Alice', (1987, 3, 1), 'Alice memo'),
('Bob', (1980, 11, 12), 'Bob memo'),
('Deleted Guy', (1979, 12, 22), 'Deleted Guy memo')],
delete_last_record=True)
|
archai/algos/didarts/didarts_arch_trainer.py | shatadru99/archai | 344 | 12695882 | <reponame>shatadru99/archai<gh_stars>100-1000
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Mapping, Optional, Union, Tuple
import copy
import torch
from torch.utils.data import DataLoader
from torch import Tensor, nn, autograd
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from overrides import overrides
from archai.common.config import Config
from archai.nas.arch_trainer import ArchTrainer
from archai.common import utils, ml_utils
from archai.nas.model import Model
from archai.common.checkpoint import CheckPoint
from archai.common.common import logger
from archai.common.multi_optim import MultiOptim, OptimSched
class DidartsArchTrainer(ArchTrainer):
"""Train network using different optimizers for alphas and other parameters"""
def __init__(self, conf_train: Config, model: Model,
checkpoint:Optional[CheckPoint]) -> None:
super().__init__(conf_train, model, checkpoint)
self._conf_alpha_optim = conf_train['alpha_optimizer']
self._conf_alpha_sched = conf_train['alpha_lr_schedule']
@overrides
def create_multi_optim(self, train_len:int)->MultiOptim:
# optimizers, schedulers needs to be recreated for each fit call
# as they have state specific to each run
optim = self.create_optimizer(self.conf_optim, self.model.nonarch_params(recurse=True))
# create scheduler for optim before applying amp
sched, sched_on_epoch = self.create_scheduler(self.conf_sched, optim, train_len)
alpha_optim = self.create_optimizer(self._conf_alpha_optim,
self.model.all_owned().param_by_kind(None))
alpha_sched, alpha_sched_on_epoch = self.create_scheduler(self._conf_alpha_sched, alpha_optim, train_len)
multi_optim = MultiOptim()
multi_optim.append(OptimSched(optim, sched, sched_on_epoch))
multi_optim.append(OptimSched(alpha_optim, alpha_sched, alpha_sched_on_epoch))
logger.info({'multi_optim_len': len(multi_optim)})
return multi_optim
|
Python3/1323.py | rakhi2001/ecom7 | 854 | 12695905 | __________________________________________________________________________________________________
class Solution:
def maximum69Number (self, num: int) -> int:
for idx in range(len(str(num))):
if str(num)[idx]=='6':
return int(str(num)[:idx]+'9'+str(num)[idx+1:])
return num
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def maximum69Number (self, num: int) -> int:
num_str = list(str(num))
for i, ch in enumerate(num_str):
if ch == '6':
num_str[i] = '9'
break
return int(''.join(num_str))
__________________________________________________________________________________________________
|
tests/pytests/unit/runners/test_network.py | waynegemmell/salt | 9,425 | 12695907 | """
Unit tests for Network runner
"""
import logging
import pytest
import salt.runners.network as network
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
@pytest.fixture
def mac_addr_list():
test_list_mac_addresses = [
"08:00:27:82:b2:ca",
"52:54:00:ee:eb:e1",
"52:54:00:ee:eb:e1",
]
return test_list_mac_addresses.sort()
@pytest.fixture
def id_minion():
return "test-host"
@pytest.fixture
def cache_grain_data(id_minion):
return {
id_minion: {
"cwd": "/",
"ip_gw": True,
"ip4_gw": "192.168.0.1",
"ip6_gw": False,
"dns": {
"nameservers": ["192.168.0.1"],
"ip4_nameservers": ["192.168.0.1"],
"ip6_nameservers": [],
"sortlist": [],
"domain": "",
"search": ["example.org"],
"options": [],
},
"fqdns": ["Unknown.example.org"],
"machine_id": "ae886ddffbcc4f0da1e72769adfe0171",
"master": "192.168.0.109",
"server_id": 644891398,
"localhost": "Unknown.example.org",
"fqdn": "Unknown.example.org",
"host": "Unknown",
"domain": "example.org",
"hwaddr_interfaces": {
"lo": "00:00:00:00:00:00",
"enp0s3": "08:00:27:82:b2:ca",
"virbr0": "52:54:00:ee:eb:e1",
"virbr0-nic": "52:54:00:ee:eb:e1",
},
"id": "test-host",
"ip4_interfaces": {
"lo": ["127.0.0.1"],
"enp0s3": ["192.168.0.124"],
"virbr0": ["192.168.122.1"],
"virbr0-nic": [],
},
"ip6_interfaces": {
"lo": ["::1"],
"enp0s3": ["fe80::a00:27ff:fe82:b2ca"],
"virbr0": [],
"virbr0-nic": [],
},
"ipv4": ["127.0.0.1", "192.168.0.124", "192.168.122.1"],
"ipv6": ["::1", "fe80::a00:27ff:fe82:b2ca"],
"fqdn_ip4": ["192.168.0.70"],
"fqdn_ip6": [],
"ip_interfaces": {
"lo": ["127.0.0.1", "::1"],
"enp0s3": ["192.168.0.124", "fe80::a00:27ff:fe82:b2ca"],
"virbr0": ["192.168.122.1"],
"virbr0-nic": [],
},
"kernelparams": [
["BOOT_IMAGE", "/vmlinuz-3.10.0-1127.18.2.el7.x86_64"],
["root", "/dev/mapper/centos-root"],
["ro", None],
["rd.lvm.lv", "centos/root"],
["rd.lvm.lv", "centos/swap"],
["rhgb", None],
["quiet", None],
["LANG", "en_US.UTF-8"],
],
"locale_info": {
"defaultlanguage": "en_US",
"defaultencoding": "UTF-8",
"detectedencoding": "UTF-8",
"timezone": "unknown",
},
"num_gpus": 1,
"gpus": [{"vendor": "vmware", "model": "SVGA II Adapter"}],
"kernel": "Linux",
"nodename": "Unknown.example.org",
"kernelrelease": "3.10.0-1127.18.2.el7.x86_64",
"kernelversion": "#1 SMP Sun Jul 26 15:27:06 UTC 2020",
"cpuarch": "x86_64",
"selinux": {"enabled": False, "enforced": "Disabled"},
"systemd": {
"version": "219",
"features": (
"+PAM +AUDIT +SELINUX +IMA -APPARMOR +SMACK +SYSVINIT +UTMP"
" +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 -SECCOMP +BLKID"
" +ELFUTILS +KMOD +IDN"
),
},
"init": "systemd",
"lsb_distrib_id": "CentOS Linux",
"lsb_distrib_codename": "CentOS Linux 7 (Core)",
"osfullname": "CentOS Linux",
"osrelease": "7.8.2003",
"oscodename": "CentOS Linux 7 (Core)",
"os": "CentOS",
"num_cpus": 1,
"cpu_model": "Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz",
"cpu_flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr ",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"mmx",
"fxsr",
"sse",
"sse2",
"ht",
"syscall",
"nx",
"rdtscp",
"lm",
"constant_tsc",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"eagerfpu",
"pni",
"pclmulqdq",
"monitor",
"ssse3",
"cx16",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"aes",
"xsave",
"avx",
"rdrand",
"hypervisor",
"lahf_lm",
"abm",
"3dnowprefetch",
"invpcid_single",
"fsgsbase",
"avx2",
"inv pcid",
"rdseed",
"clflushopt",
"md_clear",
"flush_l1d",
],
"os_family": "RedHat",
"osarch": "x86_64",
"mem_total": 1998,
"swap_total": 2047,
"biosversion": "VirtualBox",
"productname": "VirtualBox",
"manufacturer": "innotek GmbH",
"biosreleasedate": "12/01/2006",
"uuid": "dd95fedd-1a2b-5e48-86a7-7e339f9f02a1",
"serialnumber": "0",
"virtual": "VirtualBox",
"ps": "ps -efHww",
"osrelease_info": [7, 8, 2003],
"osmajorrelease": 7,
"osfinger": "CentOS Linux-7",
"path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin",
"systempath": [
"/usr/local/sbin",
"/usr/local/bin",
"/usr/sbin",
"/usr/bin",
],
"pythonexecutable": "/usr/bin/python3",
"pythonpath": [
"/usr/bin",
"/usr/lib64/python36.zip",
"/usr/lib64/python3.6",
"/usr/lib64/python3.6/lib-dynload",
"/usr/lib64/python3.6/site-packages",
"/usr/lib/python3.6/site-packages",
],
"pythonversion": [3, 6, 8, "final", 0],
"saltpath": "/usr/lib/python3.6/site-packages/salt",
"saltversion": "3003",
"saltversioninfo": [3003],
"zmqversion": "4.1.4",
"disks": ["sda", "sr0"],
"ssds": [],
"shell": "/bin/sh",
"lvm": {"centos": ["root", "swap"]},
"mdadm": [],
"username": "root",
"groupname": "root",
"pid": 2469,
"gid": 0,
"uid": 0,
"zfs_support": False,
"zfs_feature_flags": False,
}
}
@pytest.fixture
def configure_loader_modules():
return {
network: {
"__grains__": {
"osarch": "x86_64",
"os_family": "Redhat",
"osmajorrelease": 7,
"kernelrelease": "3.10.0-1127.18.2.el7.x86_64",
},
},
}
def test_wolmatch(cache_grain_data, id_minion, mac_addr_list):
"""
Test wolmatch
"""
cache_mock = MagicMock(return_value=cache_grain_data)
patches = {
"cache.grains": cache_mock,
}
wol_out = MagicMock(return_value=mac_addr_list)
with patch.dict(network.__salt__, patches):
with patch("salt.runners.network.wol", wol_out):
added = network.wolmatch(id_minion)
assert added.sort() == mac_addr_list
|
readthedocs/projects/migrations/0067_change_max_length_feature_id.py | mforbes/readthedocs.org | 4,054 | 12695994 | <filename>readthedocs/projects/migrations/0067_change_max_length_feature_id.py
# Generated by Django 2.2.17 on 2020-11-24 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0066_make_imported_file_slug_nullable'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='feature_id',
field=models.CharField(max_length=255, unique=True, verbose_name='Feature identifier'),
),
]
|
train.py | liucongg/GPT2-NewsTitle | 540 | 12696012 | # -*- coding:utf-8 -*-
# @project: GPT2-NewsTitle
# @filename: train.py
# @author: 刘聪NLP
# @contact: <EMAIL>
# @time: 2020/12/16 16:28
"""
文件说明:
通过新闻正文生成新闻标题的GPT2模型的训练文件
"""
import torch
import os
import random
import numpy as np
import argparse
import logging
from transformers.modeling_gpt2 import GPT2Config
from model import GPT2LMHeadModel
from transformers import BertTokenizer
from data_set import GPT2NewsTitleDataSet, collate_func
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm, trange
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def train(model, device, train_data, test_data, args):
"""
训练模型
Args:
model: 模型
device: 设备信息
train_data: 训练数据类
test_data: 测试数据类
args: 训练参数配置信息
Returns:
"""
tb_write = SummaryWriter()
if args.gradient_accumulation_steps < 1:
raise ValueError("gradient_accumulation_steps参数无效,必须大于等于1")
# 计算真实的训练batch_size大小
train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
train_sampler = RandomSampler(train_data)
train_data_loader = DataLoader(train_data, sampler=train_sampler,
batch_size=train_batch_size, collate_fn=collate_func)
total_steps = int(len(train_data_loader) * args.num_train_epochs / args.gradient_accumulation_steps)
logger.info("总训练步数为:{}".format(total_steps))
model.to(device)
# 获取模型所有参数
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
    # Set up the optimizer
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion * total_steps),
num_training_steps=total_steps)
    # Empty the CUDA cache
torch.cuda.empty_cache()
    # Put the model into training mode
model.train()
title_id = train_data.title_id
tr_loss, logging_loss, min_loss = 0.0, 0.0, 0.0
global_step = 0
    # Start training the model
for iepoch in trange(0, int(args.num_train_epochs), desc="Epoch", disable=False):
iter_bar = tqdm(train_data_loader, desc="Iter (loss=X.XXX)", disable=False)
for step, batch in enumerate(iter_bar):
input_ids = batch["input_ids"].to(device)
token_type_ids = batch["token_type_ids"].to(device)
            # Forward pass to get the training outputs
outputs = model.forward(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids, title_id=title_id)
loss = outputs[0]
tr_loss += loss.item()
            # Show the loss value in the progress bar for easy monitoring
iter_bar.set_description("Iter (loss=%5.3f)" % loss.item())
            # If gradient accumulation is enabled, divide the loss by the number of accumulation steps
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
            # Backpropagate the loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            # Once the step count is divisible by the accumulation steps, update the parameters
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
                # Every logging_steps steps, record the learning rate and the training loss
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_write.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_write.add_scalar("train_loss", (tr_loss-logging_loss) /
(args.logging_steps*args.gradient_accumulation_steps), global_step)
logging_loss = tr_loss
                # Every eval_steps steps, evaluate the model and record the test loss
if args.eval_steps > 0 and global_step % args.eval_steps == 0:
eval_loss = evaluate(model, device, test_data, args)
tb_write.add_scalar("test_loss", eval_loss, global_step)
model.train()
        # Save the model after each epoch
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
    # Empty the CUDA cache
torch.cuda.empty_cache()
def evaluate(model, device, test_data, args):
"""
对测试数据集进行模型测试
Args:
model: 模型
device: 设备信息
test_data: 测试数据类
args: 训练参数配置信息
Returns:
"""
    # Build the DataLoader for the test set
test_sampler = SequentialSampler(test_data)
test_data_loader = DataLoader(test_data, sampler=test_sampler,
batch_size=args.test_batch_size, collate_fn=collate_func)
iter_bar = tqdm(test_data_loader, desc="iter", disable=False)
title_id = test_data.title_id
total_loss, total = 0.0, 0.0
    # Run evaluation
for step, batch in enumerate(iter_bar):
        # Put the model into eval mode
model.eval()
with torch.no_grad():
input_ids = batch["input_ids"].to(device)
token_type_ids = batch["token_type_ids"].to(device)
            # Forward pass to get the predictions
outputs = model.forward(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids, title_id=title_id)
loss = outputs[0]
loss = loss.item()
            # Accumulate the loss
total_loss += loss*len(batch["input_ids"])
total += len(batch["input_ids"])
    # Compute the final loss on the test set
test_loss = total_loss / total
return test_loss
def set_args():
"""设置训练模型所需参数"""
parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, help='GPU device to use for training or testing')
    parser.add_argument('--config_path', default='./config/config.json', type=str, help='model configuration file')
    parser.add_argument('--vocab_path', default='./vocab/vocab.txt', type=str, help='vocabulary file; a reduced vocabulary with some new tokens added')
    parser.add_argument('--train_file_path', default='./data_dir/train_data.json', type=str, help='training data for news title generation')
    parser.add_argument('--test_file_path', default='./data_dir/test_data.json', type=str, help='test data for news title generation')
    parser.add_argument('--pretrained_model_path', default=None, type=str, help='path to a pretrained GPT2 model')
    parser.add_argument('--data_dir', default='./data_dir', type=str, help='directory where generated cache data is stored')
    parser.add_argument('--num_train_epochs', default=5, type=int, help='number of training epochs')
    parser.add_argument('--train_batch_size', default=16, type=int, help='batch size for training')
    parser.add_argument('--test_batch_size', default=8, type=int, help='batch size for testing')
    parser.add_argument('--learning_rate', default=1e-4, type=float, help='learning rate for model training')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='warm-up proportion, i.e. the fraction of total training steps used for warm-up')
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='epsilon value of the Adam optimizer')
    parser.add_argument('--logging_steps', default=20, type=int, help='number of steps between training log records')
    parser.add_argument('--eval_steps', default=4000, type=int, help='number of training steps between evaluations')
    parser.add_argument('--gradient_accumulation_steps', default=4, type=int, help='gradient accumulation steps')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='')
    parser.add_argument('--output_dir', default='output_dir/', type=str, help='model output directory')
    parser.add_argument('--seed', type=int, default=2020, help='random seed')
    parser.add_argument('--max_len', type=int, default=512, help='maximum input length for the model; must be smaller than n_ctx in the config')
    parser.add_argument('--title_max_len', type=int, default=32, help='maximum length of the generated title; must be smaller than max_len')
return parser.parse_args()
def main():
    # Set up the training arguments
args = set_args()
    # Configure which GPU devices are visible
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICE"] = args.device
    # Get the device used for model training
device = torch.device("cuda" if torch.cuda.is_available() and int(args.device) >= 0 else "cpu")
    # Set the random seed so results can be reproduced
if args.seed:
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
    # Load the model config
model_config = GPT2Config.from_json_file(args.config_path)
    # Instantiate the GPT2LMHeadModel. Here we do not load a pretrained model but train from scratch.
    # Why train from scratch? We use a small model with only 6 layers and a modified vocabulary, and no suitable pretrained model was found. (Honestly: limited budget, the GPUs can't handle more.)
    # Decide whether to use a pretrained GPT2 model
if args.pretrained_model_path:
model = GPT2LMHeadModel.from_pretrained(args.pretrained_model_path)
else:
        # If no pretrained model is specified, initialize the model from scratch
model = GPT2LMHeadModel(config=model_config)
# model = GPT2LMHeadModel(config=model_config)
    # Instantiate the tokenizer
tokenizer = BertTokenizer.from_pretrained(args.vocab_path, do_lower_case=True)
    # Treat [Space] as a single unit. For example, for "我爱[Space]中国。" the original tokenizer produces "['我', '爱', '[', 'Space', ']', '中', '国', '。']";
    # after adding the separator token the result is "['我', '爱', '[Space]', '中', '国', '。']"
tokenizer.add_tokens("[Space]", special_tokens=True)
    # Create the model output directory
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
    # Load the training and test data
train_data = GPT2NewsTitleDataSet(tokenizer, args.max_len, args.title_max_len, args.data_dir, "train", args.train_file_path)
test_data = GPT2NewsTitleDataSet(tokenizer, args.max_len, args.title_max_len, args.data_dir, "test", args.test_file_path)
    # Start training
train(model, device, train_data, test_data, args)
if __name__ == '__main__':
main()
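# --- Launch sketch (not part of the original script) ---
# Example single-GPU invocation using the arguments defined in set_args();
# the data paths simply repeat the defaults above.
#
#   python train.py --device 0 \
#       --train_file_path ./data_dir/train_data.json \
#       --test_file_path ./data_dir/test_data.json \
#       --output_dir output_dir/ \
#       --num_train_epochs 5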
|
usaspending_api/references/v2/views/total_budgetary_resources.py | ststuck/usaspending-api | 217 | 12696033 | <gh_stars>100-1000
from django.db.models import Sum
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.exceptions import InvalidParameterException
from usaspending_api.common.validator.tinyshield import TinyShield
from usaspending_api.references.models.gtas_sf133_balances import GTASSF133Balances
from usaspending_api.common.helpers.generic_helper import get_account_data_time_period_message
class TotalBudgetaryResources(APIView):
"""
This route sends a request to the backend to retrieve GTAS totals by FY/FP.
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/references/total_budgetary_resources.md"
@cache_response()
def get(self, request):
model = [
{
"key": "fiscal_year",
"name": "fiscal_year",
"type": "integer",
"min": 2017,
"optional": True,
"default": None,
"allow_nulls": True,
},
{
"key": "fiscal_period",
"name": "fiscal_period",
"type": "integer",
"min": 2,
"max": 12,
"optional": True,
"default": None,
"allow_nulls": True,
},
]
validated = TinyShield(model).block(request.query_params)
fiscal_year = validated.get("fiscal_year", None)
fiscal_period = validated.get("fiscal_period", None)
gtas_queryset = GTASSF133Balances.objects.values("fiscal_year", "fiscal_period")
if fiscal_period:
if not fiscal_year:
raise InvalidParameterException("fiscal_period was provided without fiscal_year.")
else:
gtas_queryset = gtas_queryset.filter(fiscal_year=fiscal_year, fiscal_period=fiscal_period)
elif fiscal_year:
gtas_queryset = gtas_queryset.filter(fiscal_year=fiscal_year)
results = gtas_queryset.annotate(total_budgetary_resources=Sum("total_budgetary_resources_cpe")).order_by(
"-fiscal_year", "-fiscal_period"
)
return Response(
{
"results": list(results),
"messages": [get_account_data_time_period_message()] if not fiscal_year or fiscal_year < 2017 else [],
}
)
|
indra/tests/test_tas.py | zebulon2/indra | 136 | 12696043 | <filename>indra/tests/test_tas.py<gh_stars>100-1000
from nose.plugins.attrib import attr
from indra.sources.tas import process_from_web
@attr('slow')
def test_processor():
tp = process_from_web(affinity_class_limit=10)
assert tp
assert tp.statements
num_stmts = len(tp.statements)
# This is the total number of statements about human genes
assert num_stmts == 1123724, num_stmts
assert all(len(s.evidence) >= 1 for s in tp.statements), \
'Some statements lack any evidence'
|
tests/test_org_indexer.py | halhenke/promnesia | 1,327 | 12696047 | <reponame>halhenke/promnesia<filename>tests/test_org_indexer.py
from typing import Optional
from promnesia.common import Visit
from promnesia.sources.org import extract_from_file
from common import tdata, throw
def declrf(s: Optional[str]) -> Optional[str]:
if s is None:
return None
    # meh.. not sure how to handle this properly, ideally should be via pytest?
# not sure if should just do it in the indexer? e.g. extension might not like it
return s.replace('\r', '')
def test_org_indexer() -> None:
[_, cpp, cozy] = [v if isinstance(v, Visit) else throw(v) for v in extract_from_file(tdata('auto/orgs/file.org'))]
assert cpp.url == 'https://www.youtube.com/watch?v=rHIkrotSwcc'
# TODO not sure about filetags?
exp = '''
xxx /r/cpp :cpp:programming:
I've enjoyed [<NAME>'s _There Are No Zero-cost Abstractions_](
https://www.youtube.com/watch?v=rHIkrotSwcc) very much.
'''.lstrip()
assert declrf(cpp.context) == exp
assert cozy.url == 'https://twitter.com/Mappletons/status/1255221220263563269'
def test_org_indexer_2() -> None:
items = [v if isinstance(v, Visit) else throw(v) for v in extract_from_file(tdata('auto/orgs/file3.org'))]
assert len(items) == 6
assert items[0].url == 'https://www.reddit.com/r/androidapps/comments/4i36z9/how_you_use_your_android_to_the_maximum/d2uq24i'
assert items[1].url == 'https://link.com'
assert items[-2].url == 'https://en.wikipedia.org/wiki/Resilio_Sync'
# TODO shit def need org specific url extractor (and then extract from everything remaining)
# assert results[-1].url == 'https://en.wikipedia.org/wiki/InterPlanetary_File_System'
def test_heading() -> None:
items = [v if isinstance(v, Visit) else throw(v) for v in extract_from_file(tdata('auto/orgs/file2.org'))]
assert {i.url for i in items} == {
'https://en.wikipedia.org/wiki/Computational_topology',
'http://graphics.stanford.edu/courses/cs468-09-fall/',
'https://en.wikipedia.org/wiki/Triangulation_(topology)',
'https://en.wikipedia.org/wiki/Digital_manifold',
}
|
multi_AdaBoost/Bayes.py | wu546300070/weiboanalysis | 685 | 12696066 | <reponame>wu546300070/weiboanalysis<gh_stars>100-1000
'''
Multi-class Naive Bayes implementation
'''
import random
import re
import traceback
import jieba
import matplotlib.pyplot as plt
import numpy as np
from pylab import mpl
from sklearn.externals import joblib
from sklearn.naive_bayes import MultinomialNB
jieba.load_userdict("../train/word.txt")
stop = [line.strip() for line in open('../ad/stop.txt', 'r', encoding='utf-8').readlines()] # stop words
def build_key_word(path): # generate features from word frequencies
d = {}
with open(path, encoding="utf-8") as fp:
for line in fp:
for word in jieba.cut(line.strip()):
p = re.compile(b'\w', re.L)
result = p.sub(b"", bytes(word, encoding="utf-8")).decode("utf-8")
                if not result or result == ' ': # empty string
continue
                if len(word) > 1: # avoid flooding the statistics with meaningless single-character tokens
d[word] = d.get(word, 0) + 1
kw_list = sorted(d, key=lambda x: d[x], reverse=True)
    size = int(len(kw_list) * 0.2) # keep the top 20% most frequent words
mood = set(kw_list[:size])
return list(mood - set(stop))
def loadDataSet(path): # return the tokenized words and the label of each Weibo post
line_cut = []
label = []
with open(path, encoding="utf-8") as fp:
for line in fp:
temp = line.strip()
try:
                sentence = temp[2:].lstrip() # the Weibo text
                label.append(int(temp[:2])) # read the label
word_list = []
sentence = str(sentence).replace('\u200b', '')
for word in jieba.cut(sentence.strip()):
p = re.compile(b'\w', re.L)
result = p.sub(b"", bytes(word, encoding="utf-8")).decode("utf-8")
                    if not result or result == ' ': # empty string
continue
word_list.append(word)
word_list = list(set(word_list) - set(stop) - set('\u200b')
- set(' ') - set('\u3000') - set('️'))
line_cut.append(word_list)
except Exception:
continue
    return line_cut, label # return the tokens and the label of every post
def setOfWordsToVecTor(vocabularyList, moodWords): # vectorize a single post
vocabMarked = [0] * len(vocabularyList)
for smsWord in moodWords:
if smsWord in vocabularyList:
vocabMarked[vocabularyList.index(smsWord)] += 1
return np.array(vocabMarked)
def setOfWordsListToVecTor(vocabularyList, train_mood_array): # vectorize all posts
vocabMarkedList = []
for i in range(len(train_mood_array)):
vocabMarked = setOfWordsToVecTor(vocabularyList, train_mood_array[i])
vocabMarkedList.append(vocabMarked)
return vocabMarkedList
def trainingNaiveBayes(train_mood_array, label): # compute the prior probabilities
numTrainDoc = len(train_mood_array)
numWords = len(train_mood_array[0])
prior_Pos, prior_Neg, prior_Neutral = 0.0, 0.0, 0.0
for i in label:
if i == 1:
prior_Pos = prior_Pos + 1
elif i == 2:
prior_Neg = prior_Neg + 1
else:
prior_Neutral = prior_Neutral + 1
prior_Pos = prior_Pos / float(numTrainDoc)
prior_Neg = prior_Neg / float(numTrainDoc)
prior_Neutral = prior_Neutral / float(numTrainDoc)
wordsInPosNum = np.ones(numWords)
wordsInNegNum = np.ones(numWords)
wordsInNeutralNum = np.ones(numWords)
    PosWordsNum = 2.0 # Laplace smoothing: a zero probability would zero the whole product, so counts start at 1 and denominators at 2
NegWordsNum = 2.0
NeutralWordsNum = 2.0
for i in range(0, numTrainDoc):
try:
if label[i] == 1:
wordsInPosNum += train_mood_array[i]
                PosWordsNum += sum(train_mood_array[i]) # total number of word occurrences in the positive corpus
elif label[i] == 2:
wordsInNegNum += train_mood_array[i]
NegWordsNum += sum(train_mood_array[i])
else:
wordsInNeutralNum += train_mood_array[i]
NeutralWordsNum += sum(train_mood_array[i])
except Exception as e:
traceback.print_exc(e)
pWordsPosicity = np.log(wordsInPosNum / PosWordsNum)
pWordsNegy = np.log(wordsInNegNum / NegWordsNum)
pWordsNeutral = np.log(wordsInNeutralNum / NeutralWordsNum)
return pWordsPosicity, pWordsNegy, pWordsNeutral, prior_Pos, prior_Neg, prior_Neutral
def classify(pWordsPosicity, pWordsNegy, pWordsNeutral, prior_Pos, prior_Neg, prior_Neutral,
test_word_arrayMarkedArray):
pP = sum(test_word_arrayMarkedArray * pWordsPosicity) + np.log(prior_Pos)
pN = sum(test_word_arrayMarkedArray * pWordsNegy) + np.log(prior_Neg)
pNeu = sum(test_word_arrayMarkedArray * pWordsNeutral) + np.log(prior_Neutral)
if pP > pN > pNeu or pP > pNeu > pN:
return pP, pN, pNeu, 1
elif pN > pP > pNeu or pN > pNeu > pP:
return pP, pN, pNeu, 2
else:
return pP, pN, pNeu, 3
def predict(test_word_array, test_word_arrayLabel, testCount, PosWords, NegWords, NeutralWords, prior_Pos, prior_Neg,
prior_Neutral):
errorCount = 0
for j in range(testCount):
try:
pP, pN, pNeu, smsType = classify(PosWords, NegWords, NeutralWords, prior_Pos, prior_Neg, prior_Neutral,
test_word_array[j])
if smsType != test_word_arrayLabel[j]:
errorCount += 1
except Exception as e:
traceback.print_exc(e)
print("Bayes", errorCount / testCount)
return errorCount / testCount
if __name__ == '__main__':
multi_nb = []
bayes_nb = []
for m in range(1, 51):
vocabList = build_key_word("../train/train.txt")
line_cut, label = loadDataSet("../train/train.txt")
train_mood_array = setOfWordsListToVecTor(vocabList, line_cut)
test_word_array = []
test_word_arrayLabel = []
        testCount = 100 # randomly pick 100 posts for testing and remove them from the training data
for i in range(testCount):
try:
randomIndex = int(random.uniform(0, len(train_mood_array)))
test_word_arrayLabel.append(label[randomIndex])
test_word_array.append(train_mood_array[randomIndex])
del (train_mood_array[randomIndex])
del (label[randomIndex])
except Exception as e:
print(e)
multi = MultinomialNB()
multi = multi.fit(train_mood_array, label)
joblib.dump(multi, 'model/gnb.model')
muljob = joblib.load('model/gnb.model')
result = muljob.predict(test_word_array)
count = 0
for i in range(len(test_word_array)):
type = result[i]
if type != test_word_arrayLabel[i]:
count = count + 1
# print(test_word_array[i], "----", result[i])
print("MultinomialNB", count / float(testCount))
multi_nb.append(count / float(testCount))
PosWords, NegWords, NeutralWords, prior_Pos, prior_Neg, prior_Neutral = \
trainingNaiveBayes(train_mood_array, label)
accuracy = predict(test_word_array, test_word_arrayLabel, testCount, PosWords, NegWords, NeutralWords,
prior_Pos, prior_Neg,
prior_Neutral)
bayes_nb.append(accuracy)
        # plot the comparison
mpl.rcParams['font.sans-serif'] = ['SimHei']
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([x for x in range(1, 51)], multi_nb,
label='sklearn库',
color='orange')
ax.plot([x for x in range(1, 51)], bayes_nb,
label='实现',
color='green')
ax.set_xlabel('次数')
ax.set_ylabel('准确率')
plt.xlim([1,50])
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.title("对比")
plt.show()
|
workflows/pipe-common/pipeline/run_stats.py | msleprosy/cloud-pipeline | 126 | 12696076 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import requests
import datetime
class Task:
def __init__(self, start, end, name, parameters):
self.start = start
self.end = end
self.name = name
self.parameters = parameters
self.started = False
self.ended = False
class Api:
__API_URL = "http://10.66.128.50:9999/pipeline/restapi/"
__LOG_URL = 'run/{}/logs'
__RESPONSE_STATUS_OK = 'OK'
__DEFAULT_HEADER = {'content-type': 'application/json'}
__DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def __init__(self):
pass
def get_logs(self, run_id):
result = requests.get(self.__API_URL + self.__LOG_URL.format(run_id), headers=self.__DEFAULT_HEADER)
if hasattr(result.json(), 'error') or result.json()['status'] != self.__RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load run {} logs. API response: {}'.format(run_id, result.json()['message']))
logs = result.json()['payload']
tasks = {}
for log in logs:
id = log['task']['name']
name = id
parameters = ''
if 'parameters' in log['task']:
id += ' ' + log['task']['parameters']
parameters = log['task']['parameters']
else:
continue
date = datetime.datetime.strptime(log['date'], self.__DATE_FORMAT)
if id not in tasks:
task = Task(date, date, name, parameters)
tasks[id] = task
else:
task = tasks[id]
if 'logText' in log and 'Kubernetes pod state: Running' in log['logText'] and not task.started:
task.start = date
task.started = True
elif log['status'] == "FAILURE" or log['status'] == "STOPPED" or log['status'] == "SUCCESS" and not task.ended:
task.end = date
task.ended = True
total_time = 0
for id in tasks:
task = tasks[id]
task_time = (task.end - task.start).seconds
minutes = task_time/60
seconds = task_time%60
print('{}\t{}\t{} min {} s'.format(task.name, task.parameters, minutes, seconds))
total_time += task_time
print
print('Whole pipeline ran for {} s.'.format(total_time))
if __name__ == '__main__':
if len(sys.argv) < 2:
raise RuntimeError('Run ID is required for script')
run_id = sys.argv[1]
api = Api()
api.get_logs(run_id)
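# Usage sketch (added illustration, not part of the upstream script): the only
# required argument is the numeric run ID validated above.
#   python run_stats.py 12345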
|
tencentcloud/tcr/v20190924/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 465 | 12696080 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Internal error.
INTERNALERROR = 'InternalError'
# Failed to create the private domain.
INTERNALERROR_CREATEPRIVATEZONE = 'InternalError.CreatePrivateZone'
# Failed to create the private domain record.
INTERNALERROR_CREATEPRIVATEZONERECORD = 'InternalError.CreatePrivateZoneRecord'
# Database error.
INTERNALERROR_DBERROR = 'InternalError.DbError'
# Failed to delete the private domain record.
INTERNALERROR_DELETEPRIVATEZONERECORD = 'InternalError.DeletePrivateZoneRecord'
# Failed to query the private DNS resolution status of the VPC.
INTERNALERROR_DESCRIBEINTERNALENDPOINTDNSSTATUS = 'InternalError.DescribeInternalEndpointDnsStatus'
# Failed to query the private domain list.
INTERNALERROR_DESCRIBEPRIVATEZONELIST = 'InternalError.DescribePrivateZoneList'
# Failed to query the private domain record list.
INTERNALERROR_DESCRIBEPRIVATEZONERECORDLIST = 'InternalError.DescribePrivateZoneRecordList'
# Failed to query the list of allowlisted VPCs.
INTERNALERROR_DESCRIBEPRIVATEZONESERVICELIST = 'InternalError.DescribePrivateZoneServiceList'
# Target conflict.
INTERNALERROR_ERRCONFLICT = 'InternalError.ErrConflict'
# Target does not exist.
INTERNALERROR_ERRNOTEXIST = 'InternalError.ErrNotExist'
# Authentication failed.
INTERNALERROR_ERRUNAUTHORIZED = 'InternalError.ErrUnauthorized'
# Resource already exists.
INTERNALERROR_ERRORCONFLICT = 'InternalError.ErrorConflict'
# Resource exceeds quota.
INTERNALERROR_ERROROVERLIMIT = 'InternalError.ErrorOverLimit'
# Internal error of the TCR instance.
INTERNALERROR_ERRORTCRINTERNAL = 'InternalError.ErrorTcrInternal'
# Invalid header type in the TCR instance request.
INTERNALERROR_ERRORTCRINVALIDMEDIATYPE = 'InternalError.ErrorTcrInvalidMediaType'
# TCR instance resource conflict.
INTERNALERROR_ERRORTCRRESOURCECONFLICT = 'InternalError.ErrorTcrResourceConflict'
# No permission for TCR operations.
INTERNALERROR_ERRORTCRUNAUTHORIZED = 'InternalError.ErrorTcrUnauthorized'
# Failed to modify the association between the VPC and the private domain.
INTERNALERROR_MODIFYPRIVATEZONEVPC = 'InternalError.ModifyPrivateZoneVpc'
# Unknown error.
INTERNALERROR_UNKNOWN = 'InternalError.Unknown'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# The information in the user request does not match its namespace.
INVALIDPARAMETER_ERRNSMISMATCH = 'InvalidParameter.ErrNSMisMatch'
# The namespace name already exists.
INVALIDPARAMETER_ERRNAMESPACEEXIST = 'InvalidParameter.ErrNamespaceExist'
# The namespace is reserved.
INVALIDPARAMETER_ERRNAMESPACERESERVED = 'InvalidParameter.ErrNamespaceReserved'
# Invalid parameter: the repository already exists.
INVALIDPARAMETER_ERRREPOEXIST = 'InvalidParameter.ErrRepoExist'
# The trigger name already exists.
INVALIDPARAMETER_ERRTRIGGEREXIST = 'InvalidParameter.ErrTriggerExist'
# The user already exists.
INVALIDPARAMETER_ERRUSEREXIST = 'InvalidParameter.ErrUserExist'
# The instance name already exists.
INVALIDPARAMETER_ERRORNAMEEXISTS = 'InvalidParameter.ErrorNameExists'
# The instance name is invalid.
INVALIDPARAMETER_ERRORNAMEILLEGAL = 'InvalidParameter.ErrorNameIllegal'
# The instance name is reserved.
INVALIDPARAMETER_ERRORNAMERESERVED = 'InvalidParameter.ErrorNameReserved'
# The instance name is invalid: incorrectly formatted or reserved.
INVALIDPARAMETER_ERRORREGISTRYNAME = 'InvalidParameter.ErrorRegistryName'
# The number of cloud tags exceeds the upper limit of 10.
INVALIDPARAMETER_ERRORTAGOVERLIMIT = 'InvalidParameter.ErrorTagOverLimit'
# Invalid TCR request.
INVALIDPARAMETER_ERRORTCRINVALIDPARAMETER = 'InvalidParameter.ErrorTcrInvalidParameter'
# Instance creation is not supported in this region.
INVALIDPARAMETER_UNSUPPORTEDREGION = 'InvalidParameter.UnsupportedRegion'
# The user's namespaces have reached the quota.
LIMITEXCEEDED_ERRNAMESPACEMAXLIMIT = 'LimitExceeded.ErrNamespaceMaxLimit'
# The user's repositories have reached the maximum quota.
LIMITEXCEEDED_ERRREPOMAXLIMIT = 'LimitExceeded.ErrRepoMaxLimit'
# Triggers have reached the quota.
LIMITEXCEEDED_ERRTRIGGERMAXLIMIT = 'LimitExceeded.ErrTriggerMaxLimit'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# Missing parameter.
MISSINGPARAMETER_MISSINGPARAMETER = 'MissingParameter.MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The instance is in an abnormal state.
RESOURCEINSUFFICIENT_ERRORINSTANCENOTRUNNING = 'ResourceInsufficient.ErrorInstanceNotRunning'
# The VPC DNS resolution status is abnormal or has not been deleted.
RESOURCEINSUFFICIENT_ERRORVPCDNSSTATUS = 'ResourceInsufficient.ErrorVpcDnsStatus'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The user has not created a namespace.
RESOURCENOTFOUND_ERRNONAMESPACE = 'ResourceNotFound.ErrNoNamespace'
# The repository does not exist.
RESOURCENOTFOUND_ERRNOREPO = 'ResourceNotFound.ErrNoRepo'
# The tag does not exist.
RESOURCENOTFOUND_ERRNOTAG = 'ResourceNotFound.ErrNoTag'
# The trigger does not exist.
RESOURCENOTFOUND_ERRNOTRIGGER = 'ResourceNotFound.ErrNoTrigger'
# The user does not exist (not registered).
RESOURCENOTFOUND_ERRNOUSER = 'ResourceNotFound.ErrNoUser'
# The resource in the TCR instance was not found.
RESOURCENOTFOUND_TCRRESOURCENOTFOUND = 'ResourceNotFound.TcrResourceNotFound'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
examples/pytorch/transformer/optims/__init__.py | ketyi/dgl | 9,516 | 12696082 | from .noamopt import *
|
sunpy/net/tests/test_baseclient.py | RhnSharma/sunpy | 628 | 12696103 | import re
import pytest
from sunpy.net import base_client, dataretriever, jsoc, vso
from sunpy.net.base_client import QueryResponseTable, convert_row_to_table
from sunpy.net.dataretriever.sources.norh import NoRHClient
_REGEX = re.compile(r"Client")
CLIENT_LIST = []
for a_import in [vso, jsoc, dataretriever]:
for item in dir(a_import):
if _REGEX.search(item):
CLIENT_LIST.append(getattr(a_import, item))
CLIENT_LIST.remove(dataretriever.client.GenericClient)
# We can access the registry directly
CLIENT_NAMES = base_client.BaseClient._registry.keys()
CLIENTS_REG = base_client.BaseClient._registry.items()
@pytest.mark.parametrize("client", CLIENT_LIST)
def test_registry(client):
"""
Check if each client has been registered.
"""
assert client in CLIENT_NAMES
assert (client, client._can_handle_query) in CLIENTS_REG
@pytest.fixture
def dummy_response():
return QueryResponseTable([{'hello': 1}], client=NoRHClient())
def test_slice(dummy_response):
assert len(dummy_response) == 1
row = dummy_response[0]
table = row.as_table()
assert len(table) == 1
assert isinstance(table.client, NoRHClient)
col = dummy_response['hello']
table = col.as_table()
assert len(table) == 1
assert isinstance(table.client, NoRHClient)
def test_path_format_keys(dummy_response):
assert dummy_response.path_format_keys() == {'hello'}
def test_convert_row_to_table(dummy_response):
@convert_row_to_table
def example(self, query_results, **kwargs):
return query_results
assert example(None, dummy_response) is dummy_response
# This is a single row table anyway
assert example(None, dummy_response[0]) == dummy_response
|
mmocr/models/textrecog/backbones/resnet_abi.py | yuexy/mmocr | 2,261 | 12696106 | <reponame>yuexy/mmocr
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import BaseModule, Sequential
import mmocr.utils as utils
from mmocr.models.builder import BACKBONES
from mmocr.models.textrecog.layers import BasicBlock
@BACKBONES.register_module()
class ResNetABI(BaseModule):
"""Implement ResNet backbone for text recognition, modified from `ResNet.
<https://arxiv.org/pdf/1512.03385.pdf>`_ and
`<https://github.com/FangShancheng/ABINet>`_
Args:
in_channels (int): Number of channels of input image tensor.
stem_channels (int): Number of stem channels.
base_channels (int): Number of base channels.
arch_settings (list[int]): List of BasicBlock number for each stage.
strides (Sequence[int]): Strides of the first block of each stage.
out_indices (None | Sequence[int]): Indices of output stages. If not
specified, only the last stage will be returned.
last_stage_pool (bool): If True, add `MaxPool2d` layer to last stage.
"""
def __init__(self,
in_channels=3,
stem_channels=32,
base_channels=32,
arch_settings=[3, 4, 6, 6, 3],
strides=[2, 1, 2, 1, 1],
out_indices=None,
last_stage_pool=False,
init_cfg=[
dict(type='Xavier', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d')
]):
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, int)
assert isinstance(stem_channels, int)
assert utils.is_type_list(arch_settings, int)
assert utils.is_type_list(strides, int)
assert len(arch_settings) == len(strides)
assert out_indices is None or isinstance(out_indices, (list, tuple))
assert isinstance(last_stage_pool, bool)
self.out_indices = out_indices
self.last_stage_pool = last_stage_pool
self.block = BasicBlock
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
planes = base_channels
for i, num_blocks in enumerate(arch_settings):
stride = strides[i]
res_layer = self._make_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
blocks=num_blocks,
stride=stride)
self.inplanes = planes * self.block.expansion
planes *= 2
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
layers = []
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes, 1, stride, bias=False),
nn.BatchNorm2d(planes),
)
layers.append(
block(
inplanes,
planes,
use_conv1x1=True,
stride=stride,
downsample=downsample))
inplanes = planes
for _ in range(1, blocks):
layers.append(block(inplanes, planes, use_conv1x1=True))
return Sequential(*layers)
def _make_stem_layer(self, in_channels, stem_channels):
self.conv1 = nn.Conv2d(
in_channels, stem_channels, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(stem_channels)
self.relu1 = nn.ReLU(inplace=True)
def forward(self, x):
"""
Args:
x (Tensor): Image tensor of shape :math:`(N, 3, H, W)`.
Returns:
Tensor or list[Tensor]: Feature tensor. Its shape depends on
ResNetABI's config. It can be a list of feature outputs at specific
layers if ``out_indices`` is specified.
"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if self.out_indices and i in self.out_indices:
outs.append(x)
return tuple(outs) if self.out_indices else x
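# Minimal usage sketch (added illustration, not part of the upstream file); the
# input resolution below is an assumption picked to resemble a text-recognition crop.
#
#   import torch
#   backbone = ResNetABI(in_channels=3, out_indices=None)
#   feat = backbone(torch.randn(2, 3, 32, 128))   # (N, C, H, W)
#   print(feat.shape)                             # single feature map, since out_indices is None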
|
voice/Chat/files/utils/pattern_lister/pattern_lister.py | muhammadbilalakbar021/driverDrowsiness | 345 | 12696114 | <reponame>muhammadbilalakbar021/driverDrowsiness<gh_stars>100-1000
import sys
import os.path
import xml.etree.ElementTree as ET
if __name__ == '__main__':
aiml_dir = sys.argv[1]
csv_file = sys.argv[2]
print("aiml_dir:", aiml_dir)
print("csv_file:", csv_file)
questions = []
files = 0
for dirpath, dirnames, filenames in os.walk(aiml_dir):
for filename in filenames:
files += 1
aiml_file = os.path.join(dirpath, filename)
print (aiml_file)
try:
tree = ET.parse(aiml_file)
aiml = tree.getroot()
categories = aiml.findall('category')
for category in categories:
                    pattern_text = ""
                    text = ""  # ensure 'text' is defined even if the pattern element has no leading text
pattern = category.find("pattern")
for elt in pattern.iter():
comma = False
if elt.tag == "pattern":
if elt.text is not None:
text = elt.text.strip().upper()
pattern_text += " ".join(text.split())
comma = True
elif elt.tag == "set":
if 'name' in elt.attrib:
name = elt.attrib['name']
else:
name = elt.text.strip()
if comma is True:
pattern_text += " "
pattern_text += " SET[%s]"%name
if text:
pattern_text += " "
pattern_text += " ".join(text.split())
comma = True
elif elt.tag == "bot":
if 'name' in elt.attrib:
name = elt.attrib['name']
else:
name = elt.text.strip()
if comma is True:
pattern_text += " "
pattern_text += " BOT[%s]" % name
if text:
pattern_text += " "
pattern_text += " ".join(text.split())
comma = True
if elt.tail is not None and elt.tail.strip() != "":
if comma is True:
pattern_text += " "
text = elt.tail.strip().upper()
if text:
pattern_text += " "
pattern_text += " ".join(text.split())
comma = True
if pattern_text is not None:
pattern_text = pattern_text.strip()
if len(pattern_text) > 0:
questions.append([aiml_file, pattern_text])
except Exception as e:
print(e)
raise e
questions.sort(key=lambda x: x[1])
with open(csv_file, "w+") as output_file:
for line in questions:
new_line = ", ".join(line[1].split())
output_file.write(line[0])
output_file.write(", ")
output_file.write(new_line)
output_file.write("\n")
print("Files: %d"%files)
print("Patterns: %d"%len(questions)) |
gcloud/iam_auth/view_interceptors/base_template.py | DomineCore/bk-sops | 881 | 12696123 | <filename>gcloud/iam_auth/view_interceptors/base_template.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from gcloud.iam_auth import IAMMeta
from gcloud.iam_auth.intercept import ViewInterceptor
from gcloud.iam_auth.utils import iam_resource_auth_or_raise, iam_multi_resource_auth_or_raise
class YamlImportInterceptor(ViewInterceptor):
def process(self, request, *args, **kwargs):
data = request.data
template_type = data["template_type"]
template_ids = list(data.get("override_mappings", {}).values())
username = request.user.username
if template_type == "project":
project_resource_id = data["project_id"]
project_get_resource_func = "resources_for_project"
project_action = IAMMeta.FLOW_CREATE_ACTION
template_action = IAMMeta.FLOW_EDIT_ACTION
template_get_resource_func = "resources_list_for_flows"
else:
project_resource_id = None
project_get_resource_func = None
project_action = IAMMeta.COMMON_FLOW_CREATE_ACTION
template_action = IAMMeta.COMMON_FLOW_EDIT_ACTION
template_get_resource_func = "resources_list_for_common_flows"
iam_resource_auth_or_raise(username, project_action, project_resource_id, project_get_resource_func)
if template_ids:
iam_multi_resource_auth_or_raise(username, template_action, template_ids, template_get_resource_func)
class YamlExportInterceptor(ViewInterceptor):
def process(self, request, *args, **kwargs):
data = request.data
template_type = data["template_type"]
template_ids = data["template_id_list"]
if template_type == "project":
template_action = IAMMeta.FLOW_VIEW_ACTION
template_get_resource_func = "resources_list_for_flows"
else:
template_action = IAMMeta.COMMON_FLOW_VIEW_ACTION
template_get_resource_func = "resources_list_for_common_flows"
iam_multi_resource_auth_or_raise(
request.user.username, template_action, template_ids, template_get_resource_func
)
|
pex/venv/bin_path.py | alexey-tereshenkov-oxb/pex | 2,160 | 12696126 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.enum import Enum
class BinPath(Enum["BinPath.Value"]):
class Value(Enum.Value):
pass
FALSE = Value("false")
PREPEND = Value("prepend")
APPEND = Value("append")
|
backend/projects/migrations/0023_auto_20190621_1129.py | donroyco/falco | 796 | 12696152 | # Generated by Django 2.2 on 2019-06-21 09:29
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("projects", "0022_availableauditparameters_is_active"),
]
operations = [
migrations.AddField(
model_name="project",
name="admins",
field=models.ManyToManyField(
blank=True, related_name="admin_of", to=settings.AUTH_USER_MODEL
),
),
migrations.AlterField(
model_name="project",
name="members",
field=models.ManyToManyField(
blank=True, related_name="member_of", to=settings.AUTH_USER_MODEL
),
),
]
|
onnx_tf/handlers/backend/dropout.py | malisit/onnx-tensorflow | 1,110 | 12696160 | <filename>onnx_tf/handlers/backend/dropout.py
import copy
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("Dropout")
@tf_func(tf.nn.dropout)
class Dropout(BackendHandler):
@classmethod
def _common(cls, node, **kwargs):
tensor_dict = kwargs["tensor_dict"]
x = tensor_dict[node.inputs[0]]
attrs = copy.deepcopy(node.attrs)
if cls.SINCE_VERSION < 7 and attrs.pop("is_test", 0) == 0:
attrs["keep_prob"] = 1 - attrs.pop("ratio", 0.5)
return [cls.make_tensor_from_onnx_node(node, attrs=attrs, **kwargs)]
    elif cls.SINCE_VERSION < 12:  # for Opset 7, 10
# at inference mode, is_test attribute is always set to 1
# dropout at inference mode is a no-op
return [x]
else: # for Opset 12, 13
# ratio and training_mode are optional and passed as inputs
ratio = 0.5 # default ratio
if len(node.inputs) > 1:
ratio = tensor_dict[node.inputs[1]]
training_mode = False # default is false
if len(node.inputs) == 3:
training_mode = tensor_dict[node.inputs[2]]
return_mask = len(node.outputs) == 2 # if there are 2 outputs, mask is requested
if ratio == 0 or training_mode is False: # Inferencing
if return_mask is True:
return x, tf.ones(x.shape, dtype=tf.bool)
else:
return [x]
else: # Training
# seed is passed in as an attribute
seed = attrs.pop("seed", None)
noise_shape = None # noise_shape is not passed in so default to None
dropout_result = cls.make_tensor_from_onnx_node(node, inputs=[x, ratio, noise_shape, seed], attrs=attrs, **kwargs)
if return_mask is True:
# Create the mask based on the result of the Dropout
mask = tf.dtypes.cast(dropout_result, tf.bool)
return dropout_result, mask
else:
return [dropout_result]
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_6(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_7(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_10(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_12(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
|
examples/embed/server_session/bokeh_app.py | kevin1kevin1k/bokeh | 15,193 | 12696171 | import numpy as np
from bokeh.io import curdoc
from bokeh.plotting import figure
N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
"#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]
p = figure(tools="", toolbar_location=None)
p.circle(x, y, radius=radii,
fill_color=colors, fill_alpha=0.6,
line_color=None)
curdoc().add_root(p)
|
rojak-analyzer/rojak.py | pyk/rojak | 107 | 12696198 | import csv
import click
import MySQLdb as mysql
# Custom tokenizer
# TODO: fix this, we can't load the model without this declaration
# something wrong with the pickle stuff
def whitespace_tokenizer(s):
return s.split(' ')
# Change class below to use different method
#from rojak_fasttext import RojakFastTextWrapper
#from rojak_svm import RojakSVM
# from rojak_ovr import RojakOvR as Rojak
import rojak_ovr_pair
from rojak_ovr_pair import RojakOvRPair
rojak = RojakOvRPair(max_ngram=5, min_df=3,
tokenizer=rojak_ovr_pair.whitespace_tokenizer)
@click.group()
def cli():
pass
# Train Rojak
@click.command('train')
@click.option('--input', 'input_file',
default='', help='Path to training data file',
type=click.Path(exists=True))
@click.option('--output', 'output_file',
default='', help='Where the model written to',
type=click.Path())
def train(input_file, output_file):
"""Train Rojak"""
rojak.train(input_file, output_file)
cli.add_command(train)
# Eval Rojak
@click.command('eval')
@click.option('--model', default='', help='Path to the model file',
type=click.Path(exists=True))
@click.option('--test-data', default='', help='Path to test data',
type=click.Path(exists=True))
def evaluate(model, test_data):
rojak.eval(model, test_data)
cli.add_command(evaluate)
# Map candidate name to their corresponding data
candidate_data = {
'<NAME>': {
'alias': ['agus', 'harimurti', 'ahy'],
'id': -1
},
'<NAME>': {
'alias': ['sylvi', 'sylviana', 'silvy'],
'id': -1
},
'<NAME>': {
'alias': ['ahok', 'basuki'],
'id': -1
},
'<NAME>': {
'alias': ['djarot'],
'id': -1
},
'<NAME>': {
'alias': ['anies'],
'id': -1
},
'<NAME>': {
'alias': ['sandiaga', 'sandi', 'uno'],
'id': -1
}
}
# Sentiment data
# Map sentiment name to id
sentiment_data_id = {
'pos_agus_sylvi': -1,
'neg_agus_sylvi': -1,
'pos_ahok_djarot': -1,
'neg_ahok_djarot': -1,
'pos_anies_sandi': -1,
'neg_anies_sandi': -1,
'oot': -1
}
# Function to scale the score
# value score_raw:
# -1.x < score_raw < 1.x
# we want to convert it to 0 < x <= 1.0 scale
def scale_confident_score(score_raw):
score = abs(score_raw)
if score >= 1.0:
return 1.0
else:
return score
# Run Rojak
@click.command('run')
@click.option('--model', default='', help='Path to the model file',
type=click.Path(exists=True))
@click.option('--db-host', 'db_host', default='localhost',
help='Database host')
@click.option('--db-port', 'db_port', default=3306,
help='Database port number')
@click.option('--db-user', 'db_user', default='root',
help='Database user name')
@click.option('--db-pass', 'db_pass', default='<PASSWORD>',
help='Database user password')
@click.option('--db-name', 'db_name', default='rojak_database',
help='Database name')
@click.option('--max-news', default=100, help='Maximal news analyzed')
@click.option('--exclude-media', 'exclude_media_names', default='',
help='Exclude media, media name separated by comma')
@click.option('--only-media', 'only_media_names', default='',
help='Run analyzer only for this media')
def run(model, db_host, db_port, db_user, db_pass, db_name, max_news,
exclude_media_names, only_media_names):
"""Run Rojak to analyze data on the database"""
# Load the model
rojak.load_model(model)
# Open database connection
db = mysql.connect(host=db_host, port=db_port, user=db_user,
passwd=<PASSWORD>, db=db_name)
# Set autocommit to false
db.autocommit(False)
# Create new db cursor
select_cursor = db.cursor()
# Get candidate ID
sql_get_candidate_id = 'select id from candidate where full_name=%s;'
for candidate_name in candidate_data:
try:
select_cursor.execute(sql_get_candidate_id, [candidate_name])
res = select_cursor.fetchone()
candidate_id = int(res[0])
candidate_data[candidate_name]['id'] = candidate_id
except mysql.Error as err:
raise Exception(err)
# Get sentiment ID
sql_get_sentiment_id = 'select id from sentiment where name=%s;'
for sentiment_name in sentiment_data_id:
try:
select_cursor.execute(sql_get_sentiment_id, [sentiment_name])
res = select_cursor.fetchone()
sentiment_id = int(res[0])
sentiment_data_id[sentiment_name] = sentiment_id
except mysql.Error as err:
raise Exception(err)
# Exclude media if any
excluded_media = exclude_media_names.split(',')
excluded_media_ids = []
sql_get_media_id = 'select id from media where name=%s;'
for media_name in excluded_media:
if media_name == '': continue
# Get the id
try:
select_cursor.execute(sql_get_media_id, [media_name])
res = select_cursor.fetchone()
media_id = res[0]
except mysql.Error as err:
raise Exception(err)
# Concat the sql string
excluded_media_ids.append('media_id!=' + str(media_id) + ' ')
# Run only for the following media
only_media = only_media_names.split(',')
only_media_ids = []
for media_name in only_media:
if media_name == '': continue
# Get the id
try:
select_cursor.execute(sql_get_media_id, [media_name])
res = select_cursor.fetchone()
media_id = res[0]
except mysql.Error as err:
raise Exception(err)
# Concat the sql string
only_media_ids.append('media_id=' + str(media_id) + ' ')
# SQL query to get the news
sql_get_news_template = '''
select id, title, raw_content, media_id
from news
where is_analyzed=false
{}{}
'''
excluded_media_sql = ''
if len(excluded_media_ids) > 0:
excluded_media_sql = 'and '.join(excluded_media_ids)
excluded_media_sql = 'and ({})'.format(excluded_media_sql)
only_media_sql = ''
if len(only_media_ids) > 0:
only_media_sql = 'or '.join(only_media_ids)
only_media_sql = 'and ({})'.format(only_media_sql)
sql_get_news = sql_get_news_template.format(excluded_media_sql,
only_media_sql)
print '=== Start debug sql_get_news'
print 'sql_get_news:', sql_get_news
print '=== End debug sql_get_news'
select_cursor.execute(sql_get_news)
for i in xrange(max_news):
title = ''
raw_content = ''
result = select_cursor.fetchone()
if result:
news_id = result[0]
news_title = result[1]
news_raw_content = result[2]
news_media_id = result[3]
else:
print 'Cannot fetch news, skipping ...'
continue
raw_text = '{} {}'.format(news_title, news_raw_content)
# Get mention information
print '=== Start debug mention'
clean_raw_text = rojak_ovr_pair.clean_string(raw_text,
use_synonym=False)
normalized_words = clean_raw_text.lower().split(' ')
print 'raw_text:', raw_text
print 'normalized_words:', normalized_words
mentioned_candidates = []
for candidate_name in candidate_data:
alias = candidate_data[candidate_name]['alias']
is_mentioned = False
for alias_name in alias:
if alias_name in normalized_words:
is_mentioned = True
if is_mentioned:
mentioned_candidates.append(candidate_name)
print 'mentioned_candidates:', mentioned_candidates
print '=== End debug mention'
print '=== Start debug label'
pred = rojak.predict_proba(raw_text)
print 'label:', pred['labels']
print 'confident_score:', pred['confident_score']
print '=== End debug label'
# Insert to the database
insert_cursor = db.cursor()
sql_insert_mention = '''
insert into mention(`news_id`, `candidate_id`)
values (%s, %s);
'''
sql_insert_sentiment = '''
insert into news_sentiment(`news_id`, `sentiment_id`,
`confident_score_raw`, `confident_score_scaled`)
values (%s, %s, %s, %s);
'''
sql_update_is_analyzed = '''
update news set is_analyzed=true where id=%s;
'''
try:
# For mention data
for candidate_name in mentioned_candidates:
candidate_id = candidate_data[candidate_name]['id']
if candidate_id == -1:
raise Exception('candidate_id data not updated')
insert_cursor.execute(sql_insert_mention, [news_id,
candidate_id])
# For sentiment data
labels = pred['labels']
if not labels:
raise Exception('Cannot predict the labels')
for label in labels:
sentiment_id = sentiment_data_id[label]
if sentiment_id == -1:
raise Exception('candidate_id data not updated')
score = pred['confident_score'][label]
score_scaled = scale_confident_score(score)
insert_cursor.execute(sql_insert_sentiment, [news_id,
sentiment_id, score, score_scaled])
# Update is_analyzed status
insert_cursor.execute(sql_update_is_analyzed, [news_id])
db.commit()
insert_cursor.close()
except Exception as err:
db.rollback()
print 'Failed to analyze news:', news_id
print 'Error:', err
continue
select_cursor.close()
db.close()
cli.add_command(run)
if __name__ == '__main__':
cli()
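# Usage sketch (added illustration, not part of the upstream script), based on the
# click commands registered above; the file names are placeholders.
#   python rojak.py train --input train.csv --output rojak.model
#   python rojak.py eval --model rojak.model --test-data test.csv
#   python rojak.py run --model rojak.model --db-host localhost --max-news 100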
|
InvenTree/part/migrations/0054_auto_20201109_1246.py | ArakniD/InvenTree | 656 | 12696199 | # Generated by Django 3.0.7 on 2020-11-09 12:46
from django.db import migrations, models
import part.settings
class Migration(migrations.Migration):
dependencies = [
('part', '0052_partrelated'),
]
operations = [
migrations.AlterField(
model_name='part',
name='active',
field=models.BooleanField(default=True, help_text='Is this part active?', verbose_name='Active'),
),
migrations.AlterField(
model_name='part',
name='component',
field=models.BooleanField(default=part.settings.part_component_default, help_text='Can this part be used to build other parts?', verbose_name='Component'),
),
migrations.AlterField(
model_name='part',
name='purchaseable',
field=models.BooleanField(default=part.settings.part_purchaseable_default, help_text='Can this part be purchased from external suppliers?', verbose_name='Purchaseable'),
),
migrations.AlterField(
model_name='part',
name='salable',
field=models.BooleanField(default=part.settings.part_salable_default, help_text='Can this part be sold to customers?', verbose_name='Salable'),
),
migrations.AlterField(
model_name='part',
name='trackable',
field=models.BooleanField(default=part.settings.part_trackable_default, help_text='Does this part have tracking for unique items?', verbose_name='Trackable'),
),
migrations.AlterField(
model_name='part',
name='virtual',
field=models.BooleanField(default=False, help_text='Is this a virtual part, such as a software product or license?', verbose_name='Virtual'),
),
]
|
notebook/pandas_index.py | vhn0912/python-snippets | 174 | 12696203 | import pandas as pd
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
print(df['age'])
print(type(df['age']))
# name
# Alice 24
# Bob 42
# Charlie 18
# Dave 68
# Ellen 24
# Frank 30
# Name: age, dtype: int64
# <class 'pandas.core.series.Series'>
print(df.age)
print(type(df.age))
# name
# Alice 24
# Bob 42
# Charlie 18
# Dave 68
# Ellen 24
# Frank 30
# Name: age, dtype: int64
# <class 'pandas.core.series.Series'>
print(df[['age', 'point']])
print(type(df[['age', 'point']]))
# age point
# name
# Alice 24 64
# Bob 42 92
# Charlie 18 70
# Dave 68 70
# Ellen 24 88
# Frank 30 57
# <class 'pandas.core.frame.DataFrame'>
print(df[['age']])
print(type(df[['age']]))
# age
# name
# Alice 24
# Bob 42
# Charlie 18
# Dave 68
# Ellen 24
# Frank 30
# <class 'pandas.core.frame.DataFrame'>
print(df['age':'point'])
# Empty DataFrame
# Columns: [age, state, point]
# Index: []
print(df.loc[:, 'age':'point'])
print(type(df.loc[:, 'age':'point']))
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
# <class 'pandas.core.frame.DataFrame'>
print(df.iloc[:, [0, 2]])
print(type(df.iloc[:, [0, 2]]))
# age point
# name
# Alice 24 64
# Bob 42 92
# Charlie 18 70
# Dave 68 70
# Ellen 24 88
# Frank 30 57
# <class 'pandas.core.frame.DataFrame'>
print(df[1:4])
print(type(df[1:4]))
# age state point
# name
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# <class 'pandas.core.frame.DataFrame'>
print(df[:-3])
print(type(df[:-3]))
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# <class 'pandas.core.frame.DataFrame'>
print(df[::2])
print(type(df[::2]))
# age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df[1::2])
print(type(df[1::2]))
# age state point
# name
# Bob 42 CA 92
# Dave 68 TX 70
# Frank 30 NY 57
# <class 'pandas.core.frame.DataFrame'>
# print(df[1])
# KeyError: 1
print(df[1:2])
print(type(df[1:2]))
# age state point
# name
# Bob 42 CA 92
# <class 'pandas.core.frame.DataFrame'>
print(df['Bob':'Ellen'])
print(type(df['Bob':'Ellen']))
# age state point
# name
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df.loc['Bob'])
print(type(df.loc['Bob']))
# age 42
# state CA
# point 92
# Name: Bob, dtype: object
# <class 'pandas.core.series.Series'>
print(df.loc[['Bob', 'Ellen']])
print(type(df.loc[['Bob', 'Ellen']]))
# age state point
# name
# Bob 42 CA 92
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df.iloc[[1, 4]])
print(type(df.iloc[[1, 4]]))
# age state point
# name
# Bob 42 CA 92
# Ellen 24 CA 88
# <class 'pandas.core.frame.DataFrame'>
print(df['age']['Alice'])
# 24
print(df['Bob':'Dave'][['age', 'point']])
# age point
# name
# Bob 42 92
# Charlie 18 70
# Dave 68 70
print(df.at['Alice', 'age'])
# 24
print(df.loc['Bob':'Dave', ['age', 'point']])
# age point
# name
# Bob 42 92
# Charlie 18 70
# Dave 68 70
|
flocker/testtools/_testhelpers.py | stackriot/flocker | 2,690 | 12696221 | <filename>flocker/testtools/_testhelpers.py
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Helpers for testing our test code.
Only put stuff here that is specific to testing code about unit testing.
"""
from hypothesis.strategies import sampled_from
import unittest
from testtools.matchers import (
AfterPreprocessing,
Equals,
MatchesStructure,
)
from ._base import AsyncTestCase, TestCase
base_test_cases = sampled_from([AsyncTestCase, TestCase])
def throw(exception):
"""
Raise 'exception'.
"""
raise exception
def only_skips(tests_run, reasons):
"""
Matches results that only had skips, and only for the given reasons.
"""
return has_results(
tests_run=Equals(tests_run),
skipped=AfterPreprocessing(
lambda xs: list(unicode(x[1]) for x in xs),
Equals(reasons)),
)
def has_results(errors=None, failures=None, skipped=None,
expected_failures=None, unexpected_successes=None,
tests_run=None):
"""
Return a matcher on test results.
By default, will match a result that has no tests run.
"""
if errors is None:
errors = Equals([])
if failures is None:
failures = Equals([])
if skipped is None:
skipped = Equals([])
if expected_failures is None:
expected_failures = Equals([])
if unexpected_successes is None:
unexpected_successes = Equals([])
if tests_run is None:
tests_run = Equals(0)
return MatchesStructure(
errors=errors,
failures=failures,
skipped=skipped,
expectedFailures=expected_failures,
unexpectedSuccesses=unexpected_successes,
testsRun=tests_run,
)
def run_test(case):
"""
Run a test and return its results.
"""
# XXX: How many times have I written something like this?
result = unittest.TestResult()
case.run(result)
return result
def make_test_case(base_case):
"""
Make a single test that subclasses ``base_case`` and passes.
:param type base_case: A ``TestCase`` class.
:rtype: ``base_case``
"""
class FooTests(base_case):
def test_something(self):
pass
return FooTests('test_something')
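# Minimal usage sketch (added illustration, not part of the upstream module):
# build a passing case, run it, and check the aggregated result with the matchers above.
#
#   case = make_test_case(TestCase)
#   result = run_test(case)
#   assert has_results(tests_run=Equals(1)).match(result) is None   # None means the matcher matched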
|
foreman/data_refinery_foreman/foreman/management/commands/check_missing_results.py | AlexsLemonade/refinebio | 106 | 12696226 | from django.core.management.base import BaseCommand
from data_refinery_common.models import Sample
from data_refinery_common.performant_pagination.pagination import PAGE_SIZE, PerformantPaginator
class Command(BaseCommand):
def handle(self, *args, **options):
samples = Sample.processed_objects.all()
paginator = PerformantPaginator(samples, PAGE_SIZE)
page = paginator.page()
counter = 0
while True:
for sample in page.object_list:
counter += 1
if sample.results.count() == 0:
print(sample.accession_code)
if not page.has_next():
break
else:
page = paginator.page(page.next_page_number())
if counter % 10000 == 0:
print("Checked another 10000k samples.")
|
netcal/metrics/__init__.py | by-liu/calibration-framework | 148 | 12696230 | # Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
"""
Methods for measuring miscalibration. The common methods are given with the
'Average Calibration Error (ACE)', 'Expected Calibration Error (ECE)' and 'Maximum Calibration Error (MCE)'.
Each methods bins the samples by their confidence and measures the accuracy in each bin. The ECE gives the
mean gap between confidence and observed accuracy in each bin weighted by the number of samples.
The MCE returns the highest observed deviation. The ACE is similar to the ECE but weights each bin equally.
Available classes
=================
.. autosummary::
:toctree: _autosummary_metric
:template: custom_class.rst
ACE
ECE
MCE
MMCE
PICP
"""
from .ACE import ACE
from .ECE import ECE
from .MCE import MCE
from .Miscalibration import _Miscalibration
from .PICP import PICP
from .MMCE import MMCE
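# Minimal usage sketch (added illustration, not part of the upstream module); the
# bin count and the random inputs below are assumptions.
#
#   import numpy as np
#   confidences = np.random.rand(1000)            # predicted confidences in [0, 1]
#   labels = np.random.randint(2, size=1000)      # binary ground truth
#   print(ECE(bins=10).measure(confidences, labels))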
|
lintreview/tools/yamllint.py | jsoref/lint-review | 271 | 12696232 | <reponame>jsoref/lint-review
import os
import lintreview.docker as docker
from lintreview.review import IssueComment
from lintreview.tools import Tool, process_quickfix, extract_version
class Yamllint(Tool):
name = 'yamllint'
def version(self):
output = docker.run('python2', ['yamllint', '--version'], self.base_path)
return extract_version(output)
def check_dependencies(self):
"""
See if python2 image is installed
"""
return docker.image_exists('python2')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext in ['.yml', '.yaml']
def process_files(self, files):
"""
Run code checks with yamllint.
Only a single process is made for all files
to save resources.
Configuration is not supported at this time
"""
command = ['yamllint', '--format=parsable']
# Add config file if its present
if self.options.get('config'):
command += [
'-c',
docker.apply_base(self.options['config'])
]
command += files
output = docker.run('python2', command, self.base_path)
if not output:
return False
if 'No such file' in output and 'Traceback' in output:
error = output.strip().split("\n")[-1]
msg = (u'`yamllint` failed with the following error:\n'
'```\n'
'{}\n'
'```\n')
return self.problems.add(IssueComment(msg.format(error)))
output = output.split("\n")
process_quickfix(self.problems, output, docker.strip_base)
|
design/dynamics/annotate.py | ParikhKadam/cycloid | 156 | 12696240 | import cv2
import numpy as np
import params
METERS_PER_ENCODER_TICK = params.WHEEL_TICK_LENGTH
def draw_steering(bgr, steering, servo, center=(320, 420)):
# make steering wheel, lower center
#servo = 128*(servo - 125)/70.0
servo = steering
# sdeg = steering # just 1:1 i guess?
sdeg = params.STEER_DIRECTION*servo # just 1:1 i guess?
srad = sdeg * np.pi / 180.0
S, C = 16*30*np.sin(srad), 16*30*np.cos(srad)
cv2.circle(bgr, center, 30, (255, 255, 255), 1, cv2.LINE_AA)
scenter = (center[0]*16, center[1]*16)
cv2.line(bgr, (int(scenter[0] - C), int(scenter[1] + S)),
(int(scenter[0] + C), int(scenter[1] - S)),
(255, 255, 255), 1, cv2.LINE_AA, 4)
cv2.ellipse(bgr, center, (30, 30), 0, -90, -90 + steering,
(255, 180, 180), 5, cv2.LINE_AA)
cv2.ellipse(bgr, center, (30, 30), 0, -90, -90 + servo,
(0, 180, 255), 2, cv2.LINE_AA)
last_ts = None
last_wheels = None
def draw_speed(bgr, tstamp, wheels, periods, center=(40, 420), radius=30):
    # draw a little speedometer in the lower left
# just draw the needle for each period now
global last_ts, last_wheels
av = np.mean(periods[:params.NUM_ENCODERS])
if av != 0:
av = METERS_PER_ENCODER_TICK * 1e6 / av
# cv2.putText(bgr, "%0.1f %0.1f %0.1f %0.1f m/s" % tuple(v), (10, 470),
# cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1,
# cv2.LINE_AA)
if last_ts is None:
last_ts = tstamp
last_wheels = wheels
return
dw = wheels - last_wheels
if np.all(dw == 0):
last_ts = tstamp
last_wheels = wheels
return
# vv = METERS_PER_ENCODER_TICK * np.float32(dw) / (tstamp - last_ts)
# av = 0.5 * np.mean(v[dw != 0] + vv[dw != 0])
mph = 2.23694 * av
# draw ticks
for i in range(13):
phi = (i - 6) * 0.4
C, S = radius * np.cos(phi), radius * np.sin(phi)
cv2.line(bgr, (int(center[0] + S), int(center[1] - C)),
(int(center[0] + 0.8*S), int(center[1] - 0.8*C)),
(255, 255, 255), 1, cv2.LINE_AA)
phi = (mph - 6) * 0.4
C, S = radius * np.cos(phi), radius * np.sin(phi)
cv2.line(bgr, (int(center[0] + S), int(center[1] - C)),
(int(center[0]), int(center[1])),
(180, 255, 180), 2, cv2.LINE_AA)
cv2.putText(bgr, "%0.1f mph" % (mph), (center[0] - 10, center[1] + 40),
cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1,
cv2.LINE_AA)
last_ts = tstamp
last_wheels = wheels
def draw_throttle(img, throttle, center=(320, 470)):
cv2.line(img, center, (center[0] + throttle, center[1]),
throttle > 0 and (0, 255, 0) or (0, 95, 255), 5)
def draw_accelerometer(bgr, accel, gyro, center=(470, 470)):
cv2.circle(bgr, center, 30, (255, 255, 255), 1, cv2.LINE_AA)
cv2.ellipse(bgr, center, (30, 30), 0, -90, -90 - 180*gyro[2] / np.pi,
(100, 255, 180), 3, cv2.LINE_AA)
cv2.line(bgr, center, (int(center[0] - accel[1]*30),
int(center[1] + accel[0]*30)),
(100, 255, 100), 2, cv2.LINE_AA)
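# Usage sketch (added illustration, not part of the upstream file): draw the
# overlay widgets onto a blank 640x480 frame; the values are arbitrary.
#
#   frame = np.zeros((480, 640, 3), np.uint8)
#   draw_steering(frame, steering=15, servo=15)
#   draw_throttle(frame, throttle=40)
#   draw_accelerometer(frame, accel=[0.1, 0.0, 9.8], gyro=[0.0, 0.0, 0.2])
#   cv2.imwrite("overlay.png", frame)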
|
litex/soc/cores/bitbang.py | osterwood/litex | 1,501 | 12696243 | <gh_stars>1000+
#
# This file is part of LiteX.
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.fhdl.specials import Tristate
from litex.soc.interconnect.csr import *
# I2C Master Bit-Banging ---------------------------------------------------------------------------
class I2CMaster(Module, AutoCSR):
"""I2C Master Bit-Banging
Provides the minimal hardware to do software I2C Master bit banging.
On the same write CSRStorage (_w), software can control:
- SCL (I2C_SCL).
- SDA direction and value (I2C_OE, I2C_W).
    Software gets the SDA value back through the read CSRStatus (_r).
"""
pads_layout = [("scl", 1), ("sda", 1)]
def __init__(self, pads=None):
if pads is None:
pads = Record(self.pads_layout)
self.pads = pads
self._w = CSRStorage(fields=[
CSRField("scl", size=1, offset=0),
CSRField("oe", size=1, offset=1),
CSRField("sda", size=1, offset=2)],
name="w")
self._r = CSRStatus(fields=[
CSRField("sda", size=1, offset=0)],
name="r")
self.connect(pads)
def connect(self, pads):
# SCL
self.specials += Tristate(pads.scl,
o = 0, # I2C uses Pull-ups, only drive low.
oe = ~self._w.fields.scl # Drive when scl is low.
)
# SDA
self.specials += Tristate(pads.sda,
o = 0, # I2C uses Pull-ups, only drive low.
oe = self._w.fields.oe & ~self._w.fields.sda, # Drive when oe and sda is low.
i = self._r.fields.sda
)
class I2CMasterSim(I2CMaster):
"""I2C Master Bit-Banging for Verilator simulation
Uses separate pads for SDA IN/OUT as Verilator does not support tristate pins well.
"""
pads_layout = [("scl", 1), ("sda_in", 1), ("sda_out", 1)]
def connect(self, pads):
_sda_w = Signal()
_sda_oe = Signal()
_sda_r = Signal()
_sda_in = Signal()
self.comb += [
pads.scl.eq(self._w.fields.scl),
_sda_oe.eq( self._w.fields.oe),
_sda_w.eq( self._w.fields.sda),
If(_sda_oe,
pads.sda_out.eq(_sda_w),
self._r.fields.sda.eq(_sda_w),
).Else(
pads.sda_out.eq(1),
self._r.fields.sda.eq(pads.sda_in),
)
]
# SPI Master Bit-Banging ---------------------------------------------------------------------------
class SPIMaster(Module, AutoCSR):
"""3/4-wire SPI Master Bit-Banging
Provides the minimal hardware to do software 3/4-wire SPI Master bit banging.
On the same write CSRStorage (_w), software can control CLK (SPI_CLK), MOSI (SPI_MOSI), MOSI
    direction (SPI_OE) in the 3-wire SPI case, and up to 4 Chip Selects (SPI_CS). Software gets
    MISO (SPI_MISO) back through the read CSRStatus (_r).
"""
pads_layout = [("clk", 1), ("cs_n", 4), ("mosi", 1), ("miso", 1)]
def __init__(self, pads=None):
if pads is None:
pads = Record(self.pads_layout)
self.pads = pads
assert len(pads.cs_n) <= 4
self._w = CSRStorage(fields=[
CSRField("clk", size=1, offset=0),
CSRField("mosi", size=1, offset=1),
CSRField("oe", size=1, offset=2),
CSRField("cs", size=1, offset=4)],
name="w")
self._r = CSRStatus(fields=[
CSRField("miso", size=1, offset=0),
CSRField("mosi", size=1, offset=1)],
name="r")
# # #
_mosi_w = Signal()
_mosi_oe = Signal()
_mosi_r = Signal()
_cs = Signal(4)
self.comb += [
pads.clk.eq( self._w.fields.clk),
_mosi_w.eq( self._w.fields.mosi),
_mosi_oe.eq( self._w.fields.oe),
pads.cs_n.eq(~self._w.fields.cs),
self._r.fields.mosi.eq(_mosi_r),
]
if hasattr(pads, "miso"):
self.comb += self._r.fields.miso.eq(pads.miso)
self.specials += Tristate(pads.mosi, _mosi_w, _mosi_oe, _mosi_r)
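# Integration sketch (added illustration, not part of the upstream file): inside a
# LiteX SoC the masters are typically hooked up to platform pads and exposed over CSR.
# The pad names below are assumptions that depend on the target platform definition.
#
#   self.submodules.i2c = I2CMaster(platform.request("i2c"))
#   self.submodules.spi = SPIMaster(platform.request("spi"))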
|
face_detection/detection/sfd/__init__.py | zhaniya-meruki/ZhaniyaKoishybayevaMasterThesis | 5,863 | 12696263 | from .sfd_detector import SFDDetector as FaceDetector |
3_detector/grad_cam.py | meliketoy/gradcam.pytorch | 125 | 12696268 | #!/usr/bin/env python
# coding: utf-8
#
# Author: <NAME>
# URL: http://kazuto1011.github.io
# Created: 2017-05-26
from __future__ import print_function
from collections import OrderedDict
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class PropagationBase(object):
def __init__(self, model, cuda=False):
self.model = model
self.model.eval()
if cuda:
self.model.cuda()
self.cuda = cuda
self.all_fmaps = OrderedDict()
self.all_grads = OrderedDict()
self._set_hook_func()
self.image = None
def _set_hook_func(self):
raise NotImplementedError
def _encode_one_hot(self, idx):
one_hot = torch.FloatTensor(1, self.preds.size()[-1]).zero_()
one_hot[0][idx] = 1.0
return one_hot.cuda() if self.cuda else one_hot
def forward(self, image):
self.image = image
self.preds = self.model.forward(self.image)
        self.probs = F.softmax(self.preds, dim=1)[0]
self.prob, self.idx = self.probs.data.sort(0, True)
return self.prob, self.idx
def backward(self, idx):
self.model.zero_grad()
one_hot = self._encode_one_hot(idx)
self.preds.backward(gradient=one_hot, retain_graph=True)
class GradCAM(PropagationBase):
def _set_hook_func(self):
def func_f(module, input, output):
self.all_fmaps[id(module)] = output.data.cpu()
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_out[0].cpu()
for module in self.model.named_modules():
module[1].register_forward_hook(func_f)
module[1].register_backward_hook(func_b)
def _find(self, outputs, target_layer):
for key, value in outputs.items():
for module in self.model.named_modules():
if id(module[1]) == key:
if module[0] == target_layer:
return value
raise ValueError('Invalid layer name: {}'.format(target_layer))
def _normalize(self, grads):
l2_norm = torch.sqrt(torch.mean(torch.pow(grads, 2))) + 1e-5
return grads / l2_norm.data[0]
def _compute_grad_weights(self, grads):
grads = self._normalize(grads)
self.map_size = grads.size()[2:]
return nn.AvgPool2d(self.map_size)(grads)
def generate(self, target_layer):
fmaps = self._find(self.all_fmaps, target_layer)
grads = self._find(self.all_grads, target_layer)
weights = self._compute_grad_weights(grads)
gcam = torch.FloatTensor(self.map_size).zero_()
for fmap, weight in zip(fmaps[0], weights[0]):
            res = fmap * weight.data.expand_as(fmap)
            gcam += res
gcam = F.relu(Variable(gcam))
gcam = gcam.data.cpu().numpy()
gcam -= gcam.min()
if(gcam.max() != 0):
gcam /= gcam.max()
gcam = cv2.resize(gcam, (self.image.size(3), self.image.size(2)))
return gcam
def save(self, filename, gcam, raw_image):
gcam = cv2.applyColorMap(np.uint8(gcam * 255.0), cv2.COLORMAP_JET)
        gcam = gcam.astype(np.float64) + raw_image.astype(np.float64)
if(gcam.max() != 0):
gcam = gcam / gcam.max() * 255.0
cv2.imwrite(filename, np.uint8(gcam))
class BackPropagation(PropagationBase):
def _find(self, outputs, target_layer):
for key, value in outputs.items():
for module in self.model.named_modules():
if id(module[1]) == key:
if module[0] == target_layer:
return value
raise ValueError('Invalid layer name: {}'.format(target_layer))
def _set_hook_func(self):
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_in[0].cpu()
for module in self.model.named_modules():
module[1].register_backward_hook(func_b)
def generate(self, target_layer):
grads = self._find(self.all_grads, target_layer)
gradients_as_arr = grads.data[0].numpy()[0]
return gradients_as_arr
def save(self, filename, data):
abs_max = np.maximum(-1 * data.min(), data.max())
data = data / abs_max * 127.0 + 127.0
cv2.imwrite(filename, np.uint8(data))
class GuidedBackPropagation(BackPropagation):
def _set_hook_func(self):
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_in[0].cpu()
# Cut off negative gradients
if isinstance(module, nn.ReLU):
return (torch.clamp(grad_in[0], min=0.0),)
for module in self.model.named_modules():
module[1].register_backward_hook(func_b)
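# Usage sketch: running GradCAM end-to-end on one image. The torchvision model,
# the 'layer4' target layer and the preprocessing are assumptions made for
# illustration; they are not defined or required by this module.
def run_gradcam_example(image_path, output_path='gcam.png'):
    import torchvision.models as models
    import torchvision.transforms as transforms
    use_cuda = torch.cuda.is_available()
    model = models.resnet18(pretrained=True)
    raw_image = cv2.resize(cv2.imread(image_path), (224, 224))
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    image = preprocess(raw_image[..., ::-1].copy()).unsqueeze(0)  # BGR -> RGB
    if use_cuda:
        image = image.cuda()
    gcam = GradCAM(model=model, cuda=use_cuda)
    probs, idx = gcam.forward(Variable(image))
    top1 = int(idx[0])                            # top-1 predicted class index
    gcam.backward(idx=top1)
    region = gcam.generate(target_layer='layer4')
    gcam.save(output_path, region, raw_image)
    return probs, idx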
|
chempy/properties/__init__.py | bertiewooster/chempy | 340 | 12696304 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
This package implements various parameterisations of properties from the
litterature with relevance in chemistry.
"""
|
recipes/Python/438119_shell__Easily_call_executables/recipe-438119.py | tdiprima/code | 2,023 | 12696308 | <filename>recipes/Python/438119_shell__Easily_call_executables/recipe-438119.py
shell.py:
import sys
class Shell:
def __init__(self):
self.prefix = '/bin'
self.env = {}
self.stdout = None
self.stderr = None
self.wait = False
def __getattr__(self, command):
def __call(*args, **keywords):
if command == 'prefix':
return self.prefix
if command == 'stdout':
return self.stdout
if command == 'stderr':
return self.stderr
if command.startswith('__'):
return None
if self.prefix:
exe = '%s/%s' % (self.prefix, command)
else:
exe = command
import os, subprocess
if os.path.exists(exe):
exeargs = [exe]
if keywords:
                    for i in keywords.iteritems(): exeargs.extend(i)
if args:
exeargs.extend(args)
exeargs = [str(i) for i in exeargs]
cwd = os.path.abspath(os.curdir)
p = subprocess.Popen(exeargs, bufsize=1, cwd=cwd, env=self.env, stdout=subprocess.PIPE, close_fds=False, universal_newlines=True)
if self.wait:
ret = p.wait()
else:
ret = p.returncode
result = None
if p.stdout:
self.stdout = p.stdout.readlines()
if p.stderr:
self.stderr = p.stderr.readlines()
return ret
else:
raise Exception('No executable found at %s' % exe)
return __call
sys.modules[__name__] = Shell()
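# Usage sketch (assuming this recipe is saved as shell.py on sys.path). Because
# the module replaces itself with a Shell instance, attribute access becomes a
# command invocation; the commands and paths below are illustrative only.
#
# import shell
# shell.wait = True
# ret = shell.ls('-l', '/tmp')   # runs /bin/ls -l /tmp and waits for it
# print shell.stdout             # captured stdout lines (Python 2 recipe)
# shell.prefix = '/usr/bin'      # look up executables under another prefix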
|
sdk/machinelearning/azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 2,728 | 12696350 | <gh_stars>1000+
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._machine_learning_compute_management_client_enums import *
class AcsClusterProperties(msrest.serialization.Model):
"""Information about the container service backing the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar cluster_fqdn: The FQDN of the cluster.
:vartype cluster_fqdn: str
:param orchestrator_type: Required. Type of orchestrator. It cannot be changed once the cluster
is created. Possible values include: "Kubernetes", "None".
:type orchestrator_type: str or ~azure.mgmt.machinelearningcompute.models.OrchestratorType
:param orchestrator_properties: Orchestrator specific properties.
:type orchestrator_properties:
~azure.mgmt.machinelearningcompute.models.KubernetesClusterProperties
:param system_services: The system services deployed to the cluster.
:type system_services: list[~azure.mgmt.machinelearningcompute.models.SystemService]
:param master_count: The number of master nodes in the container service.
:type master_count: int
:param agent_count: The number of agent nodes in the Container Service. This can be changed to
scale the cluster.
:type agent_count: int
:param agent_vm_size: The Azure VM size of the agent VM nodes. This cannot be changed once the
     cluster is created. This list is non-exhaustive; refer to
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes for the possible VM
sizes. Possible values include: "Standard_A0", "Standard_A1", "Standard_A2", "Standard_A3",
"Standard_A4", "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", "Standard_A9",
"Standard_A10", "Standard_A11", "Standard_D1", "Standard_D2", "Standard_D3", "Standard_D4",
"Standard_D11", "Standard_D12", "Standard_D13", "Standard_D14", "Standard_D1_v2",
"Standard_D2_v2", "Standard_D3_v2", "Standard_D4_v2", "Standard_D5_v2", "Standard_D11_v2",
"Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2", "Standard_G1", "Standard_G2",
"Standard_G3", "Standard_G4", "Standard_G5", "Standard_DS1", "Standard_DS2", "Standard_DS3",
"Standard_DS4", "Standard_DS11", "Standard_DS12", "Standard_DS13", "Standard_DS14",
"Standard_GS1", "Standard_GS2", "Standard_GS3", "Standard_GS4", "Standard_GS5". Default value:
"Standard_D3_v2".
:type agent_vm_size: str or ~azure.mgmt.machinelearningcompute.models.AgentVMSizeTypes
"""
_validation = {
'cluster_fqdn': {'readonly': True},
'orchestrator_type': {'required': True},
'master_count': {'maximum': 5, 'minimum': 1},
'agent_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'cluster_fqdn': {'key': 'clusterFqdn', 'type': 'str'},
'orchestrator_type': {'key': 'orchestratorType', 'type': 'str'},
'orchestrator_properties': {'key': 'orchestratorProperties', 'type': 'KubernetesClusterProperties'},
'system_services': {'key': 'systemServices', 'type': '[SystemService]'},
'master_count': {'key': 'masterCount', 'type': 'int'},
'agent_count': {'key': 'agentCount', 'type': 'int'},
'agent_vm_size': {'key': 'agentVmSize', 'type': 'str'},
}
def __init__(
self,
*,
orchestrator_type: Union[str, "OrchestratorType"],
orchestrator_properties: Optional["KubernetesClusterProperties"] = None,
system_services: Optional[List["SystemService"]] = None,
master_count: Optional[int] = 1,
agent_count: Optional[int] = 2,
agent_vm_size: Optional[Union[str, "AgentVMSizeTypes"]] = "Standard_D3_v2",
**kwargs
):
super(AcsClusterProperties, self).__init__(**kwargs)
self.cluster_fqdn = None
self.orchestrator_type = orchestrator_type
self.orchestrator_properties = orchestrator_properties
self.system_services = system_services
self.master_count = master_count
self.agent_count = agent_count
self.agent_vm_size = agent_vm_size
class AppInsightsCredentials(msrest.serialization.Model):
"""AppInsights credentials.
:param app_id: The AppInsights application ID.
:type app_id: str
:param instrumentation_key: The AppInsights instrumentation key. This is not returned in
response of GET/PUT on the resource. To see this please call listKeys API.
:type instrumentation_key: str
"""
_attribute_map = {
'app_id': {'key': 'appId', 'type': 'str'},
'instrumentation_key': {'key': 'instrumentationKey', 'type': 'str'},
}
def __init__(
self,
*,
app_id: Optional[str] = None,
instrumentation_key: Optional[str] = None,
**kwargs
):
super(AppInsightsCredentials, self).__init__(**kwargs)
self.app_id = app_id
self.instrumentation_key = instrumentation_key
class AppInsightsProperties(msrest.serialization.Model):
"""Properties of App Insights.
:param resource_id: ARM resource ID of the App Insights.
:type resource_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
**kwargs
):
super(AppInsightsProperties, self).__init__(**kwargs)
self.resource_id = resource_id
class AutoScaleConfiguration(msrest.serialization.Model):
"""AutoScale configuration properties.
:param status: If auto-scale is enabled for all services. Each service can turn it off
individually. Possible values include: "Enabled", "Disabled".
:type status: str or ~azure.mgmt.machinelearningcompute.models.Status
:param min_replicas: The minimum number of replicas for each service.
:type min_replicas: int
:param max_replicas: The maximum number of replicas for each service.
:type max_replicas: int
:param target_utilization: The target utilization.
:type target_utilization: float
:param refresh_period_in_seconds: Refresh period in seconds.
:type refresh_period_in_seconds: int
"""
_validation = {
'min_replicas': {'minimum': 1},
'max_replicas': {'minimum': 1},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'min_replicas': {'key': 'minReplicas', 'type': 'int'},
'max_replicas': {'key': 'maxReplicas', 'type': 'int'},
'target_utilization': {'key': 'targetUtilization', 'type': 'float'},
'refresh_period_in_seconds': {'key': 'refreshPeriodInSeconds', 'type': 'int'},
}
def __init__(
self,
*,
status: Optional[Union[str, "Status"]] = None,
min_replicas: Optional[int] = 1,
max_replicas: Optional[int] = 100,
target_utilization: Optional[float] = None,
refresh_period_in_seconds: Optional[int] = None,
**kwargs
):
super(AutoScaleConfiguration, self).__init__(**kwargs)
self.status = status
self.min_replicas = min_replicas
self.max_replicas = max_replicas
self.target_utilization = target_utilization
self.refresh_period_in_seconds = refresh_period_in_seconds
class AvailableOperations(msrest.serialization.Model):
"""Available operation list.
:param value: An array of available operations.
:type value: list[~azure.mgmt.machinelearningcompute.models.ResourceOperation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceOperation]'},
}
def __init__(
self,
*,
value: Optional[List["ResourceOperation"]] = None,
**kwargs
):
super(AvailableOperations, self).__init__(**kwargs)
self.value = value
class CheckSystemServicesUpdatesAvailableResponse(msrest.serialization.Model):
"""Information about updates available for system services in a cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar updates_available: Yes if updates are available for the system services, No if not.
Possible values include: "Yes", "No".
:vartype updates_available: str or ~azure.mgmt.machinelearningcompute.models.UpdatesAvailable
"""
_validation = {
'updates_available': {'readonly': True},
}
_attribute_map = {
'updates_available': {'key': 'updatesAvailable', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckSystemServicesUpdatesAvailableResponse, self).__init__(**kwargs)
self.updates_available = None
class ContainerRegistryCredentials(msrest.serialization.Model):
"""Information about the Azure Container Registry which contains the images deployed to the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar login_server: The ACR login server name. User name is the first part of the FQDN.
:vartype login_server: str
:ivar password: The ACR primary password.
:vartype password: str
:ivar password2: The ACR secondary password.
:vartype password2: str
:ivar username: The ACR login username.
:vartype username: str
"""
_validation = {
'login_server': {'readonly': True},
'password': {'readonly': True},
'password2': {'readonly': True},
'username': {'readonly': True},
}
_attribute_map = {
'login_server': {'key': 'loginServer', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'password2': {'key': 'password2', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerRegistryCredentials, self).__init__(**kwargs)
self.login_server = None
self.password = None
self.password2 = None
self.username = None
class ContainerRegistryProperties(msrest.serialization.Model):
"""Properties of Azure Container Registry.
:param resource_id: ARM resource ID of the Azure Container Registry used to store Docker images
for web services in the cluster. If not provided one will be created. This cannot be changed
once the cluster is created.
:type resource_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
**kwargs
):
super(ContainerRegistryProperties, self).__init__(**kwargs)
self.resource_id = resource_id
class ContainerServiceCredentials(msrest.serialization.Model):
"""Information about the Azure Container Registry which contains the images deployed to the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar acs_kube_config: The ACS kube config file.
:vartype acs_kube_config: str
:ivar service_principal_configuration: Service principal configuration used by Kubernetes.
:vartype service_principal_configuration:
~azure.mgmt.machinelearningcompute.models.ServicePrincipalProperties
:ivar image_pull_secret_name: The ACR image pull secret name which was created in Kubernetes.
:vartype image_pull_secret_name: str
"""
_validation = {
'acs_kube_config': {'readonly': True},
'service_principal_configuration': {'readonly': True},
'image_pull_secret_name': {'readonly': True},
}
_attribute_map = {
'acs_kube_config': {'key': 'acsKubeConfig', 'type': 'str'},
'service_principal_configuration': {'key': 'servicePrincipalConfiguration', 'type': 'ServicePrincipalProperties'},
'image_pull_secret_name': {'key': 'imagePullSecretName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerServiceCredentials, self).__init__(**kwargs)
self.acs_kube_config = None
self.service_principal_configuration = None
self.image_pull_secret_name = None
class ErrorDetail(msrest.serialization.Model):
"""Error detail information.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: str,
message: str,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = code
self.message = message
class ErrorResponse(msrest.serialization.Model):
"""Error response information.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
:param details: An array of error detail objects.
:type details: list[~azure.mgmt.machinelearningcompute.models.ErrorDetail]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
}
def __init__(
self,
*,
code: str,
message: str,
details: Optional[List["ErrorDetail"]] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = code
self.message = message
self.details = details
class ErrorResponseWrapper(msrest.serialization.Model):
"""Wrapper for error response to follow ARM guidelines.
:param error: The error response.
:type error: ~azure.mgmt.machinelearningcompute.models.ErrorResponse
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
*,
error: Optional["ErrorResponse"] = None,
**kwargs
):
super(ErrorResponseWrapper, self).__init__(**kwargs)
self.error = error
class GlobalServiceConfiguration(msrest.serialization.Model):
"""Global configuration for services in the cluster.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, str]
:param etag: The configuration ETag for updates.
:type etag: str
:param ssl: The SSL configuration properties.
:type ssl: ~azure.mgmt.machinelearningcompute.models.SslConfiguration
:param service_auth: Optional global authorization keys for all user services deployed in
cluster. These are used if the service does not have auth keys.
:type service_auth: ~azure.mgmt.machinelearningcompute.models.ServiceAuthConfiguration
:param auto_scale: The auto-scale configuration.
:type auto_scale: ~azure.mgmt.machinelearningcompute.models.AutoScaleConfiguration
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'ssl': {'key': 'ssl', 'type': 'SslConfiguration'},
'service_auth': {'key': 'serviceAuth', 'type': 'ServiceAuthConfiguration'},
'auto_scale': {'key': 'autoScale', 'type': 'AutoScaleConfiguration'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
ssl: Optional["SslConfiguration"] = None,
service_auth: Optional["ServiceAuthConfiguration"] = None,
auto_scale: Optional["AutoScaleConfiguration"] = None,
**kwargs
):
super(GlobalServiceConfiguration, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.etag = etag
self.ssl = ssl
self.service_auth = service_auth
self.auto_scale = auto_scale
class KubernetesClusterProperties(msrest.serialization.Model):
"""Kubernetes cluster specific properties.
:param service_principal: The Azure Service Principal used by Kubernetes.
:type service_principal: ~azure.mgmt.machinelearningcompute.models.ServicePrincipalProperties
"""
_attribute_map = {
'service_principal': {'key': 'servicePrincipal', 'type': 'ServicePrincipalProperties'},
}
def __init__(
self,
*,
service_principal: Optional["ServicePrincipalProperties"] = None,
**kwargs
):
super(KubernetesClusterProperties, self).__init__(**kwargs)
self.service_principal = service_principal
class Resource(msrest.serialization.Model):
"""Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param location: Required. Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.location = location
self.type = None
self.tags = tags
class OperationalizationCluster(Resource):
"""Instance of an Azure ML Operationalization Cluster resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param location: Required. Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param description: The description of the cluster.
:type description: str
:ivar created_on: The date and time when the cluster was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the cluster was last modified.
:vartype modified_on: ~datetime.datetime
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or ~azure.mgmt.machinelearningcompute.models.OperationStatus
:ivar provisioning_errors: List of provisioning errors reported by the resource provider.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningcompute.models.ErrorResponseWrapper]
:param cluster_type: The cluster type. Possible values include: "ACS", "Local".
:type cluster_type: str or ~azure.mgmt.machinelearningcompute.models.ClusterType
:param storage_account: Storage Account properties.
:type storage_account: ~azure.mgmt.machinelearningcompute.models.StorageAccountProperties
:param container_registry: Container Registry properties.
:type container_registry: ~azure.mgmt.machinelearningcompute.models.ContainerRegistryProperties
:param container_service: Parameters for the Azure Container Service cluster.
:type container_service: ~azure.mgmt.machinelearningcompute.models.AcsClusterProperties
:param app_insights: AppInsights configuration.
:type app_insights: ~azure.mgmt.machinelearningcompute.models.AppInsightsProperties
:param global_service_configuration: Contains global configuration for the web services in the
cluster.
:type global_service_configuration:
~azure.mgmt.machinelearningcompute.models.GlobalServiceConfiguration
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_state': {'readonly': True},
'provisioning_errors': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'description': {'key': 'properties.description', 'type': 'str'},
'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'provisioning_errors': {'key': 'properties.provisioningErrors', 'type': '[ErrorResponseWrapper]'},
'cluster_type': {'key': 'properties.clusterType', 'type': 'str'},
'storage_account': {'key': 'properties.storageAccount', 'type': 'StorageAccountProperties'},
'container_registry': {'key': 'properties.containerRegistry', 'type': 'ContainerRegistryProperties'},
'container_service': {'key': 'properties.containerService', 'type': 'AcsClusterProperties'},
'app_insights': {'key': 'properties.appInsights', 'type': 'AppInsightsProperties'},
'global_service_configuration': {'key': 'properties.globalServiceConfiguration', 'type': 'GlobalServiceConfiguration'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
cluster_type: Optional[Union[str, "ClusterType"]] = None,
storage_account: Optional["StorageAccountProperties"] = None,
container_registry: Optional["ContainerRegistryProperties"] = None,
container_service: Optional["AcsClusterProperties"] = None,
app_insights: Optional["AppInsightsProperties"] = None,
global_service_configuration: Optional["GlobalServiceConfiguration"] = None,
**kwargs
):
super(OperationalizationCluster, self).__init__(location=location, tags=tags, **kwargs)
self.description = description
self.created_on = None
self.modified_on = None
self.provisioning_state = None
self.provisioning_errors = None
self.cluster_type = cluster_type
self.storage_account = storage_account
self.container_registry = container_registry
self.container_service = container_service
self.app_insights = app_insights
self.global_service_configuration = global_service_configuration
class OperationalizationClusterCredentials(msrest.serialization.Model):
"""Credentials to resources in the cluster.
:param storage_account: Credentials for the Storage Account.
:type storage_account: ~azure.mgmt.machinelearningcompute.models.StorageAccountCredentials
:param container_registry: Credentials for Azure Container Registry.
:type container_registry:
~azure.mgmt.machinelearningcompute.models.ContainerRegistryCredentials
:param container_service: Credentials for Azure Container Service.
:type container_service: ~azure.mgmt.machinelearningcompute.models.ContainerServiceCredentials
:param app_insights: Credentials for Azure AppInsights.
:type app_insights: ~azure.mgmt.machinelearningcompute.models.AppInsightsCredentials
:param service_auth_configuration: Global authorization keys for all user services deployed in
cluster. These are used if the service does not have auth keys.
:type service_auth_configuration:
~azure.mgmt.machinelearningcompute.models.ServiceAuthConfiguration
:param ssl_configuration: The SSL configuration for the services.
:type ssl_configuration: ~azure.mgmt.machinelearningcompute.models.SslConfiguration
"""
_attribute_map = {
'storage_account': {'key': 'storageAccount', 'type': 'StorageAccountCredentials'},
'container_registry': {'key': 'containerRegistry', 'type': 'ContainerRegistryCredentials'},
'container_service': {'key': 'containerService', 'type': 'ContainerServiceCredentials'},
'app_insights': {'key': 'appInsights', 'type': 'AppInsightsCredentials'},
'service_auth_configuration': {'key': 'serviceAuthConfiguration', 'type': 'ServiceAuthConfiguration'},
'ssl_configuration': {'key': 'sslConfiguration', 'type': 'SslConfiguration'},
}
def __init__(
self,
*,
storage_account: Optional["StorageAccountCredentials"] = None,
container_registry: Optional["ContainerRegistryCredentials"] = None,
container_service: Optional["ContainerServiceCredentials"] = None,
app_insights: Optional["AppInsightsCredentials"] = None,
service_auth_configuration: Optional["ServiceAuthConfiguration"] = None,
ssl_configuration: Optional["SslConfiguration"] = None,
**kwargs
):
super(OperationalizationClusterCredentials, self).__init__(**kwargs)
self.storage_account = storage_account
self.container_registry = container_registry
self.container_service = container_service
self.app_insights = app_insights
self.service_auth_configuration = service_auth_configuration
self.ssl_configuration = ssl_configuration
class OperationalizationClusterUpdateParameters(msrest.serialization.Model):
"""Parameters for PATCH operation on an operationalization cluster.
:param tags: A set of tags. Gets or sets a list of key value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in
length than 128 characters and a value no greater in length than 256 characters.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(OperationalizationClusterUpdateParameters, self).__init__(**kwargs)
self.tags = tags
class PaginatedOperationalizationClustersList(msrest.serialization.Model):
"""Paginated list of operationalization clusters.
:param value: An array of cluster objects.
:type value: list[~azure.mgmt.machinelearningcompute.models.OperationalizationCluster]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationalizationCluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["OperationalizationCluster"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedOperationalizationClustersList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceOperation(msrest.serialization.Model):
"""Resource operation.
:param name: Name of this operation.
:type name: str
:param display: Display of the operation.
:type display: ~azure.mgmt.machinelearningcompute.models.ResourceOperationDisplay
:param origin: The operation origin.
:type origin: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'ResourceOperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["ResourceOperationDisplay"] = None,
origin: Optional[str] = None,
**kwargs
):
super(ResourceOperation, self).__init__(**kwargs)
self.name = name
self.display = display
self.origin = origin
class ResourceOperationDisplay(msrest.serialization.Model):
"""Display of the operation.
:param provider: The resource provider name.
:type provider: str
:param resource: The resource name.
:type resource: str
:param operation: The operation.
:type operation: str
:param description: The description of the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(ResourceOperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class ServiceAuthConfiguration(msrest.serialization.Model):
"""Global service auth configuration properties. These are the data-plane authorization keys and are used if a service doesn't define it's own.
All required parameters must be populated in order to send to Azure.
:param primary_auth_key_hash: Required. The primary auth key hash. This is not returned in
     response of GET/PUT on the resource. To see this please call listKeys API.
:type primary_auth_key_hash: str
:param secondary_auth_key_hash: Required. The secondary auth key hash. This is not returned in
     response of GET/PUT on the resource. To see this please call listKeys API.
:type secondary_auth_key_hash: str
"""
_validation = {
'primary_auth_key_hash': {'required': True},
'secondary_auth_key_hash': {'required': True},
}
_attribute_map = {
'primary_auth_key_hash': {'key': 'primaryAuthKeyHash', 'type': 'str'},
'secondary_auth_key_hash': {'key': 'secondaryAuthKeyHash', 'type': 'str'},
}
def __init__(
self,
*,
primary_auth_key_hash: str,
secondary_auth_key_hash: str,
**kwargs
):
super(ServiceAuthConfiguration, self).__init__(**kwargs)
self.primary_auth_key_hash = primary_auth_key_hash
self.secondary_auth_key_hash = secondary_auth_key_hash
class ServicePrincipalProperties(msrest.serialization.Model):
"""The Azure service principal used by Kubernetes for configuring load balancers.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. The service principal client ID.
:type client_id: str
:param secret: Required. The service principal secret. This is not returned in response of
GET/PUT on the resource. To see this please call listKeys.
:type secret: str
"""
_validation = {
'client_id': {'required': True},
'secret': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
}
def __init__(
self,
*,
client_id: str,
secret: str,
**kwargs
):
super(ServicePrincipalProperties, self).__init__(**kwargs)
self.client_id = client_id
self.secret = secret
class SslConfiguration(msrest.serialization.Model):
"""SSL configuration. If configured data-plane calls to user services will be exposed over SSL only.
:param status: SSL status. Allowed values are Enabled and Disabled. Possible values include:
"Enabled", "Disabled".
:type status: str or ~azure.mgmt.machinelearningcompute.models.Status
:param cert: The SSL cert data in PEM format.
:type cert: str
:param key: The SSL key data in PEM format. This is not returned in response of GET/PUT on the
resource. To see this please call listKeys API.
:type key: str
:param cname: The CName of the certificate.
:type cname: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'cert': {'key': 'cert', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
'cname': {'key': 'cname', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "Status"]] = None,
cert: Optional[str] = None,
key: Optional[str] = None,
cname: Optional[str] = None,
**kwargs
):
super(SslConfiguration, self).__init__(**kwargs)
self.status = status
self.cert = cert
self.key = key
self.cname = cname
class StorageAccountCredentials(msrest.serialization.Model):
"""Access information for the storage account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_id: The ARM resource ID of the storage account.
:vartype resource_id: str
:ivar primary_key: The primary key of the storage account.
:vartype primary_key: str
:ivar secondary_key: The secondary key of the storage account.
:vartype secondary_key: str
"""
_validation = {
'resource_id': {'readonly': True},
'primary_key': {'readonly': True},
'secondary_key': {'readonly': True},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountCredentials, self).__init__(**kwargs)
self.resource_id = None
self.primary_key = None
self.secondary_key = None
class StorageAccountProperties(msrest.serialization.Model):
"""Properties of Storage Account.
:param resource_id: ARM resource ID of the Azure Storage Account to store CLI specific files.
If not provided one will be created. This cannot be changed once the cluster is created.
:type resource_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
**kwargs
):
super(StorageAccountProperties, self).__init__(**kwargs)
self.resource_id = resource_id
class SystemService(msrest.serialization.Model):
"""Information about a system service deployed in the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param system_service_type: Required. The system service type. Possible values include: "None",
"ScoringFrontEnd", "BatchFrontEnd".
:type system_service_type: str or ~azure.mgmt.machinelearningcompute.models.SystemServiceType
:ivar public_ip_address: The public IP address of the system service.
:vartype public_ip_address: str
:ivar version: The state of the system service.
:vartype version: str
"""
_validation = {
'system_service_type': {'required': True},
'public_ip_address': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'system_service_type': {'key': 'systemServiceType', 'type': 'str'},
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
*,
system_service_type: Union[str, "SystemServiceType"],
**kwargs
):
super(SystemService, self).__init__(**kwargs)
self.system_service_type = system_service_type
self.public_ip_address = None
self.version = None
class UpdateSystemServicesResponse(msrest.serialization.Model):
"""Response of the update system services API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar update_status: Update status. Possible values include: "Unknown", "Updating", "Creating",
"Deleting", "Succeeded", "Failed", "Canceled".
:vartype update_status: str or ~azure.mgmt.machinelearningcompute.models.OperationStatus
:ivar update_started_on: The date and time when the last system services update was started.
:vartype update_started_on: ~datetime.datetime
:ivar update_completed_on: The date and time when the last system services update completed.
:vartype update_completed_on: ~datetime.datetime
"""
_validation = {
'update_status': {'readonly': True},
'update_started_on': {'readonly': True},
'update_completed_on': {'readonly': True},
}
_attribute_map = {
'update_status': {'key': 'updateStatus', 'type': 'str'},
'update_started_on': {'key': 'updateStartedOn', 'type': 'iso-8601'},
'update_completed_on': {'key': 'updateCompletedOn', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(UpdateSystemServicesResponse, self).__init__(**kwargs)
self.update_status = None
self.update_started_on = None
self.update_completed_on = None
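# Usage sketch: composing a minimal cluster definition from the models above and
# serializing it with the msrest `serialize()` helper. The region, resource IDs
# and service principal values are placeholders for illustration, not working
# credentials or a complete deployment payload.
def _example_cluster_body():
    cluster = OperationalizationCluster(
        location="eastus2",
        description="Example operationalization cluster",
        cluster_type="ACS",
        container_service=AcsClusterProperties(
            orchestrator_type="Kubernetes",
            orchestrator_properties=KubernetesClusterProperties(
                service_principal=ServicePrincipalProperties(
                    client_id="00000000-0000-0000-0000-000000000000",
                    secret="<placeholder>")),
            agent_count=2),
        global_service_configuration=GlobalServiceConfiguration(
            ssl=SslConfiguration(status="Disabled")))
    return cluster.serialize()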
|
neuralDX7/solvers/dx7_vae.py | boldsort/NeuralDX7 | 119 | 12696385 | import torch
from torch.nn import functional as F
from importlib import import_module
from torch.optim import AdamW
from torch.distributions.kl import kl_divergence
from torch.distributions import Normal
from agoge import AbstractSolver
from .utils import sigmoidal_annealing
class DX7VAE(AbstractSolver):
"""
Solver used to train DX7VAE model
"""
def __init__(self, model,
Optim=AdamW, optim_opts=dict(lr= 1e-4),
max_beta=0.5,
beta_temp=1e-4,
**kwargs):
if isinstance(Optim, str):
Optim = import_module(Optim)
self.optim = Optim(params=model.parameters(), **optim_opts)
self.max_beta = max_beta
self.model = model
self.iter = 0
self.beta_temp = beta_temp
def loss(self, X, X_hat, flow):
"""
Computes the VAE loss objective and collects some training statistics
X - data tensor, torch.LongTensor(batch_size, num_parameters=155)
X_hat - data tensor, torch.FloatTensor(batch_size, num_parameters=155, max_value=128)
flow - the namedtuple returned by TriangularSylvesterFlow
for reference, the namedtuple is ('Flow', ('q_z', 'log_det', 'z_0', 'z_k', 'flow'))
"""
p_z_k = Normal(0,1).log_prob(flow.z_k).sum(-1)
q_z_0 = flow.q_z.log_prob(flow.z_0).sum(-1)
kl = (q_z_0-p_z_k-flow.log_det).mean() / flow.z_k.shape[-1]
beta = sigmoidal_annealing(self.iter, self.beta_temp).item()
reconstruction_loss = F.cross_entropy(X_hat.transpose(-1, -2), X)
accuracy = (X_hat.argmax(-1)==X).float().mean()
loss = reconstruction_loss + self.max_beta * beta * kl
return loss, {
'accuracy': accuracy,
'reconstruction_loss': reconstruction_loss,
'kl': kl,
'beta': beta,
'log_det': flow.log_det.mean(),
'p_z_k': p_z_k.mean(),
'q_z_0': q_z_0.mean(),
# 'iter': self.iter // self.
}
def solve(self, X, **kwargs):
"""
Take a gradient step given an input X
X - data tensor, torch.LongTensor(batch_size, num_parameters=155)
"""
Y = self.model(**X)
loss, L = self.loss(**X, **Y)
if loss != loss:
raise ValueError('Nan Values detected')
if self.model.training:
self.iter += 1
self.optim.zero_grad()
loss.backward()
self.optim.step()
return L
def step(self):
pass
def state_dict(self):
state_dict = {
'optim': self.optim.state_dict(),
'iter': self.iter
}
return state_dict
def load_state_dict(self, state_dict):
self.optim.load_state_dict(state_dict['optim'])
self.iter = state_dict['iter'] |
tests/chainer_tests/functions_tests/math_tests/test_minmax.py | zaltoprofen/chainer | 3,705 | 12696397 | <filename>tests/chainer_tests/functions_tests/math_tests/test_minmax.py
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'function_name': ['max', 'min'],
'shape': [(3, 2, 4)],
'dtype': [numpy.float32],
'axis': [
None,
0, 1, 2, # axis
-1, # negative_axis
(0, 1), # multi_axis
(1, 0), # multi_axis_invert
(0, -1), # negative_multi_axis
(-2, 0), # negative_multi_axis_invert
],
'keepdims': [True, False],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestMinMax(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options.update({
'eps': 1e-5, 'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options.update({
'eps': 1e-5, 'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
eps = 1e-5
# Sample x with single maximum/minimum value
while True:
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.function_name == 'max':
y = x.max(axis=self.axis, keepdims=True)
if not numpy.all((x > y - 2 * eps).sum(axis=self.axis) == 1):
continue
elif self.function_name == 'min':
y = x.min(axis=self.axis, keepdims=True)
if not numpy.all((x < y + 2 * eps).sum(axis=self.axis) == 1):
continue
return x,
def forward(self, inputs, device):
x, = inputs
function = getattr(functions, self.function_name)
y = function(x, axis=self.axis, keepdims=self.keepdims)
return y,
def forward_expected(self, inputs):
x, = inputs
function = getattr(numpy, 'a' + self.function_name)
expected = function(x, axis=self.axis, keepdims=self.keepdims)
expected = utils.force_array(expected)
return expected,
@testing.parameterize(*testing.product({
'function_name': ['max', 'min'],
}))
class TestMinMaxInvalid(unittest.TestCase):
def setUp(self):
self.function = getattr(functions, self.function_name)
self.x = numpy.array([1], dtype=numpy.float32)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
self.function(self.x, [0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
self.function(self.x, (1, 'x'))
def test_duplicate_axis(self):
with self.assertRaises(ValueError):
self.function(self.x, (0, 0))
def test_pos_neg_duplicate_axis(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
x = chainer.Variable(x_data)
with self.assertRaises(ValueError):
self.function(x, axis=(1, -2))
@testing.parameterize(*testing.product({
'function_name': ['argmax', 'argmin'],
'axis': [None, 0, 1, 2, -1, -2, -3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(3, 2, 4)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestArgMinMax(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
function = getattr(functions, self.function_name)
y = function(x, axis=self.axis)
y = functions.cast(y, numpy.int64)
return y,
def forward_expected(self, inputs):
x, = inputs
function = getattr(numpy, self.function_name)
expected = function(x, axis=self.axis)
expected = utils.force_array(expected)
return expected,
@testing.parameterize(*testing.product({
'function_name': ['argmax', 'argmin'],
}))
class TestArgMinMaxInvalid(unittest.TestCase):
def setUp(self):
self.function = getattr(functions, self.function_name)
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
self.function(self.x, [0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
self.function(self.x, (1, 'x'))
testing.run_module(__name__, __file__)
|
lib/simplejson/tests/test_indent.py | nirzari18/Query-Analysis-Application-on-Google-App-Engine | 5,079 | 12696411 | <gh_stars>1000+
from unittest import TestCase
import textwrap
import simplejson as json
from simplejson.compat import StringIO
class TestIndent(TestCase):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh',
'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
\t[
\t\t"blorpie"
\t],
\t[
\t\t"whoops"
\t],
\t[],
\t"d-shtaeou",
\t"d-nthiouh",
\t"i-vhbjkhnth",
\t{
\t\t"nifty": 87
\t},
\t{
\t\t"field": "yes",
\t\t"morefield": false
\t}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
d3 = json.dumps(h, indent=' ', sort_keys=True, separators=(',', ': '))
d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = json.loads(d1)
h2 = json.loads(d2)
h3 = json.loads(d3)
h4 = json.loads(d4)
self.assertEqual(h1, h)
self.assertEqual(h2, h)
self.assertEqual(h3, h)
self.assertEqual(h4, h)
self.assertEqual(d3, expect.replace('\t', ' '))
self.assertEqual(d4, expect.replace('\t', ' '))
# NOTE: Python 2.4 textwrap.dedent converts tabs to spaces,
# so the following is expected to fail. Python 2.4 is not a
# supported platform in simplejson 2.1.0+.
self.assertEqual(d2, expect)
def test_indent0(self):
h = {3: 1}
def check(indent, expected):
d1 = json.dumps(h, indent=indent)
self.assertEqual(d1, expected)
sio = StringIO()
json.dump(h, sio, indent=indent)
self.assertEqual(sio.getvalue(), expected)
# indent=0 should emit newlines
check(0, '{\n"3": 1\n}')
# indent=None is more compact
check(None, '{"3": 1}')
def test_separators(self):
lst = [1,2,3,4]
expect = '[\n1,\n2,\n3,\n4\n]'
expect_spaces = '[\n1, \n2, \n3, \n4\n]'
# Ensure that separators still works
self.assertEqual(
expect_spaces,
json.dumps(lst, indent=0, separators=(', ', ': ')))
# Force the new defaults
self.assertEqual(
expect,
json.dumps(lst, indent=0, separators=(',', ': ')))
# Added in 2.1.4
self.assertEqual(
expect,
json.dumps(lst, indent=0))
|
timesformer_pytorch/__init__.py | halixness/generative_timesformer_pytorch | 565 | 12696439 | from timesformer_pytorch.timesformer_pytorch import TimeSformer
|
gluonfr/utils/lr_scheduler.py | OmoooJ/gluon-facex | 257 | 12696450 | <reponame>OmoooJ/gluon-facex<filename>gluonfr/utils/lr_scheduler.py
# @File : lr_scheduler.py
# @Author: X.Yang
# @Contact : <EMAIL>
# @Date : 18-12-27
from __future__ import division
from math import pi, cos
from mxnet import lr_scheduler
class IterLRScheduler(lr_scheduler.LRScheduler):
r"""Learning Rate Scheduler
    For mode='step', we multiply lr by `step_factor` at each iteration listed in `step`.
For mode='poly'::
lr = targetlr + (baselr - targetlr) * (1 - iter / maxiter) ^ power
For mode='cosine'::
lr = targetlr + (baselr - targetlr) * (1 + cos(pi * iter / maxiter)) / 2
If warmup_epochs > 0, a warmup stage will be inserted before the main lr scheduler.
For warmup_mode='linear'::
lr = warmup_lr + (baselr - warmup_lr) * iter / max_warmup_iter
For warmup_mode='constant'::
lr = warmup_lr
Parameters
----------
mode : str
Modes for learning rate scheduler.
Currently it supports 'step', 'poly' and 'cosine'.
baselr : float
Base learning rate, i.e. the starting learning rate.
niters : int
Number of iterations in training.
step : list
A list of iterations to decay the learning rate.
step_factor : float
Learning rate decay factor.
targetlr : float
Target learning rate for poly and cosine, as the ending learning rate.
power : float
Power of poly function.
warmup_iters : int
Number of iterations for the warmup stage.
warmup_lr : float
The base learning rate for the warmup stage.
warmup_mode : str
Modes for the warmup stage.
Currently it supports 'linear' and 'constant'.
"""
def __init__(self, mode, baselr, niters, step=(30e3, 60e3, 90e3),
step_factor=0.1, targetlr=0, power=0.9,
warmup_iters=0, warmup_lr=0, warmup_mode='linear'):
super(IterLRScheduler, self).__init__()
assert (mode in ['step', 'poly', 'cosine'])
assert (warmup_mode in ['linear', 'constant'])
self.mode = mode
self.baselr = baselr
self.learning_rate = self.baselr
self.niters = niters
self.step = step
self.step_factor = step_factor
self.targetlr = targetlr
self.power = power
self.warmup_iters = warmup_iters
self.warmup_lr = warmup_lr
self.warmup_mode = warmup_mode
def __call__(self, num_update):
if self.warmup_iters > num_update:
if self.warmup_mode == 'linear':
self.learning_rate = self.warmup_lr + (self.baselr - self.warmup_lr) * \
num_update / self.warmup_iters
elif self.warmup_mode == 'constant':
self.learning_rate = self.warmup_lr
else:
raise NotImplementedError
else:
if self.mode == 'step':
count = sum([1 for s in self.step if s <= num_update])
self.learning_rate = self.baselr * pow(self.step_factor, count)
elif self.mode == 'poly':
self.learning_rate = self.targetlr + (self.baselr - self.targetlr) * \
pow(1 - (num_update - self.warmup_iters) / (self.niters - self.warmup_iters),
self.power)
elif self.mode == 'cosine':
self.learning_rate = self.targetlr + (self.baselr - self.targetlr) * \
(1 + cos(pi * (num_update - self.warmup_iters) /
(self.niters - self.warmup_iters))) / 2
else:
raise NotImplementedError
return self.learning_rate
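# Usage sketch: cosine decay with a 500-iteration linear warmup; the values are
# illustrative, not recommended settings. In training, the scheduler is simply
# called with the current update count; here we just sample a few points.
def _example_schedule(niters=10000):
    scheduler = IterLRScheduler(mode='cosine', baselr=0.1, niters=niters,
                                targetlr=1e-4, warmup_iters=500,
                                warmup_lr=1e-3, warmup_mode='linear')
    return [(it, scheduler(it)) for it in (0, 250, 500, niters // 2, niters)]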
|
skbio/stats/distance/_utils.py | jolespin/scikit-bio | 643 | 12696472 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from ._cutils import is_symmetric_and_hollow_cy
from ._cutils import distmat_reorder_cy, distmat_reorder_condensed_cy
def is_symmetric_and_hollow(mat):
"""
Check if a Distance Matrix is symmetric and hollow.
Equivalent to [not (mat.T != mat).any(), np.trace(mat) == 0]
Parameters
----------
mat : 2D array_like
Distance matrix.
Result:
-------
is_symmetric: Boolean
not (mat.T != mat).any()
is_hollow: Boolean
np.trace(mat) == 0
"""
# is_symmetric_and_hollow_cy is optimized
    # for the common case of c_contiguous.
# For all other cases, make a copy.
if not mat.flags.c_contiguous:
mat = np.asarray(mat, order='C')
return is_symmetric_and_hollow_cy(mat)
def is_symmetric(mat):
"""
Check if a Distance Matrix is symmetric.
Equivalent to not (mat.T != mat).any()
Parameters
----------
mat : 2D array_like
Distance matrix.
Result:
-------
is_symmetric: Boolean
not (mat.T != mat).any()
"""
# the is_hollow check is really cheap,
# so can reuse is_symmetric_and_hollow
return is_symmetric_and_hollow(mat)[0]
def is_hollow(mat):
"""
Check if a Distance Matrix is hollow.
Equivalent to np.trace(mat) == 0
Parameters
----------
mat : 2D array_like
Distance matrix.
Result:
-------
is_hollow: Boolean
np.trace(mat) == 0
"""
# is_symmetric_and_hollow_cy spends most
    # of its time in the symmetry check, so just use numpy
return (np.trace(mat) == 0)
def distmat_reorder_buf(in_mat, reorder_vec, out_mat, validate=False):
"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ [0, 1, 5, 4] ,
[1, 0, 3, 2] ,
[5, 3, 0, 6] ,
[4, 2, 6, 0] ]
Parameters
----------
in_mat : 2D array_like
Distance matrix
reorder_vec : 1D_array_like
List of permutation indexes
out_mat : 2D array_like
Output, Distance matrix,
must be in c_order and same size as reorder_vec
validate: boolean
        Optional, if True, validate reorder_vec content, defaults to False
"""
np_reorder = np.asarray(reorder_vec, dtype=np.long)
if validate:
maxsize = in_mat.shape[0]
        bad_cnt = np.where((np_reorder < 0) | (np_reorder >= maxsize))[0].size
if bad_cnt > 0:
raise ValueError("Invalid reorder_vec")
if not in_mat.flags.c_contiguous:
in_mat = np.asarray(in_mat, order='C')
distmat_reorder_cy(in_mat, np_reorder, out_mat)
def distmat_reorder(in_mat, reorder_vec, validate=False):
"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ [0, 1, 5, 4] ,
[1, 0, 3, 2] ,
[5, 3, 0, 6] ,
[4, 2, 6, 0] ]
Parameters
----------
in_mat : 2D array_like
Distance matrix, must be in c_order
reorder_vec : 1D_array_like
List of permutation indexes
validate: boolean
        Optional, if True, validate reorder_vec content, defaults to False
Returns
-------
out_mat : 2D array_like
Distance matrix
"""
np_reorder = np.asarray(reorder_vec, dtype=np.long)
if validate:
maxsize = in_mat.shape[0]
        bad_cnt = np.where((np_reorder < 0) | (np_reorder >= maxsize))[0].size
if bad_cnt > 0:
raise ValueError("Invalid reorder_vec")
if not in_mat.flags.c_contiguous:
in_mat = np.asarray(in_mat, order='C')
out_mat = np.empty([np_reorder.size, np_reorder.size], in_mat.dtype)
distmat_reorder_cy(in_mat, np_reorder, out_mat)
return out_mat
def distmat_reorder_condensed(in_mat, reorder_vec, validate=False):
"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ 1, 5, 4 , 3, 2, 6 ]
Parameters
----------
in_mat : 2D array_like
Distance matrix, must be in c_order
reorder_vec : 1D_array_like
List of permutation indexes
validate: boolean
        Optional, if True, validate reorder_vec content, defaults to False
Returns
-------
out_mat_condensed : 1D array_like
Condensed distance matrix
"""
np_reorder = np.asarray(reorder_vec, dtype=np.long)
if validate:
maxsize = in_mat.shape[0]
        bad_cnt = np.where((np_reorder < 0) | (np_reorder >= maxsize))[0].size
if bad_cnt > 0:
raise ValueError("Invalid reorder_vec")
if not in_mat.flags.c_contiguous:
in_mat = np.asarray(in_mat, order='C')
csize = np.long(((np_reorder.size-1)*np_reorder.size)/2)
out_mat_condensed = np.empty([csize], in_mat.dtype)
distmat_reorder_condensed_cy(in_mat, np_reorder, out_mat_condensed)
return out_mat_condensed
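# Usage sketch reproducing the example from the docstrings above: reorder a 4x4
# symmetric hollow matrix with the permutation [1, 0, 3, 2]. Illustrative only;
# this helper is not part of the public scikit-bio API.
def _example_reorder():
    mat = np.asarray([[0., 1., 2., 3.],
                      [1., 0., 4., 5.],
                      [2., 4., 0., 6.],
                      [3., 5., 6., 0.]], order='C')
    full = distmat_reorder(mat, [1, 0, 3, 2])                 # 4x4 reordered matrix
    condensed = distmat_reorder_condensed(mat, [1, 0, 3, 2])  # [1, 5, 4, 3, 2, 6]
    return full, condensed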
|
musicautobot/__init__.py | HalleyYoung/musicautobot | 402 | 12696506 | from .utils.setup_musescore import setup_musescore
setup_musescore() |
mmfashion/models/losses/mse_loss.py | RyanJiang0416/mmfashion | 952 | 12696515 | <reponame>RyanJiang0416/mmfashion
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
@LOSSES.register_module
class MSELoss(nn.Module):
def __init__(self,
ratio=1,
size_average=None,
reduce=None,
reduction='mean'):
super(MSELoss, self).__init__()
self.ratio = ratio
self.size_average = size_average
self.reduce = reduce
self.reduction = reduction
def forward(self, input, target, avg_factor=None):
return self.ratio * F.mse_loss(input, target, reduction=self.reduction)
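# Usage sketch: the loss can be built directly (as here) or through the LOSSES
# registry; the tensors are random placeholders standing in for predicted and
# target values.
def _example_mse_loss():
    import torch
    criterion = MSELoss(ratio=0.5)
    pred = torch.randn(4, 8)
    target = torch.randn(4, 8)
    return criterion(pred, target)  # 0.5 * mean squared error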
|
active_selection/regional_vote_entropy.py | hitman996/pytorch-deeplab-xception | 126 | 12696523 | from dataloader.paths import PathsDataset
from dataloader import indoor_scenes
from active_selection.vote_entropy import VoteEntropySelector
from utils.misc import turn_on_dropout, visualize_entropy, visualize_spx_dataset
import constants
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from collections import OrderedDict, defaultdict
class RegionalVoteEntropySelector:
def __init__(self, dataset, lmdb_handle, superpixel_dir, base_size, batch_size, num_classes, region_size, overlap_handler, mode):
self.lmdb_handle = lmdb_handle
self.base_size = base_size
self.batch_size = batch_size
self.dataset = dataset
self.superpixel_dir = superpixel_dir
self.overlap_handler = overlap_handler
self.vote_entropy_selector = VoteEntropySelector(dataset, lmdb_handle, base_size, batch_size, num_classes)
self.region_size = region_size
if mode == 'window':
self.select_next_batch = self.select_next_batch_with_windows
elif mode == 'superpixel':
self.select_next_batch = self.select_next_batch_with_superpixels
else:
raise NotImplementedError
# superpixel based selection methods
def select_next_batch_with_superpixels(self, model, training_set, selection_count):
model.eval()
model.apply(turn_on_dropout)
loader = DataLoader(indoor_scenes.IndoorScenesWithAllInfo(self.dataset, self.lmdb_handle, self.superpixel_dir, self.base_size, training_set.all_train_paths), batch_size=self.batch_size, shuffle=False, num_workers=0)
scores = []
superpixel_masks = []
#visualize_entropy.max_weight = 96*96
for sample in tqdm(loader, desc='Entropy'):
image_batch = sample['image'].cuda()
label_batch = sample['label'].cuda()
superpixel_batch = sample['superpixel']
superpixel_masks.extend([superpixel_batch[i, :, :] for i in range(superpixel_batch.shape[0])])
scores.extend(self.vote_entropy_selector.batch_entropy_func(model, image_batch, label_batch, superpixel_batch.numpy()))
all_train_scenes = sorted(list(set([indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset, x) for x in training_set.all_train_paths])))
scene_indices = [all_train_scenes.index(indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset, im_path)) for im_path in training_set.all_train_paths]
superpixel_ids = []
superpixel_scores_expanded = []
for image_score_idx, superpixel_scores in enumerate(scores):
for superpixel_idx in superpixel_scores.keys():
superpixel_ids.append((scene_indices[image_score_idx], image_score_idx, superpixel_idx))
superpixel_scores_expanded.append(superpixel_scores[superpixel_idx])
_sorted_scores = np.array(list(list(zip(*sorted(zip(superpixel_ids, superpixel_scores_expanded), key=lambda x: x[1], reverse=True)))[0]))
sorted_scores = np.zeros((_sorted_scores.shape[0], _sorted_scores.shape[1] + 1), dtype=np.int32)
sorted_scores[:, 0:_sorted_scores.shape[1]] = _sorted_scores
total_pixels_selected = 0
selected_regions = OrderedDict()
image_superpixels = defaultdict(list)
ctr = 0
print('Selecting superpixels...')
pbar = tqdm(total=selection_count)
while total_pixels_selected < selection_count * self.base_size[0] * self.base_size[1] and ctr < sorted_scores.shape[0]:
if sorted_scores[ctr, 2] not in training_set.image_superpixels[training_set.all_train_paths[sorted_scores[ctr, 1]]] and not (sorted_scores[ctr, 3] == 1):
mask = (superpixel_masks[sorted_scores[ctr, 1]] == sorted_scores[ctr, 2]).numpy().astype(np.uint8)
if training_set.all_train_paths[sorted_scores[ctr, 1]] in selected_regions:
selected_regions[training_set.all_train_paths[sorted_scores[ctr, 1]]] = selected_regions[training_set.all_train_paths[sorted_scores[ctr, 1]]] | mask
else:
selected_regions[training_set.all_train_paths[sorted_scores[ctr, 1]]] = mask
image_superpixels[training_set.all_train_paths[sorted_scores[ctr, 1]]].append(sorted_scores[ctr, 2])
valid_pixels = mask.sum()
total_pixels_selected += valid_pixels
pbar.update(valid_pixels / (self.base_size[0] * self.base_size[1]))
if not self.overlap_handler is None:
overlapping_indices = []
tgt_scene_id = indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset, training_set.all_train_paths[sorted_scores[ctr, 1]])
overlap_dict = self.overlap_handler.get_overlap_dict_for_scene(tgt_scene_id)
tgt_scene_list_index = all_train_scenes.index(tgt_scene_id)
sorted_scores_view_mask = sorted_scores[:, 0] == tgt_scene_list_index
sorted_scores_view = sorted_scores[sorted_scores_view_mask]
for sc_idx in range(sorted_scores_view.shape[0]):
src_scene_id = indoor_scenes.IndoorScenesWithAllInfo.get_scene_id_from_image_path(self.dataset, training_set.all_train_paths[sorted_scores_view[sc_idx, 1]])
if sorted_scores[ctr, 1] in overlap_dict and (sorted_scores[ctr, 2], sorted_scores_view[sc_idx, 1], sorted_scores_view[sc_idx, 2]) in overlap_dict[sorted_scores[ctr, 1]]:
if overlap_dict[sorted_scores[ctr, 1]][(sorted_scores[ctr, 2], sorted_scores_view[sc_idx, 1], sorted_scores_view[sc_idx, 2])] > self.overlap_handler.superpixel_overlap:
sorted_scores_view[sc_idx, 3] = 1
sorted_scores[sorted_scores_view_mask] = sorted_scores_view
ctr += 1
pbar.close()
print('Selected ', total_pixels_selected / (self.base_size[0] * self.base_size[1]), 'images')
model.eval()
training_set.expand_training_set(selected_regions, image_superpixels)
# window based selection methods
def nms(self, img_idx, score_map):
selected_score_map_pts = []
for i in range((score_map.shape[0]*score_map.shape[1])//(self.region_size*self.region_size)):
argmax = score_map.view(-1).argmax()
r, c = argmax // score_map.shape[1], argmax % score_map.shape[1]
selected_score_map_pts.append((img_idx, r.cpu().item(), c.cpu().item(), score_map[r, c].cpu().item()))
score_map[max(0, r - self.region_size): min(score_map.shape[0], r + self.region_size), max(0, c - self.region_size): min(score_map.shape[1], c + self.region_size)] = 0
return selected_score_map_pts
def select_next_batch_with_windows(self, model, training_set, selection_count):
model.eval()
model.apply(turn_on_dropout)
weights = torch.cuda.FloatTensor(self.region_size, self.region_size).fill_(1.)
loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size, training_set.all_train_paths), batch_size=self.batch_size, shuffle=False, num_workers=0)
map_ctr = 0
scores = []
for sample in tqdm(loader, desc='Entropy'):
image_batch = sample['image'].cuda()
label_batch = sample['label'].cuda()
for batch_idx, entropy_map in enumerate(self.vote_entropy_selector.batch_entropy_func(model, image_batch, label_batch)):
if training_set.all_train_paths[map_ctr] in training_set.get_selections():
entropy_map[training_set.get_selections()[training_set.all_train_paths[map_ctr]] == 1] = 0
convolution_output = torch.nn.functional.conv2d(torch.cuda.FloatTensor(entropy_map).unsqueeze(0).unsqueeze(0), weights.unsqueeze(0).unsqueeze(0)).squeeze().squeeze()
scores.extend(self.nms(map_ctr, convolution_output))
map_ctr += 1
selected_samples = sorted(scores, key=lambda x: x[3], reverse=True)[:int(0.5 + selection_count * self.base_size[0] * self.base_size[1] / (self.region_size * self.region_size))]
print('Last selected sample: ', selected_samples[-1])
selected_regions = OrderedDict()
total_pixels_selected = 0
for ss in selected_samples:
mask = np.zeros(self.base_size, dtype=np.int) == 1
mask[ss[1] : ss[1] + self.region_size, ss[2] : ss[2] + self.region_size] = True
valid_pixels = mask.sum()
total_pixels_selected += valid_pixels
if training_set.all_train_paths[ss[0]] in selected_regions:
selected_regions[training_set.all_train_paths[ss[0]]] = selected_regions[training_set.all_train_paths[ss[0]]] | mask
else:
selected_regions[training_set.all_train_paths[ss[0]]] = mask
model.eval()
print('Selected ', total_pixels_selected / (self.base_size[0] * self.base_size[1]), 'images')
training_set.expand_training_set(selected_regions, [])
|
starthinker/task/dv_targeter/targeting.py | arbrown/starthinker | 138 | 12696547 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.bigquery import query_to_view
from starthinker.util.bigquery import table_create
from starthinker.util.data import get_rows
from starthinker.util.data import put_rows
from starthinker.util.google_api import API_DV360
from starthinker.util.discovery_to_bigquery import Discovery_To_BigQuery
from starthinker.util.regexp import lookup_id
from starthinker.util.sheets import sheets_clear
from starthinker.task.dv_targeter.edit import edit_log
from starthinker.task.dv_targeter.edit import edit_preview
from starthinker.util.dv_targeting import Assigned_Targeting
TARGETING_TYPES = [
'TARGETING_TYPE_EXCHANGE',
'TARGETING_TYPE_SUB_EXCHANGE',
'TARGETING_TYPE_BROWSER',
'TARGETING_TYPE_LANGUAGE',
'TARGETING_TYPE_DEVICE_MAKE_MODEL',
'TARGETING_TYPE_OPERATING_SYSTEM',
'TARGETING_TYPE_LANGUAGE',
'TARGETING_TYPE_CARRIER_AND_ISP',
'TARGETING_TYPE_CATEGORY',
'TARGETING_TYPE_APP_CATEGORY',
]
def targeting_clear(config, task):
table_create(
config,
task['auth_bigquery'],
config.project,
task['dataset'],
'DV_Targeting_Options',
Discovery_To_BigQuery(
'displayvideo',
'v1'
).resource_schema(
'TargetingOption'
)
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Targeting Options',
'A2:Q'
)
table_create(
config,
task['auth_bigquery'],
config.project,
task['dataset'],
'DV_Targeting_Assigned',
Discovery_To_BigQuery(
'displayvideo',
'v1'
).resource_schema(
'AssignedTargetingOption'
)
)
def targeting_clear_changes(config, task):
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Destination Targeting',
'A2:Z'
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Brand Safety Targeting',
'A2:Z'
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Demographic Targeting',
'A2:Z'
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Audience Targeting',
'A2:Z'
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Device Targeting',
'A2:Z'
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Geography Targeting',
'A2:Z'
)
sheets_clear(
config,
task['auth_sheets'],
task['sheet'],
'Viewability Targeting',
'A2:Z'
)
def targeting_load(config, task):
# load multiple from user defined sheet
def load_multiple():
advertisers = get_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Advertisers',
"header":False,
'range': 'A2:A'
}}
)
for advertiser in advertisers:
for targeting_type in TARGETING_TYPES:
yield from API_DV360(
config,
task['auth_dv'],
iterate=True
).targetingTypes().targetingOptions().list(
advertiserId=str(lookup_id(advertiser[0])),
targetingType=targeting_type
).execute()
targeting_clear(config, task)
# write to database
put_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'table': 'DV_Targeting_Options',
'schema': Discovery_To_BigQuery(
'displayvideo',
'v1'
).method_schema(
'targetingTypes.targetingOptions.list'
),
'format': 'JSON'
}},
load_multiple()
)
# write app category
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'A2:A'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(appCategoryDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write exchange
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'B2:B'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(subExchangeDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write browser
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'C2:C'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(browserDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write make / model
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'D2:D'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(deviceMakeModelDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write category
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'E2:E'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(categoryDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write language
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'F2:F'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(languageDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write operating system
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'G2:G'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
DISTINCT(operatingSystemDetails.displayName)
FROM `{dataset}.DV_Targeting_Options`
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
# write carrier and isp
put_rows(
config,
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'Targeting Options',
"header":False,
'range': 'H2:H'
}},
get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
CONCAT(carrierAndIspDetails.displayName, ' - ', SUBSTR(carrierAndIspDetails.type, 22))
FROM `{dataset}.DV_Targeting_Options`
GROUP BY 1
ORDER BY 1
""".format(**task),
'legacy': False
}}
)
)
def targeting_combine(config, task):
# read destination targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Destination_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Authorized_Seller", "type": "STRING" },
{ "name": "User_Rewarded_Content", "type": "STRING" },
{ "name": "Exchange", "type": "STRING" },
{ "name": "Sub_Exchange", "type": "STRING" },
{ "name": "Channel", "type": "STRING" },
{ "name": "Channel_Negative", "type": "BOOLEAN" },
{ "name": "Inventory_Source", "type": "STRING" },
{ "name": "Inventory_Group", "type": "STRING" },
{ "name": "URL", "type": "STRING" },
{ "name": "URL_Negative", "type": "BOOLEAN" },
{ "name": "App", "type": "STRING" },
{ "name": "App_Negative", "type": "BOOLEAN" },
{ "name": "App_Category", "type": "STRING" },
{ "name": "App_Category_Negative", "type": "BOOLEAN" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Destination Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
# read brand safety targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Brand_Safety_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Content_Label", "type": "STRING" },
{ "name": "Sensitive_Category", "type": "STRING" },
{ "name": "Negative_Keyword_List", "type": "STRING" },
{ "name": "Category", "type": "STRING" },
{ "name": "Category_Negative", "type": "BOOLEAN" },
{ "name": "Keyword", "type": "STRING" },
{ "name": "Keyword_Negative", "type": "BOOLEAN" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Brand Safety Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
# read demographic targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Demographic_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Age_Range", "type": "STRING" },
{ "name": "Gender", "type": "STRING" },
{ "name": "Parental_Status", "type": "STRING" },
{ "name": "Household_Income", "type": "STRING" },
{ "name": "Language", "type": "STRING" },
{ "name": "Language_Negative", "type": "BOOLEAN" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Demographic Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
# read audience targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Audience_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Included_1P_And_3P_Group", "type": "INTEGER" },
{ "name": "Included_1P_And_3P", "type": "STRING" },
{ "name": "Included_1P_And_3P_Recency", "type": "STRING" },
{ "name": "Excluded_1P_And_3P", "type": "STRING" },
{ "name": "Excluded_1P_And_3P_Recency", "type": "STRING" },
{ "name": "Included_Google", "type": "STRING" },
{ "name": "Excluded_Google", "type": "STRING" },
{ "name": "Included_Custom", "type": "STRING" },
{ "name": "Included_Combined", "type": "STRING" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Audience Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
# read device targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Device_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Device_Type", "type": "STRING" },
{ "name": "Make_Model", "type": "STRING" },
{ "name": "Make_Model_Negative", "type": "BOOLEAN" },
{ "name": "Operating_System", "type": "STRING" },
{ "name": "Operating_System_Negative", "type": "BOOLEAN" },
{ "name": "Browser", "type": "STRING" },
{ "name": "Browser_Negative", "type": "BOOLEAN" },
{ "name": "Environment", "type": "STRING" },
{ "name": "Carrier_And_ISP", "type": "STRING" },
{ "name": "Carrier_And_ISP_Negative", "type": "BOOLEAN" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Device Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
# read geography targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Geography_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Day_Of_Week", "type": "STRING" },
{ "name": "Hour_Start", "type": "INTEGER" },
{ "name": "Hour_End", "type": "INTEGER" },
{ "name": "Timezone", "type": "STRING" },
{ "name": "Geo_Region", "type": "STRING" },
{ "name": "Geo_Region_Type", "type": "STRING" },
{ "name": "Geo_Region_Negative", "type": "BOOLEAN" },
{ "name": "Proximity_Location_List", "type": "STRING" },
{ "name": "Proximity_Location_List_Radius_Range", "type": "STRING" },
{ "name": "Regional_Location_List", "type": "STRING" },
{ "name": "Regional_Location_List_Negative", "type": "BOOLEAN" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Geography Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
# read viewability targeting
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_Viewability_Targeting",
"schema": [
{ "name": "Action", "type": "STRING" },
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "LineItem", "type": "STRING" },
{ "name": "Video_Player_Size", "type": "STRING" },
{ "name": "In_Stream_Position", "type": "STRING" },
{ "name": "Out_Stream_Position", "type": "BOOLEAN" },
{ "name": "On_Screen_Position", "type": "STRING" },
{ "name": "Viewability", "type": "STRING" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Viewability Targeting",
"header":False,
"range": "A2:Z"
}}
)
)
query_to_view(
config,
task["auth_bigquery"],
config.project,
task["dataset"],
"SHEET_Combined_Targeting",
"""SELECT
COALESCE(
L.advertiserId,
A.advertiserId,
CAST(REGEXP_EXTRACT(Advertiser, r' - (\d+)$') AS INT64)
) AS Advertiser_Lookup,
T.*
FROM (
SELECT
COALESCE(A.Action,B.Action,C.Action,D.Action,E.Action,F.Action,G.Action) AS Action,
COALESCE(A.partner,B.Partner,C.Partner,D.partner,E.Partner,F.Partner,G.Partner) AS Partner,
COALESCE(A.Advertiser,B.Advertiser,C.Advertiser,D.Advertiser,E.Advertiser,F.Advertiser,G.Advertiser) AS Advertiser,
COALESCE(A.LineItem,B.LineItem,C.LineItem,D.LineItem,E.LineItem,F.LineItem,G.LineItem) AS LineItem,
* EXCEPT (Action, Partner, Advertiser, LineItem)
FROM `{dataset}.SHEET_Destination_Targeting` AS A
FULL OUTER JOIN `{dataset}.SHEET_Brand_Safety_Targeting` AS B
ON A.Action=B.Action
AND A.Partner=B.Partner
AND A.Advertiser=B.Advertiser
AND A.LineItem=B.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Demographic_Targeting` AS C
ON A.Action=C.Action
AND A.Partner=C.Partner
AND A.Advertiser=C.Advertiser
AND A.LineItem=C.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Audience_Targeting` AS D
ON A.Action=D.Action
AND A.Partner=D.Partner
AND A.Advertiser=D.Advertiser
AND A.LineItem=D.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Device_Targeting` AS E
ON A.Action=E.Action
AND A.Partner=E.Partner
AND A.Advertiser=E.Advertiser
AND A.LineItem=E.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Geography_Targeting` AS F
ON A.Action=F.Action
AND A.Partner=F.Partner
AND A.Advertiser=F.Advertiser
AND A.LineItem=F.LineItem
FULL OUTER JOIN `{dataset}.SHEET_Viewability_Targeting` AS G
ON A.Action=G.Action
AND A.Partner=G.Partner
AND A.Advertiser=G.Advertiser
AND A.LineItem=G.LineItem
) AS T
LEFT JOIN `{dataset}.DV_LineItems` AS L
ON CAST(REGEXP_EXTRACT(T.LineItem, r' - (\d+)$') AS INT64)=L.lineItemId
LEFT JOIN (
SELECT partnerId, advertiserId
FROM `{dataset}.DV_Advertisers`
GROUP BY 1,2
) AS A
ON CAST(REGEXP_EXTRACT(T.Partner, r' - (\d+)$') AS INT64)=A.partnerId
""".format(**task),
legacy=False
)
def targeting_edit(config, task, commit=False):
edits = []
targetings = {}
targeting_combine(config, task)
for row in get_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table":"SHEET_Combined_Targeting",
}},
as_object=True
):
# check if settings are applied at this layer
if not row['Action']: continue
# create new batch of candidates
candidates = []
# check partner ID from sheet
if row['Partner']:
# if action is at Advertiser layer, translate partner into list of advertisers
if 'ADVERTISERS' in row['Action'].upper():
for advertiserId in get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': "SELECT advertiserId FROM `{dataset}.DV_Advertisers` WHERE partnerId={partnerId};".format(
dataset=task['dataset'],
partnerId=lookup_id(row['Partner'])
),
'legacy': False
}},
unnest=True
):
candidates.append(
targetings.setdefault(
('Advertiser', 'Partner {0} : {1}'.format(row['Partner'], advertiserId)),
Assigned_Targeting(
config,
task["auth_dv"],
None,
advertiserId,
None
)
)
)
# if action is at LineItem layer, translate partner into list of lineitems
elif 'LINEITEMS' in row['Action'].upper():
print("NOT IMPLEMENTED UNTIL FURTHER EVALUATION")
# if action is directly on Partner, only add it to the list
else:
candidates.append(
targetings.setdefault(
('Partner', row['Partner']),
Assigned_Targeting(
config,
task["auth_dv"],
lookup_id(row['Partner']),
row['Advertiser_Lookup'], # required by API for lookup of values ( not for targeting )
None
)
)
)
# check advertiser ID from sheet
if row['Advertiser']:
# if action is at LineItem layer, translate advertiser into list of lineitems
if 'LINEITEMS' in row['Action'].upper():
for lineItemId in get_rows(
config,
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': "SELECT lineItemId FROM `{dataset}.DV_LineItems` WHERE advertiserId={advertiserId};".format(
dataset=task['dataset'],
advertiserId=lookup_id(row['Advertiser'])
),
'legacy': False
}},
unnest=True
):
candidates.append(
targetings.setdefault(
('LineItem', 'Advertiser {0} : {1}'.format(row['Advertiser'], lineItemId)),
Assigned_Targeting(
config,
task["auth_dv"],
None,
lookup_id(row['Advertiser']),
lineItemId
)
)
)
# if action is directly on Advertiser, only add it to the list
else:
candidates.append(
targetings.setdefault(
('Advertiser', row['Advertiser']),
Assigned_Targeting(
config,
task["auth_dv"],
None,
lookup_id(row['Advertiser']),
None
)
)
)
# check lineitem ID from sheet
if row['LineItem']:
candidates.append(
targetings.setdefault(
('LineItem', row['LineItem']),
Assigned_Targeting(
config,
task["auth_dv"],
None,
row['Advertiser_Lookup'],
lookup_id(row['LineItem'])
)
)
)
# attempt targeting changes for each candidate
for targeting in candidates:
if row['Authorized_Seller']:
if 'ADD' in row['Action'].upper():
targeting.add_authorized_seller(row['Authorized_Seller'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_authorized_seller(row['Authorized_Seller'])
if row['User_Rewarded_Content']:
if 'ADD' in row['Action'].upper():
targeting.add_user_rewarded_content(row['User_Rewarded_Content'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_user_rewarded_content(row['User_Rewarded_Content'])
if row['Exchange']:
if 'ADD' in row['Action'].upper():
targeting.add_exchange(row['Exchange'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_exchange(row['Exchange'])
if row['Sub_Exchange']:
if 'ADD' in row['Action'].upper():
targeting.add_sub_exchange(row['Sub_Exchange'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_sub_exchange(row['Sub_Exchange'])
if row['Channel']:
identifier = lookup_id(row['Channel'])
if 'ADD' in row['Action'].upper():
targeting.add_channel(identifier, row['Channel_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_channel(identifier)
if row['Inventory_Source']:
identifier = lookup_id(row['Inventory_Source'])
if 'ADD' in row['Action'].upper():
targeting.add_inventory_source(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_inventory_source(identifier)
if row['Inventory_Group']:
identifier = lookup_id(row['Inventory_Group'])
if 'ADD' in row['Action'].upper():
targeting.add_inventory_source_group(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_inventory_source_group(identifier)
if row['URL']:
if 'ADD' in row['Action'].upper():
targeting.add_url(row['URL'], row['URL_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_url(row['URL'])
if row['App']:
identifier = lookup_id(row['App'])
if 'ADD' in row['Action'].upper():
targeting.add_app(identifier, row['App_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_app(identifier)
if row['App_Category']:
if 'ADD' in row['Action'].upper():
targeting.add_app_category(row['App_Category'], row['App_Category_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_app_category(row['App_Category'])
if row['Content_Label']:
if 'ADD' in row['Action'].upper():
targeting.add_content_label(row['Content_Label'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_content_label(row['Content_Label'])
if row['Sensitive_Category']:
if 'ADD' in row['Action'].upper():
targeting.add_sensitive_category(row['Sensitive_Category'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_sensitive_category(row['Sensitive_Category'])
if row['Negative_Keyword_List']:
identifier = lookup_id(row['Negative_Keyword_List'])
if 'ADD' in row['Action'].upper():
targeting.add_negative_keyword_list(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_negative_keyword_list(identifier)
if row['Keyword']:
if 'ADD' in row['Action'].upper():
targeting.add_keyword(row['Keyword'], row['Keyword_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_keyword(row['Keyword'])
if row['Category']:
if 'ADD' in row['Action'].upper():
targeting.add_category(row['Category'], row['Category_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_category(row['Category'])
if row['Age_Range']:
if 'ADD' in row['Action'].upper():
targeting.add_age_range(row['Age_Range'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_age_range(row['Age_Range'])
if row['Gender']:
if 'ADD' in row['Action'].upper():
targeting.add_gender(row['Gender'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_gender(row['Gender'])
if row['Parental_Status']:
if 'ADD' in row['Action'].upper():
targeting.add_parental_status(row['Parental_Status'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_parental_status(row['Parental_Status'])
if row['Geo_Region']:
if 'ADD' in row['Action'].upper():
targeting.add_geo_region(row['Geo_Region'], row['Geo_Region_Type'], row['Geo_Region_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_geo_region(row['Geo_Region'])
if row['Proximity_Location_List']:
if 'ADD' in row['Action'].upper():
targeting.add_proximity_location_list(row['Proximity_Location_List'], row['Proximity_Location_List_Radius_Range'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_proximity_location_list(row['Proximity_Location_List'])
if row['Regional_Location_List']:
identifier = lookup_id(row['Regional_Location_List'])
if 'ADD' in row['Action'].upper():
targeting.add_regional_location_list(identifier, row['Regional_Location_List_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_regional_location_list(identifier)
if row['Household_Income']:
if 'ADD' in row['Action'].upper():
targeting.add_household_income(row['Household_Income'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_household_income(row['Household_Income'])
if row['Language']:
if 'ADD' in row['Action'].upper():
targeting.add_language(row['Language'], row['Language_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_language(row['Language'])
if row['Included_1P_And_3P']:
identifier = lookup_id(row['Included_1P_And_3P'])
if 'ADD' in row['Action'].upper():
targeting.add_included_1p_and_3p_audience(identifier, row['Included_1P_And_3P_Recency'], row['Included_1P_And_3P_Group'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_included_1p_and_3p_audience(identifier, row['Included_1P_And_3P_Recency'], row['Included_1P_And_3P_Group'])
if row['Excluded_1P_And_3P']:
identifier = lookup_id(row['Excluded_1P_And_3P'])
if 'ADD' in row['Action'].upper():
targeting.add_excluded_1p_and_3p_audience(identifier, row['Excluded_1P_And_3P_Recency'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_excluded_1p_and_3p_audience(identifier, row['Excluded_1P_And_3P_Recency'])
if row['Included_Google']:
identifier = lookup_id(row['Included_Google'])
if 'ADD' in row['Action'].upper():
targeting.add_included_google_audience(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_included_google_audience(identifier)
if row['Excluded_Google']:
identifier = lookup_id(row['Excluded_Google'])
if 'ADD' in row['Action'].upper():
targeting.add_excluded_google_audience(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_excluded_google_audience(identifier)
if row['Included_Custom']:
identifier = lookup_id(row['Included_Custom'])
if 'ADD' in row['Action'].upper():
targeting.add_included_custom_audience(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_included_custom_audience(identifier)
if row['Included_Combined']:
identifier = lookup_id(row['Included_Combined'])
if 'ADD' in row['Action'].upper():
targeting.add_included_combined_audience(identifier)
elif 'DELETE' in row['Action'].upper():
targeting.delete_included_combined_audience(identifier)
if row['Device_Type']:
if 'ADD' in row['Action'].upper():
targeting.add_device_type(row['Device_Type'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_device_type(row['Device_Type'])
if row['Make_Model']:
if 'ADD' in row['Action'].upper():
targeting.add_make_model(row['Make_Model'], row['Make_Model_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_make_model(row['Make_Model'])
if row['Operating_System']:
if 'ADD' in row['Action'].upper():
targeting.add_operating_system(row['Operating_System'], row['Operating_System_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_operating_system(row['Operating_System'])
if row['Browser']:
if 'ADD' in row['Action'].upper():
targeting.add_browser(row['Browser'], row['Browser_Negative'] or False)
elif 'DELETE' in row['Action'].upper():
targeting.delete_browser(row['Browser'])
if row['Environment']:
if 'ADD' in row['Action'].upper():
targeting.add_environment(row['Environment'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_environment(row['Environment'])
if row['Carrier_And_ISP']:
if 'ADD' in row['Action'].upper():
targeting.add_carrier_and_isp(row['Carrier_And_ISP'], row['Carrier_And_ISP_Negative'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_carrier_and_isp(row['Carrier_And_ISP'])
if row['Day_Of_Week']:
if 'ADD' in row['Action'].upper():
targeting.add_day_and_time(row['Day_Of_Week'], row['Hour_Start'], row['Hour_End'], row['Timezone'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_day_and_time(row['Day_Of_Week'], row['Hour_Start'], row['Hour_End'], row['Timezone'])
if row['Video_Player_Size']:
if 'ADD' in row['Action'].upper():
targeting.add_video_player_size(row['Video_Player_Size'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_video_player_size(row['Video_Player_Size'])
if row['In_Stream_Position']:
if 'ADD' in row['Action'].upper():
targeting.add_instream_position(row['In_Stream_Position'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_instream_position(row['In_Stream_Position'])
if row['Out_Stream_Position']:
if 'ADD' in row['Action'].upper():
targeting.add_outstream_position(row['Out_Stream_Position'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_outstream_position()
if row['On_Screen_Position']:
if 'ADD' in row['Action'].upper():
targeting.add_on_screen_position(row['On_Screen_Position'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_on_screen_position(row['On_Screen_Position'])
if row['Viewability']:
if 'ADD' in row['Action'].upper():
targeting.add_viewability(row['Viewability'])
elif 'DELETE' in row['Action'].upper():
targeting.delete_viewability(row['Viewability'])
for layer_and_name, targeting in targetings.items():
layer, name = layer_and_name
body = targeting.get_body()
warnings = targeting.get_warnings()
if body:
parameters = {'body':body}
if layer == 'Partner':
parameters['partnerId'] = str(targeting.partner)
elif layer == 'Advertiser':
parameters['advertiserId'] = str(targeting.advertiser)
elif layer == 'LineItem':
parameters['advertiserId'] = str(targeting.advertiser)
parameters['lineItemId'] = str(targeting.lineitem)
edits.append({
"layer": layer,
"partner": name if layer == 'Partner' else '',
"advertiser": name if layer == 'Advertiser' else '',
"line_item": name if layer == 'LineItem' else '',
"parameters": parameters
})
if warnings:
edit_log(config, task, {
"layer": layer,
"partner": name if layer == 'Partner' else '',
"advertiser": name if layer == 'Advertiser' else '',
"line_item": name if layer == 'LineItem' else '',
"warning": "\n".join(warnings)
})
edit_preview(config, task, edits)
if commit:
targeting_commit(config, task, edits)
def targeting_commit(config, task, edits):
for edit in edits:
try:
if edit.get("line_item"):
print("API LINE ITEM:", edit["line_item"])
response = API_DV360(
config,
task["auth_dv"]
).advertisers().lineItems().bulkEditLineItemAssignedTargetingOptions(
**edit["parameters"]
).execute()
edit["success"] = len(response.get("createdAssignedTargetingOptions", []))
elif edit.get("advertiser"):
print("API ADVERTISER:", edit["advertiser"])
response = API_DV360(
config,
task["auth_dv"]
).advertisers().bulkEditAdvertiserAssignedTargetingOptions(
**edit["parameters"]
).execute()
edit["success"] = len(response.get("createdAssignedTargetingOptions", []))
elif edit.get("partner"):
print("API PARTNER:", edit["partner"])
response = API_DV360(
config,
task["auth_dv"]
).partners().bulkEditPartnerAssignedTargetingOptions(
**edit["parameters"]
).execute()
edit["success"] = len(response.get("createdAssignedTargetingOptions", []))
except Exception as e:
edit["error"] = str(e)
finally:
edit_log(config, task, edit)
edit_log(config, task)
|
src/example_extract_finetune.py | OlegJakushkin/s3prl | 856 | 12696554 | <reponame>OlegJakushkin/s3prl<gh_stars>100-1000
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ example_extract_finetune.py ]
# Synopsis [ an example code of using the wrapper class for downstream feature extraction or finetune ]
# Author [ <NAME> (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import torch
from transformer.nn_transformer import TRANSFORMER
from downstream.model import example_classifier
from downstream.solver import get_optimizer
################
# EXAMPLE CODE #
################
# setup the transformer model
"""
`options`: a python dictionary containing the following keys:
ckpt_file: str, a path specifying the pre-trained ckpt file
load_pretrain: str, ['True', 'False'], whether to load pre-trained weights
no_grad: str, ['True', 'False'], whether to have gradient flow over this class
dropout: float/str, use float to modify dropout value during downstream finetune, or use the str `default` for pre-train default values
spec_aug: str, ['True', 'False'], whether to apply SpecAugment on inputs (used for ASR training)
spec_aug_prev: str, ['True', 'False'], apply spec augment on input acoustic features if True, else apply on output representations (used for ASR training)
weighted_sum: str, ['True', 'False'], whether to use a learnable weighted sum to integrate hidden representations from all layers, if False then use the last
select_layer: int, select from all hidden representations, set to -1 to select the last (will only be used when weighted_sum is False)
    permute_input: str, ['True', 'False'], this attribute is for the forward method. If True, the input/output is in the shape of (T, B, D); if False, in (B, T, D)
"""
options = {
'ckpt_file' : './result/result_transformer/tera/fmllrBase960-F-N-K-libri/states-1000000.ckpt',
'load_pretrain' : 'True',
'no_grad' : 'True',
'dropout' : 'default',
'spec_aug' : 'False',
'spec_aug_prev' : 'True',
'weighted_sum' : 'False',
'select_layer' : -1,
'permute_input' : 'False',
}
transformer = TRANSFORMER(options=options, inp_dim=40)
# setup your downstream class model
classifier = example_classifier(input_dim=768, hidden_dim=128, class_num=2).cuda()
# construct the optimizer
params = list(transformer.named_parameters()) + list(classifier.named_parameters())
optimizer = get_optimizer(params=params, lr=4e-3, warmup_proportion=0.7, training_steps=50000)
# forward
example_inputs = torch.zeros(3, 1200, 40) # A batch of spectrograms: (batch_size, time_step, feature_size)
reps = transformer(example_inputs) # returns: (batch_size, time_step, feature_size)
labels = torch.LongTensor([0, 1, 0]).cuda()
loss = classifier(reps, labels)
# update
loss.backward()
optimizer.step()
# save
PATH_TO_SAVE_YOUR_MODEL = 'example.ckpt'
states = {'Classifier': classifier.state_dict(), 'Transformer': transformer.state_dict()}
# torch.save(states, PATH_TO_SAVE_YOUR_MODEL) |
src/coverage_test_helper.py | jmhodges/atheris | 964 | 12696587 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper library for coverage_test.py - coverage is added to this library."""
def simple_func(a):
return 2 * a
def if_func(a):
x = a
if x:
return 2
else:
return 3
def cmp_less(a, b):
return a < b
def cmp_greater(a, b):
return a > b
def cmp_const_less(a):
return 1 < a
def cmp_const_less_inverted(a):
return a < 1
def regex_match(re_obj, a):
re_obj.match(a)
|
testing/test_metastep_mixin.py | Neuraxio/Neuraxle | 519 | 12696592 | <gh_stars>100-1000
from neuraxle.pipeline import Pipeline
from neuraxle.base import BaseStep, MetaStepMixin
from neuraxle.union import Identity
from testing.mocks.step_mocks import SomeMetaStepWithHyperparams
class SomeMetaStep(MetaStepMixin, BaseStep):
def __init__(self, wrapped: BaseStep):
BaseStep.__init__(self)
MetaStepMixin.__init__(self, wrapped)
def transform(self, data_inputs):
self.wrapped.transform(data_inputs)
def test_metastepmixin_set_train_should_set_train_to_false():
p = SomeMetaStep(Pipeline([
Identity()
]))
p.set_train(False)
assert not p.is_train
assert not p.wrapped[0].is_train
assert not p.wrapped.is_train
def test_metastepmixin_set_train_should_set_train_to_true():
p = SomeMetaStep(Pipeline([
Identity()
]))
assert p.is_train
assert p.wrapped[0].is_train
assert p.wrapped.is_train
def test_basestep_str_representation_works_correctly():
output = str(SomeMetaStepWithHyperparams())
assert output == "SomeMetaStepWithHyperparams(SomeStepWithHyperparams(name='MockStep'), name='SomeMetaStepWithHyperparams')"
|
ptm/__init__.py | Devyadav1994/python-topic-model | 200 | 12696606 | from .lda_gibbs import GibbsLDA
from .lda_vb import vbLDA
from .slda_gibbs import GibbsSupervisedLDA
from .collabotm import CollaborativeTopicModel
from .rtm import RelationalTopicModel
from .diln import DILN
from .hmm_lda import HMM_LDA
from .at_model import AuthorTopicModel
|
tests/test_parser.py | vultureofficial/Vulture | 107 | 12696608 | <filename>tests/test_parser.py<gh_stars>100-1000
import unittest
from abrvalg import ast
from abrvalg.lexer import Lexer, TokenStream
from abrvalg.parser import Parser
class ParserTest(unittest.TestCase):
def _parse(self, s):
return Parser().parse(TokenStream(Lexer().tokenize(s))).body
def _assertNodesEq(self, s, nodes):
return self.assertEqual(self._parse(s), nodes)
def test_simple(self):
self._assertNodesEq(
'1',
[ast.Number(1)]
)
|
pygithub3/services/git_data/references.py | teamorchard/python-github3 | 107 | 12696666 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pygithub3.services.base import Service
class References(Service):
"""Consume `References API <http://developer.github.com/v3/git/refs/>`_"""
def get(self, ref, user=None, repo=None):
""" Get a reference
:param str ref: The name of the reference to get
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
.. note::
Remember that branch references look like "heads/<branch_name>"
"""
request = self.make_request('git_data.references.get', ref=ref,
user=user, repo=repo)
return self._get(request)
def list(self, namespace='', user=None, repo=None):
""" List all the references
:param str namespace: Limit the request to a particular type of
reference. For example, ``heads`` or ``tags``.
:param str user: Username
:param str repo: Repository
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('git_data.references.list', user=user,
repo=repo, namespace=namespace)
return self._get_result(request)
def create(self, data, user=None, repo=None):
""" Create a reference
:param dict data: Input. See `github refs doc`_
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('git_data.references.create', body=data,
user=user, repo=repo)
return self._post(request)
def update(self, ref, data, user=None, repo=None):
""" Update an existing reference
:param str ref: The SHA of the reference to update
:param dict data: Input. See `github refs doc`_
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('git_data.references.update', ref=ref,
body=data, user=user, repo=repo)
return self._patch(request)
def delete(self, ref, user=None, repo=None):
"""Delete a reference
:param str ref: The SHA of the reference to delete
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('git_data.references.delete', ref=ref,
user=user, repo=repo)
return self._delete(request)
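# Usage sketch (hypothetical identifiers; the exact facade attributes depend
# on how pygithub3 wires its services, so treat this as an assumption rather
# than documented API):
#
#   from pygithub3 import Github
#   gh = Github(user='octocat', repo='Hello-World')
#   master = gh.git_data.references.get('heads/master')
#   gh.git_data.references.create(
#       {'ref': 'refs/heads/feature', 'sha': master.object['sha']})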
|
scripts/export_function_js/export_function.py | Justin-Fisher/webots | 1,561 | 12696674 | <reponame>Justin-Fisher/webots
#!/usr/bin/env python3
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
from pyclibrary import CParser
listheaders = os.listdir("../../include/wren/")
buggyheaders = {"config.h", "drawable_texture.h", "file_import.h", "font.h", "overlay.h"}
listheaders = ["../../include/wren/" + header for header in listheaders if header
[len(header)-2:len(header)] == '.h' and not(header in buggyheaders)]
parser = CParser(listheaders)
parser.process_all()
# FUNCTIONS
functionSignatures = parser.defs['functions']
functionName = functionSignatures.keys()
functionName = map(lambda name: "_" + name + ", ", functionName)
if os.path.exists("../../src/wren/functions_to_export.txt"):
os.remove("../../src/wren/functions_to_export.txt")
functionName = list(functionName) + ["_wr_config_enable_point_size, _wr_config_get_line_scale, " +
"_wr_config_get_max_active_directional_light_count, " +
" _wr_config_get_max_active_point_light_count, " +
"_wr_config_get_max_active_spot_light_count, _wr_config_enable_shadows"]
# The next lines are not needed as long as we add manually a last function
# lastIndex= len(functionName) - 1
# lastName = functionName[lastIndex];
# lastName = lastName[:len(lastName)-2]
# functionName[lastIndex] = lastName;
f = open("../../src/wren/functions_to_export.txt", 'w')
f.write(''.join(functionName))
f.close()
# ENUM
all_values = parser.defs['values']
# Eliminate the include guard
all_values = [value[0] + " : " + str(value[1]) + ", \n" for value in all_values.items() if not ("_H" in value[0])]
if os.path.exists("../../resources/web/wwi/enum.js"):
os.remove("../../resources/web/wwi/enum.js")
f = open("../../resources/web/wwi/enum.js", 'w')
values_string = ''.join(all_values)
values_string = values_string[:len(values_string) - 3]
f.write("const Enum = {\n" + values_string + "}")
f.close()
print("OK")
except ImportError:
print("Fail to import pyclibrary")
|
model/db/zd_znode.py | knightoning/zkdash | 748 | 12696687 | <filename>model/db/zd_znode.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
Copyright (c) 2014, 掌阅科技 (Zhangyue Technology)
All rights reserved.
Summary: zd_znode.py
Author: zhuangshixiong
Created: 2015-06-16
"""
from peewee import CharField
from peewee import IntegerField
from peewee import SQL
from model.db.base import ZKDASH_DB, EnumField
class ZdZnode(ZKDASH_DB.Model):
"""ZdZnode Model
"""
id = IntegerField(primary_key=True, constraints=[SQL("AUTO_INCREMENT")])
cluster_name = CharField(max_length=64, null=True)
path = CharField(max_length=512, null=True)
    type = EnumField(enum_value="'0', '1'", constraints=[SQL("DEFAULT '0'")])  # whether the node is a regular node or a file node; defaults to regular
business = CharField(max_length=64, null=True)
deleted = EnumField(enum_value="'0', '1'", constraints=[SQL("DEFAULT '0'")])
class Meta(object):
"""表配置信息
"""
db_table = "zd_znode"
|
tests/integration/test_format_schema_on_server/test.py | chalice19/ClickHouse | 8,629 | 12696720 | <gh_stars>1000+
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance("instance", clickhouse_path_dir="clickhouse_path")
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
instance.query("CREATE DATABASE test")
yield cluster
finally:
cluster.shutdown()
def create_simple_table():
instance.query("DROP TABLE IF EXISTS test.simple")
instance.query(
"""
CREATE TABLE test.simple (key UInt64, value String)
ENGINE = MergeTree ORDER BY tuple();
"""
)
def test_protobuf_format_input(started_cluster):
create_simple_table()
instance.http_query(
"INSERT INTO test.simple SETTINGS format_schema='simple:KeyValuePair' FORMAT Protobuf",
"\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def",
)
assert instance.query("SELECT * from test.simple") == "1\tabc\n2\tdef\n"
def test_protobuf_format_output(started_cluster):
create_simple_table()
instance.query("INSERT INTO test.simple VALUES (1, 'abc'), (2, 'def')")
assert (
instance.http_query(
"SELECT * FROM test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'"
)
== "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def"
)
|
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/menu/urls.py | Nahid-Hassan/fullstack-software-development | 297 | 12696725 | from django.urls import path
from django.views.generic import TemplateView
app_name='menu'
urlpatterns = [
path('', TemplateView.as_view(template_name='menu/main_menu.html'), name='main'),
path('page1', TemplateView.as_view(template_name='menu/main_menu.html'), name='page1'),
path('page2', TemplateView.as_view(template_name='menu/main_menu.html'), name='page2'),
path('page3', TemplateView.as_view(template_name='menu/main_menu.html'), name='page3'),
]
|
testsuite/splineinverse-knots-ascend-reg/run.py | luyatshimbalanga/OpenShadingLanguage | 1,105 | 12696738 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_c_float_v_floatarray.tif test_splineinverse_c_float_v_floatarray")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_c_float_u_floatarray.tif test_splineinverse_c_float_u_floatarray")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_c_float_c_floatarray.tif test_splineinverse_c_float_c_floatarray")
outputs.append ("splineinverse_c_float_v_floatarray.tif")
outputs.append ("splineinverse_c_float_u_floatarray.tif")
outputs.append ("splineinverse_c_float_c_floatarray.tif")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_u_float_v_floatarray.tif test_splineinverse_u_float_v_floatarray")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_u_float_u_floatarray.tif test_splineinverse_u_float_u_floatarray")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_u_float_c_floatarray.tif test_splineinverse_u_float_c_floatarray")
outputs.append ("splineinverse_u_float_v_floatarray.tif")
outputs.append ("splineinverse_u_float_u_floatarray.tif")
outputs.append ("splineinverse_u_float_c_floatarray.tif")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_v_float_v_floatarray.tif test_splineinverse_v_float_v_floatarray")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_v_float_u_floatarray.tif test_splineinverse_v_float_u_floatarray")
command += testshade("-t 1 -g 64 64 --center -od uint8 -o Fout splineinverse_v_float_c_floatarray.tif test_splineinverse_v_float_c_floatarray")
outputs.append ("splineinverse_v_float_v_floatarray.tif")
outputs.append ("splineinverse_v_float_u_floatarray.tif")
outputs.append ("splineinverse_v_float_c_floatarray.tif")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_c_float_v_floatarray.tif test_deriv_splineinverse_c_float_v_floatarray")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_c_float_u_floatarray.tif test_deriv_splineinverse_c_float_u_floatarray")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_c_float_c_floatarray.tif test_deriv_splineinverse_c_float_c_floatarray")
outputs.append ("deriv_splineinverse_c_float_v_floatarray.tif")
outputs.append ("deriv_splineinverse_c_float_u_floatarray.tif")
outputs.append ("deriv_splineinverse_c_float_c_floatarray.tif")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_u_float_v_floatarray.tif test_deriv_splineinverse_u_float_v_floatarray")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_u_float_u_floatarray.tif test_deriv_splineinverse_u_float_u_floatarray")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_u_float_c_floatarray.tif test_deriv_splineinverse_u_float_c_floatarray")
outputs.append ("deriv_splineinverse_u_float_v_floatarray.tif")
outputs.append ("deriv_splineinverse_u_float_u_floatarray.tif")
outputs.append ("deriv_splineinverse_u_float_c_floatarray.tif")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_v_float_v_floatarray.tif test_deriv_splineinverse_v_float_v_floatarray")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_v_float_u_floatarray.tif test_deriv_splineinverse_v_float_u_floatarray")
command += testshade("--vary_udxdy --vary_vdxdy -t 1 -g 64 64 --center -od uint8 -o ValDxDyOut deriv_splineinverse_v_float_c_floatarray.tif test_deriv_splineinverse_v_float_c_floatarray")
outputs.append ("deriv_splineinverse_v_float_v_floatarray.tif")
outputs.append ("deriv_splineinverse_v_float_u_floatarray.tif")
outputs.append ("deriv_splineinverse_v_float_c_floatarray.tif")
# expect a few LSB failures
failthresh = 0.008
failpercent = 3
|
tests/test_subclassing.py | andrzejnovak/boost-histogram | 105 | 12696739 | import boost_histogram as bh
def test_subclass():
NEW_FAMILY = object()
class MyHist(bh.Histogram, family=NEW_FAMILY):
pass
class MyRegular(bh.axis.Regular, family=NEW_FAMILY):
__slots__ = ()
class MyIntStorage(bh.storage.Int64, family=NEW_FAMILY):
pass
class MyPowTransform(bh.axis.transform.Pow, family=NEW_FAMILY):
pass
h = MyHist(MyRegular(10, 0, 2, transform=MyPowTransform(2)), storage=MyIntStorage())
assert type(h) == MyHist
assert h._storage_type == MyIntStorage
assert type(h.axes[0]) == MyRegular
assert type(h.axes[0].transform) == MyPowTransform
def test_subclass_hist_only():
class MyHist(bh.Histogram):
pass
h = MyHist(bh.axis.Regular(10, 0, 2))
assert type(h) == MyHist
assert type(h.axes[0]) == bh.axis.Regular
|
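A hedged aside, not part of the test file above: a minimal standalone sketch of the same family-based subclassing mechanism the test exercises. The class and family names below are invented for illustration, and the fill data is arbitrary.

# Sketch only: mirrors the pattern asserted in test_subclass(), with made-up names/data.
import numpy as np
import boost_histogram as bh

MY_FAMILY = object()

class MyHist(bh.Histogram, family=MY_FAMILY):
    pass

class MyRegular(bh.axis.Regular, family=MY_FAMILY):
    __slots__ = ()

h = MyHist(MyRegular(10, 0, 2))
h.fill(np.array([0.1, 0.5, 0.5, 1.9]))    # same fill API as a plain bh.Histogram
print(h.sum())                            # 4.0: every sample falls inside [0, 2)
print(type(h.axes[0]).__name__)           # "MyRegular": the subclass is preserved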
src/iris/webhooks/rackspace.py | minhaz1/iris | 694 | 12696755 | <gh_stars>100-1000
from __future__ import absolute_import
from falcon import HTTPBadRequest
from iris.webhooks.webhook import webhook
class rackspace(webhook):
    def validate_post(self, body):
        if not all(k in body for k in ("event_id", "details")):
            raise HTTPBadRequest('missing event_id and/or details attributes')
def on_post(self, req, resp):
'''
This endpoint is compatible with the webhook posts from Rackspace.
Configure a Rackspace notification to post to a URL with the following
parameters
"http://iris:16649/v0/webhooks/rackspace?application=test-app&key=abc&plan=teamA"
        where the application and key parameters identify an application and its
        key registered in Iris. For every POST from Rackspace, a new incident will
        be created if the plan label is attached to an alert.
'''
plan = req.get_param('plan', required=False)
if plan is None:
raise HTTPBadRequest('missing plan in rackspace webhook url parameters')
super().on_post(req, resp, plan)
|
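A hedged aside, not part of the iris repository: a minimal client-side sketch of exercising the endpoint above. The host, port, and query values are the placeholders from the docstring, and the body carries only the two keys that validate_post() requires; a real Rackspace notification posts a richer payload.

# Sketch only: placeholder URL/credentials from the docstring, minimal body shape.
import requests

url = "http://iris:16649/v0/webhooks/rackspace"
params = {"application": "test-app", "key": "abc", "plan": "teamA"}
payload = {
    "event_id": "12345",                                  # hypothetical alarm id
    "details": {"state": "CRITICAL", "target": "web01"},  # hypothetical details
}
resp = requests.post(url, params=params, json=payload)
print(resp.status_code)                                   # inspect the webhook's response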
voltron/entry.py | jonasmr/voltron | 5,856 | 12696756 | """
This is the main entry point for Voltron from the debugger host's perspective.
This file is loaded into the debugger through whatever means the given host
supports.
LLDB:
(lldb) command script import /path/to/voltron/entry.py
GDB:
(gdb) source /path/to/voltron/entry.py
VDB:
(vdb) script /path/to/voltron/entry.py
WinDbg/CDB (via PyKD):
> .load pykd.pyd
> !py --global C:\path\to\voltron\entry.py
"""
log = None
try:
# fix path if it's clobbered by brew
import sys
if sys.platform == 'darwin':
py_base = '/System/Library/Frameworks/Python.framework/Versions/2.7/'
new_path = ['lib/python27.zip', 'lib/python2.7', 'lib/python2.7/plat-darwin', 'lib/python2.7/plat-mac',
'lib/python2.7/plat-mac/lib-scriptpackages', 'Extras/lib/python', 'lib/python2.7/lib-tk',
'lib/python2.7/lib-old', 'lib/python2.7/lib-dynload']
sys.path = [p for p in sys.path if 'Cellar' not in p] + [py_base + p for p in new_path]
except:
pass
try:
import logging
import os
import sys
blessed = None
import blessed
# add vtrace to the path so that dbg_vdb.py can import from vdb/vtrace.
if "vtrace" in locals():
def parent_directory(the_path):
return os.path.abspath(os.path.join(the_path, os.pardir))
def add_vdb_to_path(vtrace):
sys.path.append(parent_directory(parent_directory(vtrace.__file__)))
add_vdb_to_path(vtrace)
else:
pass
import voltron
from voltron.plugin import pm
from voltron.core import Server
log = voltron.setup_logging('debugger')
# figure out in which debugger host we are running
args = []
host = None
try:
import lldb
host = "lldb"
def invoke(*args):
voltron.command._invoke(*args)
except ImportError:
pass
try:
import gdb
host = "gdb"
except ImportError:
pass
try:
import pykd
host = "windbg"
except:
pass
if "vtrace" in locals():
        host = "vdb"
        # `db` is not defined in this file; it is expected to be provided in the
        # script's namespace by the vdb host when it runs entry.py
        args = [db]
if not host:
raise Exception("No debugger host is present")
# register any plugins that were loaded
pm.register_plugins()
# get the debugger plugin for the host we're in
plugin = pm.debugger_plugin_for_host(host)
if not voltron.server:
# set up command and adaptor instances
voltron.debugger = plugin.adaptor_class(*args)
voltron.command = plugin.command_class(*args)
# register command plugins now that we have a debugger host loaded
pm.register_command_plugins()
# create and start the voltron server
voltron.server = Server()
voltron.server.start()
print(blessed.Terminal().bold_red("Voltron loaded."))
if host == 'lldb' and not voltron.command.registered:
print("Run `voltron init` after you load a target.")
except Exception as e:
import traceback
msg = ("An error occurred while loading Voltron:\n\n{}"
"\nPlease ensure Voltron is installed correctly per the documentation: "
"https://github.com/snare/voltron/wiki/Installation").format(traceback.format_exc())
if blessed:
msg = blessed.Terminal().bold_red(msg)
if log:
log.exception("Exception raised while loading Voltron")
print(msg)
|
src/modules/model.py | imatge-upc/rsis | 132 | 12696761 | <reponame>imatge-upc/rsis
import torch
import torch.nn as nn
from clstm import ConvLSTMCell
import argparse
import torch.nn.functional as f
from torch.autograd import Variable
from torchvision import transforms, models
import math
from vision import VGG16, ResNet34, ResNet50, ResNet101
import sys
sys.path.append("..")
from utils.utils import get_skip_dims
class FeatureExtractor(nn.Module):
'''
Returns base network to extract visual features from image
'''
def __init__(self,args):
super(FeatureExtractor,self).__init__()
skip_dims_in = get_skip_dims(args.base_model)
if args.base_model == 'resnet34':
self.base = ResNet34()
self.base.load_state_dict(models.resnet34(pretrained=True).state_dict())
elif args.base_model == 'resnet50':
self.base = ResNet50()
self.base.load_state_dict(models.resnet50(pretrained=True).state_dict())
elif args.base_model == 'resnet101':
self.base = ResNet101()
self.base.load_state_dict(models.resnet101(pretrained=True).state_dict())
elif args.base_model == 'vgg16':
self.base = VGG16()
self.base.load_state_dict(models.vgg16(pretrained=True).state_dict())
else:
raise Exception("The base model you chose is not supported !")
self.hidden_size = args.hidden_size
self.kernel_size = args.kernel_size
self.padding = 0 if self.kernel_size == 1 else 1
self.sk5 = nn.Conv2d(skip_dims_in[0],self.hidden_size,self.kernel_size,padding=self.padding)
self.sk4 = nn.Conv2d(skip_dims_in[1],self.hidden_size,self.kernel_size,padding=self.padding)
self.sk3 = nn.Conv2d(skip_dims_in[2],self.hidden_size/2,self.kernel_size,padding=self.padding)
self.sk2 = nn.Conv2d(skip_dims_in[3],self.hidden_size/4,self.kernel_size,padding=self.padding)
self.sk1 = nn.Conv2d(skip_dims_in[4],self.hidden_size/8,self.kernel_size,padding=self.padding)
self.bn5 = nn.BatchNorm2d(self.hidden_size)
self.bn4 = nn.BatchNorm2d(self.hidden_size)
self.bn3 = nn.BatchNorm2d(self.hidden_size/2)
self.bn2 = nn.BatchNorm2d(self.hidden_size/4)
self.bn1 = nn.BatchNorm2d(self.hidden_size/8)
def forward(self,x,semseg=False, raw = False):
x5,x4,x3,x2,x1 = self.base(x)
x5_skip = self.bn5(self.sk5(x5))
x4_skip = self.bn4(self.sk4(x4))
x3_skip = self.bn3(self.sk3(x3))
x2_skip = self.bn2(self.sk2(x2))
x1_skip = self.bn1(self.sk1(x1))
if semseg:
return x5
elif raw:
return x5, x4, x3, x2, x1
else:
return x5_skip, x4_skip, x3_skip, x2_skip, x1_skip
class RSIS(nn.Module):
"""
The recurrent decoder
"""
def __init__(self, args):
super(RSIS,self).__init__()
skip_dims_in = get_skip_dims(args.base_model)
self.hidden_size = args.hidden_size
self.num_classes = args.num_classes
self.kernel_size = args.kernel_size
padding = 0 if self.kernel_size == 1 else 1
self.dropout = args.dropout
self.dropout_stop = args.dropout_stop
self.dropout_cls = args.dropout_cls
self.skip_mode = args.skip_mode
# convlstms have decreasing dimension as width and height increase
skip_dims_out = [self.hidden_size, self.hidden_size/2,
self.hidden_size/4,self.hidden_size/8,
self.hidden_size/16]
# initialize layers for each deconv stage
self.clstm_list = nn.ModuleList()
# 5 is the number of deconv steps that we need to reach image size in the output
for i in range(len(skip_dims_out)):
if i == 0:
clstm_in_dim = self.hidden_size
else:
clstm_in_dim = skip_dims_out[i-1]
if self.skip_mode == 'concat':
clstm_in_dim*=2
clstm_i = ConvLSTMCell(args, clstm_in_dim, skip_dims_out[i],self.kernel_size, padding = padding)
self.clstm_list.append(clstm_i)
self.conv_out = nn.Conv2d(skip_dims_out[-1], 1,self.kernel_size, padding = padding)
# calculate the dimensionality of classification vector
# side class activations are taken from the output of the convlstm
# therefore we need to compute the sum of the dimensionality of outputs
# from all convlstm layers
fc_dim = 0
for sk in skip_dims_out:
fc_dim+=sk
self.fc_class = nn.Linear(fc_dim,self.num_classes)
self.fc_stop = nn.Linear(fc_dim,1)
def forward(self, skip_feats, prev_hidden_list):
clstm_in = skip_feats[0]
skip_feats = skip_feats[1:]
side_feats = []
hidden_list = []
for i in range(len(skip_feats)+1):
# hidden states will be initialized the first time forward is called
if prev_hidden_list is None:
state = self.clstm_list[i](clstm_in,None)
else:
# else we take the ones from the previous step for the forward pass
state = self.clstm_list[i](clstm_in,prev_hidden_list[i])
hidden_list.append(state)
hidden = state[0]
if self.dropout > 0:
hidden = nn.Dropout2d(self.dropout)(hidden)
side_feats.append(nn.MaxPool2d(clstm_in.size()[2:])(hidden))
# apply skip connection
if i < len(skip_feats):
skip_vec = skip_feats[i]
upsample = nn.UpsamplingBilinear2d(size = (skip_vec.size()[-2],skip_vec.size()[-1]))
hidden = upsample(hidden)
# skip connection
if self.skip_mode == 'concat':
clstm_in = torch.cat([hidden,skip_vec],1)
elif self.skip_mode == 'sum':
clstm_in = hidden + skip_vec
elif self.skip_mode == 'mul':
clstm_in = hidden*skip_vec
elif self.skip_mode == 'none':
clstm_in = hidden
else:
raise Exception('Skip connection mode not supported !')
else:
self.upsample = nn.UpsamplingBilinear2d(size = (hidden.size()[-2]*2,hidden.size()[-1]*2))
hidden = self.upsample(hidden)
clstm_in = hidden
out_mask = self.conv_out(clstm_in)
# classification branch
side_feats = torch.cat(side_feats,1).squeeze()
if self.dropout_cls > 0:
class_feats = nn.Dropout(self.dropout_cls)(side_feats)
else:
class_feats = side_feats
class_feats = self.fc_class(class_feats)
if self.dropout_stop > 0:
stop_feats = nn.Dropout(self.dropout_stop)(side_feats)
else:
stop_feats = side_feats
stop_probs = self.fc_stop(stop_feats)
# the log is computed in the objective function
class_probs = nn.Softmax()(class_feats)
return out_mask, class_probs, stop_probs, hidden_list
|
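A hedged aside, not part of the repository file above: a minimal sketch of how FeatureExtractor and RSIS are typically chained for a single image, assuming the Python 2 / early-PyTorch environment the module targets (its channel widths rely on integer division). The Namespace lists only the attributes the constructors read, with guessed values rather than the paper's settings.

# Sketch only: hyperparameter values are guesses; FeatureExtractor/RSIS come from the module above.
from argparse import Namespace
import torch

args = Namespace(base_model='resnet34', hidden_size=128, kernel_size=3,
                 num_classes=21, dropout=0.0, dropout_stop=0.0, dropout_cls=0.0,
                 skip_mode='concat')

encoder = FeatureExtractor(args)
decoder = RSIS(args)

x = torch.randn(1, 3, 224, 224)        # one RGB image
skips = encoder(x)                     # five skip tensors, coarsest first
hidden = None
for step in range(10):                 # one recurrence per predicted object instance
    mask, class_probs, stop_probs, hidden = decoder(skips, hidden)
    # mask holds single-channel mask logits; stop_probs signals when to stop unrolling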
Mariana/tests/datasetmaps_tests.py | rsumner31/Mariana-212 | 182 | 12696764 | import unittest
import Mariana.layers as ML
import Mariana.decorators as dec
import Mariana.costs as MC
import Mariana.regularizations as MR
import Mariana.scenari as MS
import Mariana.activations as MA
import Mariana.training.datasetmaps as MD
import theano.tensor as tt
import numpy
class DastasetMapsTests(unittest.TestCase):
def setUp(self) :
pass
def tearDown(self) :
pass
def test_classSets(self) :
def sample(cls) :
o = cls.getAll("onehot")
n = cls.getAll("classNumber")
p = cls.getAll("input")
return o, n, p
l1 = numpy.arange(100)
l2 = numpy.arange(10) + 10
cls = MD.ClassSets( sets = [ ("l1", l1), ("l2", l2) ], sampleSize = len(l1) )
o, n, p = sample(cls)
for i in xrange(len(o)) :
if n[i] == 0. :
self.assertEquals(o[i][1], 0.)
self.assertEquals(o[i][0], 1.)
else :
self.assertEquals(o[i][0], 0.)
self.assertEquals(o[i][1], 1.)
nbTrials = 10000
nb2 = 0.
for i in xrange(nbTrials) :
o, n, p = sample(cls)
for j in xrange(len(p)) :
if p[j] > 10 :
nb2 += 1
f = nb2/float(len(p)*nbTrials)
r = abs(f-0.5)
self.assertTrue(r < 2)
if __name__ == '__main__' :
import Mariana.settings as MSET
MSET.VERBOSE = False
unittest.main()
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.