| max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
doc/rst/getting_started/src/hello_world_python/hello_world_rec.py | SirArep/ecal | 493 | 12616977 |
import sys
import time
import ecal.core.core as ecal_core
from ecal.core.subscriber import StringSubscriber
# Callback for receiving messages
def callback(topic_name, msg, time):
print("Received: {}".format(msg))
if __name__ == "__main__":
# Initialize eCAL
ecal_core.initialize(sys.argv, "Python Hello World Subscriber")
# Create a subscriber that listens on the "hello_world_python_topic"
sub = StringSubscriber("hello_world_python_topic")
# Set the Callback
sub.set_callback(callback)
# Just don't exit
while ecal_core.ok():
time.sleep(0.5)
# finalize eCAL API
ecal_core.finalize()
|
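# Editor's sketch (hedged): a minimal publisher counterpart to the subscriber above, assuming
# eCAL's StringPublisher API (ecal.core.publisher.StringPublisher); illustrative, not part of the row.
import sys
import time
import ecal.core.core as ecal_core
from ecal.core.publisher import StringPublisher
if __name__ == "__main__":
    # Initialize eCAL and publish on the topic the subscriber listens to
    ecal_core.initialize(sys.argv, "Python Hello World Publisher")
    pub = StringPublisher("hello_world_python_topic")
    while ecal_core.ok():
        pub.send("Hello World from Python")
        time.sleep(0.5)
    ecal_core.finalize()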
mods/TowerofHanoi/towers.py | SummitChen/opennero | 215 | 12617005 |
def On(A, B):
return ("On", A, B)
def Clear(A):
return ("Clear", A)
def Smaller(A, B):
return ("Smaller", A, B)
class Towers(object):
Pole1 = 'Pole1'
Pole2 = 'Pole2'
Pole3 = 'Pole3'
POLES = ['Pole1', 'Pole2', 'Pole3']
@classmethod
def On(cls, A, B):
return On(A, B)
@classmethod
def Clear(cls, A):
return Clear(A)
@classmethod
def Smaller(cls, A, B):
return Smaller(A, B)
@classmethod
def Move(cls, STATE, Disk, Source, Dest):
if Clear(Disk) in STATE and On(Disk, Source) in STATE and Clear(Dest) in STATE and Smaller(Disk, Dest) in STATE:
STATE.add( On(Disk, Dest) )
STATE.remove( On(Disk, Source) )
STATE.remove( Clear( Dest ) )
STATE.add( Clear( Source ) )
return True
else:
return False
@classmethod
def UnMove(cls, STATE, Disk, Source, Dest):
if On(Disk, Dest) in STATE and On(Disk, Source) not in STATE and Clear(Dest) not in STATE and Clear(Source) in STATE:
STATE.remove( On(Disk, Dest) )
STATE.add( On(Disk, Source) )
STATE.add( Clear( Dest ) )
STATE.remove( Clear( Source ) )
return True
else:
return False
# actions is just a list of pairs of functions to do or undo an action
# in general we could make things general and check for function arity
# but currently the code only works with Disk, Source, Dest
# ACTIONS = [ (Move, UnMove) ]
@classmethod
def get_actions(cls):
return [ (cls.Move, cls.UnMove) ]
@classmethod
def get_pole(cls, state, disk):
""" get the pole of the disk given the state """
if disk in cls.POLES:
return disk
for p in state:
if p[0] == 'On' and p[1] == disk:
if p[2] in cls.POLES:
return p[2]
else:
return cls.get_pole(state - set([p]), p[2])
return None
# action primitives
# move without getting stuff
MOVES = { \
(Pole1, Pole2): [4, 1, 5],
(Pole1, Pole3): [4, 1, 1, 5],
(Pole2, Pole1): [5, 1, 4],
(Pole2, Pole3): [4, 1, 5],
(Pole3, Pole1): [5, 1, 1, 4],
(Pole3, Pole2): [5, 1, 4]
}
# move with pick up and put down
CARRY_MOVES = {}
for (source, dest) in MOVES:
CARRY_MOVES[(source, dest)] = [3] + MOVES[(source, dest)] + [2]
class Towers2(Towers):
Disk1 = 'Disk1'
Disk2 = 'Disk2'
Pole1 = 'Pole1'
Pole2 = 'Pole2'
Pole3 = 'Pole3'
DISKS = ['Disk1', 'Disk2']
POLES = ['Pole1', 'Pole2', 'Pole3']
LITERALS = [Disk1, Disk2, Pole1, Pole2, Pole3]
INIT = set([
Clear(Disk1),
On(Disk1, Disk2),
On(Disk2, Pole1),
Clear(Pole2),
Clear(Pole3),
Smaller(Disk1, Pole1),
Smaller(Disk1, Pole2),
Smaller(Disk1, Pole3),
Smaller(Disk1, Disk2),
Smaller(Disk2, Pole1),
Smaller(Disk2, Pole2),
Smaller(Disk2, Pole3),
])
GOAL = set([
On(Disk1, Disk2),
On(Disk2, Pole3)
])
class Towers3(Towers):
Disk1 = 'Disk1'
Disk2 = 'Disk2'
Disk3 = 'Disk3'
Pole1 = 'Pole1'
Pole2 = 'Pole2'
Pole3 = 'Pole3'
DISKS = ['Disk1', 'Disk2', 'Disk3']
POLES = ['Pole1', 'Pole2', 'Pole3']
LITERALS = [Disk1, Disk2, Disk3, Pole1, Pole2, Pole3]
INIT = set([
Clear(Disk1),
On(Disk1, Disk2),
On(Disk2, Disk3),
On(Disk3, Pole1),
Clear(Pole2),
Clear(Pole3),
Smaller(Disk1, Pole1),
Smaller(Disk1, Pole2),
Smaller(Disk1, Pole3),
Smaller(Disk1, Disk2),
Smaller(Disk1, Disk3),
Smaller(Disk2, Pole1),
Smaller(Disk2, Pole2),
Smaller(Disk2, Pole3),
Smaller(Disk2, Disk3),
Smaller(Disk3, Pole1),
Smaller(Disk3, Pole2),
Smaller(Disk3, Pole3),
])
GOAL = set([
On(Disk1, Disk2),
On(Disk2, Disk3),
On(Disk3, Pole3)
])
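# Editor's sketch (hedged): applying the Move operator above to solve the two-disk instance;
# each Move call mutates the state set and returns True only if its preconditions hold.
if __name__ == "__main__":
    state = set(Towers2.INIT)  # copy so the class-level INIT set is not mutated
    assert Towers2.Move(state, Towers2.Disk1, Towers2.Disk2, Towers2.Pole2)
    assert Towers2.Move(state, Towers2.Disk2, Towers2.Pole1, Towers2.Pole3)
    assert Towers2.Move(state, Towers2.Disk1, Towers2.Pole2, Towers2.Disk2)
    print("Towers2 goal reached:", Towers2.GOAL.issubset(state))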
|
regtests/calling/starargs.py | ahakingdom/Rusthon | 622 | 12617014 |
from runtime import *
"""unpack starargs"""
def f(x, a, b, c):
return x+a+b+c
def f2(x,y,z, w=0):
return x+y+z+w
def main():
a = [1,1,1]
assert( f(1, *a) == 4)
assert( f2(*a, w=10) == 13)
b = [1,1]
assert( f2(100, *b, w=10) == 112)
main()
|
ambari-agent/src/test/python/resource_management/TestFcntlBasedProcessLock.py | likenamehaojie/Apache-Ambari-ZH | 1,664 | 12617017 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import tempfile
import time
import shutil
import multiprocessing
from unittest import TestCase
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
class TestFcntlBasedProcessLock(TestCase):
@not_for_platform(PLATFORM_WINDOWS)
def test_fcntl_based_lock(self):
"""
Test blocking_lock using multiprocessing.Lock
"""
test_temp_dir = tempfile.mkdtemp(prefix="test_file_based_lock")
try:
lock_file = os.path.join(test_temp_dir, "lock")
# Raises an exception if mutex.acquire fails.
# It indicates that more than one process acquired the lock.
def dummy_task(index, mutex):
with FcntlBasedProcessLock(lock_file, skip_fcntl_failures = False):
if (not mutex.acquire(block = False)):
raise Exception("ERROR: FcntlBasedProcessLock was acquired by several processes")
time.sleep(0.1)
mutex.release()
mutex = multiprocessing.Lock()
process_list = []
for i in range(0, 3):
p = multiprocessing.Process(target=dummy_task, args=(i, mutex))
p.start()
process_list.append(p)
for p in process_list:
p.join(2)
self.assertEquals(p.exitcode, 0)
finally:
shutil.rmtree(test_temp_dir)
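# Editor's sketch (hedged): the core idea the class under test wraps -- an exclusive, blocking
# inter-process lock built on the standard-library fcntl module. SimpleFcntlLock is an
# illustrative name, not Ambari's implementation.
import fcntl
class SimpleFcntlLock(object):
    def __init__(self, path):
        self.path = path
        self.fd = None
    def __enter__(self):
        self.fd = open(self.path, "a")
        # LOCK_EX blocks until no other process holds the lock on this file
        fcntl.flock(self.fd, fcntl.LOCK_EX)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        fcntl.flock(self.fd, fcntl.LOCK_UN)
        self.fd.close()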
|
h5Nastran/h5Nastran/h5nastran/_result.py | ACea15/pyNastran | 293 | 12617023 |
from __future__ import print_function, absolute_import
from collections import OrderedDict
import numpy as np
import tables
from six import iteritems
from ._punch import H5NastranResultPunch
from ._op2 import H5NastranResultOP2
class H5NastranResult(H5NastranResultPunch, H5NastranResultOP2):
def __init__(self, *args, **kwargs):
super(H5NastranResult, self).__init__(*args, **kwargs)
|
catalyst/runners/__init__.py | sergunya17/catalyst | 2,693 | 12617040 |
# flake8: noqa
from catalyst.runners.supervised import ISupervisedRunner
from catalyst.runners.self_supervised import ISelfSupervisedRunner
from catalyst.runners.runner import Runner, SelfSupervisedRunner, SupervisedRunner
from catalyst.runners.config import (
ConfigRunner,
SupervisedConfigRunner,
SelfSupervisedConfigRunner,
)
from catalyst.settings import SETTINGS
if SETTINGS.hydra_required:
from catalyst.runners.hydra import HydraRunner, SupervisedHydraRunner
__all__ = [
"Runner",
"ISupervisedRunner",
"ISelfSupervisedRunner",
"SupervisedRunner",
"ConfigRunner",
"SupervisedConfigRunner",
"HydraRunner",
"SupervisedHydraRunner",
"SelfSupervisedRunner",
"SelfSupervisedConfigRunner",
]
else:
__all__ = [
"Runner",
"ISupervisedRunner",
"ISelfSupervisedRunner",
"SupervisedRunner",
"ConfigRunner",
"SupervisedConfigRunner",
"SelfSupervisedRunner",
"SelfSupervisedConfigRunner",
]
|
Src/StdLib/Lib/site-packages/win32comext/axdebug/documents.py | cwensley/ironpython2 | 1,078 | 12617044 |
""" Management of documents for AXDebugging.
"""
import axdebug, gateways
import pythoncom
from util import _wrap, _wrap_remove, RaiseNotImpl, trace
from win32com.server.util import unwrap
import codecontainer
import contexts
from win32com.server.exception import Exception
import win32api, winerror, os, string, sys
#def trace(*args):
# pass
def GetGoodFileName(fname):
if fname[0] != "<":
return win32api.GetFullPathName(fname)
return fname
class DebugDocumentProvider(gateways.DebugDocumentProvider):
def __init__(self, doc):
self.doc = doc
def GetName(self, dnt):
return self.doc.GetName(dnt)
def GetDocumentClassId(self):
return self.doc.GetDocumentClassId()
def GetDocument(self):
return self.doc
class DebugDocumentText(gateways.DebugDocumentInfo, gateways.DebugDocumentText, gateways.DebugDocument):
_com_interfaces_ = gateways.DebugDocumentInfo._com_interfaces_ + \
gateways.DebugDocumentText._com_interfaces_ + \
gateways.DebugDocument._com_interfaces_
_public_methods_ = gateways.DebugDocumentInfo._public_methods_ + \
gateways.DebugDocumentText._public_methods_ + \
gateways.DebugDocument._public_methods_
# A class which implements a DebugDocumentText, using the functionality
# provided by a codeContainer
def __init__(self, codeContainer):
gateways.DebugDocumentText.__init__(self)
gateways.DebugDocumentInfo.__init__(self)
gateways.DebugDocument.__init__(self)
self.codeContainer = codeContainer
def _Close(self):
self.docContexts = None
# self.codeContainer._Close()
self.codeContainer = None
# IDebugDocumentInfo
def GetName(self, dnt):
return self.codeContainer.GetName(dnt)
def GetDocumentClassId(self):
return "{DF630910-1C1D-11d0-AE36-8C0F5E000000}"
# IDebugDocument has no methods!
#
# IDebugDocumentText methods.
# def GetDocumentAttributes
def GetSize(self):
# trace("GetSize")
return self.codeContainer.GetNumLines(), self.codeContainer.GetNumChars()
def GetPositionOfLine(self, cLineNumber):
return self.codeContainer.GetPositionOfLine(cLineNumber)
def GetLineOfPosition(self, charPos):
return self.codeContainer.GetLineOfPosition(charPos)
def GetText(self, charPos, maxChars, wantAttr):
# Get all the attributes, else the tokenizer will get upset.
# XXX - not yet!
# trace("GetText", charPos, maxChars, wantAttr)
cont = self.codeContainer
attr = cont.GetSyntaxColorAttributes()
return cont.GetText(), attr
def GetPositionOfContext(self, context):
trace("GetPositionOfContext", context)
context = unwrap(context)
return context.offset, context.length
# Return a DebugDocumentContext.
def GetContextOfPosition(self, charPos, maxChars):
# Make one
doc = _wrap(self, axdebug.IID_IDebugDocument)
rc = self.codeContainer.GetCodeContextAtPosition(charPos)
return rc.QueryInterface(axdebug.IID_IDebugDocumentContext)
class CodeContainerProvider:
"""An abstract Python class which provides code containers!
Given a Python file name (as the debugger knows it by) this will
return a CodeContainer interface suitable for use.
This provides a simple base implementation that simply supports
a dictionary of nodes and providers.
"""
def __init__(self):
self.ccsAndNodes = {}
def AddCodeContainer(self, cc, node = None):
fname = GetGoodFileName(cc.fileName)
self.ccsAndNodes[fname] = cc, node
def FromFileName(self, fname):
cc, node = self.ccsAndNodes.get(GetGoodFileName(fname), (None, None))
# if cc is None:
# print "FromFileName for %s returning None" % fname
return cc
def Close(self):
for cc, node in self.ccsAndNodes.itervalues():
try:
# Must close the node before closing the provider
# as node may make calls on provider (eg Reset breakpoints etc)
if node is not None:
node.Close()
cc._Close()
except pythoncom.com_error:
pass
self.ccsAndNodes = {}
|
tasks/imdb_tcn.py | evanharwin/keras-tcn | 1,473 | 12617057 |
"""
#Trains a TCN on the IMDB sentiment classification task.
Output after 1 epoch on CPU: ~0.8611
Time per epoch on CPU (Core i7): ~64s.
Based on: https://github.com/keras-team/keras/blob/master/examples/imdb_bidirectional_lstm.py
"""
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.datasets import imdb
from tensorflow.keras.layers import Dense, Embedding
from tensorflow.keras.preprocessing import sequence
from tcn import TCN
max_features = 20000
# cut texts after this number of words
# (among top max_features most common words)
maxlen = 100
batch_size = 32
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
model = Sequential([
Embedding(max_features, 128, input_shape=(maxlen,)),
TCN(kernel_size=6, dilations=[1, 2, 4, 8, 16]),
Dense(1, activation='sigmoid')
])
print(f'TCN receptive field: {model.layers[1].receptive_field}.')
model.summary()
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
print('Train...')
model.fit(
x_train, y_train,
batch_size=batch_size,
validation_data=[x_test, y_test]
)
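# Editor's sketch (hedged): quick sanity check after training -- score the held-out set and run
# one prediction; reuses the model, x_test and y_test defined above.
loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
print('Test accuracy: {:.4f}'.format(acc))
print('First test prediction:', model.predict(x_test[:1])[0, 0])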
|
dynamo/plot/cell_cycle.py | xing-lab-pitt/dynamo-release | 236 | 12617072 |
from anndata import AnnData
from matplotlib.axes import Axes
from typing import Union, Optional
from ..tools.utils import update_dict
from .utils import save_fig
def cell_cycle_scores(
adata: AnnData,
cells: Optional[list] = None,
save_show_or_return: str = "show",
save_kwargs: dict = {},
) -> Union[None, Axes]:
"""Plot a heatmap of cells ordered by cell cycle position
Parameters
----------
adata: :class:`~anndata.AnnData`
cells: a list of cell ids used to subset the adata object.
save_show_or_return:
Whether to save, show or return the figure.
save_kwargs:
A dictionary that will be passed to the save_fig function. By default it is an empty dictionary and the
save_fig function will use {"path": None, "prefix": 'scatter', "dpi": None, "ext": 'pdf', "transparent":
True, "close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that
properly modifies those keys according to your needs.
"""
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from matplotlib.pyplot import colorbar
if cells is None:
cell_cycle_scores = adata.obsm["cell_cycle_scores"].dropna()
else:
cell_cycle_scores = adata[cells, :].obsm["cell_cycle_scores"].dropna().dropna()
cell_cycle_scores.sort_values(
["cell_cycle_phase", "cell_cycle_progress"],
ascending=[True, False],
inplace=True,
)
# based on https://stackoverflow.com/questions/47916205/seaborn-heatmap-move-colorbar-on-top-of-the-plot
# answer 4
# plot heatmap without colorbar
ax = sns.heatmap(
cell_cycle_scores[["G1-S", "S", "G2-M", "M", "M-G1"]].transpose(),
annot=False,
xticklabels=False,
linewidths=0,
cbar=False,
) #
# split axes of heatmap to put colorbar
ax_divider = make_axes_locatable(ax)
# define size and padding of axes for colorbar
cax = ax_divider.append_axes("right", size="2%", pad="0.5%", aspect=4, anchor="NW")
# make colorbar for heatmap.
# Heatmap returns an axes obj but you need to get a mappable obj (get_children)
colorbar(ax.get_children()[0], cax=cax, ticks=[-0.9, 0, 0.9])
if save_show_or_return == "save":
s_kwargs = {
"path": None,
"prefix": "plot_direct_graph",
"dpi": None,
"ext": "pdf",
"transparent": True,
"close": True,
"verbose": True,
}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return ax
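# Editor's sketch (hedged): driving the plot with a tiny synthetic AnnData object whose obsm
# layout mirrors what the function expects (five score columns plus phase/progress columns);
# the helper name below is illustrative and not part of dynamo's API.
def _demo_cell_cycle_scores():
    import numpy as np
    import pandas as pd
    from anndata import AnnData
    n = 20
    adata = AnnData(X=np.random.rand(n, 5))
    scores = pd.DataFrame(np.random.rand(n, 5), columns=["G1-S", "S", "G2-M", "M", "M-G1"], index=adata.obs_names)
    scores["cell_cycle_phase"] = np.random.choice(["G1-S", "S", "G2-M", "M", "M-G1"], n)
    scores["cell_cycle_progress"] = np.random.rand(n)
    adata.obsm["cell_cycle_scores"] = scores
    return cell_cycle_scores(adata, save_show_or_return="return")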
|
_solved/solutions/case-conflict-mapping41.py | lleondia/geopandas-tutorial | 341 | 12617092 |
data_within_border['NAME_AP'].value_counts()
|
network/gra_transf_inpt5_new_dropout_2layerMLP.py | Team-Squad-Up/multigraph_transformer | 268 | 12617100 |
import math
import numpy as np  # used in make_model's parameter count (np.prod)
import torch
import torch.nn as nn
import torch.nn.functional as F
from .graph_transformer_layers_new_dropout import *
import ipdb
class GraphTransformerEncoder(nn.Module):
def __init__(self, coord_input_dim, feat_input_dim, feat_dict_size, n_layers=6, n_heads=8,
embed_dim=512, feedforward_dim=2048, normalization='batch', dropout=0.1):
super(GraphTransformerEncoder, self).__init__()
# Embedding/Input layers
self.coord_embed = nn.Linear(coord_input_dim, embed_dim, bias=False)
self.feat_embed = nn.Embedding(feat_dict_size, embed_dim)
#self.in_drop = nn.Dropout(dropout)
# Transformer blocks
self.transformer_layers = nn.ModuleList([
GraphTransformerLayer(n_heads, embed_dim * 3, feedforward_dim, normalization, dropout)
for _ in range(n_layers)
])
def forward(self, coord, flag, pos, attention_mask=None):
# Embed inputs to embed_dim
#h = self.coord_embed(coord) + self.feat_embed(flag) + self.feat_embed(pos)
h = torch.cat((self.coord_embed(coord), self.feat_embed(flag)), dim=2)
h = torch.cat((h, self.feat_embed(pos)), dim=2)
#h = self.in_drop(h)
# Perform n_layers of Graph Transformer blocks
for layer in self.transformer_layers:
h = layer(h, mask=attention_mask)
return h
# modified on 2019 10 23.
class GraphTransformerClassifier(nn.Module):
def __init__(self, n_classes, coord_input_dim, feat_input_dim, feat_dict_size,
n_layers=6, n_heads=8, embed_dim=512, feedforward_dim=2048,
normalization='batch', dropout=0.1, mlp_classifier_dropout = 0.1):
super(GraphTransformerClassifier, self).__init__()
self.encoder = GraphTransformerEncoder(
coord_input_dim, feat_input_dim, feat_dict_size, n_layers,
n_heads, embed_dim, feedforward_dim, normalization, dropout)
self.mlp_classifier = nn.Sequential(
nn.Dropout(mlp_classifier_dropout),
nn.Linear(embed_dim * 3, feedforward_dim, bias=True),
nn.ReLU(),
# TODO
nn.Dropout(mlp_classifier_dropout),
nn.Linear(feedforward_dim, feedforward_dim, bias=True),
nn.ReLU(),
#nn.Dropout(mlp_classifier_dropout),
nn.Linear(feedforward_dim, n_classes, bias=True)
)
# self.g1 = nn.Linear(embed_dim, embed_dim, bias=False)
# self.g2 = nn.Linear(embed_dim, embed_dim, bias=False)
def forward(self, coord, flag, pos, attention_mask=None,
padding_mask=None, true_seq_length=None):
"""
Args:
coord: Input coordinates (batch_size, seq_length, coord_input_dim)
# TODO feat: Input features (batch_size, seq_length, feat_input_dim)
attention_mask: Masks for attention computation (batch_size, seq_length, seq_length)
Attention mask should contain -inf if attention is not possible
(i.e. mask is a negative adjacency matrix)
padding_mask: Mask indicating padded elements in input (batch_size, seq_length)
Padding mask element should be 1 if valid element, 0 if padding
(i.e. mask is a boolean multiplicative mask)
true_seq_length: True sequence lengths for input (batch_size, )
Used for computing true mean of node embeddings for graph embedding
Returns:
logits: Un-normalized logits for class prediction (batch_size, n_classes)
"""
# Embed input sequence
h = self.encoder(coord, flag, pos, attention_mask)
# h = torch.sigmoid(self.g1(h)) * self.g2(h)
# Mask out padding embeddings to zero
if padding_mask is not None:
masked_h = h * padding_mask.type_as(h)
g = masked_h.sum(dim = 1)
# g = masked_h.sum(dim=1)/true_seq_length.type_as(h)
else:
g = h.sum(dim=1)
# Compute logits
logits = self.mlp_classifier(g)
return logits
def make_model(n_classes=345, coord_input_dim=2, feat_input_dim=2, feat_dict_size=104,
n_layers=6, n_heads=8, embed_dim=512, feedforward_dim=2048,
normalization='batch', dropout=0.1, mlp_classifier_dropout = 0.1):
model = GraphTransformerClassifier(
n_classes, coord_input_dim, feat_input_dim, feat_dict_size, n_layers,
n_heads, embed_dim, feedforward_dim, normalization, dropout, mlp_classifier_dropout)
print(model)
nb_param = 0
for param in model.parameters():
nb_param += np.prod(list(param.data.size()))
print('Number of parameters: ', nb_param)
return model
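# Editor's sketch (hedged): how the factory above is typically exercised; tensor shapes are
# illustrative, and GraphTransformerLayer comes from the package's own layers module, so this
# is not a standalone script.
def _demo_make_model():
    model = make_model(n_classes=345, coord_input_dim=2, feat_dict_size=104,
                       n_layers=2, n_heads=4, embed_dim=64, feedforward_dim=128)
    coord = torch.rand(8, 100, 2)              # (batch, stroke points, xy coordinates)
    flag = torch.randint(0, 104, (8, 100))     # categorical pen-state ids for feat_embed
    pos = torch.randint(0, 104, (8, 100))      # categorical position ids for feat_embed
    logits = model(coord, flag, pos)           # (batch, n_classes) un-normalized scores
    return logits.shape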
|
checkov/cloudformation/checks/resource/aws/KMSKeyWildCardPrincipal.py | kylelaker/checkov | 4,013 | 12617120 |
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
def get_recursively(search_dict, field):
"""
Takes a dict with nested lists and dicts,
and searches all dicts for a key of the field
provided.
"""
fields_found = []
for key, value in search_dict.items():
if key == field:
fields_found.append(value)
elif isinstance(value, dict):
results = get_recursively(value, field)
for result in results:
fields_found.append(result)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
more_results = get_recursively(item, field)
for another_result in more_results:
fields_found.append(another_result)
return fields_found
class KMSKeyWildCardPrincipal(BaseResourceValueCheck):
def __init__(self):
name = "Ensure KMS key policy does not contain wildcard (*) principal"
id = "CKV_AWS_33"
supported_resources = ['AWS::KMS::Key']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'Properties/KeyPolicy/Statement/Principal'
def scan_resource_conf(self, conf):
if conf.get('Properties'):
if conf['Properties'].get('KeyPolicy'):
policy_block = conf['Properties']['KeyPolicy']
principals_list = get_recursively(policy_block, 'Principal')
for principal in principals_list:
if isinstance(principal, dict):
for principal_key, principal_value in principal.items():
if principal_value == '*':
return CheckResult.FAILED
else:
if principal == '*':
return CheckResult.FAILED
return CheckResult.PASSED
check = KMSKeyWildCardPrincipal()
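# Editor's sketch (hedged): exercising the helper and the check on a minimal CloudFormation-style
# resource dict; the fragment below is an illustrative placeholder, not a full template.
def _demo_wildcard_principal():
    conf = {
        "Properties": {
            "KeyPolicy": {
                "Statement": [
                    {"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "kms:*", "Resource": "*"}
                ]
            }
        }
    }
    principals = get_recursively(conf["Properties"]["KeyPolicy"], "Principal")  # [{'AWS': '*'}]
    return principals, check.scan_resource_conf(conf)  # CheckResult.FAILED for the wildcard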
|
examples/simulation/dataset.py | Chris-george-anil/flower | 895 | 12617132 |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Partitioned version of CIFAR-10 dataset."""
from typing import List, Tuple, cast
import numpy as np
import tensorflow as tf
XY = Tuple[np.ndarray, np.ndarray]
XYList = List[XY]
PartitionedDataset = List[Tuple[XY, XY]]
def shuffle(x: np.ndarray, y: np.ndarray) -> XY:
"""Shuffle x and y."""
idx = np.random.permutation(len(x))
return x[idx], y[idx]
def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
"""Split x and y into a number of partitions."""
return list(zip(np.split(x, num_partitions), np.split(y, num_partitions)))
def create_partitions(
source_dataset: XY,
num_partitions: int,
) -> XYList:
"""Create partitioned version of a source dataset."""
x, y = source_dataset
x, y = shuffle(x, y)
xy_partitions = partition(x, y, num_partitions)
return xy_partitions
def load(
num_partitions: int,
) -> PartitionedDataset:
"""Create partitioned version of CIFAR-10."""
xy_train, xy_test = tf.keras.datasets.cifar10.load_data()
xy_train_partitions = create_partitions(xy_train, num_partitions)
xy_test_partitions = create_partitions(xy_test, num_partitions)
return list(zip(xy_train_partitions, xy_test_partitions))
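# Editor's sketch (hedged): loading ten CIFAR-10 partitions and inspecting the first client's
# split; tf.keras downloads CIFAR-10 on first use.
if __name__ == "__main__":
    partitions = load(num_partitions=10)
    (x_train, y_train), (x_test, y_test) = partitions[0]
    print(x_train.shape, y_train.shape)  # (5000, 32, 32, 3) (5000, 1)
    print(x_test.shape, y_test.shape)    # (1000, 32, 32, 3) (1000, 1)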
|
tests/test_provider_hashicorp_oci.py | mjuenema/python-terrascript | 507 | 12617141 |
# tests/test_provider_hashicorp_oci.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:23:14 UTC)
def test_provider_import():
import terrascript.provider.hashicorp.oci
def test_resource_import():
from terrascript.resource.hashicorp.oci import (
oci_ai_anomaly_detection_ai_private_endpoint,
)
from terrascript.resource.hashicorp.oci import oci_ai_anomaly_detection_data_asset
from terrascript.resource.hashicorp.oci import oci_ai_anomaly_detection_model
from terrascript.resource.hashicorp.oci import oci_ai_anomaly_detection_project
from terrascript.resource.hashicorp.oci import oci_analytics_analytics_instance
from terrascript.resource.hashicorp.oci import (
oci_analytics_analytics_instance_private_access_channel,
)
from terrascript.resource.hashicorp.oci import (
oci_analytics_analytics_instance_vanity_url,
)
from terrascript.resource.hashicorp.oci import oci_apigateway_api
from terrascript.resource.hashicorp.oci import oci_apigateway_certificate
from terrascript.resource.hashicorp.oci import oci_apigateway_deployment
from terrascript.resource.hashicorp.oci import oci_apigateway_gateway
from terrascript.resource.hashicorp.oci import oci_apm_apm_domain
from terrascript.resource.hashicorp.oci import oci_apm_synthetics_monitor
from terrascript.resource.hashicorp.oci import oci_apm_synthetics_script
from terrascript.resource.hashicorp.oci import oci_artifacts_container_configuration
from terrascript.resource.hashicorp.oci import (
oci_artifacts_container_image_signature,
)
from terrascript.resource.hashicorp.oci import oci_artifacts_container_repository
from terrascript.resource.hashicorp.oci import oci_artifacts_generic_artifact
from terrascript.resource.hashicorp.oci import oci_artifacts_repository
from terrascript.resource.hashicorp.oci import oci_audit_configuration
from terrascript.resource.hashicorp.oci import (
oci_autoscaling_auto_scaling_configuration,
)
from terrascript.resource.hashicorp.oci import oci_bastion_bastion
from terrascript.resource.hashicorp.oci import oci_bastion_session
from terrascript.resource.hashicorp.oci import oci_bds_auto_scaling_configuration
from terrascript.resource.hashicorp.oci import oci_bds_bds_instance
from terrascript.resource.hashicorp.oci import oci_blockchain_blockchain_platform
from terrascript.resource.hashicorp.oci import oci_blockchain_osn
from terrascript.resource.hashicorp.oci import oci_blockchain_peer
from terrascript.resource.hashicorp.oci import oci_budget_alert_rule
from terrascript.resource.hashicorp.oci import oci_budget_budget
from terrascript.resource.hashicorp.oci import (
oci_cloud_guard_cloud_guard_configuration,
)
from terrascript.resource.hashicorp.oci import oci_cloud_guard_data_mask_rule
from terrascript.resource.hashicorp.oci import oci_cloud_guard_detector_recipe
from terrascript.resource.hashicorp.oci import oci_cloud_guard_managed_list
from terrascript.resource.hashicorp.oci import oci_cloud_guard_responder_recipe
from terrascript.resource.hashicorp.oci import oci_cloud_guard_target
from terrascript.resource.hashicorp.oci import oci_containerengine_cluster
from terrascript.resource.hashicorp.oci import oci_containerengine_node_pool
from terrascript.resource.hashicorp.oci import (
oci_core_app_catalog_listing_resource_version_agreement,
)
from terrascript.resource.hashicorp.oci import oci_core_app_catalog_subscription
from terrascript.resource.hashicorp.oci import oci_core_boot_volume
from terrascript.resource.hashicorp.oci import oci_core_boot_volume_backup
from terrascript.resource.hashicorp.oci import oci_core_cluster_network
from terrascript.resource.hashicorp.oci import oci_core_compute_capacity_reservation
from terrascript.resource.hashicorp.oci import (
oci_core_compute_image_capability_schema,
)
from terrascript.resource.hashicorp.oci import oci_core_console_history
from terrascript.resource.hashicorp.oci import oci_core_cpe
from terrascript.resource.hashicorp.oci import oci_core_cross_connect
from terrascript.resource.hashicorp.oci import oci_core_cross_connect_group
from terrascript.resource.hashicorp.oci import oci_core_dedicated_vm_host
from terrascript.resource.hashicorp.oci import oci_core_default_dhcp_options
from terrascript.resource.hashicorp.oci import oci_core_default_route_table
from terrascript.resource.hashicorp.oci import oci_core_default_security_list
from terrascript.resource.hashicorp.oci import oci_core_dhcp_options
from terrascript.resource.hashicorp.oci import oci_core_drg
from terrascript.resource.hashicorp.oci import oci_core_drg_attachment
from terrascript.resource.hashicorp.oci import oci_core_drg_attachment_management
from terrascript.resource.hashicorp.oci import oci_core_drg_attachments_list
from terrascript.resource.hashicorp.oci import oci_core_drg_route_distribution
from terrascript.resource.hashicorp.oci import (
oci_core_drg_route_distribution_statement,
)
from terrascript.resource.hashicorp.oci import oci_core_drg_route_table
from terrascript.resource.hashicorp.oci import oci_core_drg_route_table_route_rule
from terrascript.resource.hashicorp.oci import oci_core_image
from terrascript.resource.hashicorp.oci import oci_core_instance
from terrascript.resource.hashicorp.oci import oci_core_instance_configuration
from terrascript.resource.hashicorp.oci import oci_core_instance_console_connection
from terrascript.resource.hashicorp.oci import oci_core_instance_pool
from terrascript.resource.hashicorp.oci import oci_core_instance_pool_instance
from terrascript.resource.hashicorp.oci import oci_core_internet_gateway
from terrascript.resource.hashicorp.oci import oci_core_ipsec
from terrascript.resource.hashicorp.oci import (
oci_core_ipsec_connection_tunnel_management,
)
from terrascript.resource.hashicorp.oci import oci_core_ipv6
from terrascript.resource.hashicorp.oci import (
oci_core_listing_resource_version_agreement,
)
from terrascript.resource.hashicorp.oci import oci_core_local_peering_gateway
from terrascript.resource.hashicorp.oci import oci_core_nat_gateway
from terrascript.resource.hashicorp.oci import oci_core_network_security_group
from terrascript.resource.hashicorp.oci import (
oci_core_network_security_group_security_rule,
)
from terrascript.resource.hashicorp.oci import oci_core_private_ip
from terrascript.resource.hashicorp.oci import oci_core_public_ip
from terrascript.resource.hashicorp.oci import oci_core_public_ip_pool
from terrascript.resource.hashicorp.oci import oci_core_public_ip_pool_capacity
from terrascript.resource.hashicorp.oci import oci_core_remote_peering_connection
from terrascript.resource.hashicorp.oci import oci_core_route_table
from terrascript.resource.hashicorp.oci import oci_core_route_table_attachment
from terrascript.resource.hashicorp.oci import oci_core_security_list
from terrascript.resource.hashicorp.oci import oci_core_service_gateway
from terrascript.resource.hashicorp.oci import oci_core_shape_management
from terrascript.resource.hashicorp.oci import oci_core_subnet
from terrascript.resource.hashicorp.oci import oci_core_vcn
from terrascript.resource.hashicorp.oci import oci_core_virtual_circuit
from terrascript.resource.hashicorp.oci import oci_core_virtual_network
from terrascript.resource.hashicorp.oci import oci_core_vlan
from terrascript.resource.hashicorp.oci import oci_core_vnic_attachment
from terrascript.resource.hashicorp.oci import oci_core_volume
from terrascript.resource.hashicorp.oci import oci_core_volume_attachment
from terrascript.resource.hashicorp.oci import oci_core_volume_backup
from terrascript.resource.hashicorp.oci import oci_core_volume_backup_policy
from terrascript.resource.hashicorp.oci import (
oci_core_volume_backup_policy_assignment,
)
from terrascript.resource.hashicorp.oci import oci_core_volume_group
from terrascript.resource.hashicorp.oci import oci_core_volume_group_backup
from terrascript.resource.hashicorp.oci import oci_data_safe_data_safe_configuration
from terrascript.resource.hashicorp.oci import (
oci_data_safe_data_safe_private_endpoint,
)
from terrascript.resource.hashicorp.oci import oci_data_safe_on_prem_connector
from terrascript.resource.hashicorp.oci import oci_data_safe_target_database
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_container_database,
)
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_container_database_dataguard_association_operation,
)
from terrascript.resource.hashicorp.oci import oci_database_autonomous_database
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_database_backup,
)
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_database_instance_wallet_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_database_regional_wallet_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_database_wallet,
)
from terrascript.resource.hashicorp.oci import (
oci_database_autonomous_exadata_infrastructure,
)
from terrascript.resource.hashicorp.oci import oci_database_autonomous_vm_cluster
from terrascript.resource.hashicorp.oci import oci_database_backup
from terrascript.resource.hashicorp.oci import oci_database_backup_destination
from terrascript.resource.hashicorp.oci import (
oci_database_cloud_database_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_cloud_exadata_infrastructure,
)
from terrascript.resource.hashicorp.oci import oci_database_cloud_vm_cluster
from terrascript.resource.hashicorp.oci import oci_database_data_guard_association
from terrascript.resource.hashicorp.oci import oci_database_database
from terrascript.resource.hashicorp.oci import oci_database_database_software_image
from terrascript.resource.hashicorp.oci import oci_database_database_upgrade
from terrascript.resource.hashicorp.oci import oci_database_db_home
from terrascript.resource.hashicorp.oci import (
oci_database_db_node_console_connection,
)
from terrascript.resource.hashicorp.oci import oci_database_db_system
from terrascript.resource.hashicorp.oci import oci_database_exadata_infrastructure
from terrascript.resource.hashicorp.oci import (
oci_database_exadata_infrastructure_storage,
)
from terrascript.resource.hashicorp.oci import oci_database_exadata_iorm_config
from terrascript.resource.hashicorp.oci import (
oci_database_external_container_database,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_container_database_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_database_connector,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_non_container_database,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_non_container_database_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_non_container_database_operations_insights_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_pluggable_database,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_pluggable_database_management,
)
from terrascript.resource.hashicorp.oci import (
oci_database_external_pluggable_database_operations_insights_management,
)
from terrascript.resource.hashicorp.oci import oci_database_key_store
from terrascript.resource.hashicorp.oci import oci_database_maintenance_run
from terrascript.resource.hashicorp.oci import (
oci_database_management_db_management_private_endpoint,
)
from terrascript.resource.hashicorp.oci import (
oci_database_management_managed_database_group,
)
from terrascript.resource.hashicorp.oci import (
oci_database_management_managed_databases_change_database_parameter,
)
from terrascript.resource.hashicorp.oci import (
oci_database_management_managed_databases_reset_database_parameter,
)
from terrascript.resource.hashicorp.oci import oci_database_migration
from terrascript.resource.hashicorp.oci import oci_database_migration_agent
from terrascript.resource.hashicorp.oci import oci_database_migration_connection
from terrascript.resource.hashicorp.oci import oci_database_migration_job
from terrascript.resource.hashicorp.oci import oci_database_migration_migration
from terrascript.resource.hashicorp.oci import oci_database_pluggable_database
from terrascript.resource.hashicorp.oci import (
oci_database_pluggable_databases_local_clone,
)
from terrascript.resource.hashicorp.oci import (
oci_database_pluggable_databases_remote_clone,
)
from terrascript.resource.hashicorp.oci import oci_database_vm_cluster
from terrascript.resource.hashicorp.oci import oci_database_vm_cluster_network
from terrascript.resource.hashicorp.oci import oci_datacatalog_catalog
from terrascript.resource.hashicorp.oci import (
oci_datacatalog_catalog_private_endpoint,
)
from terrascript.resource.hashicorp.oci import oci_datacatalog_connection
from terrascript.resource.hashicorp.oci import oci_datacatalog_data_asset
from terrascript.resource.hashicorp.oci import oci_datacatalog_metastore
from terrascript.resource.hashicorp.oci import oci_dataflow_application
from terrascript.resource.hashicorp.oci import oci_dataflow_invoke_run
from terrascript.resource.hashicorp.oci import oci_dataflow_private_endpoint
from terrascript.resource.hashicorp.oci import oci_dataintegration_workspace
from terrascript.resource.hashicorp.oci import oci_datascience_job
from terrascript.resource.hashicorp.oci import oci_datascience_job_run
from terrascript.resource.hashicorp.oci import oci_datascience_model
from terrascript.resource.hashicorp.oci import oci_datascience_model_deployment
from terrascript.resource.hashicorp.oci import oci_datascience_model_provenance
from terrascript.resource.hashicorp.oci import oci_datascience_notebook_session
from terrascript.resource.hashicorp.oci import oci_datascience_project
from terrascript.resource.hashicorp.oci import oci_devops_deploy_artifact
from terrascript.resource.hashicorp.oci import oci_devops_deploy_environment
from terrascript.resource.hashicorp.oci import oci_devops_deploy_pipeline
from terrascript.resource.hashicorp.oci import oci_devops_deploy_stage
from terrascript.resource.hashicorp.oci import oci_devops_deployment
from terrascript.resource.hashicorp.oci import oci_devops_project
from terrascript.resource.hashicorp.oci import oci_dns_record
from terrascript.resource.hashicorp.oci import oci_dns_resolver
from terrascript.resource.hashicorp.oci import oci_dns_resolver_endpoint
from terrascript.resource.hashicorp.oci import oci_dns_rrset
from terrascript.resource.hashicorp.oci import oci_dns_steering_policy
from terrascript.resource.hashicorp.oci import oci_dns_steering_policy_attachment
from terrascript.resource.hashicorp.oci import oci_dns_tsig_key
from terrascript.resource.hashicorp.oci import oci_dns_view
from terrascript.resource.hashicorp.oci import oci_dns_zone
from terrascript.resource.hashicorp.oci import oci_email_dkim
from terrascript.resource.hashicorp.oci import oci_email_email_domain
from terrascript.resource.hashicorp.oci import oci_email_sender
from terrascript.resource.hashicorp.oci import oci_email_suppression
from terrascript.resource.hashicorp.oci import oci_events_rule
from terrascript.resource.hashicorp.oci import oci_file_storage_export
from terrascript.resource.hashicorp.oci import oci_file_storage_export_set
from terrascript.resource.hashicorp.oci import oci_file_storage_file_system
from terrascript.resource.hashicorp.oci import oci_file_storage_mount_target
from terrascript.resource.hashicorp.oci import oci_file_storage_snapshot
from terrascript.resource.hashicorp.oci import oci_functions_application
from terrascript.resource.hashicorp.oci import oci_functions_function
from terrascript.resource.hashicorp.oci import oci_functions_invoke_function
from terrascript.resource.hashicorp.oci import (
oci_generic_artifacts_content_artifact_by_path,
)
from terrascript.resource.hashicorp.oci import oci_golden_gate_database_registration
from terrascript.resource.hashicorp.oci import oci_golden_gate_deployment
from terrascript.resource.hashicorp.oci import oci_golden_gate_deployment_backup
from terrascript.resource.hashicorp.oci import oci_health_checks_http_monitor
from terrascript.resource.hashicorp.oci import oci_health_checks_http_probe
from terrascript.resource.hashicorp.oci import oci_health_checks_ping_monitor
from terrascript.resource.hashicorp.oci import oci_health_checks_ping_probe
from terrascript.resource.hashicorp.oci import oci_identity_api_key
from terrascript.resource.hashicorp.oci import oci_identity_auth_token
from terrascript.resource.hashicorp.oci import oci_identity_authentication_policy
from terrascript.resource.hashicorp.oci import oci_identity_compartment
from terrascript.resource.hashicorp.oci import oci_identity_customer_secret_key
from terrascript.resource.hashicorp.oci import oci_identity_dynamic_group
from terrascript.resource.hashicorp.oci import oci_identity_group
from terrascript.resource.hashicorp.oci import oci_identity_identity_provider
from terrascript.resource.hashicorp.oci import oci_identity_idp_group_mapping
from terrascript.resource.hashicorp.oci import oci_identity_network_source
from terrascript.resource.hashicorp.oci import oci_identity_policy
from terrascript.resource.hashicorp.oci import oci_identity_smtp_credential
from terrascript.resource.hashicorp.oci import oci_identity_swift_password
from terrascript.resource.hashicorp.oci import oci_identity_tag
from terrascript.resource.hashicorp.oci import oci_identity_tag_default
from terrascript.resource.hashicorp.oci import oci_identity_tag_namespace
from terrascript.resource.hashicorp.oci import oci_identity_ui_password
from terrascript.resource.hashicorp.oci import oci_identity_user
from terrascript.resource.hashicorp.oci import (
oci_identity_user_capabilities_management,
)
from terrascript.resource.hashicorp.oci import oci_identity_user_group_membership
from terrascript.resource.hashicorp.oci import oci_integration_integration_instance
from terrascript.resource.hashicorp.oci import oci_jms_fleet
from terrascript.resource.hashicorp.oci import oci_kms_encrypted_data
from terrascript.resource.hashicorp.oci import oci_kms_generated_key
from terrascript.resource.hashicorp.oci import oci_kms_key
from terrascript.resource.hashicorp.oci import oci_kms_key_version
from terrascript.resource.hashicorp.oci import oci_kms_sign
from terrascript.resource.hashicorp.oci import oci_kms_vault
from terrascript.resource.hashicorp.oci import oci_kms_vault_replication
from terrascript.resource.hashicorp.oci import oci_kms_verify
from terrascript.resource.hashicorp.oci import oci_limits_quota
from terrascript.resource.hashicorp.oci import oci_load_balancer
from terrascript.resource.hashicorp.oci import oci_load_balancer_backend
from terrascript.resource.hashicorp.oci import oci_load_balancer_backend_set
from terrascript.resource.hashicorp.oci import oci_load_balancer_backendset
from terrascript.resource.hashicorp.oci import oci_load_balancer_certificate
from terrascript.resource.hashicorp.oci import oci_load_balancer_hostname
from terrascript.resource.hashicorp.oci import oci_load_balancer_listener
from terrascript.resource.hashicorp.oci import oci_load_balancer_load_balancer
from terrascript.resource.hashicorp.oci import (
oci_load_balancer_load_balancer_routing_policy,
)
from terrascript.resource.hashicorp.oci import oci_load_balancer_path_route_set
from terrascript.resource.hashicorp.oci import oci_load_balancer_rule_set
from terrascript.resource.hashicorp.oci import oci_load_balancer_ssl_cipher_suite
from terrascript.resource.hashicorp.oci import (
oci_log_analytics_log_analytics_entity,
)
from terrascript.resource.hashicorp.oci import (
oci_log_analytics_log_analytics_import_custom_content,
)
from terrascript.resource.hashicorp.oci import (
oci_log_analytics_log_analytics_log_group,
)
from terrascript.resource.hashicorp.oci import (
oci_log_analytics_log_analytics_object_collection_rule,
)
from terrascript.resource.hashicorp.oci import oci_log_analytics_namespace
from terrascript.resource.hashicorp.oci import oci_logging_log
from terrascript.resource.hashicorp.oci import oci_logging_log_group
from terrascript.resource.hashicorp.oci import oci_logging_log_saved_search
from terrascript.resource.hashicorp.oci import (
oci_logging_unified_agent_configuration,
)
from terrascript.resource.hashicorp.oci import oci_management_agent_management_agent
from terrascript.resource.hashicorp.oci import (
oci_management_agent_management_agent_install_key,
)
from terrascript.resource.hashicorp.oci import (
oci_management_dashboard_management_dashboards_import,
)
from terrascript.resource.hashicorp.oci import oci_marketplace_accepted_agreement
from terrascript.resource.hashicorp.oci import (
oci_marketplace_listing_package_agreement,
)
from terrascript.resource.hashicorp.oci import oci_marketplace_publication
from terrascript.resource.hashicorp.oci import oci_metering_computation_custom_table
from terrascript.resource.hashicorp.oci import oci_metering_computation_query
from terrascript.resource.hashicorp.oci import oci_metering_computation_usage
from terrascript.resource.hashicorp.oci import oci_monitoring_alarm
from terrascript.resource.hashicorp.oci import oci_mysql_analytics_cluster
from terrascript.resource.hashicorp.oci import oci_mysql_channel
from terrascript.resource.hashicorp.oci import oci_mysql_heat_wave_cluster
from terrascript.resource.hashicorp.oci import oci_mysql_mysql_backup
from terrascript.resource.hashicorp.oci import oci_mysql_mysql_db_system
from terrascript.resource.hashicorp.oci import oci_network_load_balancer_backend
from terrascript.resource.hashicorp.oci import oci_network_load_balancer_backend_set
from terrascript.resource.hashicorp.oci import oci_network_load_balancer_listener
from terrascript.resource.hashicorp.oci import (
oci_network_load_balancer_network_load_balancer,
)
from terrascript.resource.hashicorp.oci import oci_nosql_index
from terrascript.resource.hashicorp.oci import oci_nosql_table
from terrascript.resource.hashicorp.oci import oci_objectstorage_bucket
from terrascript.resource.hashicorp.oci import oci_objectstorage_namespace_metadata
from terrascript.resource.hashicorp.oci import oci_objectstorage_object
from terrascript.resource.hashicorp.oci import (
oci_objectstorage_object_lifecycle_policy,
)
from terrascript.resource.hashicorp.oci import oci_objectstorage_preauthrequest
from terrascript.resource.hashicorp.oci import oci_objectstorage_replication_policy
from terrascript.resource.hashicorp.oci import oci_oce_oce_instance
from terrascript.resource.hashicorp.oci import oci_ocvp_esxi_host
from terrascript.resource.hashicorp.oci import oci_ocvp_sddc
from terrascript.resource.hashicorp.oci import oci_oda_oda_instance
from terrascript.resource.hashicorp.oci import oci_ons_notification_topic
from terrascript.resource.hashicorp.oci import oci_ons_subscription
from terrascript.resource.hashicorp.oci import oci_opsi_database_insight
from terrascript.resource.hashicorp.oci import oci_opsi_enterprise_manager_bridge
from terrascript.resource.hashicorp.oci import oci_opsi_host_insight
from terrascript.resource.hashicorp.oci import oci_optimizer_enrollment_status
from terrascript.resource.hashicorp.oci import oci_optimizer_profile
from terrascript.resource.hashicorp.oci import oci_optimizer_recommendation
from terrascript.resource.hashicorp.oci import oci_optimizer_resource_action
from terrascript.resource.hashicorp.oci import oci_osmanagement_managed_instance
from terrascript.resource.hashicorp.oci import (
oci_osmanagement_managed_instance_group,
)
from terrascript.resource.hashicorp.oci import (
oci_osmanagement_managed_instance_management,
)
from terrascript.resource.hashicorp.oci import oci_osmanagement_software_source
from terrascript.resource.hashicorp.oci import oci_sch_service_connector
from terrascript.resource.hashicorp.oci import (
oci_service_catalog_private_application,
)
from terrascript.resource.hashicorp.oci import oci_service_catalog_service_catalog
from terrascript.resource.hashicorp.oci import (
oci_service_catalog_service_catalog_association,
)
from terrascript.resource.hashicorp.oci import oci_streaming_connect_harness
from terrascript.resource.hashicorp.oci import oci_streaming_stream
from terrascript.resource.hashicorp.oci import oci_streaming_stream_pool
from terrascript.resource.hashicorp.oci import (
oci_vulnerability_scanning_container_scan_recipe,
)
from terrascript.resource.hashicorp.oci import (
oci_vulnerability_scanning_container_scan_target,
)
from terrascript.resource.hashicorp.oci import (
oci_vulnerability_scanning_host_scan_recipe,
)
from terrascript.resource.hashicorp.oci import (
oci_vulnerability_scanning_host_scan_target,
)
from terrascript.resource.hashicorp.oci import oci_waas_address_list
from terrascript.resource.hashicorp.oci import oci_waas_certificate
from terrascript.resource.hashicorp.oci import oci_waas_custom_protection_rule
from terrascript.resource.hashicorp.oci import oci_waas_http_redirect
from terrascript.resource.hashicorp.oci import oci_waas_protection_rule
from terrascript.resource.hashicorp.oci import oci_waas_purge_cache
from terrascript.resource.hashicorp.oci import oci_waas_waas_policy
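# Editor's sketch (hedged): the style of usage these import smoke-tests guard -- assembling a tiny
# Terraform config with terrascript. The provider class name and all resource arguments below are
# assumptions/placeholders for illustration only.
def example_oci_config():
    import terrascript
    import terrascript.provider.hashicorp.oci
    from terrascript.resource.hashicorp.oci import oci_core_vcn
    config = terrascript.Terrascript()
    config += terrascript.provider.hashicorp.oci.oci(region="us-ashburn-1")
    config += oci_core_vcn("example_vcn", compartment_id="ocid1.compartment.oc1..example",
                           cidr_block="10.0.0.0/16")
    return config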
def test_datasource_import():
from terrascript.data.hashicorp.oci import (
oci_ai_anomaly_detection_ai_private_endpoint,
)
from terrascript.data.hashicorp.oci import (
oci_ai_anomaly_detection_ai_private_endpoints,
)
from terrascript.data.hashicorp.oci import oci_ai_anomaly_detection_data_asset
from terrascript.data.hashicorp.oci import oci_ai_anomaly_detection_data_assets
from terrascript.data.hashicorp.oci import oci_ai_anomaly_detection_model
from terrascript.data.hashicorp.oci import oci_ai_anomaly_detection_models
from terrascript.data.hashicorp.oci import oci_ai_anomaly_detection_project
from terrascript.data.hashicorp.oci import oci_ai_anomaly_detection_projects
from terrascript.data.hashicorp.oci import oci_analytics_analytics_instance
from terrascript.data.hashicorp.oci import (
oci_analytics_analytics_instance_private_access_channel,
)
from terrascript.data.hashicorp.oci import oci_analytics_analytics_instances
from terrascript.data.hashicorp.oci import oci_apigateway_api
from terrascript.data.hashicorp.oci import oci_apigateway_api_content
from terrascript.data.hashicorp.oci import (
oci_apigateway_api_deployment_specification,
)
from terrascript.data.hashicorp.oci import oci_apigateway_api_validation
from terrascript.data.hashicorp.oci import oci_apigateway_apis
from terrascript.data.hashicorp.oci import oci_apigateway_certificate
from terrascript.data.hashicorp.oci import oci_apigateway_certificates
from terrascript.data.hashicorp.oci import oci_apigateway_deployment
from terrascript.data.hashicorp.oci import oci_apigateway_deployments
from terrascript.data.hashicorp.oci import oci_apigateway_gateway
from terrascript.data.hashicorp.oci import oci_apigateway_gateways
from terrascript.data.hashicorp.oci import oci_apm_apm_domain
from terrascript.data.hashicorp.oci import oci_apm_apm_domains
from terrascript.data.hashicorp.oci import oci_apm_data_keys
from terrascript.data.hashicorp.oci import oci_apm_synthetics_monitor
from terrascript.data.hashicorp.oci import oci_apm_synthetics_monitors
from terrascript.data.hashicorp.oci import oci_apm_synthetics_public_vantage_point
from terrascript.data.hashicorp.oci import oci_apm_synthetics_public_vantage_points
from terrascript.data.hashicorp.oci import oci_apm_synthetics_result
from terrascript.data.hashicorp.oci import oci_apm_synthetics_script
from terrascript.data.hashicorp.oci import oci_apm_synthetics_scripts
from terrascript.data.hashicorp.oci import oci_artifacts_container_configuration
from terrascript.data.hashicorp.oci import oci_artifacts_container_image
from terrascript.data.hashicorp.oci import oci_artifacts_container_image_signature
from terrascript.data.hashicorp.oci import oci_artifacts_container_image_signatures
from terrascript.data.hashicorp.oci import oci_artifacts_container_images
from terrascript.data.hashicorp.oci import oci_artifacts_container_repositories
from terrascript.data.hashicorp.oci import oci_artifacts_container_repository
from terrascript.data.hashicorp.oci import oci_artifacts_generic_artifact
from terrascript.data.hashicorp.oci import oci_artifacts_generic_artifacts
from terrascript.data.hashicorp.oci import oci_artifacts_repositories
from terrascript.data.hashicorp.oci import oci_artifacts_repository
from terrascript.data.hashicorp.oci import oci_audit_configuration
from terrascript.data.hashicorp.oci import oci_audit_events
from terrascript.data.hashicorp.oci import (
oci_autoscaling_auto_scaling_configuration,
)
from terrascript.data.hashicorp.oci import (
oci_autoscaling_auto_scaling_configurations,
)
from terrascript.data.hashicorp.oci import oci_bastion_bastion
from terrascript.data.hashicorp.oci import oci_bastion_bastions
from terrascript.data.hashicorp.oci import oci_bastion_session
from terrascript.data.hashicorp.oci import oci_bastion_sessions
from terrascript.data.hashicorp.oci import oci_bds_auto_scaling_configuration
from terrascript.data.hashicorp.oci import oci_bds_auto_scaling_configurations
from terrascript.data.hashicorp.oci import oci_bds_bds_instance
from terrascript.data.hashicorp.oci import oci_bds_bds_instances
from terrascript.data.hashicorp.oci import oci_blockchain_blockchain_platform
from terrascript.data.hashicorp.oci import oci_blockchain_blockchain_platforms
from terrascript.data.hashicorp.oci import oci_blockchain_osn
from terrascript.data.hashicorp.oci import oci_blockchain_osns
from terrascript.data.hashicorp.oci import oci_blockchain_peer
from terrascript.data.hashicorp.oci import oci_blockchain_peers
from terrascript.data.hashicorp.oci import oci_budget_alert_rule
from terrascript.data.hashicorp.oci import oci_budget_alert_rules
from terrascript.data.hashicorp.oci import oci_budget_budget
from terrascript.data.hashicorp.oci import oci_budget_budgets
from terrascript.data.hashicorp.oci import oci_cloud_guard_cloud_guard_configuration
from terrascript.data.hashicorp.oci import oci_cloud_guard_data_mask_rule
from terrascript.data.hashicorp.oci import oci_cloud_guard_data_mask_rules
from terrascript.data.hashicorp.oci import oci_cloud_guard_detector_recipe
from terrascript.data.hashicorp.oci import oci_cloud_guard_detector_recipes
from terrascript.data.hashicorp.oci import oci_cloud_guard_managed_list
from terrascript.data.hashicorp.oci import oci_cloud_guard_managed_lists
from terrascript.data.hashicorp.oci import oci_cloud_guard_responder_recipe
from terrascript.data.hashicorp.oci import oci_cloud_guard_responder_recipes
from terrascript.data.hashicorp.oci import oci_cloud_guard_target
from terrascript.data.hashicorp.oci import oci_cloud_guard_targets
from terrascript.data.hashicorp.oci import (
oci_computeinstanceagent_instance_agent_plugin,
)
from terrascript.data.hashicorp.oci import (
oci_computeinstanceagent_instance_agent_plugins,
)
from terrascript.data.hashicorp.oci import (
oci_computeinstanceagent_instance_available_plugins,
)
from terrascript.data.hashicorp.oci import oci_containerengine_cluster_kube_config
from terrascript.data.hashicorp.oci import oci_containerengine_cluster_option
from terrascript.data.hashicorp.oci import oci_containerengine_clusters
from terrascript.data.hashicorp.oci import (
oci_containerengine_migrate_to_native_vcn_status,
)
from terrascript.data.hashicorp.oci import oci_containerengine_node_pool
from terrascript.data.hashicorp.oci import oci_containerengine_node_pool_option
from terrascript.data.hashicorp.oci import oci_containerengine_node_pools
from terrascript.data.hashicorp.oci import oci_containerengine_work_request_errors
from terrascript.data.hashicorp.oci import (
oci_containerengine_work_request_log_entries,
)
from terrascript.data.hashicorp.oci import oci_containerengine_work_requests
from terrascript.data.hashicorp.oci import oci_core_app_catalog_listing
from terrascript.data.hashicorp.oci import (
oci_core_app_catalog_listing_resource_version,
)
from terrascript.data.hashicorp.oci import (
oci_core_app_catalog_listing_resource_versions,
)
from terrascript.data.hashicorp.oci import oci_core_app_catalog_listings
from terrascript.data.hashicorp.oci import oci_core_app_catalog_subscriptions
from terrascript.data.hashicorp.oci import oci_core_block_volume_replica
from terrascript.data.hashicorp.oci import oci_core_block_volume_replicas
from terrascript.data.hashicorp.oci import oci_core_boot_volume
from terrascript.data.hashicorp.oci import oci_core_boot_volume_attachments
from terrascript.data.hashicorp.oci import oci_core_boot_volume_backup
from terrascript.data.hashicorp.oci import oci_core_boot_volume_backups
from terrascript.data.hashicorp.oci import oci_core_boot_volume_replica
from terrascript.data.hashicorp.oci import oci_core_boot_volume_replicas
from terrascript.data.hashicorp.oci import oci_core_boot_volumes
from terrascript.data.hashicorp.oci import oci_core_byoip_allocated_ranges
from terrascript.data.hashicorp.oci import oci_core_byoip_range
from terrascript.data.hashicorp.oci import oci_core_byoip_ranges
from terrascript.data.hashicorp.oci import oci_core_cluster_network
from terrascript.data.hashicorp.oci import oci_core_cluster_network_instances
from terrascript.data.hashicorp.oci import oci_core_cluster_networks
from terrascript.data.hashicorp.oci import oci_core_compute_capacity_reservation
from terrascript.data.hashicorp.oci import (
oci_core_compute_capacity_reservation_instance_shapes,
)
from terrascript.data.hashicorp.oci import (
oci_core_compute_capacity_reservation_instances,
)
from terrascript.data.hashicorp.oci import oci_core_compute_capacity_reservations
from terrascript.data.hashicorp.oci import (
oci_core_compute_global_image_capability_schema,
)
from terrascript.data.hashicorp.oci import (
oci_core_compute_global_image_capability_schemas,
)
from terrascript.data.hashicorp.oci import (
oci_core_compute_global_image_capability_schemas_version,
)
from terrascript.data.hashicorp.oci import (
oci_core_compute_global_image_capability_schemas_versions,
)
from terrascript.data.hashicorp.oci import oci_core_compute_image_capability_schema
from terrascript.data.hashicorp.oci import oci_core_compute_image_capability_schemas
from terrascript.data.hashicorp.oci import oci_core_console_histories
from terrascript.data.hashicorp.oci import oci_core_console_history_data
from terrascript.data.hashicorp.oci import oci_core_cpe_device_shape
from terrascript.data.hashicorp.oci import oci_core_cpe_device_shapes
from terrascript.data.hashicorp.oci import oci_core_cpes
from terrascript.data.hashicorp.oci import oci_core_cross_connect
from terrascript.data.hashicorp.oci import oci_core_cross_connect_group
from terrascript.data.hashicorp.oci import oci_core_cross_connect_groups
from terrascript.data.hashicorp.oci import oci_core_cross_connect_locations
from terrascript.data.hashicorp.oci import oci_core_cross_connect_port_speed_shapes
from terrascript.data.hashicorp.oci import oci_core_cross_connect_status
from terrascript.data.hashicorp.oci import oci_core_cross_connects
from terrascript.data.hashicorp.oci import oci_core_dedicated_vm_host
from terrascript.data.hashicorp.oci import (
oci_core_dedicated_vm_host_instance_shapes,
)
from terrascript.data.hashicorp.oci import oci_core_dedicated_vm_host_shapes
from terrascript.data.hashicorp.oci import oci_core_dedicated_vm_hosts
from terrascript.data.hashicorp.oci import oci_core_dedicated_vm_hosts_instances
from terrascript.data.hashicorp.oci import oci_core_dhcp_options
from terrascript.data.hashicorp.oci import oci_core_drg_attachments
from terrascript.data.hashicorp.oci import oci_core_drg_route_distribution
from terrascript.data.hashicorp.oci import (
oci_core_drg_route_distribution_statements,
)
from terrascript.data.hashicorp.oci import oci_core_drg_route_distributions
from terrascript.data.hashicorp.oci import oci_core_drg_route_table
from terrascript.data.hashicorp.oci import oci_core_drg_route_table_route_rules
from terrascript.data.hashicorp.oci import oci_core_drg_route_tables
from terrascript.data.hashicorp.oci import oci_core_drgs
from terrascript.data.hashicorp.oci import oci_core_fast_connect_provider_service
from terrascript.data.hashicorp.oci import (
oci_core_fast_connect_provider_service_key,
)
from terrascript.data.hashicorp.oci import oci_core_fast_connect_provider_services
from terrascript.data.hashicorp.oci import oci_core_image
from terrascript.data.hashicorp.oci import oci_core_image_shape
from terrascript.data.hashicorp.oci import oci_core_image_shapes
from terrascript.data.hashicorp.oci import oci_core_images
from terrascript.data.hashicorp.oci import oci_core_instance
from terrascript.data.hashicorp.oci import oci_core_instance_configuration
from terrascript.data.hashicorp.oci import oci_core_instance_configurations
from terrascript.data.hashicorp.oci import oci_core_instance_console_connections
from terrascript.data.hashicorp.oci import oci_core_instance_credentials
from terrascript.data.hashicorp.oci import oci_core_instance_devices
from terrascript.data.hashicorp.oci import oci_core_instance_measured_boot_report
from terrascript.data.hashicorp.oci import oci_core_instance_pool
from terrascript.data.hashicorp.oci import oci_core_instance_pool_instances
from terrascript.data.hashicorp.oci import (
oci_core_instance_pool_load_balancer_attachment,
)
from terrascript.data.hashicorp.oci import oci_core_instance_pools
from terrascript.data.hashicorp.oci import oci_core_instances
from terrascript.data.hashicorp.oci import oci_core_internet_gateways
from terrascript.data.hashicorp.oci import oci_core_ipsec_config
from terrascript.data.hashicorp.oci import oci_core_ipsec_connection_tunnel
from terrascript.data.hashicorp.oci import oci_core_ipsec_connection_tunnels
from terrascript.data.hashicorp.oci import oci_core_ipsec_connections
from terrascript.data.hashicorp.oci import oci_core_ipsec_status
from terrascript.data.hashicorp.oci import oci_core_ipv6
from terrascript.data.hashicorp.oci import oci_core_ipv6s
from terrascript.data.hashicorp.oci import oci_core_letter_of_authority
from terrascript.data.hashicorp.oci import oci_core_listing_resource_version
from terrascript.data.hashicorp.oci import oci_core_listing_resource_versions
from terrascript.data.hashicorp.oci import oci_core_local_peering_gateways
from terrascript.data.hashicorp.oci import oci_core_nat_gateway
from terrascript.data.hashicorp.oci import oci_core_nat_gateways
from terrascript.data.hashicorp.oci import oci_core_network_security_group
from terrascript.data.hashicorp.oci import (
oci_core_network_security_group_security_rules,
)
from terrascript.data.hashicorp.oci import oci_core_network_security_group_vnics
from terrascript.data.hashicorp.oci import oci_core_network_security_groups
from terrascript.data.hashicorp.oci import oci_core_peer_region_for_remote_peerings
from terrascript.data.hashicorp.oci import oci_core_private_ip
from terrascript.data.hashicorp.oci import oci_core_private_ips
from terrascript.data.hashicorp.oci import oci_core_public_ip
from terrascript.data.hashicorp.oci import oci_core_public_ip_pool
from terrascript.data.hashicorp.oci import oci_core_public_ip_pools
from terrascript.data.hashicorp.oci import oci_core_public_ips
from terrascript.data.hashicorp.oci import oci_core_remote_peering_connections
from terrascript.data.hashicorp.oci import oci_core_route_tables
from terrascript.data.hashicorp.oci import oci_core_security_lists
from terrascript.data.hashicorp.oci import oci_core_service_gateways
from terrascript.data.hashicorp.oci import oci_core_services
from terrascript.data.hashicorp.oci import oci_core_shape
from terrascript.data.hashicorp.oci import oci_core_shapes
from terrascript.data.hashicorp.oci import oci_core_subnet
from terrascript.data.hashicorp.oci import oci_core_subnets
from terrascript.data.hashicorp.oci import oci_core_vcn
from terrascript.data.hashicorp.oci import oci_core_vcn_dns_resolver_association
from terrascript.data.hashicorp.oci import oci_core_vcns
from terrascript.data.hashicorp.oci import oci_core_virtual_circuit
from terrascript.data.hashicorp.oci import oci_core_virtual_circuit_bandwidth_shapes
from terrascript.data.hashicorp.oci import oci_core_virtual_circuit_public_prefixes
from terrascript.data.hashicorp.oci import oci_core_virtual_circuits
from terrascript.data.hashicorp.oci import oci_core_virtual_networks
from terrascript.data.hashicorp.oci import oci_core_vlan
from terrascript.data.hashicorp.oci import oci_core_vlans
from terrascript.data.hashicorp.oci import oci_core_vnic
from terrascript.data.hashicorp.oci import oci_core_vnic_attachments
from terrascript.data.hashicorp.oci import oci_core_volume
from terrascript.data.hashicorp.oci import oci_core_volume_attachments
from terrascript.data.hashicorp.oci import oci_core_volume_backup_policies
from terrascript.data.hashicorp.oci import oci_core_volume_backup_policy_assignments
from terrascript.data.hashicorp.oci import oci_core_volume_backups
from terrascript.data.hashicorp.oci import oci_core_volume_group_backups
from terrascript.data.hashicorp.oci import oci_core_volume_groups
from terrascript.data.hashicorp.oci import oci_core_volumes
from terrascript.data.hashicorp.oci import oci_data_safe_data_safe_configuration
from terrascript.data.hashicorp.oci import oci_data_safe_data_safe_private_endpoint
from terrascript.data.hashicorp.oci import oci_data_safe_data_safe_private_endpoints
from terrascript.data.hashicorp.oci import oci_data_safe_on_prem_connector
from terrascript.data.hashicorp.oci import oci_data_safe_on_prem_connectors
from terrascript.data.hashicorp.oci import oci_data_safe_target_database
from terrascript.data.hashicorp.oci import oci_data_safe_target_databases
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_container_database,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_container_database_dataguard_association,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_container_database_dataguard_associations,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_container_databases,
)
from terrascript.data.hashicorp.oci import oci_database_autonomous_container_patches
from terrascript.data.hashicorp.oci import oci_database_autonomous_database
from terrascript.data.hashicorp.oci import oci_database_autonomous_database_backup
from terrascript.data.hashicorp.oci import oci_database_autonomous_database_backups
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_database_dataguard_association,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_database_dataguard_associations,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_database_instance_wallet_management,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_database_regional_wallet_management,
)
from terrascript.data.hashicorp.oci import oci_database_autonomous_database_wallet
from terrascript.data.hashicorp.oci import oci_database_autonomous_databases
from terrascript.data.hashicorp.oci import oci_database_autonomous_databases_clones
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_db_preview_versions,
)
from terrascript.data.hashicorp.oci import oci_database_autonomous_db_versions
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_exadata_infrastructure,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_exadata_infrastructure_ocpu,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_exadata_infrastructure_shapes,
)
from terrascript.data.hashicorp.oci import (
oci_database_autonomous_exadata_infrastructures,
)
from terrascript.data.hashicorp.oci import oci_database_autonomous_patch
from terrascript.data.hashicorp.oci import oci_database_autonomous_vm_cluster
from terrascript.data.hashicorp.oci import oci_database_autonomous_vm_clusters
from terrascript.data.hashicorp.oci import oci_database_backup_destination
from terrascript.data.hashicorp.oci import oci_database_backup_destinations
from terrascript.data.hashicorp.oci import oci_database_backups
from terrascript.data.hashicorp.oci import oci_database_cloud_exadata_infrastructure
from terrascript.data.hashicorp.oci import (
oci_database_cloud_exadata_infrastructures,
)
from terrascript.data.hashicorp.oci import oci_database_cloud_vm_cluster
from terrascript.data.hashicorp.oci import oci_database_cloud_vm_clusters
from terrascript.data.hashicorp.oci import oci_database_data_guard_association
from terrascript.data.hashicorp.oci import oci_database_data_guard_associations
from terrascript.data.hashicorp.oci import oci_database_database
from terrascript.data.hashicorp.oci import oci_database_database_software_image
from terrascript.data.hashicorp.oci import oci_database_database_software_images
from terrascript.data.hashicorp.oci import (
oci_database_database_upgrade_history_entries,
)
from terrascript.data.hashicorp.oci import (
oci_database_database_upgrade_history_entry,
)
from terrascript.data.hashicorp.oci import oci_database_databases
from terrascript.data.hashicorp.oci import oci_database_db_home
from terrascript.data.hashicorp.oci import (
oci_database_db_home_patch_history_entries,
)
from terrascript.data.hashicorp.oci import oci_database_db_home_patches
from terrascript.data.hashicorp.oci import oci_database_db_homes
from terrascript.data.hashicorp.oci import oci_database_db_node
from terrascript.data.hashicorp.oci import oci_database_db_node_console_connection
from terrascript.data.hashicorp.oci import oci_database_db_node_console_connections
from terrascript.data.hashicorp.oci import oci_database_db_nodes
from terrascript.data.hashicorp.oci import (
oci_database_db_system_patch_history_entries,
)
from terrascript.data.hashicorp.oci import oci_database_db_system_patches
from terrascript.data.hashicorp.oci import oci_database_db_system_shapes
from terrascript.data.hashicorp.oci import oci_database_db_systems
from terrascript.data.hashicorp.oci import oci_database_db_versions
from terrascript.data.hashicorp.oci import oci_database_exadata_infrastructure
from terrascript.data.hashicorp.oci import (
oci_database_exadata_infrastructure_download_config_file,
)
from terrascript.data.hashicorp.oci import oci_database_exadata_infrastructures
from terrascript.data.hashicorp.oci import oci_database_exadata_iorm_config
from terrascript.data.hashicorp.oci import oci_database_external_container_database
from terrascript.data.hashicorp.oci import oci_database_external_container_databases
from terrascript.data.hashicorp.oci import oci_database_external_database_connector
from terrascript.data.hashicorp.oci import oci_database_external_database_connectors
from terrascript.data.hashicorp.oci import (
oci_database_external_non_container_database,
)
from terrascript.data.hashicorp.oci import (
oci_database_external_non_container_databases,
)
from terrascript.data.hashicorp.oci import oci_database_external_pluggable_database
from terrascript.data.hashicorp.oci import oci_database_external_pluggable_databases
from terrascript.data.hashicorp.oci import oci_database_flex_components
from terrascript.data.hashicorp.oci import oci_database_gi_versions
from terrascript.data.hashicorp.oci import oci_database_key_store
from terrascript.data.hashicorp.oci import oci_database_key_stores
from terrascript.data.hashicorp.oci import oci_database_maintenance_run
from terrascript.data.hashicorp.oci import oci_database_maintenance_runs
from terrascript.data.hashicorp.oci import (
oci_database_management_db_management_private_endpoint,
)
from terrascript.data.hashicorp.oci import (
oci_database_management_db_management_private_endpoint_associated_database,
)
from terrascript.data.hashicorp.oci import (
oci_database_management_db_management_private_endpoint_associated_databases,
)
from terrascript.data.hashicorp.oci import (
oci_database_management_db_management_private_endpoints,
)
from terrascript.data.hashicorp.oci import oci_database_management_managed_database
from terrascript.data.hashicorp.oci import (
oci_database_management_managed_database_group,
)
from terrascript.data.hashicorp.oci import (
oci_database_management_managed_database_groups,
)
from terrascript.data.hashicorp.oci import oci_database_management_managed_databases
from terrascript.data.hashicorp.oci import (
oci_database_management_managed_databases_database_parameter,
)
from terrascript.data.hashicorp.oci import (
oci_database_management_managed_databases_database_parameters,
)
from terrascript.data.hashicorp.oci import oci_database_migration_agent
from terrascript.data.hashicorp.oci import oci_database_migration_agent_images
from terrascript.data.hashicorp.oci import oci_database_migration_agents
from terrascript.data.hashicorp.oci import oci_database_migration_connection
from terrascript.data.hashicorp.oci import oci_database_migration_connections
from terrascript.data.hashicorp.oci import oci_database_migration_job
from terrascript.data.hashicorp.oci import oci_database_migration_jobs
from terrascript.data.hashicorp.oci import oci_database_migration_migration
from terrascript.data.hashicorp.oci import oci_database_migration_migrations
from terrascript.data.hashicorp.oci import oci_database_pluggable_database
from terrascript.data.hashicorp.oci import oci_database_pluggable_databases
from terrascript.data.hashicorp.oci import oci_database_vm_cluster
from terrascript.data.hashicorp.oci import oci_database_vm_cluster_network
from terrascript.data.hashicorp.oci import (
oci_database_vm_cluster_network_download_config_file,
)
from terrascript.data.hashicorp.oci import oci_database_vm_cluster_networks
from terrascript.data.hashicorp.oci import oci_database_vm_cluster_patch
from terrascript.data.hashicorp.oci import (
oci_database_vm_cluster_patch_history_entries,
)
from terrascript.data.hashicorp.oci import (
oci_database_vm_cluster_patch_history_entry,
)
from terrascript.data.hashicorp.oci import oci_database_vm_cluster_patches
from terrascript.data.hashicorp.oci import (
oci_database_vm_cluster_recommended_network,
)
from terrascript.data.hashicorp.oci import oci_database_vm_cluster_update
from terrascript.data.hashicorp.oci import (
oci_database_vm_cluster_update_history_entries,
)
from terrascript.data.hashicorp.oci import (
oci_database_vm_cluster_update_history_entry,
)
from terrascript.data.hashicorp.oci import oci_database_vm_cluster_updates
from terrascript.data.hashicorp.oci import oci_database_vm_clusters
from terrascript.data.hashicorp.oci import oci_datacatalog_catalog
from terrascript.data.hashicorp.oci import oci_datacatalog_catalog_private_endpoint
from terrascript.data.hashicorp.oci import oci_datacatalog_catalog_private_endpoints
from terrascript.data.hashicorp.oci import oci_datacatalog_catalog_type
from terrascript.data.hashicorp.oci import oci_datacatalog_catalog_types
from terrascript.data.hashicorp.oci import oci_datacatalog_catalogs
from terrascript.data.hashicorp.oci import oci_datacatalog_connection
from terrascript.data.hashicorp.oci import oci_datacatalog_connections
from terrascript.data.hashicorp.oci import oci_datacatalog_data_asset
from terrascript.data.hashicorp.oci import oci_datacatalog_data_assets
from terrascript.data.hashicorp.oci import oci_datacatalog_metastore
from terrascript.data.hashicorp.oci import oci_datacatalog_metastores
from terrascript.data.hashicorp.oci import oci_dataflow_application
from terrascript.data.hashicorp.oci import oci_dataflow_applications
from terrascript.data.hashicorp.oci import oci_dataflow_invoke_run
from terrascript.data.hashicorp.oci import oci_dataflow_invoke_runs
from terrascript.data.hashicorp.oci import oci_dataflow_private_endpoint
from terrascript.data.hashicorp.oci import oci_dataflow_private_endpoints
from terrascript.data.hashicorp.oci import oci_dataflow_run_log
from terrascript.data.hashicorp.oci import oci_dataflow_run_logs
from terrascript.data.hashicorp.oci import oci_dataintegration_workspace
from terrascript.data.hashicorp.oci import oci_dataintegration_workspaces
from terrascript.data.hashicorp.oci import oci_datascience_job
from terrascript.data.hashicorp.oci import oci_datascience_job_run
from terrascript.data.hashicorp.oci import oci_datascience_job_runs
from terrascript.data.hashicorp.oci import oci_datascience_job_shapes
from terrascript.data.hashicorp.oci import oci_datascience_jobs
from terrascript.data.hashicorp.oci import oci_datascience_model
from terrascript.data.hashicorp.oci import oci_datascience_model_deployment
from terrascript.data.hashicorp.oci import oci_datascience_model_deployment_shapes
from terrascript.data.hashicorp.oci import oci_datascience_model_deployments
from terrascript.data.hashicorp.oci import oci_datascience_model_provenance
from terrascript.data.hashicorp.oci import oci_datascience_models
from terrascript.data.hashicorp.oci import oci_datascience_notebook_session
from terrascript.data.hashicorp.oci import oci_datascience_notebook_session_shapes
from terrascript.data.hashicorp.oci import oci_datascience_notebook_sessions
from terrascript.data.hashicorp.oci import oci_datascience_project
from terrascript.data.hashicorp.oci import oci_datascience_projects
from terrascript.data.hashicorp.oci import oci_devops_deploy_artifact
from terrascript.data.hashicorp.oci import oci_devops_deploy_artifacts
from terrascript.data.hashicorp.oci import oci_devops_deploy_environment
from terrascript.data.hashicorp.oci import oci_devops_deploy_environments
from terrascript.data.hashicorp.oci import oci_devops_deploy_pipeline
from terrascript.data.hashicorp.oci import oci_devops_deploy_pipelines
from terrascript.data.hashicorp.oci import oci_devops_deploy_stage
from terrascript.data.hashicorp.oci import oci_devops_deploy_stages
from terrascript.data.hashicorp.oci import oci_devops_deployment
from terrascript.data.hashicorp.oci import oci_devops_deployments
from terrascript.data.hashicorp.oci import oci_devops_project
from terrascript.data.hashicorp.oci import oci_devops_projects
from terrascript.data.hashicorp.oci import oci_dns_records
from terrascript.data.hashicorp.oci import oci_dns_resolver
from terrascript.data.hashicorp.oci import oci_dns_resolver_endpoint
from terrascript.data.hashicorp.oci import oci_dns_resolver_endpoints
from terrascript.data.hashicorp.oci import oci_dns_resolvers
from terrascript.data.hashicorp.oci import oci_dns_rrset
from terrascript.data.hashicorp.oci import oci_dns_steering_policies
from terrascript.data.hashicorp.oci import oci_dns_steering_policy
from terrascript.data.hashicorp.oci import oci_dns_steering_policy_attachment
from terrascript.data.hashicorp.oci import oci_dns_steering_policy_attachments
from terrascript.data.hashicorp.oci import oci_dns_tsig_key
from terrascript.data.hashicorp.oci import oci_dns_tsig_keys
from terrascript.data.hashicorp.oci import oci_dns_view
from terrascript.data.hashicorp.oci import oci_dns_views
from terrascript.data.hashicorp.oci import oci_dns_zones
from terrascript.data.hashicorp.oci import oci_email_dkim
from terrascript.data.hashicorp.oci import oci_email_dkims
from terrascript.data.hashicorp.oci import oci_email_email_domain
from terrascript.data.hashicorp.oci import oci_email_email_domains
from terrascript.data.hashicorp.oci import oci_email_sender
from terrascript.data.hashicorp.oci import oci_email_senders
from terrascript.data.hashicorp.oci import oci_email_suppression
from terrascript.data.hashicorp.oci import oci_email_suppressions
from terrascript.data.hashicorp.oci import oci_events_rule
from terrascript.data.hashicorp.oci import oci_events_rules
from terrascript.data.hashicorp.oci import oci_file_storage_export_sets
from terrascript.data.hashicorp.oci import oci_file_storage_exports
from terrascript.data.hashicorp.oci import oci_file_storage_file_systems
from terrascript.data.hashicorp.oci import oci_file_storage_mount_targets
from terrascript.data.hashicorp.oci import oci_file_storage_snapshot
from terrascript.data.hashicorp.oci import oci_file_storage_snapshots
from terrascript.data.hashicorp.oci import oci_functions_application
from terrascript.data.hashicorp.oci import oci_functions_applications
from terrascript.data.hashicorp.oci import oci_functions_function
from terrascript.data.hashicorp.oci import oci_functions_functions
from terrascript.data.hashicorp.oci import (
oci_generic_artifacts_content_artifact_by_path,
)
from terrascript.data.hashicorp.oci import (
oci_generic_artifacts_content_generic_artifacts_content,
)
from terrascript.data.hashicorp.oci import oci_golden_gate_database_registration
from terrascript.data.hashicorp.oci import oci_golden_gate_database_registrations
from terrascript.data.hashicorp.oci import oci_golden_gate_deployment
from terrascript.data.hashicorp.oci import oci_golden_gate_deployment_backup
from terrascript.data.hashicorp.oci import oci_golden_gate_deployment_backups
from terrascript.data.hashicorp.oci import oci_golden_gate_deployments
from terrascript.data.hashicorp.oci import oci_health_checks_http_monitor
from terrascript.data.hashicorp.oci import oci_health_checks_http_monitors
from terrascript.data.hashicorp.oci import oci_health_checks_http_probe_results
from terrascript.data.hashicorp.oci import oci_health_checks_ping_monitor
from terrascript.data.hashicorp.oci import oci_health_checks_ping_monitors
from terrascript.data.hashicorp.oci import oci_health_checks_ping_probe_results
from terrascript.data.hashicorp.oci import oci_health_checks_vantage_points
from terrascript.data.hashicorp.oci import oci_identity_api_keys
from terrascript.data.hashicorp.oci import oci_identity_auth_tokens
from terrascript.data.hashicorp.oci import oci_identity_authentication_policy
from terrascript.data.hashicorp.oci import oci_identity_availability_domain
from terrascript.data.hashicorp.oci import oci_identity_availability_domains
from terrascript.data.hashicorp.oci import oci_identity_compartment
from terrascript.data.hashicorp.oci import oci_identity_compartments
from terrascript.data.hashicorp.oci import oci_identity_cost_tracking_tags
from terrascript.data.hashicorp.oci import oci_identity_customer_secret_keys
from terrascript.data.hashicorp.oci import oci_identity_dynamic_groups
from terrascript.data.hashicorp.oci import oci_identity_fault_domains
from terrascript.data.hashicorp.oci import oci_identity_group
from terrascript.data.hashicorp.oci import oci_identity_groups
from terrascript.data.hashicorp.oci import oci_identity_identity_provider_groups
from terrascript.data.hashicorp.oci import oci_identity_identity_providers
from terrascript.data.hashicorp.oci import oci_identity_idp_group_mappings
from terrascript.data.hashicorp.oci import oci_identity_network_source
from terrascript.data.hashicorp.oci import oci_identity_network_sources
from terrascript.data.hashicorp.oci import oci_identity_policies
from terrascript.data.hashicorp.oci import oci_identity_region_subscriptions
from terrascript.data.hashicorp.oci import oci_identity_regions
from terrascript.data.hashicorp.oci import oci_identity_smtp_credentials
from terrascript.data.hashicorp.oci import oci_identity_swift_passwords
from terrascript.data.hashicorp.oci import oci_identity_tag
from terrascript.data.hashicorp.oci import oci_identity_tag_default
from terrascript.data.hashicorp.oci import oci_identity_tag_defaults
from terrascript.data.hashicorp.oci import oci_identity_tag_namespaces
from terrascript.data.hashicorp.oci import oci_identity_tags
from terrascript.data.hashicorp.oci import oci_identity_tenancy
from terrascript.data.hashicorp.oci import oci_identity_ui_password
from terrascript.data.hashicorp.oci import oci_identity_user
from terrascript.data.hashicorp.oci import oci_identity_user_group_memberships
from terrascript.data.hashicorp.oci import oci_identity_users
from terrascript.data.hashicorp.oci import oci_integration_integration_instance
from terrascript.data.hashicorp.oci import oci_integration_integration_instances
from terrascript.data.hashicorp.oci import oci_jms_fleet
from terrascript.data.hashicorp.oci import oci_jms_fleets
from terrascript.data.hashicorp.oci import oci_kms_decrypted_data
from terrascript.data.hashicorp.oci import oci_kms_encrypted_data
from terrascript.data.hashicorp.oci import oci_kms_key
from terrascript.data.hashicorp.oci import oci_kms_key_version
from terrascript.data.hashicorp.oci import oci_kms_key_versions
from terrascript.data.hashicorp.oci import oci_kms_keys
from terrascript.data.hashicorp.oci import oci_kms_replication_status
from terrascript.data.hashicorp.oci import oci_kms_vault
from terrascript.data.hashicorp.oci import oci_kms_vault_replicas
from terrascript.data.hashicorp.oci import oci_kms_vault_usage
from terrascript.data.hashicorp.oci import oci_kms_vaults
from terrascript.data.hashicorp.oci import oci_limits_limit_definitions
from terrascript.data.hashicorp.oci import oci_limits_limit_values
from terrascript.data.hashicorp.oci import oci_limits_quota
from terrascript.data.hashicorp.oci import oci_limits_quotas
from terrascript.data.hashicorp.oci import oci_limits_resource_availability
from terrascript.data.hashicorp.oci import oci_limits_services
from terrascript.data.hashicorp.oci import oci_load_balancer_backend_health
from terrascript.data.hashicorp.oci import oci_load_balancer_backend_set_health
from terrascript.data.hashicorp.oci import oci_load_balancer_backend_sets
from terrascript.data.hashicorp.oci import oci_load_balancer_backends
from terrascript.data.hashicorp.oci import oci_load_balancer_backendsets
from terrascript.data.hashicorp.oci import oci_load_balancer_certificates
from terrascript.data.hashicorp.oci import oci_load_balancer_health
from terrascript.data.hashicorp.oci import oci_load_balancer_hostnames
from terrascript.data.hashicorp.oci import oci_load_balancer_listener_rules
from terrascript.data.hashicorp.oci import (
oci_load_balancer_load_balancer_routing_policies,
)
from terrascript.data.hashicorp.oci import (
oci_load_balancer_load_balancer_routing_policy,
)
from terrascript.data.hashicorp.oci import oci_load_balancer_load_balancers
from terrascript.data.hashicorp.oci import oci_load_balancer_path_route_sets
from terrascript.data.hashicorp.oci import oci_load_balancer_policies
from terrascript.data.hashicorp.oci import oci_load_balancer_protocols
from terrascript.data.hashicorp.oci import oci_load_balancer_rule_set
from terrascript.data.hashicorp.oci import oci_load_balancer_rule_sets
from terrascript.data.hashicorp.oci import oci_load_balancer_shapes
from terrascript.data.hashicorp.oci import oci_load_balancer_ssl_cipher_suite
from terrascript.data.hashicorp.oci import oci_load_balancer_ssl_cipher_suites
from terrascript.data.hashicorp.oci import oci_load_balancers
from terrascript.data.hashicorp.oci import oci_log_analytics_log_analytics_entities
from terrascript.data.hashicorp.oci import (
oci_log_analytics_log_analytics_entities_summary,
)
from terrascript.data.hashicorp.oci import oci_log_analytics_log_analytics_entity
from terrascript.data.hashicorp.oci import oci_log_analytics_log_analytics_log_group
from terrascript.data.hashicorp.oci import (
oci_log_analytics_log_analytics_log_groups,
)
from terrascript.data.hashicorp.oci import (
oci_log_analytics_log_analytics_log_groups_summary,
)
from terrascript.data.hashicorp.oci import (
oci_log_analytics_log_analytics_object_collection_rule,
)
from terrascript.data.hashicorp.oci import (
oci_log_analytics_log_analytics_object_collection_rules,
)
from terrascript.data.hashicorp.oci import oci_log_analytics_namespace
from terrascript.data.hashicorp.oci import oci_log_analytics_namespaces
from terrascript.data.hashicorp.oci import oci_logging_log
from terrascript.data.hashicorp.oci import oci_logging_log_group
from terrascript.data.hashicorp.oci import oci_logging_log_groups
from terrascript.data.hashicorp.oci import oci_logging_log_saved_search
from terrascript.data.hashicorp.oci import oci_logging_log_saved_searches
from terrascript.data.hashicorp.oci import oci_logging_logs
from terrascript.data.hashicorp.oci import oci_logging_unified_agent_configuration
from terrascript.data.hashicorp.oci import oci_logging_unified_agent_configurations
from terrascript.data.hashicorp.oci import oci_management_agent_management_agent
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_available_histories,
)
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_count,
)
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_images,
)
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_install_key,
)
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_install_keys,
)
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_plugin_count,
)
from terrascript.data.hashicorp.oci import (
oci_management_agent_management_agent_plugins,
)
from terrascript.data.hashicorp.oci import oci_management_agent_management_agents
from terrascript.data.hashicorp.oci import (
oci_management_dashboard_management_dashboards_export,
)
from terrascript.data.hashicorp.oci import oci_marketplace_accepted_agreement
from terrascript.data.hashicorp.oci import oci_marketplace_accepted_agreements
from terrascript.data.hashicorp.oci import oci_marketplace_categories
from terrascript.data.hashicorp.oci import oci_marketplace_listing
from terrascript.data.hashicorp.oci import oci_marketplace_listing_package
from terrascript.data.hashicorp.oci import (
oci_marketplace_listing_package_agreements,
)
from terrascript.data.hashicorp.oci import oci_marketplace_listing_packages
from terrascript.data.hashicorp.oci import oci_marketplace_listing_taxes
from terrascript.data.hashicorp.oci import oci_marketplace_listings
from terrascript.data.hashicorp.oci import oci_marketplace_publication
from terrascript.data.hashicorp.oci import oci_marketplace_publication_package
from terrascript.data.hashicorp.oci import oci_marketplace_publication_packages
from terrascript.data.hashicorp.oci import oci_marketplace_publications
from terrascript.data.hashicorp.oci import oci_marketplace_publishers
from terrascript.data.hashicorp.oci import oci_metering_computation_configuration
from terrascript.data.hashicorp.oci import oci_metering_computation_custom_table
from terrascript.data.hashicorp.oci import oci_metering_computation_custom_tables
from terrascript.data.hashicorp.oci import oci_metering_computation_queries
from terrascript.data.hashicorp.oci import oci_metering_computation_query
from terrascript.data.hashicorp.oci import oci_monitoring_alarm
from terrascript.data.hashicorp.oci import oci_monitoring_alarm_history_collection
from terrascript.data.hashicorp.oci import oci_monitoring_alarm_statuses
from terrascript.data.hashicorp.oci import oci_monitoring_alarms
from terrascript.data.hashicorp.oci import oci_monitoring_metric_data
from terrascript.data.hashicorp.oci import oci_monitoring_metrics
from terrascript.data.hashicorp.oci import oci_mysql_analytics_cluster
from terrascript.data.hashicorp.oci import oci_mysql_channel
from terrascript.data.hashicorp.oci import oci_mysql_channels
from terrascript.data.hashicorp.oci import oci_mysql_heat_wave_cluster
from terrascript.data.hashicorp.oci import oci_mysql_mysql_backup
from terrascript.data.hashicorp.oci import oci_mysql_mysql_backups
from terrascript.data.hashicorp.oci import oci_mysql_mysql_configuration
from terrascript.data.hashicorp.oci import oci_mysql_mysql_configurations
from terrascript.data.hashicorp.oci import oci_mysql_mysql_db_system
from terrascript.data.hashicorp.oci import oci_mysql_mysql_db_systems
from terrascript.data.hashicorp.oci import oci_mysql_mysql_versions
from terrascript.data.hashicorp.oci import oci_mysql_shapes
from terrascript.data.hashicorp.oci import oci_network_load_balancer_backend_health
from terrascript.data.hashicorp.oci import oci_network_load_balancer_backend_set
from terrascript.data.hashicorp.oci import (
oci_network_load_balancer_backend_set_health,
)
from terrascript.data.hashicorp.oci import oci_network_load_balancer_backend_sets
from terrascript.data.hashicorp.oci import oci_network_load_balancer_backends
from terrascript.data.hashicorp.oci import oci_network_load_balancer_listener
from terrascript.data.hashicorp.oci import oci_network_load_balancer_listeners
from terrascript.data.hashicorp.oci import (
oci_network_load_balancer_network_load_balancer,
)
from terrascript.data.hashicorp.oci import (
oci_network_load_balancer_network_load_balancer_health,
)
from terrascript.data.hashicorp.oci import (
oci_network_load_balancer_network_load_balancers,
)
from terrascript.data.hashicorp.oci import (
oci_network_load_balancer_network_load_balancers_policies,
)
from terrascript.data.hashicorp.oci import (
oci_network_load_balancer_network_load_balancers_protocols,
)
from terrascript.data.hashicorp.oci import oci_nosql_index
from terrascript.data.hashicorp.oci import oci_nosql_indexes
from terrascript.data.hashicorp.oci import oci_nosql_table
from terrascript.data.hashicorp.oci import oci_nosql_tables
from terrascript.data.hashicorp.oci import oci_objectstorage_bucket
from terrascript.data.hashicorp.oci import oci_objectstorage_bucket_summaries
from terrascript.data.hashicorp.oci import oci_objectstorage_namespace
from terrascript.data.hashicorp.oci import oci_objectstorage_namespace_metadata
from terrascript.data.hashicorp.oci import oci_objectstorage_object
from terrascript.data.hashicorp.oci import oci_objectstorage_object_head
from terrascript.data.hashicorp.oci import oci_objectstorage_object_lifecycle_policy
from terrascript.data.hashicorp.oci import oci_objectstorage_object_versions
from terrascript.data.hashicorp.oci import oci_objectstorage_objects
from terrascript.data.hashicorp.oci import oci_objectstorage_preauthrequest
from terrascript.data.hashicorp.oci import oci_objectstorage_preauthrequests
from terrascript.data.hashicorp.oci import oci_objectstorage_replication_policies
from terrascript.data.hashicorp.oci import oci_objectstorage_replication_policy
from terrascript.data.hashicorp.oci import oci_objectstorage_replication_sources
from terrascript.data.hashicorp.oci import oci_oce_oce_instance
from terrascript.data.hashicorp.oci import oci_oce_oce_instances
from terrascript.data.hashicorp.oci import oci_ocvp_esxi_host
from terrascript.data.hashicorp.oci import oci_ocvp_esxi_hosts
from terrascript.data.hashicorp.oci import oci_ocvp_sddc
from terrascript.data.hashicorp.oci import oci_ocvp_sddcs
from terrascript.data.hashicorp.oci import oci_ocvp_supported_skus
from terrascript.data.hashicorp.oci import (
oci_ocvp_supported_vmware_software_versions,
)
from terrascript.data.hashicorp.oci import oci_oda_oda_instance
from terrascript.data.hashicorp.oci import oci_oda_oda_instances
from terrascript.data.hashicorp.oci import oci_ons_notification_topic
from terrascript.data.hashicorp.oci import oci_ons_notification_topics
from terrascript.data.hashicorp.oci import oci_ons_subscription
from terrascript.data.hashicorp.oci import oci_ons_subscriptions
from terrascript.data.hashicorp.oci import oci_opsi_database_insight
from terrascript.data.hashicorp.oci import oci_opsi_database_insights
from terrascript.data.hashicorp.oci import oci_opsi_enterprise_manager_bridge
from terrascript.data.hashicorp.oci import oci_opsi_enterprise_manager_bridges
from terrascript.data.hashicorp.oci import oci_opsi_host_insight
from terrascript.data.hashicorp.oci import oci_opsi_host_insights
from terrascript.data.hashicorp.oci import oci_optimizer_categories
from terrascript.data.hashicorp.oci import oci_optimizer_category
from terrascript.data.hashicorp.oci import oci_optimizer_enrollment_status
from terrascript.data.hashicorp.oci import oci_optimizer_enrollment_statuses
from terrascript.data.hashicorp.oci import oci_optimizer_histories
from terrascript.data.hashicorp.oci import oci_optimizer_profile
from terrascript.data.hashicorp.oci import oci_optimizer_profiles
from terrascript.data.hashicorp.oci import oci_optimizer_recommendation
from terrascript.data.hashicorp.oci import oci_optimizer_recommendation_strategies
from terrascript.data.hashicorp.oci import oci_optimizer_recommendation_strategy
from terrascript.data.hashicorp.oci import oci_optimizer_recommendations
from terrascript.data.hashicorp.oci import oci_optimizer_resource_action
from terrascript.data.hashicorp.oci import oci_optimizer_resource_actions
from terrascript.data.hashicorp.oci import oci_osmanagement_managed_instance
from terrascript.data.hashicorp.oci import (
oci_osmanagement_managed_instance_event_report,
)
from terrascript.data.hashicorp.oci import oci_osmanagement_managed_instance_group
from terrascript.data.hashicorp.oci import oci_osmanagement_managed_instance_groups
from terrascript.data.hashicorp.oci import oci_osmanagement_managed_instances
from terrascript.data.hashicorp.oci import oci_osmanagement_software_source
from terrascript.data.hashicorp.oci import oci_osmanagement_software_sources
from terrascript.data.hashicorp.oci import oci_resourcemanager_stack
from terrascript.data.hashicorp.oci import oci_resourcemanager_stack_tf_state
from terrascript.data.hashicorp.oci import oci_resourcemanager_stacks
from terrascript.data.hashicorp.oci import oci_sch_service_connector
from terrascript.data.hashicorp.oci import oci_sch_service_connectors
from terrascript.data.hashicorp.oci import oci_service_catalog_private_application
from terrascript.data.hashicorp.oci import (
oci_service_catalog_private_application_package,
)
from terrascript.data.hashicorp.oci import (
oci_service_catalog_private_application_packages,
)
from terrascript.data.hashicorp.oci import oci_service_catalog_private_applications
from terrascript.data.hashicorp.oci import oci_service_catalog_service_catalog
from terrascript.data.hashicorp.oci import (
oci_service_catalog_service_catalog_association,
)
from terrascript.data.hashicorp.oci import (
oci_service_catalog_service_catalog_associations,
)
from terrascript.data.hashicorp.oci import oci_service_catalog_service_catalogs
from terrascript.data.hashicorp.oci import oci_streaming_connect_harness
from terrascript.data.hashicorp.oci import oci_streaming_connect_harnesses
from terrascript.data.hashicorp.oci import oci_streaming_stream
from terrascript.data.hashicorp.oci import oci_streaming_stream_pool
from terrascript.data.hashicorp.oci import oci_streaming_stream_pools
from terrascript.data.hashicorp.oci import oci_streaming_streams
from terrascript.data.hashicorp.oci import oci_vault_secret
from terrascript.data.hashicorp.oci import oci_vault_secret_version
from terrascript.data.hashicorp.oci import oci_vault_secrets
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_container_scan_recipe,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_container_scan_recipes,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_container_scan_target,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_container_scan_targets,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_host_scan_recipe,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_host_scan_recipes,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_host_scan_target,
)
from terrascript.data.hashicorp.oci import (
oci_vulnerability_scanning_host_scan_targets,
)
from terrascript.data.hashicorp.oci import oci_waas_address_list
from terrascript.data.hashicorp.oci import oci_waas_address_lists
from terrascript.data.hashicorp.oci import oci_waas_certificate
from terrascript.data.hashicorp.oci import oci_waas_certificates
from terrascript.data.hashicorp.oci import oci_waas_custom_protection_rule
from terrascript.data.hashicorp.oci import oci_waas_custom_protection_rules
from terrascript.data.hashicorp.oci import oci_waas_edge_subnets
from terrascript.data.hashicorp.oci import oci_waas_http_redirect
from terrascript.data.hashicorp.oci import oci_waas_http_redirects
from terrascript.data.hashicorp.oci import oci_waas_protection_rule
from terrascript.data.hashicorp.oci import oci_waas_protection_rules
from terrascript.data.hashicorp.oci import oci_waas_waas_policies
from terrascript.data.hashicorp.oci import oci_waas_waas_policy
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.hashicorp.oci
#
# t = terrascript.provider.hashicorp.oci.oci()
# s = str(t)
#
# assert 'https://github.com/terraform-providers/terraform-provider-oci' in s
# assert '4.45.0' in s
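# Illustrative usage sketch (an assumption about typical python-terrascript usage, not
# part of the generated test above): it attaches one of the imported data sources to a
# Terrascript config alongside the oci provider. The "ads" label, the region, and the
# compartment_id value are hypothetical placeholders.
def _example_oci_data_source_usage():
    import terrascript
    import terrascript.provider.hashicorp.oci

    config = terrascript.Terrascript()
    config += terrascript.provider.hashicorp.oci.oci(region="us-ashburn-1")
    config += oci_identity_availability_domains(
        "ads", compartment_id="ocid1.compartment.oc1..example"
    )
    return config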
|
env/lib/python3.8/site-packages/numpy/random/tests/test_seed_sequence.py | acrucetta/Chicago_COVI_WebApp | 1,738 | 12617143 | import numpy as np
from numpy.testing import assert_array_equal
from numpy.random import SeedSequence
def test_reference_data():
""" Check that SeedSequence generates data the same as the C++ reference.
https://gist.github.com/imneme/540829265469e673d045
"""
inputs = [
[3735928559, 195939070, 229505742, 305419896],
[3668361503, 4165561550, 1661411377, 3634257570],
[164546577, 4166754639, 1765190214, 1303880213],
[446610472, 3941463886, 522937693, 1882353782],
[1864922766, 1719732118, 3882010307, 1776744564],
[4141682960, 3310988675, 553637289, 902896340],
[1134851934, 2352871630, 3699409824, 2648159817],
[1240956131, 3107113773, 1283198141, 1924506131],
[2669565031, 579818610, 3042504477, 2774880435],
[2766103236, 2883057919, 4029656435, 862374500],
]
outputs = [
[3914649087, 576849849, 3593928901, 2229911004],
[2240804226, 3691353228, 1365957195, 2654016646],
[3562296087, 3191708229, 1147942216, 3726991905],
[1403443605, 3591372999, 1291086759, 441919183],
[1086200464, 2191331643, 560336446, 3658716651],
[3249937430, 2346751812, 847844327, 2996632307],
[2584285912, 4034195531, 3523502488, 169742686],
[959045797, 3875435559, 1886309314, 359682705],
[3978441347, 432478529, 3223635119, 138903045],
[296367413, 4262059219, 13109864, 3283683422],
]
outputs64 = [
[2477551240072187391, 9577394838764454085],
[15854241394484835714, 11398914698975566411],
[13708282465491374871, 16007308345579681096],
[15424829579845884309, 1898028439751125927],
[9411697742461147792, 15714068361935982142],
[10079222287618677782, 12870437757549876199],
[17326737873898640088, 729039288628699544],
[16644868984619524261, 1544825456798124994],
[1857481142255628931, 596584038813451439],
[18305404959516669237, 14103312907920476776],
]
for seed, expected, expected64 in zip(inputs, outputs, outputs64):
expected = np.array(expected, dtype=np.uint32)
ss = SeedSequence(seed)
state = ss.generate_state(len(expected))
assert_array_equal(state, expected)
state64 = ss.generate_state(len(expected64), dtype=np.uint64)
assert_array_equal(state64, expected64)
|
deluca/lung/utils/scripts/train_simulator.py | google/deluca | 105 | 12617156 | # Copyright 2021 The Deluca Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""functions for training simulator."""
import copy
import functools
import os
from absl import logging
from deluca.lung.utils.data.breath_dataset import get_shuffled_and_batched_data
from flax.metrics import tensorboard
import jax
import jax.numpy as jnp
import optax
# Insert any necessary file IO imports here. The tensorboard logging path further below
# assumes a gfile-style filesystem API (gfile.SetUser, gfile.MakeDirs) is available in
# the runtime environment; no such import is present in this file.
# pylint: disable=pointless-string-statement
# pylint: disable=invalid-name
# pylint: disable=g-long-lambda
# pylint: disable=dangerous-default-value
# pylint: disable=unused-argument
# pylint: disable=logging-format-interpolation
# Case 1: when i >= model.transition_threshold, use the true pressures for p_history
def true_func(state, u_in, pressure, model):
print("ENTER TRUE FUNC")
print("pressure:" + str(pressure))
print("scaled_pressure:" + str(model.p_normalizer(pressure).squeeze()))
new_p_history = state.p_history.at[-1].set(
model.p_normalizer(pressure).squeeze())
state = state.replace(p_history=new_p_history)
next_state, _ = model(state=state, action=(u_in, 0))
return next_state
# Case 2: when i < model.transition_threshold, use the predicted pressures for p_history
def false_func(state, u_in, model):
print("ENTER FALSE FUNC")
next_state, _ = model(state=state, action=(u_in, 0))
return next_state
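# The two cases above are dispatched with jax.lax.cond, which requires both branches to
# accept the same operand (the state); functools.partial is used below to bind the
# arguments that differ between the branches.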
def predict_and_update_state(i, state_loss_model_data):
"""predict and update function."""
state, loss, model, data = state_loss_model_data
u_in, pressure = data[0, i], data[1, i] # unnormalized u_in and pressure
partial_true_func = functools.partial(
true_func, u_in=u_in, pressure=pressure, model=model)
partial_false_func = functools.partial(false_func, u_in=u_in, model=model)
next_state = jax.lax.cond(i >= model.transition_threshold, partial_true_func,
partial_false_func, state)
pred = model.p_normalizer(
next_state.predicted_pressure) # normalized gradient step
return (next_state, loss + jnp.abs(model.p_normalizer(data[1, i + 1]) - pred),
model, data)
@jax.jit
def rollout(model, data):
"""rollout function."""
# data.shape == (2, N)
start_idx = 0
end_idx = len(data[0]) - 1 # need minus 1 since we predict for i+1 at idx i
state, _ = model.reset()
new_u_history = jnp.zeros((model.u_history_len,))
new_p_history = jnp.zeros((model.p_history_len,))
state = state.replace(u_history=new_u_history, p_history=new_p_history)
loss_init = jnp.abs(
model.p_normalizer(state.predicted_pressure) -
model.p_normalizer(data[1, 0]))
state_loss_model_data = (state, loss_init, model, data)
(state, total_loss, _, _) = jax.lax.fori_loop(start_idx, end_idx,
predict_and_update_state,
state_loss_model_data)
"""for i in range(start_idx, end_idx):
state_loss_model_data = predict_and_update_state(i, state_loss_model_data)
(_, total_loss) = state_loss_model_data
"""
return total_loss / len(data[0])
def loop_over_loader(model_optimState_lrMult_loss, X_Y, optim, rollout_fn,
scheduler):
"""loop over data loader.
X_batch.shape = Y_batch.shape = (num_batches, batch_size, N=29)
lrMult is the multiplier for the scheduler
Args:
model_optimState_lrMult_loss: (model, optimState, lr_mult, loss)
X_Y: the data
optim: optimizer
rollout_fn: has signature (model, data) -> loss where data.shape = (2, N)
scheduler: lr scheduler
Returns:
updated model, optim_state, lr_mult, and loss
"""
X_batch, y_batch = X_Y
model, optim_state, lr_mult, loss = model_optimState_lrMult_loss
loss, grad = jax.value_and_grad(map_rollout_over_batch)(model,
(X_batch, y_batch),
rollout_fn)
updates, optim_state = optim.update(grad, optim_state, model)
if scheduler == "ReduceLROnPlateau":
updates = jax.tree_map(lambda g: lr_mult * g, updates)
model = optax.apply_updates(model, updates)
return (model, optim_state, lr_mult, loss), None
# @partial(jax.jit, static_argnums=(2,))
def map_rollout_over_batch(model, data, rollout_fn):
"""map rollout over batch dimension.
Args:
model: the model
data: data.shape = ((batch_size, N), (batch_size, N))
rollout_fn: has signature (model, data) -> loss where data.shape = (2, N)
Returns:
loss
"""
rollout_partial = lambda xs: functools.partial(
rollout_fn, model=model)(
data=xs)
data_zipped = jnp.array(list(zip(data[0], data[1]))) # (batch_size, 2, N)
losses = jax.vmap(rollout_partial)(data_zipped)
return jnp.array(losses).mean()
def train_simulator(
dataset,
model,
num_boundary_models,
activation_fn_name,
R,
C,
# idx 0 to num_boundary_models-1 are boundary models,
# idx num_boundary_models is default_model
train_key="train",
test_key="test",
batch_size=512,
epochs=500,
optimizer=optax.adamw,
optimizer_params={
"learning_rate": 1e-3,
"weight_decay": 1e-4
},
patience=10,
lr_decay_factor=0.1,
scheduler="ReduceLROnPlateau", # or "Cosine"
loss_fn=lambda x, y: (jnp.abs(x - y)).mean(),
print_loss=10,
use_tensorboard=False,
mode="train",
user_name="alexjyu-brain",
tb_dir=None,
):
"""train simulator."""
# evaluate on these at end of epoch
for key in ["train", "test"]:
dataset.data[key] = (jnp.array(dataset.data[key][0]),
jnp.array(dataset.data[key][1]))
X_train, y_train = dataset.data[train_key]
X_test, y_test = dataset.data[test_key]
# set up optimizer and lr scheduler
lr_mult = 1.0
if scheduler == "ReduceLROnPlateau":
optim = optimizer(**optimizer_params)
patience_cnt = 0
prev_loss = float("inf")
elif scheduler == "Cosine":
steps_per_epoch = float(X_train.shape[0] / batch_size)
decay_steps = int((epochs + 1) * steps_per_epoch)
logging.info("steps_per_epoch: %s", str(steps_per_epoch))
logging.info("decay_steps: %s", str(decay_steps))
cosine_scheduler_fn = optax.cosine_decay_schedule(
init_value=optimizer_params["learning_rate"], decay_steps=decay_steps)
optimizer_params["learning_rate"] = cosine_scheduler_fn
logging.info("optimizer_params: %s", str(optimizer_params))
optim = optimizer(**optimizer_params)
optim_state = optim.init(model)
loop_over_loader_partial = functools.partial(
loop_over_loader, optim=optim, rollout_fn=rollout, scheduler=scheduler)
# Tensorboard writer
if use_tensorboard:
config = copy.deepcopy(model.default_model_parameters)
del config["activation_fn"]
config["activation_fn_name"] = activation_fn_name
if mode == "train":
file_name = str(config)
gfile.SetUser(user_name)
gfile.MakeDirs(os.path.dirname(tb_dir))
write_path = tb_dir + file_name
summary_writer = tensorboard.SummaryWriter(write_path)
summary_writer.hparams(dict(config))
# Main Training Loop
prng_key = jax.random.PRNGKey(0)
for epoch in range(epochs + 1):
if epoch % 10 == 0:
logging.info("epoch: %s", str(epoch))
X, y, prng_key = get_shuffled_and_batched_data(dataset, batch_size,
train_key, prng_key)
if epoch == 0:
logging.info("X.shape: %s", str(X.shape))
logging.info("y.shape: %s", str(y.shape))
(model, optim_state, lr_mult,
loss), _ = jax.lax.scan(loop_over_loader_partial,
(model, optim_state, lr_mult, 0.), (X, y))
"""for i in range(X.shape[0]):
carry = (model, optim_state, lr_mult, 0.)
carry, _ = loop_over_loader_partial(carry, (X[i], y[i]))
model, optim_state, lr_mult, loss = carry
"""
if scheduler == "ReduceLROnPlateau":
if loss > prev_loss:
patience_cnt = patience_cnt + 1
else:
patience_cnt = 0
if patience_cnt == patience:
lr_mult = lr_mult * lr_decay_factor
patience_cnt = 0
prev_loss = loss
if epoch % print_loss == 0:
if scheduler == "ReduceLROnPlateau":
logging.info("loss: %s", str(loss))
logging.info("prev_loss: %s", str(prev_loss))
logging.info("patience_cnt: %s", str(patience_cnt))
logging.info("lr_mult: %s", str(lr_mult))
# expensive end-of-epoch eval, just for intuition
train_loss = map_rollout_over_batch(model, (X_train, y_train), rollout)
# cross-validation
test_loss = map_rollout_over_batch(model, (X_test, y_test), rollout)
if epoch % print_loss == 0:
logging.info(
f"Epoch {epoch:2d}: train={train_loss.item():.5f}, test_loss={test_loss.item():.5f}"
)
logging.info("-----------------------------------")
if use_tensorboard:
summary_writer.scalar("train_loss", train_loss, epoch)
summary_writer.scalar("test_loss", test_loss, epoch)
if use_tensorboard:
summary_writer.flush()
logging.info("finished looping over epochs")
return model, test_loss
|
InternalPythonModules/android/line.py | drwetter/autopsy | 1,473 | 12617171 | """
Autopsy Forensic Browser
Copyright 2019-2021 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Long
from java.lang import String
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.apache.commons.codec.binary import Base64
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.attributes import MessageAttachments
from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import FileAttachment
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import MessageReadStatus
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CommunicationDirection
from TskContactsParser import TskContactsParser
from TskMessagesParser import TskMessagesParser
from TskCallLogsParser import TskCallLogsParser
import traceback
import general
class LineAnalyzer(general.AndroidComponentAnalyzer):
"""
Parses the Line App databases for contacts,
message and call log artifacts.
About Line parser for v9.15.1:
- Line Database Design Details:
Line has unique ids associated with their users and with their groups. These ids
are referred to as mid in the database.
Databases:
- naver_line: contains contact and msg artifacts
- call_history: contains call artifacts
Tables:
- naver_line/groups: This table contains group ids paired with metadata
about the group (such as creator, group name, etc).
- naver_line/membership This table maps user mids to group ids. Each record
contains 1 group id and 1 user mid.
- naver_line/chat_history This table contains all chat history for private
(1 to 1) and group conversations. It maps a user mid
or group id to the message details. The user mid and
group id are stored into the same column "chat_id".
If the message direction is incoming, the sender mid
is stored in the from_mid column.
- naver_line/contacts This table contains all Line contacts known to the
device.
- call_history/call_history This table contains all call history for private
and group calls. It maps a user mid or a group id
to the call details. The user mid and group id are
stored in the "caller_mid" column.
- Implementation Details:
1) Both group calls and single calls are extracted in one query. The general approach
is to build one result table with both contact mids and group ids.
This result is consistently labeled contact_list_with_groups queries below.
This table is then joined once onto the messages table to produce all communication
data.
2) Both group chats and single chats are extracted in one query.
"""
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self._LINE_PACKAGE_NAME = "jp.naver.line.android"
self._PARSER_NAME = "Line Parser"
self._VERSION = "9.15.1"
def analyze(self, dataSource, fileManager, context):
try:
contact_and_message_dbs = AppSQLiteDB.findAppDatabases(dataSource,
"naver_line", True, self._LINE_PACKAGE_NAME)
calllog_dbs = AppSQLiteDB.findAppDatabases(dataSource,
"call_history", True, self._LINE_PACKAGE_NAME)
for contact_and_message_db in contact_and_message_dbs:
current_case = Case.getCurrentCaseThrows()
helper = CommunicationArtifactsHelper(
current_case.getSleuthkitCase(), self._PARSER_NAME,
contact_and_message_db.getDBFile(), Account.Type.LINE, context.getJobId())
self.parse_contacts(contact_and_message_db, helper)
self.parse_messages(contact_and_message_db, helper, current_case)
for calllog_db in calllog_dbs:
current_case = Case.getCurrentCaseThrows()
helper = CommunicationArtifactsHelper(
current_case.getSleuthkitCase(), self._PARSER_NAME,
calllog_db.getDBFile(), Account.Type.LINE, context.getJobId())
self.parse_calllogs(dataSource, calllog_db, helper)
except NoCurrentCaseException as ex:
# Error parsing Line databases.
self._logger.log(Level.WARNING, "Error parsing the Line App Databases", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
for contact_and_message_db in contact_and_message_dbs:
contact_and_message_db.close()
for calllog_db in calllog_dbs:
calllog_db.close()
def parse_contacts(self, contacts_db, helper):
try:
contacts_parser = LineContactsParser(contacts_db, self._PARSER_NAME)
while contacts_parser.next():
helper.addContact(
contacts_parser.get_contact_name(),
contacts_parser.get_phone(),
contacts_parser.get_home_phone(),
contacts_parser.get_mobile_phone(),
contacts_parser.get_email(),
contacts_parser.get_other_attributes()
)
contacts_parser.close()
except SQLException as ex:
self._logger.log(Level.WARNING, "Error parsing the Line App Database for contacts", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Error adding artifact to case database... case is not complete.
self._logger.log(Level.SEVERE,
"Error adding Line contact artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Error posting notification to blackboard
self._logger.log(Level.WARNING,
"Error posting Line contact artifacts to blackboard.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
def parse_calllogs(self, dataSource, calllogs_db, helper):
try:
calllogs_db.attachDatabase(
dataSource, "naver_line",
calllogs_db.getDBFile().getParentPath(), "naver")
calllog_parser = LineCallLogsParser(calllogs_db)
while calllog_parser.next():
helper.addCalllog(
calllog_parser.get_call_direction(),
calllog_parser.get_phone_number_from(),
calllog_parser.get_phone_number_to(),
calllog_parser.get_call_start_date_time(),
calllog_parser.get_call_end_date_time(),
calllog_parser.get_call_type()
)
calllog_parser.close()
except SQLException as ex:
self._logger.log(Level.WARNING, "Error parsing the Line App Database for calllogs", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Error adding artifact to case database... case is not complete.
self._logger.log(Level.SEVERE,
"Error adding Line calllog artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Error posting notification to blackboard
self._logger.log(Level.WARNING,
"Error posting Line calllog artifacts to blackboard.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
def parse_messages(self, messages_db, helper, current_case):
try:
messages_parser = LineMessagesParser(messages_db)
while messages_parser.next():
message_artifact = helper.addMessage(
messages_parser.get_message_type(),
messages_parser.get_message_direction(),
messages_parser.get_phone_number_from(),
messages_parser.get_phone_number_to(),
messages_parser.get_message_date_time(),
messages_parser.get_message_read_status(),
messages_parser.get_message_subject(),
messages_parser.get_message_text(),
messages_parser.get_thread_id()
)
if (messages_parser.get_file_attachment() is not None):
file_attachments = ArrayList()
file_attachments.add(FileAttachment(current_case.getSleuthkitCase(), messages_db.getDBFile().getDataSource(), messages_parser.get_file_attachment()))
message_attachments = MessageAttachments(file_attachments, [])
helper.addAttachments(message_artifact, message_attachments)
messages_parser.close()
except SQLException as ex:
self._logger.log(Level.WARNING, "Error parsing the Line App Database for messages.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Error adding artifact to case database... case is not complete.
self._logger.log(Level.SEVERE,
"Error adding Line message artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Error posting notification to blackboard
self._logger.log(Level.WARNING,
"Error posting Line message artifacts to blackboard.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
class LineCallLogsParser(TskCallLogsParser):
"""
Parses out TSK_CALLLOG information from the Line database.
TSK_CALLLOG fields that are not in the line database are given
a default value inherited from the super class.
"""
def __init__(self, calllog_db):
super(LineCallLogsParser, self).__init__(calllog_db.runQuery(
"""
SELECT Substr(calls.call_type, -1) AS direction,
calls.start_time AS start_time,
calls.end_time AS end_time,
contact_book_w_groups.members AS group_members,
calls.caller_mid,
calls.voip_type AS call_type,
calls.voip_gc_media_type AS group_call_type
FROM (SELECT id,
Group_concat(M.m_id) AS members
FROM membership AS M
GROUP BY id
UNION
SELECT m_id,
NULL
FROM naver.contacts) AS contact_book_w_groups
JOIN call_history AS calls
ON calls.caller_mid = contact_book_w_groups.id
"""
)
)
self._OUTGOING_CALL_TYPE = "O"
self._INCOMING_CALL_TYPE = "I"
self._VIDEO_CALL_TYPE = "V"
self._AUDIO_CALL_TYPE = "A"
self._GROUP_CALL_TYPE = "G"
self._GROUP_VIDEO_CALL_TYPE = "VIDEO"
self._GROUP_AUDIO_CALL_TYPE = "AUDIO"
def get_call_direction(self):
direction = self.result_set.getString("direction")
if direction == self._OUTGOING_CALL_TYPE:
return self.OUTGOING_CALL
return self.INCOMING_CALL
def get_call_start_date_time(self):
try:
return long(self.result_set.getString("start_time")) / 1000
except ValueError as ve:
return super(LineCallLogsParser, self).get_call_start_date_time()
def get_call_end_date_time(self):
try:
return long(self.result_set.getString("end_time")) / 1000
except ValueError as ve:
return super(LineCallLogsParser, self).get_call_end_date_time()
def get_phone_number_to(self):
if self.get_call_direction() == self.OUTGOING_CALL:
group_members = self.result_set.getString("group_members")
if group_members is not None:
group_members = group_members.split(",")
return group_members
return self.result_set.getString("caller_mid")
return super(LineCallLogsParser, self).get_phone_number_to()
def get_phone_number_from(self):
if self.get_call_direction() == self.INCOMING_CALL:
return self.result_set.getString("caller_mid")
return super(LineCallLogsParser, self).get_phone_number_from()
def get_call_type(self):
call_type = self.result_set.getString("call_type")
if call_type == self._VIDEO_CALL_TYPE:
return self.VIDEO_CALL
if call_type == self._AUDIO_CALL_TYPE:
return self.AUDIO_CALL
if call_type == self._GROUP_CALL_TYPE:
g_type = self.result_set.getString("group_call_type")
if g_type == self._GROUP_VIDEO_CALL_TYPE:
return self.VIDEO_CALL
if g_type == self._GROUP_AUDIO_CALL_TYPE:
return self.AUDIO_CALL
return super(LineCallLogsParser, self).get_call_type()
class LineContactsParser(TskContactsParser):
"""
Parses out TSK_CONTACT information from the Line database.
TSK_CONTACT fields that are not in the line database are given
a default value inherited from the super class.
"""
def __init__(self, contact_db, analyzer):
super(LineContactsParser, self).__init__(contact_db.runQuery(
"""
SELECT m_id,
server_name
FROM contacts
"""
)
)
self._PARENT_ANALYZER = analyzer
def get_contact_name(self):
return self.result_set.getString("server_name")
def get_other_attributes(self):
return [BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID,
self._PARENT_ANALYZER,
self.result_set.getString("m_id"))]
class LineMessagesParser(TskMessagesParser):
"""
Parse out TSK_MESSAGE information from the Line database.
TSK_MESSAGE fields that are not in the line database are given
a default value inherited from the super class.
"""
def __init__(self, message_db):
super(LineMessagesParser, self).__init__(message_db.runQuery(
"""
SELECT contact_book_w_groups.id,
contact_book_w_groups.members,
messages.from_mid,
messages.content,
messages.created_time,
messages.attachement_type,
messages.attachement_local_uri,
messages.status
FROM (SELECT id,
Group_concat(M.m_id) AS members
FROM membership AS M
GROUP BY id
UNION
SELECT m_id,
NULL
FROM contacts) AS contact_book_w_groups
JOIN chat_history AS messages
ON messages.chat_id = contact_book_w_groups.id
WHERE attachement_type != 6
"""
)
)
self._LINE_MESSAGE_TYPE = "Line Message"
        #From the limited test data, it appeared that incoming messages
        #were only associated with status 1, while statuses 3 and 7
        #were only associated with outgoing messages.
self._INCOMING_MESSAGE_TYPE = 1
def get_message_type(self):
return self._LINE_MESSAGE_TYPE
def get_message_date_time(self):
created_time = self.result_set.getString("created_time")
try:
#Get time in seconds (created_time is stored in ms from epoch)
return long(created_time) / 1000
except ValueError as ve:
return super(LineMessagesParser, self).get_message_date_time()
def get_message_text(self):
content = self.result_set.getString("content")
return content
def get_message_direction(self):
if self.result_set.getInt("status") == self._INCOMING_MESSAGE_TYPE:
return self.INCOMING
return self.OUTGOING
def get_phone_number_from(self):
if self.get_message_direction() == self.INCOMING:
from_mid = self.result_set.getString("from_mid")
if from_mid is not None:
return from_mid
return super(LineMessagesParser, self).get_phone_number_from()
def get_phone_number_to(self):
if self.get_message_direction() == self.OUTGOING:
group = self.result_set.getString("members")
if group is not None:
group = group.split(",")
return group
return self.result_set.getString("id")
return super(LineMessagesParser, self).get_phone_number_to()
def get_thread_id(self):
members = self.result_set.getString("members")
if members is not None:
return self.result_set.getString("id")
return super(LineMessagesParser, self).get_thread_id()
def get_file_attachment(self):
if (self.result_set.getString("attachement_local_uri") is None):
return None
# If "content:" in the beginning of the string we cannot determine at this point where a file resides. Ignoring for
# now unless data can be obtained to determine where the file may reside.
elif ("content:" in self.result_set.getString("attachement_local_uri")):
return None
else:
return self.result_set.getString("attachement_local_uri")
|
gcloud/tests/taskflow3/tasks/test_ensure_node_can_retry.py | brookylin/bk-sops | 881 | 12617191
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making ่้ฒธๆบไบPaaSๅนณๅฐ็คพๅบ็ (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from mock import patch, MagicMock, call
from django.test import TestCase
from gcloud.taskflow3.celery.tasks import _ensure_node_can_retry
class EnsureNodeCanRetryTestCase(TestCase):
def test_engine_ver_invalid(self):
self.assertRaises(ValueError, _ensure_node_can_retry, "node_id", 3)
def test_engine_v1_can_retry(self):
PipelineProcess = MagicMock()
PipelineProcess.objects.filter().exists = MagicMock(return_value=True)
PipelineProcess.objects.filter.reset_mock()
node_id = "node_id"
with patch("gcloud.taskflow3.celery.tasks.PipelineProcess", PipelineProcess):
can_retry = _ensure_node_can_retry(node_id, engine_ver=1)
self.assertTrue(can_retry)
PipelineProcess.objects.filter.assert_called_once_with(current_node_id=node_id, is_sleep=True)
def test_engine_v1_can_not_retry(self):
PipelineProcess = MagicMock()
PipelineProcess.objects.filter().exists = MagicMock(return_value=False)
PipelineProcess.objects.filter.reset_mock()
node_id = "node_id"
with patch("gcloud.taskflow3.celery.tasks.PipelineProcess", PipelineProcess):
can_retry = _ensure_node_can_retry(node_id, engine_ver=1)
self.assertFalse(can_retry)
PipelineProcess.objects.filter.assert_has_calls(
[
call(current_node_id="node_id", is_sleep=True),
call().exists(),
call(current_node_id="node_id", is_sleep=True),
call().exists(),
call(current_node_id="node_id", is_sleep=True),
call().exists(),
]
)
def test_engine_v2_can_retry(self):
BambooDjangoRuntime = MagicMock()
BambooDjangoRuntime().get_sleep_process_info_with_current_node_id = MagicMock(return_value=True)
node_id = "node_id"
with patch("gcloud.taskflow3.celery.tasks.BambooDjangoRuntime", BambooDjangoRuntime):
can_retry = _ensure_node_can_retry(node_id, engine_ver=2)
self.assertTrue(can_retry)
BambooDjangoRuntime().get_sleep_process_info_with_current_node_id.assert_called_once_with(node_id)
def test_engine_v2_can_not_retry(self):
BambooDjangoRuntime = MagicMock()
BambooDjangoRuntime().get_sleep_process_info_with_current_node_id = MagicMock(return_value=None)
node_id = "node_id"
with patch("gcloud.taskflow3.celery.tasks.BambooDjangoRuntime", BambooDjangoRuntime):
can_retry = _ensure_node_can_retry(node_id, engine_ver=2)
self.assertFalse(can_retry)
BambooDjangoRuntime().get_sleep_process_info_with_current_node_id.assert_has_calls(
[call(node_id), call(node_id), call(node_id)]
)
|
vocoders/base_vocoder.py | ishine/DiffSinger-1 | 288 | 12617192
import importlib
VOCODERS = {}
def register_vocoder(cls):
VOCODERS[cls.__name__.lower()] = cls
VOCODERS[cls.__name__] = cls
return cls
def get_vocoder_cls(hparams):
if hparams['vocoder'] in VOCODERS:
return VOCODERS[hparams['vocoder']]
else:
vocoder_cls = hparams['vocoder']
pkg = ".".join(vocoder_cls.split(".")[:-1])
cls_name = vocoder_cls.split(".")[-1]
vocoder_cls = getattr(importlib.import_module(pkg), cls_name)
return vocoder_cls
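# Usage sketch (vocoder names below are hypothetical): a key that is already registered,
# e.g. hparams = {'vocoder': 'HifiGAN'}, is returned straight from the VOCODERS registry
# populated by @register_vocoder, while a dotted path such as
# hparams = {'vocoder': 'vocoders.hifigan.HifiGAN'} is resolved by importing the
# "vocoders.hifigan" module and fetching its HifiGAN attribute.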
class BaseVocoder:
def spec2wav(self, mel):
"""
:param mel: [T, 80]
:return: wav: [T']
"""
raise NotImplementedError
@staticmethod
def wav2spec(wav_fn):
"""
:param wav_fn: str
:return: wav, mel: [T, 80]
"""
raise NotImplementedError
|
tensorflow/python/kernel_tests/gradient_correctness_test.py | connectthefuture/tensorflow | 680 | 12617204
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class GradientCorrectnessTest(tf.test.TestCase):
def testMultipleOutputChainedGradients(self):
with self.test_session() as sess:
x = tf.constant(1.0, dtype=tf.float32)
yexp = tf.exp(x)
yexplog = tf.log(yexp)
grads = tf.gradients([yexp, yexplog], [x])
grad_vals = sess.run(grads)
exp1_plus_one = (1.0 + np.exp(1.0)).astype(np.float32)
# [dexp(x)/dx + d(log(exp(x)))/dx] @ x=1 == exp(1) + 1
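      # At x = 1: d/dx exp(x) = exp(1) = e and d/dx log(exp(x)) = d/dx x = 1; tf.gradients
      # sums the gradients of both outputs w.r.t. x, giving e + 1.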
self.assertAllClose(grad_vals[0], exp1_plus_one)
if __name__ == '__main__':
tf.test.main()
|
pecos/xmc/xtransformer/predict.py | xeisberg/pecos | 288 | 12617228
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import argparse
import logging
import os
from pecos.utils import cli, logging_util, smat_util, torch_util
from pecos.utils.featurization.text.preprocess import Preprocessor
from pecos.xmc import PostProcessor
from .model import XTransformer
LOGGER = logging.getLogger(__name__)
def parse_arguments():
"""Parse predicting arguments"""
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"-x",
"--feat-path",
type=str,
metavar="PATH",
help="Path to the instance feature matrix.",
)
parser.add_argument(
"-t",
"--text-path",
type=str,
required=True,
metavar="PATH",
help="Path to the instance text file.",
)
parser.add_argument(
"-m",
"--model-folder",
type=str,
required=True,
metavar="PATH",
help="Path to load x-transformer model.",
)
parser.add_argument(
"-o",
"--save-pred-path",
type=str,
required=True,
metavar="PATH",
help="The path where the model predictions will be written.",
)
# ======= Other parameters ========
parser.add_argument(
"--batch-size",
default=32,
type=int,
metavar="INT",
help="Batch size per GPU.",
)
parser.add_argument(
"--max-pred-chunk",
default=10 ** 7,
metavar="INT",
type=int,
help="Max number of instances to predict on at once, set to avoid OOM. Set to None to predict on all instances at once. Default 10^7",
)
parser.add_argument(
"--only-topk",
default=None,
type=int,
metavar="INT",
help="override the topk specified in the ranker (default None to disable overriding) ",
)
parser.add_argument(
"-b",
"--beam-size",
type=int,
default=None,
metavar="INT",
help="override the beam size specified in the ranker (default None to disable overriding)",
)
parser.add_argument(
"-pp",
"--post-processor",
type=str,
choices=PostProcessor.valid_list(),
default=None,
metavar="STR",
help="override the post processor specified in the ranker (default None to disable overriding)",
)
parser.add_argument(
"--use-gpu",
type=cli.str2bool,
metavar="[true/false]",
default=True,
help="if true, use CUDA if available. Default true",
)
parser.add_argument(
"--batch-gen-workers",
type=int,
metavar="INT",
default=4,
help="number of CPUs to use for batch generation",
)
parser.add_argument(
"--threads",
type=int,
default=-1,
metavar="THREADS",
help="number of threads to use for linear models(default -1 to denote all the CPUs)",
)
parser.add_argument(
"--seed", type=int, default=0, metavar="INT", help="random seed for initialization"
)
parser.add_argument(
"--verbose-level",
type=int,
choices=logging_util.log_levels.keys(),
default=1,
metavar="INT",
help=f"the verbose level, {', '.join([str(k) + ' for ' + logging.getLevelName(v) for k, v in logging_util.log_levels.items()])}, default 1",
)
return parser
def do_predict(args):
"""Predict with XTransformer and save the result.
Args:
args (argparse.Namespace): Command line arguments parsed by `parser.parse_args()`
"""
if os.path.isdir(args.save_pred_path):
args.save_pred_path = os.path.join(args.save_pred_path, "P.npz")
torch_util.set_seed(args.seed)
xtf = XTransformer.load(args.model_folder)
# load instance feature and text
if args.feat_path:
X_feat = smat_util.load_matrix(args.feat_path)
else:
X_feat = None
X_text = Preprocessor.load_data_from_file(args.text_path, label_text_path=None, text_pos=0)[
"corpus"
]
P_matrix = xtf.predict(
X_text,
X_feat=X_feat,
batch_size=args.batch_size,
batch_gen_workers=args.batch_gen_workers,
use_gpu=args.use_gpu,
beam_size=args.beam_size,
only_topk=args.only_topk,
post_processor=args.post_processor,
max_pred_chunk=args.max_pred_chunk,
threads=args.threads,
)
smat_util.save_matrix(args.save_pred_path, P_matrix)
if __name__ == "__main__":
parser = parse_arguments()
args = parser.parse_args()
logging_util.setup_logging_config(level=args.verbose_level)
do_predict(args)
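# Example invocation (paths are placeholders, not files shipped with this package):
#   python -m pecos.xmc.xtransformer.predict \
#       -t test.txt -x X.tst.npz -m ./xtf_model -o ./P.npz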
|
maskrcnn_benchmark/data/datasets/flickr.py | microsoft/GLIP | 295 | 12617250
import torch
import torchvision
import torch.utils.data as data
from maskrcnn_benchmark.data.datasets.modulated_coco import ModulatedDataset
class FlickrDataset(ModulatedDataset):
pass
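# FlickrDataset deliberately adds nothing on top of ModulatedDataset: the Flickr grounding
# annotations are presumably handled by the shared modulated-COCO loading path, and this
# subclass mainly gives the dataset a distinct name for configuration purposes.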
|
4_efficientdet/lib/train.py | deepchatterjeevns/Monk_Object_Detection | 649 | 12617296
import os
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from src.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater
from src.model import EfficientDet
from tensorboardX import SummaryWriter
import shutil
import numpy as np
from tqdm.autonotebook import tqdm
def get_args():
parser = argparse.ArgumentParser(
"EfficientDet: Scalable and Efficient Object Detection implementation by Signatrix GmbH")
parser.add_argument("--image_size", type=int, default=512, help="The common width and height for all images")
parser.add_argument("--batch_size", type=int, default=8, help="The number of images per batch")
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument('--alpha', type=float, default=0.25)
parser.add_argument('--gamma', type=float, default=1.5)
parser.add_argument("--num_epochs", type=int, default=500)
parser.add_argument("--test_interval", type=int, default=1, help="Number of epoches between testing phases")
parser.add_argument("--es_min_delta", type=float, default=0.0,
help="Early stopping's parameter: minimum change loss to qualify as an improvement")
parser.add_argument("--es_patience", type=int, default=0,
help="Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.")
parser.add_argument("--data_path", type=str, default="data/COCO", help="the root folder of dataset")
parser.add_argument("--log_path", type=str, default="tensorboard/signatrix_efficientdet_coco")
parser.add_argument("--saved_path", type=str, default="trained_models")
args = parser.parse_args()
return args
def train(opt):
num_gpus = 1
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
torch.cuda.manual_seed(123)
else:
torch.manual_seed(123)
training_params = {"batch_size": opt.batch_size * num_gpus,
"shuffle": True,
"drop_last": True,
"collate_fn": collater,
"num_workers": 12}
test_params = {"batch_size": opt.batch_size,
"shuffle": False,
"drop_last": False,
"collate_fn": collater,
"num_workers": 12}
training_set = CocoDataset(root_dir=opt.data_path, set="train2017",
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
training_generator = DataLoader(training_set, **training_params)
test_set = CocoDataset(root_dir=opt.data_path, set="val2017",
transform=transforms.Compose([Normalizer(), Resizer()]))
test_generator = DataLoader(test_set, **test_params)
model = EfficientDet(num_classes=training_set.num_classes())
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
writer = SummaryWriter(opt.log_path)
if torch.cuda.is_available():
model = model.cuda()
model = nn.DataParallel(model)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
best_loss = 1e5
best_epoch = 0
model.train()
num_iter_per_epoch = len(training_generator)
for epoch in range(opt.num_epochs):
model.train()
# if torch.cuda.is_available():
# model.module.freeze_bn()
# else:
# model.freeze_bn()
epoch_loss = []
progress_bar = tqdm(training_generator)
for iter, data in enumerate(progress_bar):
try:
optimizer.zero_grad()
if torch.cuda.is_available():
cls_loss, reg_loss = model([data['img'].cuda().float(), data['annot'].cuda()])
else:
cls_loss, reg_loss = model([data['img'].float(), data['annot']])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
loss = cls_loss + reg_loss
if loss == 0:
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
optimizer.step()
epoch_loss.append(float(loss))
total_loss = np.mean(epoch_loss)
progress_bar.set_description(
'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Batch loss: {:.5f} Total loss: {:.5f}'.format(
epoch + 1, opt.num_epochs, iter + 1, num_iter_per_epoch, cls_loss, reg_loss, loss,
total_loss))
writer.add_scalar('Train/Total_loss', total_loss, epoch * num_iter_per_epoch + iter)
writer.add_scalar('Train/Regression_loss', reg_loss, epoch * num_iter_per_epoch + iter)
writer.add_scalar('Train/Classfication_loss (focal loss)', cls_loss, epoch * num_iter_per_epoch + iter)
except Exception as e:
print(e)
continue
scheduler.step(np.mean(epoch_loss))
if epoch % opt.test_interval == 0:
model.eval()
loss_regression_ls = []
loss_classification_ls = []
for iter, data in enumerate(test_generator):
with torch.no_grad():
if torch.cuda.is_available():
cls_loss, reg_loss = model([data['img'].cuda().float(), data['annot'].cuda()])
else:
cls_loss, reg_loss = model([data['img'].float(), data['annot']])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
loss_classification_ls.append(float(cls_loss))
loss_regression_ls.append(float(reg_loss))
cls_loss = np.mean(loss_classification_ls)
reg_loss = np.mean(loss_regression_ls)
loss = cls_loss + reg_loss
print(
'Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'.format(
epoch + 1, opt.num_epochs, cls_loss, reg_loss,
np.mean(loss)))
writer.add_scalar('Test/Total_loss', loss, epoch)
writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss, epoch)
if loss + opt.es_min_delta < best_loss:
best_loss = loss
best_epoch = epoch
torch.save(model, os.path.join(opt.saved_path, "signatrix_efficientdet_coco.pth"))
dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
if torch.cuda.is_available():
dummy_input = dummy_input.cuda()
if isinstance(model, nn.DataParallel):
model.module.backbone_net.model.set_swish(memory_efficient=False)
torch.onnx.export(model.module, dummy_input,
os.path.join(opt.saved_path, "signatrix_efficientdet_coco.onnx"),
verbose=False)
model.module.backbone_net.model.set_swish(memory_efficient=True)
else:
model.backbone_net.model.set_swish(memory_efficient=False)
torch.onnx.export(model, dummy_input,
os.path.join(opt.saved_path, "signatrix_efficientdet_coco.onnx"),
verbose=False)
model.backbone_net.model.set_swish(memory_efficient=True)
# Early stopping
if epoch - best_epoch > opt.es_patience > 0:
print("Stop training at epoch {}. The lowest loss achieved is {}".format(epoch, loss))
break
writer.close()
if __name__ == "__main__":
opt = get_args()
train(opt)
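# Example invocation (values are placeholders; see get_args() above for the defaults):
#   python train.py --data_path data/COCO --batch_size 8 --num_epochs 500 --lr 1e-4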
|
pdf2txt.py | anandurchandran007/Automated-Resume-Screening-System | 304 | 12617297
import sys
import logging
import six
import pdfminer.settings
pdfminer.settings.STRICT = False
import pdfminer.high_level
import pdfminer.layout
from pdfminer.image import ImageWriter
def extract_text(files=[], outfile='-',
_py2_no_more_posargs=None,
no_laparams=False, all_texts=None, detect_vertical=None,
word_margin=None, char_margin=None, line_margin=None, boxes_flow=None,
output_type='text', codec='utf-8', strip_control=False,
maxpages=0, page_numbers=None, password="", scale=1.0, rotation=0,
layoutmode='normal', output_dir=None, debug=False,
disable_caching=False, **other):
if _py2_no_more_posargs is not None:
raise ValueError("Many args")
if not files:
raise ValueError("Enter Filename")
if not no_laparams:
laparams = pdfminer.layout.LAParams()
for param in ("all_texts", "detect_vertical", "word_margin", "char_margin", "line_margin", "boxes_flow"):
paramv = locals().get(param, None)
if paramv is not None:
setattr(laparams, param, paramv)
else:
laparams = None
imagewriter = None
if output_dir:
imagewriter = ImageWriter(output_dir)
if output_type == "text" and outfile != "-":
for override, alttype in ( (".htm", "html"),(".html", "html"),(".xml", "xml"),(".tag", "tag") ):
if outfile.endswith(override):
output_type = alttype
if outfile == "-":
outfp = sys.stdout
if outfp.encoding is not None:
codec = 'utf-8'
else:
outfp = open(outfile, "wb")
for fname in files:
with open(fname, "rb") as fp:
pdfminer.high_level.extract_text_to_fp(fp, **locals())
fp.close()
return outfp
def main(args=None):
import argparse
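    # The original parser construction is missing from this copy of pdf2txt.py; the block
    # below is a minimal reconstruction covering only the attributes read in main(), so the
    # option names and defaults are assumptions rather than the upstream definitions.
    P = argparse.ArgumentParser(description="Extract text from PDF files.")
    P.add_argument("files", type=str, nargs="+", help="PDF files to process")
    P.add_argument("-o", "--outfile", type=str, default="-", help="output file (default: stdout)")
    P.add_argument("-t", "--output_type", type=str, default="text", help="text|html|xml|tag")
    P.add_argument("-c", "--codec", type=str, default="utf-8", help="output text encoding")
    P.add_argument("-P", "--password", type=str, default="", help="PDF password")
    P.add_argument("-O", "--output_dir", type=str, default=None, help="directory for extracted images")
    P.add_argument("-p", "--pagenos", type=str, default=None, help="comma-separated page numbers (1-based)")
    P.add_argument("--page-numbers", type=int, default=None, nargs="+", help="space-separated page numbers (1-based)")
    P.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug output")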
A = P.parse_args(args=args)
if A.page_numbers:
A.page_numbers = set([x-1 for x in A.page_numbers])
if A.pagenos:
A.page_numbers = set([int(x)-1 for x in A.pagenos.split(",")])
imagewriter = None
if A.output_dir:
imagewriter = ImageWriter(A.output_dir)
if six.PY2 and sys.stdin.encoding:
A.password = A.password.decode(sys.stdin.encoding)
if A.output_type == "text" and A.outfile != "-":
for override, alttype in ( (".htm", "html"),(".html", "html"),(".xml", "xml" ),(".tag", "tag" ) ):
if A.outfile.endswith(override):
A.output_type = alttype
if A.outfile == "-":
outfp = sys.stdout
if outfp.encoding is not None:
A.codec = 'utf-8'
else:
outfp = open(A.outfile, "wb")
outfp = extract_text(**vars(A))
outfp.close()
return 0
if __name__ == '__main__': sys.exit(main())
|
mobile_deployment/pytorch/InvBlock/models/imagenet/i2rnetv2.py | zhoudaquan/rethinking_bottleneck_structure_code_release | 153 | 12617301
"""
Creates I2RNet / I2RNetV2 models, adapted from the MobileNetV2 implementation defined in:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>. (2018).
MobileNetV2: Inverted Residuals and Linear Bottlenecks
arXiv preprint arXiv:1801.04381.
import from https://github.com/tonylins/pytorch-mobilenet-v2
"""
import torch.nn as nn
import math
__all__ = ['i2rnetv2',]
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
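# Worked examples for _make_divisible with divisor=8: _make_divisible(33.6, 8) == 32 and
# _make_divisible(12, 8) == 16 -- the value is rounded to the nearest multiple of 8, then
# bumped up one step if rounding down would drop more than 10% of the original channels.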
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def group_conv_1x1_bn(inp, oup, expand_ratio):
hidden_dim = oup // expand_ratio
return nn.Sequential(
nn.Conv2d(inp, hidden_dim, 1, 1, 0, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def BlockTransition(inp, oup, stride=1, relu=True):
if stride == 2:
conv = nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
nn.Conv2d(oup, oup, 3, stride, 1, groups=oup, bias=False),
nn.BatchNorm2d(oup),
#nn.ReLU6(inplace=True)
)
else:
if relu:
conv = nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
else:
conv = nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup)
)
return conv
class I2RBlock(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, transition=False):
super(I2RBlock, self).__init__()
assert stride in [1, 2]
hidden_dim = inp // expand_ratio
#self.relu = nn.ReLU6(inplace=True)
self.identity = False
self.expand_ratio = expand_ratio
if expand_ratio == 2:
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(oup, oup, 3, stride, 1, groups=oup, bias=False),
nn.BatchNorm2d(oup),
)
elif inp != oup and stride == 1 or transition == True:
hidden_dim = oup // expand_ratio
self.conv = nn.Sequential(
# pw-linear
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
)
elif inp != oup and stride == 2:
hidden_dim = oup // expand_ratio
self.conv = nn.Sequential(
# pw-linear
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(oup, oup, 3, stride, 1, groups=oup, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.identity = True
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, 1, 1, groups=oup, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
#nn.ReLU6(inplace=True),
# pw
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(oup, oup, 3, 1, 1, groups=oup, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
out = self.conv(x)
if self.identity:
return out + x
else:
return out
class I2RNet(nn.Module):
def __init__(self, num_classes=1000, width_mult=1.):
super(I2RNet, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[2, 96, 1, 2],
[4, 96, 2, 1],
[4, 128, 1, 1],
[4, 128, 2, 2],
[4, 256, 1, 1],
[4, 256, 2, 2],
[4, 384, 4, 1],
[4, 640, 1, 1],
[4, 640, 2, 2],
[4,1280, 2, 1],
]
#self.cfgs = [
# # t, c, n, s
# [1, 16, 1, 1],
# [4, 24, 2, 2],
# [4, 32, 3, 2],
# [4, 64, 3, 2],
# [4, 96, 4, 1],
# [4, 160, 3, 2],
# [4, 320, 1, 1],
#]
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = I2RBlock
for t, c, n, s in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
layers.append(block(input_channel, output_channel, s, t))
input_channel = output_channel
for i in range(n-1):
layers.append(block(input_channel, output_channel, 1, t))
input_channel = output_channel
self.features = nn.Sequential(*layers)
# building last several layers
input_channel = output_channel
output_channel = _make_divisible(input_channel, 4) # if width_mult == 0.1 else 8) if width_mult > 1.0 else input_channel
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(output_channel, num_classes)
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
#x = self.conv(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class I2RNetV2(nn.Module):
def __init__(self, num_classes=1000, width_mult=1.):
super(I2RNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[2, 96, 1, 2, 0],
[4, 96, 1, 1, 0],
[4, 128, 3, 2, 0],
[4, 256, 2, 2, 0],
[4, 384, 2, 1, 0],
[4, 384, 2, 1, 1],
[4, 640, 2, 2, 0],
[4,1280, 2, 1, 0],
]
#self.cfgs = [
# # t, c, n, s
# [1, 16, 1, 1],
# [4, 24, 2, 2],
# [4, 32, 3, 2],
# [4, 64, 3, 2],
# [4, 96, 4, 1],
# [4, 160, 3, 2],
# [4, 320, 1, 1],
#]
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = I2RBlock
for t, c, n, s, b in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
layers.append(block(input_channel, output_channel, s, t, b == 1))
input_channel = output_channel
for i in range(n-1):
layers.append(block(input_channel, output_channel, 1, t))
input_channel = output_channel
self.features = nn.Sequential(*layers)
# building last several layers
input_channel = output_channel
output_channel = _make_divisible(input_channel, 4) # if width_mult == 0.1 else 8) if width_mult > 1.0 else input_channel
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(output_channel, num_classes)
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
#x = self.conv(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def i2rnet(**kwargs):
"""
    Constructs an I2RNet model
"""
return I2RNet(**kwargs)
def i2rnetv2(**kwargs):
"""
    Constructs an I2RNetV2 model
"""
return I2RNetV2(**kwargs)
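# Minimal usage sketch (the input resolution is an assumption; the adaptive average pool
# accepts any size the strided stages can downsample):
#   model = i2rnetv2(num_classes=1000, width_mult=1.0)
#   logits = model(torch.randn(1, 3, 224, 224))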
|
QSTK/qstkfeat/features.py | paulopatto/QuantSoftwareToolkit | 339 | 12617305
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Nov 7, 2011
@author: <NAME>
@contact: <EMAIL>
@summary: File containing various feature functions
'''
#''' Python imports '''
import random
#''' 3rd Party Imports '''
import pandas as pand
import numpy as np
import datetime as dt
#''' QSTK Imports '''
import QSTK.qstkutil.tsutil as tsu
from QSTK.qstkutil import DataAccess as da
import QSTK.qstkutil.qsdateutil as du
def featMomentum(dData, lLookback=20, b_human=False ):
'''
@summary: N day cumulative return (based on 1) indicator
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
dfPrice = dData['close'].copy()
#Calculate Returns
tsu.returnize0(dfPrice.values)
#Calculate rolling sum
dfRet = pand.rolling_sum(dfPrice, lLookback)
return dfRet
def featHiLow(dData, lLookback=20, b_human=False ):
'''
@summary: 1 represents a high for the lookback -1 represents a low
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
dfPrice = dData['close']
#Find Max for each price for lookback
maxes = pand.rolling_max(dfPrice, lLookback, 1)
#Find Min
mins = pand.rolling_min(dfPrice, lLookback, 1)
#Find Range
ranges = maxes - mins
#Calculate (price - min) * 2 / range -1
dfRet = (((dfPrice-mins)*2)/ranges)-1
return dfRet
def featDate(dData, b_human=False ):
'''
    @summary: Returns -1 for Jan 1st and 1 for Dec 31st
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
dfPrice = dData['close']
dfRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=np.zeros(dfPrice.shape) )
for sStock in dfPrice.columns:
tsPrice = dfPrice[sStock]
tsRet = dfRet[sStock]
#'' Loop over time '''
for i in range(len(tsPrice.index)):
#get current date
today = tsPrice.index[i]
#get days since January 1st
days = today - dt.datetime(today.year, 1, 1)
# multiply by 2, divide by 365, subtract 1
tsRet[i] = float(days.days * 2) / 365 - 1
return dfRet
def featOption(dData, b_human=False ):
'''
@summary: Returns 1 if option close is today, -1 if it was yesterday
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
dfPrice = dData['close']
dfRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=np.zeros(dfPrice.shape) )
for sStock in dfPrice.columns:
tsPrice = dfPrice[sStock]
tsRet = dfRet[sStock]
#'' Loop over time '''
for i in range(len(tsPrice.index)):
#get current date
today = tsPrice.index[i]
#get last option close
last_close = du.getLastOptionClose(today, tsPrice.index)
#get next option close
next_close = du.getNextOptionClose(today, tsPrice.index)
#get days between
days_between = next_close - last_close
#get days since last close
days = today - last_close
# multiply by 2, divide by 365, subtract 1
tsRet[i] = float(days.days * 2) / days_between.days - 1
return dfRet
def featMA( dData, lLookback=30, bRel=True, b_human=False ):
'''
@summary: Calculate moving average
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
dfPrice = dData['close']
dfRet = pand.rolling_mean(dfPrice, lLookback)
if bRel:
dfRet = dfRet / dfPrice
if b_human:
data2 = dfRet * dData['close']
data3 = pand.DataFrame({"Raw":data2[data2.columns[0]]})
for sym in dfRet.columns:
if sym != '$SPX' and sym != '$VIX':
data3[sym + " Moving Average"] = data2[sym]
data3[sym] = dData['close'][sym]
del data3['Raw']
return data3
return dfRet
def featEMA( dData, lLookback=20, bRel=True, b_human=False ):
'''
@summary: Calculate exponential moving average
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
dfPrice = dData['close']
dfRet = pand.ewma(dfPrice, span=lLookback)
if bRel:
dfRet = dfRet / dfPrice;
if b_human:
data2 = dfRet*dData['close']
data3 = pand.DataFrame({"Raw":data2[data2.columns[0]]})
for sym in dfRet.columns:
if sym != '$SPX' and sym != '$VIX':
data3[sym + " Moving Average"] = data2[sym]
data3[sym] = dData['close'][sym]
del data3['Raw']
return data3
return dfRet
def featSTD( dData, lLookback=20, bRel=True, b_human=False ):
'''
@summary: Calculate standard deviation
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
dfPrice = dData['close'].copy()
tsu.returnize1(dfPrice.values)
dfRet = pand.rolling_std(dfPrice, lLookback)
if bRel:
dfRet = dfRet / dfPrice
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRet
def featRSI( dData, lLookback=14, b_human=False):
'''
@summary: Calculate RSI
@param dData: Dictionary of data to use
@param lLookback: Number of days to look in the past, 14 is standard
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
# create deltas per day
dfDelta = dData['close'].copy()
dfDelta.ix[1:,:] -= dfDelta.ix[:-1,:].values
dfDelta.ix[0,:] = np.NAN
dfDeltaUp = dfDelta
dfDeltaDown = dfDelta.copy()
# seperate data into positive and negative for easy calculations
for sColumn in dfDeltaUp.columns:
tsColDown = dfDeltaDown[sColumn]
tsColDown[tsColDown >= 0] = 0
tsColUp = dfDeltaUp[sColumn]
tsColUp[tsColUp <= 0] = 0
# Note we take abs() of negative values, all should be positive now
dfRolUp = pand.rolling_mean(dfDeltaUp, lLookback, min_periods=1)
dfRolDown = pand.rolling_mean(dfDeltaDown, lLookback, min_periods=1).abs()
# relative strength
dfRS = dfRolUp / dfRolDown
dfRSI = 100.0 - (100.0 / (1.0 + dfRS))
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRSI
def featDrawDown( dData, lLookback=30, b_human=False):
'''
@summary: Calculate Drawdown for the stock
@param dData: Dictionary of data to use
@param lLookback: Days to look back
@return: DataFrame array containing values
@param b_human: if true return dataframe to plot
@warning: Drawdown and RunUp can depend heavily on sample period
'''
dfPrice = dData['close']
    dfMax = pand.rolling_max(dfPrice, lLookback)
    dfRet = (dfMax - dfPrice) / dfMax
    if b_human:
        for sym in dData['close']:
            x = 1000 / dData['close'][sym][0]
            dData['close'][sym] = dData['close'][sym] * x
        return dData['close']
    return dfRet
def featRunUp( dData, lLookback=30, b_human=False ):
'''
@summary: CalculateRunup for the stock
@param dData: Dictionary of data to use
@param lLookback: Number of days to calculate min over
@return: DataFrame array containing feature values
@param b_human: if true return dataframe to plot
@warning: Drawdown and RunUp can depend heavily on when the sample starts
'''
dfPrice = dData['close']
    dfMin = pand.rolling_min(dfPrice, lLookback)
    dfRet = dfPrice / dfMin
    if b_human:
        for sym in dData['close']:
            x = 1000 / dData['close'][sym][0]
            dData['close'][sym] = dData['close'][sym] * x
        return dData['close']
    return dfRet
def featVolumeDelta( dData, lLookback=30, b_human=False ):
'''
@summary: Calculate moving average
@param dData: Dictionary of data to use
@param lLookback: Number of days to use for MA
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
dfVolume = dData['volume']
dfRet = pand.rolling_mean(dfVolume, lLookback)
dfRet /= dfVolume
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRet
def featAroon( dData, bDown=False, lLookback=25, b_human=False ):
'''
@summary: Calculate Aroon - indicator indicating days since a 25-day
high/low, weighted between 0 and 100
@param dData: Dictionary of data to use
@param bDown: If false, calculates aroonUp (high), else aroonDown (lows)
@param lLookback: Days to lookback to calculate high/low from
@param b_human: if true return dataframe to plot
@return: DataFrame array containing feature values
'''
dfPrice = dData['close']
#Feature DataFrame will be 1:1, we can use the price as a template
dfRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns,
data=np.zeros(dfPrice.shape) )
#Loop through time
for i in range(dfPrice.shape[0]):
if( (i-lLookback) < 0 ):
dfRet.ix[i,:] = np.NAN
else:
if bDown:
dfRet.ix[i,:] = dfPrice.values[i:(i-lLookback):-1,:].argmin(
axis=0)
else:
dfRet.ix[i,:] = dfPrice.values[i:(i-lLookback):-1,:].argmax(
axis=0)
dfRet = ((lLookback - 1.) - dfRet) / (lLookback - 1.) * 100.
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRet
def featAroonDown( dData, lLookback=25, b_human=False ):
'''
@summary: Wrapper to call aroon with flag = true
'''
return featAroon(dData, bDown=True, lLookback=lLookback, b_human=b_human)
def featStochastic( dData, lLookback=14, bFast=True, lMA=3, b_human=False ):
'''
@summary: Calculate stochastic oscillator - indicates what range of recent low-high spread we are in.
@param dData: Dictionary of data to use
@param bFast: If false, do slow stochastics, 3 day MA, if not use fast, no MA
@param b_human: if true return dataframe to plot
@return: DataFrame array containing feature values
'''
dfLow = dData['low']
dfHigh = dData['high']
dfPrice = dData['close']
#''' Loop through stocks '''
dfLows = pand.rolling_min(dfLow, lLookback)
dfHighs = pand.rolling_max(dfHigh, lLookback)
dfStoch = (dfPrice - dfLows) / (dfHighs - dfLows)
#''' For fast we just take the stochastic value, slow we need 3 day MA '''
if not bFast:
dfStoch = pand.rolling_mean(dfStoch, lMA)
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfStoch
def featBeta( dData, lLookback=14, sMarket='$SPX', b_human=False ):
'''
@summary: Calculate beta relative to a given stock/index.
@param dData: Dictionary of data to use
@param sStock: Stock to calculate beta relative to
@param b_human: if true return dataframe to plot
@return: DataFrame array containing feature values
'''
dfPrice = dData['close']
#''' Calculate returns '''
dfRets = dfPrice.copy()
tsu.returnize1(dfRets.values)
tsMarket = dfRets[sMarket]
dfRet = pand.rolling_cov(tsMarket, dfRets, lLookback)
dfRet /= dfRet[sMarket]
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRet
def featBollinger( dData, lLookback=20, b_human=False ):
'''
@summary: Calculate bollinger position as a function of std deviations.
@param dData: Dictionary of data to use
@param lLookback: Number of days to calculate moving average over
@param b_human: if true return dataframe to plot
@return: DataFrame array containing feature values
'''
if b_human:
dfPrice = dData['close']
nstdsRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=np.zeros(dfPrice.shape) )
#average minus standard deviation
pstdsRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=np.zeros(dfPrice.shape) )
data3 = pand.DataFrame({"Raw":dfPrice[dfPrice.columns[0]]})
for sym in dfPrice.columns:
if sym != '$SPX' and sym != '$VIX':
tsPrice = dfPrice[sym]
nstdRet = nstdsRet[sym]
pstdRet = pstdsRet[sym]
for i in range(len(tsPrice.index)):
if i < lLookback - 1:
nstdRet[i] = float('nan')
pstdRet[i] = float('nan')
continue
fAvg = np.average( tsPrice[ i-(lLookback-1):i+1 ] )
fStd = np.std( tsPrice[ i-(lLookback-1):i+1 ] )
pstdRet[i] = fAvg+2.0*fStd
nstdRet[i] = fAvg-2.0*fStd
data3[sym] = dfPrice[sym]
data3[sym + " Lower"] = nstdsRet[sym]
data3[sym + " Upper"] = pstdsRet[sym]
del data3['Raw']
return data3
else:
dfPrice = dData['close']
#''' Feature DataFrame will be 1:1, we can use the price as a template '''
dfRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=np.zeros(dfPrice.shape) )
#''' Loop through stocks '''
dfAvg = pand.rolling_mean(dfPrice, lLookback)
dfStd = pand.rolling_std(dfPrice, lLookback)
return (dfPrice - dfAvg) / (2.0*dfStd)
def featCorrelation( dData, lLookback=20, sRel='$SPX', b_human=False ):
'''
@summary: Calculate correlation of two stocks.
@param dData: Dictionary of data to use
@param lLookback: Number of days to calculate moving average over
@param b_human: if true return dataframe to plot
@return: DataFrame array containing feature values
'''
dfPrice = dData['close']
if sRel not in dfPrice.columns:
raise KeyError( "%s not found in data provided to featCorrelation"%sRel )
#''' Calculate returns '''
naRets = dfPrice.values.copy()
tsu.returnize1(naRets)
dfHistReturns = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=naRets )
#''' Feature DataFrame will be 1:1, we can use the price as a template '''
dfRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns, data=np.zeros(dfPrice.shape) )
#''' Loop through stocks '''
for sStock in dfHistReturns.columns:
tsHistReturns = dfHistReturns[sStock]
tsRelativeReturns = dfHistReturns[sRel]
tsRet = dfRet[sStock]
#''' Loop over time '''
for i in range(len(tsHistReturns.index)):
#''' NaN if not enough data to do lookback '''
if i < lLookback - 1:
tsRet[i] = float('nan')
continue
naCorr = np.corrcoef( tsHistReturns[ i-(lLookback-1):i+1 ], tsRelativeReturns[ i-(lLookback-1):i+1 ] )
tsRet[i] = naCorr[0,1]
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRet
def featPrice(dData, b_human=False):
'''
@summary: Price feature
@param dData: Dictionary of data to use
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dData['close']
def featVolume(dData, b_human=False):
'''
@summary: Volume feature
@param dData: Dictionary of data to use
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dData['volume']
def featRand( dData, b_human=False ):
'''
@summary: Random feature - used for robustness testing
@param dData: Dictionary of data to use
@param b_human: if true return dataframe to plot
@return: DataFrame array containing values
'''
dfPrice = dData['close']
#''' Feature DataFrame will be 1:1, we can use the price as a template '''
dfRet = pand.DataFrame( index=dfPrice.index, columns=dfPrice.columns,
data=np.random.randn(*dfPrice.shape) )
if b_human:
for sym in dData['close']:
x=1000/dData['close'][sym][0]
dData['close'][sym]=dData['close'][sym]*x
return dData['close']
return dfRet
if __name__ == '__main__':
pass
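# Minimal usage sketch: each feature above takes the dData dictionary of DataFrames (keys
# such as 'close', 'high', 'low', 'volume' as used above; rows are timestamps, columns are
# symbols) and returns a DataFrame of the same shape, e.g.
#   dfRSI = featRSI(dData, lLookback=14)
#   dfBoll = featBollinger(dData, lLookback=20)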
|
image/docker/schema2/config.py | giuseppe/quay | 2,027 | 12617317
"""
Implements validation and conversion for the Schema2 config JSON.
Example:
{
"architecture": "amd64",
"config": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"HTTP_PROXY=http:\/\/localhost:8080",
"http_proxy=http:\/\/localhost:8080",
"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
],
"Cmd": [
"sh"
],
"Image": "",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
}
},
"container": "b7a43694b435c8e9932615643f61f975a9213e453b15cd6c2a386f144a2d2de9",
"container_config": {
"Hostname": "b7a43694b435",
"Domainname": "",
"User": "",
"AttachStdin": true,
"AttachStdout": true,
"AttachStderr": true,
"Tty": true,
"OpenStdin": true,
"StdinOnce": true,
"Env": [
"HTTP_PROXY=http:\/\/localhost:8080",
"http_proxy=http:\/\/localhost:8080",
"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
],
"Cmd": [
"sh"
],
"Image": "somenamespace\/somerepo",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
}
},
"created": "2018-04-16T10:41:19.079522722Z",
"docker_version": "17.09.0-ce",
"history": [
{
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "\/bin\/sh -c #(nop) ADD file:9e4ca21cbd24dc05b454b6be21c7c639216ae66559b21ba24af0d665c62620dc in \/ "
},
{
"created": "2018-04-03T18:37:09.613317719Z",
"created_by": "\/bin\/sh -c #(nop) CMD [\"sh\"]",
"empty_layer": true
},
{
"created": "2018-04-16T10:37:44.418262777Z",
"created_by": "sh"
},
{
"created": "2018-04-16T10:41:19.079522722Z",
"created_by": "sh"
}
],
"os": "linux",
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:3e596351c689c8827a3c9635bc1083cff17fa4a174f84f0584bd0ae6f384195b",
"sha256:4552be273c71275a88de0b8c8853dcac18cb74d5790f5383d9b38d4ac55062d5",
"sha256:1319c76152ca37fbeb7fb71e0ffa7239bc19ffbe3b95c00417ece39d89d06e6e"
]
}
}
"""
import copy
import json
import hashlib
from collections import namedtuple
from jsonschema import validate as validate_schema, ValidationError
from dateutil.parser import parse as parse_date
from digest import digest_tools
from image.shared import ManifestException
from util.bytes import Bytes
DOCKER_SCHEMA2_CONFIG_HISTORY_KEY = "history"
DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY = "rootfs"
DOCKER_SCHEMA2_CONFIG_CREATED_KEY = "created"
DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY = "created_by"
DOCKER_SCHEMA2_CONFIG_COMMENT_KEY = "comment"
DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY = "author"
DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY = "empty_layer"
DOCKER_SCHEMA2_CONFIG_TYPE_KEY = "type"
LayerHistory = namedtuple(
"LayerHistory",
["created", "created_datetime", "command", "is_empty", "author", "comment", "raw_entry"],
)
class MalformedSchema2Config(ManifestException):
"""
Raised when a config fails an assertion that should be true according to the Docker Manifest
v2.2 Config Specification.
"""
pass
class DockerSchema2Config(object):
METASCHEMA = {
"type": "object",
"description": "The container configuration found in a schema 2 manifest",
"required": [DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY],
"properties": {
DOCKER_SCHEMA2_CONFIG_HISTORY_KEY: {
"type": "array",
"description": "The history used to create the container image",
"items": {
"type": "object",
"properties": {
DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY: {
"type": "boolean",
"description": "If present, this layer is empty",
},
DOCKER_SCHEMA2_CONFIG_CREATED_KEY: {
"type": "string",
"description": "The date/time that the layer was created",
"format": "date-time",
"x-example": "2018-04-03T18:37:09.284840891Z",
},
DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY: {
"type": "string",
"description": "The command used to create the layer",
"x-example": "\/bin\/sh -c #(nop) ADD file:somesha in /",
},
DOCKER_SCHEMA2_CONFIG_COMMENT_KEY: {
"type": "string",
"description": "Comment describing the layer",
},
DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY: {
"type": "string",
"description": "The author of the layer",
},
},
"additionalProperties": True,
},
},
DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY: {
"type": "object",
"description": "Describes the root filesystem for this image",
"properties": {
DOCKER_SCHEMA2_CONFIG_TYPE_KEY: {
"type": "string",
"description": "The type of the root file system entries",
},
},
"required": [DOCKER_SCHEMA2_CONFIG_TYPE_KEY],
"additionalProperties": True,
},
},
"additionalProperties": True,
}
def __init__(self, config_bytes, skip_validation_for_testing=False):
assert isinstance(config_bytes, Bytes)
self._config_bytes = config_bytes
try:
self._parsed = json.loads(config_bytes.as_unicode())
except ValueError as ve:
raise MalformedSchema2Config("malformed config data: %s" % ve)
if not skip_validation_for_testing:
try:
validate_schema(self._parsed, DockerSchema2Config.METASCHEMA)
except ValidationError as ve:
raise MalformedSchema2Config("config data does not match schema: %s" % ve)
@property
def digest(self):
"""
Returns the digest of this config object.
"""
return digest_tools.sha256_digest(self._config_bytes.as_encoded_str())
@property
def size(self):
"""
Returns the size of this config object.
"""
return len(self._config_bytes.as_encoded_str())
@property
def bytes(self):
"""
Returns the bytes of this config object.
"""
return self._config_bytes
@property
def labels(self):
"""
Returns a dictionary of all the labels defined in this configuration.
"""
return self._parsed.get("config", {}).get("Labels", {}) or {}
@property
def has_empty_layer(self):
"""
Returns whether this config contains an empty layer.
"""
for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
if history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False):
return True
return False
@property
def history(self):
"""
Returns the history of the image, started at the base layer.
"""
for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
created_datetime_str = history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_KEY)
created_datetime = parse_date(created_datetime_str) if created_datetime_str else None
yield LayerHistory(
created_datetime=created_datetime,
created=history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_KEY),
command=history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY),
author=history_entry.get(DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY),
comment=history_entry.get(DOCKER_SCHEMA2_CONFIG_COMMENT_KEY),
is_empty=history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False),
raw_entry=history_entry,
)
def build_v1_compatibility(self, history, v1_id, v1_parent_id, is_leaf, compressed_size=None):
"""
Builds the V1 compatibility block for the given layer.
"""
# If the layer is the leaf, it gets the full config (minus 2 fields). Otherwise, it gets only
# IDs.
v1_compatibility = copy.deepcopy(self._parsed) if is_leaf else {}
v1_compatibility["id"] = v1_id
if v1_parent_id is not None:
v1_compatibility["parent"] = v1_parent_id
if "created" not in v1_compatibility and history.created:
v1_compatibility["created"] = history.created
if "author" not in v1_compatibility and history.author:
v1_compatibility["author"] = history.author
if "comment" not in v1_compatibility and history.comment:
v1_compatibility["comment"] = history.comment
if "throwaway" not in v1_compatibility and history.is_empty:
v1_compatibility["throwaway"] = True
if "container_config" not in v1_compatibility:
v1_compatibility["container_config"] = {
"Cmd": [history.command],
}
if compressed_size is not None:
v1_compatibility["Size"] = compressed_size
# The history and rootfs keys are schema2-config specific.
v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, None)
v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY, None)
return v1_compatibility
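# Minimal usage sketch (added comment, not part of the original module).
# `config_blob` is assumed to be a util.bytes.Bytes instance wrapping the raw
# JSON of a schema 2 config blob, e.g. one shaped like the example at the top
# of this file.
#
#   config = DockerSchema2Config(config_blob)
#   print(config.digest, config.size)
#   print(config.labels)
#   for entry in config.history:
#       print(entry.created, entry.command, entry.is_empty)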
|
src/main/python/systemds/operator/algorithm/__init__.py | mdbloice/systemds | 372 | 12617320 | <reponame>mdbloice/systemds
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
from .builtin.WoE import WoE
from .builtin.WoEApply import WoEApply
from .builtin.abstain import abstain
from .builtin.als import als
from .builtin.alsCG import alsCG
from .builtin.alsDS import alsDS
from .builtin.alsPredict import alsPredict
from .builtin.alsTopkPredict import alsTopkPredict
from .builtin.apply_pipeline import apply_pipeline
from .builtin.arima import arima
from .builtin.autoencoder_2layer import autoencoder_2layer
from .builtin.bandit import bandit
from .builtin.bivar import bivar
from .builtin.components import components
from .builtin.confusionMatrix import confusionMatrix
from .builtin.cor import cor
from .builtin.correctTypos import correctTypos
from .builtin.correctTyposApply import correctTyposApply
from .builtin.cox import cox
from .builtin.cspline import cspline
from .builtin.csplineCG import csplineCG
from .builtin.csplineDS import csplineDS
from .builtin.cvlm import cvlm
from .builtin.dbscan import dbscan
from .builtin.dbscanApply import dbscanApply
from .builtin.decisionTree import decisionTree
from .builtin.decisionTreePredict import decisionTreePredict
from .builtin.deepWalk import deepWalk
from .builtin.denialConstraints import denialConstraints
from .builtin.discoverFD import discoverFD
from .builtin.dist import dist
from .builtin.dmv import dmv
from .builtin.ema import ema
from .builtin.executePipeline import executePipeline
from .builtin.ffPredict import ffPredict
from .builtin.ffTrain import ffTrain
from .builtin.fit_pipeline import fit_pipeline
from .builtin.fixInvalidLengths import fixInvalidLengths
from .builtin.fixInvalidLengthsApply import fixInvalidLengthsApply
from .builtin.frameSort import frameSort
from .builtin.frequencyEncode import frequencyEncode
from .builtin.frequencyEncodeApply import frequencyEncodeApply
from .builtin.garch import garch
from .builtin.gaussianClassifier import gaussianClassifier
from .builtin.getAccuracy import getAccuracy
from .builtin.glm import glm
from .builtin.glmPredict import glmPredict
from .builtin.gmm import gmm
from .builtin.gmmPredict import gmmPredict
from .builtin.gnmf import gnmf
from .builtin.gridSearch import gridSearch
from .builtin.hospitalResidencyMatch import hospitalResidencyMatch
from .builtin.hyperband import hyperband
from .builtin.img_brightness import img_brightness
from .builtin.img_crop import img_crop
from .builtin.img_cutout import img_cutout
from .builtin.img_invert import img_invert
from .builtin.img_mirror import img_mirror
from .builtin.img_posterize import img_posterize
from .builtin.img_rotate import img_rotate
from .builtin.img_sample_pairing import img_sample_pairing
from .builtin.img_shear import img_shear
from .builtin.img_transform import img_transform
from .builtin.img_translate import img_translate
from .builtin.impurityMeasures import impurityMeasures
from .builtin.imputeByFD import imputeByFD
from .builtin.imputeByFDApply import imputeByFDApply
from .builtin.imputeByMean import imputeByMean
from .builtin.imputeByMeanApply import imputeByMeanApply
from .builtin.imputeByMedian import imputeByMedian
from .builtin.imputeByMedianApply import imputeByMedianApply
from .builtin.imputeByMode import imputeByMode
from .builtin.imputeByModeApply import imputeByModeApply
from .builtin.intersect import intersect
from .builtin.km import km
from .builtin.kmeans import kmeans
from .builtin.kmeansPredict import kmeansPredict
from .builtin.knn import knn
from .builtin.knnGraph import knnGraph
from .builtin.knnbf import knnbf
from .builtin.l2svm import l2svm
from .builtin.l2svmPredict import l2svmPredict
from .builtin.lasso import lasso
from .builtin.lenetPredict import lenetPredict
from .builtin.lenetTrain import lenetTrain
from .builtin.lm import lm
from .builtin.lmCG import lmCG
from .builtin.lmDS import lmDS
from .builtin.lmPredict import lmPredict
from .builtin.logSumExp import logSumExp
from .builtin.matrixProfile import matrixProfile
from .builtin.mcc import mcc
from .builtin.mdedup import mdedup
from .builtin.mice import mice
from .builtin.miceApply import miceApply
from .builtin.msvm import msvm
from .builtin.msvmPredict import msvmPredict
from .builtin.multiLogReg import multiLogReg
from .builtin.multiLogRegPredict import multiLogRegPredict
from .builtin.na_locf import na_locf
from .builtin.naiveBayes import naiveBayes
from .builtin.naiveBayesPredict import naiveBayesPredict
from .builtin.normalize import normalize
from .builtin.normalizeApply import normalizeApply
from .builtin.outlier import outlier
from .builtin.outlierByArima import outlierByArima
from .builtin.outlierByIQR import outlierByIQR
from .builtin.outlierByIQRApply import outlierByIQRApply
from .builtin.outlierBySd import outlierBySd
from .builtin.outlierBySdApply import outlierBySdApply
from .builtin.pca import pca
from .builtin.pcaInverse import pcaInverse
from .builtin.pcaTransform import pcaTransform
from .builtin.pnmf import pnmf
from .builtin.ppca import ppca
from .builtin.randomForest import randomForest
from .builtin.scale import scale
from .builtin.scaleApply import scaleApply
from .builtin.scaleMinMax import scaleMinMax
from .builtin.selectByVarThresh import selectByVarThresh
from .builtin.setdiff import setdiff
from .builtin.sherlock import sherlock
from .builtin.sherlockPredict import sherlockPredict
from .builtin.shortestPath import shortestPath
from .builtin.sigmoid import sigmoid
from .builtin.slicefinder import slicefinder
from .builtin.smote import smote
from .builtin.softmax import softmax
from .builtin.split import split
from .builtin.splitBalanced import splitBalanced
from .builtin.stableMarriage import stableMarriage
from .builtin.statsNA import statsNA
from .builtin.steplm import steplm
from .builtin.stratstats import stratstats
from .builtin.symmetricDifference import symmetricDifference
from .builtin.tSNE import tSNE
from .builtin.toOneHot import toOneHot
from .builtin.tomeklink import tomeklink
from .builtin.topk_cleaning import topk_cleaning
from .builtin.underSampling import underSampling
from .builtin.union import union
from .builtin.unique import unique
from .builtin.univar import univar
from .builtin.vectorToCsv import vectorToCsv
from .builtin.winsorize import winsorize
from .builtin.winsorizeApply import winsorizeApply
from .builtin.xdummy1 import xdummy1
from .builtin.xdummy2 import xdummy2
from .builtin.xgboost import xgboost
from .builtin.xgboostPredictClassification import xgboostPredictClassification
from .builtin.xgboostPredictRegression import xgboostPredictRegression
__all__ = ['WoE',
'WoEApply',
'abstain',
'als',
'alsCG',
'alsDS',
'alsPredict',
'alsTopkPredict',
'apply_pipeline',
'arima',
'autoencoder_2layer',
'bandit',
'bivar',
'components',
'confusionMatrix',
'cor',
'correctTypos',
'correctTyposApply',
'cox',
'cspline',
'csplineCG',
'csplineDS',
'cvlm',
'dbscan',
'dbscanApply',
'decisionTree',
'decisionTreePredict',
'deepWalk',
'denialConstraints',
'discoverFD',
'dist',
'dmv',
'ema',
'executePipeline',
'ffPredict',
'ffTrain',
'fit_pipeline',
'fixInvalidLengths',
'fixInvalidLengthsApply',
'frameSort',
'frequencyEncode',
'frequencyEncodeApply',
'garch',
'gaussianClassifier',
'getAccuracy',
'glm',
'glmPredict',
'gmm',
'gmmPredict',
'gnmf',
'gridSearch',
'hospitalResidencyMatch',
'hyperband',
'img_brightness',
'img_crop',
'img_cutout',
'img_invert',
'img_mirror',
'img_posterize',
'img_rotate',
'img_sample_pairing',
'img_shear',
'img_transform',
'img_translate',
'impurityMeasures',
'imputeByFD',
'imputeByFDApply',
'imputeByMean',
'imputeByMeanApply',
'imputeByMedian',
'imputeByMedianApply',
'imputeByMode',
'imputeByModeApply',
'intersect',
'km',
'kmeans',
'kmeansPredict',
'knn',
'knnGraph',
'knnbf',
'l2svm',
'l2svmPredict',
'lasso',
'lenetPredict',
'lenetTrain',
'lm',
'lmCG',
'lmDS',
'lmPredict',
'logSumExp',
'matrixProfile',
'mcc',
'mdedup',
'mice',
'miceApply',
'msvm',
'msvmPredict',
'multiLogReg',
'multiLogRegPredict',
'na_locf',
'naiveBayes',
'naiveBayesPredict',
'normalize',
'normalizeApply',
'outlier',
'outlierByArima',
'outlierByIQR',
'outlierByIQRApply',
'outlierBySd',
'outlierBySdApply',
'pca',
'pcaInverse',
'pcaTransform',
'pnmf',
'ppca',
'randomForest',
'scale',
'scaleApply',
'scaleMinMax',
'selectByVarThresh',
'setdiff',
'sherlock',
'sherlockPredict',
'shortestPath',
'sigmoid',
'slicefinder',
'smote',
'softmax',
'split',
'splitBalanced',
'stableMarriage',
'statsNA',
'steplm',
'stratstats',
'symmetricDifference',
'tSNE',
'toOneHot',
'tomeklink',
'topk_cleaning',
'underSampling',
'union',
'unique',
'univar',
'vectorToCsv',
'winsorize',
'winsorizeApply',
'xdummy1',
'xdummy2',
'xgboost',
'xgboostPredictClassification',
'xgboostPredictRegression']
|
src/timeago/locales/pt_BR.py | nmb10/timeago | 220 | 12617322 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2017-8-30
@author: generated by @lolobosse script
'''
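# Structure note (added comment): LOCALE is a list of [past, future] template
# pairs in timeago's fixed unit order: "just now", seconds, minute, minutes,
# hour, hours, day, days, week, weeks, month, months, year, years. The library
# substitutes the numeric quantity for "%s".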
LOCALE = [
    ["agora mesmo", "daqui um pouco"],
    ["há %s segundos", "em %s segundos"],
    ["há um minuto", "em um minuto"],
    ["há %s minutos", "em %s minutos"],
    ["há uma hora", "em uma hora"],
    ["há %s horas", "em %s horas"],
    ["há um dia", "em um dia"],
    ["há %s dias", "em %s dias"],
    ["há uma semana", "em uma semana"],
    ["há %s semanas", "em %s semanas"],
    ["há um mês", "em um mês"],
    ["há %s meses", "em %s meses"],
    ["há um ano", "em um ano"],
    ["há %s anos", "em %s anos"]
]
|
tensorflow/python/training/ftrl_test.py | connectthefuture/tensorflow | 101 | 12617323 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class FtrlOptimizerTest(tf.test.TestCase):
def testFtrlwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([0.0, 0.0], dtype=dtype)
var1 = tf.Variable([0.0, 0.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(np.array([-2.60260963, -4.29698515]),
v0_val)
self.assertAllCloseAccordingToType(np.array([-0.28432083, -0.56694895]),
v1_val)
def testFtrlwithoutRegularization2(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([4.0, 3.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(np.array([-2.55607247, -3.98729396]),
v0_val)
self.assertAllCloseAccordingToType(np.array([-0.28232238, -0.56096673]),
v1_val)
def testFtrlWithL1(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([4.0, 3.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]),
v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]),
v1_val)
def testFtrlWithL1_L2(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([4.0, 3.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(np.array([-0.24059935, -0.46829352]),
v0_val)
self.assertAllCloseAccordingToType(np.array([-0.02406147, -0.04830509]),
v1_val)
def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
if is_sparse:
var0 = tf.Variable([[0.0], [0.0]], dtype=dtype)
var1 = tf.Variable([[0.0], [0.0]], dtype=dtype)
grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1], dtype=dtype),
tf.constant([0]),
tf.constant([2, 1]))
grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]))
else:
var0 = tf.Variable([0.0, 0.0], dtype=dtype)
var1 = tf.Variable([0.0, 0.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
sess = tf.get_default_session()
v0_val, v1_val = sess.run([var0, var1])
if is_sparse:
self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
else:
self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
# Run Ftrl for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = sess.run([var0, var1])
return v0_val, v1_val
# When variables are initialized with Zero, FTRL-Proximal has two properties:
# 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical
# with GradientDescent.
# 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical
# with Adagrad.
# So, basing on these two properties, we test if our implementation of
# FTRL-Proximal performs same updates as Adagrad or GradientDescent.
def testEquivAdagradwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype, is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.GradientDescentOptimizer(3.0), dtype, is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.GradientDescentOptimizer(3.0), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
tf.test.main()
|
feature_engine/selection/drop_duplicate_features.py | janrito/feature_engine | 650 | 12617326 | <filename>feature_engine/selection/drop_duplicate_features.py
from typing import List, Union
import pandas as pd
from feature_engine.dataframe_checks import _check_contains_na, _is_dataframe
from feature_engine.selection.base_selector import BaseSelector
from feature_engine.validation import _return_tags
from feature_engine.variable_manipulation import (
_check_input_parameter_variables,
_find_all_variables,
)
Variables = Union[None, int, str, List[Union[str, int]]]
class DropDuplicateFeatures(BaseSelector):
"""
DropDuplicateFeatures() finds and removes duplicated features in a dataframe.
Duplicated features are identical features, regardless of the variable or column
name. If they show the same values for every observation, then they are considered
duplicated.
The transformer will first identify and store the duplicated variables. Next, the
transformer will drop these variables from a dataframe.
Parameters
----------
variables: list, default=None
The list of variables to evaluate. If None, the transformer will evaluate all
variables in the dataset.
missing_values : str, default=ignore
        Takes values 'raise' and 'ignore'. Whether missing values should raise
        an error or be ignored when finding duplicated features.
Attributes
----------
features_to_drop_:
Set with the duplicated features that will be dropped.
duplicated_feature_sets_:
Groups of duplicated features. Each list is a group of duplicated features.
variables_:
The variables to consider for the feature selection.
n_features_in_:
The number of features in the train set used in fit.
Methods
-------
fit:
Find duplicated features.
transform:
Remove duplicated features
fit_transform:
Fit to data. Then transform it.
"""
def __init__(self, variables: Variables = None, missing_values: str = "ignore"):
if missing_values not in ["raise", "ignore"]:
raise ValueError("missing_values takes only values 'raise' or 'ignore'.")
self.variables = _check_input_parameter_variables(variables)
self.missing_values = missing_values
def fit(self, X: pd.DataFrame, y: pd.Series = None):
"""
Find duplicated features.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The input dataframe.
y: None
y is not needed for this transformer. You can pass y or None.
Returns
-------
self
"""
# check input dataframe
X = _is_dataframe(X)
# find all variables or check those entered are in the dataframe
self.variables_ = _find_all_variables(X, self.variables)
if self.missing_values == "raise":
# check if dataset contains na
_check_contains_na(X, self.variables_)
# create tuples of duplicated feature groups
self.duplicated_feature_sets_ = []
# set to collect features that are duplicated
self.features_to_drop_ = set() # type: ignore
# create set of examined features
_examined_features = set()
for feature in self.variables_:
# append so we can remove when we create the combinations
_examined_features.add(feature)
if feature not in self.features_to_drop_:
_temp_set = set([feature])
# features that have not been examined, are not currently examined and
# were not found duplicates
_features_to_compare = [
f
for f in self.variables_
if f not in _examined_features.union(self.features_to_drop_)
]
# create combinations:
for f2 in _features_to_compare:
if X[feature].equals(X[f2]):
self.features_to_drop_.add(f2)
_temp_set.add(f2)
# if there are duplicated features
if len(_temp_set) > 1:
self.duplicated_feature_sets_.append(_temp_set)
self.n_features_in_ = X.shape[1]
return self
# Ugly work around to import the docstring for Sphinx, otherwise not necessary
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
X = super().transform(X)
return X
transform.__doc__ = BaseSelector.transform.__doc__
def _more_tags(self):
tags_dict = _return_tags()
# add additional test that fails
tags_dict["_xfail_checks"]["check_estimators_nan_inf"] = "transformer allows NA"
return tags_dict
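# Minimal usage sketch (added comment, illustrative only):
#
#   import pandas as pd
#   X = pd.DataFrame({
#       "a": [1, 2, 3],
#       "b": [1, 2, 3],   # exact duplicate of "a"
#       "c": [4, 5, 6],
#   })
#   selector = DropDuplicateFeatures()
#   Xt = selector.fit_transform(X)
#   # selector.features_to_drop_ == {"b"}; Xt keeps columns "a" and "c"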
|
dpaste/settings/tests.py | jcroot/dpaste | 278 | 12617332 | <filename>dpaste/settings/tests.py
"""
Settings for the testsuite runs.
"""
import django
from .base import * # noqa
SECRET_KEY = "test-key"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
|
experimental/heap_computation/test_mem.py | kokizzu/prometeo | 509 | 12617334 | <reponame>kokizzu/prometeo
# from prometeo.mem.ast_analyzer import get_call_graph
from prometeo.mem.ast_analyzer import compute_reach_graph
from prometeo.mem.ast_analyzer_2 import ast_visitor
from prometeo.mem.ast_analyzer_2 import compute_reach_graph
# from prometeo.cgen.code_gen import to_source
import ast
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Input .py file', required=True)
args = parser.parse_args()
tree = ast.parse(open(args.input).read())
# call_graph = get_call_graph(tree)
visitor = ast_visitor()
# import pdb; pdb.set_trace()
visitor.visit(tree)
call_graph = visitor.callees
print(call_graph)
# to_source(tree)
# print('call graph:\n', call_graph)
# import pdb; pdb.set_trace()
reach_map = compute_reach_graph(call_graph)
print('reach_map:\n', reach_map)
|
tests/test_utils/test_utils.py | Guangyun-Xu/mmdetection3d | 2,216 | 12617336 | <filename>tests/test_utils/test_utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet3d.core import draw_heatmap_gaussian
def test_gaussian():
heatmap = torch.zeros((128, 128))
ct_int = torch.tensor([64, 64], dtype=torch.int32)
radius = 2
draw_heatmap_gaussian(heatmap, ct_int, radius)
assert torch.isclose(torch.sum(heatmap), torch.tensor(4.3505), atol=1e-3)
|
src/olympia/amo/tests/test_migrations.py | covariant/addons-server | 843 | 12617351 | <gh_stars>100-1000
from django.db import migrations, models
from ..migrations import RenameConstraintsOperation, RenameIndexesOperation
def test_rename_constraints_operation():
add_constraint = migrations.AddConstraint(
model_name='addoncategory',
constraint=models.UniqueConstraint(
fields=('addon', 'category_id'), name='addons_categories_addon_category_id'
),
)
add_constraint2 = migrations.AddConstraint(
model_name='somemodel',
constraint=models.UniqueConstraint(fields=('addon',), name='somename'),
)
op = RenameConstraintsOperation(
'table_foo',
[(add_constraint, 'addon_id'), (add_constraint2, 'someoldname')],
)
assert op.sql == (
'ALTER TABLE `table_foo` '
'RENAME KEY `addon_id` TO `addons_categories_addon_category_id`, '
'RENAME KEY `someoldname` TO `somename`'
)
assert op.reverse_sql == (
'ALTER TABLE `table_foo` '
'RENAME KEY `addons_categories_addon_category_id` TO `addon_id`, '
'RENAME KEY `somename` TO `someoldname`'
)
assert op.state_operations[0] == add_constraint
assert op.state_operations[1].__class__ == migrations.RemoveConstraint
assert op.state_operations[1].model_name == 'addoncategory'
assert op.state_operations[1].name == 'addon_id'
assert op.state_operations[2] == add_constraint2
assert op.state_operations[3].__class__ == migrations.RemoveConstraint
assert op.state_operations[3].model_name == 'somemodel'
assert op.state_operations[3].name == 'someoldname'
def test_rename_indexes_operation():
add_index = migrations.AddIndex(
model_name='preview',
index=models.Index(fields=['addon'], name='previews_addon_idx'),
)
add_index2 = migrations.AddIndex(
model_name='somemodel',
index=models.Index(fields=['addon'], name='somename'),
)
op = RenameIndexesOperation(
'table_foo',
[(add_index, 'addon_id'), (add_index2, 'someoldname')],
)
assert op.sql == (
'ALTER TABLE `table_foo` '
'RENAME INDEX `addon_id` TO `previews_addon_idx`, '
'RENAME INDEX `someoldname` TO `somename`'
)
assert op.reverse_sql == (
'ALTER TABLE `table_foo` '
'RENAME INDEX `previews_addon_idx` TO `addon_id`, '
'RENAME INDEX `somename` TO `someoldname`'
)
assert op.state_operations[0] == add_index
assert op.state_operations[1].__class__ == migrations.RemoveIndex
assert op.state_operations[1].model_name == 'preview'
assert op.state_operations[1].name == 'addon_id'
assert op.state_operations[2] == add_index2
assert op.state_operations[3].__class__ == migrations.RemoveIndex
assert op.state_operations[3].model_name == 'somemodel'
assert op.state_operations[3].name == 'someoldname'
|
pythainlp/corpus/core.py | Gorlph/pythainlp | 569 | 12617361 | # -*- coding: utf-8 -*-
"""
Corpus related functions.
"""
import hashlib
import os
from typing import Union
from urllib.request import urlopen
import json
import requests
from pythainlp.corpus import corpus_db_path, corpus_db_url, corpus_path
from pythainlp.tools import get_full_data_path
from requests.exceptions import HTTPError
from tinydb import Query, TinyDB
from pythainlp import __version__
def get_corpus_db(url: str) -> requests.Response:
"""
Get corpus catalog from server.
    :param str url: URL of the corpus catalog
"""
corpus_db = None
try:
corpus_db = requests.get(url)
except HTTPError as http_err:
print(f"HTTP error occurred: {http_err}")
except Exception as err:
print(f"Non-HTTP error occurred: {err}")
return corpus_db
def get_corpus_db_detail(name: str, version: str = None) -> dict:
"""
Get details about a corpus, using information from local catalog.
    :param str name: corpus name
:return: details about a corpus
:rtype: dict
"""
local_db = TinyDB(corpus_db_path())
query = Query()
if version is None:
res = local_db.search(query.name == name)
else:
res = local_db.search((query.name == name) & (query.version == version))
local_db.close()
if res:
return res[0]
return dict()
def path_pythainlp_corpus(filename: str) -> str:
"""
    Get the path of a pythainlp.corpus data file
:param str filename: filename of the corpus to be read
    :return: path of the corpus file
:rtype: str
"""
return os.path.join(corpus_path(), filename)
def get_corpus(filename: str, as_is: bool = False) -> Union[frozenset, list]:
"""
Read corpus data from file and return a frozenset or a list.
Each line in the file will be a member of the set or the list.
    By default, a frozenset will be returned, with whitespace stripped, and
    empty values and duplicates removed.
    If as_is is True, a list will be returned, with no modifications
    to member values or their order.
:param str filename: filename of the corpus to be read
:return: :class:`frozenset` or :class:`list` consists of lines in the file
:rtype: :class:`frozenset` or :class:`list`
:Example:
::
from pythainlp.corpus import get_corpus
get_corpus('negations_th.txt')
# output:
        # frozenset({'แต่', 'ไม่'})
get_corpus('ttc_freq.txt')
# output:
        # frozenset({'โดยนัยนี้\\t1',
        #    'ตัวบท\\t10',
        #    'หยิบยื่น\\t3',
# ...})
"""
path = path_pythainlp_corpus(filename)
lines = []
with open(path, "r", encoding="utf-8-sig") as fh:
lines = fh.read().splitlines()
if as_is:
return lines
lines = [line.strip() for line in lines]
return frozenset(filter(None, lines))
def get_corpus_default_db(name: str, version: str = None) -> Union[str, None]:
"""
Get model path from default_db.json
:param str name: corpus name
    :return: path to the corpus or **None** if the corpus doesn't \
        exist on the device
:rtype: str
    If you want to edit default_db.json, \
    you can find it at pythainlp/corpus/default_db.json
"""
default_db_path = path_pythainlp_corpus("default_db.json")
with open(default_db_path, encoding="utf-8-sig") as fh:
corpus_db = json.load(fh)
if name in list(corpus_db.keys()):
if version in list(corpus_db[name]["versions"].keys()):
return path_pythainlp_corpus(
corpus_db[name]["versions"][version]["filename"]
)
elif version is None: # load latest version
version = corpus_db[name]["latest_version"]
return path_pythainlp_corpus(
corpus_db[name]["versions"][version]["filename"]
)
def get_corpus_path(name: str, version: str = None) -> Union[str, None]:
"""
Get corpus path.
:param str name: corpus name
:return: path to the corpus or **None** of the corpus doesn't \
exist in the device
:rtype: str
:Example:
(Please see the filename from
`this file
<https://pythainlp.github.io/pythainlp-corpus/db.json>`_
If the corpus already exists::
from pythainlp.corpus import get_corpus_path
print(get_corpus_path('ttc'))
# output: /root/pythainlp-data/ttc_freq.txt
If the corpus has not been downloaded yet::
from pythainlp.corpus import download, get_corpus_path
print(get_corpus_path('wiki_lm_lstm'))
# output: None
download('wiki_lm_lstm')
# output:
# Download: wiki_lm_lstm
# wiki_lm_lstm 0.32
# thwiki_lm.pth?dl=1: 1.05GB [00:25, 41.5MB/s]
# /root/pythainlp-data/thwiki_model_lstm.pth
print(get_corpus_path('wiki_lm_lstm'))
# output: /root/pythainlp-data/thwiki_model_lstm.pth
"""
    # To customize a corpus path, add an entry to _CUSTOMIZE below;
    # if the name is found there, that path is returned directly.
_CUSTOMIZE = {
# "the corpus name":"path"
}
if name in list(_CUSTOMIZE.keys()):
return _CUSTOMIZE[name]
default_path = get_corpus_default_db(name=name, version=version)
if default_path is not None:
return default_path
# check if the corpus is in local catalog, download if not
corpus_db_detail = get_corpus_db_detail(name)
if not corpus_db_detail or not corpus_db_detail.get("filename"):
download(name, version = version)
corpus_db_detail = get_corpus_db_detail(name)
if corpus_db_detail and corpus_db_detail.get("filename"):
# corpus is in the local catalog, get full path to the file
path = get_full_data_path(corpus_db_detail.get("filename"))
# check if the corpus file actually exists, download if not
if not os.path.exists(path):
download(name)
if os.path.exists(path):
return path
return None
def _download(url: str, dst: str) -> int:
"""
Download helper.
@param: url to download file
@param: dst place to put the file
"""
_CHUNK_SIZE = 64 * 1024 # 64 KiB
file_size = int(urlopen(url).info().get("Content-Length", -1))
r = requests.get(url, stream=True)
with open(get_full_data_path(dst), "wb") as f:
pbar = None
try:
from tqdm import tqdm
pbar = tqdm(total=int(r.headers["Content-Length"]))
except ImportError:
pbar = None
for chunk in r.iter_content(chunk_size=_CHUNK_SIZE):
if chunk:
f.write(chunk)
if pbar:
pbar.update(len(chunk))
if pbar:
pbar.close()
else:
print("Done.")
return file_size
def _check_hash(dst: str, md5: str) -> None:
"""
Check hash helper.
@param: dst place to put the file
    @param: md5 expected MD5 hash of the file (hex digest)
"""
if md5 and md5 != "-":
with open(get_full_data_path(dst), "rb") as f:
content = f.read()
file_md5 = hashlib.md5(content).hexdigest()
if md5 != file_md5:
raise Exception("Hash does not match expected.")
def _version2int(v: str) -> int:
"""
X.X.X => X0X0X
"""
if '-' in v:
v = v.split("-")[0]
if v.endswith(".*"):
v = v.replace(".*", ".0") # X.X.* => X.X.0
v_list = v.split(".")
if len(v_list) < 3:
v_list.append('0')
v_new = ""
for i, value in enumerate(v_list):
if i != 0:
if len(value) < 2:
v_new += "0"+value
else:
v_new += value
else:
v_new += value
return int(v_new)
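# Illustrative mapping (added comment): "2.3.1" -> "2" + "03" + "01" -> 20301,
# while "2.0" is padded to "2.0.0" first and becomes 20000. This encoding keeps
# version comparisons monotonic as long as each component stays below 100.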
def _check_version(cause: str) -> bool:
temp = cause
check = False
__version = __version__
if 'dev' in __version:
__version = __version.split('dev')[0]
elif 'beta' in __version:
__version = __version.split('beta')[0]
v = _version2int(__version)
if cause == "*":
check = True
elif cause.startswith("==") and '>' not in cause and '<' not in cause:
temp = cause.replace("==", '')
check = v == _version2int(temp)
elif cause.startswith(">=") and '<' not in cause:
temp = cause.replace(">=", '')
check = v >= _version2int(temp)
elif cause.startswith(">") and '<' not in cause:
temp = cause.replace(">", '')
check = v > _version2int(temp)
elif cause.startswith(">=") and '<=' not in cause and '<' in cause:
temp = cause.replace(">=", '').split('<')
check = v >= _version2int(temp[0]) and v < _version2int(temp[1])
elif cause.startswith(">=") and '<=' in cause:
temp = cause.replace(">=", '').split('<=')
check = v >= _version2int(temp[0]) and v <= _version2int(temp[1])
elif cause.startswith(">") and '<' in cause:
temp = cause.replace(">", '').split('<')
check = v > _version2int(temp[0]) and v < _version2int(temp[1])
elif cause.startswith("<="):
temp = cause.replace("<=", '')
        check = v <= _version2int(temp)
elif cause.startswith("<"):
temp = cause.replace("<", '')
        check = v < _version2int(temp)
return check
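# Examples of the constraint strings handled above (added comment):
#   _check_version("*")          -> always True
#   _check_version(">=2.0")      -> True when the installed version is >= 2.0.0
#   _check_version(">=2.0<3.0")  -> True when 2.0.0 <= installed version < 3.0.0
# The installed version is read from pythainlp.__version__ with any dev/beta
# suffix stripped before the comparison.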
def download(
name: str, force: bool = False, url: str = None, version: str = None
) -> bool:
"""
Download corpus.
The available corpus names can be seen in this file:
https://pythainlp.github.io/pythainlp-corpus/db.json
:param str name: corpus name
:param bool force: force download
:param str url: URL of the corpus catalog
:param str version: Version of the corpus
    :return: **True** if the corpus is found and successfully downloaded.
Otherwise, it returns **False**.
:rtype: bool
:Example:
::
from pythainlp.corpus import download
download('wiki_lm_lstm', force=True)
# output:
# Corpus: wiki_lm_lstm
# - Downloading: wiki_lm_lstm 0.1
            # thwiki_lm.pth: 26%|██▌ | 114k/434k [00:00<00:00, 690kB/s]
By default, downloaded corpus and model will be saved in
``$HOME/pythainlp-data/``
(e.g. ``/Users/bact/pythainlp-data/wiki_lm_lstm.pth``).
"""
if not url:
url = corpus_db_url()
corpus_db = get_corpus_db(url)
if not corpus_db:
print(f"Cannot download corpus catalog from: {url}")
return False
corpus_db = corpus_db.json()
# check if corpus is available
if name in list(corpus_db.keys()):
local_db = TinyDB(corpus_db_path())
query = Query()
corpus = corpus_db[name]
print("Corpus:", name)
if version is None:
for v in corpus["versions"]:
if _check_version(corpus["versions"][v]["pythainlp_version"]):
version = v
else:
if version not in list(corpus["versions"].keys()):
print("Not found corpus")
local_db.close()
return False
elif _check_version(
corpus["versions"][version]["pythainlp_version"]
) is False:
print("Versions Corpus not support")
local_db.close()
return False
corpus_versions = corpus["versions"][version]
file_name = corpus_versions["filename"]
found = local_db.search(
(query.name == name) & (query.version == version)
)
# If not found in local, download
if force or not found:
print(f"- Downloading: {name} {version}")
_download(
corpus_versions["download_url"], file_name,
)
_check_hash(
file_name, corpus_versions["md5"],
)
if found:
local_db.update({"version": version}, query.name == name)
else:
local_db.insert(
{"name": name, "version": version, "filename": file_name}
)
else:
if local_db.search(
query.name == name and query.version == version
):
# Already has the same version
print("- Already up to date.")
else:
# Has the corpus but different version
current_ver = local_db.search(query.name == name)[0]["version"]
print(f"- Existing version: {current_ver}")
print(f"- New version available: {version}")
print("- Use download(data_name, force=True) to update")
local_db.close()
return True
print("Corpus not found:", name)
return False
def remove(name: str) -> bool:
"""
Remove corpus
:param str name: corpus name
    :return: **True** if the corpus is found and successfully removed.
Otherwise, it returns **False**.
:rtype: bool
:Example:
::
from pythainlp.corpus import remove, get_corpus_path, get_corpus
print(remove('ttc'))
# output: True
print(get_corpus_path('ttc'))
# output: None
get_corpus('ttc')
# output:
# FileNotFoundError: [Errno 2] No such file or directory:
# '/usr/local/lib/python3.6/dist-packages/pythainlp/corpus/ttc'
"""
db = TinyDB(corpus_db_path())
query = Query()
data = db.search(query.name == name)
if data:
path = get_corpus_path(name)
os.remove(path)
db.remove(query.name == name)
db.close()
return True
db.close()
return False
|
deeprobust/graph/data/utils.py | shixiongjing/DeepRobust | 647 | 12617373 | """
This file provides functions for converting deeprobust data
to pytorch geometric data.
"""
|
official/vision/gan/megengine_mimicry/utils/common.py | pepperonibo/Models | 294 | 12617374 | <filename>official/vision/gan/megengine_mimicry/utils/common.py
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Script for common utility functions.
"""
import json
import os
import numpy as np
def write_to_json(dict_to_write, output_file):
"""
Outputs a given dictionary as a JSON file with indents.
Args:
dict_to_write (dict): Input dictionary to output.
output_file (str): File path to write the dictionary.
Returns:
None
"""
with open(output_file, 'w') as file:
json.dump(dict_to_write, file, indent=4)
def load_from_json(json_file):
"""
Loads a JSON file as a dictionary and return it.
Args:
json_file (str): Input JSON file to read.
Returns:
dict: Dictionary loaded from the JSON file.
"""
with open(json_file, 'r') as file:
return json.load(file)
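# Usage sketch (added comment, illustrative only): round-trip a metrics dict.
#
#   metrics = {"fid": 12.3, "steps": 100000}
#   write_to_json(metrics, "metrics.json")
#   assert load_from_json("metrics.json") == metrics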
|
recipe_scrapers/_exceptions.py | mathiazom/recipe-scrapers | 811 | 12617455 | class RecipeScrapersExceptions(Exception):
def __init__(self, message):
self.message = message
super().__init__(message)
def __str__(self):
return f"recipe-scrapers exception: {self.message}"
class WebsiteNotImplementedError(RecipeScrapersExceptions):
"""Error when website is not supported by this library."""
def __init__(self, domain):
self.domain = domain
message = f"Website ({self.domain}) not supported."
super().__init__(message)
class NoSchemaFoundInWildMode(RecipeScrapersExceptions):
"""Error when wild_mode fails to locate schema at the url"""
def __init__(self, url):
self.url = url
message = f"No Recipe Schema found at {self.url}."
super().__init__(message)
class ElementNotFoundInHtml(RecipeScrapersExceptions):
"""Error when we cannot locate the HTML element on the page"""
def __init__(self, element):
self.element = element
message = (
"Element not found in html (self.soup.find returned None). Check traceback."
)
super().__init__(message)
class SchemaOrgException(RecipeScrapersExceptions):
"""Error in parsing or missing portion of the Schema.org data org the page"""
def __init__(self, message):
super().__init__(message)
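# Usage sketch (added comment, illustrative only):
#
#   try:
#       raise WebsiteNotImplementedError("example.com")
#   except RecipeScrapersExceptions as exc:
#       print(exc)  # recipe-scrapers exception: Website (example.com) not supported.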
|
src/toil/wdl/versions/dev.py | Hexotical/toil | 348 | 12617481 | <reponame>Hexotical/toil
# Copyright (C) 2020-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from wdlparse.dev.WdlLexer import WdlLexer, FileStream
from wdlparse.dev.WdlParser import WdlParser, CommonTokenStream
from toil.wdl.versions.v1 import AnalyzeV1WDL, is_context
from toil.wdl.wdl_types import WDLType
logger = logging.getLogger(__name__)
class AnalyzeDevelopmentWDL(AnalyzeV1WDL): # extend from 1.0
"""
AnalyzeWDL implementation for the development version using ANTLR4.
See: https://github.com/openwdl/wdl/blob/main/versions/development/SPEC.md
https://github.com/openwdl/wdl/blob/main/versions/development/parsers/antlr4/WdlParser.g4
"""
@property
def version(self) -> str:
return 'development'
def analyze(self):
"""
Analyzes the WDL file passed into the constructor and generates the two
intermediate data structures: `self.workflows_dictionary` and
`self.tasks_dictionary`.
"""
lexer = WdlLexer(FileStream(self.wdl_file))
parser = WdlParser(input=CommonTokenStream(lexer))
tree = parser.document()
self.visit_document(tree)
def visit_document(self, ctx: WdlParser.DocumentContext) -> None:
"""
Similar to version 1.0, except the 'workflow' element is included in
`ctx.document_element()`.
"""
for element in ctx.document_element():
self.visit_document_element(element)
def visit_document_element(self, ctx: WdlParser.Document_elementContext) -> None:
"""
Similar to version 1.0, except this also contains 'workflow'.
"""
element = ctx.children[0]
if is_context(element, 'WorkflowContext'):
self.visit_workflow(element)
else:
# let super take care of the rest.
super().visit_document_element(ctx)
def visit_call(self, ctx: WdlParser.CallContext) -> dict:
"""
Similar to version 1.0, except `ctx.call_afters()` is added.
"""
# TODO: implement call_afters
# See: https://github.com/openwdl/wdl/blob/main/versions/development/SPEC.md#call-statement
return super().visit_call(ctx)
def visit_string_expr_part(self, ctx: WdlParser.String_expr_partContext) -> str:
"""
Similar to version 1.0, except `ctx.expression_placeholder_option()`
is removed.
"""
# expression placeholder options are removed in development
# See: https://github.com/openwdl/wdl/blob/main/versions/development/parsers/antlr4/WdlParser.g4#L55
return self.visit_expr(ctx.expr())
def visit_wdl_type(self, ctx: WdlParser.Wdl_typeContext) -> WDLType:
"""
Similar to version 1.0, except Directory type is added.
"""
identifier = ctx.type_base().children[0]
if identifier == 'Directory':
# TODO: implement Directory type
raise NotImplementedError('Directory type is not implemented.')
else:
# let super take care of the rest.
return super().visit_wdl_type(ctx)
def visit_expr_core(self, expr: WdlParser.Expr_coreContext) -> str:
"""
Similar to version 1.0, except struct literal is added.
"""
if is_context(expr, 'Struct_literalContext'):
# TODO: implement struct literal
            raise NotImplementedError('WDL struct is not implemented.')
else:
# let super take care of the rest.
return super().visit_expr_core(expr)
|
docs/conf.py | Pesa/ndn-cxx | 106 | 12617490 | <reponame>Pesa/ndn-cxx
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'ndn-cxx: NDN C++ library with eXperimental eXtensions'
copyright = u'Copyright © 2013-2021 Regents of the University of California.'
author = u'Named Data Networking Project'
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.extlinks',
'sphinx.ext.todo',
]
def addExtensionIfExists(extension):
try:
__import__(extension)
extensions.append(extension)
except ImportError:
sys.stderr.write("Extension '%s' not found. "
"Some documentation may not build correctly.\n" % extension)
addExtensionIfExists('sphinxcontrib.doxylink')
# The master toctree document.
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'named_data_theme'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_copy_source = False
html_show_sourcelink = False
# Disable syntax highlighting of code blocks by default.
highlight_language = 'none'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ndn-cxx-docs.tex', u'NDN C++ library with eXperimental eXtensions',
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('manpages/ndnsec', 'ndnsec', 'NDN security toolkit', None, 1),
('manpages/ndnsec-cert-dump', 'ndnsec-cert-dump', 'export an NDN certificate', None, 1),
('manpages/ndnsec-cert-gen', 'ndnsec-cert-gen', 'create an NDN certificate for an identity', None, 1),
('manpages/ndnsec-cert-install', 'ndnsec-cert-install', 'import an NDN certificate from a file', None, 1),
('manpages/ndnsec-delete', 'ndnsec-delete', 'delete an NDN identity, key, or certificate', None, 1),
('manpages/ndnsec-export', 'ndnsec-export', 'export an NDN certificate and its private key to a file', None, 1),
('manpages/ndnsec-get-default', 'ndnsec-get-default', 'show the default NDN identity, key, and certificate for the current user', None, 1),
('manpages/ndnsec-import', 'ndnsec-import', 'import an NDN certificate and its private key from a file', None, 1),
('manpages/ndnsec-key-gen', 'ndnsec-key-gen', 'generate an NDN key for an identity', None, 1),
('manpages/ndnsec-list', 'ndnsec-list', 'list all known NDN identities, keys, and certificates', None, 1),
('manpages/ndnsec-set-default', 'ndnsec-set-default', 'change the default NDN identity, key, or certificate for the current user', None, 1),
('manpages/ndnsec-sign-req', 'ndnsec-sign-req', 'generate an NDN certificate signing request', None, 1),
('manpages/ndnsec-unlock-tpm', 'ndnsec-unlock-tpm', 'unlock the TPM', None, 1),
('manpages/ndn-client.conf', 'ndn-client.conf', 'configuration file for NDN applications', None, 5),
('manpages/ndn-log', 'ndn-log', 'ndn-cxx logging', None, 7),
]
# If true, show URL addresses after external links.
#man_show_urls = True
# -- Custom options ----------------------------------------------------------
doxylink = {
'ndn-cxx': ('ndn-cxx.tag', 'doxygen/'),
}
extlinks = {
'issue': ('https://redmine.named-data.net/issues/%s', 'issue #'),
}
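# A short usage sketch (illustrative only, not taken from the ndn-cxx sources):
# with the 'issue' entry above, writing
#
#   :issue:`1234`
#
# in a reST page renders as a link captioned "issue #1234" that points to
# https://redmine.named-data.net/issues/1234.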
|
artemis/experiments/demo_experiments.py | peteroconnor-bc/artemis | 235 | 12617496 | import numpy as np
from artemis.experiments.decorators import experiment_function
from matplotlib import pyplot as plt
from six.moves import xrange
__author__ = 'peter'
"""
This file demonstrates Artemis's "Experiments".
When you run an experiment, all figures and console output, as well as some metadata such as total run time, arguments,
etc., are saved to disk.
This demo illustrates how you can create an experiment, create variations on that experiment, and view the results.
"""
class OnlineLinearRegressor:
def __init__(self, n_in, n_out, learning_rate = 0.01):
self.w = np.zeros((n_in, n_out))
self.learning_rate = learning_rate
def train(self, x, targ): # x: (n_samples, n_in), targ: (n_samples, n_out)
y = self.predict(x)
self.w -= self.learning_rate * (x.T.dot(y-targ))
def predict(self, x): # x: (n_samples, n_in)
return x.dot(self.w)
@experiment_function
def demo_linear_regression(
n_in = 100,
n_out = 4,
n_training_samples = 500,
n_test_samples = 500,
noise = .1,
n_epochs = 10,
eta = 0.001,
random_seed = 1234,
score_report_period = 100,
):
"""
    Generate a random linear regression problem and train an online predictor to solve it with stochastic gradient descent.
Log the scores and plot the resulting learning curves.
:param n_in: Number of inputs
:param n_out: Number of outputs
:param n_training_samples: Number of training samples in generated dataset.
:param n_test_samples: Number of test samples in generated dataset.
:param noise: Noise to add to generated dataset
:param n_epochs: Number of epochs to run for
:param eta: Learning rate for SGD
:param random_seed: Random seed (for generating data)
:param score_report_period: Report score every X training iterations.
"""
# Setup data
rng = np.random.RandomState(random_seed)
w_true = rng.randn(n_in, n_out)*.1 # (n_in, n_out)
training_data = rng.randn(n_training_samples, n_in) # (n_training_samples, n_in)
training_target = training_data.dot(w_true) + noise*rng.randn(n_training_samples, n_out) # (n_training_samples, n_out)
test_data = rng.randn(n_test_samples, n_in) # (n_test_samples, n_in)
test_target = test_data.dot(w_true) + noise*rng.randn(n_test_samples, n_out) # (n_test_samples, n_out)
predictor = OnlineLinearRegressor(n_in=n_in, n_out=n_out, learning_rate=eta)
# Train and periodically record scores.
epoch_scores = []
for i in xrange(n_training_samples*n_epochs+1):
if i % score_report_period == 0:
training_out = predictor.predict(training_data)
training_cost = ((training_target-training_out)**2).sum(axis=1).mean(axis=0)
test_out = predictor.predict(test_data)
test_cost = ((test_target-test_out)**2).sum(axis=1).mean(axis=0)
print('Epoch {epoch}: Test Cost: {test}, Training Cost: {train}'.format(epoch=float(i)/n_training_samples, test=test_cost, train=training_cost))
epoch = float(i) / n_training_samples
epoch_scores.append((epoch, training_cost, test_cost))
predictor.train(training_data[[i % n_training_samples]], training_target[[i % n_training_samples]])
# Plot
epochs, training_costs, test_costs = zip(*epoch_scores)
plt.plot(epochs, np.array([training_costs, test_costs]).T)
plt.xlabel('epoch')
plt.ylabel('cost')
plt.legend(['Training Cost', 'Test Cost'])
plt.title("Learning Curve")
plt.ion()
plt.show()
return {'training_cost': training_cost, 'test_cost': test_cost}
demo_linear_regression.add_variant('fast-learn', eta=0.01)
demo_linear_regression.add_variant('large_input_space', n_in=1000)
if __name__ == "__main__":
# Open a menu that allows you to run experiments and view old ones.
demo_linear_regression.browse(display_format="flat")
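# A minimal sanity-check sketch (not part of the demo above, and not artemis API):
# with noise=0 the SGD weights should approach the ordinary least-squares solution,
# which numpy can compute directly. The helper name below is hypothetical.
def _closed_form_solution(x, targ):
    """Return argmin_w ||x.dot(w) - targ||^2 via numpy's least-squares solver."""
    w, _, _, _ = np.linalg.lstsq(x, targ, rcond=None)
    return w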
|
webservices/iss_epicentro/iss_epicentroInizio.py | ffxx68/covid19italia | 237 | 12617539 | import csv
import datetime
import sys, os
pathname = os.path.dirname(sys.argv[0])
abspath=os.path.abspath(pathname)
regioni = ['Piemonte', 'Valle Aosta', 'Lombardia', 'PA Bolzano', 'PA Trento', 'Veneto', 'Friuli', 'Liguria', 'Emilia', 'Toscana',
'Umbria', 'Marche', 'Lazio', 'Abruzzo', 'Molise', 'Campania', 'Puglia', 'Basilicata', 'Calabria', 'Sicilia', 'Sardegna']
classi_casi = ['1-10', '11-50', '51-100',
'101-200', '201-500', '501-1000', '1000+']
classi_inc = ['0.01-1', '1.01-5', '5.01-10',
'10.01-15', '15.01-20', '20.01-40', '40+']
first_day = "20-02-20"
date_start = datetime.datetime.strptime(first_day, "%y-%m-%d")
today=datetime.datetime.now().strftime("%Y-%m-%d")
path=abspath+'/processing/'
f_i_name=path+today+'_incidenzaInizio.csv'
f_c_name=path+today+'_numeroCasiInizio.csv'
f_i = open(f_i_name, 'w')
f_i.write('data,regione,classe_incidenza\n')
with open(abspath+'/processing/raw_incidenzaInizio.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
            # print(f'Header {", ".join(row)}')
line_count += 1
elif row[0] == '1' and int(row[4]) < 7:
day = (int(row[2])-1582113600)/86400
id_regione = int((float(row[3])-0.5))
my_classe = int(row[4])
my_date = date_start + datetime.timedelta(days=day)
# print(
# f'\t{my_date.day}/{my_date.month} \t {regioni[id_regione]} \t {classi_inc[my_classe]}')
f_i.write('{:%y-%m-%d}'.format(my_date)+',' +
regioni[id_regione]+','+classi_inc[my_classe]+'\n')
line_count += 1
    # print(f'Processed {line_count} rows.')
f_i.close()
f_c = open(f_c_name, 'w')
f_c.write('data,regione,classe_numero_casi\n')
with open(abspath+'/processing/raw_numeroCasiInizio.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
            # print(f'Header {", ".join(row)}')
line_count += 1
elif row[0] == '1' and int(row[4]) < 7:
day = (int(row[2])-1582113600)/86400
id_regione = int((float(row[3])-0.5))
my_classe = int(row[4])
my_date = date_start + datetime.timedelta(days=day)
# print(
# f'\t{my_date.day}/{my_date.month} \t {regioni[id_regione]} \t {classi_casi[my_classe]}')
f_c.write('{:%y-%m-%d}'.format(my_date)+',' +
regioni[id_regione]+','+classi_casi[my_classe]+'\n')
line_count += 1
    # print(f'Processed {line_count} rows.')
f_c.close()
print(f_i_name)
print(f_c_name)
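# Worked example of the timestamp-to-date conversion above (a sketch with a
# hypothetical raw row): for row[2] == '1582459200',
#   day = (1582459200 - 1582113600) / 86400 = 4.0
# so my_date = date_start + 4 days = 2020-02-24, written out as '20-02-24'.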
|
rl_reliability_metrics/metrics/metrics_online.py | mcx/rl-reliability-metrics | 122 | 12617555 | # coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Online metrics for evaluating robustness of an RL algorithm.
Given a learning curve or set of learning curves, these metrics provide
measures of the robustness of the RL algorithm.
"""
import abc
import copy
import functools
import gin
import numpy as np
from rl_reliability_metrics.metrics import metric_utils as utils
from rl_reliability_metrics.metrics import metrics_base
import scipy.signal
import scipy.stats
import six
@six.add_metaclass(abc.ABCMeta)
class _OnlineMetric(metrics_base.Metric):
"""Base class for online metrics."""
def all_online_metrics():
"""Get all the online metrics."""
return _OnlineMetric.public_subclasses()
class _DispersionAcrossRuns(_OnlineMetric):
"""Computes dispersion across runs."""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATP'
bigger_is_better = False
def __init__(self,
dispersion_fn,
lowpass_thresh=None,
eval_points=None,
window_size=None,
baseline=None):
"""Initializes parameters for computing dispersion across runs.
Args:
dispersion_fn: Function for computing dispersion.
lowpass_thresh: Frequency threshold for low-pass filtering. This is the
point at which the gain drops to 1/sqrt(2) that of the passband (the
"-3 dB point"). The threshold should be normalized between 0 and 1,
where 1 is the Nyquist frequency, pi radians/sample. See documentation
for scipy.signal.butter.
eval_points: A list or Numpy array of length [# timepoints]. Standard
deviation will be computed at these timepoints. Set to None to select
all valid eval points.
window_size: If not None, defines a window centered at each eval point. We
evaluate dispersion across runs at the timepoints closest to each eval
point (but still within each window). This is useful when the available
timepoints are not precisely aligned across runs. If None, we evaluate
exactly at each eval point.
baseline: Set to "curve_range" to normalize by the curve range, defined as
the 95th percentile minus the start value. Set to a float to simply
divide by that value. Set to None for no normalization.
"""
self._dispersion_fn = dispersion_fn
self.lowpass_thresh = lowpass_thresh
self.eval_points = eval_points
self.window_size = window_size
self.baseline = baseline
def __call__(self, curves):
"""Computes normalized dispersion across runs.
Args:
curves: A list of learning curves, each a 2D numpy array where curve[0, :]
is the timepoint variable and curve[1, :] is the dependent variable.
Returns:
Dispersion across runs, computed at each of the eval_points.
(Numpy array with length n_eval_points).
"""
utils.assert_non_empty(curves)
# perform preprocessing for across-runs metrics
eval_point_values = utils.across_runs_preprocess(
curves, self.eval_points, self.window_size, self.lowpass_thresh)
# compute dispersion across curves
result = self._dispersion_fn(eval_point_values)
if self.baseline == 'curve_range':
curve_ranges = utils.curve_range(curves)
result /= np.median(curve_ranges)
elif self.baseline:
result = result / self.baseline
return result
@gin.configurable
class IqrAcrossRuns(_DispersionAcrossRuns):
"""Computes interquartile range across runs."""
def __init__(self,
lowpass_thresh=None,
eval_points=None,
window_size=None,
baseline=None):
super(IqrAcrossRuns, self).__init__(
dispersion_fn=lambda x: scipy.stats.iqr(x, axis=0),
lowpass_thresh=lowpass_thresh,
eval_points=eval_points,
window_size=window_size,
baseline=baseline)
@gin.configurable
class MadAcrossRuns(_DispersionAcrossRuns):
"""Computes median absolute deviation across runs."""
def __init__(self,
lowpass_thresh=None,
eval_points=None,
window_size=None,
baseline=None):
super(MadAcrossRuns, self).__init__(
dispersion_fn=lambda x: utils.median_absolute_deviations(x, axis=0),
lowpass_thresh=lowpass_thresh,
eval_points=eval_points,
window_size=window_size,
baseline=baseline)
@gin.configurable
class StddevAcrossRuns(_DispersionAcrossRuns):
"""Computes standard deviation across runs."""
def __init__(self,
lowpass_thresh=None,
eval_points=None,
window_size=None,
baseline=None):
super(StddevAcrossRuns, self).__init__(
dispersion_fn=lambda x: np.std(x, axis=0, ddof=1),
lowpass_thresh=lowpass_thresh,
eval_points=eval_points,
window_size=window_size,
baseline=baseline)
class _DispersionWithinRuns(_OnlineMetric):
"""Computes dispersion within runs."""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATRP'
bigger_is_better = False
def __init__(self,
dispersion_fn,
window_size=None,
eval_points=None,
baseline=None,
detrend=True):
"""Initializes parameters for computing dispersion within runs.
Args:
dispersion_fn: Function for computing dispersion.
window_size: The number of timepoints in the window. Set to None to use
window_size = entire length of the run.
eval_points: A list or Numpy array of length [# timepoints]. Standard
deviation will be computed at these timepoints. Set to None to use all
valid timepoints (given the window_size).
baseline: Set to "curve_range" to normalize by the curve range, defined as
the 95th percentile minus the start value. Set to a float to simply
divide by that value. Set to None for no normalization.
detrend: If True, detrend by differencing, before computing dispersion.
"""
self._dispersion_fn = dispersion_fn
self.window_size = window_size
self.eval_points = eval_points
self.baseline = baseline
self.detrend = detrend
def __call__(self, curves):
"""Computes dispersion within runs.
Args:
curves: A list of learning curves, each a 2D numpy array where curve[0, :]
is the timepoint variable and curve[1, :] is the dependent variable.
Returns:
Dispersion within runs, computed at each eval_point for each run.
(Numpy array with size n_run x n_eval_points.)
"""
utils.assert_non_empty(curves)
# Detrend by differencing.
if self.detrend:
diff_curves = utils.differences(curves)
else:
diff_curves = curves
dispersions = []
# Process each curve separately, because length of run may differ for each.
for curve, diff_curve in zip(curves, diff_curves):
eval_points = copy.deepcopy(self.eval_points)
window_size = copy.deepcopy(self.window_size)
# Determine eval_points and window_size, if needed (based on diff_curve).
if self.eval_points is None or self.window_size is None:
if self.window_size is None:
valid_eval_points = utils.get_all_valid_eval_points([diff_curve], 1)
window_size = valid_eval_points.max() - valid_eval_points.min() + 1
if self.eval_points is None:
eval_points = utils.get_all_valid_eval_points([diff_curve],
window_size)
# Compute dispersion for the curve.
diffcurve_dispers = utils.apply_window_fn(
[diff_curve], eval_points, self._dispersion_fn, window_size)
if self.baseline == 'curve_range':
curve_range = utils.curve_range([curve])[0]
diffcurve_dispers = diffcurve_dispers / curve_range
elif self.baseline:
diffcurve_dispers /= self.baseline
dispersions.extend(diffcurve_dispers)
return np.array(dispersions)
@gin.configurable
class StddevWithinRuns(_DispersionWithinRuns):
"""Computes standard deviation within runs."""
def __init__(self, window_size=None, eval_points=None, baseline=None):
super(StddevWithinRuns, self).__init__(functools.partial(np.std, ddof=1),
window_size,
eval_points,
baseline,
True)
@gin.configurable
class IqrWithinRuns(_DispersionWithinRuns):
"""Computes inter-quartile range within runs."""
def __init__(self, window_size=None, eval_points=None, baseline=None):
super(IqrWithinRuns, self).__init__(scipy.stats.iqr,
window_size,
eval_points,
baseline,
True)
@gin.configurable
class MadWithinRuns(_DispersionWithinRuns):
"""Computes median absolute deviation within runs."""
def __init__(self, window_size=None, eval_points=None, baseline=None):
super(MadWithinRuns, self).__init__(utils.median_absolute_deviations,
window_size,
eval_points,
baseline,
True)
@gin.configurable
class MaxDrawdown(_OnlineMetric):
"""Maximum drawdown (borrowed from economics/finance).
Maximum drawdown measures the largest peak-to-valley loss on each curve.
https://en.wikipedia.org/wiki/Drawdown_(economics)
"""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = False
def __init__(self, baseline=None, mean_normalize=False):
"""Initializes parameters for computing maximum drawdown.
Args:
baseline: If not None, this is a float value that is subtracted from the
curves as the first step of pre-processing.
mean_normalize: If True, normalize curves by the mean value of each curve
during preprocessing (after subtracting baseline, if available).
"""
self.baseline = baseline
self.mean_normalize = mean_normalize
def __call__(self, curves):
"""Compute maximum drawdown."""
utils.assert_non_empty(curves)
if self.baseline is not None:
curves = utils.subtract_baseline(curves, self.baseline)
if self.mean_normalize:
curves = utils.mean_normalization(curves)
mdd = np.empty(len(curves))
for i, curve in enumerate(curves):
dependent_vals = curve[1, :]
drawdown = utils.compute_drawdown(dependent_vals)
mdd[i] = np.max(drawdown)
return mdd
@gin.configurable
class HighFreqEnergyWithinRuns(_OnlineMetric):
"""Computes the energy of the signal above a given frequency threshold.
Normalized by the total energy of the signal.
This is a measure of dispersion within runs.
"""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = False
def __init__(self, thresh):
"""Initialize parameters.
Args:
thresh: frequency threshold
"""
self.thresh = thresh
def __call__(self, curves):
"""Computes energy of the signal above a given frequency threshold.
Normalized by the total energy of the signal.
Args:
curves: A list of learning curves, each a 2D numpy array where curve[0, :]
is the timepoint variable and curve[1, :] is the dependent variable.
Returns:
Amount of energy above the given frequency threshold, normalized by the
total energy of the signal.
"""
utils.assert_non_empty(curves)
energies = []
for curve in curves:
data = curve[1, :]
power_spectrum = np.abs(np.fft.fft(data))**2
time_step = curve[0, 1] - curve[0, 0]
# TODO(scychan) above assumes equal spacing
freqs = np.fft.fftfreq(data.size, time_step)
energy_above_thresh = (np.sum(power_spectrum[freqs > self.thresh]) /
np.sum(power_spectrum[freqs > 0]))
energies.append(energy_above_thresh)
return energies
class _CVaR(_OnlineMetric):
"""Computes conditional value at risk (CVaR), aka "expected shortfall".
For each learning curve, this metric takes the expected value on the curve
values that fall below the quantile defined by `alpha` (if computing on the
lower tail), or above the quantile defined by 1 - `alpha` (if computing on
the upper tail).
https://en.wikipedia.org/wiki/Expected_shortfall
"""
def __init__(self,
target,
tail,
alpha=0.05,
baseline=None,
lowpass_thresh=None,
eval_points=None,
window_size=None):
"""Initializes parameters for computing CVaR.
Args:
target: What data to perform CVaR on. Options:
'across' - across the training runs, evaluated at the eval points after
low-pass thresholding.
'diffs' - the timepoint to timepoint differences (per training curve)
'raw' - the raw values (per training curve)
'drawdown' - drawdown (per training curve)
https://en.wikipedia.org/wiki/Drawdown_(economics)
tail: 'lower' or 'upper' tail
alpha: The "value at risk" (VaR) cutoff point, a float in the range [0,1].
To compute CVaR we computed expected value below this quantile.
baseline: Set to "curve_range" to normalize by the curve range, defined as
the 95th percentile minus the start value. Set to a float to simply
divide by that value. Set to None for no normalization.
lowpass_thresh: [for target == 'across' only] The frequency threshold for
low-pass thresholding before computing CVaR
eval_points: [for target == 'across' only] A list or Numpy array of length
[# timepoints]. CVaR will be computed at these timepoints. Set to None
to select all valid eval points.
window_size: [For target == 'across' only]. If not None, defines a window
centered at each eval point. We evaluate CVaR across runs at the
timepoints closest to each eval point (but still within each window).
This is useful when the available timepoints are not precisely aligned
across runs. If None, we evaluate exactly at each eval point.
"""
if target not in ['across', 'diffs', 'raw', 'drawdown']:
raise ValueError("target must be 'across', 'diffs', 'raw', or "
"'drawdown'.")
self.target = target
self.tail = tail
self.alpha = alpha
self.baseline = baseline
self.lowpass_thresh = lowpass_thresh
self.eval_points = eval_points
self.window_size = window_size
def __call__(self, curves):
"""Computes CVaR for a list of curves.
Args:
curves: A list of learning curves, each a 2D numpy array where curve[0, :]
is the timepoint variable and curve[1, :] is the dependent variable.
Returns:
for self.target in ['diffs', 'raw', 'drawdown']:
A 1-D numpy array of CVaR values, one per curve in the input
(length = the number of curves in the input).
for self.target == 'across':
A 1-D numpy array of CVaR values, one per eval point
(length = number of eval points)
"""
utils.assert_non_empty(curves)
if self.baseline == 'curve_range':
curve_ranges = utils.curve_range(curves)
curves = utils.divide_by_baseline(curves, curve_ranges)
elif self.baseline:
curves = utils.divide_by_baseline(curves, self.baseline)
cvar_list = []
if self.target == 'across':
# Compute CVaR across curves (at each eval point)
eval_point_vals = utils.across_runs_preprocess(curves, self.eval_points,
self.window_size,
self.lowpass_thresh)
n_eval_points = eval_point_vals.shape[1]
for i_point in range(n_eval_points):
cvar = utils.compute_cvar(eval_point_vals[:, i_point], self.tail,
self.alpha)
cvar_list.append(cvar)
else:
# Compute CVaR within curves (one per curve).
for curve in curves:
dependent_var = curve[1, :]
if self.target == 'raw':
pass
elif self.target == 'diffs':
normalized_diffs = utils.differences([curve])[0]
dependent_var = normalized_diffs[1, :]
elif self.target == 'drawdown':
dependent_var = utils.compute_drawdown(dependent_var)
cvar = utils.compute_cvar(dependent_var, self.tail, self.alpha)
cvar_list.append(cvar)
return np.array(cvar_list)
@gin.configurable
class LowerCVaROnDiffs(_CVaR):
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = True
def __init__(self, alpha=0.05, baseline=None):
super(LowerCVaROnDiffs, self).__init__(
target='diffs',
tail='lower',
alpha=alpha,
baseline=baseline,
lowpass_thresh=None,
eval_points=None)
@gin.configurable
class UpperCVaROnDiffs(_CVaR):
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = True
def __init__(self, alpha=0.05, baseline=None):
super(UpperCVaROnDiffs, self).__init__(
target='diffs',
tail='upper',
alpha=alpha,
baseline=baseline,
lowpass_thresh=None,
eval_points=None)
@gin.configurable
class LowerCVaROnRaw(_CVaR):
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = True
def __init__(self, alpha=0.05, baseline=None):
super(LowerCVaROnRaw, self).__init__(
target='raw',
tail='lower',
alpha=alpha,
baseline=baseline,
lowpass_thresh=None,
eval_points=None)
@gin.configurable
class UpperCVaROnRaw(_CVaR):
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = True
def __init__(self, alpha=0.05, baseline=None):
super(UpperCVaROnRaw, self).__init__(
target='raw',
tail='upper',
alpha=alpha,
baseline=baseline,
lowpass_thresh=None,
eval_points=None)
@gin.configurable
class LowerCVaROnDrawdown(_CVaR):
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = False
def __init__(self, alpha=0.05, baseline=None):
super(LowerCVaROnDrawdown, self).__init__(
target='drawdown',
tail='lower',
alpha=alpha,
baseline=baseline,
lowpass_thresh=None,
eval_points=None)
@gin.configurable
class UpperCVaROnDrawdown(_CVaR):
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATR'
bigger_is_better = False
def __init__(self, alpha=0.05, baseline=None):
super(UpperCVaROnDrawdown, self).__init__(
target='drawdown',
tail='upper',
alpha=alpha,
baseline=baseline,
lowpass_thresh=None,
eval_points=None)
@gin.configurable
class LowerCVaROnAcross(_CVaR):
"""Lower CVaR across training runs."""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATP'
bigger_is_better = True
def __init__(self,
alpha=0.05,
baseline=None,
lowpass_thresh=None,
eval_points=None,
window_size=None):
super(LowerCVaROnAcross, self).__init__(
target='across',
tail='lower',
alpha=alpha,
baseline=baseline,
lowpass_thresh=lowpass_thresh,
eval_points=eval_points,
window_size=window_size)
@gin.configurable
class UpperCVaROnAcross(_CVaR):
"""Upper CVaR across training runs."""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATP'
bigger_is_better = True
def __init__(self,
alpha=0.05,
baseline=None,
lowpass_thresh=None,
eval_points=None,
window_size=None):
super(UpperCVaROnAcross, self).__init__(
target='across',
tail='upper',
alpha=alpha,
baseline=baseline,
lowpass_thresh=lowpass_thresh,
eval_points=eval_points,
window_size=window_size)
@gin.configurable
class MedianPerfDuringTraining(_OnlineMetric):
"""Median performance, within windows at specified points in training."""
# Set metric properties (see metrics_base.Metric).
result_dimensions = 'ATRP'
bigger_is_better = True
def __init__(self, window_size=None, eval_points=None, baseline=None):
"""Initializes parameters for computing median performance.
Args:
window_size: The number of timepoints in the window. Set to None to use
window_size = entire length of the run.
eval_points: A list or Numpy array of length [# timepoints]. Performance
will be computed at these timepoints. Set to None to use all valid
timepoints (given the window_size).
baseline: If this is a single float, we normalize using
normalized = perf / baseline. If this is a tuple of floats (low, high),
we normalize using normalized = (perf - low) / (high - low). If None or
if an iterable that contains None, we do not perform any normalization.
"""
self.window_size = window_size
self.eval_points = eval_points
self.baseline = baseline
def __call__(self, curves):
"""Computes median performance.
Args:
curves: A list of learning curves, each a 2D numpy array where curve[0, :]
is the timepoint variable and curve[1, :] is the dependent variable.
Returns:
Median performance, computed in a window at each eval_point for each run.
(Numpy array with size n_run x n_eval_points.)
"""
utils.assert_non_empty(curves)
# Determine eval_points and window_size, if needed.
eval_points = copy.deepcopy(self.eval_points)
window_size = copy.deepcopy(self.window_size)
if eval_points is None or window_size is None:
if window_size is None:
valid_eval_points = utils.get_all_valid_eval_points(curves, 1)
window_size = valid_eval_points.max() - valid_eval_points.min() + 1
if eval_points is None:
eval_points = utils.get_all_valid_eval_points(curves, window_size)
curves = self._normalize(curves)
perf = utils.apply_window_fn(curves, eval_points, np.median, window_size)
return perf
def _normalize(self, curves):
"""Normalize curves depending on setting of self.baseline."""
if self.baseline is None:
return curves
if isinstance(self.baseline, tuple):
if None in self.baseline:
return curves
if len(self.baseline) != 2:
raise ValueError('If baseline is a tuple it must be of the form '
'(low, high). Got %r' % self.baseline)
low, high = self.baseline
else:
low = 0
high = self.baseline
return utils.band_normalization(curves, low, high)
# Maintain a registry linking metric names to classes.
REGISTRY = {
metric.__name__: metric for metric in all_online_metrics()
}
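# A minimal usage sketch (illustrative only; assumes two synthetic runs whose
# timepoints include the chosen eval points):
def _example_iqr_across_runs():
    rng = np.random.RandomState(0)
    timepoints = np.arange(100)
    curves = [np.vstack([timepoints, np.log1p(timepoints) + 0.1 * rng.randn(100)])
              for _ in range(2)]
    metric = IqrAcrossRuns(eval_points=[25, 50, 99])
    return metric(curves)  # one IQR value per eval point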
|
dialogue-engine/src/programy/parser/template/nodes/triple.py | cotobadesign/cotoba-agent-oss | 104 | 12617578 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.parser.template.nodes.base import TemplateNode
from programy.utils.text.text import TextUtils
class TemplateTripleNode(TemplateNode):
def __init__(self, node_name, subj=None, pred=None, obj=None):
TemplateNode.__init__(self)
self._node_name = node_name
self._subj = subj
self._pred = pred
self._obj = obj
@property
def node_name(self):
return self._node_name
def children_to_xml(self, client_context):
xml = ""
if self._subj is not None:
subj = self._subj.resolve(client_context)
xml += "<subj>" + subj + "</subj>"
if self._pred is not None:
pred = self._pred.resolve(client_context)
xml += "<pred>" + pred + "</pred>"
if self._obj is not None:
obj = self._obj.resolve(client_context)
xml += "<obj>" + obj + "</obj>"
return xml
def parse_expression(self, graph, expression):
if 'subj' in expression.attrib:
self._subj = graph.get_word_node(expression.attrib['subj'])
if self._subj == '':
self._subj = None
if 'pred' in expression.attrib:
self._pred = graph.get_word_node(expression.attrib['pred'])
if self._pred == '':
self._pred = None
if 'obj' in expression.attrib:
self._obj = graph.get_word_node(expression.attrib['obj'])
if self._obj == '':
self._obj = None
head_text = self.get_text_from_element(expression)
self.parse_text(graph, head_text)
for child in expression:
tag_name = TextUtils.tag_from_text(child.tag)
if tag_name == 'subj':
self._subj = self.parse_children_as_word_node(graph, child)
if len(self._subj.children) == 0:
self._subj = None
elif tag_name == 'pred':
self._pred = self.parse_children_as_word_node(graph, child)
if len(self._pred.children) == 0:
self._pred = None
elif tag_name == 'obj':
self._obj = self.parse_children_as_word_node(graph, child)
if len(self._obj.children) == 0:
self._obj = None
else:
graph.parse_tag_expression(child, self)
tail_text = self.get_tail_from_element(child)
self.parse_text(graph, tail_text)
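# Illustrative template fragments handled by parse_expression above (a sketch; the
# concrete tag name depends on the subclass, e.g. an "addtriple"-style node):
#
#   <addtriple subj="MONKEY" pred="LEGS" obj="2" />
#   <addtriple><subj>MONKEY</subj><pred>LEGS</pred><obj>2</obj></addtriple>
#
# Both the attribute form and the child-element form populate _subj/_pred/_obj.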
|
qiskit_nature/mappers/second_quantization/linear_mapper.py | jschuhmac/qiskit-nature | 132 | 12617584 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Linear Mapper."""
import operator
from fractions import Fraction
from functools import reduce
from typing import List, Union
import numpy as np
from qiskit.opflow import PauliSumOp
from qiskit.quantum_info.operators import Pauli, SparsePauliOp
from qiskit_nature.operators.second_quantization import SpinOp
from .spin_mapper import SpinMapper
class LinearMapper(SpinMapper): # pylint: disable=missing-class-docstring
def __init__(self):
"""The Linear spin-to-qubit mapping."""
super().__init__(allows_two_qubit_reduction=False)
def map(self, second_q_op: SpinOp) -> PauliSumOp:
qubit_ops_list: List[PauliSumOp] = []
# get linear encoding of the general spin matrices
spinx, spiny, spinz, identity = self._linear_encoding(second_q_op.spin)
for idx, (_, coeff) in enumerate(second_q_op.to_list()):
operatorlist: List[PauliSumOp] = []
for n_x, n_y, n_z in zip(second_q_op.x[idx], second_q_op.y[idx], second_q_op.z[idx]):
operator_on_spin_i: List[PauliSumOp] = []
if n_x > 0:
operator_on_spin_i.append(reduce(operator.matmul, [spinx] * int(n_x)))
if n_y > 0:
operator_on_spin_i.append(reduce(operator.matmul, [spiny] * int(n_y)))
if n_z > 0:
operator_on_spin_i.append(reduce(operator.matmul, [spinz] * int(n_z)))
                if np.any([n_x, n_y, n_z]):
single_operator_on_spin_i = reduce(operator.matmul, operator_on_spin_i)
operatorlist.append(single_operator_on_spin_i.reduce())
else:
# If n_x=n_y=n_z=0, simply add the embedded Identity operator.
operatorlist.append(identity)
# Now, we can tensor all operators in this list
# NOTE: in Qiskit's opflow the `XOR` (i.e. `^`) operator does the tensor product
qubit_ops_list.append(coeff * reduce(operator.xor, reversed(operatorlist)))
qubit_op = reduce(operator.add, qubit_ops_list)
return qubit_op
def _linear_encoding(self, spin: Union[Fraction, float]) -> List[PauliSumOp]:
"""
Generates a 'linear_encoding' of the spin S operators 'X', 'Y', 'Z' and 'identity'
to qubit operators (linear combinations of pauli strings).
In this 'linear_encoding' each individual spin S system is represented via
2S+1 qubits and the state |s> is mapped to the state |00...010..00>, where the s-th qubit is
in state 1.
Returns:
The 4-element list of transformed spin S 'X', 'Y', 'Z' and 'identity' operators.
            I.e. spin_op_encoding[0] corresponds to the linear combination of pauli strings needed
to represent the embedded 'X' operator
"""
spin_op_encoding: List[PauliSumOp] = []
dspin = int(2 * spin + 1)
nqubits = dspin
# quick functions to generate a pauli with X / Y / Z at location `i`
pauli_id = Pauli("I" * nqubits)
def pauli_x(i):
return Pauli("I" * i + "X" + "I" * (nqubits - i - 1))
def pauli_y(i):
return Pauli("I" * i + "Y" + "I" * (nqubits - i - 1))
def pauli_z(i):
return Pauli("I" * i + "Z" + "I" * (nqubits - i - 1))
# 1. build the non-diagonal X operator
x_summands = []
for i, coeff in enumerate(np.diag(SpinOp("X", spin=spin).to_matrix(), 1)):
x_summands.append(
PauliSumOp(
coeff / 2.0 * SparsePauliOp(pauli_x(i).dot(pauli_x(i + 1)))
+ coeff / 2.0 * SparsePauliOp(pauli_y(i).dot(pauli_y(i + 1)))
)
)
spin_op_encoding.append(reduce(operator.add, x_summands))
# 2. build the non-diagonal Y operator
y_summands = []
for i, coeff in enumerate(np.diag(SpinOp("Y", spin=spin).to_matrix(), 1)):
y_summands.append(
PauliSumOp(
-1j * coeff / 2.0 * SparsePauliOp(pauli_x(i).dot(pauli_y(i + 1)))
+ 1j * coeff / 2.0 * SparsePauliOp(pauli_y(i).dot(pauli_x(i + 1)))
)
)
spin_op_encoding.append(reduce(operator.add, y_summands))
# 3. build the diagonal Z
z_summands = []
for i, coeff in enumerate(np.diag(SpinOp("Z", spin=spin).to_matrix())):
            # coeff runs over the main diagonal of the spin-Z matrix.
z_summands.append(
PauliSumOp(
coeff / 2.0 * SparsePauliOp(pauli_z(i)) + coeff / 2.0 * SparsePauliOp(pauli_id)
)
)
z_operator = reduce(operator.add, z_summands)
spin_op_encoding.append(z_operator)
# 4. add the identity operator
spin_op_encoding.append(PauliSumOp(1.0 * SparsePauliOp(pauli_id)))
# return the lookup table for the transformed XYZI operators
return spin_op_encoding
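# A minimal usage sketch (import paths assumed from this package layout; illustrative):
#
#   from qiskit_nature.operators.second_quantization import SpinOp
#   from qiskit_nature.mappers.second_quantization import LinearMapper
#
#   qubit_op = LinearMapper().map(SpinOp("X", spin=1))  # spin-1 X -> PauliSumOp on 3 qubits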
|
apple/internal/transition_support.bzl | tnek/rules_apple | 313 | 12617615 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starlark transition support for Apple rules."""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
def _cpu_string(*, cpu, platform_type, settings):
"""Generates a <platform>_<arch> string for the current target based on the given parameters.
Args:
cpu: A valid Apple cpu command line option as a string, or None to infer a value from
command line options passed through settings.
platform_type: The Apple platform for which the rule should build its targets (`"ios"`,
`"macos"`, `"tvos"`, or `"watchos"`).
settings: A dictionary whose set of keys is defined by the inputs parameter, typically from
the settings argument found on the implementation function of the current Starlark
transition.
Returns:
A <platform>_<arch> string defined for the current target.
"""
if platform_type == "ios":
if cpu:
return "ios_{}".format(cpu)
ios_cpus = settings["//command_line_option:ios_multi_cpus"]
if ios_cpus:
return "ios_{}".format(ios_cpus[0])
cpu_value = settings["//command_line_option:cpu"]
if cpu_value.startswith("ios_"):
return cpu_value
return "ios_x86_64"
if platform_type == "macos":
if cpu:
return "darwin_{}".format(cpu)
macos_cpus = settings["//command_line_option:macos_cpus"]
if macos_cpus:
return "darwin_{}".format(macos_cpus[0])
return "darwin_x86_64"
if platform_type == "tvos":
if cpu:
return "tvos_{}".format(cpu)
tvos_cpus = settings["//command_line_option:tvos_cpus"]
if tvos_cpus:
return "tvos_{}".format(tvos_cpus[0])
return "tvos_x86_64"
if platform_type == "watchos":
if cpu:
return "watchos_{}".format(cpu)
watchos_cpus = settings["//command_line_option:watchos_cpus"]
if watchos_cpus:
return "watchos_{}".format(watchos_cpus[0])
return "watchos_i386"
fail("ERROR: Unknown platform type: {}".format(platform_type))
def _min_os_version_or_none(*, minimum_os_version, platform, platform_type):
if platform_type == platform:
return minimum_os_version
return None
def _command_line_options(*, cpu = None, minimum_os_version, platform_type, settings):
"""Generates a dictionary of command line options suitable for the current target.
Args:
cpu: A valid Apple cpu command line option as a string, or None to infer a value from
command line options passed through settings.
minimum_os_version: A string representing the minimum OS version specified for this
platform, represented as a dotted version number (for example, `"9.0"`).
platform_type: The Apple platform for which the rule should build its targets (`"ios"`,
`"macos"`, `"tvos"`, or `"watchos"`).
settings: A dictionary whose set of keys is defined by the inputs parameter, typically from
the settings argument found on the implementation function of the current Starlark
transition.
Returns:
A dictionary of `"//command_line_option"`s defined for the current target.
"""
return {
"//command_line_option:apple configuration distinguisher": "applebin_" + platform_type,
"//command_line_option:apple_platform_type": platform_type,
"//command_line_option:apple_split_cpu": cpu if cpu else "",
"//command_line_option:compiler": settings["//command_line_option:apple_compiler"],
"//command_line_option:cpu": _cpu_string(
cpu = cpu,
platform_type = platform_type,
settings = settings,
),
"//command_line_option:crosstool_top": (
settings["//command_line_option:apple_crosstool_top"]
),
"//command_line_option:fission": [],
"//command_line_option:grte_top": settings["//command_line_option:apple_grte_top"],
"//command_line_option:ios_minimum_os": _min_os_version_or_none(
minimum_os_version = minimum_os_version,
platform = "ios",
platform_type = platform_type,
),
"//command_line_option:macos_minimum_os": _min_os_version_or_none(
minimum_os_version = minimum_os_version,
platform = "macos",
platform_type = platform_type,
),
"//command_line_option:tvos_minimum_os": _min_os_version_or_none(
minimum_os_version = minimum_os_version,
platform = "tvos",
platform_type = platform_type,
),
"//command_line_option:watchos_minimum_os": _min_os_version_or_none(
minimum_os_version = minimum_os_version,
platform = "watchos",
platform_type = platform_type,
),
}
def _command_line_options_for_platform(
*,
minimum_os_version,
platform_attr,
platform_type,
settings,
target_environments):
"""Generates a dictionary of command line options keyed by 1:2+ transition for this platform.
Args:
minimum_os_version: A string representing the minimum OS version specified for this
platform, represented as a dotted version number (for example, `"9.0"`).
platform_attr: The attribute for the apple platform specifying in dictionary form which
architectures to build for given a target environment as the key for this platform.
platform_type: The Apple platform for which the rule should build its targets (`"ios"`,
`"macos"`, `"tvos"`, or `"watchos"`).
settings: A dictionary whose set of keys is defined by the inputs parameter, typically from
the settings argument found on the implementation function of the current Starlark
transition.
target_environments: A list of strings representing target environments supported by the
platform. Possible strings include "device" and "simulator".
Returns:
A dictionary of keys for each <platform>_<arch>_<target_environment> found with a
corresponding dictionary of `"//command_line_option"`s as each key's value.
"""
output_dictionary = {}
for target_environment in target_environments:
if platform_attr.get(target_environment):
cpus = platform_attr[target_environment]
for cpu in cpus:
found_cpu = {
_cpu_string(
cpu = cpu,
platform_type = platform_type,
settings = settings,
) + "_" + target_environment: _command_line_options(
cpu = cpu,
minimum_os_version = minimum_os_version,
platform_type = platform_type,
settings = settings,
),
}
output_dictionary = dicts.add(found_cpu, output_dictionary)
return output_dictionary
def _apple_rule_base_transition_impl(settings, attr):
"""Rule transition for Apple rules."""
return _command_line_options(
minimum_os_version = attr.minimum_os_version,
platform_type = attr.platform_type,
settings = settings,
)
# These flags are a mix of options defined in native Bazel from the following fragments:
# - https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/analysis/config/CoreOptions.java
# - https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/rules/apple/AppleCommandLineOptions.java
# - https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/rules/cpp/CppOptions.java
_apple_rule_common_transition_inputs = [
"//command_line_option:apple_compiler",
"//command_line_option:apple_crosstool_top",
"//command_line_option:apple_grte_top",
]
_apple_rule_base_transition_inputs = _apple_rule_common_transition_inputs + [
"//command_line_option:cpu",
"//command_line_option:ios_multi_cpus",
"//command_line_option:macos_cpus",
"//command_line_option:tvos_cpus",
"//command_line_option:watchos_cpus",
]
_apple_rule_base_transition_outputs = [
"//command_line_option:apple configuration distinguisher",
"//command_line_option:apple_platform_type",
"//command_line_option:apple_split_cpu",
"//command_line_option:compiler",
"//command_line_option:cpu",
"//command_line_option:crosstool_top",
"//command_line_option:fission",
"//command_line_option:grte_top",
"//command_line_option:ios_minimum_os",
"//command_line_option:macos_minimum_os",
"//command_line_option:tvos_minimum_os",
"//command_line_option:watchos_minimum_os",
]
_apple_rule_base_transition = transition(
implementation = _apple_rule_base_transition_impl,
inputs = _apple_rule_base_transition_inputs,
outputs = _apple_rule_base_transition_outputs,
)
def _apple_rule_arm64_as_arm64e_transition_impl(settings, attr):
"""Rule transition for Apple rules that map arm64 to arm64e."""
key = "//command_line_option:macos_cpus"
# These additional settings are sent to both the base implementation and the final transition.
additional_settings = {key: [cpu if cpu != "arm64" else "arm64e" for cpu in settings[key]]}
return dicts.add(
_apple_rule_base_transition_impl(dicts.add(settings, additional_settings), attr),
additional_settings,
)
_apple_rule_arm64_as_arm64e_transition = transition(
implementation = _apple_rule_arm64_as_arm64e_transition_impl,
inputs = _apple_rule_base_transition_inputs,
outputs = _apple_rule_base_transition_outputs + ["//command_line_option:macos_cpus"],
)
def _static_framework_transition_impl(settings, attr):
"""Attribute transition for static frameworks to enable swiftinterface generation."""
return {
"@build_bazel_rules_swift//swift:emit_swiftinterface": True,
}
# This transition is used, for now, to enable swiftinterface generation on swift_library targets.
# Once apple_common.split_transition is migrated to Starlark, this transition should be merged into
# that one, being enabled by reading either a private attribute on the static framework rules, or
# some other mechanism, so that it is only enabled on static framework rules and not all Apple
# rules.
_static_framework_transition = transition(
implementation = _static_framework_transition_impl,
inputs = [],
outputs = [
"@build_bazel_rules_swift//swift:emit_swiftinterface",
],
)
def _xcframework_transition_impl(settings, attr):
"""Starlark 1:2+ transition for generation of multiple frameworks for the current target."""
output_dictionary = {}
if hasattr(attr, "macos"):
command_line_options_for_platform = _command_line_options_for_platform(
minimum_os_version = attr.minimum_os_versions.get("macos"),
platform_attr = attr.macos,
platform_type = "macos",
settings = settings,
target_environments = ["device"],
)
output_dictionary = dicts.add(command_line_options_for_platform, output_dictionary)
for platform_type in ["ios", "tvos", "watchos"]:
if hasattr(attr, platform_type):
command_line_options_for_platform = _command_line_options_for_platform(
minimum_os_version = attr.minimum_os_versions.get(platform_type),
platform_attr = getattr(attr, platform_type),
platform_type = platform_type,
settings = settings,
target_environments = ["device", "simulator"],
)
output_dictionary = dicts.add(command_line_options_for_platform, output_dictionary)
return output_dictionary
_xcframework_transition = transition(
implementation = _xcframework_transition_impl,
inputs = _apple_rule_common_transition_inputs,
outputs = _apple_rule_base_transition_outputs,
)
transition_support = struct(
apple_rule_transition = _apple_rule_base_transition,
apple_rule_arm64_as_arm64e_transition = _apple_rule_arm64_as_arm64e_transition,
static_framework_transition = _static_framework_transition,
xcframework_transition = _xcframework_transition,
)
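# Worked example (a sketch): for an xcframework target declared with
#   ios = {"device": ["arm64"], "simulator": ["x86_64"]}
# and minimum_os_versions = {"ios": "11.0"}, _xcframework_transition_impl yields two
# configurations keyed "ios_arm64_device" and "ios_x86_64_simulator", each carrying
# "//command_line_option:ios_minimum_os" = "11.0".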
|
lib/python2.7/site-packages/samba/netcmd/__init__.py | abankalarm/pth-toolkit | 480 | 12617618 | # Unix SMB/CIFS implementation.
# Copyright (C) <NAME> <<EMAIL>> 2009-2012
# Copyright (C) <NAME> <<EMAIL>> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import optparse, samba
from samba import getopt as options
from ldb import LdbError
import sys, traceback
import textwrap
class Option(optparse.Option):
pass
# This help formatter does text wrapping and preserves newlines
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self,description=""):
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
paragraphs = description.split('\n')
wrapped_paragraphs = [
textwrap.fill(p,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for p in paragraphs]
result = "\n".join(wrapped_paragraphs) + "\n"
return result
def format_epilog(self, epilog):
if epilog:
return "\n" + epilog + "\n"
else:
return ""
class Command(object):
"""A samba-tool command."""
def _get_short_description(self):
return self.__doc__.splitlines()[0].rstrip("\n")
short_description = property(_get_short_description)
def _get_full_description(self):
lines = self.__doc__.split("\n")
return lines[0] + "\n" + textwrap.dedent("\n".join(lines[1:]))
full_description = property(_get_full_description)
def _get_name(self):
name = self.__class__.__name__
if name.startswith("cmd_"):
return name[4:]
return name
name = property(_get_name)
# synopsis must be defined in all subclasses in order to provide the
# command usage
synopsis = None
takes_args = []
takes_options = []
takes_optiongroups = {}
hidden = False
raw_argv = None
raw_args = None
raw_kwargs = None
def __init__(self, outf=sys.stdout, errf=sys.stderr):
self.outf = outf
self.errf = errf
def usage(self, prog, *args):
parser, _ = self._create_parser(prog)
parser.print_usage()
def show_command_error(self, e):
'''display a command error'''
if isinstance(e, CommandError):
(etype, evalue, etraceback) = e.exception_info
inner_exception = e.inner_exception
message = e.message
force_traceback = False
else:
(etype, evalue, etraceback) = sys.exc_info()
inner_exception = e
message = "uncaught exception"
force_traceback = True
if isinstance(inner_exception, LdbError):
(ldb_ecode, ldb_emsg) = inner_exception
self.errf.write("ERROR(ldb): %s - %s\n" % (message, ldb_emsg))
elif isinstance(inner_exception, AssertionError):
self.errf.write("ERROR(assert): %s\n" % message)
force_traceback = True
elif isinstance(inner_exception, RuntimeError):
self.errf.write("ERROR(runtime): %s - %s\n" % (message, evalue))
elif type(inner_exception) is Exception:
self.errf.write("ERROR(exception): %s - %s\n" % (message, evalue))
force_traceback = True
elif inner_exception is None:
self.errf.write("ERROR: %s\n" % (message))
else:
self.errf.write("ERROR(%s): %s - %s\n" % (str(etype), message, evalue))
force_traceback = True
if force_traceback or samba.get_debug_level() >= 3:
traceback.print_tb(etraceback)
def _create_parser(self, prog, epilog=None):
parser = optparse.OptionParser(
usage=self.synopsis,
description=self.full_description,
formatter=PlainHelpFormatter(),
prog=prog,epilog=epilog)
parser.add_options(self.takes_options)
optiongroups = {}
for name, optiongroup in self.takes_optiongroups.iteritems():
optiongroups[name] = optiongroup(parser)
parser.add_option_group(optiongroups[name])
return parser, optiongroups
def message(self, text):
self.outf.write(text+"\n")
def _run(self, *argv):
parser, optiongroups = self._create_parser(argv[0])
opts, args = parser.parse_args(list(argv))
# Filter out options from option groups
args = args[1:]
kwargs = dict(opts.__dict__)
for option_group in parser.option_groups:
for option in option_group.option_list:
if option.dest is not None:
del kwargs[option.dest]
kwargs.update(optiongroups)
        # Check for a min and a max number of allowed arguments, whenever possible
        # The suffix "?" means zero or one occurrence
        # The suffix "+" means at least one occurrence
min_args = 0
max_args = 0
undetermined_max_args = False
for i, arg in enumerate(self.takes_args):
if arg[-1] != "?":
min_args += 1
if arg[-1] == "+":
undetermined_max_args = True
else:
max_args += 1
if (len(args) < min_args) or (not undetermined_max_args and len(args) > max_args):
parser.print_usage()
return -1
self.raw_argv = list(argv)
self.raw_args = args
self.raw_kwargs = kwargs
try:
return self.run(*args, **kwargs)
except Exception, e:
self.show_command_error(e)
return -1
def run(self):
"""Run the command. This should be overriden by all subclasses."""
raise NotImplementedError(self.run)
def get_logger(self, name="netcmd"):
"""Get a logger object."""
import logging
logger = logging.getLogger(name)
logger.addHandler(logging.StreamHandler(self.errf))
return logger
class SuperCommand(Command):
"""A samba-tool command with subcommands."""
synopsis = "%prog <subcommand>"
subcommands = {}
def _run(self, myname, subcommand=None, *args):
if subcommand in self.subcommands:
return self.subcommands[subcommand]._run(
"%s %s" % (myname, subcommand), *args)
epilog = "\nAvailable subcommands:\n"
subcmds = self.subcommands.keys()
subcmds.sort()
max_length = max([len(c) for c in subcmds])
for cmd_name in subcmds:
cmd = self.subcommands[cmd_name]
if not cmd.hidden:
epilog += " %*s - %s\n" % (
-max_length, cmd_name, cmd.short_description)
epilog += "For more help on a specific subcommand, please type: %s <subcommand> (-h|--help)\n" % myname
parser, optiongroups = self._create_parser(myname, epilog=epilog)
args_list = list(args)
if subcommand:
args_list.insert(0, subcommand)
opts, args = parser.parse_args(args_list)
parser.print_help()
return -1
class CommandError(Exception):
"""An exception class for samba-tool Command errors."""
def __init__(self, message, inner_exception=None):
self.message = message
self.inner_exception = inner_exception
self.exception_info = sys.exc_info()
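# A minimal sketch of a subcommand built on the classes above (hypothetical
# command name; real samba-tool commands live in the samba.netcmd.* modules):
class cmd_hello(Command):
    """Print a greeting."""
    synopsis = "%prog <name> [options]"
    takes_args = ["name"]
    def run(self, name):
        self.message("Hello %s" % name)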
|
SBR/lib/lk/basic_utils.py | yerang823/landmark-detection | 612 | 12617628 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import numbers, math
import numpy as np
import models.model_utils as MU
#### The utils for LK
def torch_inverse(deltp):
assert deltp.dim() == 2 and deltp.size(0) == 2 and deltp.size(1) == 2, 'The deltp format is not right : {}'.format( deltp.size() )
a, b, c, d = deltp[0,0], deltp[0,1], deltp[1,0], deltp[1,1]
a = a + np.finfo(float).eps
d = d + np.finfo(float).eps
divide = a*d-b*c
inverse = torch.cat([d, -b, -c, a]).view(2,2)
return inverse / divide
class SobelConv(nn.Module):
def __init__(self, tag, dtype):
super(SobelConv, self).__init__()
if tag == 'x':
Sobel = np.array([ [-1./8, 0, 1./8], [-2./8, 0, 2./8], [ -1./8, 0, 1./8] ])
#Sobel = np.array([ [ 0, 0, 0], [-0.5,0,0.5], [ 0, 0, 0] ])
elif tag == 'y':
Sobel = np.array([ [ -1./8, -2./8, -1./8], [ 0, 0, 0], [ 1./8, 2./8, 1./8] ])
#Sobel = np.array([ [ 0,-0.5, 0], [ 0, 0, 0], [ 0, 0.5, 0] ])
else:
raise NameError('Do not know this tag for Sobel Kernel : {}'.format(tag))
Sobel = torch.from_numpy(Sobel).type(dtype)
Sobel = Sobel.view(1, 1, 3, 3)
self.register_buffer('weight', Sobel)
self.tag = tag
def forward(self, input):
weight = self.weight.expand(input.size(1), 1, 3, 3).contiguous()
return F.conv2d(input, weight, groups=input.size(1), padding=1)
def __repr__(self):
return ('{name}(tag={tag})'.format(name=self.__class__.__name__, **self.__dict__))
def ComputeGradient(feature, tag):
if feature.dim() == 3:
feature = feature.unsqueeze(0)
squeeze = True
else:
squeeze = False
assert feature.dim() == 4, 'feature must be [batch x C x H x W] not {}'.format(feature.size())
  sobel = SobelConv(tag, feature.dtype)  # SobelConv requires a dtype; reuse the input feature's dtype
if feature.is_cuda: sobel.cuda()
if squeeze: return sobel(feature).squeeze(0)
else: return sobel(feature)
def Generate_Weight(patch_size, sigma=None):
assert isinstance(patch_size, list) or isinstance(patch_size, tuple)
assert patch_size[0] > 0 and patch_size[1] > 0, 'the patch size must > 0 rather :{}'.format(patch_size)
center = [(patch_size[0]-1.)/2, (patch_size[1]-1.)/2]
maps = np.fromfunction( lambda x, y: (x-center[0])**2 + (y-center[1])**2, (patch_size[0], patch_size[1]), dtype=int)
if sigma is None: sigma = min(patch_size[0], patch_size[1])/2.
maps = np.exp(maps / -2.0 / sigma / sigma)
maps[0, :] = maps[-1, :] = maps[:, 0] = maps[:, -1] = 0
return maps.astype(np.float32)
def warp_feature(feature, pts_location, patch_size):
# pts_location is [X,Y], patch_size is [H,W]
C, H, W = feature.size(0), feature.size(1), feature.size(2)
def normalize(x, L):
return -1. + 2. * x / (L-1)
crop_box = [pts_location[0]-patch_size[1], pts_location[1]-patch_size[0], pts_location[0]+patch_size[1], pts_location[1]+patch_size[0]]
crop_box[0] = normalize(crop_box[0], W)
crop_box[1] = normalize(crop_box[1], H)
crop_box[2] = normalize(crop_box[2], W)
crop_box[3] = normalize(crop_box[3], H)
affine_parameter = [(crop_box[2]-crop_box[0])/2, MU.np2variable(torch.zeros(1),feature.is_cuda,False), (crop_box[0]+crop_box[2])/2,
MU.np2variable(torch.zeros(1),feature.is_cuda,False), (crop_box[3]-crop_box[1])/2, (crop_box[1]+crop_box[3])/2]
affine_parameter = torch.cat(affine_parameter).view(2, 3)
theta = affine_parameter.unsqueeze(0)
feature = feature.unsqueeze(0)
grid_size = torch.Size([1, 1, 2*patch_size[0]+1, 2*patch_size[1]+1])
grid = F.affine_grid(theta, grid_size)
sub_feature = F.grid_sample(feature, grid).squeeze(0)
return sub_feature
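# Hedged usage sketch (shapes and values assumed, not part of the original file):
#   feature : C x H x W tensor, pts_location = (x, y) in pixel coordinates, patch_size = (ph, pw)
#   patch = warp_feature(feature, (32.0, 40.0), (7, 7))   # -> C x 15 x 15 patch
# The crop box is expressed as an affine transform over normalized [-1, 1] coordinates and
# sampled bilinearly with F.grid_sample, so sub-pixel point locations are handled smoothly.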
|
test/YACC/YACCFLAGS-fixture/myyacc.py | Valkatraz/scons | 1,403 | 12617640 | <filename>test/YACC/YACCFLAGS-fixture/myyacc.py<gh_stars>1000+
import getopt
import sys
cmd_opts, args = getopt.getopt(sys.argv[1:], 'o:I:x', [])
opt_string = ''
i_arguments = ''
for opt, arg in cmd_opts:
if opt == '-o': out = arg
elif opt == '-I': i_arguments = i_arguments + ' ' + arg
else: opt_string = opt_string + ' ' + opt
with open(out, 'wb') as ofp:
for a in args:
with open(a, 'rb') as ifp:
contents = ifp.read()
contents = contents.replace(b'YACCFLAGS', opt_string.encode())
contents = contents.replace(b'I_ARGS', i_arguments.encode())
ofp.write(contents)
sys.exit(0)
|
src/SALib/test_functions/linear_model_1.py | zjzh/SALib | 573 | 12617675 | <reponame>zjzh/SALib
import numpy as np
def evaluate(values):
"""Linear model (#1) used in Li et al., (2010).
y = x1 + x2 + x3 + x4 + x5
Parameters
----------
values : np.array
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>, "Global Sensitivity Analysis for
Systems with Independent and/or Correlated Inputs", Journal of
Physical Chemistry A, Vol. 114 (19), pp. 6022 - 6032, 2010,
https://doi.org/10.1021/jp9096919
"""
Y = np.zeros([values.shape[0]])
Y = np.sum(values,axis=1)
return Y
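# Illustrative usage (added; input assumed to have shape (N, 5) as in Li et al.):
#   X = np.array([[1., 2., 3., 4., 5.],
#                 [0., 0., 0., 0., 1.]])
#   evaluate(X)   # -> array([15., 1.]), i.e. the row-wise sum x1 + x2 + x3 + x4 + x5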
|
scout/utils/algorithms.py | mhkc/scout | 111 | 12617709 | import logging
LOG = logging.getLogger(__name__)
def ui_score(set_1, set_2):
"""Get the ui score of two sets
Given two bags of HPO terms, p and q, the UI score is defined as:
- let I(t) for a set of terms t, be the set of terms in t and all the ancestors of the terms
in t
- UI(p, q) = Size{Intersection{I(p), I(q)}} / Size{Union{I(p), I(q)}}
    The higher the UI score, the more similar they are
Args:
set_1, set_2 (set(str))
Returns:
ui_score (float)
"""
LOG.debug("Set 1: %s", ", ".join(set_1))
LOG.debug("Set 2: %s", ", ".join(set_2))
if not (set_1 and set_2):
return 0
ui_score = len(set_1.intersection(set_2)) / len(set_1.union(set_2))
LOG.debug("Found ui score: %s", ui_score)
return ui_score
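# Worked example (hypothetical terms; assumes ancestors are already included in each set,
# since this helper only intersects/unions exactly what it is given):
#   p = {"HP:1", "HP:2", "HP:3"}
#   q = {"HP:2", "HP:3", "HP:4"}
#   ui_score(p, q)   # -> 2 / 4 = 0.5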
|
Lib/idlelib/idle_test/test_parenmatch.py | shawwn/cpython | 52,316 | 12617763 | <filename>Lib/idlelib/idle_test/test_parenmatch.py
"""Test parenmatch, coverage 91%.
This must currently be a gui test because ParenMatch methods use
several text methods not defined on idlelib.idle_test.mock_tk.Text.
"""
from idlelib.parenmatch import ParenMatch
from test.support import requires
requires('gui')
import unittest
from unittest.mock import Mock
from tkinter import Tk, Text
class DummyEditwin:
def __init__(self, text):
self.text = text
self.indentwidth = 8
self.tabwidth = 8
self.prompt_last_line = '>>>' # Currently not used by parenmatch.
class ParenMatchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.root = Tk()
cls.root.withdraw()
cls.text = Text(cls.root)
cls.editwin = DummyEditwin(cls.text)
cls.editwin.text_frame = Mock()
@classmethod
def tearDownClass(cls):
del cls.text, cls.editwin
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
def tearDown(self):
self.text.delete('1.0', 'end')
def get_parenmatch(self):
pm = ParenMatch(self.editwin)
pm.bell = lambda: None
return pm
def test_paren_styles(self):
"""
Test ParenMatch with each style.
"""
text = self.text
pm = self.get_parenmatch()
for style, range1, range2 in (
('opener', ('1.10', '1.11'), ('1.10', '1.11')),
('default',('1.10', '1.11'),('1.10', '1.11')),
('parens', ('1.14', '1.15'), ('1.15', '1.16')),
('expression', ('1.10', '1.15'), ('1.10', '1.16'))):
with self.subTest(style=style):
text.delete('1.0', 'end')
pm.STYLE = style
text.insert('insert', 'def foobar(a, b')
pm.flash_paren_event('event')
self.assertIn('<<parenmatch-check-restore>>', text.event_info())
if style == 'parens':
self.assertTupleEqual(text.tag_nextrange('paren', '1.0'),
('1.10', '1.11'))
self.assertTupleEqual(
text.tag_prevrange('paren', 'end'), range1)
text.insert('insert', ')')
pm.restore_event()
self.assertNotIn('<<parenmatch-check-restore>>',
text.event_info())
self.assertEqual(text.tag_prevrange('paren', 'end'), ())
pm.paren_closed_event('event')
self.assertTupleEqual(
text.tag_prevrange('paren', 'end'), range2)
def test_paren_corner(self):
"""
Test corner cases in flash_paren_event and paren_closed_event.
These cases force conditional expression and alternate paths.
"""
text = self.text
pm = self.get_parenmatch()
text.insert('insert', '# this is a commen)')
pm.paren_closed_event('event')
text.insert('insert', '\ndef')
pm.flash_paren_event('event')
pm.paren_closed_event('event')
text.insert('insert', ' a, *arg)')
pm.paren_closed_event('event')
def test_handle_restore_timer(self):
pm = self.get_parenmatch()
pm.restore_event = Mock()
pm.handle_restore_timer(0)
self.assertTrue(pm.restore_event.called)
pm.restore_event.reset_mock()
pm.handle_restore_timer(1)
self.assertFalse(pm.restore_event.called)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
tests/models/DIFM_test.py | dzzxjl/DeepCTR | 6,192 | 12617769 | <filename>tests/models/DIFM_test.py
import pytest
from deepctr.models import DIFM
from ..utils import check_model, get_test_data, SAMPLE_SIZE
@pytest.mark.parametrize(
'att_head_num,dnn_hidden_units,sparse_feature_num',
[(1, (4,), 2), (2, (4, 4,), 2), (1, (4,), 1)]
)
def test_DIFM(att_head_num, dnn_hidden_units, sparse_feature_num):
model_name = "DIFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DIFM(feature_columns, feature_columns, dnn_hidden_units=dnn_hidden_units, dnn_dropout=0.5)
check_model(model, model_name, x, y)
if __name__ == "__main__":
pass
|
threat_hunting/CB-Command_R/config.py | knightsc/tau-tools | 202 | 12617783 | #!/usr/bin/env python
active = {
'url': 'https://<SUBDOMAIN>.carbonblack.io/api/v1/process',
'key': '<API KEY>'
}
# ======================================================================
# Place API key and URL in 'active' to use with the cmdline-search.py
# ======================================================================
env1 = {
'url': 'https://<SUBDOMAIN>.carbonblack.io/api/v1/process',
'key': '<API KEY>'
}
env2 = {
'url': 'https://<SUBDOMAIN>.carbonblack.io/api/v1/process',
'key': '<API KEY>'
}
etc = {
'url': 'https://<SUBDOMAIN>.carbonblack.io/api/v1/process',
'key': '<API KEY>'
} |
third_party/blink/tools/blinkpy/tool/commands/command.py | zipated/src | 2,151 | 12617791 | <filename>third_party/blink/tools/blinkpy/tool/commands/command.py
# Copyright (c) 2016 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import logging
import sys
from blinkpy.tool.grammar import pluralize
_log = logging.getLogger(__name__)
class Command(object):
# These class variables can be overridden in subclasses to set specific command behavior.
name = None
show_in_main_help = False
help_text = None
argument_names = None
long_help = None
def __init__(self, options=None, requires_local_commits=False):
self.required_arguments = self._parse_required_arguments(self.argument_names)
self.options = options
self.requires_local_commits = requires_local_commits
# option_parser can be overridden by the tool using set_option_parser
# This default parser will be used for standalone_help printing.
self.option_parser = HelpPrintingOptionParser(
usage=optparse.SUPPRESS_USAGE,
add_help_option=False,
option_list=self.options)
def _exit(self, code):
sys.exit(code)
    # This design is slightly awkward, but we need
    # the tool to be able to create and modify the option_parser
# before it knows what Command to run.
def set_option_parser(self, option_parser):
self.option_parser = option_parser
self._add_options_to_parser()
def _add_options_to_parser(self):
options = self.options or []
for option in options:
self.option_parser.add_option(option)
@staticmethod
def _parse_required_arguments(argument_names):
required_args = []
if not argument_names:
return required_args
split_args = argument_names.split(' ')
for argument in split_args:
if argument[0] == '[':
# For now our parser is rather dumb. Do some minimal validation that
# we haven't confused it.
if argument[-1] != ']':
raise Exception('Failure to parse argument string %s. Argument %s is missing ending ]' %
(argument_names, argument))
else:
required_args.append(argument)
return required_args
def name_with_arguments(self):
usage_string = self.name
if self.options:
usage_string += ' [options]'
if self.argument_names:
usage_string += ' ' + self.argument_names
return usage_string
def parse_args(self, args):
return self.option_parser.parse_args(args)
def check_arguments_and_execute(self, options, args, tool=None):
if len(args) < len(self.required_arguments):
_log.error("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage.",
pluralize('argument', len(self.required_arguments)),
pluralize('argument', len(args)),
"'%s'" % ' '.join(args),
' '.join(self.required_arguments),
tool.name(),
self.name)
return 1
return self.execute(options, args, tool) or 0
def standalone_help(self):
help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + '\n\n'
if self.long_help:
help_text += '%s\n\n' % self.long_help
help_text += self.option_parser.format_option_help(optparse.IndentedHelpFormatter())
return help_text
def execute(self, options, args, tool):
raise NotImplementedError('subclasses must implement')
# main() exists so that Commands can be turned into stand-alone scripts.
# Other parts of the code will likely require modification to work stand-alone.
def main(self, args=None):
options, args = self.parse_args(args)
# Some commands might require a dummy tool
return self.check_arguments_and_execute(options, args)
class HelpPrintingOptionParser(optparse.OptionParser):
def __init__(self, epilog_method=None, *args, **kwargs):
self.epilog_method = epilog_method
optparse.OptionParser.__init__(self, *args, **kwargs)
def error(self, msg):
self.print_usage(sys.stderr)
error_message = '%s: error: %s\n' % (self.get_prog_name(), msg)
# This method is overridden to add this one line to the output:
error_message += '\nType \'%s --help\' to see usage.\n' % self.get_prog_name()
self.exit(1, error_message)
# We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
# and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
def format_epilog(self, epilog): # pylint: disable=unused-argument
if self.epilog_method:
return '\n%s\n' % self.epilog_method()
return ''
|
PythonCode/demo/app_kd_range.py | konny0311/algorithms-nutshell-2ed | 522 | 12617792 | <gh_stars>100-1000
"""
Demonstration application for range search using kd tree.
Left mouse adds point.
Right mouse click begins drag of rectangle.
"""
import tkinter
from adk.kd import KDTree, X, Y, VERTICAL
from adk.region import Region, minValue, maxValue
RectangleSize = 4
class KDTreeApp:
def __init__(self):
"""App for creating KD tree dynamically and executing range queries."""
self.tree = KDTree()
self.static = False
# for range query
self.selectedRegion = None
self.queryRect = None
self.master = tkinter.Tk()
self.master.title('KD Tree Range Query Application')
self.w = tkinter.Frame(self.master, width=410, height=410)
self.canvas = tkinter.Canvas(self.w, width=400, height=400)
self.paint()
self.canvas.bind("<Button-1>", self.click)
self.canvas.bind("<Motion>", self.moved)
self.canvas.bind("<Button-3>", self.range) # when right mouse clicked
self.canvas.bind("<ButtonRelease-3>", self.clear)
self.canvas.bind("<B3-Motion>", self.range) # only when right mouse dragged
self.w.pack()
def toCartesian(self, y):
"""Convert tkinter point into Cartesian."""
return self.w.winfo_height() - y
def toTk(self,y):
"""Convert Cartesian into tkinter point."""
if y == maxValue: return 0
tk_y = self.w.winfo_height()
if y != minValue:
tk_y -= y
return tk_y
def clear(self, event):
"""End of range search."""
self.selectedRegion = None
self.paint()
def range(self, event):
"""Initiate a range search using a selected rectangular region."""
p = (event.x, self.toCartesian(event.y))
if self.selectedRegion is None:
self.selectedStart = Region(p[X],p[Y], p[X],p[Y])
self.selectedRegion = self.selectedStart.unionPoint(p)
self.paint()
# return (node,status) where status is True if draining entire tree rooted at node. Draw these
# as shaded red rectangle to identify whole sub-tree is selected.
for pair in self.tree.range(self.selectedRegion):
p = pair[0].point
if pair[1]:
self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min),
pair[0].region.x_max, self.toTk(pair[0].region.y_max),
fill='Red', stipple='gray12')
else:
self.canvas.create_rectangle(p[X] - RectangleSize, self.toTk(p[Y]) - RectangleSize,
p[X] + RectangleSize, self.toTk(p[Y]) + RectangleSize, fill='Red')
self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min),
self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max),
outline='Red', dash=(2, 4))
def moved(self, event):
"""Only here for static option."""
if self.static:
self.paint()
def click(self, event):
"""Add point to KDtree."""
p = (event.x, self.toCartesian(event.y))
self.tree.add(p)
self.paint()
def drawPartition (self, r, p, orient):
"""Draw partitioning line and points itself as a small square."""
if orient == VERTICAL:
self.canvas.create_line(p[X], self.toTk(r.y_min), p[X], self.toTk(r.y_max))
else:
xlow = r.x_min
if r.x_min <= minValue: xlow = 0
xhigh = r.x_max
if r.x_max >= maxValue: xhigh = self.w.winfo_width()
self.canvas.create_line(xlow, self.toTk(p[Y]), xhigh, self.toTk(p[Y]))
self.canvas.create_rectangle(p[X] - RectangleSize, self.toTk(p[Y]) - RectangleSize,
p[X] + RectangleSize, self.toTk(p[Y]) + RectangleSize, fill='Black')
def visit (self, n):
""" Visit node to paint properly."""
if n == None: return
self.drawPartition(n.region, n.point, n.orient)
self.visit (n.below)
self.visit (n.above)
def prepare(self, event):
"""prepare to add points."""
if self.label:
self.label.destroy()
self.label = None
self.canvas.pack()
def paint(self):
"""Paint quad tree by visiting all nodes, or show introductory message."""
if self.tree.root:
self.canvas.delete(tkinter.ALL)
self.visit(self.tree.root)
else:
self.label = tkinter.Label(self.w, width=100, height = 40, text="Click To Add Points")
self.label.bind("<Button-1>", self.prepare)
self.label.pack()
if __name__ == "__main__":
app = KDTreeApp()
app.w.mainloop()
|
fuzzers/027-bram36-config/top.py | marzoul/prjxray | 583 | 12617803 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import random
import json
random.seed(int(os.getenv("SEED"), 16))
from prjxray.db import Database
from prjxray import util
from prjxray import verilog
def gen_bram36(grid):
for tile_name in grid.tiles():
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
found = False
for site_name, site_type in gridinfo.sites.items():
if site_type == 'RAMBFIFO36E1':
found = True
break
if found:
bram36_site_name = site_name
for site_name, site_type in gridinfo.sites.items():
if site_type == 'RAMB18E1':
bram18_site_name = site_name
if site_type == 'FIFO18E1':
fifo18_site_name = site_name
yield tile_name, bram36_site_name, bram18_site_name, fifo18_site_name
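# Added note: gen_bram36() walks the part's grid and, for every tile containing a RAMBFIFO36E1
# site, yields (tile_name, RAMBFIFO36E1 site, RAMB18E1 site, FIFO18E1 site) so that main() can
# randomly instantiate either one RAMB36E1 or a pair of 18K primitives in that tile.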
RAM_EXTENSION_OPTS = [
"NONE",
"LOWER",
"UPPER",
]
BRAM36_WIDTHS = [1, 2]
BRAM36_TO_18_WIDTHS = {1: 1, 2: 1}
def main():
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
print('''
module top();
''')
params = []
for tile_name, bram36_site_name, bram18_site_name, fifo18_site_name in gen_bram36(
grid):
bram36_ra_width = random.choice(BRAM36_WIDTHS)
bram36_wa_width = random.choice(BRAM36_WIDTHS)
bram36_rb_width = random.choice(BRAM36_WIDTHS)
bram36_wb_width = random.choice(BRAM36_WIDTHS)
bram18_ra_width = BRAM36_TO_18_WIDTHS[bram36_ra_width]
bram18_wa_width = BRAM36_TO_18_WIDTHS[bram36_wa_width]
bram18_rb_width = BRAM36_TO_18_WIDTHS[bram36_rb_width]
bram18_wb_width = BRAM36_TO_18_WIDTHS[bram36_wb_width]
if random.random() < .8:
if bram36_ra_width == 1 and bram36_wa_width == 1:
ram_extension_a = random.choice(RAM_EXTENSION_OPTS)
else:
ram_extension_a = 'NONE'
if bram36_rb_width == 1 and bram36_wb_width == 1:
ram_extension_b = random.choice(RAM_EXTENSION_OPTS)
else:
ram_extension_b = 'NONE'
en_ecc_read = random.randint(0, 1)
en_ecc_write = random.randint(0, 1)
print(
'''
(* KEEP, DONT_TOUCH, LOC = "{site}" *)
RAMB36E1 #(
.READ_WIDTH_A({bram36_ra_width}),
.WRITE_WIDTH_A({bram36_wa_width}),
.READ_WIDTH_B({bram36_rb_width}),
.WRITE_WIDTH_B({bram36_wb_width}),
.RAM_EXTENSION_A({ram_extension_a}),
.RAM_EXTENSION_B({ram_extension_b}),
.EN_ECC_READ({en_ecc_read}),
.EN_ECC_WRITE({en_ecc_write})
) bram_{site} (
.CLKARDCLK(),
.CLKBWRCLK(),
.ENARDEN(),
.ENBWREN(),
.REGCEAREGCE(),
.REGCEB(),
.RSTRAMARSTRAM(),
.RSTRAMB(),
.RSTREGARSTREG(),
.RSTREGB(),
.ADDRARDADDR(),
.ADDRBWRADDR(),
.DIADI(),
.DIBDI(),
.DIPADIP(),
.DIPBDIP(),
.WEA(),
.WEBWE(),
.DOADO(),
.DOBDO(),
.DOPADOP(),
.DOPBDOP());
'''.format(
site=bram36_site_name,
ram_extension_a=verilog.quote(ram_extension_a),
ram_extension_b=verilog.quote(ram_extension_b),
en_ecc_read=en_ecc_read,
en_ecc_write=en_ecc_write,
bram36_ra_width=bram36_ra_width,
bram36_wa_width=bram36_wa_width,
bram36_rb_width=bram36_rb_width,
bram36_wb_width=bram36_wb_width,
))
params.append(
{
'tile': tile_name,
'BRAM36_IN_USE': True,
'site': bram36_site_name,
'RAM_EXTENSION_A': ram_extension_a,
'RAM_EXTENSION_B': ram_extension_b,
'EN_ECC_READ': en_ecc_read,
'EN_ECC_WRITE': en_ecc_write,
'bram36_ra_width': bram36_ra_width,
'bram36_wa_width': bram36_wa_width,
'bram36_rb_width': bram36_rb_width,
'bram36_wb_width': bram36_wb_width,
})
else:
print(
'''
(* KEEP, DONT_TOUCH, LOC = "{bram18}" *)
RAMB18E1 #(
.READ_WIDTH_A({bram18_ra_width}),
.WRITE_WIDTH_A({bram18_wa_width}),
.READ_WIDTH_B({bram18_rb_width}),
.WRITE_WIDTH_B({bram18_wb_width})
) bram_{bram18} (
.CLKARDCLK(),
.CLKBWRCLK(),
.ENARDEN(),
.ENBWREN(),
.REGCEAREGCE(),
.REGCEB(),
.RSTRAMARSTRAM(),
.RSTRAMB(),
.RSTREGARSTREG(),
.RSTREGB(),
.ADDRARDADDR(),
.ADDRBWRADDR(),
.DIADI(),
.DIBDI(),
.DIPADIP(),
.DIPBDIP(),
.WEA(),
.WEBWE(),
.DOADO(),
.DOBDO(),
.DOPADOP(),
.DOPBDOP());
(* KEEP, DONT_TOUCH, LOC = "{fifo18}" *)
RAMB18E1 #(
.READ_WIDTH_A({bram18_ra_width}),
.WRITE_WIDTH_A({bram18_wa_width}),
.READ_WIDTH_B({bram18_rb_width}),
.WRITE_WIDTH_B({bram18_wb_width})
) bram_{fifo18} (
.CLKARDCLK(),
.CLKBWRCLK(),
.ENARDEN(),
.ENBWREN(),
.REGCEAREGCE(),
.REGCEB(),
.RSTRAMARSTRAM(),
.RSTRAMB(),
.RSTREGARSTREG(),
.RSTREGB(),
.ADDRARDADDR(),
.ADDRBWRADDR(),
.DIADI(),
.DIBDI(),
.DIPADIP(),
.DIPBDIP(),
.WEA(),
.WEBWE(),
.DOADO(),
.DOBDO(),
.DOPADOP(),
.DOPBDOP());
'''.format(
bram18=bram18_site_name,
fifo18=fifo18_site_name,
bram18_ra_width=bram18_ra_width,
bram18_wa_width=bram18_wa_width,
bram18_rb_width=bram18_rb_width,
bram18_wb_width=bram18_wb_width,
))
params.append(
{
'tile': tile_name,
'BRAM36_IN_USE': False,
'site': bram36_site_name,
'bram36_ra_width': bram36_ra_width,
'bram36_wa_width': bram36_wa_width,
'bram36_rb_width': bram36_rb_width,
'bram36_wb_width': bram36_wb_width,
})
print("endmodule")
with open('params.json', 'w') as f:
json.dump(params, f, indent=2)
if __name__ == '__main__':
main()
|
Python3/1039.py | rakhi2001/ecom7 | 854 | 12617809 | __________________________________________________________________________________________________
sample 92 ms submission
class Solution:
def minScoreTriangulation(self, A: List[int]) -> int:
if len(A) < 3:
return 0
elif len(A) == 3:
return A[0]*A[1]*A[2]
else:
dp = [[0]*len(A) for i in range(len(A))]
for d in range(2, len(A)):
for i in range(len(A)-d):
j = i+d
dp[i][j] = min(dp[i][k] + dp[k][j] + A[i]*A[j]*A[k] for k in range(i+1,j))
return dp[0][len(A)-1]
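# Added note (not part of the original submission): dp[i][j] is the minimum triangulation
# score of the sub-polygon A[i..j]; the edge (i, j) is matched with some vertex k strictly
# between them, contributing A[i]*A[j]*A[k] plus the two independent sub-problems dp[i][k]
# and dp[k][j].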
__________________________________________________________________________________________________
sample 120 ms submission
import sys
class Solution:
cache = {}
def minScoreTriangulation(self, A) -> int:
if len(A) < 3:
return 0
if len(A) == 3:
return A[0] * A[1] * A[2]
dp = [[0]*len(A) for _ in range(len(A))]
for i in range(2, len(A)):
for j in range(0, len(A)-i):
k = j + i
dp[j][k] = sys.maxsize
for m in range(j+1, k):
dp[j][k] = min(dp[j][k], dp[j][m]+dp[m][k]+A[j]*A[m]*A[k])
# print(dp)
return dp[0][-1]
# s = Solution()
# res = s.minScoreTriangulation([3, 7, 4, 5])
# print(res)
__________________________________________________________________________________________________
|
utils/meshRelax.py | fsanges/glTools | 165 | 12617817 | <filename>utils/meshRelax.py
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.component
import glTools.utils.curve
import glTools.utils.mathUtils
import glTools.utils.surface
class UserInputError(Exception): pass
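# Added commentary: neighbour() fills the meshRelax node's neighbourData attribute. For each
# vertex it looks up the closest point/tangent on the reference curve or surface, takes the
# pick-walk neighbours above and below, and stores an MVector of
# (vertexId, neighbour1Id + normalisedDistance1, neighbour2Id + normalisedDistance2).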
def neighbour(vertexList,referenceObject,meshRelax):
'''
'''
# Get meshRelax object and target plug
sel = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getSelectionListByName(meshRelax,sel)
meshRelaxObj = OpenMaya.MObject()
sel.getDependNode(0,meshRelaxObj)
meshRelaxNode = OpenMaya.MFnDependencyNode(meshRelaxObj)
neighbourDataPlug = meshRelaxNode.findPlug('neighbourData')
neighbourDataArrayPlug = neighbourDataPlug.elementByLogicalIndex(0)
# Check reference object
isCurve = True
if not glTools.utils.curve.isCurve(referenceObject):
isCurve = False
	if not isCurve and not glTools.utils.surface.isSurface(referenceObject): # raise only if neither a curve nor a surface
raise UserInputError('Reference object must be a valid nurbs curve or surface!!')
# Create neighbourData object
neighbourData = OpenMaya.MVectorArray()
# Get mesh and vertex list
mesh = glTools.utils.component.getComponentIndexList(vertexList).keys()[0]
# Get vertexIterator for mesh
sel = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getSelectionListByName(mesh,sel)
meshObj = OpenMaya.MObject()
sel.getDependNode(0,meshObj)
meshIt = OpenMaya.MItMeshVertex(meshObj)
# Get neighbour data
for i in range(len(vertexList)):
# Get current point
pnt = mc.pointPosition(vertexList[i])
pntId = glTools.utils.component.getComponentIndexList([vertexList[i]])[mesh][0]
# Get closest U tangent
if isCurve:
u = glTools.utils.curve.closestPoint(referenceObject,pnt)
tan = mc.pointOnCurve(referenceObject,pr=u,nt=True)
else:
uv = glTools.utils.surface.closestPoint(referenceObject,pnt)
tan = mc.pointOnSurface(referenceObject,u=uv[0],v=uv[1],ntu=True)
tangent = OpenMaya.MVector(tan[0],tan[1],tan[2])
# Get neighbouring points
n1 = mc.pickWalk(vertexList[i],d='up')[0]
n1Id = glTools.utils.component.getComponentIndexList([n1])[mesh][0]
n1Pt = mc.pointPosition(n1)
n1Dist = glTools.utils.mathUtils.distanceBetween(pnt,n1Pt)
n2 = mc.pickWalk(vertexList[i],d='down')[0]
n2Id = glTools.utils.component.getComponentIndexList([n2])[mesh][0]
n2Pt = mc.pointPosition(n2)
n2Dist = glTools.utils.mathUtils.distanceBetween(pnt,n2Pt)
# Build neighbour data vector
tDist = n1Dist + n2Dist
neighbourData.append(OpenMaya.MVector(float(pntId),n1Id+(n1Dist/tDist),n2Id+(n2Dist/tDist)))
# Set value
neighbourDataArrayPlug.setMObject(OpenMaya.MFnVectorArrayData().create(neighbourData))
|
userena/middleware.py | mortenwh/django-userena | 501 | 12617819 | <reponame>mortenwh/django-userena
from django.utils import translation
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable
from userena.utils import get_user_profile
class UserenaLocaleMiddleware(object):
"""
Set the language by looking at the language setting in the profile.
It doesn't override the cookie that is set by Django so a user can still
switch languages depending if the cookie is set.
"""
def process_request(self, request):
lang_cookie = request.session.get(settings.LANGUAGE_COOKIE_NAME)
if not lang_cookie:
if request.user.is_authenticated():
try:
profile = get_user_profile(user=request.user)
except (ObjectDoesNotExist, SiteProfileNotAvailable):
profile = False
if profile:
try:
lang = getattr(profile, userena_settings.USERENA_LANGUAGE_FIELD)
translation.activate(lang)
request.LANGUAGE_CODE = translation.get_language()
except AttributeError: pass
|
cleverhans/torch/attacks/noise.py | xu-weizhen/cleverhans | 4,333 | 12617834 | <gh_stars>1000+
"""
The Noise Attack
"""
import numpy as np
import torch
def noise(x, eps=0.3, order=np.inf, clip_min=None, clip_max=None):
"""
A weak attack that just picks a random point in the attacker's action
space. When combined with an attack bundling function, this can be used to
implement random search.
References:
https://arxiv.org/abs/1802.00420 recommends random search to help identify
gradient masking
https://openreview.net/forum?id=H1g0piA9tQ recommends using noise as part
of an attack building recipe combining many different optimizers to
yield a strong optimizer.
Args:
:param x: the input tensor
:param eps: (optional float) maximum distortion of adversarial example
compared to original input.
    :param order: (optional) Order of the norm.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
if order != np.inf:
        raise NotImplementedError(order)
eta = torch.FloatTensor(*x.shape).to(x.device).uniform_(-eps, eps)
adv_x = x + eta
if clip_min is not None or clip_max is not None:
assert clip_min is not None and clip_max is not None
adv_x = torch.clamp(adv_x, min=clip_min, max=clip_max)
return adv_x
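# Minimal usage sketch (tensor shapes and values assumed, not part of the original module):
#   x = torch.rand(8, 3, 32, 32)                          # batch of images in [0, 1]
#   x_adv = noise(x, eps=0.3, clip_min=0.0, clip_max=1.0)
#   # x_adv stays within an L-infinity ball of radius eps around x, clipped back to [0, 1]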
|
Chapter09/chapter_09_example_01.py | pesader/hands-on-music-generation-with-magenta | 123 | 12617844 | <filename>Chapter09/chapter_09_example_01.py
"""
Utility functions for finding and creating MIDI ports.
VERSION: Magenta 1.1.7
"""
import mido
from magenta.interfaces.midi.midi_hub import MidiHub
def find_midi_ports():
print(f"Input ports: {mido.get_input_names()}")
print(f"Output ports: {mido.get_output_names()}")
def create_virtual_midi_ports():
MidiHub(input_midi_ports=["magenta_in"],
output_midi_ports=["magenta_out"],
texture_type=None)
if __name__ == "__main__":
find_midi_ports()
# create_virtual_midi_ports()
|
rhn_train.py | vk496e1/RecurrentHighwayNetworks | 427 | 12617870 | """Word/Symbol level next step prediction using Recurrent Highway Networks.
To run:
$ python rhn_train.py
"""
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import time
import os
import numpy as np
import tensorflow as tf
from sacred import Experiment
from rhn import Model
from data.reader import data_iterator
ex = Experiment('rhn_prediction')
logging = tf.logging
class Config:
pass
C = Config()
@ex.config
def hyperparameters():
data_path = 'data'
dataset = 'ptb'
init_scale = 0.04
init_bias = -2.0
num_layers = 1
depth = 4 # the recurrence depth
learning_rate = 0.2
lr_decay = 1.02
weight_decay = 1e-7
max_grad_norm = 10
num_steps = 35
hidden_size = 1000
max_epoch = 20
max_max_epoch = 500
batch_size = 20
drop_x = 0.25
drop_i = 0.75
drop_h = 0.25
drop_o = 0.75
tied = True
load_model = ''
mc_steps = 0
if dataset == 'ptb':
vocab_size = 10000
elif dataset == 'enwik8':
vocab_size = 205
elif dataset == 'text8':
vocab_size = 27
else:
raise AssertionError("Unsupported dataset! Only 'ptb',",
"'enwik8' and 'text8' are currently supported.")
@ex.named_config
def ptb_sota():
data_path = 'data'
dataset = 'ptb'
init_scale = 0.04
init_bias = -2.0
num_layers = 1
depth = 10
learning_rate = 0.2
lr_decay = 1.02
weight_decay = 1e-7
max_grad_norm = 10
num_steps = 35
hidden_size = 830
max_epoch = 20
max_max_epoch = 500
batch_size = 20
drop_x = 0.25
drop_i = 0.75
drop_h = 0.25
drop_o = 0.75
tied = True
vocab_size = 10000
@ex.named_config
def enwik8_sota():
# test BPC 1.27
data_path = 'data'
dataset = 'enwik8'
init_scale = 0.04
init_bias = -4.0
num_layers = 1
depth = 10
learning_rate = 0.2
lr_decay = 1.03
weight_decay = 1e-7
max_grad_norm = 10
num_steps = 50
hidden_size = 1500
max_epoch = 5
max_max_epoch = 500
batch_size = 128
drop_x = 0.10
drop_i = 0.40
drop_h = 0.10
drop_o = 0.40
tied = False
vocab_size = 205
@ex.named_config
def text8_sota():
# test BPC 1.27
data_path = 'data'
dataset = 'text8'
init_scale = 0.04
init_bias = -4.0
num_layers = 1
depth = 10
learning_rate = 0.2
lr_decay = 1.03
weight_decay = 1e-7
max_grad_norm = 10
num_steps = 50
hidden_size = 1500
max_epoch = 5
max_max_epoch = 500
batch_size = 128
drop_x = 0.10
drop_i = 0.40
drop_h = 0.10
drop_o = 0.40
tied = False
vocab_size = 27
@ex.capture
def get_config(_config):
C.__dict__ = dict(_config)
return C
def get_data(data_path, dataset):
if dataset == 'ptb':
from tensorflow.models.rnn.ptb import reader
raw_data = reader.ptb_raw_data(data_path)
elif dataset == 'enwik8':
from data import reader
raw_data = reader.enwik8_raw_data(data_path)
elif dataset == 'text8':
from data import reader
raw_data = reader.text8_raw_data(data_path)
return reader, raw_data
def get_noise(x, m, drop_x, drop_i, drop_h, drop_o):
keep_x, keep_i, keep_h, keep_o = 1.0 - drop_x, 1.0 - drop_i, 1.0 - drop_h, 1.0 - drop_o
if keep_x < 1.0:
noise_x = (np.random.random_sample((m.batch_size, m.num_steps, 1)) < keep_x).astype(np.float32) / keep_x
for b in range(m.batch_size):
for n1 in range(m.num_steps):
for n2 in range(n1 + 1, m.num_steps):
if x[b][n2] == x[b][n1]:
noise_x[b][n2][0] = noise_x[b][n1][0]
break
else:
noise_x = np.ones((m.batch_size, m.num_steps, 1), dtype=np.float32)
if keep_i < 1.0:
noise_i = (np.random.random_sample((m.batch_size, m.in_size, m.num_layers)) < keep_i).astype(np.float32) / keep_i
else:
noise_i = np.ones((m.batch_size, m.in_size, m.num_layers), dtype=np.float32)
if keep_h < 1.0:
noise_h = (np.random.random_sample((m.batch_size, m.size, m.num_layers)) < keep_h).astype(np.float32) / keep_h
else:
noise_h = np.ones((m.batch_size, m.size, m.num_layers), dtype=np.float32)
if keep_o < 1.0:
noise_o = (np.random.random_sample((m.batch_size, 1, m.size)) < keep_o).astype(np.float32) / keep_o
else:
noise_o = np.ones((m.batch_size, 1, m.size), dtype=np.float32)
return noise_x, noise_i, noise_h, noise_o
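# Added note: get_noise() builds the variational-dropout masks used during training -- one mask
# per sequence that is reused at every time step for the input connections (noise_i), the
# recurrent state (noise_h) and the output (noise_o), plus a word-level mask (noise_x) that is
# tied across repeated occurrences of the same token within a sequence.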
def run_epoch(session, m, data, eval_op, config, verbose=False):
"""Run the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = [x.eval() for x in m.initial_state]
for step, (x, y) in enumerate(data_iterator(data, m.batch_size, m.num_steps)):
noise_x, noise_i, noise_h, noise_o = get_noise(x, m, config.drop_x, config.drop_i, config.drop_h, config.drop_o)
feed_dict = {m.input_data: x, m.targets: y,
m.noise_x: noise_x, m.noise_i: noise_i, m.noise_h: noise_h, m.noise_o: noise_o}
feed_dict.update({m.initial_state[i]: state[i] for i in range(m.num_layers)})
cost, state, _ = session.run([m.cost, m.final_state, eval_op], feed_dict)
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
@ex.command
def evaluate(data_path, dataset, load_model):
"""Evaluate the model on the given data."""
ex.commands["print_config"]()
print("Evaluating model:", load_model)
reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)
config = get_config()
val_config = deepcopy(config)
test_config = deepcopy(config)
val_config.drop_x = test_config.drop_x = 0.0
val_config.drop_i = test_config.drop_i = 0.0
val_config.drop_h = test_config.drop_h = 0.0
val_config.drop_o = test_config.drop_o = 0.0
test_config.batch_size = test_config.num_steps = 1
with tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
_ = Model(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
mvalid = Model(is_training=False, config=val_config)
mtest = Model(is_training=False, config=test_config)
tf.global_variables_initializer().run()
saver = tf.train.Saver()
saver.restore(session, load_model)
print("Testing on batched Valid ...")
valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op(), config=val_config)
print("Valid Perplexity (batched): %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
print("Testing on non-batched Valid ...")
valid_perplexity = run_epoch(session, mtest, valid_data, tf.no_op(), config=test_config, verbose=True)
print("Full Valid Perplexity: %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
print("Testing on non-batched Test ...")
test_perplexity = run_epoch(session, mtest, test_data, tf.no_op(), config=test_config, verbose=True)
print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
def run_mc_epoch(seed, session, m, data, eval_op, config, mc_steps, verbose=False):
"""Run the model with noise on the given data multiple times for MC evaluation."""
n_steps = len(data)
all_probs = np.array([0.0]*n_steps)
sum_probs = np.array([0.0]*n_steps)
mc_i = 1
print("Total MC steps to do:", mc_steps)
if not os.path.isdir('./probs'):
print('Creating probs directory')
os.mkdir('./probs')
while mc_i <= mc_steps:
print("MC sample number:", mc_i)
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = [x.eval() for x in m.initial_state]
for step, (x, y) in enumerate(data_iterator(data, m.batch_size, m.num_steps)):
if step == 0:
noise_x, noise_i, noise_h, noise_o = get_noise(x, m, config.drop_x, config.drop_i, config.drop_h, config.drop_o)
feed_dict = {m.input_data: x, m.targets: y,
m.noise_x: noise_x, m.noise_i: noise_i, m.noise_h: noise_h, m.noise_o: noise_o}
feed_dict.update({m.initial_state[i]: state[i] for i in range(m.num_layers)})
cost, state, _ = session.run([m.cost, m.final_state, eval_op], feed_dict)
costs += cost
iters += m.num_steps
all_probs[step] = np.exp(-cost)
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
perplexity = np.exp(costs / iters)
print("Perplexity:", perplexity)
if perplexity < 500:
savefile = 'probs/' + str(seed) + '_' + str(mc_i)
print("Accepted. Saving to:", savefile)
np.save(savefile, all_probs)
sum_probs += all_probs
mc_i += 1
return np.exp(np.mean(-np.log(np.clip(sum_probs/mc_steps, 1e-10, 1-1e-10))))
@ex.command
def evaluate_mc(data_path, dataset, load_model, mc_steps, seed):
"""Evaluate the model on the given data using MC averaging."""
ex.commands['print_config']()
print("MC Evaluation of model:", load_model)
assert mc_steps > 0
reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)
config = get_config()
val_config = deepcopy(config)
test_config = deepcopy(config)
test_config.batch_size = test_config.num_steps = 1
with tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
_ = Model(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
_ = Model(is_training=False, config=val_config)
mtest = Model(is_training=False, config=test_config)
    tf.global_variables_initializer().run()
saver = tf.train.Saver()
saver.restore(session, load_model)
print("Testing on non-batched Test ...")
test_perplexity = run_mc_epoch(seed, session, mtest, test_data, tf.no_op(), test_config, mc_steps, verbose=True)
print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
@ex.automain
def main(data_path, dataset, seed, _run):
ex.commands['print_config']()
np.random.seed(seed)
reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)
config = get_config()
val_config = deepcopy(config)
test_config = deepcopy(config)
val_config.drop_x = test_config.drop_x = 0.0
val_config.drop_i = test_config.drop_i = 0.0
val_config.drop_h = test_config.drop_h = 0.0
val_config.drop_o = test_config.drop_o = 0.0
test_config.batch_size = test_config.num_steps = 1
with tf.Graph().as_default(), tf.Session() as session:
tf.set_random_seed(seed)
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
mtrain = Model(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
mvalid = Model(is_training=False, config=val_config)
mtest = Model(is_training=False, config=test_config)
tf.global_variables_initializer().run()
saver = tf.train.Saver()
trains, vals, tests, best_val = [np.inf], [np.inf], [np.inf], np.inf
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch + 1, 0.0)
mtrain.assign_lr(session, config.learning_rate / lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(mtrain.lr)))
train_perplexity = run_epoch(session, mtrain, train_data, mtrain.train_op, config=config,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f, Bits: %.3f" % (i + 1, train_perplexity, np.log2(train_perplexity)))
valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op(), config=val_config)
print("Epoch: %d Valid Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, valid_perplexity, np.log2(valid_perplexity)))
test_perplexity = run_epoch(session, mvalid, test_data, tf.no_op(), config=val_config)
print("Epoch: %d Test Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, test_perplexity, np.log2(test_perplexity)))
trains.append(train_perplexity)
vals.append(valid_perplexity)
tests.append(test_perplexity)
if valid_perplexity < best_val:
best_val = valid_perplexity
print("Best Batched Valid Perplexity improved to %.03f" % best_val)
save_path = saver.save(session, './' + dataset + "_" + str(seed) + "_best_model.ckpt")
print("Saved to:", save_path)
_run.info['epoch_nr'] = i + 1
_run.info['nr_parameters'] = mtrain.nvars.item()
_run.info['logs'] = {'train_perplexity': trains, 'valid_perplexity': vals, 'test_perplexity': tests}
print("Training is over.")
best_val_epoch = np.argmin(vals)
print("Best Batched Validation Perplexity %.03f (Bits: %.3f) was at Epoch %d" %
(vals[best_val_epoch], np.log2(vals[best_val_epoch]), best_val_epoch))
print("Training Perplexity at this Epoch was %.03f, Bits: %.3f" %
(trains[best_val_epoch], np.log2(trains[best_val_epoch])))
print("Batched Test Perplexity at this Epoch was %.03f, Bits: %.3f" %
(tests[best_val_epoch], np.log2(tests[best_val_epoch])))
_run.info['best_val_epoch'] = best_val_epoch
_run.info['best_valid_perplexity'] = vals[best_val_epoch]
with tf.Session() as sess:
saver.restore(sess, './' + dataset + "_" + str(seed) + "_best_model.ckpt")
print("Testing on non-batched Valid ...")
valid_perplexity = run_epoch(sess, mtest, valid_data, tf.no_op(), config=test_config, verbose=True)
print("Full Valid Perplexity: %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
print("Testing on non-batched Test ...")
test_perplexity = run_epoch(sess, mtest, test_data, tf.no_op(), config=test_config, verbose=True)
print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
_run.info['full_best_valid_perplexity'] = valid_perplexity
_run.info['full_test_perplexity'] = test_perplexity
return vals[best_val_epoch]
|
peeringdb_server/migrations/0030_affiliation_request_status_add_canceled.py | CyberFlameGO/peeringdb | 224 | 12617884 | # Generated by Django 2.2.9 on 2020-04-01 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0029_auto_20200401_1006"),
]
operations = [
migrations.AlterField(
model_name="userorgaffiliationrequest",
name="status",
field=models.CharField(
choices=[
("pending", "Pending"),
("approved", "Approved"),
("denied", "Denied"),
("canceled", "Canceled"),
],
help_text="Status of this request",
max_length=254,
),
),
]
|
tools/check-missing-ansible.py | ResilienceCare/ansible-lint | 484 | 12617886 | """Validates linter behavior when ansible python package is missing."""
import os
import subprocess
if __name__ == "__main__":
cmd = ["ansible-lint", "--version"]
result = subprocess.run(
cmd,
universal_newlines=True,
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
)
assert result.returncode == 4, result # missing ansible
|
cocotb/decorators.py | lavanyajagan/cocotb | 350 | 12617894 | # Copyright (c) 2013 Potential Ventures Ltd
# Copyright (c) 2013 SolarFlare Communications Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Potential Ventures Ltd,
# SolarFlare Communications Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections.abc
import functools
import inspect
import os
import sys
import typing
import warnings
from asyncio import CancelledError, InvalidStateError
import cocotb
import cocotb.triggers
from cocotb import outcomes
from cocotb.log import SimLog
from cocotb.result import ReturnValue
from cocotb.utils import extract_coro_stack, lazy_property, remove_traceback_frames
T = typing.TypeVar("T")
Self = typing.TypeVar("Self")
# Sadly the Python standard logging module is very slow so it's better not to
# make any calls by testing a boolean flag first
if "COCOTB_SCHEDULER_DEBUG" in os.environ:
_debug = True
else:
_debug = False
def public(f):
"""Use a decorator to avoid retyping function/class names.
* Based on an idea by <NAME>:
http://groups.google.com/group/comp.lang.python/msg/11cbb03e09611b8a
* Improved via a suggestion by <NAME>:
http://groups.google.com/group/comp.lang.python/msg/3d400fb22d8a42e1
"""
all = sys.modules[f.__module__].__dict__.setdefault("__all__", [])
if f.__name__ not in all: # Prevent duplicates if run from an IDE.
all.append(f.__name__)
return f
public(public) # Emulate decorating ourself
class Task(typing.Coroutine[typing.Any, typing.Any, T]):
"""Concurrently executing task.
This class is not intended for users to directly instantiate.
Use :func:`cocotb.create_task` to create a Task object,
or use :func:`cocotb.start_soon` or :func:`cocotb.start` to
create a Task and schedule it to run.
"""
_name: str = "Task" # class name of schedulable task
_id_count = 0 # used by the scheduler for debug
def __init__(self, inst):
if isinstance(inst, collections.abc.Coroutine):
self._natively_awaitable = True
elif inspect.isgenerator(inst):
self._natively_awaitable = False
elif inspect.iscoroutinefunction(inst):
raise TypeError(
"Coroutine function {} should be called prior to being "
"scheduled.".format(inst)
)
elif inspect.isasyncgen(inst):
raise TypeError(
"{} is an async generator, not a coroutine. "
"You likely used the yield keyword instead of await.".format(
inst.__qualname__
)
)
else:
raise TypeError(
f"{inst} isn't a valid coroutine! Did you forget to use the yield keyword?"
)
self._coro = inst
self._started = False
self._outcome: outcomes.Outcome = None
self._trigger: typing.Optional[cocotb.triggers.Trigger] = None
self._cancelled: typing.Optional[CancelledError] = None
self._task_id = self._id_count
type(self)._id_count += 1
self.__name__ = f"{type(self)._name} {self._task_id}"
self.__qualname__ = self.__name__
@lazy_property
def log(self) -> SimLog:
# Creating a logger is expensive, only do it if we actually plan to
# log anything
return SimLog(f"cocotb.{self.__qualname__}.{self._coro.__qualname__}")
@property
def retval(self) -> T:
"""Return the result of the Task.
If the Task ran to completion, the result is returned.
If the Task failed with an exception, the exception is re-raised.
If the Task is not yet complete, a :exc:`RuntimeError` is raised.
.. deprecated:: 1.7.0
"""
warnings.warn(
"Deprecated in favor of the result() method. "
"Replace `task.retval` with `task.result()`.",
DeprecationWarning,
stacklevel=2,
)
if self._outcome is None:
raise RuntimeError("coroutine is not complete")
return self._outcome.get()
@property
def _finished(self) -> bool:
"""``True`` if the Task is finished executing.
.. deprecated:: 1.7.0
"""
warnings.warn(
"Deprecated in favor of the done() method. "
"Replace `task._finished` with `task.done()`.",
DeprecationWarning,
stacklevel=2,
)
return self._outcome is not None
def __iter__(self: Self) -> Self:
# for use in "yield from" statements
return self
def __str__(self) -> str:
return f"<{self.__name__}>"
def _get_coro_stack(self) -> typing.Any:
"""Get the coroutine callstack of this Task."""
coro_stack = extract_coro_stack(self._coro)
# Remove Trigger.__await__() from the stack, as it's not really useful
if self._natively_awaitable and len(coro_stack):
if coro_stack[-1].name == "__await__":
coro_stack.pop()
return coro_stack
def __repr__(self) -> str:
coro_stack = self._get_coro_stack()
if cocotb.scheduler._current_task is self:
fmt = "<{name} running coro={coro}()>"
elif self.done():
fmt = "<{name} finished coro={coro}() outcome={outcome}>"
elif self._trigger is not None:
fmt = "<{name} pending coro={coro}() trigger={trigger}>"
elif not self._started:
fmt = "<{name} created coro={coro}()>"
else:
fmt = "<{name} adding coro={coro}()>"
try:
coro_name = coro_stack[-1].name
# coro_stack may be empty if:
# - exhausted generator
# - finished coroutine
except IndexError:
coro_name = self._coro.__name__
repr_string = fmt.format(
name=self.__name__,
coro=coro_name,
trigger=self._trigger,
outcome=self._outcome,
)
return repr_string
def _advance(self, outcome: outcomes.Outcome) -> typing.Any:
"""Advance to the next yield in this coroutine.
Args:
outcome: The :any:`outcomes.Outcome` object to resume with.
Returns:
The object yielded from the coroutine or None if coroutine finished
"""
try:
self._started = True
return outcome.send(self._coro)
except ReturnValue as e:
self._outcome = outcomes.Value(e.retval)
except StopIteration as e:
self._outcome = outcomes.Value(e.value)
except BaseException as e:
self._outcome = outcomes.Error(
remove_traceback_frames(e, ["_advance", "send"])
)
def send(self, value: typing.Any) -> typing.Any:
return self._coro.send(value)
def throw(self, exc: BaseException) -> typing.Any:
return self._coro.throw(exc)
def close(self) -> None:
return self._coro.close()
def kill(self) -> None:
"""Kill a coroutine."""
if self._outcome is not None:
# already finished, nothing to kill
return
if _debug:
self.log.debug("kill() called on coroutine")
# todo: probably better to throw an exception for anyone waiting on the coroutine
self._outcome = outcomes.Value(None)
cocotb.scheduler._unschedule(self)
def join(self) -> cocotb.triggers.Join:
"""Return a trigger that will fire when the wrapped coroutine exits."""
return cocotb.triggers.Join(self)
def has_started(self) -> bool:
"""Return ``True`` if the Task has started executing."""
return self._started
def cancel(self, msg: typing.Optional[str] = None) -> None:
"""Cancel a Task's further execution.
When a Task is cancelled, a :exc:`asyncio.CancelledError` is thrown into the Task.
"""
self._cancelled = CancelledError(msg)
warnings.warn(
"Calling this method will cause a CancelledError to be thrown in the "
"Task sometime in the future.",
FutureWarning,
stacklevel=2,
)
self.kill()
def cancelled(self) -> bool:
"""Return ``True`` if the Task was cancelled."""
return self._cancelled is not None
def done(self) -> bool:
"""Return ``True`` if the Task has finished executing."""
return self._outcome is not None or self.cancelled()
def result(self) -> T:
"""Return the result of the Task.
If the Task ran to completion, the result is returned.
If the Task failed with an exception, the exception is re-raised.
If the Task was cancelled, the CancelledError is re-raised.
If the coroutine is not yet complete, a :exc:`asyncio.InvalidStateError` is raised.
"""
if not self.done():
raise InvalidStateError("result is not yet available")
elif self.cancelled():
raise self._cancelled
else:
return self._outcome.get()
def exception(self) -> typing.Optional[BaseException]:
"""Return the exception of the Task.
If the Task ran to completion, ``None`` is returned.
If the Task failed with an exception, the exception is returned.
If the Task was cancelled, the CancelledError is re-raised.
If the coroutine is not yet complete, a :exc:`asyncio.InvalidStateError` is raised.
"""
if not self.done():
raise InvalidStateError("result is not yet available")
elif self.cancelled():
raise self._cancelled
elif isinstance(self._outcome, outcomes.Error):
return self._outcome.error
else:
return None
def __bool__(self) -> bool:
"""``True`` if Task is not done.
.. deprecated:: 1.7.0
"""
warnings.warn(
"Deprecated in favor of the done() method. "
"Replace with `not task.done()`.",
DeprecationWarning,
stacklevel=2,
)
return not self.done()
def __await__(self) -> typing.Generator[typing.Any, typing.Any, T]:
# It's tempting to use `return (yield from self._coro)` here,
# which bypasses the scheduler. Unfortunately, this means that
# we can't keep track of the result or state of the coroutine,
# things which we expose in our public API. If you want the
# efficiency of bypassing the scheduler, remove the `@coroutine`
# decorator from your `async` functions.
# Hand the coroutine back to the scheduler trampoline.
return (yield self)
RunningTask = Task
class RunningCoroutine(Task[T]):
"""
The result of calling a :any:`cocotb.coroutine` decorated coroutine.
All this class does is provide some extra attributes.
"""
def __init__(self, inst, parent):
super().__init__(inst)
self._parent = parent
self.__doc__ = parent._func.__doc__
self.module = parent._func.__module__
self.funcname = parent._func.__name__
class RunningTest(RunningCoroutine[T]):
"""
The result of calling a :class:`cocotb.test` decorated object.
All this class does is change ``__name__`` to show "Test" instead of "Task".
"""
_name: str = "Test"
def __init__(self, inst, parent):
super().__init__(inst, parent)
self.__name__ = f"{type(self)._name} {self.funcname}"
self.__qualname__ = self.__name__
class coroutine:
"""Decorator class that allows us to provide common coroutine mechanisms:
``log`` methods will log to ``cocotb.coroutine.name``.
:meth:`~cocotb.decorators.Task.join` method returns an event which will fire when the coroutine exits.
Used as ``@cocotb.coroutine``.
"""
def __init__(self, func):
self._func = func
functools.update_wrapper(self, func)
@lazy_property
def log(self):
return SimLog(f"cocotb.coroutine.{self._func.__qualname__}.{id(self)}")
def __call__(self, *args, **kwargs):
return RunningCoroutine(self._func(*args, **kwargs), self)
def __get__(self, obj, owner=None):
"""Permit the decorator to be used on class methods
and standalone functions"""
return type(self)(self._func.__get__(obj, owner))
def __iter__(self):
return self
def __str__(self):
return str(self._func.__qualname__)
@public
class function:
"""Decorator class that allows a function to block.
This allows a coroutine that consumes simulation time
to be called by a thread started with :class:`cocotb.external`;
in other words, to internally block while externally
appear to yield.
"""
def __init__(self, func):
self._coro = cocotb.coroutine(func)
@lazy_property
def log(self):
return SimLog(f"cocotb.function.{self._coro.__qualname__}.{id(self)}")
def __call__(self, *args, **kwargs):
return cocotb.scheduler._queue_function(self._coro(*args, **kwargs))
def __get__(self, obj, owner=None):
"""Permit the decorator to be used on class methods
and standalone functions"""
return type(self)(self._coro._func.__get__(obj, owner))
@public
class external:
"""Decorator to apply to an external function to enable calling from cocotb.
This turns a normal function that isn't a coroutine into a blocking coroutine.
Currently, this creates a new execution thread for each function that is
called.
Scope for this to be streamlined to a queue in future.
"""
def __init__(self, func):
self._func = func
self._log = SimLog(f"cocotb.external.{self._func.__qualname__}.{id(self)}")
def __call__(self, *args, **kwargs):
return cocotb.scheduler._run_in_executor(self._func, *args, **kwargs)
def __get__(self, obj, owner=None):
"""Permit the decorator to be used on class methods
and standalone functions"""
return type(self)(self._func.__get__(obj, owner))
class _decorator_helper(type):
"""
Metaclass that allows a type to be constructed using decorator syntax,
passing the decorated function as the first argument.
So:
@MyClass(construction, args='go here')
def this_is_passed_as_f(...):
pass
ends up calling
MyClass.__init__(this_is_passed_as_f, construction, args='go here')
"""
def __call__(cls, *args, **kwargs):
def decorator(f):
# fall back to the normal way of constructing an object, now that
# we have all the arguments
return type.__call__(cls, f, *args, **kwargs)
return decorator
@public
class test(coroutine, metaclass=_decorator_helper):
"""
Decorator to mark a Callable which returns a Coroutine as a test.
The test decorator provides a test timeout, and allows us to mark tests as skipped
or expecting errors or failures.
Tests are evaluated in the order they are defined in a test module.
Used as ``@cocotb.test(...)``.
Args:
timeout_time (numbers.Real or decimal.Decimal, optional):
Simulation time duration before timeout occurs.
.. versionadded:: 1.3
.. note::
Test timeout is intended for protection against deadlock.
Users should use :class:`~cocotb.triggers.with_timeout` if they require a
more general-purpose timeout mechanism.
timeout_unit (str, optional):
Units of timeout_time, accepts any units that :class:`~cocotb.triggers.Timer` does.
.. versionadded:: 1.3
.. deprecated:: 1.5
Using ``None`` as the *timeout_unit* argument is deprecated, use ``'step'`` instead.
expect_fail (bool, optional):
Don't mark the result as a failure if the test fails.
expect_error (exception type or tuple of exception types, optional):
Mark the result as a pass only if one of the exception types is raised in the test.
This is primarily for cocotb internal regression use for when a simulator error is expected.
Users are encouraged to use the following idiom instead::
@cocotb.test()
async def my_test(dut):
try:
await thing_that_should_fail()
except ExceptionIExpect:
pass
else:
assert False, "Exception did not occur"
.. versionchanged:: 1.3
Specific exception types can be expected
.. deprecated:: 1.5
Passing a :class:`bool` value is now deprecated.
Pass a specific :class:`Exception` or a tuple of Exceptions instead.
skip (bool, optional):
Don't execute this test as part of the regression. Test can still be run
manually by setting :make:var:`TESTCASE`.
        stage (int, optional):
Order tests logically into stages, where multiple tests can share a stage.
Defaults to 0.
"""
_id_count = 0 # used by the RegressionManager to sort tests in definition order
def __init__(
self,
f,
timeout_time=None,
timeout_unit="step",
expect_fail=False,
expect_error=(),
skip=False,
stage=0,
):
if timeout_unit is None:
warnings.warn(
'Using timeout_unit=None is deprecated, use timeout_unit="step" instead.',
DeprecationWarning,
stacklevel=2,
)
timeout_unit = "step" # don't propagate deprecated value
self._id = self._id_count
type(self)._id_count += 1
if timeout_time is not None:
co = coroutine(f)
@functools.wraps(f)
async def f(*args, **kwargs):
running_co = co(*args, **kwargs)
try:
res = await cocotb.triggers.with_timeout(
running_co, self.timeout_time, self.timeout_unit
)
except cocotb.result.SimTimeoutError:
running_co.kill()
raise
else:
return res
super().__init__(f)
self.timeout_time = timeout_time
self.timeout_unit = timeout_unit
self.expect_fail = expect_fail
if isinstance(expect_error, bool):
warnings.warn(
"Passing bool values to `except_error` option of `cocotb.test` is deprecated. "
"Pass a specific Exception type instead",
DeprecationWarning,
stacklevel=2,
)
if expect_error is True:
expect_error = (Exception,)
elif expect_error is False:
expect_error = ()
self.expect_error = expect_error
self.skip = skip
self.stage = stage
self.im_test = True # For auto-regressions
self.name = self._func.__name__
def __call__(self, *args, **kwargs):
inst = self._func(*args, **kwargs)
coro = RunningTest(inst, self)
return coro
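# Hedged usage sketch for the ``test`` decorator: ``dut``, ``Timer`` and the signal names
# below are assumptions for illustration only and are not defined in this module.
#
#     @cocotb.test(timeout_time=100, timeout_unit="us")
#     async def smoke_test(dut):
#         dut.rst.value = 1
#         await Timer(20, units="ns")
#         dut.rst.value = 0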
|
CalibTracker/SiPixelESProducers/python/SiPixelFakeGainForHLTESSource_cfi.py | ckamtsikis/cmssw | 852 | 12617902 | import FWCore.ParameterSet.Config as cms
SiPixelFakeGainForHLTESSource = cms.ESSource("SiPixelFakeGainForHLTESSource",
file = cms.FileInPath('CalibTracker/SiPixelESProducers/data/PixelSkimmedGeometry.txt')
)
|
bcs-ui/backend/helm/helm/urls.py | laodiu/bk-bcs | 599 | 12617904 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BlueKing PaaS Community Edition available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import url
from . import views
PROJECT_ID = "(?P<project_id>[\w\-]+)"
REPO_NAME = "(?P<repo_name>[a-z0-9_-]{1,32})"
REPO_ID = "(?P<repo_id>[0-9]+)"
urlpatterns = [
# repository
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/$',
views.RepositoryCreateView.as_view({'post': 'create'}),
name='api.helm.helm_repositories_create',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/lists/detailed',
views.RepositoryView.as_view({'get': 'list_detailed'}),
name='api.helm.helm_repositories_list_detailed',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/lists/minimal$',
views.RepositoryView.as_view({'get': 'list_minimal'}),
name='api.helm.helm_repositories_list_minimal',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/(?P<repo_id>[0-9]+)/$',
views.RepositoryView.as_view({'get': 'retrieve', 'delete': 'destroy', 'put': 'update'}),
name='api.helm.helm_repositories_delete',
),
    # Users may not care which repo a chart belongs to; they just want to find a particular chart among all the charts
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/charts/$',
views.ChartViewSet.as_view({"get": "list"}),
name='api.helm.helm_repo_chart_list',
),
# chart version
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/(?P<repo_id>[0-9]+)/'
'charts/(?P<chart_id>[0-9]+)/versions/$',
views.ChartVersionView.as_view({'get': 'list'}),
name='api.helm.helm_repo_chart_version_list',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/(?P<repo_id>[0-9]+)/'
'charts/(?P<chart_id>[0-9]+)/versions/(?P<version_id>[0-9]+)/$',
views.ChartVersionView.as_view({'get': 'retrieve'}),
name='api.helm.helm_repo_chart_version_detail',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/' 'charts/(?P<chart_id>[0-9]+)/versions/$',
views.ChartVersionView.as_view({'get': 'list'}),
name='api.helm.helm_repo_chart_version_list',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/'
'charts/(?P<chart_id>[0-9]+)/versions/(?P<version_id>[0-9]+)/$',
views.ChartVersionView.as_view({'get': 'retrieve'}),
name='api.helm.helm_repo_chart_version_detail',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/(?P<repo_id>[0-9]+)/sync/$',
views.RepositorySyncView.as_view({'post': 'create'}),
name='api.helm.helm_repositories_sync',
),
url(
r'^api/bcs/k8s/configuration/(?P<project_id>\w{32})/helm/repositories/sync/$',
views.RepositorySyncByProjectView.as_view({'post': 'create'}),
name='api.helm.helm_repositories_sync_by_project',
),
url(
r'^api/bcs/k8s/configuration_noauth/(?P<sync_project_id>\w{32})/helm/repositories/sync/$',
views.RepositorySyncByProjectAPIView.as_view({'post': 'create'}),
name='api.helm.helm_repositories_sync_by_project',
),
url(
r'^api/projects/(?P<project_id>\w{32})/helm/charts/(?P<chart_id>\d+)/releases/$',
views.ChartVersionViewSet.as_view({"get": "release_list"}),
),
url(
r'^api/projects/(?P<project_id>\w{32})/helm/charts/(?P<chart_id>\d+)/$',
views.ChartVersionViewSet.as_view({"delete": "delete"}),
),
url(
r'^api/projects/(?P<project_id>\w{32})/helm/charts/(?P<chart_name>[\w\-]+)/releases/$',
views.HelmChartVersionsViewSet.as_view({"post": "list_releases_by_chart_versions"}),
),
url(
r'^api/projects/(?P<project_id>\w{32})/helm/charts/(?P<chart_name>[\w\-]+)/$',
views.HelmChartVersionsViewSet.as_view({"delete": "batch_delete"}),
),
]
|
chapter_10/QASystem/answer/MongoUtil.py | LifeOfGame/mongodb_redis | 183 | 12617906 | import pymongo
import json
from bson import ObjectId
class MongoUtil(object):
def __init__(self):
db = pymongo.MongoClient().qa_system
self.question = db.question
self.answer = db.answer
def query_question(self):
question_iter_obj = self.question.aggregate([
{'$lookup': {
'from': 'answer',
'localField': '_id',
'foreignField': 'question_id',
'as': 'answer_list'}}])
question_list = []
for question in question_iter_obj:
question_list.append(
{'title': question['title'],
'detail': question['detail'],
'author': question['author'],
'vote_up': question['vote_up'] - question['vote_down'],
'answer_number': len(question['answer_list']),
'question_id': str(question['_id'])
}
)
return question_list
def query_answer(self, question_id):
answer_iter_obj = self.question.aggregate([
{'$match': {'_id': ObjectId(question_id)}},
{'$lookup': {
'from': 'answer',
'localField': '_id',
'foreignField': 'question_id',
'as': 'answer_list'}}])
question_answer = list(answer_iter_obj)[0]
question_answer_dict = {
'question_id': str(question_answer['_id']),
'question_title': question_answer['title'],
'question_detail': question_answer['detail'],
'question_author': question_answer['author'],
'answer_num': len(question_answer['answer_list'])
}
answer_list = []
for answer in question_answer['answer_list']:
answer_list.append(
{'answer_detail': answer['answer'],
'answer_author': answer['author'],
'answer_id': str(answer['_id']),
'answer_vote': answer['vote_up'] - answer['vote_down']})
question_answer_dict['answer_list'] = answer_list
return question_answer_dict
def insert_answer(self, question_id, answer, author, now, vote_up=0, vote_down=0):
data_to_insert = {
'author': author,
'question_id': ObjectId(question_id),
'answer': answer,
'answer_time': now,
'vote_up': vote_up,
'vote_down': vote_down
}
self.answer.insert_one(data_to_insert)
return True
def insert_question(self, title, detail, author, now, vote_up=0, vote_down=0):
data_to_insert = {
'title': title,
'detail': detail,
'author': author,
'ask_time': now,
'vote_up': vote_up,
'vote_down': vote_down
}
self.question.insert_one(data_to_insert)
return True
def vote_for_question(self, object_id, value):
self.question.update_one({'_id': ObjectId(object_id)}, {'$inc': {value: 1}})
return True
def vote_for_answer(self, object_id, value):
self.answer.update_one({'_id': ObjectId(object_id)}, {'$inc': {value: 1}})
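# Hedged usage sketch: assumes a local MongoDB instance with the qa_system database
# populated; the variable names are made up for illustration.
#
#     util = MongoUtil()
#     for question in util.query_question():
#         print(question['title'], question['answer_number'])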
|
chronologicon/__init__.py | rutherfordcraze/chronologicon | 103 | 12617911 | # -*- coding: utf-8 -*-
# Chronologicon v5.x
# <NAME>
# https://craze.co.uk
# 181028
import json
import os
import chronologicon.input
from easysettings import EasySettings
from chronologicon.strings import *
LOGS_FILENAME = 'logs.json'
STATS_FILENAME = 'stat.json'
PRESAVE_FILENAME = 'temp.json'
LOGS_DEFAULT = []
CUR_FILEPATH = os.path.dirname(__file__)
PREFS = EasySettings(os.path.join(CUR_FILEPATH, 'prefs.conf'))
# Logs version check
try:
LOGS = ''
with open(os.path.join(PREFS.get('SAVE_DIR'), LOGS_FILENAME), "r") as LOGS_FILE:
LOGS = json.load(LOGS_FILE)
if type(LOGS[0]['TIME_START']) is int:
Message('initLogsOutdated')
input.MigrateLogs()
except Exception as e:
Message('initVersionCheckFailed', e)
# Check any mission-critical files and create missing ones.
def Preflights():
global PREFS
# Check save directory
if PREFS.has_option('SAVE_DIR'):
if os.path.isdir(PREFS.get('SAVE_DIR')):
pass
else:
Message('initSaveDirNotVerified')
return False
else:
Message('initSaveDirNotSet')
return False
# Check logs file
if os.path.exists(os.path.join(PREFS.get('SAVE_DIR'), LOGS_FILENAME)):
pass
else:
Message('initCreatingLogsFile')
try:
# os.makedirs(os.path.dirname(LOGS_FILENAME), exist_ok=True)
with open(os.path.join(PREFS.get('SAVE_DIR'), LOGS_FILENAME), "w") as LOGS_FILE:
LOGS_FILE.write(json.dumps(LOGS_DEFAULT))
except Exception as e:
Message('initCreateLogFileFailed', e)
return False
# Check stats file
if os.path.exists(os.path.join(PREFS.get('SAVE_DIR'), STATS_FILENAME)):
pass
else:
Message('initCreatingStatsFile')
try:
# os.makedirs(os.path.dirname(LOGS_FILENAME), exist_ok=True)
with open(os.path.join(PREFS.get('SAVE_DIR'), STATS_FILENAME), "w") as STATS_FILE:
pass
except Exception as e:
Message('initCreateStatsFileFailed', e)
return False
# Check temp file
if os.path.exists(os.path.join(CUR_FILEPATH, PRESAVE_FILENAME)):
pass
else:
Message('initCreatingTempFile')
try:
with open(os.path.join(CUR_FILEPATH, PRESAVE_FILENAME), "w") as PRESAVE_FILE:
pass
except Exception as e:
Message('initCreateTempFileFailed', e)
return False
return True
|
Code-Sleep-Python/Sprint/sprint.py | shardul08/Code-Sleep-Python | 420 | 12617933 | import msvcrt
import time
high_score = 50
name = "no-one"
while(1):
distance = int(0)
print("\n--------------------------------------------------------------")
print('\n\nWelcome to the 100m sprint, tap z and x rapidly to move!')
print('* = 10m')
print("\n**Current record: " + str(high_score) + "s, by: " + name)
print('\nPress enter to start')
input()
print('Ready...')
time.sleep(1)
print('GO!')
start_time = time.time()
while(distance < 100):
k1 = msvcrt.getch().decode('ASCII')
if k1 == 'z':
k2 = msvcrt.getch().decode('ASCII')
if k2 == 'x':
distance += 1
if distance == 50:
print("* You're halfway there!")
elif distance % 10 == 0:
print('*')
fin_time = time.time() - start_time
fin_time = round(fin_time, 2)
    print('Well done, you did it in...')
print(fin_time)
if fin_time < high_score:
print("Well done you've got a new high score ")
name = input("Please enter your name : ")
high_score = fin_time
|
fernet_fields/fields.py | brianhelba/django-fernet-fields | 173 | 12617948 | from cryptography.fernet import Fernet, MultiFernet
from django.conf import settings
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.db import models
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
from . import hkdf
__all__ = [
'EncryptedField',
'EncryptedTextField',
'EncryptedCharField',
'EncryptedEmailField',
'EncryptedIntegerField',
'EncryptedDateField',
'EncryptedDateTimeField',
]
class EncryptedField(models.Field):
"""A field that encrypts values using Fernet symmetric encryption."""
_internal_type = 'BinaryField'
def __init__(self, *args, **kwargs):
if kwargs.get('primary_key'):
raise ImproperlyConfigured(
"%s does not support primary_key=True."
% self.__class__.__name__
)
if kwargs.get('unique'):
raise ImproperlyConfigured(
"%s does not support unique=True."
% self.__class__.__name__
)
if kwargs.get('db_index'):
raise ImproperlyConfigured(
"%s does not support db_index=True."
% self.__class__.__name__
)
super(EncryptedField, self).__init__(*args, **kwargs)
@cached_property
def keys(self):
keys = getattr(settings, 'FERNET_KEYS', None)
if keys is None:
keys = [settings.SECRET_KEY]
return keys
@cached_property
def fernet_keys(self):
if getattr(settings, 'FERNET_USE_HKDF', True):
return [hkdf.derive_fernet_key(k) for k in self.keys]
return self.keys
@cached_property
def fernet(self):
if len(self.fernet_keys) == 1:
return Fernet(self.fernet_keys[0])
return MultiFernet([Fernet(k) for k in self.fernet_keys])
def get_internal_type(self):
return self._internal_type
def get_db_prep_save(self, value, connection):
value = super(
EncryptedField, self
).get_db_prep_save(value, connection)
if value is not None:
retval = self.fernet.encrypt(force_bytes(value))
return connection.Database.Binary(retval)
def from_db_value(self, value, expression, connection, *args):
if value is not None:
value = bytes(value)
return self.to_python(force_text(self.fernet.decrypt(value)))
@cached_property
def validators(self):
# Temporarily pretend to be whatever type of field we're masquerading
# as, for purposes of constructing validators (needed for
# IntegerField and subclasses).
self.__dict__['_internal_type'] = super(
EncryptedField, self
).get_internal_type()
try:
return super(EncryptedField, self).validators
finally:
del self.__dict__['_internal_type']
def get_prep_lookup(self):
"""Raise errors for unsupported lookups"""
raise FieldError("{} '{}' does not support lookups".format(
self.lhs.field.__class__.__name__, self.lookup_name))
# Register all field lookups (except 'isnull') to our handler
for name, lookup in models.Field.class_lookups.items():
# Dynamically create classes that inherit from the right lookups
if name != 'isnull':
lookup_class = type('EncryptedField' + name, (lookup,), {
'get_prep_lookup': get_prep_lookup
})
EncryptedField.register_lookup(lookup_class)
class EncryptedTextField(EncryptedField, models.TextField):
pass
class EncryptedCharField(EncryptedField, models.CharField):
pass
class EncryptedEmailField(EncryptedField, models.EmailField):
pass
class EncryptedIntegerField(EncryptedField, models.IntegerField):
pass
class EncryptedDateField(EncryptedField, models.DateField):
pass
class EncryptedDateTimeField(EncryptedField, models.DateTimeField):
pass
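# Hedged usage sketch: the settings, model and field values below are assumptions for
# illustration and are not part of this module.
#
#     # settings.py
#     FERNET_KEYS = ["new-rotation-key", "old-key"]   # MultiFernet encrypts with the first key
#
#     # models.py
#     from django.db import models
#     from fernet_fields import EncryptedCharField, EncryptedDateField
#
#     class Patient(models.Model):
#         name = EncryptedCharField(max_length=100)
#         date_of_birth = EncryptedDateField()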
|
tests/test_pytest_overrides.py | sflems/django-constance | 899 | 12617988 | import unittest
try:
import pytest
from constance import config
from constance.test.pytest import override_config
class TestPytestOverrideConfigFunctionDecorator:
"""Test that the override_config decorator works correctly for Pytest classes.
Test usage of override_config on test method and as context manager.
"""
def test_default_value_is_true(self):
"""Assert that the default value of config.BOOL_VALUE is True."""
assert config.BOOL_VALUE
@pytest.mark.override_config(BOOL_VALUE=False)
def test_override_config_on_method_changes_config_value(self):
"""Assert that the pytest mark decorator changes config.BOOL_VALUE."""
assert not config.BOOL_VALUE
def test_override_config_as_context_manager_changes_config_value(self):
"""Assert that the context manager changes config.BOOL_VALUE."""
with override_config(BOOL_VALUE=False):
assert not config.BOOL_VALUE
assert config.BOOL_VALUE
@override_config(BOOL_VALUE=False)
def test_method_decorator(self):
"""Ensure `override_config` can be used as test method decorator."""
assert not config.BOOL_VALUE
@pytest.mark.override_config(BOOL_VALUE=False)
class TestPytestOverrideConfigDecorator:
"""Test that the override_config decorator works on classes."""
def test_override_config_on_class_changes_config_value(self):
"""Asser that the class decorator changes config.BOOL_VALUE."""
assert not config.BOOL_VALUE
@pytest.mark.override_config(BOOL_VALUE='True')
def test_override_config_on_overrided_value(self):
"""Ensure that method mark decorator changes already overrided value for class."""
assert config.BOOL_VALUE == 'True'
def test_fixture_override_config(override_config):
"""
Ensure `override_config` fixture is available globally
and can be used in test functions.
"""
with override_config(BOOL_VALUE=False):
assert not config.BOOL_VALUE
@override_config(BOOL_VALUE=False)
def test_func_decorator():
"""Ensure `override_config` can be used as test function decorator."""
assert not config.BOOL_VALUE
except ImportError:
pass
class PytestTests(unittest.TestCase):
def setUp(self):
self.skipTest('Skip all pytest tests when using unittest')
def test_do_not_skip_silently(self):
"""
        If there is not at least one test present, unittest silently skips the module.
"""
pass
|
test/espnet2/asr/encoder/test_conformer_encoder.py | nmfisher/espnet | 5,053 | 12618007 | import pytest
import torch
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
@pytest.mark.parametrize(
"input_layer", ["linear", "conv2d", "conv2d2", "conv2d6", "conv2d8", "embed"]
)
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
@pytest.mark.parametrize(
"rel_pos_type, pos_enc_layer_type, selfattention_layer_type",
[
("legacy", "abs_pos", "selfattn"),
("latest", "rel_pos", "rel_selfattn"),
("legacy", "rel_pos", "rel_selfattn"),
("legacy", "legacy_rel_pos", "legacy_rel_selfattn"),
],
)
def test_encoder_forward_backward(
input_layer,
positionwise_layer_type,
rel_pos_type,
pos_enc_layer_type,
selfattention_layer_type,
):
encoder = ConformerEncoder(
20,
output_size=2,
attention_heads=2,
linear_units=4,
num_blocks=2,
input_layer=input_layer,
macaron_style=False,
rel_pos_type=rel_pos_type,
pos_enc_layer_type=pos_enc_layer_type,
selfattention_layer_type=selfattention_layer_type,
activation_type="swish",
use_cnn_module=True,
cnn_module_kernel=3,
positionwise_layer_type=positionwise_layer_type,
)
if input_layer == "embed":
x = torch.randint(0, 10, [2, 32])
else:
x = torch.randn(2, 32, 20, requires_grad=True)
x_lens = torch.LongTensor([32, 28])
y, _, _ = encoder(x, x_lens)
y.sum().backward()
def test_encoder_invalid_layer_type():
with pytest.raises(ValueError):
ConformerEncoder(20, rel_pos_type="dummy")
with pytest.raises(ValueError):
ConformerEncoder(20, pos_enc_layer_type="dummy")
with pytest.raises(ValueError):
ConformerEncoder(
20, pos_enc_layer_type="abc_pos", selfattention_layer_type="dummy"
)
def test_encoder_invalid_rel_pos_combination():
with pytest.raises(AssertionError):
ConformerEncoder(
20,
rel_pos_type="latest",
pos_enc_layer_type="legacy_rel_pos",
selfattention_layer_type="legacy_rel_sselfattn",
)
with pytest.raises(AssertionError):
ConformerEncoder(
20,
pos_enc_layer_type="rel_pos",
selfattention_layer_type="legacy_rel_sselfattn",
)
with pytest.raises(AssertionError):
ConformerEncoder(
20,
pos_enc_layer_type="legacy_rel_pos",
selfattention_layer_type="rel_sselfattn",
)
def test_encoder_output_size():
encoder = ConformerEncoder(20, output_size=256)
assert encoder.output_size() == 256
def test_encoder_invalid_type():
with pytest.raises(ValueError):
ConformerEncoder(20, input_layer="fff")
|
test/test_type_hints.py | Hacky-DH/pytorch | 60,067 | 12618012 | import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
try:
import mypy.api
HAVE_MYPY = True
except ImportError:
HAVE_MYPY = False
def get_examples_from_docstring(docstr):
"""
Extracts all runnable python code from the examples
in docstrings; returns a list of lines.
"""
examples = doctest.DocTestParser().get_examples(docstr)
return [f' {l}' for e in examples for l in e.source.splitlines()]
def get_all_examples():
"""get_all_examples() -> str
This function grabs (hopefully all) examples from the torch documentation
strings and puts them in one nonsensical module returned as a string.
"""
blocklist = {
"_np",
}
allexamples = ""
example_file_lines = [
"import torch",
"import torch.nn.functional as F",
"import math",
"import numpy",
"import io",
"import itertools",
"",
# for requires_grad_ example
# NB: We are parsing this file as Python 2, so we must use
# Python 2 type annotation syntax
"def preprocess(inp):",
" # type: (torch.Tensor) -> torch.Tensor",
" return inp",
]
for fname in dir(torch):
fn = getattr(torch, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_{fname}():")
example_file_lines += e
for fname in dir(torch.Tensor):
fn = getattr(torch.Tensor, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_tensor_{fname}():")
example_file_lines += e
return "\n".join(example_file_lines)
class TestTypeHints(TestCase):
@unittest.skipIf(not HAVE_MYPY, "need mypy")
def test_doc_examples(self):
"""
Run documentation examples through mypy.
"""
fn = Path(__file__).resolve().parent / 'generated_type_hints_smoketest.py'
with open(fn, "w") as f:
print(get_all_examples(), file=f)
# OK, so here's the deal. mypy treats installed packages
# and local modules differently: if a package is installed,
# mypy will refuse to use modules from that package for type
# checking unless the module explicitly says that it supports
# type checking. (Reference:
# https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
# )
#
# Now, PyTorch doesn't support typechecking, and we shouldn't
# claim that it supports typechecking (it doesn't.) However, not
# claiming we support typechecking is bad for this test, which
# wants to use the partial information we get from the bits of
# PyTorch which are typed to check if it typechecks. And
# although mypy will work directly if you are working in source,
# some of our tests involve installing PyTorch and then running
# its tests.
#
# The guidance we got from <NAME> and <NAME>,
# and also independently developed by <NAME>,
# is that we should create a fake directory and add symlinks for
# the packages that should typecheck. So that is what we do
# here.
#
# If you want to run mypy by hand, and you run from PyTorch
# root directory, it should work fine to skip this step (since
# mypy will preferentially pick up the local files first). The
# temporary directory here is purely needed for CI. For this
# reason, we also still drop the generated file in the test
# source folder, for ease of inspection when there are failures.
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.symlink(
os.path.dirname(torch.__file__),
os.path.join(tmp_dir, 'torch'),
target_is_directory=True
)
except OSError:
raise unittest.SkipTest('cannot symlink') from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run([
'--cache-dir=.mypy_cache/doc',
'--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584
str(fn),
])
if result != 0:
self.fail(f"mypy failed:\n{stderr}\n{stdout}")
if __name__ == '__main__':
run_tests()
|
dask_cloudprovider/gcp/tests/test_utils.py | moti-jfrog/dask-cloudprovider | 102 | 12618042 | import pytest
from dask_cloudprovider.gcp.utils import build_request, is_inside_gce
def test_build_request():
assert build_request()(None, lambda x: x, "https://example.com")
@pytest.mark.xfail(
is_inside_gce(), reason="Fails if you run this test on GCE environment"
)
def test_is_gce_env():
# Note: this test isn't super valuable, but at least we run the code
assert is_inside_gce() is False
|
holoviews/tests/plotting/plotly/test_violinplot.py | TheoMathurin/holoviews | 864 | 12618046 | import numpy as np
from holoviews.element import Violin
from .test_plot import TestPlotlyPlot
class TestViolinPlot(TestPlotlyPlot):
def test_violin_single(self):
violin = Violin([1, 1, 2, 3, 3, 4, 5, 5])
state = self._get_plot_state(violin)
self.assertEqual(len(state['data']), 1)
self.assertEqual(state['data'][0]['type'], 'violin')
self.assertEqual(state['data'][0]['name'], '')
self.assertEqual(state['data'][0]['y'], np.array([1, 1, 2, 3, 3, 4, 5, 5]))
self.assertEqual(state['layout'].get('xaxis', {}), {})
self.assertEqual(state['layout']['yaxis']['range'], [1, 5])
self.assertEqual(state['layout']['yaxis']['title']['text'], 'y')
def test_violin_single_invert_axes(self):
violin = Violin([1, 1, 2, 3, 3, 4, 5, 5]).options(invert_axes=True)
state = self._get_plot_state(violin)
self.assertEqual(len(state['data']), 1)
self.assertEqual(state['data'][0]['type'], 'violin')
self.assertEqual(state['data'][0]['name'], '')
self.assertEqual(state['data'][0]['x'], np.array([1, 1, 2, 3, 3, 4, 5, 5]))
self.assertEqual(state['layout'].get('yaxis', {}), {})
self.assertEqual(state['layout']['xaxis']['range'], [1, 5])
self.assertEqual(state['layout']['xaxis']['title']['text'], 'y')
def test_violin_multi(self):
violin = Violin((['A']*8+['B']*8, [1, 1, 2, 3, 3, 4, 5, 5]*2), 'x', 'y')
state = self._get_plot_state(violin)
self.assertEqual(len(state['data']), 2)
self.assertEqual(state['data'][0]['type'], 'violin')
self.assertEqual(state['data'][0]['name'], 'A')
self.assertEqual(state['data'][0]['y'], np.array([1, 1, 2, 3, 3, 4, 5, 5]))
self.assertEqual(state['data'][1]['type'], 'violin')
self.assertEqual(state['data'][1]['name'], 'B')
self.assertEqual(state['data'][1]['y'], np.array([1, 1, 2, 3, 3, 4, 5, 5]))
self.assertEqual(state['layout']['xaxis']['title']['text'], 'x')
self.assertEqual(state['layout']['yaxis']['range'], [1, 5])
self.assertEqual(state['layout']['yaxis']['title']['text'], 'y')
def test_violin_multi_invert_axes(self):
violin = Violin((['A']*8+['B']*8, [1, 1, 2, 3, 3, 4, 5, 5]*2), 'x', 'y').options(
invert_axes=True)
state = self._get_plot_state(violin)
self.assertEqual(len(state['data']), 2)
self.assertEqual(state['data'][0]['type'], 'violin')
self.assertEqual(state['data'][0]['name'], 'A')
self.assertEqual(state['data'][0]['x'], np.array([1, 1, 2, 3, 3, 4, 5, 5]))
self.assertEqual(state['data'][1]['type'], 'violin')
self.assertEqual(state['data'][1]['name'], 'B')
self.assertEqual(state['data'][1]['x'], np.array([1, 1, 2, 3, 3, 4, 5, 5]))
self.assertEqual(state['layout']['yaxis']['title']['text'], 'x')
self.assertEqual(state['layout']['xaxis']['range'], [1, 5])
self.assertEqual(state['layout']['xaxis']['title']['text'], 'y')
def test_visible(self):
element = Violin([1, 1, 2, 3, 3, 4, 5, 5]).options(visible=False)
state = self._get_plot_state(element)
self.assertEqual(state['data'][0]['visible'], False)
|
docs/tutorial.py | mrtrkmn/yellowbrick | 3,662 | 12618056 | #!/usr/bin/env python
# Generate the classification report images for the tutorial
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier
from sklearn.ensemble import (
BaggingClassifier,
ExtraTreesClassifier,
RandomForestClassifier,
)
from yellowbrick.datasets import load_mushroom
from yellowbrick.classifier import ClassificationReport
ESTIMATORS = {
"SVC": {"model": SVC(gamma="auto"), "path": "images/tutorial/modelselect_svc.png"},
"NuSVC": {
"model": NuSVC(gamma="auto"),
"path": "images/tutorial/modelselect_nu_svc.png",
},
"LinearSVC": {
"model": LinearSVC(),
"path": "images/tutorial/modelselect_linear_svc.png",
},
"SGD": {
"model": SGDClassifier(max_iter=100, tol=1e-3),
"path": "images/tutorial/modelselect_sgd_classifier.png",
},
"KNN": {
"model": KNeighborsClassifier(),
"path": "images/tutorial/modelselect_kneighbors_classifier.png",
},
"LR": {
"model": LogisticRegression(solver="lbfgs"),
"path": "images/tutorial/modelselect_logistic_regression.png",
},
"LRCV": {
"model": LogisticRegressionCV(cv=3),
"path": "images/tutorial/modelselect_logistic_regression_cv.png",
},
"Bags": {
"model": BaggingClassifier(),
"path": "images/tutorial/modelselect_bagging_classifier.png",
},
"XTrees": {
"model": ExtraTreesClassifier(n_estimators=100),
"path": "images/tutorial/modelselect_extra_trees_classifier.png",
},
"RF": {
"model": RandomForestClassifier(n_estimators=100),
"path": "images/tutorial/modelselect_random_forest_classifier.png",
},
}
def visualize_model(X, y, estimator, path, **kwargs):
"""
Test various estimators.
"""
y = LabelEncoder().fit_transform(y)
model = Pipeline([("one_hot_encoder", OneHotEncoder()), ("estimator", estimator)])
_, ax = plt.subplots()
# Instantiate the classification model and visualizer
visualizer = ClassificationReport(
model,
classes=["edible", "poisonous"],
cmap="YlGn",
size=(600, 360),
ax=ax,
**kwargs
)
visualizer.fit(X, y)
visualizer.score(X, y)
visualizer.show(outpath=path)
if __name__ == "__main__":
X, y = load_mushroom()
for clf in ESTIMATORS.values():
visualize_model(X, y, clf["model"], clf["path"])
|
lib/tamper_scripts/base64_encode.py | ikstream/Zeus-Scanner | 841 | 12618078 | import base64
from lib.core.settings import (
logger,
set_color
)
def tamper(payload, **kwargs):
warning = kwargs.get("warning", True)
if warning:
logger.warning(set_color(
"base64 tamper scripts may increase the possibility of not finding vulnerabilities "
"in otherwise vulnerable sites", level=30
))
return base64.b64encode(payload) |
dl_lib/configs/segm_config.py | AndysonYs/DynamicRouting | 122 | 12618080 | from .base_config import BaseConfig
_config_dict = dict(
MODEL=dict(
LOAD_PROPOSALS=False,
MASK_ON=False,
KEYPOINT_ON=False,
BACKBONE=dict(FREEZE_AT=0, ),
RESNETS=dict(
OUT_FEATURES=["res2", "res3", "res4", "res5"],
NORM="nnSyncBN",
NUM_GROUPS=1,
WIDTH_PER_GROUP=64,
STRIDE_IN_1X1=True,
RES5_DILATION=1,
RES2_OUT_CHANNELS=256,
STEM_OUT_CHANNELS=64,
DEFORM_ON_PER_STAGE=[False, False, False, False],
DEFORM_MODULATED=False,
DEFORM_NUM_GROUPS=1,
),
FPN=dict(
IN_FEATURES=[],
OUT_CHANNELS=256,
NORM="",
FUSE_TYPE="sum",
),
SEM_SEG_HEAD=dict(
# NAME="SemSegFPNHead",
IN_FEATURES=[],
IGNORE_VALUE=255,
NUM_CLASSES=(),
CONVS_DIM=256,
COMMON_STRIDE=(),
NORM="GN",
LOSS_WEIGHT=1.0,
),
SOLVER=dict(
LR_SCHEDULER=dict(
NAME="PolyLR",
POLY_POWER=0.9,
MAX_ITER=40000,
WARMUP_ITERS=1000,
WARMUP_FACTOR=0.001,
WARMUP_METHOD="linear",
),
OPTIMIZER=dict(BASE_LR=0.01, ),
IMS_PER_BATCH=16,
CHECKPOINT_PERIOD=5000,
),
TEST=dict(PRECISE_BN=dict(ENABLED=True), ),
),
INPUT=dict(CROP_PAD=dict(
ENABLED=True,
TYPE='absolute',
SIZE=(),
IMG_PAD_VALUE=0,
SEG_PAD_VALUE=255,
), ),
)
class SemanticSegmentationConfig(BaseConfig):
def __init__(self):
super(SemanticSegmentationConfig, self).__init__()
self._register_configuration(_config_dict)
config = SemanticSegmentationConfig()
|
fedot/core/optimisers/gp_comp/operators/mutation.py | rozlana-g/FEDOT | 358 | 12618084 | from copy import deepcopy
from functools import partial
from random import choice, randint, random, sample
from typing import Any, Callable, List, TYPE_CHECKING, Union
import numpy as np
from fedot.core.composer.constraint import constraint_function
from fedot.core.log import Log
from fedot.core.optimisers.gp_comp.gp_operators import random_graph
from fedot.core.optimisers.gp_comp.individual import Individual
from fedot.core.optimisers.graph import OptGraph, OptNode
from fedot.core.optimisers.opt_history import ParentOperator
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.utils import ComparableEnum as Enum, DEFAULT_PARAMS_STUB
if TYPE_CHECKING:
from fedot.core.optimisers.gp_comp.gp_optimiser import GraphGenerationParams
MAX_NUM_OF_ATTEMPTS = 100
MAX_MUT_CYCLES = 5
STATIC_MUTATION_PROBABILITY = 0.7
class MutationTypesEnum(Enum):
simple = 'simple'
growth = 'growth'
local_growth = 'local_growth'
reduce = 'reduce'
single_add = 'single_add',
single_change = 'single_change',
single_drop = 'single_drop',
single_edge = 'single_edge'
none = 'none'
class MutationStrengthEnum(Enum):
weak = 0.2
mean = 1.0
strong = 5.0
def get_mutation_prob(mut_id, node):
""" Function returns mutation probability for certain node in the graph
:param mut_id: MutationStrengthEnum mean weak or strong mutation
:param node: root node of the graph
:return mutation_prob: mutation probability
"""
default_mutation_prob = 0.7
if mut_id in list(MutationStrengthEnum):
mutation_strength = mut_id.value
mutation_prob = mutation_strength / (node.distance_to_primary_level + 1)
else:
mutation_prob = default_mutation_prob
return mutation_prob
def _will_mutation_be_applied(mutation_prob, mutation_type) -> bool:
return not (random() > mutation_prob or mutation_type == MutationTypesEnum.none)
def _adapt_and_apply_mutations(new_graph: Any, mutation_prob: float, types: List[Union[MutationTypesEnum, Callable]],
num_mut: int, requirements, params: 'GraphGenerationParams', max_depth: int):
"""
Apply mutation in several iterations with specific adaptation of each graph
"""
is_static_mutation_type = random() < STATIC_MUTATION_PROBABILITY
static_mutation_type = choice(types)
mutation_names = []
for _ in range(num_mut):
mutation_type = static_mutation_type \
if is_static_mutation_type else choice(types)
is_custom_mutation = isinstance(mutation_type, Callable)
if is_custom_mutation:
new_graph = params.adapter.restore(new_graph)
else:
if not isinstance(new_graph, OptGraph):
new_graph = params.adapter.adapt(new_graph)
new_graph = _apply_mutation(new_graph=new_graph, mutation_prob=mutation_prob,
mutation_type=mutation_type, is_custom_mutation=is_custom_mutation,
requirements=requirements, params=params, max_depth=max_depth)
mutation_names.append(str(mutation_type))
if not isinstance(new_graph, OptGraph):
new_graph = params.adapter.adapt(new_graph)
if is_custom_mutation:
# custom mutation occurs once
break
return new_graph, mutation_names
def _apply_mutation(new_graph: Any, mutation_prob: float, mutation_type: Union[MutationTypesEnum, Callable],
is_custom_mutation: bool, requirements, params: 'GraphGenerationParams', max_depth: int):
"""
Apply mutation for adapted graph
"""
if _will_mutation_be_applied(mutation_prob, mutation_type):
if mutation_type in mutation_by_type or is_custom_mutation:
if is_custom_mutation:
mutation_func = mutation_type
else:
mutation_func = mutation_by_type[mutation_type]
new_graph = mutation_func(new_graph, requirements=requirements,
params=params,
max_depth=max_depth)
elif mutation_type != MutationTypesEnum.none:
raise ValueError(f'Required mutation type is not found: {mutation_type}')
return new_graph
def mutation(types: List[Union[MutationTypesEnum, Callable]], params: 'GraphGenerationParams',
ind: Individual, requirements, log: Log,
max_depth: int = None, add_to_history=True) -> Any:
""" Function apply mutation operator to graph """
max_depth = max_depth if max_depth else requirements.max_depth
mutation_prob = requirements.mutation_prob
for _ in range(MAX_NUM_OF_ATTEMPTS):
new_graph = deepcopy(ind.graph)
num_mut = max(int(round(np.random.lognormal(0, sigma=0.5))), 1)
new_graph, mutation_names = _adapt_and_apply_mutations(new_graph=new_graph, mutation_prob=mutation_prob,
types=types, num_mut=num_mut,
requirements=requirements, params=params,
max_depth=max_depth)
is_correct_graph = constraint_function(new_graph, params)
if is_correct_graph:
new_individual = Individual(new_graph)
if add_to_history:
new_individual = Individual(new_graph)
new_individual.parent_operators = ind.parent_operators
for mutation_name in mutation_names:
new_individual.parent_operators.append(
ParentOperator(operator_type='mutation',
operator_name=str(mutation_name),
parent_objects=[params.adapter.restore_as_template(ind.graph)]))
return new_individual
log.debug('Number of mutation attempts exceeded. '
'Please check composer requirements for correctness.')
return deepcopy(ind)
def simple_mutation(graph: Any, requirements, **kwargs) -> Any:
"""
    This type of mutation passes over all nodes of the tree, starting from the root node, and changes
    nodes' operations with probability 'node mutation probability',
    which is initialised inside the function
"""
def replace_node_to_random_recursive(node: Any) -> Any:
if node.nodes_from:
if random() < node_mutation_probability:
secondary_node = OptNode(content={'name': choice(requirements.secondary),
'params': DEFAULT_PARAMS_STUB},
nodes_from=node.nodes_from)
graph.update_node(node, secondary_node)
for child in node.nodes_from:
replace_node_to_random_recursive(child)
else:
if random() < node_mutation_probability:
primary_node = OptNode(content={'name': choice(requirements.primary),
'params': DEFAULT_PARAMS_STUB})
graph.update_node(node, primary_node)
node_mutation_probability = get_mutation_prob(mut_id=requirements.mutation_strength,
node=graph.root_node)
replace_node_to_random_recursive(graph.root_node)
return graph
def single_edge_mutation(graph: Any, max_depth, *args, **kwargs):
old_graph = deepcopy(graph)
for _ in range(MAX_NUM_OF_ATTEMPTS):
if len(graph.nodes) < 2 or graph.depth > max_depth:
return graph
source_node, target_node = sample(graph.nodes, 2)
nodes_not_cycling = (target_node.descriptive_id not in
[n.descriptive_id for n in source_node.ordered_subnodes_hierarchy()])
if nodes_not_cycling and (target_node.nodes_from is None or source_node not in target_node.nodes_from):
graph.operator.connect_nodes(source_node, target_node)
break
if graph.depth > max_depth:
return old_graph
return graph
def _add_intermediate_node(graph: Any, requirements, params, node_to_mutate):
# add between node and parent
candidates = params.advisor.propose_parent(str(node_to_mutate.content['name']),
[str(n.content['name']) for n in node_to_mutate.nodes_from],
requirements.secondary)
if len(candidates) == 0:
return graph
new_node = OptNode(content={'name': choice(candidates),
'params': DEFAULT_PARAMS_STUB})
new_node.nodes_from = node_to_mutate.nodes_from
node_to_mutate.nodes_from = [new_node]
graph.nodes.append(new_node)
return graph
def _add_separate_parent_node(graph: Any, requirements, params, node_to_mutate):
# add as separate parent
candidates = params.advisor.propose_parent(str(node_to_mutate.content['name']), None,
requirements.primary)
if len(candidates) == 0:
return graph
for iter_num in range(randint(1, 3)):
if iter_num == len(candidates):
break
new_node = OptNode(content={'name': choice(candidates),
'params': DEFAULT_PARAMS_STUB})
if node_to_mutate.nodes_from:
node_to_mutate.nodes_from.append(new_node)
else:
node_to_mutate.nodes_from = [new_node]
graph.nodes.append(new_node)
return graph
def _add_as_child(graph: Any, requirements, params, node_to_mutate):
# add as child
new_node = OptNode(content={'name': choice(requirements.secondary),
'params': DEFAULT_PARAMS_STUB})
new_node.nodes_from = [node_to_mutate]
graph.operator.actualise_old_node_children(node_to_mutate, new_node)
graph.nodes.append(new_node)
return graph
def single_add_mutation(graph: Any, requirements, params, max_depth, *args, **kwargs):
"""
    Add a new node to the graph: as a child, as a separate parent, or between two sequential existing nodes
"""
if graph.depth >= max_depth:
# add mutation is not possible
return graph
node_to_mutate = choice(graph.nodes)
single_add_strategies = [_add_as_child, _add_separate_parent_node]
if node_to_mutate.nodes_from:
single_add_strategies.append(_add_intermediate_node)
strategy = choice(single_add_strategies)
result = strategy(graph, requirements, params, node_to_mutate)
return result
def single_change_mutation(graph: Any, requirements, params, *args, **kwargs):
"""
    Replace the operation in a randomly selected node
"""
node = choice(graph.nodes)
nodes_from = node.nodes_from
candidates = requirements.secondary if node.nodes_from else requirements.primary
if params.advisor:
candidates = params.advisor.propose_change(current_operation_id=str(node.content['name']),
possible_operations=candidates)
if len(candidates) == 0:
return graph
node_new = OptNode(content={'name': choice(candidates),
'params': DEFAULT_PARAMS_STUB})
node_new.nodes_from = nodes_from
graph.nodes = [node_new if n == node else n for n in graph.nodes]
graph.operator.actualise_old_node_children(node, node_new)
return graph
def single_drop_mutation(graph: Any, *args, **kwargs):
"""
    Drop a randomly selected node from the graph
"""
node_to_del = choice(graph.nodes)
# TODO replace as workaround
node_name = node_to_del.content['name']
if (hasattr(node_name, 'operation_type') and
'data_source' in node_name.operation_type):
nodes_to_delete = \
[n for n in graph.nodes if node_name.operation_type in n.descriptive_id and
n.descriptive_id.count('data_source') == 1]
for child_node in nodes_to_delete:
graph.delete_node(child_node)
graph.delete_node(node_to_del)
else:
graph.delete_node(node_to_del)
if node_to_del.nodes_from:
childs = graph.operator.node_children(node_to_del)
for child in childs:
if child.nodes_from:
child.nodes_from.extend(node_to_del.nodes_from)
else:
child.nodes_from = node_to_del.nodes_from
return graph
def _tree_growth(graph: Any, requirements, params, max_depth: int, local_growth=True):
"""
This mutation selects a random node in a tree, generates new subtree,
and replaces the selected node's subtree.
"""
random_layer_in_graph = randint(0, graph.depth - 1)
node_from_graph = choice(graph.operator.nodes_from_layer(random_layer_in_graph))
if local_growth:
is_primary_node_selected = (not node_from_graph.nodes_from) or (
node_from_graph.nodes_from and
node_from_graph != graph.root_node
and randint(0, 1))
else:
is_primary_node_selected = \
randint(0, 1) and \
not graph.operator.distance_to_root_level(node_from_graph) < max_depth
if is_primary_node_selected:
new_subtree = OptNode(content={'name': choice(requirements.primary),
'params': DEFAULT_PARAMS_STUB})
else:
if local_growth:
max_depth = node_from_graph.distance_to_primary_level
else:
max_depth = max_depth - graph.operator.distance_to_root_level(node_from_graph)
new_subtree = random_graph(params=params, requirements=requirements,
max_depth=max_depth).root_node
graph.update_subtree(node_from_graph, new_subtree)
return graph
def growth_mutation(graph: Any, requirements, params, max_depth: int, local_growth=True) -> Any:
"""
    This mutation adds new nodes to the graph (either a single node between existing nodes, or a new subtree).
    :param local_growth: if true, the maximal depth of the new subtree equals the depth of the tree located in
    the selected random node; if false, the previous depth of the selected node doesn't affect the
    new subtree depth, which only has to satisfy the depth constraint of the parent tree
"""
if random() > 0.5:
# simple growth (one node can be added)
return single_add_mutation(graph, requirements, params, max_depth)
else:
# advanced growth (several nodes can be added)
return _tree_growth(graph, requirements, params, max_depth, local_growth)
def reduce_mutation(graph: OptGraph, requirements, **kwargs) -> OptGraph:
"""
Selects a random node in a tree, then removes its subtree. If the current arity of the node's
parent is more than the specified minimal arity, then the selected node is also removed.
Otherwise, it is replaced by a random primary node.
"""
if len(graph.nodes) == 1:
return graph
nodes = [node for node in graph.nodes if node is not graph.root_node]
node_to_del = choice(nodes)
children = graph.operator.node_children(node_to_del)
is_possible_to_delete = all([len(child.nodes_from) - 1 >= requirements.min_arity for child in children])
if is_possible_to_delete:
graph.delete_subtree(node_to_del)
else:
primary_node = OptNode(content={'name': choice(requirements.primary),
'params': DEFAULT_PARAMS_STUB})
graph.update_subtree(node_to_del, primary_node)
return graph
mutation_by_type = {
MutationTypesEnum.simple: simple_mutation,
MutationTypesEnum.growth: partial(growth_mutation, local_growth=False),
MutationTypesEnum.local_growth: partial(growth_mutation, local_growth=True),
MutationTypesEnum.reduce: reduce_mutation,
MutationTypesEnum.single_add: single_add_mutation,
MutationTypesEnum.single_edge: single_edge_mutation,
MutationTypesEnum.single_drop: single_drop_mutation,
MutationTypesEnum.single_change: single_change_mutation,
}
|
supervisor/store/const.py | peddamat/home-assistant-supervisor-test | 597 | 12618086 | """Constants for the add-on store."""
from enum import Enum
class StoreType(str, Enum):
"""Store Types."""
CORE = "core"
LOCAL = "local"
GIT = "git"
|
Python/examples/bonds.py | yrtf/QuantLib-SWIG | 231 | 12618113 | # ---
# jupyter:
# jupytext:
# formats: py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bonds
#
# Copyright (©) 2008 <NAME>
# Copyright (©) 2010 <NAME>
#
# This file is part of QuantLib, a free-software/open-source library
# for financial quantitative analysts and developers - https://www.quantlib.org/
#
# QuantLib is free software: you can redistribute it and/or modify it
# under the terms of the QuantLib license. You should have received a
# # copy of the license along with this program; if not, please email
# <<EMAIL>>. The license is also available online at
# <https://www.quantlib.org/license.shtml>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the license for more details.
# This example shows how to set up a term structure and then price
# some simple bonds. The last part is dedicated to peripherical
# computations such as "Yield to Price" or "Price to Yield"
import QuantLib as ql
import pandas as pd
interactive = 'get_ipython' in globals()
# ### Global data
calendar = ql.TARGET()
settlementDate = ql.Date(18, ql.September, 2008)
settlementDate = calendar.adjust(settlementDate)
fixingDays = 3
settlementDays = 3
todaysDate = calendar.advance(settlementDate, -fixingDays, ql.Days)
ql.Settings.instance().evaluationDate = todaysDate
print("Today: " + str(todaysDate))
print("Settlement Date: " + str(settlementDate))
# ### Market quotes
zcQuotes = [(0.0096, ql.Period(3, ql.Months)), (0.0145, ql.Period(6, ql.Months)), (0.0194, ql.Period(1, ql.Years))]
zcBondsDayCounter = ql.Actual365Fixed()
zcHelpers = [
ql.DepositRateHelper(
ql.QuoteHandle(ql.SimpleQuote(r)), tenor, fixingDays, calendar, ql.ModifiedFollowing, True, zcBondsDayCounter
)
for (r, tenor) in zcQuotes
]
# ### Setup bonds
redemption = 100.0
numberOfBonds = 5
bondQuotes = [
(ql.Date(15, ql.March, 2005), ql.Date(31, ql.August, 2010), 0.02375, 100.390625),
(ql.Date(15, ql.June, 2005), ql.Date(31, ql.August, 2011), 0.04625, 106.21875),
(ql.Date(30, ql.June, 2006), ql.Date(31, ql.August, 2013), 0.03125, 100.59375),
(ql.Date(15, ql.November, 2002), ql.Date(15, ql.August, 2018), 0.04000, 101.6875),
(ql.Date(15, ql.May, 1987), ql.Date(15, ql.May, 2038), 0.04500, 102.140625),
]
# ### Definition of the rate helpers
bondsHelpers = []
for issueDate, maturity, couponRate, marketQuote in bondQuotes:
schedule = ql.Schedule(
issueDate,
maturity,
ql.Period(ql.Semiannual),
ql.UnitedStates(ql.UnitedStates.GovernmentBond),
ql.Unadjusted,
ql.Unadjusted,
ql.DateGeneration.Backward,
False,
)
bondsHelpers.append(
ql.FixedRateBondHelper(
ql.QuoteHandle(ql.SimpleQuote(marketQuote)),
settlementDays,
100.0,
schedule,
[couponRate],
ql.ActualActual(ql.ActualActual.Bond),
ql.Unadjusted,
redemption,
issueDate,
)
)
# ### Curve building
termStructureDayCounter = ql.ActualActual(ql.ActualActual.ISDA)
bondInstruments = zcHelpers + bondsHelpers
bondDiscountingTermStructure = ql.PiecewiseFlatForward(settlementDate, bondInstruments, termStructureDayCounter)
# ### Building of the LIBOR forecasting curve
dQuotes = [
(0.043375, ql.Period(1, ql.Weeks)),
(0.031875, ql.Period(1, ql.Months)),
(0.0320375, ql.Period(3, ql.Months)),
(0.03385, ql.Period(6, ql.Months)),
(0.0338125, ql.Period(9, ql.Months)),
(0.0335125, ql.Period(1, ql.Years)),
]
sQuotes = [
(0.0295, ql.Period(2, ql.Years)),
(0.0323, ql.Period(3, ql.Years)),
(0.0359, ql.Period(5, ql.Years)),
(0.0412, ql.Period(10, ql.Years)),
(0.0433, ql.Period(15, ql.Years)),
]
depositDayCounter = ql.Actual360()
depositHelpers = [
ql.DepositRateHelper(
ql.QuoteHandle(ql.SimpleQuote(rate)), tenor, fixingDays, calendar, ql.ModifiedFollowing, True, depositDayCounter
)
for rate, tenor in dQuotes
]
swFixedLegFrequency = ql.Annual
swFixedLegConvention = ql.Unadjusted
swFixedLegDayCounter = ql.Thirty360(ql.Thirty360.European)
swFloatingLegIndex = ql.Euribor6M()
forwardStart = ql.Period(1, ql.Days)
swapHelpers = [
ql.SwapRateHelper(
ql.QuoteHandle(ql.SimpleQuote(rate)),
tenor,
calendar,
swFixedLegFrequency,
swFixedLegConvention,
swFixedLegDayCounter,
swFloatingLegIndex,
ql.QuoteHandle(),
forwardStart,
)
for rate, tenor in sQuotes
]
depoSwapInstruments = depositHelpers + swapHelpers
depoSwapTermStructure = ql.PiecewiseFlatForward(settlementDate, depoSwapInstruments, termStructureDayCounter)
# ### Pricing
#
# Term structures that will be used for pricing:
# the one used for discounting cash flows...
discountingTermStructure = ql.RelinkableYieldTermStructureHandle()
# ...and the one used for forward rate forecasting.
forecastingTermStructure = ql.RelinkableYieldTermStructureHandle()
# Bonds to be priced:
faceAmount = 100
bondEngine = ql.DiscountingBondEngine(discountingTermStructure)
# a zero coupon bond...
zeroCouponBond = ql.ZeroCouponBond(
settlementDays,
ql.UnitedStates(ql.UnitedStates.GovernmentBond),
faceAmount,
ql.Date(15, ql.August, 2013),
ql.Following,
116.92,
ql.Date(15, ql.August, 2003),
)
zeroCouponBond.setPricingEngine(bondEngine)
# ...a fixed 4.5% US Treasury note...
fixedBondSchedule = ql.Schedule(
ql.Date(15, ql.May, 2007),
ql.Date(15, ql.May, 2017),
ql.Period(ql.Semiannual),
ql.UnitedStates(ql.UnitedStates.GovernmentBond),
ql.Unadjusted,
ql.Unadjusted,
ql.DateGeneration.Backward,
False,
)
fixedRateBond = ql.FixedRateBond(
settlementDays,
faceAmount,
fixedBondSchedule,
[0.045],
ql.ActualActual(ql.ActualActual.Bond),
ql.ModifiedFollowing,
100.0,
ql.Date(15, ql.May, 2007),
)
fixedRateBond.setPricingEngine(bondEngine)
# ...and a floating rate bond paying 3M USD Libor + 0.1%
# (should and will be priced on another curve later).
liborTermStructure = ql.RelinkableYieldTermStructureHandle()
libor3m = ql.USDLibor(ql.Period(3, ql.Months), liborTermStructure)
libor3m.addFixing(ql.Date(17, ql.April, 2008), 0.028175)
libor3m.addFixing(ql.Date(17, ql.July, 2008), 0.0278625)
floatingBondSchedule = ql.Schedule(
ql.Date(21, ql.October, 2005),
ql.Date(21, ql.October, 2010),
ql.Period(ql.Quarterly),
ql.UnitedStates(ql.UnitedStates.NYSE),
ql.Unadjusted,
ql.Unadjusted,
ql.DateGeneration.Backward,
True,
)
floatingRateBond = ql.FloatingRateBond(
settlementDays,
faceAmount,
floatingBondSchedule,
libor3m,
ql.Actual360(),
ql.ModifiedFollowing,
spreads=[0.001],
issueDate=ql.Date(21, ql.October, 2005),
)
floatingRateBond.setPricingEngine(bondEngine)
forecastingTermStructure.linkTo(depoSwapTermStructure)
discountingTermStructure.linkTo(bondDiscountingTermStructure)
liborTermStructure.linkTo(depoSwapTermStructure)
# +
data = []
data.append(
(zeroCouponBond.cleanPrice(), fixedRateBond.cleanPrice(), floatingRateBond.cleanPrice())
)
data.append(
(zeroCouponBond.dirtyPrice(), fixedRateBond.dirtyPrice(), floatingRateBond.dirtyPrice())
)
data.append(
(zeroCouponBond.accruedAmount(),
fixedRateBond.accruedAmount(),
floatingRateBond.accruedAmount())
)
data.append(
(None, fixedRateBond.previousCouponRate(), floatingRateBond.previousCouponRate())
)
data.append(
(None, fixedRateBond.nextCouponRate(), floatingRateBond.nextCouponRate())
)
data.append(
(zeroCouponBond.bondYield(ql.Actual360(), ql.Compounded, ql.Annual),
fixedRateBond.bondYield(ql.Actual360(), ql.Compounded, ql.Annual),
floatingRateBond.bondYield(ql.Actual360(), ql.Compounded, ql.Annual))
)
df = pd.DataFrame(data, columns=["ZC", "Fixed", "Floating"],
index=["Clean price", "Dirty price", "Accrued coupon",
"Previous coupon rate", "Next coupon rate", "Yield"])
if not interactive:
print(df)
df
# -
# A few other computations:
# Yield to clean price:
floatingRateBond.cleanPrice(
floatingRateBond.bondYield(ql.Actual360(), ql.Compounded, ql.Annual),
ql.Actual360(),
ql.Compounded,
ql.Annual,
settlementDate,
)
# Clean price to yield:
floatingRateBond.bondYield(
floatingRateBond.cleanPrice(),
ql.Actual360(),
ql.Compounded,
ql.Annual,
settlementDate
)
|
fastapi_simple_security/_security_secret.py | yourkin/fastapi_simple_security | 103 | 12618118 | import os
import uuid
import warnings
from fastapi import Security
from fastapi.security import APIKeyHeader
from starlette.exceptions import HTTPException
from starlette.status import HTTP_403_FORBIDDEN
try:
SECRET = os.environ["FASTAPI_SIMPLE_SECURITY_SECRET"]
except KeyError:
SECRET = str(uuid.uuid4())
warnings.warn(
f"ENVIRONMENT VARIABLE 'FASTAPI_SIMPLE_SECURITY_SECRET' NOT FOUND\n"
f"\tGenerated a single-use secret key for this session:\n"
f"\t{SECRET=}"
)
SECRET_KEY_NAME = "secret-key" # Note: By default, nginx silently drops headers with underscores. Use hyphens instead.
secret_header = APIKeyHeader(name=SECRET_KEY_NAME, scheme_name="Secret header", auto_error=False)
async def secret_based_security(header_param: str = Security(secret_header)):
"""
Args:
header_param: parsed header field secret_header
Returns:
True if the authentication was successful
Raises:
HTTPException if the authentication failed
"""
if header_param == SECRET:
return True
if not header_param:
error = "secret_key must be passed as a header field"
else:
error = (
"Wrong secret key. If not set through environment variable 'FASTAPI_SIMPLE_SECURITY_SECRET', it was "
"generated automatically at startup and appears in the server logs."
)
raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=error)
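# Hedged usage sketch: the app and route below are assumptions for illustration and are
# not part of this module.
#
#     from fastapi import Depends, FastAPI
#
#     app = FastAPI()
#
#     @app.get("/admin", dependencies=[Depends(secret_based_security)])
#     async def admin_endpoint():
#         return {"ok": True}
#
# A request then needs the header ``secret-key: <value of SECRET>`` to receive a 200;
# anything else gets a 403.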
|
AI-env/lib/python3.7/site-packages/charset_normalizer/cd.py | parth5795/iOT-benchmarking | 150 | 12618143 | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple
from .assets import FREQUENCIES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import is_multi_byte_encoding, is_unicode_range_secondary, unicode_range
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module("encodings.{}".format(iana_name)).IncrementalDecoder # type: ignore
p = decoder(errors="ignore") # type: IncrementalDecoder
seen_ranges = set() # type: Set[str]
for i in range(48, 255):
chunk = p.decode(bytes([i])) # type: str
if chunk:
character_range = unicode_range(chunk) # type: Optional[str]
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
seen_ranges.add(character_range)
return sorted(list(seen_ranges))
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages = [] # type: List[str]
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges = encoding_unicode_range(iana_name) # type: List[str]
primary_range = None # type: Optional[str]
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name in {"cp932"}
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in {"big5", "cp950", "big5hkscs"}:
return ["Chinese", "Classical Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in {"johab", "cp949", "euc_kr"}:
return ["Korean"]
return []
def alphabet_languages(characters: List[str]) -> List[str]:
"""
    Return the languages associated with the given characters.
"""
languages = [] # type: List[str]
for language, language_characters in FREQUENCIES.items():
character_match_count = 0 # type: int
character_count = len(language_characters) # type: int
for character in language_characters:
if character in characters:
character_match_count += 1
if character_match_count / character_count >= 0.2:
languages.append(language)
return languages
def characters_popularity_compare(
language: str, ordered_characters: List[str]
) -> float:
"""
    Determine if an ordered character list (by occurrence, from the most common to the rarest) matches a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
    Beware that this function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count = 0 # type: int
for character in ordered_characters:
if character not in FREQUENCIES[language]:
continue
characters_before_source = FREQUENCIES[language][
0 : FREQUENCIES[language].index(character)
] # type: List[str]
characters_after_source = FREQUENCIES[language][
FREQUENCIES[language].index(character) :
] # type: List[str]
characters_before = ordered_characters[
0 : ordered_characters.index(character)
] # type: List[str]
characters_after = ordered_characters[
ordered_characters.index(character) :
] # type: List[str]
before_match_count = [
e in characters_before for e in characters_before_source
].count(
True
) # type: int
after_match_count = [
e in characters_after for e in characters_after_source
].count(
True
) # type: int
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
    Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
    one containing the Latin letters and the other the Hebrew ones.
"""
layers = {} # type: Dict[str, str]
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
continue
layer_target_range = None # type: Optional[str]
for discovered_range in layers:
if (
is_suspiciously_successive_range(discovered_range, character_range)
is False
):
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
    This function merges results previously given by the function coherence_ratio.
The return type is the same as coherence_ratio.
"""
per_language_ratios = {} # type: Dict[str, List[float]]
merge = [] # type: CoherenceMatches
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
for language in per_language_ratios:
merge.append(
(
language,
round(
sum(per_language_ratios[language])
/ len(per_language_ratios[language]),
4,
),
)
)
return sorted(merge, key=lambda x: x[1], reverse=True)
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
    Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers.
A layer = Character extraction by alphabets/ranges.
"""
results = [] # type: List[Tuple[str, float]]
lg_inclusion_list = [] # type: List[str]
sufficient_match_count = 0 # type: int
if lg_inclusion is not None:
lg_inclusion_list = lg_inclusion.split(",")
if "Latin Based" in lg_inclusion_list:
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies = Counter(layer) # type: Counter
most_common = sequence_frequencies.most_common()
character_count = sum([o for c, o in most_common]) # type: int
if character_count <= 32:
continue
popular_character_ordered = [c for c, o in most_common] # type: List[str]
for language in lg_inclusion_list or alphabet_languages(
popular_character_ordered
):
ratio = characters_popularity_compare(
language, popular_character_ordered
) # type: float
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(results, key=lambda x: x[1], reverse=True)
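

# Minimal usage sketch (added example, not part of the original module): feed
# an already decoded string to coherence_ratio(); the sample text below is
# illustrative only.
if __name__ == "__main__":
    sample = (
        "Всеки човек има право на образование. "
        "Образованието трябва да бъде безплатно."
    )
    print(coherence_ratio(sample))  # [(language, ratio), ...] best match first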
|
paxos/essential.py | timgates42/paxos | 420 | 12618147 |
'''
This module provides a minimal implementation of the Paxos algorithm
that is independent of the underlying messaging mechanism. These
classes implement only the essential Paxos components and omit
the practical considerations (such as durability, message
retransmissions, NACKs, etc).
'''
import collections
# In order for the Paxos algorithm to function, all proposal ids must be
# unique. A simple way to ensure this is to include the proposer's UID
# in the proposal id. This prevents the possibility of two Proposers
# from proposing different values for the same proposal ID.
#
# Python tuples are a simple mechanism that allow the proposal number
# and the UID to be combined easily and in a manner that supports
# comparison. To simplify the code, we'll use "namedtuple" instances
# from the collections module which allows us to write
# "proposal_id.number" instead of "proposal_id[0]".
#
ProposalID = collections.namedtuple('ProposalID', ['number', 'uid'])
class Messenger (object):
def send_prepare(self, proposal_id):
'''
Broadcasts a Prepare message to all Acceptors
'''
def send_promise(self, proposer_uid, proposal_id, previous_id, accepted_value):
'''
Sends a Promise message to the specified Proposer
'''
def send_accept(self, proposal_id, proposal_value):
'''
Broadcasts an Accept! message to all Acceptors
'''
def send_accepted(self, proposal_id, accepted_value):
'''
Broadcasts an Accepted message to all Learners
'''
def on_resolution(self, proposal_id, value):
'''
Called when a resolution is reached
'''
class Proposer (object):
messenger = None
proposer_uid = None
quorum_size = None
proposed_value = None
proposal_id = None
last_accepted_id = None
next_proposal_number = 1
promises_rcvd = None
def set_proposal(self, value):
'''
Sets the proposal value for this node iff this node is not already aware of
another proposal having already been accepted.
'''
if self.proposed_value is None:
self.proposed_value = value
def prepare(self):
'''
Sends a prepare request to all Acceptors as the first step in attempting to
acquire leadership of the Paxos instance.
'''
self.promises_rcvd = set()
self.proposal_id = ProposalID(self.next_proposal_number, self.proposer_uid)
self.next_proposal_number += 1
self.messenger.send_prepare(self.proposal_id)
def recv_promise(self, from_uid, proposal_id, prev_accepted_id, prev_accepted_value):
'''
Called when a Promise message is received from an Acceptor
'''
# Ignore the message if it's for an old proposal or we have already received
# a response from this Acceptor
if proposal_id != self.proposal_id or from_uid in self.promises_rcvd:
return
self.promises_rcvd.add( from_uid )
if prev_accepted_id > self.last_accepted_id:
self.last_accepted_id = prev_accepted_id
# If the Acceptor has already accepted a value, we MUST set our proposal
# to that value.
if prev_accepted_value is not None:
self.proposed_value = prev_accepted_value
if len(self.promises_rcvd) == self.quorum_size:
if self.proposed_value is not None:
self.messenger.send_accept(self.proposal_id, self.proposed_value)
class Acceptor (object):
messenger = None
promised_id = None
accepted_id = None
accepted_value = None
def recv_prepare(self, from_uid, proposal_id):
'''
Called when a Prepare message is received from a Proposer
'''
if proposal_id == self.promised_id:
# Duplicate prepare message
self.messenger.send_promise(from_uid, proposal_id, self.accepted_id, self.accepted_value)
elif proposal_id > self.promised_id:
self.promised_id = proposal_id
self.messenger.send_promise(from_uid, proposal_id, self.accepted_id, self.accepted_value)
def recv_accept_request(self, from_uid, proposal_id, value):
'''
Called when an Accept! message is received from a Proposer
'''
if proposal_id >= self.promised_id:
self.promised_id = proposal_id
self.accepted_id = proposal_id
self.accepted_value = value
self.messenger.send_accepted(proposal_id, self.accepted_value)
class Learner (object):
quorum_size = None
proposals = None # maps proposal_id => [accept_count, retain_count, value]
acceptors = None # maps from_uid => last_accepted_proposal_id
final_value = None
final_proposal_id = None
@property
def complete(self):
return self.final_proposal_id is not None
def recv_accepted(self, from_uid, proposal_id, accepted_value):
'''
Called when an Accepted message is received from an acceptor
'''
if self.final_value is not None:
return # already done
if self.proposals is None:
self.proposals = dict()
self.acceptors = dict()
last_pn = self.acceptors.get(from_uid)
if not proposal_id > last_pn:
return # Old message
self.acceptors[ from_uid ] = proposal_id
if last_pn is not None:
oldp = self.proposals[ last_pn ]
oldp[1] -= 1
if oldp[1] == 0:
del self.proposals[ last_pn ]
if not proposal_id in self.proposals:
self.proposals[ proposal_id ] = [0, 0, accepted_value]
t = self.proposals[ proposal_id ]
assert accepted_value == t[2], 'Value mismatch for single proposal!'
t[0] += 1
t[1] += 1
if t[0] == self.quorum_size:
self.final_value = accepted_value
self.final_proposal_id = proposal_id
self.proposals = None
self.acceptors = None
self.messenger.on_resolution( proposal_id, accepted_value )
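

# --- Minimal usage sketch (added example, not part of the original module) --
# Wires one Proposer, three Acceptors and one Learner together through toy
# Messenger subclasses that deliver every message synchronously in-process.
# The class and UID names below are illustrative only. Like the classes above,
# the sketch relies on Python 2 ordering semantics (ProposalID tuples compare
# greater than None).
class LocalNetwork (object):

    class ProposerMessenger (Messenger):
        def __init__(self, network):
            self.network = network
        def send_prepare(self, proposal_id):
            for acceptor in self.network.acceptors.values():
                acceptor.recv_prepare(self.network.proposer.proposer_uid, proposal_id)
        def send_accept(self, proposal_id, proposal_value):
            for acceptor in self.network.acceptors.values():
                acceptor.recv_accept_request(self.network.proposer.proposer_uid,
                                             proposal_id, proposal_value)

    class AcceptorMessenger (Messenger):
        def __init__(self, network, uid):
            self.network = network
            self.uid = uid
        def send_promise(self, proposer_uid, proposal_id, previous_id, accepted_value):
            self.network.proposer.recv_promise(self.uid, proposal_id,
                                               previous_id, accepted_value)
        def send_accepted(self, proposal_id, accepted_value):
            self.network.learner.recv_accepted(self.uid, proposal_id, accepted_value)

    class LearnerMessenger (Messenger):
        def on_resolution(self, proposal_id, value):
            print('Consensus reached on %r via proposal %r' % (value, proposal_id))

    def __init__(self, num_acceptors=3):
        quorum = num_acceptors // 2 + 1

        self.proposer = Proposer()
        self.proposer.proposer_uid = 'proposer-1'
        self.proposer.quorum_size = quorum
        self.proposer.messenger = self.ProposerMessenger(self)

        self.learner = Learner()
        self.learner.quorum_size = quorum
        self.learner.messenger = self.LearnerMessenger()

        self.acceptors = {}
        for i in range(num_acceptors):
            uid = 'acceptor-%d' % i
            acceptor = Acceptor()
            acceptor.messenger = self.AcceptorMessenger(self, uid)
            self.acceptors[uid] = acceptor


if __name__ == '__main__':
    net = LocalNetwork()
    net.proposer.set_proposal('hello paxos')
    net.proposer.prepare()  # Promise / Accept! / Accepted all flow synchronously
    assert net.learner.final_value == 'hello paxos'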
|
lona/html/document.py | korantu/lona | 230 | 12618197 | from threading import RLock
from lona.html.abstract_node import AbstractNode
from lona.html.patches import PatchStack
from lona.protocol import DATA_TYPE
class Document:
def __init__(self):
self._lock = RLock()
self.html = None
self._patch_stack = PatchStack()
@property
def lock(self):
return self._lock
@property
def is_dirty(self):
return self._patch_stack.has_patches()
def add_patch(self, *args, **kwargs):
self._patch_stack.add_patch(*args, **kwargs)
# html ####################################################################
def get_node(self, node_id):
node = None
nodes = []
with self.lock:
if self.html.id == node_id:
node = self.html
else:
for _node in self.html.iter_nodes():
if _node.id == node_id:
node = _node
break
if node is None:
return []
while node is not None:
nodes.append(node)
node = node.parent
return nodes
def serialize(self):
if not self.html:
return self.apply('')
return DATA_TYPE.HTML_TREE, self.html._serialize()
def apply(self, html):
if isinstance(html, str) and html is self.html:
return
# HTML update
elif html is self.html:
if not self._patch_stack.has_patches():
return
patches = self._patch_stack.get_patches()
self._patch_stack.clear()
return DATA_TYPE.HTML_UPDATE, patches
# HTML
else:
self._patch_stack.clear()
if isinstance(self.html, AbstractNode):
self.html._set_document(None)
# node tree
if isinstance(html, AbstractNode):
self.html = html
self.html._set_document(self)
return self.serialize()
# HTML string
self.html = str(html)
return DATA_TYPE.HTML, html
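

# Minimal usage sketch (added example, not part of the original module):
# applying a raw HTML string yields a DATA_TYPE.HTML payload; re-applying the
# very same string object is treated as a no-op.
if __name__ == '__main__':
    document = Document()
    html = '<h1>Hello Lona</h1>'
    print(document.apply(html))  # -> (DATA_TYPE.HTML, '<h1>Hello Lona</h1>')
    print(document.apply(html))  # -> None: same object, nothing to send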
|
tools/mo/openvino/tools/mo/utils/ir_reader/__init__.py | pazamelin/openvino | 2,406 | 12618222 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
|
components/py_engine/framework/ap3216c.py | wstong999/AliOS-Things | 4,538 | 12618255 |
"""
Copyright (C) 2015-2020 Alibaba Group Holding Limited
The driver for AP3216C chip, The AP3216C is an integrated ALS & PS module
that includes a digital ambient light sensor [ALS], a proximity sensor [PS],
and an IR LED in a single package.
"""
from micropython import const
from driver import I2C
from utime import sleep_ms
import math
AP3216C_ADDR = const(0x1e)
# System Register
AP3216C_SYS_CONFIGURATION_REG = const(0x00)
AP3216C_SYS_INT_STATUS_REG = const(0x01)
AP3216C_SYS_INT_CLEAR_MANNER_REG = const(0x02)
AP3216C_IR_DATA_L_REG = const(0x0A)
AP3216C_IR_DATA_H_REG = const(0x0B)
AP3216C_ALS_DATA_L_REG = const(0x0C)
AP3216C_ALS_DATA_H_REG = const(0x0D)
AP3216C_PS_DATA_L_REG = const(0x0E)
AP3216C_PS_DATA_H_REG = const(0x0F)
# ALS Register
AP3216C_ALS_CONFIGURATION_REG = const(0x10)
AP3216C_ALS_CALIBRATION_REG = const(0x19)
AP3216C_ALS_THRESHOLD_LOW_L_REG = const(0x1A)
AP3216C_ALS_THRESHOLD_LOW_H_REG = const(0x1B)
AP3216C_ALS_THRESHOLD_HIGH_L_REG = const(0x1C)
AP3216C_ALS_THRESHOLD_HIGH_H_REG = const(0x1D)
# PS Register
AP3216C_PS_CONFIGURATION_REG = const(0x20)
AP3216C_PS_LED_DRIVER_REG = const(0x21)
AP3216C_PS_INT_FORM_REG = const(0x22)
AP3216C_PS_MEAN_TIME_REG = const(0x23)
AP3216C_PS_LED_WAITING_TIME_REG = const(0x24)
AP3216C_PS_CALIBRATION_L_REG = const(0x28)
AP3216C_PS_CALIBRATION_H_REG = const(0x29)
AP3216C_PS_THRESHOLD_LOW_L_REG = const(0x2A)
AP3216C_PS_THRESHOLD_LOW_H_REG = const(0x2B)
AP3216C_PS_THRESHOLD_HIGH_L_REG = const(0x2C)
AP3216C_PS_THRESHOLD_HIGH_H_REG = const(0x2D)
#mode value
AP3216C_MODE_POWER_DOWN = const(0x0)
AP3216C_MODE_ALS = const(0x1)
AP3216C_MODE_PS = const(0x2)
AP3216C_MODE_ALS_AND_PS = const(0x3)
AP3216C_MODE_SW_RESET = const(0x4)
AP3216C_MODE_ALS_ONCE = const(0x5)
AP3216C_MODE_PS_ONCE = const(0x6)
AP3216C_MODE_ALS_AND_PS_ONCE = const(0x7)
#ap3216c_int_clear_manner
AP3216C_INT_CLEAR_MANNER_BY_READING = const(0x0)
AP3216C_ALS_CLEAR_MANNER_BY_SOFTWARE = const(0x1)
#als_range
AP3216C_ALS_RANGE_20661 = const(0x0)
AP3216C_ALS_RANGE_5162 = const(0x1)
AP3216C_ALS_RANGE_1291 = const(0x2)
AP3216C_ALS_RANGE_323 = const(0x3)
#als_range
AP3216C_PS_GAIN1 = const(0x0)
AP3216C_PS_GAIN2 = const(0x1)
AP3216C_PS_GAIN4 = const(0x2)
AP3216C_PS_GAIN8 = const(0x3)
AP3216C_SYSTEM_MODE = const(0x0)
AP3216C_INT_PARAM = const(0x1)
AP3216C_ALS_RANGE = const(0x2)
AP3216C_ALS_PERSIST = const(0x3)
AP3216C_ALS_CALIBRATION = const(0x4)
AP3216C_ALS_LOW_THRESHOLD_L = const(0x5)
AP3216C_ALS_LOW_THRESHOLD_H = const(0x6)
AP3216C_ALS_HIGH_THRESHOLD_L = const(0x7)
AP3216C_ALS_HIGH_THRESHOLD_H = const(0x8)
AP3216C_PS_INTEGRATED_TIME = const(0x9)
AP3216C_PS_GAIN = const(0xa)
AP3216C_PS_PERSIST = const(0xb)
AP3216C_PS_LED_CONTROL = const(0xc)
AP3216C_PS_LED_DRIVER_RATIO = const(0xd)
AP3216C_PS_INT_MODE = const(0xe)
AP3216C_PS_MEAN_TIME = const(0xf)
AP3216C_PS_WAITING_TIME = const(0x10)
AP3216C_PS_CALIBRATION_L = const(0x11)
AP3216C_PS_CALIBRATION_H = const(0x12)
AP3216C_PS_LOW_THRESHOLD_L = const(0x13)
AP3216C_PS_LOW_THRESHOLD_H = const(0x14)
AP3216C_PS_HIGH_THRESHOLD_L = const(0x15)
AP3216C_PS_HIGH_THRESHOLD_H = const(0x16)
class AP3216CError(Exception):
def __init__(self, value=0, msg="ap3216c common error"):
self.value = value
self.msg = msg
def __str__(self):
return "Error code:%d, Error message: %s" % (self.value, str(self.msg))
__repr__ = __str__
class AP3216C(object):
"""
This class implements ap3216c chip's defs.
"""
def __init__(self):
self.i2cDev = None
def open(self, devid):
self.i2cDev = I2C()
self.i2cDev.open(devid)
    # Write a value to a register
def write_reg(self, addr, data):
msgbuf = bytearray([data])
self.i2cDev.writeReg(addr, msgbuf)
print("--> write addr " + str(addr) + ", value = " + str(msgbuf))
    # Read values from a register
def read_regs(self, addr, len):
buf = bytearray(len)
self.i2cDev.readReg(addr, buf)
print("--> read " + str(len) + " bytes from addr " + str(addr) + ", " + str(len) + " bytes value = " + str(buf))
return buf;
    # Software reset of the sensor
def reset_sensor(self):
self.write_reg(AP3216C_SYS_CONFIGURATION_REG, AP3216C_MODE_SW_RESET); # reset
def read_low_and_high(self, reg, len):
        # buf
        # buf[0] = self.read_regs(reg, len) # read the low byte
        # buf[1] = self.read_regs(reg + 1, len) # read the high byte
        data = self.read_regs(reg, len)[0] | (self.read_regs(reg + 1, len)[0] << len * 8) # combine the low and high bytes
if (data > (1 << 15)):
data = data - (1<<16)
return data
def ap3216c_get_IntStatus(self):
        # Read the interrupt status register
        IntStatus = self.read_regs(AP3216C_SYS_INT_STATUS_REG, 1)[0]
        # Bit 0 of IntStatus flags an ALS interrupt, bit 1 flags a PS interrupt.
        return IntStatus # return the status
def ap3216c_int_init(self):
print("ap3216c_int_init")
#้
็ฝฎ ไธญๆญ่พๅ
ฅๅผ่
def ap3216c_int_Config(self):
print("ap3216c_int_Config")
#ๅๅงๅๅ
ฅๅฃ
def init(self):
# reset ap3216c
self.reset_sensor()
sleep_ms(100)
self.ap3216c_set_param(AP3216C_SYSTEM_MODE, AP3216C_MODE_ALS_AND_PS)
sleep_ms(150) # delay at least 112.5ms
self.ap3216c_int_Config()
self.ap3216c_int_init()
# This function reads light by ap3216c sensor measurement
# @param no
# @return the ambient light converted to float data.
#
def ap3216c_read_ambient_light(self):
read_data = self.read_low_and_high(AP3216C_ALS_DATA_L_REG, 1)
range = self.ap3216c_get_param(AP3216C_ALS_RANGE)
print("ap3216c_read_ambient_light read_data is " , read_data, range)
if (range == AP3216C_ALS_RANGE_20661):
brightness = 0.35 * read_data # sensor ambient light converse to reality
elif (range == AP3216C_ALS_RANGE_5162):
brightness = 0.0788 * read_data # sensor ambient light converse to reality
elif (range == AP3216C_ALS_RANGE_1291):
brightness = 0.0197 * read_data # sensor ambient light converse to reality
elif (range == AP3216C_ALS_RANGE_323):
brightness = 0.0049 * read_data # sensor ambient light converse to reality
return brightness
#This function reads proximity by ap3216c sensor measurement
#@param no
#@return the proximity data.
def ap3216c_read_ps_data(self):
read_data = self.read_low_and_high(AP3216C_PS_DATA_L_REG, 1) # read two data
print("ap3216c_read_ps_data read_data is " , read_data);
if (1 == ((read_data >> 6) & 0x01 or (read_data >> 14) & 0x01)) :
            return 55555 # IR level too high, the PS reading is invalid; return the sentinel value 55555
proximity = (read_data & 0x000f) + (((read_data >> 8) & 0x3f) << 4)
# sensor proximity converse to reality
if (proximity > (1 << 15)) :
proximity = proximity - (1<<16)
        proximity |= read_data & 0x8000 # keep the highest bit: 0 means the object is far away, 1 means it is close
        return proximity # the low bits of proximity carry the data, the highest bit is the status bit
#This function reads ir by ap3216c sensor measurement
#@param no
#@return the ir data.
def ap3216c_read_ir_data(self):
read_data = self.read_low_and_high(AP3216C_IR_DATA_L_REG, 1) # read two data
print("ap3216c_read_ir_data read_data is" , read_data);
proximity = (read_data & 0x0003) + ((read_data >> 8) & 0xFF)
# sensor proximity converse to reality
if (proximity > (1 << 15)) :
proximity = proximity - (1<<16)
return proximity
#This function sets parameter of ap3216c sensor
#@param cmd the parameter cmd of device
#@param value for setting value in cmd register
#@return the setting parameter status,RT_EOK reprensents setting successfully.
def ap3216c_set_param(self, cmd, value):
if cmd == AP3216C_SYSTEM_MODE:
# default 000,power down
self.write_reg(AP3216C_SYS_CONFIGURATION_REG, value)
elif cmd == AP3216C_INT_PARAM:
self.write_reg(AP3216C_SYS_INT_CLEAR_MANNER_REG, value)
elif cmd == AP3216C_ALS_RANGE:
args = self.read_regs(AP3216C_ALS_CONFIGURATION_REG, 1)[0]
args &= 0xcf
args |= value << 4
self.write_reg(AP3216C_ALS_CONFIGURATION_REG, args)
elif cmd == AP3216C_ALS_PERSIST:
args = self.read_regs(AP3216C_ALS_CONFIGURATION_REG, 1)[0]
args &= 0xf0
args |= value
self.write_reg(AP3216C_ALS_CONFIGURATION_REG, args)
elif cmd == AP3216C_ALS_LOW_THRESHOLD_L:
self.write_reg(AP3216C_ALS_THRESHOLD_LOW_L_REG, value)
elif cmd == AP3216C_ALS_LOW_THRESHOLD_H:
self.write_reg(AP3216C_ALS_THRESHOLD_LOW_H_REG, value)
elif cmd == AP3216C_ALS_HIGH_THRESHOLD_L:
self.write_reg(AP3216C_ALS_THRESHOLD_HIGH_L_REG, value)
elif cmd == AP3216C_ALS_HIGH_THRESHOLD_H:
self.write_reg(AP3216C_ALS_THRESHOLD_HIGH_H_REG, value)
elif cmd == AP3216C_PS_GAIN:
args = self.read_regs(AP3216C_PS_CONFIGURATION_REG, 1)[0]
args &= 0xf3
args |= value
self.write_reg(AP3216C_PS_CONFIGURATION_REG, args)
elif cmd == AP3216C_PS_PERSIST:
args = self.read_regs(AP3216C_PS_CONFIGURATION_REG, 1)[0]
args &= 0xfc
args |= value
self.write_reg(AP3216C_PS_CONFIGURATION_REG, args)
elif cmd == AP3216C_PS_LOW_THRESHOLD_L:
self.write_reg(AP3216C_PS_THRESHOLD_LOW_L_REG, value)
elif cmd == AP3216C_PS_LOW_THRESHOLD_H:
self.write_reg(AP3216C_PS_THRESHOLD_LOW_H_REG, value)
elif cmd == AP3216C_PS_HIGH_THRESHOLD_L:
self.write_reg(AP3216C_PS_THRESHOLD_HIGH_L_REG, value)
elif cmd == AP3216C_PS_HIGH_THRESHOLD_H:
self.write_reg(AP3216C_PS_THRESHOLD_HIGH_H_REG, value)
#This function gets parameter of ap3216c sensor
#@param cmd the parameter cmd of device
#@param value to get value in cmd register
#@return the getting parameter status,RT_EOK reprensents getting successfully.
def ap3216c_get_param(self, cmd):
if cmd == AP3216C_SYSTEM_MODE:
value = self.read_regs(AP3216C_SYS_CONFIGURATION_REG, 1)[0]
elif cmd == AP3216C_INT_PARAM:
value = self.read_regs(AP3216C_SYS_INT_CLEAR_MANNER_REG, 1)[0]
elif cmd == AP3216C_ALS_RANGE:
value = self.read_regs(AP3216C_ALS_CONFIGURATION_REG, 1)[0]
temp = (value & 0xff) >> 4
value = temp
elif cmd == AP3216C_ALS_PERSIST:
temp = self.read_regs(AP3216C_ALS_CONFIGURATION_REG, 1)[0]
temp = value & 0x0f
value = temp
elif cmd == AP3216C_ALS_LOW_THRESHOLD_L:
value = self.read_regs(AP3216C_ALS_THRESHOLD_LOW_L_REG, 1)[0]
elif cmd == AP3216C_ALS_LOW_THRESHOLD_H:
value = self.read_regs(AP3216C_ALS_THRESHOLD_LOW_H_REG, 1)[0]
elif cmd == AP3216C_ALS_HIGH_THRESHOLD_L:
value = self.read_regs(AP3216C_ALS_THRESHOLD_HIGH_L_REG, 1)[0]
elif cmd == AP3216C_ALS_HIGH_THRESHOLD_H:
value = self.read_regs(AP3216C_ALS_THRESHOLD_HIGH_H_REG, 1)[0]
elif cmd == AP3216C_PS_GAIN:
temp = self.read_regs(AP3216C_PS_CONFIGURATION_REG, 1)[0]
value = (temp & 0xc) >> 2
elif cmd == AP3216C_PS_PERSIST:
temp = self.read_regs(AP3216C_PS_CONFIGURATION_REG, 1)[0]
value = temp & 0x3
elif cmd == AP3216C_PS_LOW_THRESHOLD_L:
value = self.read_regs(AP3216C_PS_THRESHOLD_LOW_L_REG, 1)[0]
elif cmd == AP3216C_PS_LOW_THRESHOLD_H:
value = self.read_regs(AP3216C_PS_THRESHOLD_LOW_H_REG, 1)[0]
elif cmd == AP3216C_PS_HIGH_THRESHOLD_L:
value = self.read_regs(AP3216C_PS_THRESHOLD_HIGH_L_REG, 1)[0]
elif cmd == AP3216C_PS_HIGH_THRESHOLD_H:
value = self.read_regs(AP3216C_PS_THRESHOLD_HIGH_H_REG, 1)[0]
return value
def close(self):
self.i2cDev.close()
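

# Minimal usage sketch (added example, not part of the original driver; runs
# only on an AliOS-Things board): the I2C device id "ap3216c" below is
# illustrative and must match an entry in the board configuration.
if __name__ == '__main__':
    sensor = AP3216C()
    sensor.open("ap3216c")
    sensor.init()
    print("ambient light (lux):", sensor.ap3216c_read_ambient_light())
    print("proximity:", sensor.ap3216c_read_ps_data())
    print("ir:", sensor.ap3216c_read_ir_data())
    sensor.close()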
|
QSTK/qstkstudy/Events.py | paulopatto/QuantSoftwareToolkit | 339 | 12618267 | # (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#Created on October <day>, 2011
#
#@author: <NAME>
#@contact: <EMAIL>
#@summary: Example Event Datamatrix acceptable to EventProfiler App
#
import pandas
from QSTK.qstkutil import DataAccess as da
import numpy as np
import math
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
 1 = status bit (positively confirms the event occurrence)
"""
def find_events(symbols, d_data, verbose=False):
# Get the data from the data store
storename = "Yahoo" # get data from our daily prices source
# Available field names: open, close, high, low, close, actual_close, volume
closefield = "close"
volumefield = "volume"
window = 10
if verbose:
print __name__ + " reading data"
close = d_data[closefield]
if verbose:
print __name__ + " finding events"
for symbol in symbols:
close[symbol][close[symbol]>= 1.0] = np.NAN
for i in range(1,len(close[symbol])):
if np.isnan(close[symbol][i-1]) and close[symbol][i] < 1.0 :#(i-1)th was > $1, and (i)th is <$1
close[symbol][i] = 1.0 #overwriting the price by the bit
close[symbol][close[symbol]< 1.0] = np.NAN
return close
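

# Minimal usage sketch (added example, not part of the original module). It
# assumes QSTK's local Yahoo data store is installed; the date range and the
# 'sp5002012' symbol list below are illustrative only.
if __name__ == '__main__':
    dt_start = dt.datetime(2008, 1, 1)
    dt_end = dt.datetime(2009, 12, 31)
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
    dataobj = da.DataAccess('Yahoo')
    ls_symbols = dataobj.get_symbols_from_list('sp5002012')
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    df_events = find_events(ls_symbols, d_data, verbose=True)
    print(df_events.head())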
|
Build Glyphs/Build Circled Glyphs.py | KatjaSchimmel/Glyphs-Scripts | 283 | 12618272 | #MenuTitle: Build Circled Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Builds circled numbers and letters (U+24B6...24EA and U+2460...2473) from _part.circle and the letters and figures.
"""
from Foundation import NSPoint, NSClassFromString, NSAffineTransform
from AppKit import NSButtLineCapStyle, NSRect, NSSize
import math, vanilla
circledNumbers = (
"zero.circled",
"one.circled",
"two.circled",
"three.circled",
"four.circled",
"five.circled",
"six.circled",
"seven.circled",
"eight.circled",
"nine.circled",
"one_zero.circled",
"one_one.circled",
"one_two.circled",
"one_three.circled",
"one_four.circled",
"one_five.circled",
"one_six.circled",
"one_seven.circled",
"one_eight.circled",
"one_nine.circled",
"two_zero.circled",
)
circledUC =(
"A.circled",
"B.circled",
"C.circled",
"D.circled",
"E.circled",
"F.circled",
"G.circled",
"H.circled",
"I.circled",
"J.circled",
"K.circled",
"L.circled",
"M.circled",
"N.circled",
"O.circled",
"P.circled",
"Q.circled",
"R.circled",
"S.circled",
"T.circled",
"U.circled",
"V.circled",
"W.circled",
"X.circled",
"Y.circled",
"Z.circled",
)
circledLC = (
"a.circled",
"b.circled",
"c.circled",
"d.circled",
"e.circled",
"f.circled",
"g.circled",
"h.circled",
"i.circled",
"j.circled",
"k.circled",
"l.circled",
"m.circled",
"n.circled",
"o.circled",
"p.circled",
"q.circled",
"r.circled",
"s.circled",
"t.circled",
"u.circled",
"v.circled",
"w.circled",
"x.circled",
"y.circled",
"z.circled",
)
def offsetLayer( thisLayer, offset, makeStroke=False, position=0.5, autoStroke=False ):
offsetFilter = NSClassFromString("GlyphsFilterOffsetCurve")
try:
# GLYPHS 3:
offsetFilter.offsetLayer_offsetX_offsetY_makeStroke_autoStroke_position_metrics_error_shadow_capStyleStart_capStyleEnd_keepCompatibleOutlines_(
thisLayer,
offset, offset, # horizontal and vertical offset
makeStroke, # if True, creates a stroke
autoStroke, # if True, distorts resulting shape to vertical metrics
position, # stroke distribution to the left and right, 0.5 = middle
None, None, None, 0, 0, True )
except:
# GLYPHS 2:
offsetFilter.offsetLayer_offsetX_offsetY_makeStroke_autoStroke_position_metrics_error_shadow_capStyle_keepCompatibleOutlines_(
thisLayer,
offset, offset, # horizontal and vertical offset
makeStroke, # if True, creates a stroke
autoStroke, # if True, distorts resulting shape to vertical metrics
position, # stroke distribution to the left and right, 0.5 = middle
thisLayer.glyphMetrics(), # metrics (G3)
None, None, # error, shadow
0, # NSButtLineCapStyle, # cap style
True, # keep compatible
)
def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0):
"""
Returns an NSAffineTransform object for transforming layers.
Apply an NSAffineTransform t object like this:
Layer.transform_checkForSelection_doComponents_(t,False,True)
Access its transformation matrix like this:
tMatrix = t.transformStruct() # returns the 6-float tuple
Apply the matrix tuple like this:
Layer.applyTransform(tMatrix)
Component.applyTransform(tMatrix)
Path.applyTransform(tMatrix)
Chain multiple NSAffineTransform objects t1, t2 like this:
t1.appendTransform_(t2)
"""
myTransform = NSAffineTransform.transform()
if rotate:
myTransform.rotateByDegrees_(rotate)
if scale != 1.0:
myTransform.scaleBy_(scale)
if not (shiftX == 0.0 and shiftY == 0.0):
myTransform.translateXBy_yBy_(shiftX,shiftY)
if skew:
skewStruct = NSAffineTransformStruct()
skewStruct.m11 = 1.0
skewStruct.m22 = 1.0
skewStruct.m21 = math.tan(math.radians(skew))
skewTransform = NSAffineTransform.transform()
skewTransform.setTransformStruct_(skewStruct)
myTransform.appendTransform_(skewTransform)
return myTransform
def centerOfRect(rect):
"""
Returns the center of NSRect rect as an NSPoint.
"""
x = rect.origin.x + rect.size.width * 0.5
y = rect.origin.y + rect.size.height * 0.5
return NSPoint(x,y)
def combinedBounds(rects):
bottomLeft = NSPoint( 1000.0, 100.0 )
topRight = NSPoint( 0.0, 0.0 )
for thisRect in rects:
bottomLeft.x = min( thisRect.origin.x, bottomLeft.x )
bottomLeft.y = min( thisRect.origin.y, bottomLeft.y )
topRight.x = max( topRight.x, thisRect.origin.x+thisRect.size.width )
topRight.y = max( topRight.y, thisRect.origin.y+thisRect.size.height )
combinedRect = NSRect()
combinedRect.origin = bottomLeft
combinedRect.size = NSSize( topRight.x-bottomLeft.x, topRight.y-bottomLeft.y )
return combinedRect
def measureLayerAtHeightFromLeftOrRight( thisLayer, height, leftSide=True ):
leftX = thisLayer.bounds.origin.x
rightX = leftX + thisLayer.bounds.size.width
y = height
returnIndex = 1
if not leftSide:
returnIndex = -2
measurement = thisLayer.intersectionsBetweenPoints( NSPoint(leftX,y), NSPoint(rightX,y) )[returnIndex].pointValue().x
if leftSide:
distance = measurement - leftX
else:
distance = rightX - measurement
return distance
def minDistanceBetweenTwoLayers( comp1, comp2, interval=5.0 ):
topY = min( comp1.bounds.origin.y+comp1.bounds.size.height, comp2.bounds.origin.y+comp2.bounds.size.height )
bottomY = max( comp1.bounds.origin.y, comp2.bounds.origin.y )
distance = topY - bottomY
minDist = None
for i in range(int(distance/interval)):
height = bottomY + i * interval
left = measureLayerAtHeightFromLeftOrRight( comp1, height, leftSide=False )
right = measureLayerAtHeightFromLeftOrRight( comp2, height, leftSide=True )
total = left+right
if minDist == None or minDist > total:
minDist = total
if minDist == None:
minDist = 0.0
return minDist
def placeComponentsAtDistance( thisLayer, comp1, comp2, interval=5.0, distance=10.0 ):
if comp1 is not None:
thisMaster = thisLayer.associatedFontMaster()
masterID = thisMaster.id
original1 = comp1.component.layers[masterID]
original2 = comp2.component.layers[masterID]
minDist = minDistanceBetweenTwoLayers( original1, original2, interval=interval )
comp2shift = distance - minDist
addedSBs = original1.RSB + original2.LSB
comp2.x = comp1.x + original1.width - addedSBs + comp2shift
def buildCircledGlyph( thisGlyph, circleName, scaleFactors, minDistanceBetweenTwoLayers=90.0, suffix=None ):
isBlack = "black" in circleName.lower()
thisFont = thisGlyph.font
thisGlyph.widthMetricsKey = None # "=%i" % thisFont.upm )
thisGlyph.leftMetricsKey = "=40"
thisGlyph.rightMetricsKey = "=|"
for i, thisMaster in enumerate(thisFont.masters):
figureHeight = None
scaleFactor = scaleFactors[i]
if isBlack:
scaleFactor = max(0.6, scaleFactor)
circleGlyph = thisFont.glyphs[circleName]
circleLayer = circleGlyph.layers[thisMaster.id]
circleScaleFactor = thisFont.upm * 0.92 / max(thisFont.upm*0.66, circleLayer.bounds.size.width)
# prepare layer
thisLayer = thisGlyph.layers[thisMaster.id]
thisLayer.clear()
# add circle:
assumedCenter = NSPoint( thisFont.upm*0.5, thisFont.upm*0.3 ) # hardcoded
circleComponent = GSComponent(circleName)
thisLayer.components.append(circleComponent)
# scale circle:
circleScale = transform( scale=circleScaleFactor ).transformStruct()
circleComponent.applyTransform( circleScale )
# move circle:
circleBounds = thisLayer.components[0].bounds
circleCenter = centerOfRect(circleBounds)
xShift = assumedCenter.x - circleCenter.x
yShift = assumedCenter.y - circleCenter.y
circleShift = transform( shiftX=xShift, shiftY=yShift ).transformStruct()
circleComponent.applyTransform(circleShift)
# update metrics:
thisLayer.updateMetrics()
thisLayer.syncMetrics()
# find number and letter components to add:
suffixlessName = thisGlyph.name
if "." in suffixlessName:
suffixlessName = thisGlyph.name[:thisGlyph.name.find(".")]
componentNames = suffixlessName.split("_")
# add one component in the center:
if componentNames:
advance = 0
for j, compName in enumerate(componentNames):
lfName = "%s.lf" % compName
osfName = "%s.osf" % compName
namesToCheck = [compName]
extraSuffixes = (".osf",".lf")
for extraSuffix in extraSuffixes:
namesToCheck.insert(0,compName+extraSuffix)
if suffix:
for existingName in namesToCheck[:]:
namesToCheck.insert(0,existingName+suffix)
for nameToCheck in namesToCheck:
if thisFont.glyphs[nameToCheck]:
compName = nameToCheck
break
innerComponent = GSComponent( compName )
innerComponent.automaticAlignment = False
thisLayer.components.append( innerComponent )
innerComponent.position = NSPoint( advance, 0.0 )
if j > 0:
innerComponent.disableAlignment = True
placeComponentsAtDistance(
thisLayer,
thisLayer.components[-2],
thisLayer.components[-1], # same as innerComponent
distance = minDistanceBetweenTwoLayers
)
originalLayerWidth = thisFont.glyphs[compName].layers[thisMaster.id].width
advance += originalLayerWidth
collectedBounds = []
for i in range(1,len(thisLayer.components)):
collectedBounds.append(thisLayer.components[i].bounds)
compCenter = centerOfRect( combinedBounds(collectedBounds) )
centerAnchor = thisLayer.anchorForName_traverseComponents_("#center",True)
if centerAnchor:
circleCenter = centerAnchor.position
else:
circleCenter = centerOfRect( circleComponent.bounds )
# scale and move it in place:
shift = transform( shiftX=-compCenter.x, shiftY=-compCenter.y ).transformStruct()
scaleToFit = transform( scale=scaleFactor*circleScaleFactor ).transformStruct()
backshift = transform( shiftX=circleCenter.x, shiftY=circleCenter.y ).transformStruct()
compensateStroke = []
for i in range(1,len(thisLayer.components)):
innerComponent = thisLayer.components[i]
# optically shift so top anchor is in center:
originalLayer = topAnchor = innerComponent.component.layers[thisMaster.id]
topAnchor = originalLayer.anchors["top"]
if topAnchor:
anchorCenter = topAnchor.x
boundsCenter = centerOfRect(originalLayer.bounds).x
opticalCorrection = boundsCenter-anchorCenter
if opticalCorrection != 0.0:
threshold = 35.0
if abs(opticalCorrection) > threshold:
posNeg = opticalCorrection/abs(opticalCorrection)
rest = abs(opticalCorrection) - threshold
opticalCorrection = posNeg * ( threshold + rest * 1/rest**0.3 )
print("--", opticalCorrection)
opticalShift = transform( shiftX = opticalCorrection ).transformStruct()
innerComponent.applyTransform( opticalShift )
innerComponent.applyTransform( shift )
innerComponent.applyTransform( scaleToFit )
innerComponent.applyTransform( backshift )
# move components closer to center:
#move = 15.0
#hOffset = circleCenter.x - centerOfRect(innerComponent.bounds).x
#if abs(hOffset) > move:
# hOffset = (hOffset/abs(hOffset))*move
#if hOffset != 0.0:
# moveCloser = transform( shiftX=hOffset ).transformStruct()
# innerComponent.applyTransform( moveCloser )
# compensatory shift:
if thisGlyph.name in ("two_zero.circled", "one_nine.circled", "one_zero.circled"):
compensate = transform( shiftX=10.0 ).transformStruct()
innerComponent.applyTransform( compensate )
if innerComponent.component.glyphInfo.category == "Number":
if figureHeight == None:
figureHeight = innerComponent.position.y
else:
innerComponent.position.y = figureHeight
compensateStroke.append(innerComponent)
# make slightly bolder:
isNumber = False
for i in range(len(compensateStroke))[::-1]:
componentToDecompose = compensateStroke[i]
if componentToDecompose.component.category == "Number":
isNumber = True
thisLayer.decomposeComponent_(componentToDecompose)
offsetLayer( thisLayer, 4.0 ) #4.0 if isNumber else 3.0 )
if thisLayer.paths and isBlack:
thisLayer.removeOverlap()
for thisPath in thisLayer.paths:
# set first node (make compatible again after remove overlap):
lowestY = thisPath.bounds.origin.y
lowestNodes = [n for n in thisPath.nodes if n.y <= lowestY]
if len(lowestNodes) == 0:
lowestNode = sorted( lowestNodes, key=lambda node:node.y )[0]
elif len(lowestNodes) == 1:
lowestNode = lowestNodes[0]
elif len(lowestNodes) > 1:
lowestNode = sorted( lowestNodes, key=lambda node:node.x )[0]
while lowestNode.type == GSOFFCURVE:
lowestNode = lowestNode.nextNode
thisPath.makeNodeFirst_(lowestNode)
# reverse (white on black):
thisPath.reverse()
thisLayer.anchors = None
for thisComp in thisLayer.components:
if thisComp.componentName == circleName:
thisComp.locked = True
def buildCirclePart( thisFont, glyphName, isBlack=False ):
partCircle = (
(
(353.0, 0.0),
((152.0, 0.0),(0.0, 150.0),(0.0, 348.0)),
((0.0, 549.0),(152.0, 700.0),(353.0, 700.0)),
((556.0, 700.0),(708.0, 549.0),(708.0, 348.0)),
((708.0, 149.0),(556.0, 0.0),(353.0, 0.0))
),
)
thisGlyph = thisFont.glyphs[glyphName]
if not thisGlyph:
thisGlyph = GSGlyph()
thisGlyph.name = glyphName
thisFont.glyphs.append( thisGlyph )
thisGlyph.leftMetricsKey = "=40"
thisGlyph.rightMetricsKey = "=|"
print("Generated %s" % glyphName)
thisGlyph.export = False
# draw in every layer:
for thisLayer in thisGlyph.layers:
# make sure it is empty:
thisLayer.clear()
# draw outer circle:
for thisPath in partCircle:
pen = thisLayer.getPen()
pen.moveTo( thisPath[0] )
for thisSegment in thisPath[1:]:
if len(thisSegment) == 2: # lineto
pen.lineTo( thisSegment )
elif len(thisSegment) == 3: # curveto
pen.curveTo(
thisSegment[0],
thisSegment[1],
thisSegment[2]
)
else:
print("%s: Path drawing error. Could not process this segment:\n" % (glyphName, thisSegment))
pen.closePath()
pen.endPath()
# scale:
refHeight = thisFont.upm - 80
actualHeight = thisLayer.bounds.size.height
scaleFactor = refHeight/actualHeight
thisLayer.applyTransform( transform(scale=scaleFactor).transformStruct() )
# shift to align with capHeight:
refY = thisLayer.associatedFontMaster().capHeight * 0.5
actualY = thisLayer.bounds.origin.y + thisLayer.bounds.size.height * 0.5
shift = refY - actualY
thisLayer.applyTransform( transform(shiftY=shift).transformStruct() )
if not isBlack:
# inner circle, scaled down:
currentHeight = thisLayer.bounds.size.height
outerCircle = thisLayer.paths[0]
innerCircle = outerCircle.copy()
thisLayer.paths.append(innerCircle)
# get stems
hstems = []
vstems = []
masterStems = thisLayer.associatedFontMaster().stems
for i, stem in enumerate(thisFont.stems):
if stem.horizontal:
hstems.append(masterStems[i])
else:
vstems.append(masterStems[i])
# scale down inner circle:
stemSize = 50.0
if hstems and vstems:
stemSize = (hstems[0] + vstems[0]) * 0.25
maximumStemSize = currentHeight * 0.28
stemSize = min(maximumStemSize,stemSize)
smallerBy = stemSize * 2 * 1.06
newHeight = currentHeight - smallerBy
scaleFactor = newHeight/currentHeight
scale = transform(scale=scaleFactor).transformStruct()
centerX = innerCircle.bounds.origin.x + innerCircle.bounds.size.width * 0.5
centerY = innerCircle.bounds.origin.y + innerCircle.bounds.size.height * 0.5
shift = transform(shiftX=-centerX, shiftY=-centerY).transformStruct()
shiftBack = transform(shiftX=centerX, shiftY=centerY).transformStruct()
innerCircle.applyTransform( shift )
innerCircle.applyTransform( scale )
innerCircle.applyTransform( shiftBack )
# tidy up paths and set width:
thisLayer.correctPathDirection()
thisLayer.cleanUpPaths()
thisLayer.updateMetrics()
thisLayer.syncMetrics()
# add anchor:
centerX = thisLayer.bounds.origin.x + thisLayer.bounds.size.width * 0.5
centerY = thisLayer.bounds.origin.y + thisLayer.bounds.size.height * 0.5
centerAnchor = GSAnchor()
centerAnchor.name = "#center"
centerAnchor.position = NSPoint( centerX, centerY )
thisLayer.anchors.append(centerAnchor)
def boxArea(thisLayer):
return thisLayer.bounds.size.width * thisLayer.bounds.size.height
class BuildCircledGlyphs( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 230
windowHeight = 270
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Build Circled Glyphs", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.BuildCircledGlyphs.mainwindow" # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Builds the following glyphs:", sizeStyle='small', selectable=True )
self.w.descriptionText.getNSTextField().setToolTip_("Hint: if the letter or figure glyph contains #center anchors, the anchor position will be preferred for positioning the letter or figure inside the circle.")
linePos += lineHeight
self.w.buildUC = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Uppercase circled letters", value=False, callback=self.SavePreferences, sizeStyle='small' )
		self.w.buildUC.getNSButton().setToolTip_("ⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏ")
linePos += lineHeight
self.w.buildLC = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Lowercase circled letters", value=False, callback=self.SavePreferences, sizeStyle='small' )
		self.w.buildLC.getNSButton().setToolTip_("ⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩ")
linePos += lineHeight
self.w.buildCircledNumbers = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Circled numbers 0-20", value=True, callback=self.SavePreferences, sizeStyle='small' )
		self.w.buildCircledNumbers.getNSButton().setToolTip_("⓪①②③④⑤⑥⑦⑧⑨⑩⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳")
linePos += lineHeight
self.w.buildBlackUC = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Black uppercase circled letters", value=False, callback=self.SavePreferences, sizeStyle='small' )
		self.w.buildBlackUC.getNSButton().setToolTip_("🅐🅑🅒🅓🅔🅕🅖🅗🅘🅙🅚🅛🅜🅝🅞🅟🅠🅡🅢🅣🅤🅥🅦🅧🅨🅩")
linePos += lineHeight
		self.w.buildBlackLC = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Black lowercase circled letters ⚠️", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.buildBlackLC.getNSButton().setToolTip_("Do not exist in Unicode. You will have to make them accessible through OpenType features.")
linePos += lineHeight
self.w.buildBlackCircledNumbers = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Black circled numbers 0-20", value=False, callback=self.SavePreferences, sizeStyle='small' )
		self.w.buildBlackCircledNumbers.getNSButton().setToolTip_("⓿❶❷❸❹❺❻❼❽❾❿⓫⓬⓭⓮⓯⓰⓱⓲⓳⓴")
linePos += lineHeight
self.w.minDistanceBetweenFiguresText = vanilla.TextBox( (inset, linePos+2, 145, 14), u"Distance between figures:", sizeStyle='small', selectable=True )
self.w.minDistanceBetweenFigures = vanilla.EditText( (inset+145, linePos-1, -inset, 19), "90", callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.suffixesCheckbox = vanilla.CheckBox( (inset, linePos, 110, 20), "Include Suffixes:", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.suffixes = vanilla.EditText( (inset+110, linePos, -inset, 19), "ss06, ss02", callback=self.SavePreferences, sizeStyle='small' )
self.w.suffixes.getNSTextField().setToolTip_("Will look if there is a base glyph with a dot suffix, and build the circled glyph with the same suffix. Separate multiple suffixes with a comma. E.g. You have an A and an A.ss06, then you get A.blackCircled and A.blackCircled.ss06, provided you enter ss06 here.")
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button( (-100-inset, -20-inset, -inset, -inset), "Build", sizeStyle='regular', callback=self.BuildCircledGlyphsMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Build Circled Glyphs' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender=None ):
try:
# write current settings into prefs:
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildUC"] = self.w.buildUC.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildLC"] = self.w.buildLC.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackUC"] = self.w.buildBlackUC.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackLC"] = self.w.buildBlackLC.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildCircledNumbers"] = self.w.buildCircledNumbers.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackCircledNumbers"] = self.w.buildBlackCircledNumbers.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.minDistanceBetweenFigures"] = self.w.minDistanceBetweenFigures.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.suffixesCheckbox"] = self.w.suffixesCheckbox.get()
Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.suffixes"] = self.w.suffixes.get()
return True
except:
import traceback
print(traceback.format_exc())
return False
def LoadPreferences( self ):
try:
# register defaults:
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.buildUC", 0)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.buildLC", 0)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.buildBlackUC", 0)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.buildBlackLC", 0)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.buildCircledNumbers", 1)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.buildBlackCircledNumbers", 0)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.minDistanceBetweenFigures", "90")
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.suffixesCheckbox", 0)
Glyphs.registerDefault("com.mekkablue.BuildCircledGlyphs.suffixes", "ss02, ss06")
# load previously written prefs:
self.w.buildUC.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildUC"] )
self.w.buildLC.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildLC"] )
self.w.buildBlackUC.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackUC"] )
self.w.buildBlackLC.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackLC"] )
self.w.buildCircledNumbers.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildCircledNumbers"] )
self.w.buildBlackCircledNumbers.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackCircledNumbers"] )
self.w.minDistanceBetweenFigures.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.minDistanceBetweenFigures"] )
self.w.suffixesCheckbox.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.suffixesCheckbox"] )
self.w.suffixes.set( Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.suffixes"] )
return True
except:
import traceback
print(traceback.format_exc())
return False
def turnBlack(self, glyphNames):
searchFor = ".circled"
replaceWith = ".blackCircled"
blackGlyphNames = [n.replace(searchFor,replaceWith) for n in glyphNames if n.endswith(searchFor)]
return blackGlyphNames
def BuildCircledGlyphsMain( self, sender=None ):
try:
# clear macro window log:
Glyphs.clearLog()
# update settings to the latest user input:
if not self.SavePreferences():
print("Note: 'Build Circled Glyphs' could not write preferences.")
minDistanceBetweenFigures = 90.0
thisFont = Glyphs.font # frontmost font
buildUC = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildUC"]
buildLC = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildLC"]
buildCircledNumbers = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildCircledNumbers"]
buildBlackUC = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackUC"]
buildBlackLC = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackLC"]
buildBlackCircledNumbers = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.buildBlackCircledNumbers"]
minDistanceBetweenFigures = float(Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.minDistanceBetweenFigures"])
shouldIncludeSuffixes = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.suffixesCheckbox"]
suffixes = Glyphs.defaults["com.mekkablue.BuildCircledGlyphs.suffixes"]
if shouldIncludeSuffixes:
suffixes = [("."+x.strip()).replace("..",".") for x in suffixes.split(",")]
else:
suffixes = ()
circledGlyphNames = []
if buildUC:
circledGlyphNames.extend(circledUC)
if buildLC:
circledGlyphNames.extend(circledLC)
if buildCircledNumbers:
circledGlyphNames.extend(circledNumbers)
if buildBlackUC:
circledGlyphNames.extend(self.turnBlack(circledUC))
if buildBlackLC:
circledGlyphNames.extend(self.turnBlack(circledLC))
if buildBlackCircledNumbers:
circledGlyphNames.extend(self.turnBlack(circledNumbers))
if not thisFont:
Message(title="No Font Open", message="The script requires a font. Open a font and run the script again.", OKButton=None)
elif circledGlyphNames:
print("Build Circled Glyphs Report for %s" % thisFont.familyName)
if thisFont.filepath:
print(thisFont.filepath)
else:
print("โ ๏ธ The font file has not been saved yet.")
print()
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
try:
print("Building: %s\n" %
", ".join(circledGlyphNames)
)
# add circles if not present in font already:
circleName = "_part.circle"
if not thisFont.glyphs[circleName]:
buildCirclePart( thisFont, circleName )
circleGlyph = thisFont.glyphs[circleName]
blackCircleGlyph = None
if buildBlackUC or buildBlackLC or buildBlackCircledNumbers:
blackCircleName = "_part.blackCircle"
if not thisFont.glyphs[blackCircleName]:
buildCirclePart( thisFont, blackCircleName, isBlack=True )
blackCircleGlyph = thisFont.glyphs[blackCircleName]
# determining scale of inscribed letters:
scaleFactors = []
for thisMaster in thisFont.masters:
radius = circleGlyph.layers[thisMaster.id].paths[1].bounds.size.width * 0.5
maxArea = 0.0
biggestLayer = None
for glyphName in circledGlyphNames:
if "." in glyphName:
glyphName = glyphName[:glyphName.find(".")]
glyphNames = [glyphName]
if suffixes:
for suffix in suffixes:
glyphNames.append("%s%s"%(glyphName,suffix))
for glyphName in glyphNames:
thisGlyph = thisFont.glyphs[glyphName]
if thisGlyph:
thisLayer = thisGlyph.layers[thisMaster.id]
thisArea = boxArea(thisLayer)
if thisArea > maxArea:
maxArea = thisArea
biggestLayer = thisLayer
if biggestLayer:
height = biggestLayer.bounds.size.height
width = biggestLayer.bounds.size.width
else:
# fallback values
height, width = 700.0, 500.0
print("โ ๏ธ Warning: could not determine bounds of relevant layers, resorting to defaults. Are the glyphs empty?")
angleInRadians = math.atan2( height, width*1.4 + minDistanceBetweenFigures )
scaledHeight = math.sin(angleInRadians) * radius * 2 * 0.9
scaleFactor = scaledHeight / height
scaleFactors.append(scaleFactor)
print("Scale factor for master '%s': %.1f" % (thisMaster.name, scaleFactor))
# actually building letters:
for glyphName in circledGlyphNames:
if "black" in glyphName.lower():
circleName = blackCircleName
# check for suffixes:
coreName = glyphName[:glyphName.find(".")]
coreNames = [coreName]
glyphNames = [glyphName]
suffixDict = {}
if suffixes:
for suffix in suffixes:
suffixedCoreName = coreName + suffix
if "_" in coreName:
particles = coreName.split("_")
for particle in particles:
if not suffixedCoreName in coreNames:
if thisFont.glyphs[particle+suffix]:
coreNames.append(suffixedCoreName)
newGlyphName = glyphName+suffix
glyphNames.append(newGlyphName)
suffixDict[newGlyphName] = suffix
else:
if thisFont.glyphs[suffixedCoreName]:
coreNames.append(suffixedCoreName)
newGlyphName = glyphName+suffix
glyphNames.append(newGlyphName)
suffixDict[newGlyphName] = suffix
for i,glyphName in enumerate(glyphNames):
thisGlyph = thisFont.glyphs[glyphName]
# generate it if it does not exist
if not thisGlyph:
thisGlyph = GSGlyph()
thisGlyph.name = glyphName
thisFont.glyphs.append(thisGlyph)
thisGlyph.updateGlyphInfo()
if glyphName in suffixDict:
suffix = suffixDict[glyphName]
else:
suffix = None
thisGlyph.beginUndo() # begin undo grouping
print("Building %s" % thisGlyph.name)
buildCircledGlyph( thisGlyph, circleName, scaleFactors, minDistanceBetweenFigures, suffix )
thisGlyph.endUndo() # end undo grouping
except Exception as e:
Glyphs.showMacroWindow()
print("\nโ ๏ธ Script Error:\n")
import traceback
print(traceback.format_exc())
print()
raise e
finally:
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
self.w.close() # delete if you want window to stay open
# Final report:
Glyphs.showNotification(
u"%s: Done" % (thisFont.familyName),
u"Build Circled Glyphs is finished. Details in Macro Window",
)
print("\nDone.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Build Circled Glyphs Error: %s" % e)
import traceback
print(traceback.format_exc())
BuildCircledGlyphs()
|
rio_tiler/colormap.py | kalxas/rio-tiler | 242 | 12618273 | """rio-tiler colormap functions and classes."""
import os
import pathlib
import re
from typing import Dict, List, Sequence, Tuple, Union
import attr
import numpy
from .constants import NumType
from .errors import (
ColorMapAlreadyRegistered,
InvalidColorFormat,
InvalidColorMapName,
InvalidFormat,
)
try:
from importlib.resources import files as resources_files # type: ignore
except ImportError:
# Try backported to PY<39 `importlib_resources`.
from importlib_resources import files as resources_files # type: ignore
EMPTY_COLORMAP: Dict = {i: [0, 0, 0, 0] for i in range(256)}
DEFAULT_CMAPS_FILES = {
f.stem: str(f) for f in (resources_files(__package__) / "cmap_data").glob("*.npy") # type: ignore
}
USER_CMAPS_DIR = os.environ.get("COLORMAP_DIRECTORY", None)
if USER_CMAPS_DIR:
DEFAULT_CMAPS_FILES.update(
{f.stem: str(f) for f in pathlib.Path(USER_CMAPS_DIR).glob("*.npy")}
)
def _update_alpha(cmap: Dict, idx: Sequence[int], alpha: int = 0) -> None:
"""Update the alpha value of a colormap index."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap[i] = cmap[i][0:3] + [alpha]
def _remove_value(cmap: Dict, idx: Sequence[int]) -> None:
"""Remove value from a colormap dict."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap.pop(i, None)
def _update_cmap(cmap: Dict, values: Dict) -> None:
"""Update a colormap dict."""
for i, color in values.items():
if len(color) == 3:
color += [255]
cmap[i] = color
# From https://github.com/mojodna/marblecutter/blob/5b9040ba6c83562a465eabdbb6e8959e6a8bf041/marblecutter/utils.py#L35
def make_lut(colormap: Dict) -> numpy.ndarray:
"""Create a lookup table numpy.ndarray from a GDAL RGBA Color Table dictionary.
Args:
colormap (dict): GDAL RGBA Color Table dictionary.
Returns:
numpy.ndarray: colormap lookup table.
"""
lut = numpy.zeros(shape=(256, 4), dtype=numpy.uint8)
for i, color in colormap.items():
lut[int(i)] = color
return lut
def apply_cmap(
data: numpy.ndarray, colormap: Union[Dict, Sequence]
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""Apply colormap on data.
Args:
data (numpy ndarray): 1D image array to translate to RGB.
colormap (dict): GDAL RGBA Color Table dictionary.
Returns:
tuple: Data (numpy.ndarray) and Mask (numpy.ndarray) values.
Raises:
InvalidFormat: If data is not a 1 band dataset (1, col, row).
"""
if data.shape[0] > 1:
raise InvalidFormat("Source data must be 1 band")
if isinstance(colormap, Sequence):
return apply_intervals_cmap(data, colormap)
# if colormap has more than 256 values OR its `max` key >= 256 we can't use
# rio_tiler.colormap.make_lut, because we don't want to create a `lookup table`
# with more than 256 entries (256 x 4) array. In this case we use `apply_discrete_cmap`
# which can work with arbitrary colormap dict.
if len(colormap) > 256 or max(colormap) >= 256:
return apply_discrete_cmap(data, colormap)
lookup_table = make_lut(colormap)
data = lookup_table[data[0], :]
data = numpy.transpose(data, [2, 0, 1])
# If the colormap has values between 0-255
# we cast the output array to Uint8.
if data.min() >= 0 and data.max() <= 255:
data = data.astype("uint8")
return data[:-1], data[-1]
def apply_discrete_cmap(
data: numpy.ndarray, colormap: Dict
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""Apply discrete colormap.
Args:
data (numpy ndarray): 1D image array to translate to RGB.
        colormap (dict): Discrete colormap dictionary.
Returns:
tuple: Data (numpy.ndarray) and Alpha band (numpy.ndarray).
Examples:
>>> data = numpy.random.randint(0, 3, size=(1, 256, 256))
cmap = {
0: [0, 0, 0, 0],
1: [255, 255, 255, 255],
2: [255, 0, 0, 255],
3: [255, 255, 0, 255],
}
data, mask = apply_discrete_cmap(data, cmap)
assert data.shape == (3, 256, 256)
"""
res = numpy.zeros((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)
for k, v in colormap.items():
res[data[0] == k] = v
data = numpy.transpose(res, [2, 0, 1])
# If the colormap has values between 0-255
# we cast the output array to Uint8
if data.min() >= 0 and data.max() <= 255:
data = data.astype("uint8")
return data[:-1], data[-1]
def apply_intervals_cmap(
data: numpy.ndarray, colormap: Sequence[Sequence[Sequence[NumType]]]
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""Apply intervals colormap.
Args:
data (numpy ndarray): 1D image array to translate to RGB.
        colormap (Sequence): Sequence of intervals and colors in the form [([min, max], [r, g, b, a]), ...].
Returns:
tuple: Data (numpy.ndarray) and Alpha band (numpy.ndarray).
Examples:
>>> data = numpy.random.randint(0, 3, size=(1, 256, 256))
cmap = [
([0, 1], [0, 0, 0, 0]),
([1, 2], [255, 255, 255, 255]),
([2, 3], [255, 0, 0, 255]),
([3, 4], [255, 255, 0, 255]),
]
data, mask = apply_intervals_cmap(data, cmap)
assert data.shape == (3, 256, 256)
"""
res = numpy.zeros((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)
for (k, v) in colormap:
res[(data[0] >= k[0]) & (data[0] < k[1])] = v
data = numpy.transpose(res, [2, 0, 1])
# If the colormap has values between 0-255
# we cast the output array to Uint8
if data.min() >= 0 and data.max() <= 255:
data = data.astype("uint8")
return data[:-1], data[-1]
def parse_color(rgba: Union[Sequence[int], str]) -> Tuple[int, int, int, int]:
"""Parse RGB/RGBA color and return valid rio-tiler compatible RGBA colormap entry.
Args:
rgba (str or list of int): HEX encoded or list RGB or RGBA colors.
Returns:
tuple: RGBA values.
Examples:
>>> parse_color("#FFF")
[255, 255, 255, 255]
>>> parse_color("#FF0000FF")
[255, 0, 0, 255]
>>> parse_color("#FF0000")
[255, 0, 0, 255]
>>> parse_color([255, 255, 255])
[255, 255, 255, 255]
"""
if isinstance(rgba, str):
if re.match("^#[a-fA-F0-9]{3,4}$", rgba):
factor = 2
hex_pattern = (
r"^#"
r"(?P<red>[a-fA-F0-9])"
r"(?P<green>[a-fA-F0-9])"
r"(?P<blue>[a-fA-F0-9])"
r"(?P<alpha>[a-fA-F0-9])?"
r"$"
)
elif re.match("^#([a-fA-F0-9][a-fA-F0-9]){3,4}$", rgba):
factor = 1
hex_pattern = (
r"^#"
r"(?P<red>[a-fA-F0-9][a-fA-F0-9])"
r"(?P<green>[a-fA-F0-9][a-fA-F0-9])"
r"(?P<blue>[a-fA-F0-9][a-fA-F0-9])"
r"(?P<alpha>[a-fA-F0-9][a-fA-F0-9])?"
r"$"
)
else:
raise InvalidColorFormat(f"Invalid color format: {rgba}")
match = re.match(hex_pattern, rgba)
rgba = [
int(n * factor, 16) for n in match.groupdict().values() if n is not None
]
if len(rgba) > 4 or len(rgba) < 3:
raise InvalidColorFormat(f"Invalid color format: {rgba}")
rgba = tuple(rgba)
if len(rgba) == 3:
rgba += (255,)
return rgba # type: ignore
@attr.s(frozen=True)
class ColorMaps:
"""Default Colormaps holder.
Attributes:
        data (dict): colormaps. Defaults to `rio_tiler.colormap.DEFAULT_CMAPS_FILES`.
"""
data: Dict[str, Union[str, Dict]] = attr.ib(
default=attr.Factory(lambda: DEFAULT_CMAPS_FILES)
)
def get(self, name: str) -> Dict:
"""Fetch a colormap.
Args:
            name (str): colormap name.
        Returns:
dict: colormap dictionary.
"""
cmap = self.data.get(name, None)
if cmap is None:
raise InvalidColorMapName(f"Invalid colormap name: {name}")
if isinstance(cmap, str):
colormap = numpy.load(cmap)
assert colormap.shape == (256, 4)
assert colormap.dtype == numpy.uint8
return {idx: value.tolist() for idx, value in enumerate(colormap)}
else:
return cmap
def list(self) -> List[str]:
"""List registered Colormaps.
        Returns:
list: list of colormap names.
"""
return list(self.data)
def register(
self, custom_cmap: Dict[str, Union[str, Dict]], overwrite: bool = False,
) -> "ColorMaps":
"""Register a custom colormap.
Args:
custom_cmap (dict): custom colormap(s) to register.
overwrite (bool): Overwrite existing colormap with same key (default: False)
Examples:
>>> cmap = cmap.register({"acmap": {0: [0, 0, 0, 0]}})
>>> cmap = cmap.register({"acmap": "acmap.npy"})
"""
for name, cmap in custom_cmap.items():
if not overwrite and name in self.data:
raise ColorMapAlreadyRegistered(
f"{name} is already registered. Use force=True to overwrite."
)
return ColorMaps({**self.data, **custom_cmap})
cmap = ColorMaps() # noqa
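# Illustrative usage sketch: combines the module-level `cmap` registry with
# `apply_cmap`. The colormap name "viridis" is assumed to be one of the bundled
# .npy files; substitute any name returned by `cmap.list()` if it is not.
if __name__ == "__main__":
    demo_data = numpy.random.randint(0, 256, size=(1, 64, 64)).astype("uint8")
    demo_cmap = cmap.get("viridis")  # {pixel value: [R, G, B, A], ...}
    rgb, mask = apply_cmap(demo_data, demo_cmap)
    print(rgb.shape, mask.shape)  # -> (3, 64, 64) (64, 64)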
|
taskw_gcal_sync/helpers.py | bergercookie/taskw_gcal_sync | 113 | 12618298 | <reponame>bergercookie/taskw_gcal_sync<gh_stars>100-1000
"""Various helper methods."""
import re
from typing import Any
def get_object_unique_name(obj: Any) -> str:
"""Return a unique string associated with the given object.
That string is constructed as follows: <object class name>_<object_hex_id>
"""
return f"{type(obj).__name__}_{hex(id(obj))}"
def xor(*args):
"""True if exactly one of the arguments of the iterable is True.
>>> xor(0,1,0,)
True
>>> xor(1,2,3,)
False
>>> xor(False, False, False)
False
>>> xor("kalimera", "kalinuxta")
False
>>> xor("", "a", "")
True
>>> xor("", "", "")
False
"""
return sum([bool(i) for i in args]) == 1
def get_valid_filename(s: str) -> str:
"""Return a filename-compatible version of the given string s
    :param s: String to be used as the base of the filename. Non-string objects
    may also be passed; they are converted to strings via the str operator.
>>> get_valid_filename(r"5678^()^")
'5678____'
>>> get_valid_filename(r"a|string\\go/es||here")
'a_string_go_es__here'
>>> get_valid_filename(r"strin***g")
'strin___g'
.. seealso::
`https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename`_
"""
s = str(s).strip().replace(" ", "_")
return re.sub(r"(?u)[^-\w.]", "_", s)
|
internals/fetchmetrics.py | liamnewmarch/chromium-dashboard | 450 | 12618315 | <reponame>liamnewmarch/chromium-dashboard
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import json
import logging
from xml.dom import minidom
import requests
from google.auth.transport import requests as reqs
from google.oauth2 import id_token
from framework import basehandlers
from framework import ramcache
from framework import utils
from internals import models
import settings
UMA_QUERY_SERVER = 'https://uma-export.appspot.com/chromestatus/'
HISTOGRAMS_URL = 'https://chromium.googlesource.com/chromium/src/+/master/' \
'tools/metrics/histograms/enums.xml?format=TEXT'
# After we have processed all metrics data for a given kind on a given day,
# we create a capstone entry with this otherwise unused bucket_id. Later
# we check for a capstone entry to avoid retrieving metrics for that
# same day again.
CAPSTONE_BUCKET_ID = -1
@utils.retry(3, delay=30, backoff=2)
def _FetchMetrics(url):
if settings.PROD or settings.STAGING:
# follow_redirects=False according to
# https://cloud.google.com/appengine/docs/python/appidentity/#asserting_identity_to_other_app_engine_apps
# GAE request limit is 60s, but it could go longer due to start-up latency.
logging.info('Requesting metrics from: %r', url)
token = id_token.fetch_id_token(reqs.Request(), url)
logging.info('token is %r', token)
return requests.request(
'GET', url, timeout=120.0, allow_redirects=False,
headers={'Authorization': 'Bearer {}'.format(token)})
else:
logging.info('Prod would get metrics from: %r', url)
return None # dev instances cannot access uma-export.
class UmaQuery(object):
"""Reads and stores stats from UMA."""
def __init__(self, query_name, model_class, property_map_class):
self.query_name = query_name
self.model_class = model_class
self.property_map_class = property_map_class
def _HasCapstone(self, date):
query = self.model_class.query()
query = query.filter(self.model_class.bucket_id == CAPSTONE_BUCKET_ID)
query = query.filter(self.model_class.date == date)
if query.count() > 0:
logging.info('Found existing capstone entry for %r', date)
return True
else:
logging.info('No capstone entry for %r, will request', date)
return False
def _SetCapstone(self, date):
entity = self.model_class(
property_name='capstone value',
bucket_id=CAPSTONE_BUCKET_ID,
date=date)
entity.put()
logging.info('Set capstone entry for %r', date)
return entity
def _FetchData(self, date):
params = '?date=%s' % date.strftime('%Y%m%d')
url = UMA_QUERY_SERVER + self.query_name + params
result = _FetchMetrics(url)
    if not result or result.status_code != 200:
      # _FetchMetrics() returns None on dev instances; report that as 503.
      status_code = result.status_code if result else 503
      logging.error('Unable to retrieve UMA data from %s. Error: %s' % (
          url, status_code))
      return (None, status_code)
json_content = result.content.decode().split('\n', 1)[1]
j = json.loads(json_content)
if 'r' not in j:
logging.info(
'%s results do not have an "r" key in the response: %s' %
(self.query_name, repr(j)[:settings.MAX_LOG_LINE]))
logging.info('Note: uma-export can take 2 days to produce metrics')
return (None, 404)
return (j['r'], result.status_code)
def _SaveData(self, data, date):
property_map = self.property_map_class.get_all()
date_query = self.model_class.query()
date_query = date_query.filter(self.model_class.date == date)
existing_saved_data = date_query.fetch(None)
existing_saved_bucket_ids = set()
for existing_datapoint in existing_saved_data:
existing_saved_bucket_ids.add(existing_datapoint.bucket_id)
for bucket_str, bucket_dict in data.items():
bucket_id = int(bucket_str)
# Only add this entity if one doesn't already exist with the same
# bucket_id and date.
if bucket_id in existing_saved_bucket_ids:
logging.info('Cron data was already fetched for this date')
continue
# If the id is not in the map, use 'ERROR' for the name.
# TODO(ericbidelman): Non-matched bucket ids are likely new properties
# that have been added and will be updated in cron/histograms.
property_name = property_map.get(bucket_id, 'ERROR')
entity = self.model_class(
property_name=property_name,
bucket_id=bucket_id,
date=date,
#hits=num_hits,
#total_pages=total_pages,
day_percentage=bucket_dict['rate']
#day_milestone=bucket_dict['milestone']
#low_volume=bucket_dict['low_volume']
#rolling_percentage=
)
entity.put()
self._SetCapstone(date)
def FetchAndSaveData(self, date):
if self._HasCapstone(date):
return 200
data, response_code = self._FetchData(date)
if response_code == 200:
self._SaveData(data, date)
return response_code
UMA_QUERIES = [
UmaQuery(query_name='usecounter.features',
model_class=models.FeatureObserver,
property_map_class=models.FeatureObserverHistogram),
UmaQuery(query_name='usecounter.cssproperties',
model_class=models.StableInstance,
property_map_class=models.CssPropertyHistogram),
UmaQuery(query_name='usecounter.animatedcssproperties',
model_class=models.AnimatedProperty,
property_map_class=models.CssPropertyHistogram),
]
class YesterdayHandler(basehandlers.FlaskHandler):
"""Loads yesterday's UMA data."""
def get_template_data(self, today=None):
"""Loads the data file located at |filename|.
Args:
filename: The filename for the data file to be loaded.
today: date passed in for testing, defaults to today.
"""
days = []
date_str = self.request.args.get('date')
if date_str:
try:
# We accept the same format that is used by uma-export
specified_day = datetime.datetime.strptime(date_str, '%Y%m%d').date()
days.append(specified_day)
except ValueError:
self.abort(400, msg='Failed to parse date string.')
else:
today = today or datetime.date.today()
days = [today - datetime.timedelta(days_ago)
for days_ago in [1, 2, 3, 4, 5]]
for i, query_day in enumerate(days):
for query in UMA_QUERIES:
response_code = query.FetchAndSaveData(query_day)
if response_code not in (200, 404):
error_message = (
'Got error %d while fetching usage data' % response_code)
if i > 2:
logging.error(
'WebStatusAlert-1: Failed to get metrics even after 2 days')
return error_message, 500
ramcache.flush_all()
return 'Success'
class HistogramsHandler(basehandlers.FlaskHandler):
MODEL_CLASS = {
'FeatureObserver': models.FeatureObserverHistogram,
'MappedCSSProperties': models.CssPropertyHistogram,
}
def _SaveData(self, data, histogram_id):
try:
model_class = self.MODEL_CLASS[histogram_id]
except Exception:
logging.error('Invalid Histogram id used: %s' % histogram_id)
return
bucket_id = int(data['bucket_id'])
property_name = data['property_name']
key_name = '%s_%s' % (bucket_id, property_name)
# Bucket ID 1 is reserved for number of CSS Pages Visited. So don't add it.
if (model_class == models.CssPropertyHistogram and bucket_id == 1):
return
model_class.get_or_insert(key_name,
bucket_id=bucket_id,
property_name=property_name
)
def get_template_data(self):
# Attempt to fetch enums mapping file.
response = requests.get(HISTOGRAMS_URL, timeout=60)
if (response.status_code != 200):
logging.error('Unable to retrieve chromium histograms mapping file.')
return
histograms_content = base64.b64decode(response.content).decode()
dom = minidom.parseString(histograms_content)
# The enums.xml file looks like this:
# <enum name="FeatureObserver">
# <int value="0" label="OBSOLETE_PageDestruction"/>
# <int value="1" label="LegacyNotifications"/>
enum_tags = dom.getElementsByTagName('enum')
# Save bucket ids for each histogram type, FeatureObserver and
# MappedCSSProperties.
for histogram_id in list(self.MODEL_CLASS.keys()):
enum = [enum for enum in enum_tags
if enum.attributes['name'].value == histogram_id][0]
for child in enum.getElementsByTagName('int'):
self._SaveData({
'bucket_id': child.attributes['value'].value,
'property_name': child.attributes['label'].value
}, histogram_id)
return 'Success'
class BlinkComponentHandler(basehandlers.FlaskHandler):
"""Updates the list of Blink components in the db."""
def get_template_data(self):
models.BlinkComponent.update_db()
return 'Blink components updated'
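# Illustrative sketch of driving one day of metrics collection outside the cron
# handlers above; it only reuses names defined in this module, and is assumed to
# require the same Datastore/App Engine context and uma-export access that the
# handlers rely on.
if __name__ == '__main__':
  yesterday = datetime.date.today() - datetime.timedelta(days=1)
  for query in UMA_QUERIES:
    status = query.FetchAndSaveData(yesterday)
    logging.info('%s for %s -> HTTP %s', query.query_name, yesterday, status)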
|
src/genie/libs/parser/iosxe/tests/ShowSnmpGroup/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12618343 | expected_output = {
1: {
"groupname": "2c",
"sec_model": "v1",
"contextname": "none",
"storage_type": "volatile",
"readview": "none",
"writeview": "none",
"notifyview": "*tv.FFFF58bf.eaFF58bf.eaFFFFFF.F",
"row_status": {"status": "active"},
},
2: {
"groupname": "2c",
"sec_model": "v2c",
"contextname": "none",
"storage_type": "volatile",
"readview": "none",
"writeview": "none",
"notifyview": "*tv.FFFF58bf.eaFF58bf.eaFFFFFF.F",
"row_status": {"status": "active"},
},
3: {
"groupname": "ag-ro",
"sec_model": "v1",
"contextname": "none",
"storage_type": "volatile",
"readview": "v1default",
"writeview": "none",
"notifyview": "*tv.FFFF58bf.eaFF58bf.eaFFFFFF.F",
"row_status": {"status": "active"},
},
4: {
"groupname": "ag-ro",
"sec_model": "v3 auth",
"contextname": "none",
"storage_type": "nonvolatile",
"readview": "v1default",
"writeview": "none",
"notifyview": "none",
"row_status": {"status": "active"},
},
5: {
"groupname": "ag-ro",
"sec_model": "v3 priv",
"contextname": "none",
"storage_type": "nonvolatile",
"readview": "v1default",
"writeview": "none",
"notifyview": "none",
"row_status": {"status": "active"},
},
6: {
"groupname": "ag-rw",
"sec_model": "v2c",
"contextname": "none",
"storage_type": "volatile",
"readview": "v1default",
"writeview": "v1default",
"notifyview": "none",
"row_status": {"status": "active", "access_list": "snmp-servers"},
},
7: {
"groupname": "IMI",
"sec_model": "v2c",
"contextname": "none",
"storage_type": "permanent",
"readview": "*ilmi",
"writeview": "*ilmi",
"notifyview": "none",
"row_status": {"status": "active"},
},
8: {
"groupname": "AlfaV",
"sec_model": "v2c",
"contextname": "none",
"storage_type": "permanent",
"readview": "v1default",
"writeview": "none",
"notifyview": "none",
"row_status": {"status": "active", "access_list": "90"},
},
9: {
"groupname": "ag-rw",
"sec_model": "v1",
"readview": "v1default",
"writeview": "v1default",
"notifyview": "none",
"row_status": {"status": "active", "access_list": "snmp-servers"},
},
10: {
"groupname": "2c",
"sec_model": "v2c",
"readview": "none",
"writeview": "none",
"notifyview": "*tv.FFFF58bf.eaFF58bf.eaFFFFFF.F",
"row_status": {"status": "active"},
},
}
|
examples/point_transform/sketch_point_transform.py | hishamsajid/vsketch | 221 | 12618366 | import vsketch
class PointTransformSketch(vsketch.SketchClass):
def draw(self, vsk: vsketch.Vsketch) -> None:
vsk.size("a4", landscape=False)
vsk.scale("1mm")
with vsk.pushMatrix():
for _ in range(40):
vsk.rotate(2, degrees=True)
vsk.scale(0.95)
vsk.point(-75, 75)
vsk.point(0, 75)
vsk.point(75, 75)
vsk.point(75, 0)
vsk.point(75, -75)
vsk.point(0, -75)
vsk.point(-75, -75)
vsk.point(-75, 0)
with vsk.pushMatrix():
vsk.rotate(80, degrees=True)
vsk.scale(0.95 ** 40)
vsk.square(0, 0, 150, mode="center")
def finalize(self, vsk: vsketch.Vsketch) -> None:
vsk.vpype("linemerge linesimplify reloop linesort")
if __name__ == "__main__":
PointTransformSketch.display()
|
lhotse/features/kaldi/__init__.py | stachu86/lhotse | 353 | 12618371 | from .extractors import KaldiFbank, KaldiFbankConfig, KaldiMfcc, KaldiMfccConfig
from .layers import Wav2FFT, Wav2LogFilterBank, Wav2LogSpec, Wav2MFCC, Wav2Spec, Wav2Win
|
src/Launcher.py | codexgigassys/codex-backend | 161 | 12618391 | # Copyright (C) 2016 Deloitte Argentina.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
# Funciones para realizar los analisis
import os
import time
from czipfile import ZipFile
from Cataloger import Cataloger
from Processors.ProcessorFactory import *
from PackageControl.PackageController import *
from VersionControl.VersionController import *
from MetaControl.MetaController import *
from Utils.TimeLogger import TimeLogger
from Sample import *
import logging
from env import envget
from pymongo import MongoClient
import gridfs
from Utils.test import test
import time
class Launcher():
def __init__(self):
formato = '[%(asctime)-15s][%(levelname)s] %(message)s'
path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
logfile = os.path.join(path, "launcher.log")
logging.basicConfig(
format=formato, filename=logfile, level=logging.INFO)
self.vc = VersionController()
self.pc = PackageController()
self.mdc = MetaController()
def launchOnlyHashingByID(self, sample):
sample.setPackageController(self.pc)
sample.setMetaController(self.mdc)
sample.setVersionController(self.vc)
category = sample.getCategory()
if(category is None):
category = Cataloger().catalog(sample.getBinary())
logging.debug(
"Category not found in DB, categorized as %s", str(category))
else:
logging.debug(
"Category found in DB, categorized as %s", str(category))
processor = ProcessorFactory().getHashProcessor(category, sample)
result_dic = processor.process()
result_version = processor.getVersion()
if(len(result_version) > 0):
logging.debug("Updating metadata")
if(self.mdc.write(sample.getID(), result_dic) != 0):
logging.error(
"Error writing Metadata to DB, sample:%s", sample.getID())
return -1
logging.debug("Metadata writed in DB")
self.vc.updateVersion(sample.getID(), result_version)
logging.debug("Versions writed to DB")
else:
logging.debug("Nothing to update")
logging.debug("Analysis Finished OK")
return 0
def launchAnalysisByID(self, sample):
logging.debug("Launching Analysis on sample:%s", sample.getID())
sample.setPackageController(self.pc)
sample.setMetaController(self.mdc)
sample.setVersionController(self.vc)
category = sample.getCategory()
if(category is None):
category = Cataloger().catalog(sample.getBinary())
logging.debug(
"Category not found in DB, categorized as %s", str(category))
else:
logging.debug(
"Category found in DB, categorized as %s", str(category))
processor = ProcessorFactory().createProcessor(category, sample)
result_dic = processor.process()
result_version = processor.getVersion()
if(len(result_version) > 0):
logging.debug("Updating metadata")
if(self.mdc.write(sample.getID(), result_dic) != 0):
logging.error(
"Error writing Metadata to DB, sample:%s", sample.getID())
return -1
logging.debug("Metadata writed in DB")
self.vc.updateVersion(sample.getID(), result_version)
logging.debug("Versions writed to DB")
else:
logging.debug("Nothing to update")
logging.debug("Analysis Finished OK")
return 0
# ****************TEST_CODE******************
def testCode():
from Utils.Functions import recursive_read
object = "./Test_files/"
files = recursive_read(object)
if(files is None):
sys.exit()
lc = Launcher()
for fp in files:
fd = open(fp, 'r')
data = fd.read()
file_id = hashlib.sha1(data).hexdigest()
print("%s %s" % (fp, file_id))
lc.launchFileAnalitics((fp, data))
print("")
print("")
# -----------------------------------------------
def testCode2():
object = "../processed/VirusShare_00000.zip"
# opening zipped package
fd = open(object, 'r')
zf = ZipFile(fd)
names = zf.namelist() # name of compressed files
lc = Launcher()
count = 0
reset = 0
for filename in names:
# print(filename)
data = zf.read(filename, "infected")
lc.launchFileAnalitics((filename, data))
reset += 1
count += 1
if(reset >= 1000):
print(str(count) + " processed")
reset = 0
print(str(count) + " processed")
# ----------------------------------------------
def testCode3():
object = "../DB/packages/fileindex"
# opening the index
fd = open(object, 'r')
lc = Launcher()
count = 0
reset = 0
while True:
# start=time.time()
rl = fd.readline()
if(rl == ""):
break
data = rl.strip().split('|')
# print(data)
fd2 = open("../DB/packages/" +
str(data[1]) + "/p" + str(data[2]) + ".index")
fd2.seek(int(data[3]))
rl2 = fd2.readline()
data1 = rl2.strip().split('|')
# print(data1)
fd3 = open("../DB/packages/" +
str(data[1]) + "/p" + str(data[2]) + ".paq")
fd3.seek(int(data1[1]))
datafin = fd3.read(int(data1[2]))
# end=time.time()
# print("search :"+str((end-start)*10000))
# start=time.time()
lc.launchFileAnalitics((data[0], datafin))
# end=time.time()
# print("analize :"+str((end-start)*10000))
# print("")
reset += 1
count += 1
if(reset >= 1000):
print(str(count) + " processed")
reset = 0
print(str(count) + " processed")
# ----------------------------------------------
def testCode4():
inicio = 10569000
client = MongoClient(envget('files.host'), envget('files.port'))
db = client[envget('db_files_name')]
fs = gridfs.GridFS(db)
res = fs.find(timeout=False).skip(inicio)
lc = Launcher()
count = inicio
reset = 0
for f in res:
data = f.read()
# print(f.filename,count)
lc.launchFileAnalitics((f.filename, data))
reset += 1
count += 1
if(reset >= 1000):
print(str(count) + " processed")
reset = 0
print(str(count) + " processed")
# ----------------------------------------------
def testCode5():
lc = Launcher()
sample = Sample()
sample.setID("0358ab4e8595db846b709cf85d7b397d92230bef")
# sample.setID("223e8761fbb93458140a3592096109501927ff64")
sample.setStorageVersion({})
lc.launchAnalysisByID(sample)
# print(sample.getCalculatedMetadata().getData())
# print(sample.getCalculatedVersion())
# print(sample.getStorageVersion())
# ----------------------------------------------
def testCode6():
inicio = 0
client = MongoClient(envget('files.host'), envget('files.port'))
db = client[envget('db_files_name')]
fs = gridfs.GridFS(db)
res = fs.find(timeout=False).skip(inicio)
lc = Launcher()
count = inicio
reset = 0
start = time.time()
first = True
for f in res:
sam_id = f.filename
sample = Sample()
sample.setID(sam_id)
sample.setStorageVersion({})
lc.launchAnalysisByID(sample)
reset += 1
count += 1
if(reset >= 1000):
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())
) + " processed:" + str(count / 1000) + "K")
reset = 0
print(str(count) + " processed")
# ****************TEST_EXECUTE******************
test("-test_Launcher", testCode6)
|
release/stubs.min/System/Windows/Forms/__init___parts/SplitterCancelEventArgs.py | htlcnn/ironpython-stubs | 182 | 12618405 | <reponame>htlcnn/ironpython-stubs
class SplitterCancelEventArgs(CancelEventArgs):
"""
Provides data for splitter events.
SplitterCancelEventArgs(mouseCursorX: int,mouseCursorY: int,splitX: int,splitY: int)
"""
@staticmethod
def __new__(self,mouseCursorX,mouseCursorY,splitX,splitY):
""" __new__(cls: type,mouseCursorX: int,mouseCursorY: int,splitX: int,splitY: int) """
pass
MouseCursorX=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the X coordinate of the mouse pointer in client coordinates.
Get: MouseCursorX(self: SplitterCancelEventArgs) -> int
"""
MouseCursorY=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the Y coordinate of the mouse pointer in client coordinates.
Get: MouseCursorY(self: SplitterCancelEventArgs) -> int
"""
SplitX=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the X coordinate of the upper left corner of the System.Windows.Forms.SplitContainer in client coordinates.
Get: SplitX(self: SplitterCancelEventArgs) -> int
Set: SplitX(self: SplitterCancelEventArgs)=value
"""
SplitY=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the Y coordinate of the upper left corner of the System.Windows.Forms.SplitContainer in client coordinates.
Get: SplitY(self: SplitterCancelEventArgs) -> int
Set: SplitY(self: SplitterCancelEventArgs)=value
"""
|