max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M)
---|---|---|---|---|
openapi_python_client/schema/openapi_schema_pydantic/security_requirement.py | oterrier/openapi-python-client | 172 | 12646381 |
from typing import Dict, List
SecurityRequirement = Dict[str, List[str]]
"""
Lists the required security schemes to execute this operation.
The name used for each property MUST correspond to a security scheme declared in the
[Security Schemes](#componentsSecuritySchemes) under the [Components Object](#componentsObject).
Security Requirement Objects that contain multiple schemes require that
all schemes MUST be satisfied for a request to be authorized.
This enables support for scenarios where multiple query parameters or HTTP headers
are required to convey security information.
When a list of Security Requirement Objects is defined on the
[OpenAPI Object](#oasObject) or [Operation Object](#operationObject),
only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.
References:
- https://swagger.io/docs/specification/authentication/
"""
|
mpunet/evaluate/loss_functions.py | alexsosn/MultiPlanarUNet | 156 | 12646384 | import tensorflow as tf
from tensorflow.python.keras.losses import LossFunctionWrapper
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
OBS: Code implemented by Tensorflow
# Arguments
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
# Returns
A tensor.
"""
x = tf.convert_to_tensor(x)
if x.dtype != dtype:
x = tf.cast(x, dtype)
return x
def _get_shapes_and_one_hot(y_true, y_pred):
shape = y_pred.get_shape()
n_classes = shape[-1]
# Squeeze dim -1 if it is == 1, otherwise leave it
dims = tf.cond(tf.equal(y_true.shape[-1] or -1, 1), lambda: tf.shape(y_true)[:-1], lambda: tf.shape(y_true))
y_true = tf.reshape(y_true, dims)
y_true = tf.one_hot(tf.cast(y_true, tf.uint8), depth=n_classes)
return y_true, shape, n_classes
def sparse_jaccard_distance_loss(y_true, y_pred, smooth=1):
"""
Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
The Jaccard distance loss is useful for unbalanced datasets. This has been
shifted so it converges on 0 and is smoothed to avoid exploding or vanishing
gradients.
Approximates the class-wise jaccard distance computed per-batch element
across spatial image dimensions. Returns the 1 - mean(per_class_distance)
for each batch element.
:param y_true:
:param y_pred:
:param smooth:
:return:
Ref: https://en.wikipedia.org/wiki/Jaccard_index
@url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
@author: wassname
"""
y_true, shape, n_classes = _get_shapes_and_one_hot(y_true, y_pred)
reduction_dims = range(len(shape))[1:-1]
intersection = tf.reduce_sum(y_true * y_pred, axis=reduction_dims)
sum_ = tf.reduce_sum(y_true + y_pred, axis=reduction_dims)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return 1.0 - tf.reduce_mean(jac, axis=-1, keepdims=True)
class SparseJaccardDistanceLoss(LossFunctionWrapper):
""" tf reduction wrapper for sparse_jaccard_distance_loss """
def __init__(self,
reduction,
smooth=1,
name='sparse_jaccard_distance_loss',
**kwargs):
super(SparseJaccardDistanceLoss, self).__init__(
sparse_jaccard_distance_loss,
name=name,
reduction=reduction,
smooth=smooth
)
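# A minimal usage sketch (illustrative, not part of the original module): the loss
# expects sparse integer labels and per-class probabilities in the last dimension.
# `model` is a hypothetical tf.keras model.
# loss_fn = SparseJaccardDistanceLoss(reduction=tf.keras.losses.Reduction.AUTO, smooth=1)
# model.compile(optimizer="adam", loss=loss_fn)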
def sparse_dice_loss(y_true, y_pred, smooth=1):
"""
Approximates the class-wise dice coefficient computed per-batch element
across spatial image dimensions. Returns the 1 - mean(per_class_dice) for
each batch element.
:param y_true:
:param y_pred:
:param smooth:
:return:
"""
y_true, shape, n_classes = _get_shapes_and_one_hot(y_true, y_pred)
reduction_dims = range(len(shape))[1:-1]
intersection = tf.reduce_sum(y_true * y_pred, axis=reduction_dims)
union = tf.reduce_sum(y_true + y_pred, axis=reduction_dims)
dice = (2 * intersection + smooth) / (union + smooth)
return 1.0 - tf.reduce_mean(dice, axis=-1, keepdims=True)
class SparseDiceLoss(LossFunctionWrapper):
""" tf reduction wrapper for sparse_dice_loss """
def __init__(self,
reduction,
smooth=1,
name='sparse_dice_loss',
**kwargs):
super(SparseDiceLoss, self).__init__(
sparse_dice_loss,
name=name,
reduction=reduction,
smooth=smooth
)
def sparse_exponential_logarithmic_loss(y_true, y_pred, gamma_dice,
gamma_cross, weight_dice,
weight_cross):
"""
TODO
:param y_true:
:param y_pred:
:param smooth:
:return:
"""
y_true, shape, n_classes = _get_shapes_and_one_hot(y_true, y_pred)
reduction_dims = range(len(shape))[1:-1]
# Clip for numerical stability
_epsilon = _to_tensor(10e-8, y_pred.dtype.base_dtype)
y_pred = tf.clip_by_value(y_pred, _epsilon, 1. - _epsilon)
# Compute exp log dice
intersect = 2 * tf.reduce_sum(y_true * y_pred, axis=reduction_dims) + 1
union = tf.reduce_sum(y_true + y_pred, axis=reduction_dims) + 1
exp_log_dice = tf.math.pow(-tf.math.log(intersect/union), gamma_dice)
mean_exp_log_dice = tf.reduce_mean(exp_log_dice, axis=-1, keepdims=True)
# Compute exp cross entropy
entropy = tf.reduce_sum(y_true * -tf.math.log(y_pred), axis=-1, keepdims=True)
exp_entropy = tf.reduce_mean(tf.math.pow(entropy, gamma_cross), axis=reduction_dims)
# Compute output
res = weight_dice*mean_exp_log_dice + weight_cross*exp_entropy
return res
class SparseExponentialLogarithmicLoss(LossFunctionWrapper):
"""
https://link.springer.com/content/pdf/10.1007%2F978-3-030-00931-1_70.pdf
"""
def __init__(self, reduction, gamma_dice=0.3, gamma_cross=0.3,
weight_dice=1, weight_cross=1,
name="sparse_exponential_logarithmic_loss"):
super(SparseExponentialLogarithmicLoss, self).__init__(
sparse_exponential_logarithmic_loss,
name=name,
reduction=reduction,
gamma_dice=gamma_dice,
gamma_cross=gamma_cross,
weight_dice=weight_dice,
weight_cross=weight_cross
)
def sparse_focal_loss(y_true, y_pred, gamma, class_weights):
"""
TODO
:param y_true:
:param y_pred:
:param smooth:
:return:
"""
y_true, shape, n_classes = _get_shapes_and_one_hot(y_true, y_pred)
reduction_dims = range(len(shape))[1:-1]
# Clip for numerical stability
_epsilon = _to_tensor(10e-8, y_pred.dtype.base_dtype)
y_pred = tf.clip_by_value(y_pred, _epsilon, 1. - _epsilon)
if class_weights is None:
class_weights = [1] * n_classes
# Compute the focal loss
entropy = tf.math.log(y_pred)
modulator = tf.math.pow((1 - y_pred), gamma)
loss = -tf.reduce_sum(class_weights * y_true * modulator * entropy, axis=-1, keepdims=True)
return tf.reduce_mean(loss, axis=reduction_dims)
class SparseFocalLoss(LossFunctionWrapper):
"""
https://arxiv.org/pdf/1708.02002.pdf
"""
def __init__(self, reduction, gamma=2,
class_weights=None, name="sparse_focal_loss"):
super(SparseFocalLoss, self).__init__(
sparse_focal_loss,
name=name,
reduction=reduction,
gamma=gamma,
class_weights=class_weights
)
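# A minimal usage sketch (illustrative, not part of the original module); the class
# weights are hypothetical and should contain one entry per class:
# loss_fn = SparseFocalLoss(reduction=tf.keras.losses.Reduction.AUTO, gamma=2,
#                           class_weights=[0.25, 0.75, 1.0])
# model.compile(optimizer="adam", loss=loss_fn)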
def sparse_generalized_dice_loss(y_true, y_pred, type_weight):
"""
Function to calculate the Generalised Dice Loss defined in
<NAME> et al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017
"""
y_true, shape, n_classes = _get_shapes_and_one_hot(y_true, y_pred)
reduction_dims = range(len(shape))[1:-1]
ref_vol = tf.reduce_sum(y_true, axis=reduction_dims)
intersect = tf.reduce_sum(y_true * y_pred, axis=reduction_dims)
seg_vol = tf.reduce_sum(y_pred, axis=reduction_dims)
if type_weight.lower() == 'square':
weights = tf.math.reciprocal(tf.math.square(ref_vol))
elif type_weight.lower() == 'simple':
weights = tf.math.reciprocal(ref_vol)
elif type_weight.lower() == 'uniform':
weights = tf.ones_like(ref_vol)
else:
raise ValueError("The variable type_weight \"{}\""
"is not defined.".format(type_weight))
# Make array of new weight in which infinite values are replaced by
# ones.
new_weights = tf.where(tf.math.is_inf(weights),
tf.zeros_like(weights),
weights)
# Set final weights as either original weights or highest observed
# non-infinite weight
weights = tf.where(tf.math.is_inf(weights), tf.ones_like(weights) *
tf.reduce_max(new_weights), weights)
# calculate generalized dice score
eps = 1e-6
numerator = 2 * tf.multiply(weights, intersect)
denom = tf.multiply(weights, seg_vol + ref_vol) + eps
generalised_dice_score = numerator / denom
return 1 - tf.reduce_mean(generalised_dice_score, axis=-1, keepdims=True)
class SparseGeneralizedDiceLoss(LossFunctionWrapper):
"""
Based on implementation in NiftyNet at:
http://niftynet.readthedocs.io/en/dev/_modules/niftynet/layer/
loss_segmentation.html#generalised_dice_loss
Class based to allow passing of parameters to the function at construction
time in keras.
"""
def __init__(self, reduction, type_weight="Square",
name='sparse_generalized_dice_loss'):
super(SparseGeneralizedDiceLoss, self).__init__(
sparse_generalized_dice_loss,
name=name,
reduction=reduction,
type_weight=type_weight
)
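# Illustrative construction (not part of the original module); type_weight must be
# one of "Square", "Simple" or "Uniform" (case-insensitive), matching the branches above:
# loss_fn = SparseGeneralizedDiceLoss(reduction=tf.keras.losses.Reduction.AUTO,
#                                     type_weight="Uniform")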
# Aliases
SparseExpLogDice = SparseExponentialLogarithmicLoss
|
objectModel/Python/tests/persistence/test_persistence_layer.py | rt112000/CDM | 884 | 12646404 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import asyncio
import datetime
import multiprocessing
import os
import unittest
from cdm.enums import CdmStatusLevel, CdmObjectType, CdmLogCode
from cdm.objectmodel import CdmCorpusDefinition, CdmManifestDefinition
from cdm.storage import LocalAdapter
from tests.common import async_test, TestHelper
from tests.mock_storage_adapter import MockStorageAdapter
from cdm.storage.syms import SymsAdapter
from tests.syms_test_helper import SymsTestHelper
class PersistenceLayerTest(unittest.TestCase):
test_subpath = os.path.join('Persistence', 'PersistenceLayer')
@async_test
async def test_invalid_json(self):
test_input_path = TestHelper.get_input_folder_path(self.test_subpath, 'test_invalid_json')
corpus = CdmCorpusDefinition()
corpus.storage.mount('local', LocalAdapter(test_input_path))
corpus.storage.default_namespace = 'local'
invalid_manifest = None
try:
invalid_manifest = await corpus.fetch_object_async('local:/invalidManifest.manifest.cdm.json')
except Exception as e:
self.fail('Error should not be thrown when input json is invalid.')
self.assertIsNone(invalid_manifest)
@async_test
async def test_loading_invalid_model_json_name(self):
test_input_path = TestHelper.get_input_folder_path(self.test_subpath, 'test_loading_invalid_model_json_name')
corpus = CdmCorpusDefinition()
corpus.storage.mount('local', LocalAdapter(test_input_path))
corpus.storage.default_namespace = 'local'
# We are trying to load a file with an invalid name, so fetch_object_async should just return None.
invalid_model_json = await corpus.fetch_object_async('test.model.json')
self.assertIsNone(invalid_model_json)
@async_test
async def test_saving_invalid_model_json_name(self):
corpus = CdmCorpusDefinition()
corpus.ctx.report_at_level = CdmStatusLevel.WARNING
corpus.storage.unmount('cdm')
corpus.storage.default_namespace = 'local'
manifest = CdmManifestDefinition(corpus.ctx, 'manifest')
corpus.storage.fetch_root_folder('local').documents.append(manifest)
all_docs = {} # type: Dict[str, str]
test_adapter = MockStorageAdapter(all_docs)
corpus.storage._set_adapter('local', test_adapter)
new_manifest_from_model_json_name = 'my.model.json'
await manifest.save_as_async(new_manifest_from_model_json_name, True)
# TODO: because we can load documents properly now, save_as_async returns false. Will check the value returned from save_as_async() when the problem is solved
self.assertFalse('/' + new_manifest_from_model_json_name in all_docs)
@async_test
async def test_model_json_type_attribute_persistence(self):
corpus = TestHelper.get_local_corpus(self.test_subpath, 'TestModelJsonTypeAttributePersistence')
# we need to create a second adapter to the output folder to fool the OM into thinking it's different
# this is because there is a bug currently that prevents us from saving and then loading a model.json
corpus.storage.mount('alternateOutput', LocalAdapter(TestHelper.get_actual_output_folder_path(self.test_subpath, 'TestModelJsonTypeAttributePersistence')))
# create manifest
entity_name = 'TestTypeAttributePersistence'
local_root = corpus.storage.fetch_root_folder('local')
output_root = corpus.storage.fetch_root_folder('output')
manifest = corpus.make_object(CdmObjectType.MANIFEST_DEF, 'tempAbstract') # type: CdmManifestDefinition
manifest.imports.append('cdm:/foundations.cdm.json', None)
local_root.documents.append(manifest)
# create entity
doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, entity_name + '.cdm.json') # type: CdmManifestDefinition
doc.imports.append('cdm:/foundations.cdm.json', None)
local_root.documents.append(doc, doc.name)
entity_def = doc.definitions.append(entity_name, CdmObjectType.ENTITY_DEF) # type: CdmEntityDeclarationDefinition
# create type attribute
cdm_type_attribute_definition = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, entity_name, False) # type: CdmTypeAttributeDefinition
cdm_type_attribute_definition.is_read_only = True
entity_def.attributes.append(cdm_type_attribute_definition)
manifest.entities.append(entity_def)
manifest_resolved = await manifest.create_resolved_manifest_async('default', None)
output_root.documents.append(manifest_resolved)
manifest_resolved.imports.append('cdm:/foundations.cdm.json')
await manifest_resolved.save_as_async('model.json', True)
new_manifest = await corpus.fetch_object_async('alternateOutput:/model.json') # type: CdmManifestDefinition
new_ent = await corpus.fetch_object_async(new_manifest.entities[0].entity_path, manifest) # type: CdmEntityDefinition
type_attribute = new_ent.attributes[0]
self.assertTrue(type_attribute.is_read_only)
@async_test
async def test_missing_persistence_format(self):
expected_log_codes = { CdmLogCode.ERR_PERSIST_CLASS_MISSING }
corpus = TestHelper.get_local_corpus(self.test_subpath, 'TestMissingPersistenceFormat', expected_codes=expected_log_codes) # type: CdmCorpusDefinition
folder = corpus.storage.fetch_root_folder(corpus.storage.default_namespace) # type: CdmFolderDefinition
manifest = corpus.make_object(CdmObjectType.MANIFEST_DEF, 'someManifest') # type: CdmManifestDefinition
folder.documents.append(manifest)
# trying to save to an unsupported format should return false and not fail
succeeded = await manifest.save_as_async('manifest.unSupportedExtension')
self.assertFalse(succeeded)
async def run_syms_save_manifest(self, manifest: CdmManifestDefinition):
self.assertTrue(await manifest.save_as_async('syms:/{}/{}.manifest.cdm.json'.format(manifest.manifest_name, manifest.manifest_name)))
async def run_syms_fetch_manifest(self, corpus: CdmCorpusDefinition, manifest_expected: 'CdmManifestDefinition', filename: str, threadnumber:str = ''):
manifest_read_databases = await corpus.fetch_object_async('syms:/databases.manifest.cdm.json')
self.assertIsNotNone(manifest_read_databases)
self.assertEqual('databases.manifest.cdm.json', manifest_read_databases.manifest_name)
if not any(db.manifest_name == manifest_expected.manifest_name for db in
manifest_read_databases.sub_manifests):
self.fail('Database {} does not exist'.format(manifest_expected.manifest_name))
manifest_actual = await corpus.fetch_object_async('syms:/{}/{}.manifest.cdm.json'.format(manifest_expected.manifest_name, manifest_expected.manifest_name),
manifest_read_databases, None, True)
await manifest_actual.save_as_async('localActOutput:/{}{}'.format(filename, threadnumber))
await manifest_expected.save_as_async('localExpOutput:/{}{}'.format(filename, threadnumber))
actual_content = TestHelper.get_actual_output_data(self.test_subpath, 'TestSymsSavingAndFetchingDocument',
filename)
expected_content = TestHelper.get_expected_output_data(self.test_subpath, 'TestSymsSavingAndFetchingDocument',
filename)
ret = TestHelper.compare_same_object(actual_content, expected_content)
if ret != '':
self.fail(ret)
async def run_syms_fetch_document(self, corpus: 'CdmCorpusDefinition', manifest_expected: 'CdmManifestDefinition'):
for ent in manifest_expected.entities:
doc = await corpus.fetch_object_async('syms:/{}/{}.cdm.json'.format(manifest_expected.manifest_name, ent.entity_name))
self.assertIsNotNone(doc)
self.assertTrue(doc.name == '{}.cdm.json'.format(ent.entity_name))
async def run_syms_smart_adls_adapter_mount_logic(self):
syms_adapter = SymsTestHelper.create_adapter_with_clientid()
corpus = CdmCorpusDefinition()
corpus.storage.mount('syms', syms_adapter)
adls_adapter1 = SymsTestHelper.create_adapter_clientid_with_shared_key(1)
adls_adapter2 = SymsTestHelper.create_adapter_clientid_with_shared_key(2)
count_adapter_count_before = len(corpus.storage.namespace_adapters)
manifest_read_databases = await corpus.fetch_object_async('syms:/databases.manifest.cdm.json')
manifest = await corpus.fetch_object_async('syms:/{}/{}.manifest.cdm.json'.format(manifest_read_databases.sub_manifests[0].manifest_name,
manifest_read_databases.sub_manifests[0].manifest_name),
manifest_read_databases, None, True)
count_adapter_count_after = len(corpus.storage.namespace_adapters)
self.assertEqual(count_adapter_count_before + 2, count_adapter_count_after)
self.assertIsNotNone(corpus.storage.adapter_path_to_corpus_path('https://{}{}'.format(adls_adapter1.hostname, adls_adapter1.root)))
self.assertIsNotNone(corpus.storage.adapter_path_to_corpus_path('https://{}{}'.format(adls_adapter2.hostname, adls_adapter2.root)))
@async_test
@unittest.skipIf(SymsTestHelper.if_syms_run_tests_flag_not_set(), 'SYMS environment variables not set up')
async def test_syms_saving_and_fetching_document(self):
syms_adapter = SymsTestHelper.create_adapter_with_clientid()
await SymsTestHelper.clean_database(syms_adapter, SymsTestHelper.DATABASE_NAME)
test_input_path = TestHelper.get_input_folder_path(self.test_subpath,'TestSymsSavingAndFetchingDocument')
test_act_output_path = TestHelper.get_actual_output_folder_path(self.test_subpath, 'TestSymsSavingAndFetchingDocument')
test_exp_output_path = TestHelper.get_expected_output_folder_path(self.test_subpath, 'TestSymsSavingAndFetchingDocument')
corpus = CdmCorpusDefinition()
adls_adapter1 = SymsTestHelper.create_adapter_clientid_with_shared_key(1)
adls_adapter2 = SymsTestHelper.create_adapter_clientid_with_shared_key(2)
local_input_adapter = LocalAdapter(test_input_path)
local_act_output_adapter = LocalAdapter(test_act_output_path)
local_exp_output_adapter = LocalAdapter(test_exp_output_path)
corpus.storage.mount('adls1', adls_adapter1)
corpus.storage.mount('adls2', adls_adapter2)
corpus.storage.mount('syms', syms_adapter)
corpus.storage.mount('localInput', local_input_adapter)
corpus.storage.mount('localActOutput', local_act_output_adapter)
corpus.storage.mount('localExpOutput', local_exp_output_adapter)
corpus.storage.unmount('cdm')
corpus.storage.default_namespace = 'localInput'
manifest = await corpus.fetch_object_async('default.manifest.cdm.json')
manifest.manifest_name = SymsTestHelper.DATABASE_NAME
await self.run_syms_save_manifest(manifest)
await self.run_syms_fetch_manifest(corpus, manifest, 'default.manifest.cdm.json')
await self.run_syms_fetch_document(corpus, manifest)
manifest_modified = await corpus.fetch_object_async('defaultmodified.manifest.cdm.json')
manifest_modified.manifest_name = SymsTestHelper.DATABASE_NAME
manifest_modified.entities[0].set_last_file_modified_time(datetime.datetime.now(datetime.timezone.utc))
await self.run_syms_save_manifest(manifest_modified)
await self.run_syms_fetch_manifest(corpus, manifest_modified, 'defaultmodified.manifest.cdm.json')
await self.run_syms_fetch_document(corpus, manifest_modified)
await self.run_syms_smart_adls_adapter_mount_logic()
await SymsTestHelper.clean_database(syms_adapter, SymsTestHelper.DATABASE_NAME)
|
nnutils/train_utils.py | NVlabs/UMR | 184 | 12646440 |
# -----------------------------------------------------------
# Code adapted from:
# https://github.com/akanazawa/cmr/blob/master/nnutils/train_utils.py
#
# MIT License
#
# Copyright (c) 2018 akanazawa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------
# Generic Training Utils.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import os
import os.path as osp
import time
import pdb
from absl import flags
from ..utils.tf_visualizer import Visualizer as TfVisualizer
import torchvision.utils as vutils
#-------------- flags -------------#
#----------------------------------#
## Flags for training
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'cachedir')
flags.DEFINE_string('name', 'exp_name', 'Experiment Name')
flags.DEFINE_integer('gpu_id', 0, 'Which gpu to use')
flags.DEFINE_integer('optim_bs', 1, 'Perform parameter update every optim_bs iterations')
flags.DEFINE_integer('num_epochs', 500, 'Number of epochs to train')
flags.DEFINE_integer('num_pretrain_epochs', 0, 'If >0, we will pretain from an existing saved model.')
flags.DEFINE_float('learning_rate', 1e-4, 'learning rate')
flags.DEFINE_float('beta1', 0.9, 'Momentum term of adam')
flags.DEFINE_bool('use_sgd', False, 'if true uses sgd instead of adam, beta1 is used as momentum')
flags.DEFINE_bool('multi_gpu', False, 'if true use multiple GPUs')
flags.DEFINE_integer('num_iter', 0, 'Number of training iterations. 0 -> Use epoch_iter')
## Flags for logging and snapshotting
flags.DEFINE_string('checkpoint_dir', osp.join(cache_path, 'snapshots'),
'Root directory for output files')
flags.DEFINE_string('vis_dir', osp.join(cache_path, 'visualization'),
'Root directory for visualizations')
flags.DEFINE_integer('print_freq', 20, 'scalar logging frequency')
flags.DEFINE_integer('save_latest_freq', 3000, 'save latest model every x iterations')
flags.DEFINE_integer('save_epoch_freq', 301, 'save model every k epochs')
flags.DEFINE_integer('lr_step_epoch_freq', 5, 'Reduce LR by factor of 10 every k epochs')
flags.DEFINE_integer('batch_size', 64, 'Size of minibatches')
flags.DEFINE_integer('workers', 16, 'dataloader worker number')
## Flags for visualization
flags.DEFINE_integer('display_freq', 100, 'visuals logging frequency')
flags.DEFINE_integer('min_display_iter', 400, 'Skip plotting for initial iterations')
flags.DEFINE_boolean('display_visuals', True, 'whether to display images')
flags.DEFINE_boolean('print_scalars', True, 'whether to print scalars')
flags.DEFINE_boolean('plot_scalars', True, 'whether to plot scalars')
flags.DEFINE_boolean('is_train', True, 'Are we training?')
#--------- training class ---------#
#----------------------------------#
class Trainer():
def __init__(self, opts):
self.opts = opts
self.gpu_id = opts.gpu_id
torch.cuda.set_device(opts.gpu_id)
self.Tensor = torch.cuda.FloatTensor if (self.gpu_id is not None) else torch.Tensor
self.invalid_batch = False #the trainer can optionally reset this every iteration during set_input call
self.save_dir = os.path.join(opts.checkpoint_dir, opts.name)
self.vis_dir = os.path.join(opts.vis_dir, opts.name)
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
if not os.path.exists(self.vis_dir):
os.makedirs(self.vis_dir)
log_file = os.path.join(self.save_dir, 'opts.log')
self.sc_dict = {}
with open(log_file, 'w') as f:
for k in dir(opts):
f.write('{}: {}\n'.format(k, opts.__getattr__(k)))
# helper saving function that can be used by subclasses
def save_network(self, network, network_label, epoch_label, gpu_id=None):
save_filename = '{}_net_{}.pth'.format(network_label, epoch_label)
save_path = os.path.join(self.save_dir, save_filename)
if(self.opts.multi_gpu):
torch.save(network.module.cpu().state_dict(), save_path)
else:
torch.save(network.cpu().state_dict(), save_path)
if gpu_id is not None and torch.cuda.is_available():
network.cuda(device=gpu_id)
return
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label, network_dir=None):
print('Loading model')
save_filename = '{}_net_{}.pth'.format(network_label, epoch_label)
if network_dir is None:
network_dir = self.save_dir
save_path = os.path.join(network_dir, save_filename)
network.load_state_dict(torch.load(save_path))
return
def define_model(self):
'''Should be implemented by the child class.'''
raise NotImplementedError
def init_dataset(self):
'''Should be implemented by the child class.'''
raise NotImplementedError
def define_criterion(self):
'''Should be implemented by the child class.'''
raise NotImplementedError
def set_input(self, batch):
'''Should be implemented by the child class.'''
raise NotImplementedError
def forward(self):
'''Should compute self.total_loss. To be implemented by the child class.'''
raise NotImplementedError
def save(self, epoch_prefix):
'''Saves the model.'''
self.save_network(self.model, 'pred', epoch_prefix, gpu_id=self.opts.gpu_id)
return
def get_current_visuals(self):
return {}
def get_current_scalars(self):
return self.sc_dict
def register_scalars(self, sc_dict, beta=0.99):
'''
Keeps a running smoothed average of some scalars.
'''
for k in sc_dict:
if k not in self.sc_dict:
self.sc_dict[k] = sc_dict[k]
else:
self.sc_dict[k] = beta*self.sc_dict[k] + (1-beta)*sc_dict[k]
def get_current_points(self):
return {}
def init_training(self):
opts = self.opts
self.iteration_num = 0
self.init_dataset()
self.define_model()
self.define_criterion()
if opts.use_sgd:
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=opts.learning_rate, momentum=opts.beta1)
else:
param_list = list(self.model.parameters())
if(opts.use_gan):
param_list = param_list + list(self.discriminator.parameters())
self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, param_list),
lr=opts.learning_rate, betas=(opts.beta1, 0.999))
def adjust_learning_rate(self, optimizer=None):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if(optimizer is None):
optimizer = self.optimizer
for param_group in optimizer.param_groups:
param_group['lr'] = self.opts.learning_rate / (1+self.iteration_num*5e-4)
def train(self):
opts = self.opts
self.visualizer = TfVisualizer(opts)
self.smoothed_total_loss = 0
visualizer = self.visualizer
total_steps = 0
optim_steps = 0
dataset_size = len(self.dataloader)
for epoch in range(opts.num_pretrain_epochs, opts.num_epochs):
epoch_iter = 0
self.curr_epoch = epoch
for i, batch in enumerate(self.dataloader):
self.iteration_num += 1
self.adjust_learning_rate()
t_init = time.time()
self.set_input(batch)
t_batch = time.time()
if not self.invalid_batch:
optim_steps += 1
if optim_steps % opts.optim_bs == 0:
self.optimizer.zero_grad()
self.forward()
self.smoothed_total_loss = self.smoothed_total_loss*0.99 + 0.01*self.total_loss
t_forw = time.time()
self.total_loss.backward()
t_backw = time.time()
if optim_steps % opts.optim_bs == 0:
self.optimizer.step()
t_opt = time.time()
total_steps += 1
epoch_iter += 1
if opts.display_visuals and (total_steps % opts.display_freq == 0):
iter_end_time = time.time()
#visualizer.log_images(self.get_current_visuals(), epoch*dataset_size + epoch_iter)
vis_dict = self.get_current_visuals()
for k,v in vis_dict.items():
if('mesh' in k):
v.save_obj(os.path.join(self.vis_dir, k + '.obj'), save_texture=True)
else:
vutils.save_image(v, os.path.join(self.vis_dir, k + '.png'))
del vis_dict
if opts.print_scalars and (total_steps % opts.print_freq == 0):
scalars = self.get_current_scalars()
visualizer.print_current_scalars(epoch, epoch_iter, scalars)
if total_steps % opts.save_latest_freq == 0:
print('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps))
self.save('latest')
if total_steps == opts.num_iter:
return
if (epoch+1) % opts.save_epoch_freq == 0:
print('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps))
self.save('latest')
self.save(epoch+1)
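# A minimal subclass sketch (illustrative; the network, dataloader and field names
# below are hypothetical and not part of this repository):
#
# class ToyTrainer(Trainer):
#     def define_model(self):
#         self.model = ToyNet().cuda(device=self.opts.gpu_id)
#     def init_dataset(self):
#         self.dataloader = make_dataloader(self.opts)
#     def define_criterion(self):
#         self.criterion = torch.nn.MSELoss()
#     def set_input(self, batch):
#         self.input_imgs = batch['img'].type(self.Tensor)
#         self.invalid_batch = False
#     def forward(self):
#         self.total_loss = self.criterion(self.model(self.input_imgs), self.input_imgs)
#         self.register_scalars({'total_loss': self.total_loss.item()})
#
# opts = flags.FLAGS  # after absl flag parsing
# trainer = ToyTrainer(opts)
# trainer.init_training()
# trainer.train()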
|
python/pmercury/utils/tls_crypto.py | raj-apoorv/mercury | 299 | 12646467 | import os
import sys
import struct
from binascii import hexlify, unhexlify
# crypto primitive imports
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.modes import GCM, CBC
from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA384, MD5, Hash
from cryptography.hazmat.primitives.ciphers.algorithms import AES, ARC4, TripleDES, Camellia, SEED
# constants
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.utils.tls_constants import *
class TLS_CRYPTO:
def __init__(self):
self.cur_mode = None
self.session_metadata = None
self.tls_sequence = None
self.tls13_handshake = None
self.kdf = {}
self.kdf['TLS 1.0'] = self.kdf_tls10
self.kdf['TLS 1.1'] = self.kdf_tls11
self.kdf['TLS 1.2'] = self.kdf_tls12
self.kdf['TLS 1.3'] = self.kdf_tls13
def kdf_tls10(self, cr, sr, secret, cipher_params, flow_key):
if flow_key+self.cur_mode not in self.session_metadata:
self.session_metadata[flow_key+self.cur_mode] = {}
if 'cbc_initial_decrypt' not in self.session_metadata[flow_key+self.cur_mode]:
self.session_metadata[flow_key+self.cur_mode]['cbc_initial_decrypt'] = 1
fixed_iv_length = cipher_params['iv_length']
else:
fixed_iv_length = cipher_params['fixed_iv_length']
label = b'key expansion'
secret_md5 = secret[:len(secret)//2]
secret_sha = secret[-len(secret)//2:]
md5_material = b''
cur_hash = self.hmac(secret_md5, MD5(), b'%s%s%s' % (label, sr, cr))
for i in range(16):
md5_material += self.hmac(secret_md5, MD5(), b'%s%s%s%s' % (cur_hash, label, sr, cr))
cur_hash = self.hmac(secret_md5, MD5(), cur_hash)
sha_material = b''
cur_hash = self.hmac(secret_sha, SHA1(), b'%s%s%s' % (label, sr, cr))
for i in range(16):
sha_material += self.hmac(secret_sha, SHA1(), b'%s%s%s%s' % (cur_hash, label, sr, cr))
cur_hash = self.hmac(secret_sha, SHA1(), cur_hash)
output = b''
for i in range(min(len(md5_material),len(sha_material))):
output += bytes([md5_material[i] ^ sha_material[i]])
key_material_lengths = [cipher_params['mac_key_length']]*2 + \
[cipher_params['enc_key_length']]*2 + \
[fixed_iv_length]*2
offset = 0
key_material = []
for l in key_material_lengths:
key_material.append(output[offset:offset+l])
offset += l
return key_material
def kdf_tls11(self, cr, sr, secret, cipher_params, flow_key):
label = b'key expansion'
secret_md5 = secret[:len(secret)//2]
secret_sha = secret[-len(secret)//2:]
md5_material = b''
cur_hash = self.hmac(secret_md5, MD5(), b'%s%s%s' % (label, sr, cr))
for i in range(16):
md5_material += self.hmac(secret_md5, MD5(), b'%s%s%s%s' % (cur_hash, label, sr, cr))
cur_hash = self.hmac(secret_md5, MD5(), cur_hash)
sha_material = b''
cur_hash = self.hmac(secret_sha, SHA1(), b'%s%s%s' % (label, sr, cr))
for i in range(16):
sha_material += self.hmac(secret_sha, SHA1(), b'%s%s%s%s' % (cur_hash, label, sr, cr))
cur_hash = self.hmac(secret_sha, SHA1(), cur_hash)
output = b''
for i in range(min(len(md5_material),len(sha_material))):
output += bytes([md5_material[i] ^ sha_material[i]])
key_material_lengths = [cipher_params['mac_key_length']]*2 + \
[cipher_params['enc_key_length']]*2 + \
[cipher_params['fixed_iv_length']]*2
offset = 0
key_material = []
for l in key_material_lengths:
key_material.append(output[offset:offset+l])
offset += l
return key_material
def kdf_tls12(self, cr, sr, secret, cipher_params, flow_key):
label = b'key expansion'
digest_type = cipher_params['prf']()
cur_hash = self.hmac(secret, digest_type, b'%s%s%s' % (label, sr, cr))
output = b''
for i in range(16):
output += self.hmac(secret, digest_type, b'%s%s%s%s' % (cur_hash, label, sr, cr))
cur_hash = self.hmac(secret, digest_type, cur_hash)
key_material_lengths = [cipher_params['mac_key_length']]*2 + \
[cipher_params['enc_key_length']]*2 + \
[cipher_params['fixed_iv_length']]*2
offset = 0
key_material = []
for l in key_material_lengths:
key_material.append(output[offset:offset+l])
offset += l
return key_material
def kdf_tls13(self, secret, label, length, cipher_params, flow_key):
digest_type = cipher_params['prf']()
key = b''
block = b''
ind = 0
while len(key) < length:
ind += 1
block = self.hmac(secret, digest_type, b'%s%s%s' % (block, label, struct.pack('B',ind)))
key += block
return key[:length]
def hmac(self, secret, digest_type, msg):
tmp = HMAC(secret, digest_type, default_backend())
tmp.update(msg)
return tmp.finalize()
def hash_(self, digest_type, msg):
tmp = Hash(digest_type, default_backend())
tmp.update(msg)
return tmp.finalize()
def get_secret(self, client_random, secrets, cur_flow_key):
secret = None
if client_random not in secrets:
return None
if not self.session_metadata['version'].startswith('TLS 1.3'):
secret = unhexlify(secrets[client_random]['master_secret'])
# find appropriate master secret
if cur_flow_key not in self.tls13_handshake:
self.tls13_handshake[cur_flow_key] = True
if self.cur_mode == 'client' and self.tls13_handshake[cur_flow_key] == True and \
'client_handshake_secret' in secrets[client_random]:
secret = unhexlify(secrets[client_random]['client_handshake_secret'])
elif self.cur_mode == 'server' and self.tls13_handshake[cur_flow_key] == True and \
'server_handshake_secret' in secrets[client_random]:
secret = unhexlify(secrets[client_random]['server_handshake_secret'])
elif self.cur_mode == 'client' and self.tls13_handshake[cur_flow_key] == False and \
'client_traffic_secret' in secrets[client_random]:
secret = unhexlify(secrets[client_random]['client_traffic_secret'])
elif self.cur_mode == 'server' and self.tls13_handshake[cur_flow_key] == False and \
'server_traffic_secret' in secrets[client_random]:
secret = unhexlify(secrets[client_random]['server_traffic_secret'])
return secret
def get_explicit_material(self, flow_key, data, cipher_params):
enc = None
iv = None
if self.session_metadata['version'] == 'TLS 1.0':
enc = data
if cipher_params['mode'] == CBC:
if flow_key+self.cur_mode not in self.session_metadata or \
'cbc_initial_decrypt' not in self.session_metadata[flow_key+self.cur_mode] or \
'cur_iv' not in self.session_metadata[flow_key+self.cur_mode]:
iv = b''
else:
iv = self.session_metadata[flow_key+self.cur_mode]['cur_iv']
elif self.session_metadata['version'] in ['TLS 1.1','TLS 1.2']:
enc = data[cipher_params['iv_length']:]
iv = data[:cipher_params['iv_length']]
elif self.session_metadata['version'].startswith('TLS 1.3'):
enc = data
iv = b''
return enc, iv
def get_implicit_material(self, client_random, server_random, master_secret, \
cipher_params, flow_key, explicit_iv):
key = None
iv = None
if self.session_metadata['version'] in ['SSL 3.0','TLS 1.0','TLS 1.1','TLS 1.2']:
c_mac_key, s_mac_key, c_key, s_key, c_iv, s_iv = \
self.kdf[self.session_metadata['version']](client_random, server_random, \
master_secret, cipher_params, flow_key)
if self.cur_mode == 'client':
key = c_key
iv = c_iv + explicit_iv
else:
key = s_key
iv = s_iv + explicit_iv
elif self.session_metadata['version'].startswith('TLS 1.3'):
cur_flow_key = flow_key + self.cur_mode
label_str = b''
if self.session_metadata['version'] == 'TLS 1.3' or self.session_metadata['version'] == 'TLS 1.3 (draft 20)':
label_str = b'tls13 '
else:
label_str = b'TLS 1.3, '
tmp_label = label_str + b'key'
len_ = struct.pack(b'!H', cipher_params['enc_key_length'])
tmp_label = b'%s%s%s%s' % (len_, struct.pack(b'B', len(tmp_label)), tmp_label, b'\x00')
key = self.kdf_tls13(master_secret, tmp_label, cipher_params['enc_key_length'], \
cipher_params, flow_key)
tmp_label = label_str + b'iv'
len_ = struct.pack(b'!H', cipher_params['iv_length'])
tmp_label = b'%s%s%s%s' % (len_, struct.pack(b'B', len(tmp_label)), tmp_label, b'\x00')
implicit_iv = self.kdf_tls13(master_secret, tmp_label, cipher_params['iv_length'], \
cipher_params, flow_key)
# calculate nonce
iv2 = struct.pack(b'!Q', self.tls_sequence[cur_flow_key]).rjust(len(implicit_iv), b'\x00')
iv = b''.join([struct.pack(b'B', v ^ implicit_iv[i]) for i, v in enumerate(iv2)])
return key, iv
# strip MAC/AEAD/Padding
def get_data(self, result, flow_key, cipher_params, encrypted_data):
padding_length = 0
# strip padding
if self.session_metadata['version'].startswith('TLS 1.3'):
for i in range(len(result)-1,-1,-1):
if result[i] != b'\x00':
break
padding_length += 1
result = result[:-padding_length-1]
else:
if cipher_params['mode'] == CBC:
padding_length = int(hexlify(result[-1:]),16)
if len(result) < padding_length+1:
padding_length = 0
else:
for i in range(1,padding_length+1):
if int(hexlify(result[-(i+1):-i]),16) != padding_length:
padding_length = 0
break
if padding_length != 0:
padding_length += 1
result = result[:-padding_length]
# set up IV for TLS 1.0
if self.session_metadata['version'] == 'TLS 1.0':
if flow_key+self.cur_mode not in self.session_metadata:
self.session_metadata[flow_key+self.cur_mode] = {}
self.session_metadata[flow_key+self.cur_mode]['cur_iv'] = encrypted_data[-cipher_params['iv_length']:]
# strip AEAD/MAC
auth_length = 0
if cipher_params['mode'] == GCM:
if cipher_params['enc_key_length'] == 32:
result = result[:-16]
elif cipher_params['enc_key_length'] == 16:
result = result
auth_length = cipher_params['enc_key_length']
elif cipher_params['mac_key_length'] > 0:
result = result[:-cipher_params['mac_key_length']]
auth_length = cipher_params['mac_key_length']
return result, padding_length, auth_length
# get encrypted data and crypto parameters, output plaintext
def get_plaintext(self, data, cipher_params, key, iv, flow_key):
if cipher_params['cipher'] == AES:
if cipher_params['mode'] == CBC:
decryptor = Cipher(cipher_params['cipher'](key), \
cipher_params['mode'](iv), \
default_backend()).decryptor()
if cipher_params['mode'] == GCM:
if len(data[-16:]) < 16:
return None
decryptor = Cipher(cipher_params['cipher'](key), \
cipher_params['mode'](iv,data[-16:]), \
default_backend()).decryptor()
elif cipher_params['cipher'] == ARC4:
if flow_key+self.cur_mode not in self.session_metadata:
self.session_metadata[flow_key+self.cur_mode] = {}
if 'decryptor' not in self.session_metadata[flow_key+self.cur_mode]:
self.session_metadata[flow_key+self.cur_mode]['decryptor'] = \
decryptor = Cipher(cipher_params['cipher'](key), \
None,
default_backend()).decryptor()
decryptor = self.session_metadata[flow_key+self.cur_mode]['decryptor']
elif cipher_params['cipher'] == TripleDES:
decryptor = Cipher(cipher_params['cipher'](key), \
cipher_params['mode'](iv), \
default_backend()).decryptor()
elif cipher_params['cipher'] == Camellia:
decryptor = Cipher(cipher_params['cipher'](key), \
cipher_params['mode'](iv), \
default_backend()).decryptor()
elif cipher_params['cipher'] == SEED:
decryptor = Cipher(cipher_params['cipher'](key), \
cipher_params['mode'](iv), \
default_backend()).decryptor()
else:
print('%s Not Supported' % cipher_params['cipher'])
return None
return decryptor.update(data)
# Main decrypt function
def decrypt(self, data, flow_key, cur_mode, session_metadata, tls_sequence, secrets, tls13_handshake):
self.cur_mode = cur_mode
self.session_metadata = session_metadata
self.tls_sequence = tls_sequence
self.tls13_handshake = tls13_handshake
if 'selected_cipher_suite' not in self.session_metadata or 'client_random' not in self.session_metadata \
or 'server_random' not in self.session_metadata:
return None, None, None
cur_flow_key = flow_key + self.cur_mode
if self.session_metadata['selected_cipher_suite'] not in TLS_CIPHER_SUITES:
print('NYI:\t' + self.session_metadata['selected_cipher_suite'])
return None, None, None
cipher_params = TLS_CIPHER_SUITES[self.session_metadata['selected_cipher_suite']]
client_random = self.session_metadata['client_random']
server_random = self.session_metadata['server_random']
# set initial sequence number for decryption
if cur_flow_key not in self.tls_sequence:
self.tls_sequence[cur_flow_key] = 0
# get master secret, varies for TLS 1.3
master_secret = self.get_secret(client_random, secrets, cur_flow_key)
if master_secret is None:
return None, None, None
# get encrypted data and (if necessary) explicit iv
encrypted_data, explicit_iv = \
self.get_explicit_material(flow_key, data, cipher_params)
if encrypted_data is None or explicit_iv is None:
return None, None, None
# get encryption key and implicit iv
key, iv = self.get_implicit_material(unhexlify(client_random), \
unhexlify(server_random), master_secret, cipher_params, \
flow_key, explicit_iv)
# decrypt encrypted text
result = self.get_plaintext(encrypted_data, cipher_params, key, iv, flow_key)
if result is None:
return None, None, None
# determine if padding is used
result, padding_length, auth_length = self.get_data(result, flow_key, \
cipher_params, encrypted_data)
# update sequence number
self.tls_sequence[cur_flow_key] += 1
return result, padding_length, auth_length
|
nlp_gym/envs/common/action_space.py | lipucky/nlp-gym | 120 | 12646472 | from typing import List
from gym.spaces.discrete import Discrete
class ActionSpace(Discrete):
def __init__(self, actions: List[str]):
self.actions = actions
self._ix_to_action = {ix: action for ix, action in enumerate(self.actions)}
self._action_to_ix = {action: ix for ix, action in enumerate(self.actions)}
super().__init__(len(self.actions))
def __post_init__(self):
self._ix_to_action = {ix: action for ix, action in enumerate(self.actions)}
self._action_to_ix = {action: ix for ix, action in enumerate(self.actions)}
def action_to_ix(self, action: str) -> int:
return self._action_to_ix[action]
def ix_to_action(self, ix: int) -> str:
return self._ix_to_action[ix]
def size(self) -> int:
return self.n
def __repr__(self):
return f"Discrete Action Space with {self.size()} actions: {self.actions}"
|
mayan/apps/mailer/models.py | eshbeata/open-paperless | 2,743 | 12646486 |
from __future__ import unicode_literals
import json
import logging
from django.core import mail
from django.db import models
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from .utils import split_recipient_list
logger = logging.getLogger(__name__)
class LogEntry(models.Model):
datetime = models.DateTimeField(
auto_now_add=True, editable=False, verbose_name=_('Date time')
)
message = models.TextField(
blank=True, editable=False, verbose_name=_('Message')
)
class Meta:
get_latest_by = 'datetime'
ordering = ('-datetime',)
verbose_name = _('Log entry')
verbose_name_plural = _('Log entries')
class UserMailer(models.Model):
label = models.CharField(
max_length=128, unique=True, verbose_name=_('Label')
)
default = models.BooleanField(
default=True, help_text=_(
'If default, this mailing profile will be pre-selected on the '
'document mailing form.'
), verbose_name=_('Default')
)
enabled = models.BooleanField(default=True, verbose_name=_('Enabled'))
backend_path = models.CharField(
max_length=128,
help_text=_('The dotted Python path to the backend class.'),
verbose_name=_('Backend path')
)
backend_data = models.TextField(
blank=True, verbose_name=_('Backend data')
)
class Meta:
ordering = ('label',)
verbose_name = _('User mailer')
verbose_name_plural = _('User mailers')
def __str__(self):
return self.label
def save(self, *args, **kwargs):
if self.default:
UserMailer.objects.select_for_update().exclude(pk=self.pk).update(
default=False
)
return super(UserMailer, self).save(*args, **kwargs)
def backend_label(self):
return self.get_backend().label
def get_backend(self):
return import_string(self.backend_path)
def get_connection(self):
return mail.get_connection(
backend=self.get_backend().class_path, **self.loads()
)
def loads(self):
return json.loads(self.backend_data)
def dumps(self, data):
self.backend_data = json.dumps(data)
self.save()
def send(self, subject='', body='', to=None, document=None, as_attachment=False):
recipient_list = split_recipient_list(recipients=[to])
with self.get_connection() as connection:
email_message = mail.EmailMultiAlternatives(
subject=subject, body=body, to=recipient_list,
connection=connection
)
if as_attachment:
with document.open() as descriptor:
email_message.attach(
filename=document.label, content=descriptor.read(),
mimetype=document.file_mimetype
)
try:
email_message.send()
except Exception as exception:
self.error_log.create(message=exception)
else:
self.error_log.all().delete()
def test(self, to):
self.send(to=to, subject=_('Test email from Mayan EDMS'))
class UserMailerLogEntry(models.Model):
user_mailer = models.ForeignKey(
UserMailer, on_delete=models.CASCADE, related_name='error_log',
verbose_name=_('User mailer')
)
datetime = models.DateTimeField(
auto_now_add=True, editable=False, verbose_name=_('Date time')
)
message = models.TextField(
blank=True, editable=False, verbose_name=_('Message')
)
class Meta:
get_latest_by = 'datetime'
ordering = ('-datetime',)
verbose_name = _('User mailer log entry')
verbose_name_plural = _('User mailer log entries')
|
python/tree/leetocde/average_levels_binary_tree.py | googege/algo-learn | 153 | 12646492 |
from typing import List
# 二叉树的层平均值
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# 广度优先
def averageOfLevels_1(self, root: TreeNode) -> List[float]:
queue, res = [root], []
while len(queue) > 0:
n, count = len(queue), 0
for i in range(n):
node = queue.pop(0)
count += node.val
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
res.append(count / n)
return res
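if __name__ == "__main__":
    # Quick illustrative check (not part of the original file): the tree
    # [3, 9, 20, null, null, 15, 7] has level averages [3.0, 14.5, 11.0].
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().averageOfLevels_1(root))  # [3.0, 14.5, 11.0]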
|
script_tests/line_select_tests.py | mr-c/bx-python | 122 | 12646513 | import unittest
import base
class Test(base.BaseScriptTest, unittest.TestCase):
command_line = "./scripts/line_select.py ${features}"
input_features = base.TestFile("""0
1
1
0
1
0""")
input_stdin = base.TestFile("""a
b
d
e
f""")
output_stdout = base.TestFile("""b
e""")
|
fastpunct/__init__.py | notAI-tech/fastpunct | 208 | 12646516 |
from .fastpunct import FastPunct |
deepdish/core.py | raphaelquast/deepdish | 253 | 12646528 | from __future__ import division, print_function, absolute_import
import time
import warnings
import numpy as np
import itertools as itr
import sys
from contextlib import contextmanager
warnings.simplefilter("ignore", np.ComplexWarning)
_is_verbose = False
_is_silent = False
class AbortException(Exception):
"""
This exception is used for when the user wants to quit algorithms mid-way.
The `AbortException` can for instance be sent by pygame input, and caught
by whatever is running the algorithm.
"""
pass
def bytesize(arr):
"""
Returns the memory byte size of a Numpy array as an integer.
"""
byte_size = np.prod(arr.shape) * np.dtype(arr.dtype).itemsize
return byte_size
def humanize_bytesize(byte_size):
order = np.log(byte_size) / np.log(1024)
orders = [
(5, 'PB'),
(4, 'TB'),
(3, 'GB'),
(2, 'MB'),
(1, 'KB'),
(0, 'B')
]
for ex, name in orders:
if order >= ex:
return '{:.4g} {}'.format(byte_size / 1024**ex, name)
def memsize(arr):
"""
Returns the required memory of a Numpy array as a humanly readable string.
"""
return humanize_bytesize(bytesize(arr))
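# Example (illustrative): a 1000 x 1000 float64 array occupies 1000 * 1000 * 8
# = 8,000,000 bytes, so memsize(np.zeros((1000, 1000))) returns '7.629 MB'.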
def span(arr):
"""
Calculate and return the mininum and maximum of an array.
Parameters
----------
arr : ndarray
Numpy array.
Returns
-------
min : dtype
Minimum of array.
max : dtype
Maximum of array.
"""
# TODO: This could be made faster with a custom ufunc
return (np.min(arr), np.max(arr))
def apply_once(func, arr, axes, keepdims=True):
"""
Similar to `numpy.apply_over_axes`, except this performs the operation over
a flattened version of all the axes, meaning that the function will only be
called once. This only makes a difference for non-linear functions.
Parameters
----------
func : callback
Function that operates well on Numpy arrays and returns a single value
of compatible dtype.
arr : ndarray
Array to do operation over.
axes : int or iterable
Specifies the axes to perform the operation. Only one call will be made
to `func`, with all values flattened.
keepdims : bool
By default, this is True, so the collapsed dimensions remain with
length 1. This is similar to `numpy.apply_over_axes` in that regard. If
this is set to False, the dimensions are removed, just like when using
for instance `numpy.sum` over a single axis. Note that this is safer
than subsequently calling squeeze, since this option will preserve
length-1 dimensions that were not operated on.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
>>> rs = np.random.RandomState(0)
>>> x = rs.uniform(size=(10, 3, 3))
Imagine that you have ten 3x3 images and you want to calculate each image's
intensity standard deviation:
>>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
array([ 0.06056838, 0.08230712, 0.08135083, 0.09938963, 0.08533604,
0.07830725, 0.066148 , 0.07983019, 0.08134123, 0.01839635])
This is the same as ``x.std(1).std(1)``, which is not the standard
deviation of all 9 pixels together. To fix this we can flatten the pixels
and try again:
>>> x.reshape(10, 9).std(axis=1)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
This is exactly what this function does for you:
>>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
"""
all_axes = np.arange(arr.ndim)
if isinstance(axes, int):
axes = {axes}
else:
axes = set(axis % arr.ndim for axis in axes)
principal_axis = min(axes)
for i, axis in enumerate(axes):
axis0 = principal_axis + i
if axis != axis0:
all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]
transposed_arr = arr.transpose(all_axes)
new_shape = []
new_shape_keepdims = []
for axis, dim in enumerate(arr.shape):
if axis == principal_axis:
new_shape.append(-1)
elif axis not in axes:
new_shape.append(dim)
if axis in axes:
new_shape_keepdims.append(1)
else:
new_shape_keepdims.append(dim)
collapsed = np.apply_along_axis(func,
principal_axis,
transposed_arr.reshape(new_shape))
if keepdims:
return collapsed.reshape(new_shape_keepdims)
else:
return collapsed
def tupled_argmax(a):
"""
Argmax that returns an index tuple. Note that `numpy.argmax` will return a
scalar index as if you had flattened the array.
Parameters
----------
a : array_like
Input array.
Returns
-------
index : tuple
Tuple of index, even if `a` is one-dimensional. Note that this can
immediately be used to index `a` as in ``a[index]``.
Examples
--------
>>> import numpy as np
>>> import deepdish as dd
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> dd.tupled_argmax(a)
(1, 2)
"""
return np.unravel_index(np.argmax(a), np.shape(a))
def multi_range(*args):
return itr.product(*[range(a) for a in args])
@contextmanager
def timed(name=None, file=sys.stdout, callback=None, wall_clock=True):
"""
Context manager to make it easy to time the execution of a piece of code.
This timer will never run your code several times and is meant more for
simple in-production timing, instead of benchmarking. Reports the
wall-clock time (using `time.time`) and not the processor time.
Parameters
----------
name : str
Name of the timing block, to identify it.
file : file handler
Which file handler to print the results to. Default is standard output.
If a numpy array of size 1 is given, the time in seconds will be
stored inside it. Ignored if `callback` is set.
callback : callable
This offers even more flexibility than `file`. The callable will be
called at the end of the execution with a single floating point
argument with the elapsed time in seconds.
Examples
--------
>>> import deepdish as dd
>>> import time
The `timed` function is a context manager, so everything inside the
``with`` block will be timed. The results will be printed by default to
standard output:
>>> with dd.timed('Sleep'): # doctest: +SKIP
... time.sleep(1)
[timed] Sleep: 1.001035451889038 s
Using the `callback` parameter, we can accumulate multiple runs into a
list:
>>> times = []
>>> for i in range(3): # doctest: +SKIP
... with dd.timed(callback=times.append):
... time.sleep(1)
>>> times # doctest: +SKIP
[1.0035350322723389, 1.0035550594329834, 1.0039470195770264]
"""
start = time.time()
yield
end = time.time()
delta = end - start
if callback is not None:
callback(delta)
elif isinstance(file, np.ndarray) and len(file) == 1:
file[0] = delta
else:
name_str = ' {}'.format(name) if name is not None else ''
print(("[timed]{0}: {1} s".format(name_str, delta)), file=file)
class SliceClass(object):
def __getitem__(self, index):
return index
aslice = SliceClass()
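# `aslice` turns subscription syntax into plain index objects (illustrative):
#   aslice[1:10:2] == slice(1, 10, 2)
#   aslice[:, 0]   == (slice(None, None, None), 0)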
|
image_registration/fft_tools/downsample.py | Experimentica/image_registration | 122 | 12646561 |
import numpy as np
try:
try:
from numpy import nanmean
except ImportError:
from scipy.stats import nanmean
except ImportError as ex:
print("Image-registration requires either numpy >= 1.8 or scipy.")
raise ex
def downsample(myarr,factor,estimator=nanmean):
"""
Downsample a 2D array by averaging over *factor* pixels in each axis.
Crops upper edge if the shape is not a multiple of factor.
This code is pure np and should be fast.
keywords:
estimator - default to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
"""
ys,xs = myarr.shape
crarr = myarr[:ys-(ys % int(factor)),:xs-(xs % int(factor))]
dsarr = estimator( np.concatenate([[crarr[i::factor,j::factor]
for i in range(factor)]
for j in range(factor)]), axis=0)
return dsarr
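# A small illustrative check (not part of the original module): downsampling a
# 4x4 array by a factor of 2 averages each non-overlapping 2x2 block:
# >>> downsample(np.arange(16, dtype=float).reshape(4, 4), 2)
# array([[ 2.5,  4.5],
#        [10.5, 12.5]])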
def downsample_cube(myarr,factor,ignoredim=0):
"""
Downsample a 3D array by averaging over *factor* pixels on the last two
axes.
"""
if ignoredim > 0: myarr = myarr.swapaxes(0,ignoredim)
zs,ys,xs = myarr.shape
crarr = myarr[:,:ys-(ys % int(factor)),:xs-(xs % int(factor))]
dsarr = np.mean(np.concatenate([[crarr[:,i::factor,j::factor]
for i in range(factor)]
for j in range(factor)]), axis=0)
if ignoredim > 0: dsarr = dsarr.swapaxes(0,ignoredim)
return dsarr
def downsample_1d(myarr,factor,estimator=nanmean):
"""
Downsample a 1D array by averaging over *factor* pixels.
Crops right side if the shape is not a multiple of factor.
This code is pure np and should be fast.
keywords:
estimator - default to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
"""
assert myarr.ndim == 1
xs = myarr.size
crarr = myarr[:xs-(xs % int(factor))]
dsarr = estimator( np.concatenate([[crarr[i::factor]
for i in range(factor)] ]),axis=0)
return dsarr
def downsample_axis(myarr, factor, axis, estimator=nanmean, truncate=False):
"""
Downsample an ND array by averaging over *factor* pixels along an axis.
Crops right side if the shape is not a multiple of factor.
This code is pure np and should be fast.
keywords:
estimator - default to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
"""
# size of the dimension of interest
xs = myarr.shape[axis]
if xs % int(factor) != 0:
if truncate:
view = [slice(None) for ii in range(myarr.ndim)]
view[axis] = slice(None,xs-(xs % int(factor)))
crarr = myarr[tuple(view)]
else:
newshape = list(myarr.shape)
newshape[axis] = (factor - xs % int(factor))
extension = np.empty(newshape) * np.nan
crarr = np.concatenate((myarr,extension), axis=axis)
else:
crarr = myarr
def makeslice(startpoint,axis=axis,step=factor):
# make empty slices
view = [slice(None) for ii in range(myarr.ndim)]
# then fill the appropriate slice
view[axis] = slice(startpoint,None,step)
return tuple(view)
# The extra braces here are crucial: We're adding an extra dimension so we
# can average across it!
stacked_array = np.concatenate([[crarr[makeslice(ii)]] for ii in range(factor)])
dsarr = estimator(stacked_array, axis=0)
return dsarr
|
hpfeeds_server/hpfeeds_server.py | bryanwills/honssh | 388 | 12646589 | <gh_stars>100-1000
from honssh import log
import os
import struct
import hashlib
import json
import socket
BUFSIZ = 16384
OP_ERROR = 0
OP_INFO = 1
OP_AUTH = 2
OP_PUBLISH = 3
OP_SUBSCRIBE = 4
MAXBUF = 1024 ** 2
SIZES = {
OP_ERROR: 5 + MAXBUF,
OP_INFO: 5 + 256 + 20,
OP_AUTH: 5 + 256 + 20,
OP_PUBLISH: 5 + MAXBUF,
OP_SUBSCRIBE: 5 + 256 * 2,
}
HONSSHAUTHCHAN = 'honssh.auth'
HONSSHSESHCHAN = 'honssh.sessions'
class BadClient(Exception):
pass
# packs a string with 1 byte length field
def strpack8(x):
if isinstance(x, str): x = x.encode('latin1')
return struct.pack('!B', len(x)) + x
# unpacks a string with 1 byte length field
def strunpack8(x):
l = x[0]
return x[1:1 + l], x[1 + l:]
def msghdr(op, data):
return struct.pack('!iB', 5 + len(data), op) + data
def msgpublish(ident, chan, data):
return msghdr(OP_PUBLISH, strpack8(ident) + strpack8(chan) + data)
def msgsubscribe(ident, chan):
if isinstance(chan, str): chan = chan.encode('latin1')
return msghdr(OP_SUBSCRIBE, strpack8(ident) + chan)
def msgauth(rand, ident, secret):
hash = hashlib.sha1(bytes(rand) + secret).digest()
return msghdr(OP_AUTH, strpack8(ident) + hash)
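# Wire-format sketch derived from the packing helpers above (illustrative, not taken from the hpfeeds spec):
#   frame            = total_length (4 bytes, big-endian) | opcode (1 byte) | payload
#   PUBLISH payload  = len(ident) (1 byte) | ident | len(chan) (1 byte) | chan | data
#   AUTH    payload  = len(ident) (1 byte) | ident | sha1(rand + secret)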
class FeedUnpack(object):
def __init__(self):
self.buf = bytearray()
def __iter__(self):
return self
def next(self):
return self.unpack()
def feed(self, data):
self.buf.extend(data)
def unpack(self):
if len(self.buf) < 5:
raise StopIteration('No message.')
ml, opcode = struct.unpack('!iB', buffer(self.buf, 0, 5))
if ml > SIZES.get(opcode, MAXBUF):
raise BadClient('Not respecting MAXBUF.')
if len(self.buf) < ml:
raise StopIteration('No message.')
data = bytearray(buffer(self.buf, 5, ml - 5))
del self.buf[:ml]
return opcode, data
class hpclient(object):
def __init__(self, server, port, ident, secret):
log.msg(log.LCYAN, '[PLUGIN][HPFEEDS]',
'hpfeeds client init broker {0}:{1}, identifier {2}'.format(server, port, ident))
self.server, self.port = server, int(port)
self.ident, self.secret = ident.encode('latin1'), secret.encode('latin1')
self.unpacker = FeedUnpack()
self.state = 'INIT'
        self.sendfiles = []
        self.filehandle = None
        self.s = None
        self.connect()
def connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.settimeout(3)
try:
self.s.connect((self.server, self.port))
except:
log.msg(log.LCYAN, '[PLUGIN][HPFEEDS]', 'hpfeeds client could not connect to broker.')
self.s = None
else:
self.s.settimeout(None)
self.handle_established()
def send(self, data):
if not self.s: return
self.s.send(data)
def close(self):
self.s.close()
self.s = None
def handle_established(self):
log.msg(log.LCYAN, '[PLUGIN][HPFEEDS]', 'hpclient established')
while self.state != 'GOTINFO':
self.read()
# quickly try to see if there was an error message
self.s.settimeout(0.5)
self.read()
self.s.settimeout(None)
def read(self):
if not self.s: return
try:
d = self.s.recv(BUFSIZ)
except socket.timeout:
return
if not d:
self.close()
return
self.unpacker.feed(d)
try:
for opcode, data in self.unpacker:
log.msg(log.LCYAN, '[PLUGIN][HPFEEDS]', 'hpclient msg opcode {0} data {1}'.format(opcode, data))
if opcode == OP_INFO:
name, rand = strunpack8(data)
log.msg(log.LCYAN, '[PLUGIN][HPFEEDS]', 'hpclient server name {0} rand {1}'.format(name, rand))
self.send(msgauth(rand, self.ident, self.secret))
self.state = 'GOTINFO'
elif opcode == OP_PUBLISH:
ident, data = strunpack8(data)
chan, data = strunpack8(data)
log.msg(log.LCYAN, '[PLUGIN][HPFEEDS]', 'publish to {0} by {1}: {2}'.format(chan, ident, data))
elif opcode == OP_ERROR:
log.err('[PLUGIN][HPFEEDS] - errormessage from server: {0}'.format(data))
else:
log.err('[PLUGIN][HPFEEDS] - unknown opcode message: {0}'.format(opcode))
except BadClient:
log.err('[PLUGIN][HPFEEDS] - unpacker error, disconnecting.')
self.close()
def publish(self, channel, **kwargs):
try:
self.send(msgpublish(self.ident, channel, json.dumps(kwargs).encode('latin1')))
except Exception, e:
log.err('[PLUGIN][HPFEEDS] - connection to hpfriends lost: {0}'.format(e))
log.err('[PLUGIN][HPFEEDS] - connecting')
self.connect()
self.send(msgpublish(self.ident, channel, json.dumps(kwargs).encode('latin1')))
def sendfile(self, filepath):
# does not read complete binary into memory, read and send chunks
if not self.filehandle:
            # no transfer in progress: start sending this file straight away
            self.sendfileheader(filepath)
self.sendfiledata()
else:
self.sendfiles.append(filepath)
def sendfileheader(self, filepath):
self.filehandle = open(filepath, 'rb')
fsize = os.stat(filepath).st_size
# FIXME: Where does 'UNIQUECHAN' come from??
headc = strpack8(self.ident) + strpack8(UNIQUECHAN)
headh = struct.pack('!iB', 5 + len(headc) + fsize, OP_PUBLISH)
self.send(headh + headc)
def sendfiledata(self):
tmp = self.filehandle.read(BUFSIZ)
if not tmp:
if self.sendfiles:
fp = self.sendfiles.pop(0)
self.sendfileheader(fp)
else:
self.filehandle = None
self.handle_io_in(b'')
else:
self.send(tmp)
|
gfsa/model/model_util_test.py | deepneuralmachine/google-research | 23,901 | 12646591 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for gfsa.model.model_util."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from gfsa.model import model_util
class LossUtilTest(parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "min",
"minval": 1,
"maxval": None,
"expected": [1., 1., 2., 3., 4.],
}, {
"testcase_name": "max",
"minval": None,
"maxval": 3,
"expected": [0., 1., 2., 3., 3.],
}, {
"testcase_name": "both",
"minval": 1,
"maxval": 3,
"expected": [1., 1., 2., 3., 3.],
})
def test_forward_clip(self, minval, maxval, expected):
vals, tangents = jax.jvp(
functools.partial(
model_util.forward_clip, minval=minval, maxval=maxval),
(jnp.arange(5).astype(jnp.float32),), (jnp.ones((5,)),))
np.testing.assert_allclose(vals, expected)
np.testing.assert_allclose(tangents, np.ones((5,)))
def test_safe_logit(self):
probs = jnp.array([0, 1e-20, 1e-3, 0.9, 1])
logits = model_util.safe_logit(probs)
self.assertTrue(np.all(np.isfinite(logits)))
np.testing.assert_allclose(logits[1:3], jax.scipy.special.logit(probs[1:3]))
def test_binary_logit_cross_entropy(self):
logits = jnp.array([-10., -5., 0., 5., 10.])
true_probs = jax.nn.sigmoid(logits)
false_probs = jax.nn.sigmoid(-logits)
true_nll = model_util.binary_logit_cross_entropy(logits,
jnp.ones([5], dtype=bool))
false_nll = model_util.binary_logit_cross_entropy(
logits, jnp.zeros([5], dtype=bool))
np.testing.assert_allclose(true_nll, -jnp.log(true_probs), atol=1e-7)
np.testing.assert_allclose(false_nll, -jnp.log(false_probs), atol=1e-7)
def test_linear_cross_entropy(self):
probs = jnp.array([0, 1e-20, 1e-3, 0.9, 1, 1, 1 - 1e-7, 1 - 1e-3, 0.1, 0])
targets = jnp.array([True] * 5 + [False] * 5)
losses = model_util.linear_cross_entropy(probs, targets)
# Losses are clipped to be finite.
self.assertTrue(np.all(np.isfinite(losses)))
# Loss values make sense.
np.testing.assert_allclose(
losses[1:5], [-np.log(1e-20), -np.log(1e-3), -np.log(0.9), 0],
atol=1e-5)
self.assertGreater(losses[0], losses[1])
# note: losses for false targets have especially low precision due to
# rounding errors for small values close to 1.
np.testing.assert_allclose(losses[6], -np.log(1e-7), atol=0.2)
np.testing.assert_allclose(
losses[7:10], [-np.log(1e-3), -np.log(0.9), 0], atol=1e-4)
self.assertGreater(losses[5], losses[6])
# Gradients are finite.
gradients = jax.grad(
lambda x: jnp.sum(model_util.linear_cross_entropy(x, targets)))(
probs)
self.assertTrue(np.all(np.isfinite(gradients)))
if __name__ == "__main__":
absltest.main()
|
userOperation/logout.py | fsdsoyu/xxqg | 317 | 12646623 | # -*- encoding: utf-8 -*-
from random import uniform
from time import sleep
from custom.xuexi_chrome import XuexiChrome
def logout(browser: XuexiChrome):
browser.xuexi_get('https://www.xuexi.cn/')
sleep(round(uniform(1, 2), 2))
logoutBtn = browser.find_element_by_class_name('logged-link')
logoutBtn.click()
|
tests/_projects/large_without_circle/run.py | marek-trmac/pycycle | 319 | 12646634 | from a_module.a_file import a_func
if __name__ == '__main__':
a_func() |
models/classifiers/resnext.py | daili0015/ModelFeast | 247 | 12646635 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: zcy
# @Date: 2019-02-10 12:44:46
# @Last Modified by: zcy
# @Last Modified time: 2019-02-11 11:52:19
import logging  # logging module
import torch, os
import torch.nn as nn
from torch import load as TorchLoad
import torch.utils.model_zoo as model_zoo
from models.classifiers.ResNext101_module import resnext101_32x4d_features
from models.classifiers.ResNext101_module2 import resnext101_64x4d_features
from base import BaseModel
__all__ = ['resnext', 'resnext101_32x4d', 'resnext101_64x4d']
model_urls = {
'resnext101_32x4d': 'http://data.lip6.fr/cadene/pretrainedmodels/resnext101_32x4d-29e315fa.pth',
'resnext101_64x4d': 'http://data.lip6.fr/cadene/pretrainedmodels/resnext101_64x4d-e77a0586.pth',
}
model_names = {
'resnext101_32x4d': 'resnext101_32x4d-29e315fa.pth',
'resnext101_64x4d': 'resnext101_64x4d-e77a0586.pth',
}
class ResNeXt101_32x4d(BaseModel):
def __init__(self, num_classes=1000):
super(ResNeXt101_32x4d, self).__init__()
self.num_classes = num_classes
self.features = resnext101_32x4d_features
self.avg_pool = nn.AvgPool2d((7, 7), (1, 1))
self.last_linear = nn.Linear(2048, num_classes)
def forward(self, x):
super(ResNeXt101_32x4d, self).isValidSize(x) #check the input size
x = self.features(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
    # Automatically adjust the fully connected layer
def adaptive_set_fc(self, n_class):
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.last_linear = nn.Linear(2048, n_class)
class ResNeXt101_64x4d(BaseModel):
def __init__(self, num_classes=1000):
super(ResNeXt101_64x4d, self).__init__()
self.num_classes = num_classes
self.features = resnext101_64x4d_features
self.avg_pool = nn.AvgPool2d((7, 7), (1, 1))
self.last_linear = nn.Linear(2048, num_classes)
def forward(self, x):
super(ResNeXt101_64x4d, self).isValidSize(x) #check the input size
x = self.features(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
    # Automatically adjust the fully connected layer
def adaptive_set_fc(self, n_class):
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.last_linear = nn.Linear(2048, n_class)
def get_resnext(param, pretrained = False, pretrained_path="./pretrained/"):
r''' param['model_url']: download url
param['file_name']: model file's name
        param['model_name']: the model's name
param['n_class']: how many classes to be classified
param['img_size']: img_size, a tuple(height, width)
'''
if isinstance(param['img_size'], (tuple, list)):
h, w = param['img_size'][0], param['img_size'][1]
else:
h = w = param['img_size']
# assert h>74 and w>74, 'image size should >= 75 !!!'
    # First build a model with the same structure as the pretrained one so its weights can be loaded
if param['model_name']=='resnext101_32x4d':
model = ResNeXt101_32x4d(num_classes=1000)
elif param['model_name']=='resnext101_64x4d':
model = ResNeXt101_64x4d(num_classes=1000)
model.img_size = (h, w)
    # Load the pretrained weights; the pretrained model file must be placed in pretrained_path
if pretrained:
if os.path.exists(os.path.join(pretrained_path, param['file_name'])):
model.load_state_dict(TorchLoad(os.path.join(pretrained_path, param['file_name'])))
logging.info("Find local model file, load model from local !!")
logging.info("找到本地下载的预训练模型!!载入权重!!")
else:
logging.info("pretrained 文件夹下没有,从网上下载 !!")
model.load_state_dict(model_zoo.load_url(param['model_url'], model_dir = pretrained_path))
logging.info("下载完毕!!载入权重!!")
    # Automatically adjust according to the input image size and number of classes
model.adaptive_set_fc(param['n_class'])
return model
def resnext(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"):
return resnext101_32x4d(n_class, img_size, pretrained, pretrained_path)
def resnext101_32x4d(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"):
param = {'model_url': model_urls['resnext101_32x4d'], 'file_name': model_names['resnext101_32x4d'],
'model_name': 'resnext101_32x4d', 'n_class': n_class, 'img_size': img_size }
return get_resnext(param, pretrained, pretrained_path)
def resnext101_64x4d(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"):
param = {'model_url': model_urls['resnext101_64x4d'], 'file_name': model_names['resnext101_64x4d'],
'model_name': 'resnext101_64x4d', 'n_class': n_class, 'img_size': img_size }
return get_resnext(param, pretrained, pretrained_path)
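# Illustrative usage sketch (not part of the original module; class count and input size are assumptions):
#   >>> model = resnext101_32x4d(n_class=10, img_size=(224, 224), pretrained=False)
#   >>> logits = model(torch.randn(1, 3, 224, 224))   # expected shape: (1, 10)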
|
app/server/labml_app/docs.py | elgalu/labml | 463 | 12646651 | from labml_app.db import job
SAMPLE_SPECS_DICT = {'parameters': [], 'definitions': {}, 'response': {}}
sync = {
"parameters": [
{
"name": "computer_uuid",
"in": "query",
"type": "string",
"required": "true",
"description": "0c112ffda506f10f9f793c0fb6d9de4b43595d03",
},
{
"name": "runs",
"in": "body",
"type": "list",
"description": "Runs to be synced with the server",
"example": [{
'uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03',
'size_tensorboard': 10.2,
'size_checkpoints': 15.4
}]
},
],
"responses": {
"200": {
"description": "Synced server side run_uuid list",
"schema": {
'type': 'object',
'properties': {
'runs': {
'type': 'object',
'example': {
'active': ['0c112ffda506f10f9f793c0fb6d9de4b43595d03'],
'deleted': ['0c112ffda506f10f9f793c0fb6d9de4b43595d03'],
'unknown': ['0c112ffda506f10f9f793c0fb6d9de4b43595d03']
}
},
}
},
}
}
}
polling = {
"parameters": [
{
"name": "computer_uuid",
"in": "query",
"type": "string",
"required": "true",
"description": "0c112ffda506f10f9f793c0fb6d9de4b43595d03",
},
{
"name": "jobs",
"in": "body",
"type": "list",
"description": "Status of the jobs initiated by UI",
"example": [{'uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03', 'status': job.JobStatuses.SUCCESS},
{'uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03', 'status': job.JobStatuses.FAIL}]
}
],
"responses": {
"200": {
"description": "List of pending jobs",
"schema": {
'type': 'object',
'properties': {
'jobs': {
'type': 'list',
'example': [
{
'uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03',
'status': job.JobStatuses.INITIATED,
'created_time': '16234567',
'completed_time': None,
'method': job.JobMethods.START_TENSORBOARD,
'data': {'runs': ['0c112ffda506f10f9f793c0fb6d9de4b43595d03']}
}
]
}
}
},
}
}
}
start_tensor_board = {
"parameters": [
{
"name": "computer_uuid",
"in": "path",
"type": "string",
"required": "true",
"description": "0c112ffda506f10f9f793c0fb6d9de4b43595d03",
},
{
"name": "runs",
"in": "body",
"type": "list",
"description": "Set of runs to start TB. Note that all the runs should be from a same computer",
"example": ['0c112ffda506f10f9f793c0fb6d9de4b43595d03']
},
],
"responses": {
"200": {
"description": "job details with the response",
"schema": {
'type': 'object',
'example':
{
'uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03',
'status': job.JobStatuses.SUCCESS,
'created_time': '16234567',
'completed_time': '16234567',
'method': job.JobMethods.START_TENSORBOARD
}
},
}
}
}
clear_checkpoints = {
"parameters": [
{
"name": "computer_uuid",
"in": "path",
"type": "string",
"required": "true",
"description": "0c112ffda506f10f9f793c0fb6d9de4b43595d03",
},
{
"name": "runs",
"in": "body",
"type": "list",
"description": "Set of runs to clear checkpoints. Note that all the runs should be from same a computer",
"example": ['0c112ffda506f10f9f793c0fb6d9de4b43595d03']
},
],
"responses": {
"200": {
"description": "job details with the response",
"schema": {
'type': 'object',
'example':
{
'uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03',
'status': job.JobStatuses.SUCCESS,
'created_time': '16234567',
'completed_time': '16234567',
'method': job.JobMethods.START_TENSORBOARD
}
},
}
}
}
get_computer = {
"parameters": [
{
"name": "session_uuid",
"in": "path",
"type": "string",
"required": "true",
"description": "0c112ffda506f10f9f793c0fb6d9de4b43595d03",
},
],
"responses": {
"200": {
"description": "Synced server side run_uuid list",
"schema": {
'type': 'object',
'example': {
'sessions': ['0c112ffda506f10f9f793c0fb6d9de4b43595d03',
'0c112ffda506f10f9f793c0fb6d9de4b43595d03'
],
'session_uuid': '0c112ffda506f10f9f793c0fb6d9de4b43595d03',
}
},
}
}
}
|
unique_paths_ii/solution2.py | mahimadubey/leetcode-python | 528 | 12646684 | """
Follow up for "Unique Paths":
Now consider if some obstacles are added to the grids. How many unique paths
would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
For example,
There is one obstacle in the middle of a 3x3 grid as illustrated below.
[
[0,0,0],
[0,1,0],
[0,0,0]
]
The total number of unique paths is 2.
Note: m and n will be at most 100.
"""
class Solution:
# @param obstacleGrid, a list of lists of integers
# @return an integer
def uniquePathsWithObstacles(self, obstacleGrid):
n = len(obstacleGrid)
m = len(obstacleGrid[0])
t = [[-1 for i in range(m)] for j in range(n)]
return self.unique_paths(obstacleGrid, m - 1, n - 1, t)
def unique_paths(self, grid, x, y, t):
if x == 0 and y == 0:
t[y][x] = 1 if grid[y][x] == 0 else 0
return t[y][x]
elif grid[y][x] == 1:
t[y][x] = 0
return t[y][x]
elif t[y][x] != -1:
return t[y][x]
elif x > 0 and y == 0:
t[y][x] = self.unique_paths(grid, x - 1, y, t)
return t[y][x]
elif y > 0 and x == 0:
t[y][x] = self.unique_paths(grid, x, y - 1, t)
return t[y][x]
else:
a = self.unique_paths(grid, x - 1, y, t)
b = self.unique_paths(grid, x, y - 1, t)
t[y][x] = a + b
return t[y][x]
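# Example call (illustrative, not part of the original solution file):
#   >>> Solution().uniquePathsWithObstacles([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#   2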
|
cakechat/dialog_model/quality/logging.py | sketscripter/emotional-chatbot-cakechat | 1,608 | 12646701 | import csv
import os
from datetime import datetime
import pandas as pd
from cakechat.config import PREDICTION_MODE_FOR_TESTS, MAX_PREDICTIONS_LENGTH
from cakechat.dialog_model.inference import get_nn_responses
from cakechat.dialog_model.model_utils import transform_context_token_ids_to_sentences
from cakechat.dialog_model.quality import calculate_model_mean_perplexity, calculate_response_ngram_distinctness
from cakechat.utils.files_utils import ensure_dir
from cakechat.utils.logger import get_logger
_logger = get_logger(__name__)
def calculate_and_log_val_metrics(nn_model,
context_sensitive_val,
context_free_val,
prediction_mode=PREDICTION_MODE_FOR_TESTS,
calculate_ngram_distance=True):
metric_name_to_value = {
'context_free_perplexity': calculate_model_mean_perplexity(nn_model, context_free_val),
'context_sensitive_perplexity': calculate_model_mean_perplexity(nn_model, context_sensitive_val)
}
if calculate_ngram_distance:
for metric_name, ngram_len in [('unigram_distinctness', 1), ('bigram_distinctness', 2)]:
metric_name_to_value[metric_name] = calculate_response_ngram_distinctness(
context_sensitive_val.x,
nn_model,
ngram_len=ngram_len,
mode=prediction_mode,
condition_ids=context_sensitive_val.condition_ids)
for metric_name, metric_value in metric_name_to_value.items():
_logger.info('Val set {}: {:.3f}'.format(metric_name, metric_value))
return metric_name_to_value
def _init_csv_writer(predictions_path, output_seq_len, model_name):
with open(predictions_path, 'w', encoding='utf-8') as fh:
csv_writer = csv.writer(fh, delimiter='\t')
csv_writer.writerow([model_name])
csv_writer.writerow(['date: {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M'))])
csv_writer.writerow(['{} maximum tokens in the response'.format(output_seq_len)])
csv_writer.writerow(['']) # empty row for better readability
def log_predictions(predictions_path,
contexts_token_ids,
nn_model,
prediction_modes,
output_seq_len=MAX_PREDICTIONS_LENGTH,
**kwargs):
"""
Generate responses for provided contexts and save the results on the disk. For a given context
several responses will be generated - one for each mode from the prediction_modes list.
:param predictions_path: Generated responses will be saved to this file
:param contexts_token_ids: contexts token ids, numpy array of shape (batch_size, context_len, INPUT_SEQUENCE_LENGTH)
:param nn_model: instance of CakeChatModel class
:param prediction_modes: See PREDICTION_MODES for available options
:param output_seq_len: Max number of tokens in generated responses
"""
_logger.info('Logging responses for test set')
# Create all the directories for the prediction path in case they don't exist
ensure_dir(os.path.dirname(predictions_path))
_init_csv_writer(predictions_path, output_seq_len, nn_model.model_name)
contexts = transform_context_token_ids_to_sentences(contexts_token_ids, nn_model.index_to_token)
predictions_data = pd.DataFrame()
predictions_data['contexts'] = contexts
for prediction_mode in prediction_modes:
predicted_responses = get_nn_responses(contexts_token_ids, nn_model, prediction_mode, **kwargs)
# list of lists of strings, shape (contexts_num, 1)
predicted_responses = [response[0] for response in predicted_responses]
# list of strings, shape (contexts_num)
predictions_data[prediction_mode] = predicted_responses
predictions_data.to_csv(predictions_path, sep='\t', index=False, encoding='utf-8', mode='a', float_format='%.2f')
_logger.info('Dumped {} predicted responses to {}'.format(len(contexts), predictions_path))
|
modin/core/dataframe/algebra/default2pandas/any.py | Rubtsowa/modin | 7,258 | 12646702 | <filename>modin/core/dataframe/algebra/default2pandas/any.py
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
# FIXME: This whole module is duplicating the logic of `default.py` and should be removed.
"""Module houses default functions builder class."""
from .default import DefaultMethod
class ObjTypeDeterminer:
"""
Class that routes work to the frame.
Provides an instance which forwards all of the `__getattribute__` calls
to an object under which `key` function is applied.
"""
def __getattr__(self, key):
"""
Build function that executes `key` function over passed frame.
Parameters
----------
key : str
Returns
-------
callable
Function that takes DataFrame and executes `key` function on it.
"""
def func(df, *args, **kwargs):
"""Access specified attribute of the passed object and call it if it's callable."""
prop = getattr(df, key)
if callable(prop):
return prop(*args, **kwargs)
else:
return prop
return func
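# Illustrative sketch of how the routing works (assumption, not from the Modin test suite):
#   >>> func = ObjTypeDeterminer().sum
#   >>> func(pandas.DataFrame({"a": [1, 2]}))   # resolves to DataFrame.sum() at call time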
class AnyDefault(DefaultMethod):
"""Builder for default-to-pandas methods which can be executed under any type of object."""
@classmethod
def register(cls, func, obj_type=None, **kwargs):
"""
Build function that do fallback to default pandas implementation for passed `func`.
Parameters
----------
func : callable or str,
Function to apply to the casted to pandas frame.
obj_type : object, optional
If `func` is a string with a function name then `obj_type` provides an
object to search function in. If not specified `ObjTypeDeterminer` will be used.
**kwargs : kwargs
Additional parameters that will be used for building.
Returns
-------
callable
Function that takes query compiler, does fallback to pandas and applies `func`
to the casted to pandas frame.
"""
if obj_type is None:
obj_type = ObjTypeDeterminer()
return cls.call(func, obj_type=obj_type, **kwargs)
|
mozi/log.py | hycis/Mozi | 122 | 12646713 |
from datetime import datetime
import os
import sys
import logging
import cPickle
import sqlite3
import operator
import copy
import numpy as np
import theano
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
floatX = theano.config.floatX
class Log:
def __init__(self, experiment_name="experiment", description=None,
save_outputs=False, save_model=False,
save_epoch_error=False, save_to_database=None, logger=None):
self.experiment_name = experiment_name
self.description = description
self.save_outputs = save_outputs
self.save_model = save_model
self.save_epoch_error = save_epoch_error
self.save_to_database = save_to_database
dt = datetime.now()
dt = dt.strftime('%Y%m%d_%H%M_%S%f')
self.exp_id = experiment_name + '_' + dt
if save_outputs or save_model:
save_dir = os.environ['MOZI_SAVE_PATH']
if not os.path.exists(save_dir):
os.mkdir(save_dir)
self.exp_dir = save_dir + '/' + self.exp_id
if not os.path.exists(self.exp_dir):
os.mkdir(self.exp_dir)
self.logger = logger
if self.logger is None:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.info('exp_id: ' + experiment_name)
if save_outputs:
ch = logging.FileHandler(filename=self.exp_dir+'/outputs.log')
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
if save_epoch_error:
self.epoch_error_path = self.exp_dir+'/epoch_error.csv'
with open(self.epoch_error_path, 'wb') as epoch_file:
epoch_file.write('Epoch,Train_Cost,Valid_Cost,Valid_Error\n')
if description is not None:
self.logger.info('Description: ' + self.description)
if save_to_database:
self.first_time_record = True
if not os.path.exists(os.environ['MOZI_DATABASE_PATH']):
os.mkdir(os.environ['MOZI_DATABASE_PATH'])
def info(self, msg):
self.logger.info(msg)
def print_records(self):
sorted_ls = sorted(self.save_to_database['records'].iteritems(),
key=operator.itemgetter(0))
for key, value in sorted_ls:
self.info(key + ': ' + str(value))
def _log_outputs(self, outputs):
dt = datetime.now()
dt = dt.strftime('%Y-%m-%d %H:%M')
self.logger.info('Time: ' + dt)
for (name, val) in outputs:
self.logger.info(name + ': ' + str(val))
if self.save_outputs:
self.logger.info('[ outputs saved to: %s ]\n' %self.exp_id)
def _save_model(self, model):
with open(self.exp_dir+'/model.pkl', 'wb') as pkl_file:
cPickle.dump(model, pkl_file)
def _save_epoch_error(self, epoch, train_cost, valid_cost, valid_error):
with open(self.epoch_error_path, 'ab') as epoch_file:
epoch_file.write('{},{},{},{}\n'.format(epoch, train_cost, valid_cost, valid_error))
def _save_to_database(self, epoch, train_cost, valid_error, best_valid_error):
conn = sqlite3.connect(os.environ['MOZI_DATABASE_PATH'] + '/' + self.save_to_database['name'])
cur = conn.cursor()
if self.first_time_record:
query = 'CREATE TABLE IF NOT EXISTS ' + self.experiment_name + \
'(exp_id TEXT PRIMARY KEY NOT NULL,'
for k,v in self.save_to_database['records'].items():
if type(v) is str:
query += k + ' TEXT,'
elif type(v) is int:
query += k + ' INT,'
elif type(v) is float:
query += k + ' REAL,'
else:
try:
self.save_to_database['records'][k] = str(v)
query += str(k) + ' TEXT,'
except:
raise Exception("Error: The input types for records '{}' of {}".format(k, type(v))
+ " is not primitive types (str, int, float) and not castable as str.")
query += 'epoch INT, train_cost REAL, valid_error REAL, best_valid_error REAL);'
cur.execute(query)
try:
query = 'INSERT INTO ' + self.experiment_name + ' VALUES('
ls = [self.exp_id]
for k, v in self.save_to_database['records'].items():
query += '?,'
ls.append(v)
query += '?,?,?,?,?);'
ls.extend([epoch, train_cost, valid_error, best_valid_error])
cur.execute(query, ls)
self.first_time_record = False
except sqlite3.OperationalError as err:
self.logger.error('sqlite3.OperationalError: ' + err.message)
self.logger.error('Solution: Change the experiment_name in Log() to a new name, '
+ 'or drop the same table name from the database. '
+ 'experiment_name is used as the table name.')
raise
else:
cur.execute('UPDATE ' + self.experiment_name + ' SET ' +
'epoch = ?, ' +
'train_cost = ?,' +
'valid_error = ?,' +
'best_valid_error = ?' +
"WHERE exp_id='%s'"%self.exp_id,
[epoch,
train_cost,
valid_error,
best_valid_error])
conn.commit()
conn.close()
|
datafiles/__init__.py | colltoaction/datafiles | 151 | 12646716 | # pylint: disable=unused-import
from dataclasses import field
from . import converters, settings
from .decorators import auto, datafile
from .manager import Missing
from .model import Model
|
Python/58. length_of_last_word.py | nizD/LeetCode-Solutions | 263 | 12646717 | <filename>Python/58. length_of_last_word.py
'''
PROBLEM: Length of Last Word
Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word (last word means the last appearing word if we loop from left to right) in the string.
If the last word does not exist, return 0.
Note: A word is defined as a maximal substring consisting of non-space characters only.
Example:
Input: "<NAME>"
Output: 5
Problem link : https://leetcode.com/problems/length-of-last-word/
'''
'''
APPROACH -
We split the string into a list of words and return the length of the last word via negative indexing.
'''
class Solution:
def lengthOfLastWord(self, s: str) -> int:
a = s.split()
if (len(a)>=1):
return len(a[-1])
else:
return 0
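# Example call (illustrative, inputs are assumptions):
#   >>> Solution().lengthOfLastWord("a quick brown fox")
#   3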
|
Python3/583.py | rakhi2001/ecom7 | 854 | 12646726 | __________________________________________________________________________________________________
sample 208 ms submission
class Solution:
def minDistance(self, s: str, t: str) -> int:
"""Modified Wagner-Fischer algorithm"""
if len(s) < len(t):
s, t = t, s
pre = [None] * (len(t) + 1)
cur = list(range(len(pre)))
for i, sc in enumerate(s, 1):
pre, cur = cur, pre
cur[0] = i
for j, tc in enumerate(t, 1):
if sc == tc:
cur[j] = pre[j - 1]
else:
cur[j] = 1 + min(cur[j - 1], pre[j])
return cur[-1]
__________________________________________________________________________________________________
sample 13276 kb submission
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
n, m = len(word1), len(word2)
memory = [0 for j in range(m+1)]
for i in range(1, n+1):
temp = [0 for j in range(m+1)]
for j in range(1, m+1):
if word1[i-1] == word2[j-1]:
temp[j] = memory[j - 1] + 1
else:
temp[j] = max(memory[j], temp[j-1])
memory = temp
return n+m - 2*memory[m]
__________________________________________________________________________________________________
|
examples/FastAPI/data/database.py | Aliemeka/supabase-py | 181 | 12646732 | <reponame>Aliemeka/supabase-py
# variables for database and url configuration
from config import Config
from supabase import Client, create_client
class SupabaseDB:
"""
class instance for database connection to supabase
:str: url: configuration for database url for data inside supafast project
:str: key: configuration for database secret key for authentication
:object: supabase: Supabase instance for connection to database environment
"""
url: str = Config.URL
key: str = Config.KEY
supabase: Client = create_client(url, key)
|
sanic/constants.py | Varriount/sanic | 4,959 | 12646739 | from enum import Enum, auto
class HTTPMethod(str, Enum):
def _generate_next_value_(name, start, count, last_values):
return name.upper()
def __eq__(self, value: object) -> bool:
value = str(value).upper()
return super().__eq__(value)
def __hash__(self) -> int:
return hash(self.value)
def __str__(self) -> str:
return self.value
GET = auto()
POST = auto()
PUT = auto()
HEAD = auto()
OPTIONS = auto()
PATCH = auto()
DELETE = auto()
HTTP_METHODS = tuple(HTTPMethod.__members__.values())
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
|
templates/go/payloads/go_memorymodule.py | ohoph/Ebowla | 738 | 12646768 | <reponame>ohoph/Ebowla
imports=""
loader="""
//handle := C.MemoryLoadLibrary(unsafe.Pointer(&full_payload[0]),(C.size_t)(len(full_payload)))
handle := C.MemoryLoadLibraryEx(unsafe.Pointer(&full_payload[0]),
(C.size_t)(len(full_payload)),
(*[0]byte)(C.MemoryDefaultLoadLibrary), // loadLibrary func ptr
(*[0]byte)(C.MemoryDefaultGetProcAddress), // getProcAddress func ptr
(*[0]byte)(C.MemoryDefaultFreeLibrary), // freeLibrary func ptr
unsafe.Pointer(nil), // void *userdata (we're not passing any data to the dll or exe)
)
if handle == nil {
fmt.Println("MemoryLoadLibrary failed")
os.Exit(1)
}
//output := C.MemoryCallEntryPoint(handle)
_ = C.MemoryCallEntryPoint(handle)
//fmt.Println(output)
C.MemoryFreeLibrary(handle)
""" |
tests/test_data.py | jerryc05/python-progressbar | 806 | 12646774 | import pytest
import progressbar
@pytest.mark.parametrize('value,expected', [
(None, ' 0.0 B'),
(1, ' 1.0 B'),
(2 ** 10 - 1, '1023.0 B'),
(2 ** 10 + 0, ' 1.0 KiB'),
(2 ** 20, ' 1.0 MiB'),
(2 ** 30, ' 1.0 GiB'),
(2 ** 40, ' 1.0 TiB'),
(2 ** 50, ' 1.0 PiB'),
(2 ** 60, ' 1.0 EiB'),
(2 ** 70, ' 1.0 ZiB'),
(2 ** 80, ' 1.0 YiB'),
(2 ** 90, '1024.0 YiB'),
])
def test_data_size(value, expected):
widget = progressbar.DataSize()
assert widget(None, dict(value=value)) == expected
|
ahmia/ahmia/models.py | donno2048/ahmia-site | 185 | 12646781 | <gh_stars>100-1000
"""Models for the database of ahmia."""
import logging
from datetime import timedelta
from django.conf import settings
from django.db import models, DatabaseError
from django.utils import timezone
from . import utils
from .validators import validate_onion_url, validate_status, validate_onion
logger = logging.getLogger("ahmia")
class HiddenWebsite(models.Model):
"""Hidden service website."""
# For instance: http://3g2upl4pq6kufc4m.onion/
onion = models.URLField(validators=[validate_onion_url, validate_status],
unique=True)
def __str__(self):
return str(self.onion)
class PagePopScoreManager(models.Manager):
"""
Manager for PagePopScore Model
"""
def get_or_None(self, **kwargs):
"""
:param kwargs: same that would be given to get()
:return: the object found or None
"""
try:
return self.get(**kwargs)
except self.model.DoesNotExist:
return None
def get_score(self, **kwargs):
"""
Returns the score but handles the DoesNotExist case
returning None instead.
:param kwargs: the lookup attributes for get()
:rtype: float
"""
try:
return self.get(**kwargs).score
except self.model.DoesNotExist:
return None
class PagePopScore(models.Model):
"""
Note: This will be called by bulk create thus
save(), pre_save(), post_save() will not be called
"""
onion = models.URLField(
validators=[validate_onion, validate_status],
unique=True)
score = models.FloatField(
default=0,
verbose_name='PagePop score',
help_text='Score as returned by PagePop algorithm')
objects = PagePopScoreManager()
def __str__(self):
return "{0}: {1}".format(self.onion, self.score)
class PagePopStats(models.Model):
"""One entry/row is created by rank_pages command"""
day = models.DateField(default=utils.timezone_today, unique=True)
num_links = models.IntegerField(
null=True,
verbose_name='Number of Links',
help_text='Number of links in general')
num_edges = models.IntegerField(
null=True,
verbose_name='Number of Edges',
help_text='Number of Unique inter-domain Links')
num_nodes = models.IntegerField(
null=True,
verbose_name='Number of nodes',
help_text='Number of onion domains (nodes)')
def __str__(self):
return str(self.day)
# *** Statistics related models and managers following *** #
class MetricQuerySet(models.QuerySet):
"""Custom queryset to be used to filter SearchQueries per time"""
def today(self):
"""Used to count the daily number so far"""
utc = timezone.now()
today_start = utc.replace(hour=0, minute=0, second=0, microsecond=0)
return self.filter(updated__gte=today_start)
def month(self):
"""
Filter the queryset by looking up `settings.USAGE_STATS_DAYS`
(default 30) back
"""
utc = timezone.now()
oldest_utc = utc - timedelta(days=settings.USAGE_STATS_DAYS)
return self.filter(updated__gte=oldest_utc)
class MetricManager(models.Manager):
def get_queryset(self):
return MetricQuerySet(self.model, using=self._db)
def today(self):
return self.get_queryset().today()
def month(self):
return self.get_queryset().month()
def add_or_increment(self, **kwargs):
"""
Handles Metric table updates:
If object does not exist create it, else update the
counter (occurrences) of same instances in the table
:param kwargs: A Dict containing the attributes that identify the obj
:return the object that was created or updated
"""
try:
obj, created = self.get_or_create(**kwargs)
if not created:
obj.occurrences += 1
obj.save()
except DatabaseError as e:
logger.exception(e)
obj = None
# stats shouldn't disrupt website functionality
return obj
class Metric(models.Model):
"""Abstract base class for all Metric models"""
NETWORKS = (
('T', 'TOR'),
('I', 'I2P'),
)
updated = models.DateTimeField(default=timezone.now)
network = models.CharField(max_length=1, default='T', choices=NETWORKS)
occurrences = models.IntegerField(default=1)
objects = MetricManager()
class Meta:
abstract = True
class SearchQuery(Metric):
"""Used for search stastistics"""
search_term = models.CharField(max_length=64)
def __str__(self):
return self.search_term[:25]
class Meta:
unique_together = ('search_term', 'network')
class SearchResultsClick(Metric):
"""Used for clicks statistics"""
clicked = models.URLField()
onion_domain = models.URLField(validators=[validate_onion_url])
search_term = models.CharField(max_length=64)
def __str__(self):
return self.clicked[:50]
class Meta:
unique_together = ("clicked", "search_term", "onion_domain")
# todo Reconsider the current workflow: We recalculate Stats for
# the current day when `manage.py update_stats` is ran. Thus it
# ends up being redundant to keep *Stats tables in the DB?
class StatsQuerySet(models.QuerySet):
"""Custom queryset to be used to filter Stats per time"""
def month(self):
"""
Actually rather than looking into the current month (e.g April)
we filter back `settings.USAGE_STATS_DAYS` (default 30) days
"""
# todo can we merge with MetricManager.month - DRY ?
utc = timezone.now().date()
oldest_utc = utc - timedelta(days=settings.USAGE_STATS_DAYS)
return self.filter(day__gte=oldest_utc)
class Stats(models.Model):
"""
Abstract base class. Subclasses to be used for storing precalculated
statistics, computed by update_stats management command (app: stats)
"""
# horizontal axis: 30 last days (common for 4 plots)
day = models.DateField(unique=True, default=utils.timezone_today)
# Vertical axis: Metrics (4 plots)
num_queries = models.IntegerField(default=0)
num_unique_queries = models.IntegerField(default=0)
num_clicks = models.IntegerField(default=0)
num_unique_clicks = models.IntegerField(default=0)
objects = StatsQuerySet.as_manager()
class Meta:
abstract = True
ordering = ['day']
class TorStats(Stats):
def __str__(self):
return str("Tor stats: %s" % self.day)
class I2PStats(Stats):
def __str__(self):
return str("I2P stats: %s" % self.day)
|
bigflow_python/python/bigflow/util/test/path_util_test.py | tushushu/bigflow | 1,236 | 12646787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2017 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
"""
Unit test for utils
Author: <NAME>(<EMAIL>)
"""
import unittest
from bigflow.util import path_util
class UtilsTestCase(unittest.TestCase):
def test_extract_namenode(self):
# accepte path
accepte_paths = [
"hdfs://host:port/a/b/c",
"hdfs://fs/a/b/c",
"hdfs:///a/b/c",
"hdfs:////tmp",
]
except_results = [
"hdfs://host:port",
"hdfs://fs",
"hdfs://",
"hdfs://",
]
for idx, path in enumerate(accepte_paths):
ret = path_util.extract_namenode(path)
self.assertEqual(ret, except_results[idx])
# wrong path
wrong_paths = [
"hdfs:/host:port/a/b/c",
"hdfs:/fs/a/b/c",
"xhdfs:///tmp",
]
for path in wrong_paths:
ret = path_util.extract_namenode(path)
self.assertEqual(ret, None)
def test_can_be_archive_path(self):
input_paths = ["a.tgz", "a.tar", "a.tar.gz", "a.zip", "a.qp", "a.jar", "a.xx"]
expected = [True, True, True, True, True, True, False]
for path, expect in zip(input_paths, expected):
self.assertEqual(path_util.can_be_archive_path(path), expect)
def test_rm_r_path(self):
import os
import uuid
        # delete a path that does not exist
path_util.rm_rf("/path/not/exist")
self.assertFalse(os.path.exists("/path/not/exist"))
# delete single file
tmp_path = os.path.join('.', str(uuid.uuid4()))
with open(tmp_path, "w") as fp:
pass
self.assertTrue(os.path.exists(tmp_path))
path_util.rm_rf(tmp_path)
self.assertFalse(os.path.exists(tmp_path))
# delete empty directory
tmp_path = os.path.join('.', str(uuid.uuid4()))
os.mkdir(tmp_path)
path_util.rm_rf(tmp_path)
self.assertFalse(os.path.exists(tmp_path))
# delete link path
tmp_path = os.path.join('.', str(uuid.uuid4()))
os.mkdir(tmp_path)
os.symlink(tmp_path, ".soft_link")
path_util.rm_rf(".soft_link")
self.assertFalse(os.path.exists(".soft_link"))
self.assertTrue(os.path.exists(tmp_path))
os.rmdir(tmp_path)
tmp_path = os.path.join('.', str(uuid.uuid4()))
with open(tmp_path, "w") as fp:
pass
os.link(tmp_path, ".hard_link")
path_util.rm_rf(".hard_link")
self.assertFalse(os.path.exists(".hard_link"))
self.assertTrue(os.path.exists(tmp_path))
path_util.rm_rf(tmp_path)
self.assertFalse(os.path.exists(tmp_path))
# delete dir contains file
tmp_path = os.path.join('.', str(uuid.uuid4()))
os.mkdir(tmp_path)
another_path = os.path.join(tmp_path, str(uuid.uuid4()))
with open(another_path, "w") as fp:
pass
self.assertTrue(os.path.exists(tmp_path))
path_util.rm_rf(tmp_path)
self.assertFalse(os.path.exists(tmp_path))
if __name__ == "__main__":
unittest.main()
|
homeassistant/components/citybikes/__init__.py | domwillcode/home-assistant | 30,023 | 12646792 | <reponame>domwillcode/home-assistant
"""The citybikes component."""
|
iceoryx_integrationtest/iceoryx_integrationtest/test_complexdata_example.py | ijnek/iceoryx | 560 | 12646834 | <reponame>ijnek/iceoryx<gh_stars>100-1000
# Copyright (c) 2021 by Apex.AI Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import unittest
import launch
from launch_ros.substitutions import ExecutableInPackage
import launch_testing
import launch_testing.actions
from launch_testing.asserts import assertSequentialStdout
import pytest
# @brief Test goal: "Integrationtest for the complexdata example of iceoryx"
# @pre setup ROS2 launch executables for RouDi (debug mode) and the example processes
# @post check if all applications return exitcode 0 (success) after test run
@pytest.mark.launch_test
def generate_test_description():
proc_env = os.environ.copy()
colcon_prefix_path = os.environ.get('COLCON_PREFIX_PATH', '')
executable_list = ['iox-cpp-publisher-vector', 'iox-cpp-subscriber-vector',
'iox-cpp-publisher-complexdata', 'iox-cpp-subscriber-complexdata']
process_list = []
for exec in executable_list:
tmp_exec = os.path.join(
colcon_prefix_path,
'example_complexdata/bin/',
exec)
tmp_process = launch.actions.ExecuteProcess(
cmd=[tmp_exec],
env=proc_env, output='screen')
process_list.append(tmp_process)
print("Process list:", process_list)
roudi_executable = os.path.join(
colcon_prefix_path,
'iceoryx_posh/bin/',
'iox-roudi'
)
roudi_process = launch.actions.ExecuteProcess(
cmd=[roudi_executable, '-l', 'debug'],
env=proc_env, output='screen',
sigterm_timeout='20')
return launch.LaunchDescription([
process_list[0],
process_list[1],
process_list[2],
process_list[3],
roudi_process,
launch_testing.actions.ReadyToTest()
]), {'iox-cpp-publisher-vector': process_list[0], 'iox-cpp-subscriber-vector': process_list[1],
'iox-cpp-publisher-complexdata': process_list[2], 'iox-cpp-subscriber-complexdata': process_list[3],
'roudi_process': roudi_process}
# These tests will run concurrently with the dut process. After this test is done,
# the launch system will shut down RouDi
class TestComplexDataExample(unittest.TestCase):
def test_roudi_ready(self, proc_output):
proc_output.assertWaitFor(
'RouDi is ready for clients', timeout=45, stream='stdout')
def test_publisher_subscriber_data_exchange(self, proc_output):
proc_output.assertWaitFor(
'iox-cpp-subscriber-vector got values: 15, 16, 17, 18, 19', timeout=45, stream='stdout')
def test_publisher_subscriber_untyped_data_exchange(self, proc_output):
proc_output.assertWaitFor(
'iox-cpp-subscriber-complexdata got values:\nstringForwardList: hello, world\nintegerList: 15, 22, 11\noptionalList: optional is empty, 42\nfloatStack: 44, 33, 22, 11, 0\nsomeString: hello iceoryx\ndoubleVector: 11, 12, 13, 14, 15\nvariantVector: seven, 8, nine',
timeout=45, stream='stdout')
# These tests run after shutdown and examine the stdout log
@launch_testing.post_shutdown_test()
class TestComplexdataExampleExitCodes(unittest.TestCase):
def test_exit_code(self, proc_info):
launch_testing.asserts.assertExitCodes(proc_info)
|
endpoints/appr/test/test_digest_prefix.py | ibazulic/quay | 2,027 | 12646854 | import pytest
from endpoints.appr.models_cnr import _strip_sha256_header
@pytest.mark.parametrize(
"digest,expected",
[
(
"sha256:251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a",
"251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a",
),
(
"251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a",
"251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a",
),
],
)
def test_stip_sha256(digest, expected):
assert _strip_sha256_header(digest) == expected
|
src/genie/libs/parser/nxos/show_fabricpath.py | balmasea/genieparser | 204 | 12646860 | <gh_stars>100-1000
# Python (this imports the Python re module for RegEx)
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Or, Optional
from genie.libs.parser.utils.common import Common
# ==============================
# Schema for 'show fabricpath isis adjacency'
# ==============================
class ShowFabricpathIsisAdjacencySchema(MetaParser):
''' Schema for "show fabricpath isis adjacency" '''
# These are the key-value pairs to add to the parsed dictionary
# schema = {
# Any():
# {'adj-hold-time-out': str,
# 'adj-intf-name-out': str,
# 'adj-sys-name-out': str,
# 'adj-state-out':str}
# }
schema = {
'domain': {
Any(): {
Optional('interfaces'): {
Any(): {
'system_id': str,
'snpa': str,
'level': int,
'state': str,
'hold_time': str,
}
}
}
}
}
# ==============================
# Parser for 'show fabricpath isis adjacency'
# ==============================
# The parser class inherits from the schema class
class ShowFabricpathIsisAdjacency(ShowFabricpathIsisAdjacencySchema):
''' Parser for "show fabricpath isis adjacency"'''
cli_command = 'show fabricpath isis adjacency'
# Defines a function to run the cli_command
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Initializes the Python dictionary variable
parsed_dict = {}
# Defines the regex for the first line of device output, which is:
        # Fabricpath IS-IS domain: default Fabricpath IS-IS adjacency database:
p1 = re.compile(r'Fabricpath IS-IS domain: +(?P<domain>(\S+)) +Fabricpath IS-IS adjacency database:$')
# Defines the regex for the next line of device output, which is:
# System ID SNPA Level State Hold Time Interface
# Switch-A N/A 1 UP 00:00:28 port-channel1
p2 = re.compile(
r'(?P<system_id>(\S+)) + (?P<snpa>(\S+)) + (?P<level>(\d+)) +(?P<state>(UP|DOWN)) + (?P<hold_time>(\S+)) + (?P<interface>(\S+))$')
for line in out.splitlines():
line = line.strip()
            # Fabricpath IS-IS domain: default Fabricpath IS-IS adjacency database:
m = p1.match(line)
if m:
group = m.groupdict()
domain = group['domain']
intf_dict = parsed_dict.setdefault('domain', {}). \
setdefault(domain, {})
m = p2.match(line)
if m:
group = m.groupdict()
system_id = group['system_id']
snpa = group['snpa']
level = int(group['level'])
state = group['state']
hold_time = group['hold_time']
interface = Common.convert_intf_name(group['interface'])
level_dict = intf_dict.setdefault('interfaces', {}).setdefault(interface, {})
level_dict.update({'system_id': system_id})
level_dict.update({'snpa': snpa})
level_dict.update({'level': level})
level_dict.update({'state': state})
level_dict.update({'hold_time': hold_time})
return parsed_dict |
neural_compressor/ux/components/db_manager/db_manager.py | intel/neural-compressor | 172 | 12646919 | <reponame>intel/neural-compressor
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""INC Bench database manager."""
import logging
import os
from typing import Any, Optional
from sqlalchemy import MetaData, create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import declarative_base
from neural_compressor.ux.utils.consts import WORKDIR_LOCATION
from neural_compressor.ux.utils.logger import log
from neural_compressor.ux.utils.singleton import Singleton
naming_convention = {
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
meta = MetaData(naming_convention=naming_convention)
Base: Any = declarative_base(metadata=meta)
class DBManager(metaclass=Singleton):
"""Database manager class."""
def __init__(self, database_location: Optional[str] = None, log_level: Optional[int] = None):
"""Initialize database manager."""
self._engine: Optional[Engine] = None
self.database_location: str = os.path.join(WORKDIR_LOCATION, "bench.db")
self.debug: bool = False
self.dialect: str = "sqlite"
if database_location is not None:
self.database_location = database_location
self.database_entrypoint = f"{self.dialect}:///{self.database_location}"
if log_level == logging.DEBUG:
self.debug = True
def initialize_database(self) -> None:
"""Initialize database by creating engine and session."""
self.create_sqlalchemy_engine()
def create_sqlalchemy_engine(self) -> Engine:
"""Create SQLAlchemy engine."""
log.debug(f"Making engine with database: {self.database_entrypoint}")
return create_engine(self.database_entrypoint, echo=self.debug)
@property
def engine(self) -> Engine:
"""Ensure that SQLAlchemy engine is created."""
is_engine_instance = isinstance(self._engine, Engine)
if not is_engine_instance:
self._engine = self.create_sqlalchemy_engine()
return self._engine # type: ignore
def create_all(self) -> None:
"""Make a call to database to create all tables."""
log.debug("Creating connection")
connection = self.engine.connect()
try:
log.debug("Creating all")
Base.metadata.create_all(self.engine)
finally:
connection.close()
|
torchelie/models/attention.py | maxferrari/Torchelie | 117 | 12646941 | <reponame>maxferrari/Torchelie
import torch
import torchelie.utils as tu
import torch.nn as nn
import torchelie.nn as tnn
from torchelie.models import ClassificationHead
from typing import Optional
from collections import OrderedDict
Block = tnn.PreactResBlockBottleneck
class UBlock(nn.Module):
inner: Optional[nn.Module]
skip: Optional[nn.Module]
encode: nn.Module
@tu.experimental
def __init__(self,
ch: int,
inner: Optional[nn.Module],
with_skip: bool = True) -> None:
super(UBlock, self).__init__()
self.inner = inner
if with_skip and inner is not None:
self.skip = Block(ch, ch)
else:
self.skip = None
self.encode = tnn.CondSeq(nn.MaxPool2d(3, 1, 1),
nn.UpsamplingBilinear2d(scale_factor=0.5),
Block(ch, ch))
self.decode = tnn.CondSeq(Block(ch, ch),
nn.UpsamplingBilinear2d(scale_factor=2))
def forward(self, x: torch.Tensor) -> torch.Tensor:
e = self.encode(x)
if self.inner is not None:
e2 = self.inner(e)
else:
e2 = e
if self.skip is not None:
e2 += self.skip(e)
return self.decode(e2)
class UBlock1(nn.Module):
@tu.experimental
def __init__(self, ch):
super(UBlock1, self).__init__()
self.inner = tnn.CondSeq(nn.MaxPool2d(3, 1, 1),
nn.UpsamplingBilinear2d(scale_factor=0.5),
Block(ch, ch),
nn.UpsamplingBilinear2d(scale_factor=2))
def forward(self, x):
return self.inner(x)
class AttentionBlock(nn.Module):
mask: Optional[tnn.CondSeq]
def __init__(self,
ch: int,
n_down: int,
n_trunk: int = 2,
n_post: int = 1,
n_pre: int = 1,
n_att_conv: int = 2,
with_skips: bool = True) -> None:
super(AttentionBlock, self).__init__()
self.pre = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_pre)])
self.post = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_post)])
self.trunk = tnn.CondSeq(*[Block(ch, ch) for _ in range(n_trunk)])
soft: nn.Module = UBlock1(ch)
for _ in range(n_down - 1):
soft = UBlock(ch, soft, with_skip=with_skips)
if n_down >= 0:
conv1 = [soft]
for i in range(n_att_conv):
conv1 += [
nn.BatchNorm2d(ch),
nn.ReLU(True),
tu.kaiming(tnn.Conv1x1(ch, ch, bias=(i != n_att_conv - 1)))
]
conv1.append(nn.Sigmoid())
self.mask = tnn.CondSeq(*conv1)
else:
self.mask = None
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.pre(x)
t = self.trunk(x)
if self.mask is not None:
t = t * (self.mask(x) + 1)
return self.post(t)
class Attention56Bone(tnn.CondSeq):
"""
Attention56 bone
Args:
in_ch (int): number of channels in the images
"""
@tu.experimental
def __init__(self, num_classes: int) -> None:
super(Attention56Bone, self).__init__(
OrderedDict([
('head',
tnn.CondSeq(tu.kaiming(tnn.Conv2d(3, 64, 7, stride=2)),
nn.ReLU(True), nn.MaxPool2d(3, 2, 1))),
('pre1', Block(64, 256)), ('attn1', AttentionBlock(256, 3)),
('pre2', Block(256, 512, stride=2)),
('attn2', AttentionBlock(512, 2)),
('pre3', Block(512, 1024, stride=2)),
('attn3', AttentionBlock(1024, 1)),
('pre4',
tnn.CondSeq(
Block(1024, 2048, stride=2),
Block(2048, 2048),
Block(2048, 2048),
)),
('classifier', ClassificationHead(2048, num_classes))
]))
@tu.experimental
def attention56(num_classes):
"""
Build a attention56 network
Args:
num_classes (int): number of classes
in_ch (int): number of channels in the images
"""
return Attention56Bone(num_classes)
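# Illustrative usage sketch (assumptions: 10 classes, 224x224 RGB input; not part of the original module):
#   >>> net = attention56(10)
#   >>> out = net(torch.randn(1, 3, 224, 224))   # classification logits, expected shape (1, 10)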
|
data_wrangling/get_song_lyrics.py | eutychius/encore.ai | 314 | 12646969 | from bs4 import BeautifulSoup
import urllib2
import os
from random import random
import time
import sys
BASE_URL = 'http://www.azlyrics.com/'
def download_lyrics(artist, url):
print url
time.sleep(random() + 2)
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, 'html.parser')
# Get the song title
song_title = soup.find('title').get_text().split(' - ')[1].lower().replace('/', ' ').replace(' ', '_')
# Get the lyrics div
lyrics = soup.findAll('div', {'class': ''})
for i in lyrics:
lyrics = i.get_text().strip()
if len(lyrics) > 10:
with open('artists/' + artist + '/' + song_title + '.txt', 'wb') as w:
cleaned_lyrics = lyrics.replace('\r\n', ' *BREAK* ').replace('\n', ' *BREAK* ').replace(' ', ' ')
w.write(cleaned_lyrics.encode('utf-8'))
def download_all_lyrics(artist):
if not os.path.exists('artists/'+artist):
os.mkdir('artists/'+artist)
with open('artist_data/'+artist+'.txt', 'r') as songs:
for song in songs.readlines():
url = BASE_URL + song[2:].strip()
download_lyrics(artist, url)
artist = sys.argv[1]
download_all_lyrics(artist)
|
spectral/database/usgs.py | wwlswj/spectral | 398 | 12646970 | <gh_stars>100-1000
'''
Code for reading and managing USGS spectral library data.
References:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>.,
2017, USGS Spectral Library Version 7: U.S. Geological Survey Data Series 1035,
61 p., https://doi.org/10.3133/ds1035.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
from spectral.utilities.python23 import IS_PYTHON3, tobytes, frombytes
from .spectral_database import SpectralDatabase
import re
import logging
import sqlite3
import array
if IS_PYTHON3:
def readline(fin): return fin.readline()
def open_file(filename): return open(filename, encoding='iso-8859-1')
else:
def readline(fin): return fin.readline().decode('iso-8859-1')
def open_file(filename): return open(filename)
table_schemas = [
'CREATE TABLE Samples (SampleID INTEGER PRIMARY KEY, LibName TEXT, Record INTEGER, '
'Description TEXT, Spectrometer TEXT, Purity TEXT, MeasurementType TEXT, Chapter TEXT, FileName TEXT, '
'AssumedWLSpmeterDataID INTEGER, '
'NumValues INTEGER, MinValue FLOAT, MaxValue FLOAT, ValuesArray BLOB)',
'CREATE TABLE SpectrometerData (SpectrometerDataID INTEGER PRIMARY KEY, LibName TEXT, '
'Record INTEGER, MeasurementType TEXT, Unit TEXT, Name TEXT, Description TEXT, FileName TEXT, '
'NumValues INTEGER, MinValue FLOAT, MaxValue FLOAT, ValuesArray BLOB)'
]
arraytypecode = chr(ord('f'))
def array_from_blob(blob):
a = array.array(arraytypecode)
frombytes(a, blob)
return a
def array_to_blob(arr):
return sqlite3.Binary(tobytes(array.array(arraytypecode, arr)))
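# Example round-trip (values are illustrative):
#   array_from_blob(array_to_blob([1.0, 2.0])) == array.array('f', [1.0, 2.0])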
# These are not all strictly spectrometer names, but they serve as such for matching.
_spectrometer_names = {
'ASD': ['ASD'],
'ASTER': ['ASTER'],
'AVIRIS': ['AVIRIS', 'aviris'],
'BECK': ['BECK'],
'CRISM JOINED MTR3': ['CRISM Bandpass(FWHM) JOINED MTR3', 'CRISM Waves JOINED MTR3', 'CRISM Bandpass JOINED MTR3', 'CRISM JOINED MTR3'],
'CRISM GLOBAL': ['CRISM Bandpass(FWHM) GLOBAL', 'CRISM Wavelengths GLOBAL', 'CRISM Waves GLOBAL', 'CRISM GLOBAL'],
'Hyperion': ['Hyperion'],
'HyMap2': ['HyMap2'],
'Landsat8': ['Landsat8'],
'M3': ['M3'],
'NIC4': ['NIC4'],
'Sentinel2': ['Sentinel2', 'Sentinel-2'],
'VIMS': ['VIMS'],
'WorldView3': ['WorldView3']
}
class SpectrometerData:
'''
    Holds data for a spectrometer from the USGS spectral library.
'''
def __init__(self, libname, record, measurement_type, unit, spectrometer_name,
description, file_name, values):
self.libname = libname
self.record = record
self.measurement_type = measurement_type
self.unit = unit
self.spectrometer_name = spectrometer_name
self.description = description
self.file_name = file_name
self.values = values
def header(self):
'''
Returns:
String representation of basic meta data.
'''
        return '{0} Record={1}: {2} {3} {4}'.format(self.libname, self.record,
                                                    self.measurement_type,
                                                    self.spectrometer_name,
                                                    self.description)
    @classmethod
def read_from_file(cls, filename):
'''
Constructs SpectrometerData from file.
Arguments:
`filename` (str):
Path to file containing data.
Returns:
A `SpectrometerData` constructed from data parsed from file.
'''
import os
logger = logging.getLogger('spectral')
with open_file(filename) as f:
header_line = readline(f)
if not header_line:
raise Exception(
'{0} has empty header line or no lines at all.'.format(filename))
libname, record, measurement_type, unit, spectrometer_name, description = \
SpectrometerData._parse_header(header_line.strip())
values = []
for line in f:
if not line:
break
try:
values.append(float(line.strip()))
except:
logger.error('In file %s found unparsable line.', filename)
file_name = os.path.basename(filename)
return cls(libname, record, measurement_type, unit, spectrometer_name, description, file_name, values)
@staticmethod
def _find_spectrometer_name(header_line):
for sname, alt_names in _spectrometer_names.items():
for alt_name in alt_names:
if alt_name in header_line:
return sname
raise Exception(
'Could not find spectrometer for header {0}'.format(header_line))
@staticmethod
def _assume_measurement_type(header_line):
header_line = header_line.lower()
# The order of checking these things is important.
if 'wavelength' in header_line or 'waves' in header_line:
return 'Wavelengths'
if 'bandpass' in header_line or 'fwhm' in header_line or 'bandwidths' in header_line:
return 'Bandpass'
if 'resolution' in header_line:
return 'Resolution'
if 'wavenumber' in header_line:
return 'Wavenumber'
if 'srf' in header_line:
return 'SRF'
raise Exception(
'Could not assume measurement type for header line {0}'.format(header_line))
    @staticmethod
def _assume_unit(header_line, measurement_type):
if measurement_type == 'Wavelengths' or measurement_type == 'Bandpass' or measurement_type == 'Resolution':
if re.search(r'\bnm\b', header_line) is not None:
return 'nanometer'
if 'nanometer' in header_line:
return 'nanometer'
# 'um', 'microns' are usually found in these files, but this is default
# anyway.
return 'micrometer'
elif measurement_type == 'Wavenumber':
return 'cm^-1'
elif measurement_type == 'SRF':
return 'none'
else:
return 'unknown'
    @staticmethod
def _parse_header(header_line):
        # This data is difficult to parse: fields are separated by spaces, but
        # what should be a single datum can itself contain spaces, so only a
        # human can get it completely right.
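        # A header line looks roughly like this (illustrative):
        #   'splib07b Record=1234: ASD Wavelengths (micrometers)'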
elements = header_line.split()
libname = elements[0]
# From 'Record=1234:' extract 1234.
record = int(elements[1].split('=')[1][:-1])
# Join everything after record into description.
description = ' '.join(elements[2:])
measurement_type = SpectrometerData._assume_measurement_type(
header_line)
unit = SpectrometerData._assume_unit(header_line, measurement_type)
spectrometer_name = SpectrometerData._find_spectrometer_name(
header_line)
return libname, record, measurement_type, unit, spectrometer_name, description
class SampleData:
'''
    Holds parsed data for a single sample from the USGS spectral library.
'''
def __init__(self, libname=None, record=None, description=None, spectrometer=None,
purity=None, measurement_type=None, chapter=None, file_name=None, values=None):
self.libname = libname
self.record = record
self.description = description
self.spectrometer = spectrometer
self.purity = purity
self.measurement_type = measurement_type
self.chapter = chapter
self.file_name = file_name
self.values = values
def header(self):
'''
Returns:
String representation of basic meta data.
'''
return '{0} Record={1}: {2} {3}{4} {5}'.format(self.libname, self.record,
self.description, self.spectrometer,
self.purity, self.measurement_type)
@staticmethod
def _parse_header(header_line):
elements = header_line.split()
libname = elements[0]
# From 'Record=1234:' extract 1234.
record = int(elements[1].split('=')[1][:-1])
# Join everything after record into description.
description = ' '.join(elements[2:])
# Split 'AVIRIS13aa' into ['', 'AVIRIS13', 'aa', ''].
smpurity = re.split('([A-Z0-9]+)([a-z]+)', elements[-2])
        # There is a case with capital letters like 'NIC4AA'
if len(smpurity) == 1:
smpurity = re.split('([A-Z]+[0-9])([A-Z]+)', elements[-2])
smpurity[2] = smpurity[2].lower()
spectrometer = smpurity[1]
purity = smpurity[2]
measurement_type = elements[-1]
return libname, record, description, spectrometer, purity, measurement_type
@classmethod
def read_from_file(cls, filename, chapter=None):
'''
Constructs SampleData from file.
Arguments:
`filename` (str):
Path to file containing data.
Returns:
A `SampleData` constructed from data parsed from file.
'''
import os
logger = logging.getLogger('spectral')
with open(filename) as f:
header_line = f.readline()
if not header_line:
raise Exception(
'{0} has empty header line or no lines at all.'.format(filename))
libname, record, description, spectrometer, purity, measurement_type = \
SampleData._parse_header(header_line.strip())
values = []
for line in f:
if not line:
break
try:
values.append(float(line.strip()))
except:
logger.error('In file %s found unparsable line.', filename)
file_name = os.path.basename(filename)
return cls(libname, record, description, spectrometer, purity,
measurement_type, chapter, file_name, values)
class USGSDatabase(SpectralDatabase):
'''A relational database to manage USGS spectral library data.'''
schemas = table_schemas
def _assume_wavelength_spectrometer_data_id(self, sampleData):
# We can't know this for sure, but these heuristics haven't failed so far.
        # Prepare parameters.
# These parameters are mandatory to match.
libname = sampleData.libname
num_values = len(sampleData.values)
# Spectrometer might not match in subdirectories where data is convolved
        # or resampled. In other directories, without the spectrometer there
        # are only a few possible choices, so the spectrometer isolates the
        # one we need.
spectrometer = sampleData.spectrometer
logger = logging.getLogger('spectral')
# Start with the most specific.
query = '''SELECT SpectrometerDataID FROM SpectrometerData WHERE
MeasurementType = 'Wavelengths' AND LibName = ? AND NumValues = ?
AND Name = ?'''
result = self.cursor.execute(
query, (libname, num_values, spectrometer))
rows = result.fetchall()
if len(rows) == 0:
query = '''SELECT SpectrometerDataID FROM SpectrometerData WHERE
MeasurementType = 'Wavelengths' AND LibName = ? AND NumValues = ?
AND Name LIKE ?'''
result = self.cursor.execute(
# ASDFR -> ASD, and '%' just to be sure.
query, (libname, num_values, spectrometer[:3] + '%'))
rows = result.fetchall()
if len(rows) >= 1:
if len(rows) > 1:
logger.warning('Found multiple spectrometers with measurement_type Wavelengths, '
' LibName %s, NumValues %d and Name %s', libname, num_values, spectrometer)
return rows[0][0]
# Try to be less specific without spectrometer name.
query = '''SELECT SpectrometerDataID FROM SpectrometerData WHERE
MeasurementType = 'Wavelengths' AND LibName = ? AND NumValues = ?'''
result = self.cursor.execute(query, (libname, num_values))
rows = result.fetchall()
if len(rows) < 1:
raise Exception('Wavelengths for spectrometer not found, for LibName = {0} and NumValues = {1}, from file {2}'.format(
libname, num_values, sampleData.file_name))
if len(rows) > 1:
logger.warning('Found multiple spectrometers with measurement_type Wavelengths, '
' LibName %s and NumValues %d, from file %s', libname, num_values, sampleData.file_name)
return rows[0][0]
def _add_sample_data(self, spdata):
sql = '''INSERT INTO Samples (LibName, Record,
Description, Spectrometer, Purity, MeasurementType, Chapter, FileName,
AssumedWLSpmeterDataID,
NumValues, MinValue, MaxValue, ValuesArray)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
values = array_to_blob(spdata.values)
num_values = len(spdata.values)
min_value = min(spdata.values)
max_value = max(spdata.values)
assumedWLSpmeterDataID = self._assume_wavelength_spectrometer_data_id(spdata)
self.cursor.execute(sql, (spdata.libname, spdata.record, spdata.description,
spdata.spectrometer, spdata.purity, spdata.measurement_type,
spdata.chapter, spdata.file_name, assumedWLSpmeterDataID,
num_values, min_value, max_value, values))
rowId = self.cursor.lastrowid
self.db.commit()
return rowId
def _add_spectrometer_data(self, spdata):
sql = '''INSERT INTO SpectrometerData (LibName, Record, MeasurementType, Unit,
Name, Description, FileName, NumValues, MinValue, MaxValue, ValuesArray)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
values = array_to_blob(spdata.values)
num_values = len(spdata.values)
min_value = min(spdata.values)
max_value = max(spdata.values)
self.cursor.execute(
sql, (spdata.libname, spdata.record, spdata.measurement_type, spdata.unit,
spdata.spectrometer_name, spdata.description,
spdata.file_name, num_values, min_value, max_value, values))
rowId = self.cursor.lastrowid
self.db.commit()
return rowId
@classmethod
def create(cls, filename, usgs_data_dir=None):
'''Creates an USGS relational database by parsing USGS data files.
Arguments:
`filename` (str):
Name of the new sqlite database file to create.
`usgs_data_dir` (str):
Path to the USGS ASCII data directory. This directory should
                contain subdirectories, which contain chapter directories.
E.g. if provided `usgs_data_dir` is '/home/user/usgs/ASCIIdata',
then relative path to single sample could be
'ASCIIdata_splib07b/ChapterL_Liquids/splib07b_H2O-Ice_GDS136_77K_BECKa_AREF.txt'
If this argument is not provided, no data will be imported.
Returns:
An :class:`~spectral.database.USGSDatabase` object.
Example::
>>> USGSDatabase.create("usgs_lib.db", "/home/user/usgs/ASCIIdata")
This is a class method (it does not require instantiating an
USGSDatabase object) that creates a new database by parsing files in the
USGS library ASCIIdata directory. Normally, this should only
need to be called once. Subsequently, a corresponding database object
can be created by instantiating a new USGSDatabase object with the
        path to the database file as its argument. For example::
>>> from spectral.database.usgs import USGSDatabase
>>> db = USGSDatabase("usgs_lib.db")
'''
import os
if os.path.isfile(filename):
raise Exception('Error: Specified file already exists.')
db = cls()
db._connect(filename)
for schema in cls.schemas:
db.cursor.execute(schema)
if usgs_data_dir:
db._import_files(usgs_data_dir)
return db
def __init__(self, sqlite_filename=None):
'''Creates a database object to interface an existing database.
Arguments:
`sqlite_filename` (str):
Name of the database file. If this argument is not provided,
an interface to a database file will not be established.
Returns:
An :class:`~spectral.USGSDatabase` connected to the database.
'''
from spectral.io.spyfile import find_file_path
if sqlite_filename:
self._connect(find_file_path(sqlite_filename))
else:
self.db = None
self.cursor = None
def _import_files(self, data_dir):
from glob import glob
import numpy
import os
logger = logging.getLogger('spectral')
if not os.path.isdir(data_dir):
raise Exception('Error: Invalid directory name specified.')
num_sample_files = 0
num_spectrometer_files = 0
num_failed_sample_files = 0
        num_failed_spectrometer_files = 0
for sublib in os.listdir(data_dir):
sublib_dir = os.path.join(data_dir, sublib)
if not os.path.isdir(sublib_dir):
continue
# Process instrument data one by one.
for f in glob(sublib_dir + '/*.txt'):
logger.info('Importing spectrometer file %s', f)
try:
spdata = SpectrometerData.read_from_file(f)
self._add_spectrometer_data(spdata)
num_spectrometer_files += 1
except Exception as e:
logger.error('Failed to import spectrometer file %s', f)
logger.error(e)
                    num_failed_spectrometer_files += 1
# Go into each chapter directory and process individual samples.
for chapter in os.listdir(sublib_dir):
# Skip errorbars directory. Maybe add support for parsing it later.
if chapter == 'errorbars':
continue
chapter_dir = os.path.join(sublib_dir, chapter)
if not os.path.isdir(chapter_dir):
continue
for f in glob(chapter_dir + '/*.txt'):
logger.info('Importing sample file %s', f)
try:
spdata = SampleData.read_from_file(f, chapter)
self._add_sample_data(spdata)
num_sample_files += 1
except Exception as e:
logger.error(
'Failed to import sample file %s', f)
logger.error(e)
num_failed_sample_files += 1
logger.info('Imported %d sample files and %d spectrometer files. '
'%d failed sample files, and %d failed spectrometer files.',
num_sample_files, num_spectrometer_files, num_failed_sample_files,
                    num_failed_spectrometer_files)
def get_spectrum(self, sampleID):
'''Returns a spectrum from the database.
Usage:
(x, y) = usgs.get_spectrum(sampleID)
Arguments:
`sampleID` (int):
The **SampleID** value for the desired spectrum from the
**Samples** table in the database.
Returns:
`x` (list):
Band centers for the spectrum.
                This is extracted from the assumed spectrometer for the given sample.
`y` (list):
Spectrum data values for each band.
        Returns a pair of vectors containing the wavelengths and measured
        values of a measurement.
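
        Example (the sample ID is illustrative)::

            >>> x, y = db.get_spectrum(1)
            >>> len(x) == len(y)
            True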
'''
import array
query = '''SELECT ValuesArray, AssumedWLSpmeterDataID FROM Samples WHERE SampleID = ?'''
result = self.cursor.execute(query, (sampleID,))
rows = result.fetchall()
if len(rows) < 1:
raise Exception('Measurement record not found.')
y = array_from_blob(rows[0][0])
assumedWLSpmeterDataID = rows[0][1]
query = '''SELECT ValuesArray FROM SpectrometerData WHERE SpectrometerDataID = ?'''
result = self.cursor.execute(
query, (assumedWLSpmeterDataID,))
rows = result.fetchall()
if len(rows) < 1:
raise Exception('Measurement (wavelengths) record not found.')
x = array_from_blob(rows[0][0])
return (list(x), list(y))
def create_envi_spectral_library(self, spectrumIDs, bandInfo):
'''Creates an ENVI-formatted spectral library for a list of spectra.
Arguments:
`spectrumIDs` (list of ints):
List of **SampleID** values for of spectra in the "Samples"
table of the USGS database.
`bandInfo` (:class:`~spectral.BandInfo`):
The spectral bands to which the original USGS library spectra
will be resampled.
Returns:
A :class:`~spectral.io.envi.SpectralLibrary` object.
The IDs passed to the method should correspond to the SampleID field
of the USGS database "Samples" table. All specified spectra will be
resampled to the same discretization specified by the bandInfo
parameter. See :class:`spectral.BandResampler` for details on the
resampling method used.
Note that expected units for bands are micrometers.
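
        Example (the spectrum IDs and band info are illustrative)::

            >>> lib = db.create_envi_spectral_library([1, 2, 3], bandInfo)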
'''
from spectral.algorithms.resampling import BandResampler
from spectral.io.envi import SpectralLibrary
import numpy
import unicodedata
spectra = numpy.empty((len(spectrumIDs), len(bandInfo.centers)))
cursor = self.cursor.execute('''
SELECT a.ValuesArray, b.ValuesArray, a.Description, b.Unit
FROM Samples AS a INNER JOIN SpectrometerData AS b
ON a.AssumedWLSpmeterDataID = b.SpectrometerDataID
WHERE a.SampleID IN ({0})'''.format(','.join(['?']*len(spectrumIDs))),
spectrumIDs)
names = []
for i, s in enumerate(cursor):
y = array_from_blob(s[0])
x = array_from_blob(s[1])
name = s[2]
unit = s[3]
            if unit in ('nanometer', 'nanometers'):
                x = numpy.array(x) / 1000
resample = BandResampler(
x, bandInfo.centers, None, bandInfo.bandwidths)
spectra[i] = resample(y)
names.append(unicodedata.normalize('NFKD', name).
encode('ascii', 'ignore'))
header = {}
header['wavelength units'] = 'um'
header['spectra names'] = names
header['wavelength'] = bandInfo.centers
header['fwhm'] = bandInfo.bandwidths
return SpectralLibrary(spectra, header, {})
|
chapter10/docker/images/movielens-fetch/scripts/fetch_ratings.py | add54/Data_PipeLine_Apache_Airflow | 303 | 12646981 | #!/usr/bin/env python
from pathlib import Path
import logging
import json
import click
import requests
logging.basicConfig(level=logging.INFO)
@click.command()
@click.option(
"--start_date",
type=click.DateTime(formats=["%Y-%m-%d"]),
required=True,
help="Start date for ratings.",
)
@click.option(
"--end_date",
type=click.DateTime(formats=["%Y-%m-%d"]),
required=True,
help="End date for ratings.",
)
@click.option(
"--output_path",
type=click.Path(dir_okay=False),
required=True,
help="Output file path.",
)
@click.option(
"--host", type=str, default="http://movielens:5000", help="Movielens API URL."
)
@click.option(
"--user",
type=str,
envvar="MOVIELENS_USER",
required=True,
help="Movielens API user.",
)
@click.option(
"--password",
type=str,
envvar="MOVIELENS_PASSWORD",
required=True,
help="Movielens API password.",
)
@click.option(
"--batch_size", type=int, default=100, help="Batch size for retrieving records."
)
def main(start_date, end_date, output_path, host, user, password, batch_size):
"""CLI script for fetching movie ratings from the movielens API."""
# Setup session.
session = requests.Session()
session.auth = (user, password)
# Fetch ratings.
logging.info("Fetching ratings from %s (user: %s)", host, user)
ratings = list(
_get_ratings(
session=session,
host=host,
start_date=start_date,
end_date=end_date,
batch_size=batch_size,
)
)
logging.info("Retrieved %d ratings!", len(ratings))
# Write output.
output_path = Path(output_path)
output_dir = output_path.parent
output_dir.mkdir(parents=True, exist_ok=True)
logging.info("Writing to %s", output_path)
with output_path.open("w") as file_:
json.dump(ratings, file_)
def _get_ratings(session, host, start_date, end_date, batch_size=100):
yield from _get_with_pagination(
session=session,
url=host + "/ratings",
params={
"start_date": start_date.strftime("%Y-%m-%d"),
"end_date": end_date.strftime("%Y-%m-%d"),
},
batch_size=batch_size,
)
def _get_with_pagination(session, url, params, batch_size=100):
"""
Fetches records using a get request with given url/params,
taking pagination into account.
"""
offset = 0
total = None
while total is None or offset < total:
response = session.get(
url, params={**params, **{"offset": offset, "limit": batch_size}}
)
response.raise_for_status()
response_json = response.json()
yield from response_json["result"]
offset += batch_size
total = response_json["total"]
if __name__ == "__main__":
main()
|
tests/test_audience_summary.py | hiribarne/twitter-python-ads-sdk | 162 | 12646985 | import responses
import unittest
from tests.support import with_resource, with_fixture, characters
from twitter_ads.account import Account
from twitter_ads.client import Client
from twitter_ads.targeting import AudienceSummary
from twitter_ads import API_VERSION
@responses.activate
def test_audience_summary():
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph'),
body=with_fixture('accounts_load'),
content_type='application/json')
responses.add(responses.POST,
with_resource('/' + API_VERSION + '/accounts/2iqph/audience_summary'),
body=with_fixture('audience_summary'),
content_type='application/json')
client = Client(
characters(40),
characters(40),
characters(40),
characters(40)
)
account = Account.load(client, '2iqph')
params = {
"targeting_criteria": [
{
"targeting_type":"LOCATION",
"targeting_value":"96683cc9126741d1"
},
{
"targeting_type":"BROAD_KEYWORD",
"targeting_value":"cats"
},
{
"targeting_type":"SIMILAR_TO_FOLLOWERS_OF_USER",
"targeting_value": "14230524"
},
{
"targeting_type":"SIMILAR_TO_FOLLOWERS_OF_USER",
"targeting_value": "90420314"
}
]
}
audience_summary = AudienceSummary.load(
account=account,
params=params
)
print (audience_summary)
assert audience_summary is not None
assert audience_summary.audience_size is not None
assert audience_summary.audience_size['min'] == 41133600
assert audience_summary.audience_size['max'] == 50274400
|
src/sage/schemes/hyperelliptic_curves/jacobian_generic.py | UCD4IDS/sage | 1,742 | 12647086 | <filename>src/sage/schemes/hyperelliptic_curves/jacobian_generic.py
"""
Jacobian of a general hyperelliptic curve
"""
# ****************************************************************************
# Copyright (C) 2006 <NAME> <<EMAIL>>
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
# ****************************************************************************
from sage.rings.all import Integer, QQ
from sage.misc.lazy_attribute import lazy_attribute
from sage.schemes.jacobians.abstract_jacobian import Jacobian_generic
from . import jacobian_homset
from . import jacobian_morphism
from sage.misc.lazy_import import lazy_import
from .jacobian_endomorphism_utils import get_is_geom_field, is_geom_trivial_when_field
lazy_import('sage.interfaces.genus2reduction', ['genus2reduction', 'Genus2reduction'])
class HyperellipticJacobian_generic(Jacobian_generic):
"""
EXAMPLES::
sage: FF = FiniteField(2003)
sage: R.<x> = PolynomialRing(FF)
sage: f = x**5 + 1184*x**3 + 1846*x**2 + 956*x + 560
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: a = x**2 + 376*x + 245; b = 1015*x + 1368
sage: X = J(FF)
sage: D = X([a,b])
sage: D
(x^2 + 376*x + 245, y + 988*x + 635)
sage: J(0)
(1)
sage: D == J([a,b])
True
sage: D == D + J(0)
True
    A more extended example, demonstrating arithmetic in J(QQ) and
J(K) for a number field K/QQ.
::
sage: P.<x> = PolynomialRing(QQ)
sage: f = x^5 - x + 1; h = x
sage: C = HyperellipticCurve(f,h,'u,v')
sage: C
Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: PP = C.ambient_space()
sage: PP
Projective Space of dimension 2 over Rational Field
sage: C.defining_polynomial()
-x0^5 + x0*x1*x2^3 + x1^2*x2^3 + x0*x2^4 - x2^5
sage: C(QQ)
Set of rational points of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: K.<t> = NumberField(x^2-2)
sage: C(K)
Set of rational points of Hyperelliptic Curve over Number Field in t with defining polynomial x^2 - 2 defined by v^2 + u*v = u^5 - u + 1
sage: P = C(QQ)(0,1,1); P
(0 : 1 : 1)
sage: P == C(0,1,1)
True
sage: C(0,1,1).parent()
Set of rational points of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: P1 = C(K)(P)
sage: P2 = C(K)([2,4*t-1,1])
sage: P3 = C(K)([-1/2,1/8*(7*t+2),1])
sage: P1, P2, P3
((0 : 1 : 1), (2 : 4*t - 1 : 1), (-1/2 : 7/8*t + 1/4 : 1))
sage: J = C.jacobian()
sage: J
Jacobian of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: Q = J(QQ)(P); Q
(u, v - 1)
sage: for i in range(6): Q*i
(1)
(u, v - 1)
(u^2, v + u - 1)
(u^2, v + 1)
(u, v + 1)
(1)
sage: Q1 = J(K)(P1); print("%s -> %s"%( P1, Q1 ))
(0 : 1 : 1) -> (u, v - 1)
sage: Q2 = J(K)(P2); print("%s -> %s"%( P2, Q2 ))
(2 : 4*t - 1 : 1) -> (u - 2, v - 4*t + 1)
sage: Q3 = J(K)(P3); print("%s -> %s"%( P3, Q3 ))
(-1/2 : 7/8*t + 1/4 : 1) -> (u + 1/2, v - 7/8*t - 1/4)
sage: R.<x> = PolynomialRing(K)
sage: Q4 = J(K)([x^2-t,R(1)])
sage: for i in range(4): Q4*i
(1)
(u^2 - t, v - 1)
(u^2 + (-3/4*t - 9/16)*u + 1/2*t + 1/4, v + (-1/32*t - 57/64)*u + 1/2*t + 9/16)
(u^2 + (1352416/247009*t - 1636930/247009)*u - 1156544/247009*t + 1900544/247009, v + (-2326345442/122763473*t + 3233153137/122763473)*u + 2439343104/122763473*t - 3350862929/122763473)
sage: R2 = Q2*5; R2
(u^2 - 3789465233/116983808*u - 267915823/58491904, v + (-233827256513849/1789384327168*t + 1/2)*u - 15782925357447/894692163584*t)
sage: R3 = Q3*5; R3
(u^2 + 5663300808399913890623/14426454798950909645952*u - 26531814176395676231273/28852909597901819291904, v + (253155440321645614070860868199103/2450498420175733688903836378159104*t + 1/2)*u + 2427708505064902611513563431764311/4900996840351467377807672756318208*t)
sage: R4 = Q4*5; R4
(u^2 - 3789465233/116983808*u - 267915823/58491904, v + (233827256513849/1789384327168*t + 1/2)*u + 15782925357447/894692163584*t)
Thus we find the following identity::
sage: 5*Q2 + 5*Q4
(1)
Moreover the following relation holds in the 5-torsion subgroup::
sage: Q2 + Q4 == 2*Q1
True
TESTS::
sage: k.<a> = GF(9); R.<x> = k[]
sage: J1 = HyperellipticCurve(x^3 + x - 1, x+a).jacobian()
sage: FF = FiniteField(2003)
sage: R.<x> = PolynomialRing(FF)
sage: f = x**5 + 1184*x**3 + 1846*x**2 + 956*x + 560
sage: J2 = HyperellipticCurve(f).jacobian()
sage: J1 == J1
True
sage: J1 == J2
False
"""
def dimension(self):
"""
Return the dimension of this Jacobian.
OUTPUT:
Integer
EXAMPLES::
sage: k.<a> = GF(9); R.<x> = k[]
sage: HyperellipticCurve(x^3 + x - 1, x+a).jacobian().dimension()
1
sage: g = HyperellipticCurve(x^6 + x - 1, x+a).jacobian().dimension(); g
2
sage: type(g)
<... 'sage.rings.integer.Integer'>
"""
return Integer(self.curve().genus())
def point(self, mumford, check=True):
try:
return self(self.base_ring())(mumford)
except AttributeError:
raise ValueError("Arguments must determine a valid Mumford divisor.")
def _point_homset(self, *args, **kwds):
return jacobian_homset.JacobianHomset_divisor_classes(*args, **kwds)
def _point(self, *args, **kwds):
return jacobian_morphism.JacobianMorphism_divisor_class_field(*args, **kwds)
####################################################################
# Some properties of geometric Endomorphism ring and algebra
####################################################################
@lazy_attribute
def _have_established_geometrically_trivial(self):
r"""
Initialize the flag which determines whether or not we have
already established if the geometric endomorphism ring is
trivial.
This is related to the warning at the top of the
`jacobian_endomorphism_utils.py` module.
INPUT:
- ``self`` -- The Jacobian.
OUTPUT:
The boolean ``False``; this will be updated by other methods.
EXAMPLES:
This is LMFDB curve 262144.d.524288.2::
sage: R.<x> = QQ[]
sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J._have_established_geometrically_trivial
False
"""
return False
@lazy_attribute
def _have_established_geometrically_field(self):
r"""
        Initialize the flag which determines whether or not we have
        already established if the geometric endomorphism algebra is
        a field.
This is related to the warning at the top of the
`jacobian_endomorphism_utils.py` module.
INPUT:
- ``self`` -- The Jacobian.
OUTPUT:
The boolean ``False``; this will be updated by other methods.
EXAMPLES:
This is LMFDB curve 262144.d.524288.2::
sage: R.<x> = QQ[]
sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J._have_established_geometrically_field
False
"""
return False
def geometric_endomorphism_algebra_is_field(self, B=200, proof=False):
r"""
Return whether the geometric endomorphism algebra is a field.
This implies that the Jacobian of the curve is geometrically
        simple. It is based on Algorithm 4.10 from [Lom2019]_
INPUT:
- ``B`` -- (default: 200) the bound which appears in the statement of
the algorithm from [Lom2019]_
- ``proof`` -- (default: False) whether or not to insist on a provably
correct answer. This is related to the warning in the docstring
of this module: if this function returns ``False``, then
strictly speaking this has not been proven to be ``False`` until one
has exhibited a non-trivial endomorphism, which these methods are not
designed to carry out. If one is convinced that this method should
return ``True``, but it is returning ``False``, then this can be
exhibited by increasing `B`.
OUTPUT:
Boolean indicating whether or not the geometric endomorphism
algebra is a field.
EXAMPLES:
This is LMFDB curve 262144.d.524288.2 which has QM. Although its
Jacobian is geometrically simple, the geometric endomorphism algebra
is not a field::
sage: R.<x> = QQ[]
sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_algebra_is_field()
False
This is LMFDB curve 50000.a.200000.1::
sage: f = 8*x^5 + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_algebra_is_field()
True
"""
if self._have_established_geometrically_field:
return True
C = self.curve()
if C.genus() != 2:
raise NotImplementedError("Current implementation requires the curve to be of genus 2")
if C.base_ring() != QQ:
raise NotImplementedError("Current implementation requires the curve to be defined over the rationals")
f, h = C.hyperelliptic_polynomials()
if h != 0:
raise NotImplementedError("Current implementation requires the curve to be in the form y^2 = f(x)")
red_data = genus2reduction(0,f)
cond_C = red_data.conductor # WARNING: this is only the prime_to_2 conductor.
bad_primes = cond_C.prime_divisors()
self._bad_primes = bad_primes
is_abs_simp, is_def_geom_trivial = get_is_geom_field(f, C, bad_primes, B)
if is_def_geom_trivial:
self._have_established_geometrically_trivial = True
if is_abs_simp:
self._have_established_geometrically_field = True
return True
if proof:
raise NotImplementedError("Rigorous computation of lower bounds of endomorphism algebras has not yet been implemented.")
return False
def geometric_endomorphism_ring_is_ZZ(self, B=200, proof=False):
r"""
Return whether the geometric endomorphism ring of ``self`` is the
integer ring `\ZZ`.
INPUT:
- ``B`` -- (default: 200) the bound which appears in the statement of
the algorithm from [Lom2019]_
- ``proof`` -- (default: False) whether or not to insist on a provably
correct answer. This is related to the warning in the module docstring
of `jacobian_endomorphisms.py`: if this function returns ``False``, then
strictly speaking this has not been proven to be ``False`` until one has
exhibited a non-trivial endomorphism, which the methods in that module
are not designed to carry out. If one is convinced that this method
should return ``True``, but it is returning ``False``, then this can be
exhibited by increasing `B`.
OUTPUT:
Boolean indicating whether or not the geometric endomorphism
ring is isomorphic to the integer ring.
EXAMPLES:
This is LMFDB curve 603.a.603.2::
sage: R.<x> = QQ[]
sage: f = 4*x^5 + x^4 - 4*x^3 + 2*x^2 + 4*x + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
True
This is LMFDB curve 1152.a.147456.1 whose geometric endomorphism ring
is isomorphic to the group of 2x2 matrices over `\QQ`::
sage: f = x^6 - 2*x^4 + 2*x^2 - 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
False
This is LMFDB curve 20736.k.373248.1 whose geometric endomorphism ring
is isomorphic to the group of 2x2 matrices over a CM field::
sage: f = x^6 + 8
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
False
This is LMFDB curve 708.a.181248.1::
sage: R.<x> = QQ[]
sage: f = -3*x^6 - 16*x^5 + 36*x^4 + 194*x^3 - 164*x^2 - 392*x - 143
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
True
This is LMFDB curve 10609.a.10609.1 whose geometric endomorphism ring
is an order in a real quadratic field::
sage: f = x^6 + 2*x^4 + 2*x^3 + 5*x^2 + 6*x + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
False
This is LMFDB curve 160000.c.800000.1 whose geometric endomorphism ring
is an order in a CM field::
sage: f = x^5 - 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
False
This is LMFDB curve 262144.d.524288.2 whose geometric endomorphism ring
is an order in a quaternion algebra::
sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
False
This is LMFDB curve 578.a.2312.1 whose geometric endomorphism ring
is `\QQ \times \QQ`::
sage: f = 4*x^5 - 7*x^4 + 10*x^3 - 7*x^2 + 4*x
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: J.geometric_endomorphism_ring_is_ZZ()
False
"""
if self._have_established_geometrically_trivial:
return True
is_abs_simple = self.geometric_endomorphism_algebra_is_field(B=B, proof=proof)
if self._have_established_geometrically_trivial:
return True
if is_abs_simple and is_geom_trivial_when_field(self.curve(), self._bad_primes):
return True
if proof:
raise NotImplementedError("Rigorous computation of lower bounds of endomorphism rings has not yet been implemented.")
return False
|
fabtools/tests/functional_tests/test_shorewall.py | timgates42/fabtools | 308 | 12647098 | import pytest
pytestmark = pytest.mark.network
@pytest.fixture(scope='module')
def firewall():
from fabtools.require.shorewall import firewall
import fabtools.shorewall
firewall(
rules=[
fabtools.shorewall.Ping(),
fabtools.shorewall.SSH(),
fabtools.shorewall.HTTP(),
fabtools.shorewall.HTTPS(),
fabtools.shorewall.SMTP(),
fabtools.shorewall.rule(
port=1234,
source=fabtools.shorewall.hosts(['example.com']),
),
]
)
def test_require_firewall_started(firewall):
from fabtools.require.shorewall import started
from fabtools.shorewall import is_started
started()
assert is_started()
def test_require_firewall_stopped(firewall):
from fabtools.require.shorewall import stopped
from fabtools.shorewall import is_stopped
stopped()
assert is_stopped()
|
dbaas/physical/forms/plan_admin.py | didindinn/database-as-a-service | 303 | 12647123 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.utils.translation import ugettext_lazy as _
from django import forms
from ckeditor.widgets import CKEditorWidget
from system.models import Configuration
from .. import models
log = logging.getLogger(__name__)
class PlanForm(forms.ModelForm):
description = forms.CharField(widget=CKEditorWidget(), required=False)
replication_topology = forms.ModelChoiceField(
queryset=models.ReplicationTopology.objects.all()
)
class Meta:
model = models.Plan
def clean_has_persistence(self):
engine = self.cleaned_data['engine']
if not engine.engine_type.is_in_memory:
return True
return self.cleaned_data['has_persistence']
def clean(self):
cleaned_data = super(PlanForm, self).clean()
engine = cleaned_data.get("engine")
if not engine:
msg = _("Please select a Engine Type")
log.warning(u"%s" % msg)
raise forms.ValidationError(msg)
return cleaned_data
class PlanAttrInlineFormset(forms.models.BaseInlineFormSet):
def clean(self):
if self.instance.is_pre_provisioned:
return
if not self.instance.is_ha:
return
if not self.is_valid():
return
bundles = self.cleaned_data[0].get('bundle_group')
if not bundles:
raise forms.ValidationError("Please select the bundle's")
|
contrib/gen-scripts/gen-write_reverse_arrays.py | zyedidia/boolector | 209 | 12647146 | <reponame>zyedidia/boolector
#! /usr/bin/env python3
# Boolector: Satisfiablity Modulo Theories (SMT) solver.
#
# Copyright (C) 2007-2021 by the authors listed in the AUTHORS file.
#
# This file is part of Boolector.
# See COPYING for more information on using this software.
#
from argparse import ArgumentParser
def sexpr(l):
l = [str(i) for i in l]
return "({})".format(" ".join(l))
def cmd(tag, string = ""):
if string == "":
print(sexpr([tag]))
else:
print(sexpr([tag, string]))
def arsort(index_bw, elem_bw):
return sexpr(["Array", bvsort(index_bw), bvsort(elem_bw)])
def bvsort(bw):
return sexpr(["_", "BitVec", bw])
def var(sym, sort):
print("(declare-fun {} () {})".format(sym, sort))
def bvconst(val, bw):
return "(_ bv{} {})".format(int(val), bw)
def fun(sym, params, sort, term):
s_params = " ".join(["({} {})".format(p, s) for [p, s] in params])
print("(define-fun {} ({}) {} {})".format(sym, s_params, sort, term))
def funapp(sym, terms):
l = [sym]
l.extend(terms)
return sexpr(l)
if __name__ == "__main__":
aparser = ArgumentParser ()
aparser.add_argument ("index_bw", type=int, help="index bit width")
args = aparser.parse_args()
args.elem_bw = args.index_bw
max_idx = 2**args.index_bw - 1
cmd ("set-logic", "QF_AUFBV")
var ("k", bvsort (args.index_bw))
var ("a", arsort (args.index_bw, args.elem_bw))
for i in range(0, max_idx + 1):
var ("j{}".format(i), bvsort (args.index_bw))
for i in range(0, max_idx + 1):
fun ("w{}".format(i), [("p{}".format(i), bvsort (args.index_bw))],
bvsort (args.elem_bw),
"(ite (= p{} j{}) j{} ({} p{}))".format(
i, i, i, "select a" if not i else "w{}".format(i - 1), i))
for i in range(0, max_idx + 1):
fun ("rw{}".format(i), [("p{}".format(i), bvsort (args.index_bw))],
bvsort (args.elem_bw),
"(ite (= p{} (bvsub (_ bv{} {}) j{})) p{} ({} p{}))".format(
i, max_idx, args.index_bw, i, i,
"select a" if not i else "rw{}".format(i - 1), i))
print ("(assert (distinct", end ='')
for i in range(0, max_idx + 1):
print (" j{}".format(i), end ='')
print ("))")
cmd ("assert", "(not (= (w{} k) (rw{} k)))".format(max_idx, max_idx))
cmd ("check-sat")
cmd ("exit")
|
client/verta/verta/registry/stage_change/_production.py | vishalbelsare/modeldb | 835 | 12647152 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from verta._protos.public.registry import StageService_pb2
from . import _stage_change
class Production(_stage_change._StageChange):
"""The model version is in production.
Parameters
----------
comment : str, optional
Comment associated with this stage change.
Attributes
----------
comment : str or None
Comment associated with this stage change.
Examples
--------
.. code-block:: python
from verta.registry.stage_change import Production
model_ver.change_stage(Production("Rolling out to prod."))
model_ver.stage
# "production"
"""
_STAGE = StageService_pb2.StageEnum.PRODUCTION
|
usaspending_api/download/tests/integration/test_download_disaster.py | ststuck/usaspending-api | 217 | 12647180 | <filename>usaspending_api/download/tests/integration/test_download_disaster.py
import json
import pytest
import re
from model_mommy import mommy
from rest_framework import status
from usaspending_api.download.lookups import JOB_STATUS
from usaspending_api.etl.award_helpers import update_awards
from usaspending_api.references.models import DisasterEmergencyFundCode
from usaspending_api.search.tests.data.utilities import setup_elasticsearch_test
def _post(client, def_codes=None, query=None, award_type_codes=None, file_format=None):
request_body = {}
filters = {}
if def_codes:
filters["def_codes"] = def_codes
if query:
filters["query"] = query
if award_type_codes:
filters["award_type_codes"] = award_type_codes
request_body["filters"] = filters
if file_format:
request_body["file_format"] = file_format
resp = client.post("/api/v2/download/disaster/", content_type="application/json", data=json.dumps(request_body))
return resp
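# The request body assembled above has the shape (illustrative):
#   {"filters": {"def_codes": [...], "query": "...", "award_type_codes": [...]},
#    "file_format": "csv"}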
@pytest.fixture
def awards_and_transactions(transactional_db):
# Populate job status lookup table
for js in JOB_STATUS:
mommy.make("download.JobStatus", job_status_id=js.id, name=js.name, description=js.desc)
# Awards
award1 = mommy.make("awards.Award", type="07", total_loan_value=3, generated_unique_award_id="ASST_NEW_1")
award2 = mommy.make("awards.Award", type="07", total_loan_value=30, generated_unique_award_id="ASST_NEW_2")
award3 = mommy.make("awards.Award", type="08", total_loan_value=300, generated_unique_award_id="ASST_NEW_3")
award4 = mommy.make("awards.Award", type="B", total_loan_value=0, generated_unique_award_id="CONT_NEW_1")
award5 = mommy.make("awards.Award", type="A", total_loan_value=0, generated_unique_award_id="CONT_NEW_2")
award6 = mommy.make("awards.Award", type="C", total_loan_value=0, generated_unique_award_id="CONT_NEW_3")
award7 = mommy.make("awards.Award", type="D", total_loan_value=0, generated_unique_award_id="CONT_NEW_4")
# Disaster Emergency Fund Code
defc1 = mommy.make(
"references.DisasterEmergencyFundCode",
code="L",
public_law="PUBLIC LAW FOR CODE L",
title="TITLE FOR CODE L",
group_name="covid_19",
)
defc2 = mommy.make(
"references.DisasterEmergencyFundCode",
code="M",
public_law="PUBLIC LAW FOR CODE M",
title="TITLE FOR CODE M",
group_name="covid_19",
)
mommy.make(
"references.DisasterEmergencyFundCode",
code="N",
public_law="PUBLIC LAW FOR CODE N",
title="TITLE FOR CODE N",
group_name="covid_19",
)
# Submission Attributes
sub1 = mommy.make(
"submissions.SubmissionAttributes",
reporting_fiscal_year=2022,
reporting_fiscal_period=8,
quarter_format_flag=False,
reporting_period_start="2022-05-01",
)
sub2 = mommy.make(
"submissions.SubmissionAttributes",
reporting_fiscal_year=2022,
reporting_fiscal_period=8,
quarter_format_flag=False,
reporting_period_start="2022-05-01",
)
sub3 = mommy.make(
"submissions.SubmissionAttributes",
reporting_fiscal_year=2022,
reporting_fiscal_period=8,
quarter_format_flag=False,
reporting_period_start="2022-05-01",
)
# Financial Accounts by Awards
mommy.make(
"awards.FinancialAccountsByAwards",
pk=1,
award=award1,
submission=sub1,
disaster_emergency_fund=defc1,
gross_outlay_amount_by_award_cpe=1,
transaction_obligated_amount=2,
)
mommy.make(
"awards.FinancialAccountsByAwards",
pk=2,
award=award2,
submission=sub1,
disaster_emergency_fund=defc1,
gross_outlay_amount_by_award_cpe=10,
transaction_obligated_amount=20,
)
mommy.make(
"awards.FinancialAccountsByAwards",
pk=3,
award=award3,
submission=sub2,
disaster_emergency_fund=defc1,
gross_outlay_amount_by_award_cpe=100,
transaction_obligated_amount=200,
)
mommy.make(
"awards.FinancialAccountsByAwards",
pk=4,
award=award4,
submission=sub2,
disaster_emergency_fund=defc1,
gross_outlay_amount_by_award_cpe=1000,
transaction_obligated_amount=2000,
)
mommy.make(
"awards.FinancialAccountsByAwards",
pk=5,
award=award5,
submission=sub3,
disaster_emergency_fund=defc2,
gross_outlay_amount_by_award_cpe=10000,
transaction_obligated_amount=20000,
)
mommy.make(
"awards.FinancialAccountsByAwards",
pk=6,
award=award6,
submission=sub3,
disaster_emergency_fund=defc2,
gross_outlay_amount_by_award_cpe=100000,
transaction_obligated_amount=200000,
)
mommy.make(
"awards.FinancialAccountsByAwards",
pk=7,
award=award7,
submission=sub3,
disaster_emergency_fund=defc2,
gross_outlay_amount_by_award_cpe=1000000,
transaction_obligated_amount=2000000,
)
# DABS Submission Window Schedule
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2022081",
is_quarter=False,
period_start_date="2022-05-01",
period_end_date="2022-05-30",
submission_fiscal_year=2022,
submission_fiscal_quarter=3,
submission_fiscal_month=8,
submission_reveal_date="2020-5-15",
)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2022080",
is_quarter=True,
period_start_date="2022-05-01",
period_end_date="2022-05-30",
submission_fiscal_year=2022,
submission_fiscal_quarter=3,
submission_fiscal_month=8,
submission_reveal_date="2020-5-15",
)
# Transaction Normalized
mommy.make(
"awards.TransactionNormalized",
id=10,
award=award1,
federal_action_obligation=5,
action_date="2022-01-01",
is_fpds=False,
unique_award_key="ASST_NEW_1",
)
mommy.make(
"awards.TransactionNormalized",
id=20,
award=award2,
federal_action_obligation=50,
action_date="2022-01-02",
is_fpds=False,
unique_award_key="ASST_NEW_2",
)
mommy.make(
"awards.TransactionNormalized",
id=30,
award=award3,
federal_action_obligation=500,
action_date="2022-01-03",
is_fpds=False,
unique_award_key="ASST_NEW_3",
)
mommy.make(
"awards.TransactionNormalized",
id=40,
award=award4,
federal_action_obligation=5000,
action_date="2022-01-04",
is_fpds=True,
unique_award_key="CONT_NEW_1",
)
mommy.make(
"awards.TransactionNormalized",
id=50,
award=award5,
federal_action_obligation=50000,
action_date="2022-01-05",
is_fpds=True,
unique_award_key="CONT_NEW_2",
)
mommy.make(
"awards.TransactionNormalized",
id=60,
award=award6,
federal_action_obligation=500000,
action_date="2022-01-06",
is_fpds=True,
unique_award_key="CONT_NEW_3",
)
mommy.make(
"awards.TransactionNormalized",
id=70,
award=award7,
federal_action_obligation=5000000,
action_date="2022-01-07",
is_fpds=True,
unique_award_key="CONT_NEW_4",
)
# Transaction FABS
mommy.make(
"awards.TransactionFABS",
transaction_id=10,
cfda_number="10.100",
legal_entity_country_code="USA",
legal_entity_state_code=None,
legal_entity_county_code=None,
legal_entity_county_name=None,
legal_entity_congressional=None,
awardee_or_recipient_legal="RECIPIENT 1",
awardee_or_recipient_uniqu=None,
)
mommy.make(
"awards.TransactionFABS",
transaction_id=20,
cfda_number="20.200",
legal_entity_country_code="USA",
legal_entity_state_code="SC",
legal_entity_county_code="001",
legal_entity_county_name="CHARLESTON",
legal_entity_congressional="90",
awardee_or_recipient_legal="RECIPIENT 2",
awardee_or_recipient_uniqu="456789123",
)
mommy.make(
"awards.TransactionFABS",
transaction_id=30,
cfda_number="20.200",
legal_entity_country_code="USA",
legal_entity_state_code="SC",
legal_entity_county_code="001",
legal_entity_county_name="CHARLESTON",
legal_entity_congressional="50",
awardee_or_recipient_legal="RECIPIENT 3",
awardee_or_recipient_uniqu="987654321",
)
# Transaction FPDS
mommy.make(
"awards.TransactionFPDS",
transaction_id=40,
legal_entity_country_code="USA",
legal_entity_state_code="WA",
legal_entity_county_code="005",
legal_entity_county_name="TEST NAME",
legal_entity_congressional="50",
awardee_or_recipient_legal="MULTIPLE RECIPIENTS",
awardee_or_recipient_uniqu="096354360",
)
mommy.make(
"awards.TransactionFPDS",
transaction_id=50,
legal_entity_country_code="USA",
legal_entity_state_code="WA",
legal_entity_county_code="005",
legal_entity_county_name="TEST NAME",
legal_entity_congressional="50",
awardee_or_recipient_legal=None,
awardee_or_recipient_uniqu="987654321",
)
mommy.make(
"awards.TransactionFPDS",
transaction_id=60,
legal_entity_country_code="USA",
legal_entity_state_code="SC",
legal_entity_county_code="005",
legal_entity_county_name="TEST NAME",
legal_entity_congressional="50",
awardee_or_recipient_legal=None,
awardee_or_recipient_uniqu="987654321",
)
mommy.make(
"awards.TransactionFPDS",
transaction_id=70,
legal_entity_country_code="USA",
legal_entity_state_code="SC",
legal_entity_county_code="01",
legal_entity_county_name="CHARLESTON",
legal_entity_congressional="10",
awardee_or_recipient_legal="MULTIPLE RECIPIENTS",
awardee_or_recipient_uniqu=None,
)
def_codes = list(
DisasterEmergencyFundCode.objects.filter(group_name="covid_19").order_by("code").values_list("code", flat=True)
)
mommy.make(
"download.DownloadJob",
job_status_id=1,
file_name="COVID-19_Profile_2021-09-20_H20M11S49647843.zip",
error_message=None,
json_request=json.dumps({"filters": {"def_codes": def_codes}}),
)
# Set latest_award for each award
update_awards()
def test_csv_download_success(client, monkeypatch, awards_and_transactions, elasticsearch_award_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
resp = _post(client, def_codes=["L"])
resp_json = resp.json()
assert resp.status_code == status.HTTP_200_OK
assert re.match(r".*COVID-19_Profile_.*\.zip", resp_json["file_url"])
assert resp_json["download_request"]["file_format"] == "csv"
# "def_codes" intentionally out of order to test that the order doesn't matter
resp = _post(client, def_codes=["M", "N", "L"])
resp_json = resp.json()
assert resp.status_code == status.HTTP_200_OK
assert re.match(r".*COVID-19_Profile_2021-09-20_H20M11S49647843.zip", resp_json["file_url"])
resp = _post(client)
resp_json = resp.json()
assert resp.status_code == status.HTTP_200_OK
assert re.match(r".*COVID-19_Profile_2021-09-20_H20M11S49647843.zip", resp_json["file_url"])
def test_tsv_download_success(client, monkeypatch, awards_and_transactions, elasticsearch_award_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
resp = _post(client, def_codes=["L"], file_format="tsv")
resp_json = resp.json()
assert resp.status_code == status.HTTP_200_OK
assert re.match(r".*COVID-19_Profile_.*\.zip", resp_json["file_url"])
assert resp_json["download_request"]["file_format"] == "tsv"
def test_pstxt_download_success(client, monkeypatch, awards_and_transactions, elasticsearch_award_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
resp = _post(client, def_codes=["L"], file_format="pstxt")
resp_json = resp.json()
assert resp.status_code == status.HTTP_200_OK
assert re.match(r".*COVID-19_Profile_.*\.zip", resp_json["file_url"])
assert resp_json["download_request"]["file_format"] == "pstxt"
def test_download_failure_with_two_defc(client, monkeypatch, awards_and_transactions, elasticsearch_award_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
resp = _post(client, def_codes=["L", "M"])
assert resp.status_code == status.HTTP_400_BAD_REQUEST
assert (
resp.json()["detail"]
== "The Disaster Download is currently limited to either all COVID-19 DEFC or a single COVID-19 DEFC."
)
|
climt/_components/dcmip/__init__.py | Ai33L/climt | 116 | 12647211 | <reponame>Ai33L/climt<gh_stars>100-1000
from .component import DcmipInitialConditions
__all__ = ('DcmipInitialConditions',)
|
tests/batching_test.py | gglin001/poptorch | 128 | 12647244 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import torch
import pytest
import helpers
import poptorch
def test_inferenceBatching():
torch.manual_seed(42)
model = torch.nn.Linear(6, 20)
# Actually batched by 100.
input = torch.randn([10, 1, 5, 6])
# Run pytorch native on CPU batchsize 10.
native_output = model(input)
# Run on IPU batch size 1 * 10 popart batches.
opts = poptorch.Options().deviceIterations(10)
ipuModel = poptorch.inferenceModel(model, opts)
poptorch_out = ipuModel(input)
# Check that inference wrapper has defaulted to "All".
assert len(poptorch_out.size()) == 4
assert poptorch_out.size()[0] == 10
helpers.assert_allclose(expected=native_output, actual=poptorch_out)
def test_trainingBatching():
torch.manual_seed(4424242)
# 10 Batches of 10.
input = torch.randn(10, 10)
# 10 batches of 1
label = torch.randint(0, 10, [1])
label = label.expand([10])
model = torch.nn.Linear(10, 10)
# Run on IPU batch size 1 * 10 popart batches.
opts = poptorch.Options().deviceIterations(10)
poptorch_model = helpers.trainingModelWithLoss(
model, options=opts, loss=torch.nn.CrossEntropyLoss())
# Run all 10 batches as batchsize 10.
out = model(input)
# Sanity check we weren't already matching the label.
assert not torch.equal(torch.argmax(out, dim=1), label)
for _ in range(0, 1000):
_, loss = poptorch_model(input, label)
        # Each batch should NOT report its own loss, since by default the
        # training model uses a "Final" anchor.
assert len(loss.size()) == 0
# Run with trained weights.
out = model(input)
# Check we are now equal with labels.
helpers.assert_allequal(actual=torch.argmax(out, dim=1), expected=label)
@pytest.mark.parametrize("anchor", list(poptorch.AnchorMode))
def test_inferenceAnchors(anchor):
torch.manual_seed(42)
model = torch.nn.Linear(6, 20)
# Actually batched by 100.
input = torch.randn([10, 1, 5, 6])
# Run pytorch native on CPU batchsize 10.
native_out = model(input)
# Run on IPU batch size 1 * 10 popart batches. anchor_return_period ignored if not EVERYN
opts = poptorch.Options().deviceIterations(10)
opts.anchorMode(anchor, anchor_return_period=5)
ipuModel = poptorch.inferenceModel(model, opts)
poptorch_out = ipuModel(input)
if anchor in [poptorch.AnchorMode.All, poptorch.AnchorMode.Default]:
# Expect the full batch.
assert len(poptorch_out.size()) == 4
assert poptorch_out.size()[0] == 10
helpers.assert_allclose(expected=native_out, actual=poptorch_out)
elif anchor == poptorch.AnchorMode.EveryN:
# Otherwise we are expecting device_iterations / N
assert len(poptorch_out.size()) == 4
assert poptorch_out.size()[0] == 2
# Check each N is the correct batch
helpers.assert_allclose(actual=poptorch_out[0], expected=native_out[4])
helpers.assert_allclose(actual=poptorch_out[1], expected=native_out[9])
else:
# Otherwise we are expecting just one element per batch.
assert len(poptorch_out.size()) == 4
assert poptorch_out.size()[0] == 1
if anchor == poptorch.AnchorMode.Final:
# Check we are the same as the last output.
helpers.assert_allclose(actual=poptorch_out.reshape(
native_out[-1].shape),
expected=native_out[-1])
elif anchor == poptorch.AnchorMode.Sum:
# Check we are close to the sum of the batch dim.
sum = torch.sum(native_out, dim=0, keepdim=True)
helpers.assert_allclose(actual=poptorch_out, expected=sum)
else:
assert False, "Unexpected anchor type %s" % anchor
@pytest.mark.parametrize("anchor", list(poptorch.AnchorMode))
def test_trainingAnchors(anchor):
torch.manual_seed(42)
# 1000 Batches of 10.
input = torch.randn(1000, 10)
# 1000 batches of 1
label = torch.randint(0, 10, [1])
label = label.expand([1000])
# The model
model = torch.nn.Linear(10, 10)
# Run pytorch native on CPU batchsize 10.
model(input)
# Run on IPU batch size 1 * 1000 popart batches.
opts = poptorch.Options().deviceIterations(1000)
opts.anchorMode(anchor, anchor_return_period=20)
poptorch_model = helpers.trainingModelWithLoss(
model, options=opts, loss=torch.nn.CrossEntropyLoss())
poptorch_out, loss = poptorch_model(input, label)
if anchor == poptorch.AnchorMode.All:
# Expect the full batch.
assert len(poptorch_out.size()) == 2
assert poptorch_out.size()[0] == 1000
assert len(loss.size()) == 1
assert loss.size()[0] == 1000
# Check the rolling average loss is downward sloped.
interval = 100
previous_average = torch.mean(loss[:interval])
for i in range(1, 1000 // interval):
start = interval * i
end = start + interval
new_average = torch.mean(loss[start:end])
assert new_average < previous_average
previous_average = new_average
elif anchor == poptorch.AnchorMode.EveryN:
# Otherwise we are expecting device_iterations / N
assert len(poptorch_out.size()) == 2
assert poptorch_out.size()[0] == 50
# There's too much noise in the losses for us to test directly without averaging like above so just test sizes.
assert len(loss.size()) == 1
assert loss.size()[0] == 50
else:
# Otherwise we are expecting just one element per batch.
assert len(poptorch_out.size()) == 2
assert poptorch_out.size()[0] == 1
assert len(loss.size()) == 0
if anchor in [poptorch.AnchorMode.Final, poptorch.AnchorMode.Default]:
# We just have to check the loss is small.
# This is just relative to the previously observed loss values on this test with this seed.
assert loss < 0.2
elif anchor == poptorch.AnchorMode.Sum:
# We just have to check that the loss is huge.
assert loss > 500.0
else:
assert False, "Unexpected anchor type %s" % anchor
def run_gradient_accumulation_test(input, target, gradient_accumulations,
accumulation_reduction_type, lr):
torch.manual_seed(42)
model = torch.nn.Linear(10, 10)
opts = poptorch.Options()
opts.anchorMode(poptorch.AnchorMode.All)
opts.Training.gradientAccumulation(gradient_accumulations)
if accumulation_reduction_type is not None:
opts.Training.accumulationAndReplicationReductionType(
accumulation_reduction_type)
poptorch_model = helpers.trainingModelWithLoss(
model,
loss=torch.nn.L1Loss(reduction="mean"),
options=opts,
optimizer=torch.optim.SGD(model.parameters(), lr=lr))
# Run 10 training steps
for _ in range(10):
poptorch_model(input, target)
# return trained weight matrix
return poptorch_model.weight.data
def test_gradient_accumulation():
torch.manual_seed(42)
target = torch.randn(4, 10)
input = torch.randn(4, 10)
# Testing gradient accumulations 1 vs 2 and Mean reduction
w_with_1 = run_gradient_accumulation_test(
target,
input,
1,
poptorch.ReductionType.Mean,
0.01,
)
w_with_2 = run_gradient_accumulation_test(
target,
input,
2,
poptorch.ReductionType.Mean,
0.01,
)
helpers.assert_allclose(actual=w_with_1, expected=w_with_2)
# Test the default matches as well (i.e. the default is mean)
w_with_2 = run_gradient_accumulation_test(target, input, 2, None, 0.01)
helpers.assert_allclose(actual=w_with_1, expected=w_with_2)
# Testing gradient accumulations 1 vs 2 and Sum reduction (different lr)
w_with_1 = run_gradient_accumulation_test(
target,
input,
1,
poptorch.ReductionType.Sum,
0.02,
)
w_with_2 = run_gradient_accumulation_test(
target,
input,
2,
poptorch.ReductionType.Sum,
0.01,
)
helpers.assert_allclose(actual=w_with_1, expected=w_with_2)
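# Illustrative sketch (not part of the original test suite): why the Sum-reduction
# comparison above uses half the learning rate. With summed accumulation the two
# half-batch gradients are added, which doubles the full-batch mean gradient when the
# weights are unchanged between micro-batches, so an SGD step with lr/2 moves the
# weights by the same amount. Plain torch only; poptorch is not involved here.
def _sketch_sum_accumulation_equivalence():
    torch.manual_seed(0)
    model_a = torch.nn.Linear(10, 10)
    model_b = torch.nn.Linear(10, 10)
    model_b.load_state_dict(model_a.state_dict())
    data = torch.randn(4, 10)
    target = torch.randn(4, 10)
    loss_fn = torch.nn.L1Loss(reduction="mean")
    # One step over the full batch with lr=0.02.
    opt_a = torch.optim.SGD(model_a.parameters(), lr=0.02)
    loss_fn(model_a(data), target).backward()
    opt_a.step()
    # Two accumulated half-batches (backward() sums the gradients) with lr=0.01.
    opt_b = torch.optim.SGD(model_b.parameters(), lr=0.01)
    loss_fn(model_b(data[:2]), target[:2]).backward()
    loss_fn(model_b(data[2:]), target[2:]).backward()
    opt_b.step()
    # The two weight matrices should now match to numerical precision.
    return model_a.weight.detach(), model_b.weight.detach()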
|
Src/Plugins/GLShaderEdit/scite/scripts/CheckMentioned.py | vinjn/glintercept | 468 | 12647252 | # CheckMentioned.py
# Find all the symbols in scintilla/include/Scintilla.h and check if they
# are mentioned in scintilla/doc/ScintillaDoc.html.
import string
srcRoot = "../.."
incFileName = srcRoot + "/scintilla/include/Scintilla.h"
docFileName = srcRoot + "/scintilla/doc/ScintillaDoc.html"
identCharacters = "_" + string.letters + string.digits
# Convert all punctuation characters except '_' into spaces.
def depunctuate(s):
d = ""
for ch in s:
if ch in identCharacters:
d = d + ch
else:
d = d + " "
return d
symbols = {}
incFile = open(incFileName, "rt")
for line in incFile.readlines():
if line.startswith("#define"):
identifier = line.split()[1]
symbols[identifier] = 0
incFile.close()
docFile = open(docFileName, "rt")
for line in docFile.readlines():
for word in depunctuate(line).split():
if word in symbols.keys():
symbols[word] = 1
docFile.close()
identifiersSorted = symbols.keys()
identifiersSorted.sort()
for identifier in identifiersSorted:
if not symbols[identifier]:
print identifier
|
rl_coach/saver.py | jl45621/coach | 1,960 | 12647272 | <reponame>jl45621/coach
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module for abstract base class for checkpoint object and checkpoint collection
"""
from typing import Any, Dict, List
class Saver(object):
"""
ABC for saver objects that implement saving/restoring to/from path, and merging two savers.
"""
@property
def path(self):
"""
Relative path for save/load. If two saver objects return the same path, they must be merge-able.
"""
raise NotImplementedError
def save(self, sess: Any, save_path: str) -> List[str]:
"""
Save to save_path
:param sess: active session for session-based frameworks (e.g. TF)
:param save_path: full path to save checkpoint (typically directory plus self.path plus checkpoint count).
:return: list of all saved paths
"""
raise NotImplementedError
def restore(self, sess: Any, restore_path: str) -> None:
"""
Restore from restore_path
:param sess: active session for session-based frameworks (e.g. TF)
:param restore_path: full path to load checkpoint from.
"""
raise NotImplementedError
def merge(self, other: 'Saver') -> None:
"""
Merge other saver into this saver
:param other: saver to be merged into self
"""
raise NotImplementedError
class SaverCollection(object):
"""
Object for storing a collection of saver objects. It takes care of ensuring uniqueness of saver paths
and merging savers if they have the same path. For example, if a saver handles saving a generic key/value
file for all networks in a single file, it can use a more generic path and all savers of all networks would be
merged into a single saver that saves/restores parameters for all networks.
NOTE: If two savers have the same path, the respective saver class must support merging them
into a single saver that saves/restores all merged parameters.
"""
def __init__(self, saver: Saver = None):
"""
:param saver: optional initial saver for the collection
"""
self._saver_dict = dict() # type: Dict[str, Saver]
if saver is not None:
self._saver_dict[saver.path] = saver
def add(self, saver: Saver):
"""
Add a new saver to the collection. If saver.path is already in the collection, merge
the new saver with the existing saver.
:param saver: new saver to be added to collection
"""
if saver.path in self._saver_dict:
self._saver_dict[saver.path].merge(saver)
else:
self._saver_dict[saver.path] = saver
def update(self, other: 'SaverCollection'):
"""
Merge savers from other collection into self
:param other: saver collection to update self with.
"""
for c in other:
self.add(c)
def save(self, sess: Any, save_path: str) -> List[str]:
"""
Call save on all savers in the collection
:param sess: active session for session-based frameworks (e.g. TF)
:param save_path: path for saving checkpoints using savers. All saved file paths must
start with this path in their full path. For example if save_path is '/home/checkpoints/checkpoint-01',
then saved file paths can be '/home/checkpoints/checkpoint-01.main-network' but not
'/home/checkpoints/main-network'
:return: list of all saved paths
"""
paths = list()
for saver in self:
paths.extend(saver.save(sess, self._full_path(save_path, saver)))
return paths
def restore(self, sess: Any, restore_path: str) -> None:
"""
Call restore on all savers in the collection
:param sess: active session for session-based frameworks (e.g. TF)
:param restore_path: path for restoring checkpoint using savers.
"""
for saver in self:
saver.restore(sess, self._full_path(restore_path, saver))
def __iter__(self):
"""
Return an iterator for savers in the collection
:return: saver iterator
"""
return (v for v in self._saver_dict.values())
@staticmethod
def _full_path(path_prefix: str, saver: Saver) -> str:
"""
Concatenates path of the saver to parent prefix to create full save path
:param path_prefix: prefix of the path
:param saver: saver object to get unique path extension from
:return: full path
"""
if saver.path == "":
return path_prefix
return "{}.{}".format(path_prefix, saver.path)
|
examples/cifar_isonet/model.py | Be-Secure/pykale | 324 | 12647313 | <filename>examples/cifar_isonet/model.py
"""
Define the ISONet model for the CIFAR datasets.
"""
import kale.predict.isonet as isonet
def get_config(cfg):
"""
    Sets the hyperparameters (architecture) for ISONet using the config file
Args:
cfg: A YACS config object.
"""
config_params = {
"net_params": {
"use_dirac": cfg.ISON.DIRAC_INIT,
"use_dropout": cfg.ISON.DROPOUT,
"dropout_rate": cfg.ISON.DROPOUT_RATE,
"nc": cfg.DATASET.NUM_CLASSES,
"depths": cfg.ISON.DEPTH,
"has_bn": cfg.ISON.HAS_BN,
"use_srelu": cfg.ISON.SReLU,
"transfun": cfg.ISON.TRANS_FUN,
"has_st": cfg.ISON.HAS_ST,
}
}
return config_params
# Inherit and override
class CifarIsoNet(isonet.ISONet):
"""Constructs the ISONet for CIFAR datasets
Args:
cfg: A YACS config object.
"""
def __init__(self, net_params):
super(CifarIsoNet, self).__init__(net_params)
# define network structures (override)
self._construct(net_params)
# initialization
self._network_init(net_params["use_dirac"])
def _construct(self, net_params):
assert (
net_params["depths"] - 2
) % 6 == 0, "Model depth should be of the format 6n + 2 for cifar" # Seems because this is a ResNet
# Each stage has the same number of blocks for cifar
d = int((net_params["depths"] - 2) / 6)
# Stem: (N, 3, 32, 32) -> (N, 16, 32, 32)
self.stem = isonet.ResStem(w_in=3, w_out=16, net_params=net_params, kernelsize=3, stride=1, padding=1)
# Stage 1: (N, 16, 32, 32) -> (N, 16, 32, 32)
self.s1 = isonet.ResStage(w_in=16, w_out=16, stride=1, net_params=net_params, d=d)
# Stage 2: (N, 16, 32, 32) -> (N, 32, 16, 16)
self.s2 = isonet.ResStage(w_in=16, w_out=32, stride=2, net_params=net_params, d=d)
# Stage 3: (N, 32, 16, 16) -> (N, 64, 8, 8)
self.s3 = isonet.ResStage(w_in=32, w_out=64, stride=2, net_params=net_params, d=d)
# Head: (N, 64, 8, 8) -> (N, num_classes)
self.head = isonet.ResHead(w_in=64, net_params=net_params)
def get_model(cfg):
"""
Builds and returns an ISONet model for CIFAR datasets according to the config
object passed.
Args:
cfg: A YACS config object.
"""
config_params = get_config(cfg)
net_params = config_params["net_params"]
net = CifarIsoNet(net_params)
return net
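# Illustrative usage sketch (not part of the original example): a minimal YACS config
# containing only the fields read by get_config() above. The concrete values are
# assumptions chosen for illustration, not recommended settings.
# from yacs.config import CfgNode as CN
# cfg = CN()
# cfg.DATASET = CN()
# cfg.DATASET.NUM_CLASSES = 10
# cfg.ISON = CN()
# cfg.ISON.DIRAC_INIT = True
# cfg.ISON.DROPOUT = False
# cfg.ISON.DROPOUT_RATE = 0.0
# cfg.ISON.DEPTH = 38          # 6n + 2 with n = 6
# cfg.ISON.HAS_BN = False
# cfg.ISON.SReLU = True
# cfg.ISON.TRANS_FUN = "basic_transform"   # assumed transform name
# cfg.ISON.HAS_ST = False
# net = get_model(cfg)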
|
scale/job/test/test_models.py | kaydoh/scale | 121 | 12647314 | from __future__ import unicode_literals
from __future__ import absolute_import
import copy
import datetime
import json
import time
import django
import django.utils.timezone as timezone
from django.test import TestCase, TransactionTestCase
import error.test.utils as error_test_utils
import job.test.utils as job_test_utils
import storage.test.utils as storage_test_utils
import trigger.test.utils as trigger_test_utils
from data.data.data import Data
from data.data.json.data_v6 import convert_data_to_v6_json
from error.models import Error
from job.configuration.data.exceptions import InvalidConnection
from job.configuration.data.job_data import JobData
from job.configuration.interface.job_interface import JobInterface
from job.configuration.results.job_results import JobResults
from job.seed.results.job_results import JobResults as SeedJobResults
from job.models import Job, JobExecution, JobExecutionOutput, JobInputFile, JobType, JobTypeRevision, JobTypeTag
from node.resources.json.resources import Resources
class TestJobManager(TransactionTestCase):
def test_process_job_input(self):
"""Tests calling JobManager.process_job_input()"""
date_1 = timezone.now()
min_src_started_job_1 = date_1 - datetime.timedelta(days=200)
max_src_ended_job_1 = date_1 + datetime.timedelta(days=200)
date_2 = date_1 + datetime.timedelta(minutes=30)
date_3 = date_1 + datetime.timedelta(minutes=40)
date_4 = date_1 + datetime.timedelta(minutes=50)
min_src_started_job_2 = date_1 - datetime.timedelta(days=500)
max_src_ended_job_2 = date_1 + datetime.timedelta(days=500)
s_class = 'A'
s_sensor = '1'
collection = '12345'
task = 'abcd'
workspace = storage_test_utils.create_workspace()
file_1 = storage_test_utils.create_file(workspace=workspace, file_size=10485760.0,
source_sensor_class=s_class, source_sensor=s_sensor,
source_collection=collection, source_task=task)
file_2 = storage_test_utils.create_file(workspace=workspace, file_size=104857600.0,
source_started=date_2, source_ended=date_3,
source_sensor_class = s_class, source_sensor = s_sensor,
source_collection = collection, source_task=task)
file_3 = storage_test_utils.create_file(workspace=workspace, file_size=987654321.0,
source_started=min_src_started_job_1, source_ended=date_4)
file_4 = storage_test_utils.create_file(workspace=workspace, file_size=46546.0,
source_ended=max_src_ended_job_1)
file_5 = storage_test_utils.create_file(workspace=workspace, file_size=83457.0, source_started=date_2)
file_6 = storage_test_utils.create_file(workspace=workspace, file_size=42126588636633.0, source_ended=date_4)
file_7 = storage_test_utils.create_file(workspace=workspace, file_size=76645464662354.0)
file_8 = storage_test_utils.create_file(workspace=workspace, file_size=4654.0,
source_started=min_src_started_job_2)
file_9 = storage_test_utils.create_file(workspace=workspace, file_size=545.0, source_started=date_3,
source_ended=max_src_ended_job_2)
file_10 = storage_test_utils.create_file(workspace=workspace, file_size=0.154, source_ended=date_4,
source_sensor_class=s_class, source_sensor=s_sensor,
source_collection=collection, source_task=task)
interface = {
'command': 'my_command',
'inputs': {
'files': [{
'name': 'Input 1',
'mediaTypes': ['text/plain'],
}, {
'name': 'Input 2',
'mediaTypes': ['text/plain'],
}]
},
'outputs': {
'files': [{
'name': 'Output 1',
'mediaType': 'image/png',
}]}}
job_type = job_test_utils.create_seed_job_type(interface=interface)
data_1 = {
'version': '1.0',
'input_data': [{
'name': 'Input 1',
'file_id': file_1.id
}, {
'name': 'Input 2',
'file_ids': [file_2.id, file_3.id, file_4.id, file_5.id]
}],
'output_data': [{
'name': 'Output 1',
'workspace_id': workspace.id
}]}
data_2 = {
'version': '1.0',
'input_data': [{
'name': 'Input 1',
'file_id': file_6.id
}, {
'name': 'Input 2',
'file_ids': [file_7.id, file_8.id, file_9.id, file_10.id]
}],
'output_data': [{
'name': 'Output 1',
'workspace_id': workspace.id
}]}
job_1 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING', input_file_size=None,
input=data_1)
job_2 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING', input_file_size=None,
input=data_2)
# Execute method
Job.objects.process_job_input(job_1)
Job.objects.process_job_input(job_2)
# Retrieve updated job models
jobs = Job.objects.filter(id__in=[job_1.id, job_2.id]).order_by('id')
job_1 = jobs[0]
job_2 = jobs[1]
# Check jobs for expected fields
self.assertEqual(job_1.input_file_size, 1053.0)
self.assertEqual(job_1.source_started, min_src_started_job_1)
self.assertEqual(job_1.source_ended, max_src_ended_job_1)
self.assertEqual(job_1.source_sensor_class, s_class)
self.assertEqual(job_1.source_sensor, s_sensor)
self.assertEqual(job_1.source_collection, collection)
self.assertEqual(job_1.source_task, task)
self.assertEqual(job_2.input_file_size, 113269857.0)
self.assertEqual(job_2.source_started, min_src_started_job_2)
self.assertEqual(job_2.source_ended, max_src_ended_job_2)
self.assertEqual(job_2.source_sensor_class, s_class)
self.assertEqual(job_2.source_sensor, s_sensor)
self.assertEqual(job_2.source_collection, collection)
self.assertEqual(job_2.source_task, task)
# Make sure job input file models are created
job_input_files = JobInputFile.objects.filter(job_id=job_1.id)
self.assertEqual(len(job_input_files), 5)
input_files_dict = {'Input 1': set(), 'Input 2': set()}
for job_input_file in job_input_files:
input_files_dict[job_input_file.job_input].add(job_input_file.input_file_id)
self.assertDictEqual(input_files_dict, {'Input 1': {file_1.id}, 'Input 2': {file_2.id, file_3.id, file_4.id,
file_5.id}})
job_input_files = JobInputFile.objects.filter(job_id=job_2.id)
self.assertEqual(len(job_input_files), 5)
input_files_dict = {'Input 1': set(), 'Input 2': set()}
for job_input_file in job_input_files:
input_files_dict[job_input_file.job_input].add(job_input_file.input_file_id)
self.assertDictEqual(input_files_dict, {'Input 1': {file_6.id}, 'Input 2': {file_7.id, file_8.id, file_9.id,
file_10.id}})
def test_process_job_output(self):
"""Tests calling JobManager.process_job_output()"""
output_1 = JobResults()
output_1.add_file_parameter('foo', 1)
output_2 = JobResults()
output_2.add_file_parameter('foo', 2)
# These jobs have completed and have their execution results
job_exe_1 = job_test_utils.create_job_exe(status='COMPLETED', output=output_1)
job_exe_2 = job_test_utils.create_job_exe(status='COMPLETED', output=output_2)
# These jobs have their execution results, but have not completed
job_exe_3 = job_test_utils.create_job_exe(status='RUNNING')
job_exe_4 = job_test_utils.create_job_exe(status='RUNNING')
for job_exe in [job_exe_3, job_exe_4]:
job_exe_output = JobExecutionOutput()
job_exe_output.job_exe_id = job_exe.id
job_exe_output.job_id = job_exe.job_id
job_exe_output.job_type_id = job_exe.job.job_type_id
job_exe_output.exe_num = job_exe.exe_num
job_exe_output.output = JobResults().get_dict()
job_exe_output.save()
# These jobs have completed, but do not have their execution results
job_exe_5 = job_test_utils.create_job_exe(status='RUNNING')
job_exe_6 = job_test_utils.create_job_exe(status='RUNNING')
for job in [job_exe_5.job, job_exe_6.job]:
job.status = 'COMPLETED'
job.save()
# Test method
job_ids = [job_exe.job_id for job_exe in [job_exe_1, job_exe_2, job_exe_3, job_exe_4, job_exe_5, job_exe_6]]
result_ids = Job.objects.process_job_output(job_ids, timezone.now())
self.assertEqual(set(result_ids), {job_exe_1.job_id, job_exe_2.job_id})
# Jobs 1 and 2 should have output populated, jobs 3 through 6 should not
jobs = list(Job.objects.filter(id__in=job_ids).order_by('id'))
self.assertEqual(len(jobs), 6)
self.assertTrue(jobs[0].has_output())
self.assertDictEqual(jobs[0].output, output_1.get_dict())
self.assertTrue(jobs[1].has_output())
self.assertDictEqual(jobs[1].output, output_2.get_dict())
self.assertFalse(jobs[2].has_output())
self.assertFalse(jobs[3].has_output())
self.assertFalse(jobs[4].has_output())
self.assertFalse(jobs[5].has_output())
def test_queue_job_timestamps(self):
"""Tests that job attributes are updated when a job is queued."""
data_dict = convert_data_to_v6_json(Data()).get_dict()
job = job_test_utils.create_job(num_exes=1, status='CANCELED', input=data_dict, started=timezone.now(),
ended=timezone.now())
Job.objects.update_jobs_to_queued([job], timezone.now(), requeue=True)
job = Job.objects.get(pk=job.id)
self.assertEqual(job.status, 'QUEUED')
self.assertIsNotNone(job.queued)
self.assertIsNone(job.started)
self.assertIsNone(job.ended)
def test_queue_superseded_jobs(self):
"""Tests that JobManager.update_jobs_to_queued() does not queue superseded jobs"""
job = job_test_utils.create_job(status='FAILED')
Job.objects.supersede_jobs([job.id], timezone.now())
job_ids = Job.objects.update_jobs_to_queued([job], timezone.now())
job = Job.objects.get(pk=job.id)
self.assertListEqual(job_ids, [])
self.assertEqual(job.status, 'FAILED')
self.assertTrue(job.is_superseded)
def test_superseded_job(self):
"""Tests creating a job that supersedes another job"""
old_job = job_test_utils.create_job()
event = trigger_test_utils.create_trigger_event()
new_job = Job.objects.create_job_v6(old_job.job_type_rev, event.id, superseded_job=old_job)
new_job.save()
when = timezone.now()
Job.objects.supersede_jobs([old_job.id], when)
new_job = Job.objects.get(pk=new_job.id)
self.assertEqual(new_job.status, 'PENDING')
self.assertFalse(new_job.is_superseded)
self.assertEqual(new_job.root_superseded_job_id, old_job.id)
self.assertEqual(new_job.superseded_job_id, old_job.id)
self.assertIsNone(new_job.superseded)
old_job = Job.objects.get(pk=old_job.id)
self.assertTrue(old_job.is_superseded)
self.assertEqual(old_job.superseded, when)
class TestJob(TestCase):
def setUp(self):
django.setup()
def test_get_seed_job_results(self):
"""Test retrieving job results from a Seed job type"""
job_type = job_test_utils.create_seed_job_type()
input = {
"version": "1.0",
"input_data": {},
"output_data": {}
}
job = job_test_utils.create_job(job_type, input=input)
self.assertIsInstance(job.get_job_results(), SeedJobResults)
class TestJobType(TransactionTestCase):
def setUp(self):
django.setup()
seed_interface_str = \
"""
{
"seedVersion": "1.0.0",
"job": {
"name": "test",
"jobVersion": "1.0.0",
"packageVersion": "1.0.0",
"title": "Test job to exercise Seed functionality",
"description": "Reads input file and ",
"tags": [
"testing",
"seed"
],
"maintainer": {
"name": "<NAME>",
"organization": "E-corp",
"email": "<EMAIL>",
"url": "http://www.example.com",
"phone": "666-555-4321"
},
"timeout": 3600,
"interface": {
"command": "${INPUT_TEXT} ${INPUT_FILES} ${READ_LENGTH}",
"inputs": {
"files": [
{
"name": "INPUT_TEXT",
"mediaTypes": [
"text/plain"
],
"partial": true
},
{
"name": "INPUT_FILES",
"multiple": true
}
],
"json": [
{
"name": "READ_LENGTH",
"type": "integer"
},
{
"name": "OUTPUT_COUNT",
"type": "integer"
}
]
},
"outputs": {
"files": [
{
"name": "OUTPUT_FILES",
"mediaType": "text/plain",
"multiple": true,
"pattern": "output_files*.txt"
},
{
"name": "OUTPUT_TEXT",
"mediaType": "text/plain",
"pattern": "output_text.txt"
}
],
"json": [
{
"name": "cell_count",
"key": "cellCount",
"type": "integer"
}
]
},
"mounts": [
{
"name": "MOUNT_PATH",
"path": "/the/container/path",
"mode": "ro"
}
],
"settings": [
{
"name": "DB_HOST",
"secret": false
},
{
"name": "DB_PASS",
"secret": true
}
]
},
"resources": {
"scalar": [
{ "name": "cpus", "value": 1.5 },
{ "name": "mem", "value": 244.0 },
{ "name": "sharedMem", "value": 1.0 },
{ "name": "disk", "value": 11.0, "inputMultiplier": 4.0 }
]
},
"errors": [
{
"code": 1,
"name": "data-issue",
"title": "Data Issue discovered",
"description": "There was a problem with input data",
"category": "data"
},
{
"code": 2,
"name": "missing-mount",
"title": "Missing mount",
"description": "Expected mount point not available at run time",
"category": "job"
},
{
"code": 3,
"name": "missing-setting",
"title": "Missing setting",
"description": "Expected setting not defined in environment variable",
"category": "job"
},
{
"code": 4,
"name": "missing-env",
"title": "Missing environment",
"description": "Expected environment not provided",
"category": "job"
}
]
}
}
"""
self.seed_job_type = job_test_utils.create_seed_job_type(manifest=json.loads(seed_interface_str))
def test_get_seed_cpu_resource_from_seed_interface(self):
job_type = self.seed_job_type
value = job_type.get_resources().get_json().get_dict()
self.assertEqual(1.5, value['resources']['cpus'])
def test_get_seed_mem_resource_from_seed_interface(self):
job_type = self.seed_job_type
value = job_type.get_resources().get_json().get_dict()
self.assertEqual(244.0, value['resources']['mem'])
def test_get_seed_sharedmem_resource_from_seed_interface(self):
job_type = self.seed_job_type
value = job_type.get_resources().get_json().get_dict()
self.assertEqual(1.0, value['resources']['sharedmem'])
def test_get_seed_disk_resource_from_seed_interface(self):
job_type = self.seed_job_type
value = job_type.get_resources().get_json().get_dict()
self.assertEqual(11.0, value['resources']['disk'])
def test_get_job_version_array(self):
job_type = self.seed_job_type
version = '1.0.0'
value = job_type.get_job_version_array(version)
self.assertEqual([1,0,0,None], value)
version = '1.0.0-0'
value = job_type.get_job_version_array(version)
self.assertEqual([1,0,0,0], value)
version = '1.0.0-alpha'
value = job_type.get_job_version_array(version)
self.assertEqual([1,0,0,97], value)
version = '1.0'
value = job_type.get_job_version_array(version)
self.assertEqual([0,0,0,0], value)
class TestJobTypeRevision(TransactionTestCase):
def setUp(self):
django.setup()
self.seed_job_type = job_test_utils.create_seed_job_type()
self.seed_job_type_rev = JobTypeRevision.objects.get_revision(self.seed_job_type.name,
self.seed_job_type.version,
self.seed_job_type.revision_num)
def test_revision_get_input_interface(self):
self.assertEqual(self.seed_job_type_rev.get_input_interface().parameters['INPUT_IMAGE'].PARAM_TYPE, 'file')
def test_revision_get_output_interface(self):
self.assertEqual(self.seed_job_type_rev.get_output_interface().parameters['OUTPUT_IMAGE'].PARAM_TYPE, 'file')
class TestJobTypeRunningStatus(TestCase):
def setUp(self):
django.setup()
manifest1 = job_test_utils.create_seed_manifest(name='type-1', jobVersion='1.0.0')
self.job_type_1 = job_test_utils.create_seed_job_type(manifest=manifest1)
manifest2 = job_test_utils.create_seed_manifest(name='type-2', jobVersion='2.0.0')
self.job_type_2 = job_test_utils.create_seed_job_type(manifest=manifest2)
manifest3 = job_test_utils.create_seed_manifest(name='type-1', jobVersion='2.0.0')
self.job_type_3 = job_test_utils.create_seed_job_type(manifest=manifest3)
self.entry_1_longest = datetime.datetime.utcfromtimestamp(500000).replace(tzinfo=timezone.utc)
self.entry_1_shortest = datetime.datetime.utcfromtimestamp(650000).replace(tzinfo=timezone.utc)
self.entry_2_longest = datetime.datetime.utcfromtimestamp(600000).replace(tzinfo=timezone.utc)
self.entry_2_shortest = datetime.datetime.utcfromtimestamp(750000).replace(tzinfo=timezone.utc)
self.entry_3_longest = datetime.datetime.utcfromtimestamp(700000).replace(tzinfo=timezone.utc)
self.entry_3_shortest = datetime.datetime.utcfromtimestamp(800000).replace(tzinfo=timezone.utc)
job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING', last_status_change=self.entry_1_longest)
job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING', last_status_change=self.entry_1_shortest)
job_test_utils.create_job(job_type=self.job_type_2, status='RUNNING', last_status_change=self.entry_2_shortest)
job_test_utils.create_job(job_type=self.job_type_2, status='RUNNING', last_status_change=self.entry_2_longest)
job_test_utils.create_job(job_type=self.job_type_2, status='RUNNING', last_status_change=self.entry_2_shortest)
job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_shortest)
job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_longest)
job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_longest)
job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_shortest)
def test_successful(self):
"""Tests calling the get_running_job_status method on JobExecutionManager."""
status = JobType.objects.get_running_status()
self.assertEqual(len(status), 3)
# Check entry 1
self.assertEqual(status[0].job_type.id, self.job_type_1.id)
self.assertEqual(status[0].job_type.name, 'type-1')
self.assertEqual(status[0].job_type.version, '1.0.0')
self.assertEqual(status[0].count, 2)
self.assertEqual(status[0].longest_running, self.entry_1_longest)
# Check entry 2
self.assertEqual(status[1].job_type.id, self.job_type_2.id)
self.assertEqual(status[1].job_type.name, 'type-2')
self.assertEqual(status[1].job_type.version, '2.0.0')
self.assertEqual(status[1].count, 3)
self.assertEqual(status[1].longest_running, self.entry_2_longest)
# Check entry 3
self.assertEqual(status[2].job_type.id, self.job_type_3.id)
self.assertEqual(status[2].job_type.name, 'type-1')
self.assertEqual(status[2].job_type.version, '2.0.0')
self.assertEqual(status[2].count, 4)
self.assertEqual(status[2].longest_running, self.entry_3_longest)
class TestJobTypeFailedStatus(TestCase):
def setUp(self):
django.setup()
self.job_type_1 = job_test_utils.create_seed_job_type(job_version='1.0')
self.job_type_2 = job_test_utils.create_seed_job_type(job_version='2.0')
self.job_type_3 = job_test_utils.create_seed_job_type(job_version='2.0')
self.error_1 = Error.objects.create(name='Error 1', description='Test', category='SYSTEM')
self.error_2 = Error.objects.create(name='Error 2', description='Test', category='SYSTEM')
self.error_3 = Error.objects.create(name='Error 3', description='Test', category='DATA')
# Date stamps for errors
self.entry_1_last_time = datetime.datetime.utcfromtimestamp(590000).replace(tzinfo=timezone.utc)
self.entry_1_first_time = datetime.datetime.utcfromtimestamp(580000).replace(tzinfo=timezone.utc)
self.entry_2_time = datetime.datetime.utcfromtimestamp(585000).replace(tzinfo=timezone.utc)
self.entry_3_last_time = datetime.datetime.utcfromtimestamp(490000).replace(tzinfo=timezone.utc)
self.entry_3_mid_time = datetime.datetime.utcfromtimestamp(480000).replace(tzinfo=timezone.utc)
self.entry_3_first_time = datetime.datetime.utcfromtimestamp(470000).replace(tzinfo=timezone.utc)
self.entry_4_time = datetime.datetime.utcfromtimestamp(385000).replace(tzinfo=timezone.utc)
# Create jobs
job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING', last_status_change=timezone.now())
job_test_utils.create_job(job_type=self.job_type_1, error=self.error_1, status='FAILED',
last_status_change=self.entry_2_time)
job_test_utils.create_job(job_type=self.job_type_2, error=self.error_1, status='FAILED',
last_status_change=self.entry_4_time)
job_test_utils.create_job(job_type=self.job_type_2, error=self.error_2, status='FAILED',
last_status_change=self.entry_1_last_time)
job_test_utils.create_job(job_type=self.job_type_2, error=self.error_2, status='FAILED',
last_status_change=self.entry_1_first_time)
job_test_utils.create_job(job_type=self.job_type_3, error=self.error_2, status='FAILED',
last_status_change=self.entry_3_mid_time)
job_test_utils.create_job(job_type=self.job_type_3, error=self.error_2, status='FAILED',
last_status_change=self.entry_3_last_time)
job_test_utils.create_job(job_type=self.job_type_3, error=self.error_2, status='FAILED',
last_status_change=self.entry_3_first_time)
job_test_utils.create_job(job_type=self.job_type_3, error=self.error_3, status='FAILED',
last_status_change=timezone.now())
def test_successful(self):
"""Tests calling the get_failed_jobs_with_system_errors method on JobManager."""
status = JobType.objects.get_failed_status()
self.assertEqual(len(status), 4)
# Check entry 1
self.assertEqual(status[0].job_type.id, self.job_type_2.id)
self.assertEqual(status[0].job_type.version, '2.0')
self.assertEqual(status[0].error.name, 'Error 2')
self.assertEqual(status[0].count, 2)
self.assertEqual(status[0].first_error, self.entry_1_first_time)
self.assertEqual(status[0].last_error, self.entry_1_last_time)
# Check entry 2
self.assertEqual(status[1].job_type.id, self.job_type_1.id)
self.assertEqual(status[1].job_type.version, '1.0')
self.assertEqual(status[1].error.name, 'Error 1')
self.assertEqual(status[1].count, 1)
self.assertEqual(status[1].first_error, self.entry_2_time)
self.assertEqual(status[1].last_error, self.entry_2_time)
# Check entry 3
self.assertEqual(status[2].job_type.id, self.job_type_3.id)
self.assertEqual(status[2].job_type.version, '2.0')
self.assertEqual(status[2].error.name, 'Error 2')
self.assertEqual(status[2].count, 3)
self.assertEqual(status[2].first_error, self.entry_3_first_time)
self.assertEqual(status[2].last_error, self.entry_3_last_time)
# Check entry 4
self.assertEqual(status[3].job_type.id, self.job_type_2.id)
self.assertEqual(status[3].job_type.version, '2.0')
self.assertEqual(status[3].error.name, 'Error 1')
self.assertEqual(status[3].count, 1)
self.assertEqual(status[3].first_error, self.entry_4_time)
self.assertEqual(status[3].last_error, self.entry_4_time)
class TestJobTypeTagManager(TransactionTestCase):
def setUp(self):
django.setup()
self.job_type1 = job_test_utils.create_seed_job_type()
self.tag_set1 = ["tag1", "tag2", "oneandfour"]
self.job_type2 = job_test_utils.create_seed_job_type()
self.tag_set2 = ["tag3", "tag4"]
self.job_type3 = job_test_utils.create_seed_job_type()
self.tag_set3 = ["tag5", "tag6"]
self.job_type4 = job_test_utils.create_seed_job_type()
self.tag_set4 = ["tag7", "tag8", "oneandfour"]
JobTypeTag.objects.create_job_type_tags(self.job_type1, self.tag_set1)
JobTypeTag.objects.create_job_type_tags(self.job_type3, self.tag_set3)
JobTypeTag.objects.create_job_type_tags(self.job_type4, self.tag_set4)
def test_create_job_type_tags(self):
"""Tests calling JobTypeManager.create_job_type_tags()"""
result = JobTypeTag.objects.create_job_type_tags(self.job_type2, self.tag_set2)
self.assertEqual(len(result), 2)
def test_clear_job_type_tags(self):
"""Tests calling JobTypeManager.clear_job_type_tags()"""
tags = [jt_tag.tag for jt_tag in JobTypeTag.objects.filter(job_type_id=self.job_type3.id)]
self.assertListEqual(tags, self.tag_set3)
JobTypeTag.objects.clear_job_type_tags(self.job_type3.id)
tags = [jt_tag.tag for jt_tag in JobTypeTag.objects.filter(job_type_id=self.job_type3.id)]
self.assertEqual(len(tags), 0)
|
azure-cs-functions/functions/Hello/__init__.py | jandom/examples | 1,628 | 12647316 | <gh_stars>1000+
# Copyright 2016-2021, Pulumi Corporation. All rights reserved.
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
body = 'Hello there {}'.format(req.params.get('name'))
return func.HttpResponse(
body,
status_code=200)
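# Illustrative local-test sketch (not part of the original sample): the handler can be
# exercised directly with a mocked HttpRequest, following the documented Azure Functions
# unit-testing pattern. The '/api/Hello' URL below is an assumed route for illustration.
def _local_smoke_test():
    req = func.HttpRequest(method='GET', body=None, url='/api/Hello',
                           params={'name': 'Pulumi'})
    assert main(req).get_body() == b'Hello there Pulumi'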
|
paas-ce/paas/paas/saas/urls.py | renmcc/bk-PaaS | 767 | 12647341 | <filename>paas-ce/paas/paas/saas/urls.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.conf.urls import include, url
from common.constants import SAAS_CODE_REGEX
from saas import views
urlpatterns = [
    # Application list
url(r'^list/', include([
url(r'^$', views.SaaSListPageView.as_view(), name="saas_list"),
url(r'^query/$', views.SaaSListView.as_view()),
])),
    # Basic application information
url(r'^(?P<app_code>' + SAAS_CODE_REGEX + ')/', include([
url(r'^info/$', views.InfoView.as_view()),
        # FIXME: change to a RESTful-like API if more actions are added for SaaS
        # Delete a SaaS application
url(r'^delete/$', views.DeleteSaaSView.as_view()),
url(r'^logo/$', views.ModifyAppLogoView.as_view()),
        # Upload a SaaS application
url(r'^upload/$', views.UploadView.as_view()),
        # Release related
        # Release & deployment page
url(r'^release/', include([
url(r'^$', views.ReleasePageView.as_view()),
            # Release record page
url(r'^record/$', views.RecordView.as_view()),
            # Take-offline page
url(r'^offline/$', views.OfflinePageView.as_view()),
            # Execute the release
url(r'^online/(?P<saas_app_version_id>\d+)/$', views.OnlineView.as_view()),
])),
])),
url(r'^0/release/$', views.ReleasePageView.as_view(), {'app_code': 0}),
# for legency system, keep below
# saas/release/online,
# saas/upload,
url(r'^release/online/(?P<saas_app_version_id>\d+)/$', views.OnlineView.as_view()),
url(r'^upload/(?P<app_code>' + SAAS_CODE_REGEX + ')/$', views.UploadView.as_view()),
]
|
firefly/models/consts.py | matrixorz/firefly | 247 | 12647354 | <reponame>matrixorz/firefly
# coding=utf-8
KEYBOARD_URL_MAPS = {
'default': [
[
'Site wide shortcuts', # keyboard category
[
# ('keyboard shortcut', 'keyboard info')
('s', 'Focus search bar'),
('g n', 'Go to Notifications'),
('g h', 'Go to personal page'),
('?', 'Bring up this help dialog'),
],
],
[
'Registration and login',
[
('l r', 'Open register window'),
('l o', 'Open login window'),
('l t', 'Logout'),
('l c', 'Close register/login window'),
],
],
[
'Notifications',
[
('e / I / y', 'Mark as read'),
],
],
[
'Personal page',
[
('g s', 'Go to personal settings page'),
('g t', 'Go to personal topic page'),
]
]
],
'/': [
[
'Topic list shortcuts',
[
('j', 'Move selection down'),
('k', 'Move selection up'),
('o', 'Open selection'),
],
],
[
'Create Topic',
[
('t o', 'Open create topic window'),
('t q', 'Close create topic window'),
('t s', 'Submit create topic'),
],
]
],
'/post': [
[
'Reply Topic',
[
('p o', 'Open reply topic window'),
('p q', 'Close reply topic window'),
('p s', 'Submit reply topic'),
],
],
]
}
# http://clrs.cc/
CATEGORY_COLORS = (
'#001f3f', # Navy
'#0074D9', # Blue
'#7FDBFF', # Aqua
'#39CCCC', # Teal
'#3D9970', # Olive
'#2ECC40', # Green
'#01FF70', # Lime
'#FFDC00', # Yellow
'#FF851B', # Orange
'#FF4136', # Red
'#85144b', # Maroon
'#F012BE', # Fuchsia
'#b10dc9', # Purple
'#111111', # black
'#aaaaaa', # Gray
'#dddddd', # Silver
)
|
tests/comparison/leopard/report.py | suifengzhuliu/impala | 1,523 | 12647379 | <reponame>suifengzhuliu/impala
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle
import re
import os
import job
from collections import defaultdict
class Report(object):
'''Contains information about a completed job, such as the number of crashes and stack
traces from every crash. The report is usually displayed on a web page.
'''
def __init__(self, job_id):
self.num_queries = 0
self.run_time = 0
self.run_date = 0
self.job_name = ''
self.num_crashes = 0
self.num_row_count_mismatch = 0
self.num_mismatch = 0
self.job_id = job_id
self.git_hash = ''
self.grouped_results = None
self.parent_job_name = ''
self.num_queries_returned_correct_data = 0
self.get_results()
@property
def run_time_str(self):
'''Return the running time of the job as a string in human readable format.'''
m, s = divmod(self.run_time, 60)
h, m = divmod(m, 60)
return '{0:02d}:{1:02d}:{2:02d}'.format(int(h), int(m), int(s))
def classify_error(self, error):
d = {
ur'LINE \d+:': 'Postgres_error',
ur'Permission denied': 'permission_denied',
ur'^AnalysisException': 'AnalysisException',
ur'^Column \d+ in row \d+ does not match': 'mismatch',
ur'^Could not connect': 'could_not_connect',
ur'^IllegalStateException': 'IllegalStateException',
ur'^Invalid query handle: ': 'invalid_query_handle',
ur'^Known issue:': 'known_issue',
ur'^Operation is in ERROR_STATE': 'error_state',
ur'^Query timed out after \d+ seconds': 'timeout',
ur'^Row counts do not match': 'row_counts',
ur'^Too much data': 'too_much_data',
ur'^Unknown expr node type: \d+': 'unkown_node',
ur'^Year is out of valid range': 'year_range',
ur'^[A-Za-z]+ out of range': 'out_of_range',
ur'^division by zero': 'division_by_zero'}
for r in d:
if re.search(r, error):
return d[r]
return 'unrecognized'
def group_queries(self, all_queries, group_func):
'''General function that returns a dictionary with keys that are generated by
group_func. all_queries is a list of queries.
group_func should take query as a parameter and return a string containing an
interesting property of the query which will be used as key in the dictionary.
'''
grouped_queries = defaultdict(list)
for query in all_queries:
grouped_queries[group_func(query)].append(query)
return grouped_queries
def __str__(self):
'''TODO: Render report as text.
'''
return ''
def get_first_impala_frame(self, query_result):
'''Extracts the first impala frame in the stack trace.
'''
stack = query_result['formatted_stack']
if stack:
for line in stack.split('\n'):
match = re.search(ur'(impala::.*) \(', line)
if match:
return match.group(1)
else:
return None
def _format_stack(self, stack):
'''Cleans up the stack trace.
'''
def clean_frame(frame):
      # Remove the memory address from each frame
reg = re.match(ur'#\d+ *0x[0123456789abcdef]* in (.*)', frame)
if reg: return reg.group(1)
# this is for matching lines like "#7 SLL_Next (this=0x9046780, src=0x90467c8...
reg = re.match(ur'#\d+ *(\S.*)', frame)
if reg: return reg.group(1)
return frame
def stack_gen():
'''Generator that yields impala stack trace lines line by line.
'''
if stack:
active = False
for line in stack.split('\n'):
if active or line.startswith('#0'):
active = True
yield line
return '\n'.join(clean_frame(l) for l in stack_gen())
def get_results(self):
'''Analyses the completed job and extracts important results into self. This method
should be called as soon as the object is created.
'''
from controller import PATH_TO_FINISHED_JOBS
def group_outer_func(query):
if 'stack' in query:
return 'stack'
return self.classify_error(query['error'])
def stack_group_func(query):
return self.get_first_impala_frame(query['stack'])
with open(os.path.join(PATH_TO_FINISHED_JOBS, self.job_id)) as f:
job = pickle.load(f)
self.grouped_results = self.group_queries(job.result_list, group_outer_func)
# Format the stack for queries that have a stack
for query in self.grouped_results['stack']:
query['formatted_stack'] = self._format_stack(query['stack'])
self.num_crashes = len(self.grouped_results['stack'])
self.num_row_count_mismatch = len(self.grouped_results['row_counts'])
self.num_mismatch = len(self.grouped_results['mismatch'])
self.grouped_stacks = self.group_queries(
self.grouped_results['stack'], self.get_first_impala_frame)
self.run_time = job.stop_time - job.start_time
self.run_date = job.start_time
self.job_name = job.job_name
self.git_hash = job.git_hash
self.num_queries_executed = job.num_queries_executed
self.num_queries_returned_correct_data = job.num_queries_returned_correct_data
if job.parent_job:
with open(os.path.join(PATH_TO_FINISHED_JOBS, job.parent_job)) as f:
parent_job = pickle.load(f)
self.parent_job_name = parent_job.job_name
def save_pickle(self):
from controller import PATH_TO_REPORTS
with open(os.path.join(PATH_TO_REPORTS, self.job_id), 'w') as f:
pickle.dump(self, f)
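# Illustrative sketch (not part of the original module): shows how group_queries()
# buckets query results using classify_error() as the grouping function. The query
# dicts below are minimal stand-ins for real result_list entries.
def _demo_group_queries():
  report = Report.__new__(Report)  # bypass __init__, which loads a pickled job
  queries = [{'error': 'Query timed out after 300 seconds'},
             {'error': 'division by zero'},
             {'error': 'division by zero'}]
  grouped = report.group_queries(queries, lambda q: report.classify_error(q['error']))
  # Expected grouping: {'timeout': [1 query], 'division_by_zero': [2 queries]}
  return grouped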
|
venv/Lib/site-packages/altair/examples/density_stack.py | ajayiagbebaku/NFL-Model | 6,831 | 12647385 | <gh_stars>1000+
"""
Stacked Density Estimates
-------------------------
To plot a stacked graph of estimates, use a shared ``extent`` and a fixed
number of subdivision ``steps`` to ensure that the points for each area align
well. Density estimates of measurements for each iris flower feature are plot
in a stacked method. In addition, setting ``counts`` to true multiplies the
densities by the number of data points in each group, preserving proportional
differences.
"""
# category: area charts
import altair as alt
from vega_datasets import data
source = data.iris()
alt.Chart(source).transform_fold(
['petalWidth',
'petalLength',
'sepalWidth',
'sepalLength'],
as_ = ['Measurement_type', 'value']
).transform_density(
density='value',
bandwidth=0.3,
groupby=['Measurement_type'],
    extent=[0, 8],
    counts=True,
steps=200
).mark_area().encode(
alt.X('value:Q'),
alt.Y('density:Q', stack='zero'),
alt.Color('Measurement_type:N')
).properties(width=400, height=100)
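# Illustrative variant (not part of the original gallery example): dropping ``counts=True``
# stacks plain density estimates that each integrate to one, so the proportional
# differences described in the docstring are no longer visible.
# alt.Chart(source).transform_fold(
#     ['petalWidth', 'petalLength', 'sepalWidth', 'sepalLength'],
#     as_=['Measurement_type', 'value']
# ).transform_density(
#     density='value', bandwidth=0.3, groupby=['Measurement_type'],
#     extent=[0, 8], steps=200
# ).mark_area().encode(
#     alt.X('value:Q'),
#     alt.Y('density:Q', stack='zero'),
#     alt.Color('Measurement_type:N')
# ).properties(width=400, height=100)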
|
test_with_saved_features.py | Aamer98/cdfsl-benchmark | 184 | 12647391 | <gh_stars>100-1000
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.optim
import json
import torch.utils.data.sampler
import os
import glob
import random
import time
import configs
import backbone
import data.feature_loader as feat_loader
from data.datamgr import SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from io_utils import model_dict, parse_args, get_resume_file, get_best_file , get_assigned_file
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
def feature_evaluation(cl_data_file, model, n_way = 5, n_support = 5, n_query = 15, adaptation = False):
class_list = cl_data_file.keys()
select_class = random.sample(class_list, n_way)
z_all = []
for cl in select_class:
img_feat = cl_data_file[cl]
perm_ids = np.random.permutation(len(img_feat)).tolist()
z_all.append( [ np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query) ] ) # stack each batch
z_all = torch.from_numpy(np.array(z_all) )
model.n_query = n_query
if adaptation:
scores = model.set_forward_adaptation(z_all, is_feature = True)
else:
scores = model.set_forward(z_all, is_feature = True)
pred = scores.data.cpu().numpy().argmax(axis = 1)
y = np.repeat(range( n_way ), n_query )
acc = np.mean(pred == y)*100
return acc
if __name__ == '__main__':
params = parse_args('test')
acc_all = []
iter_num = 600
few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot)
if params.method == 'baseline':
model = BaselineFinetune( model_dict[params.model], **few_shot_params )
elif params.method == 'protonet':
model = ProtoNet( model_dict[params.model], **few_shot_params )
else:
raise ValueError('Unknown method')
model = model.cuda()
checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, 'miniImageNet', params.model, params.method)
if params.train_aug:
checkpoint_dir += '_aug'
if not params.method in ['baseline'] :
checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)
if not params.method in ['baseline'] :
if params.save_iter != -1:
modelfile = get_assigned_file(checkpoint_dir,params.save_iter)
else:
modelfile = get_best_file(checkpoint_dir)
if modelfile is not None:
tmp = torch.load(modelfile)
model.load_state_dict(tmp['state'])
#params.save_iter = 399
if params.save_iter != -1:
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), params.dataset + "_" + str(params.save_iter)+".hdf5") #default split = novel, but you can also test base or val classes
else:
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), params.dataset + ".hdf5") #default split = novel, but you can also test base or val classes
cl_data_file = feat_loader.init_loader(novel_file)
for i in range(iter_num):
print (i)
acc = feature_evaluation(cl_data_file, model, n_query = 15, adaptation = params.adaptation, **few_shot_params)
print (acc)
acc_all.append(acc)
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
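# Note (not part of the original script): the printed "+-" term is the half-width of a
# normal-approximation 95% confidence interval over the iter_num test episodes,
# i.e. 1.96 * std(acc_all) / sqrt(iter_num).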
|
Python3/488.py | rakhi2001/ecom7 | 854 | 12647393 | __________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def findMinStep(self, board: str, hand: str) -> int:
from collections import Counter
balls_count = Counter(hand)
return self.dfs(board, balls_count)
def dfs(self, board, balls_count):
if not board:
return 0
answer = float('inf')
i = 0
while i < len(board):
j = i + 1
while j < len(board) and board[j] == board[i]:
j += 1
gap = 3 - (j - i)
if balls_count[board[i]] >= gap:
if (j - i) > 3:
gap = 0
balls_count[board[i]] -= gap
a = self.dfs(board[:i] + board[j:], balls_count)
if a >= 0:
answer = min(answer, a + gap)
balls_count[board[i]] += gap
i = j
return answer if answer != float('inf') else -1
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def findMinStep(self, board: str, hand: str) -> int:
if not board or len(board) == 0:
return -1
hand_map = {}
for b in hand:
hand_map[b] = hand_map.get(b, 0) + 1
min_res = [len(hand) + 1]
self.dfs(board, hand_map, 0, min_res)
return min_res[0] if min_res[0] != len(hand) + 1 else -1
def dfs(self, board, hand_map, used, min_res):
l = len(board)
if l == 0:
if min_res[0] > used:
min_res[0] = used
return
if len(hand_map) == 0:
return
for i in range(l):
ch = board[i]
if ch not in hand_map:
continue
count = hand_map[ch]
if i < l-1 and board[i+1] == ch:
new_count = count - 1
if new_count == 0:
del hand_map[ch]
else:
hand_map[ch] = new_count
new_board = self.create_board(board, i-1, i+2)
self.dfs(new_board, hand_map, used+1, min_res)
hand_map[ch] = count
elif count >= 2:
new_count = count - 2
if new_count == 0:
del hand_map[ch]
else:
hand_map[ch] = new_count
new_board = self.create_board(board, i-1, i+1)
self.dfs(new_board, hand_map, used+2, min_res)
hand_map[ch] = count
def create_board(self, board, left, right):
l = len(board)
while left >= 0 and right < l:
ch = board[left]
count = 0
i, j = left, right
while i >= 0 and board[i] == ch:
i -= 1
count += 1
while j < l and board[j] == ch:
j += 1
count += 1
if count < 3:
break
else:
left, right = i, j
return board[:left+1] + board[right:]
__________________________________________________________________________________________________
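# Illustrative usage (not part of the submissions above): the classic LeetCode 488
# examples; the expected results follow the problem statement.
# s = Solution()
# s.findMinStep("WRRBBW", "RB")        # -> -1 (the board cannot be cleared)
# s.findMinStep("WWRRBBWW", "WRBRW")   # -> 2
# s.findMinStep("G", "GGGGG")          # -> 2
# s.findMinStep("RBYYBBRRB", "YRBGB")  # -> 3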
|
backend/www/admin/formatters.py | sleepingAnt/viewfinder | 645 | 12647409 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Customization for display of database tables.
Default: default display of a table row
*: customized versions by table
"""
__author__ = '<EMAIL> (<NAME>)'
import logging
import pprint
import time
from tornado.escape import url_escape, xhtml_escape
from viewfinder.backend.base.util import ConvertToString
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.schema import UnpackLocation, UnpackPlacemark
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.viewpoint import Viewpoint
class FmtDefault(object):
def __init__(self, table):
self._table = table
def FormatItemAttributes(self, item):
"""Returns an array of item attributes, one per column in the
    table definition, formatted for display in an HTML table.
"""
attributes = self._FormatAllAttributes(item)
rows = [pretty for _, _, _, pretty in attributes]
return rows
def FormatItemAttributesForView(self, item):
"""Return an array of rows. Each row consists of "column name",
"key", "value".
"""
attributes = self._FormatAllAttributes(item)
rows = [(name, key, pretty) for name, key, _, pretty in attributes]
rows.extend(self._GetExtraViewFields(item))
return rows
def _GetExtraViewFields(self, item):
"""Class used to append new fields in per-object view. Nothing by default.
Must be a list of (name, key, pretty)."""
return []
@staticmethod
def _Escape(val):
# Need to cast to string for int-valued columns (eg: user_id).
return url_escape(ConvertToString(val))
@staticmethod
def _XEscape(val):
# Need to cast to string for int-valued columns (eg: user_id).
return xhtml_escape(ConvertToString(val))
@staticmethod
def _HashQueryLink(table, key, name=None):
return '<a href="/admin/db?table=%s&type=query&hash_key=%s">%s</a>' % \
(FmtDefault._Escape(table), FmtDefault._Escape(key), FmtDefault._XEscape(name if name is not None else key))
@staticmethod
def _SortQueryLink(table, hash_key, sort_key, name=None):
"""Builds a query link for a hash_key and sort_key. Sort key operator is 'EQ'."""
return '<a href="/admin/db?table=%s&type=query&hash_key=%s&sort_key=%s&sort_desc=EQ">%s</a>' % \
(FmtDefault._Escape(table), FmtDefault._Escape(hash_key), FmtDefault._Escape(sort_key),
FmtDefault._XEscape(name if name is not None else '%s:%s' % (hash_key, sort_key)))
@staticmethod
def _EpisodeLink(vp, name=None):
return FmtDefault._HashQueryLink('Episode', vp, name)
@staticmethod
def _PhotoLink(vp, name=None):
return FmtDefault._HashQueryLink('Photo', vp, name)
@staticmethod
def _UserLink(vp, name=None):
return FmtDefault._HashQueryLink('User', vp, name)
@staticmethod
def _ViewpointLink(vp, name=None):
return FmtDefault._HashQueryLink('Viewpoint', vp, name)
def _FormatAllAttributes(self, item):
"""Build list of (column, key, value, pretty_value). We need a list to keep the columns ordered."""
attrs = []
for name in self._table.GetColumnNames():
c = self._table.GetColumn(name)
value = item.get(c.key, None)
pretty = self._FormatAttribute(name, value) if value is not None else '-'
attrs.append((name, c.key, value, pretty))
return attrs
def _FormatAttribute(self, name, value):
"""Returns the attribute value; If none, returns '-'. Formats by
default the following fields: 'viewpoint_id', 'episode_id',
'photo_id', 'timestamp', 'Location', 'Placemark'.
"""
if name == 'viewpoint_id' or name == 'private_vp_id':
did, (vid, sid) = Viewpoint.DeconstructViewpointId(value)
pretty = '%s/%d/%d' % (value, did, vid)
return FmtDefault._ViewpointLink(value, pretty)
elif name == 'user_id' or name == 'sender_id':
return self._UserLink(value)
elif name == 'episode_id' or name == 'parent_ep_id':
ts, did, (eid, sid) = Episode.DeconstructEpisodeId(value)
pretty = '%s/%d/%d' % (value, did, eid)
return self._EpisodeLink(value, pretty)
elif name == 'photo_id' or name == 'parent_id':
ts, did, (pid, sid) = Photo.DeconstructPhotoId(value)
pretty = '%s/%d/%d' % (value, did, pid)
return self._PhotoLink(value, pretty)
elif name == 'timestamp' or name == 'last_updated' or name == 'expires' or name == 'last_fetch':
return self._FormatTimestamp(value)
elif name == 'location':
return self._XEscape(', '.join(['%s: %s' % (k, v) for k, v in UnpackLocation(value)._asdict().items()]))
elif name == 'placemark':
return self._XEscape(', '.join(['%s: %s' % (k, v) for k, v in UnpackPlacemark(value)._asdict().items()]))
else:
return self._XEscape('%s' % value)
def _FormatTimestamp(self, timestamp):
"""Formats a timestamp (in UTC) via default format."""
return self._XEscape(time.asctime(time.gmtime(timestamp)))
def _GetQueryURL(self, table, hash_key):
"""Returns a URL to display a DB query of the table using
hash key 'hash_key'.
"""
return '/admin/db?table=%s&type=query&hash_key=%s' % (self._Escape(table), self._Escape(repr(hash_key)))
class FmtAccounting(FmtDefault):
_names = { 'vs': 'viewpoint_size', 'us': 'user_size',
'ow': 'owned_by', 'sb': 'shared_by', 'vt': 'visible_to' }
def _FormatAttribute(self, name, value):
if name == 'hash_key':
split = value.split(':')
prefix = split[0]
prefix_name = self._names[prefix]
if prefix == 'vs':
return '%s:%s' % (self._XEscape(prefix_name), self._ViewpointLink(split[1]))
elif prefix == 'us':
return '%s:%s' % (self._XEscape(prefix_name), self._UserLink(split[1]))
elif name == 'sort_key':
split = value.split(':')
prefix = split[0]
prefix_name = self._names[prefix]
if len(split) == 1:
return prefix_name
elif prefix == 'ow' or prefix == 'sb':
return '%s:%s' % (self._XEscape(prefix_name), self._UserLink(split[1]))
return FmtDefault._FormatAttribute(self, name, value)
class FmtEpisode(FmtDefault):
def _GetExtraViewFields(self, item):
ep_id = item.get('ei')
extras = []
extras.append(self._HashQueryLink('Index', 'ev:pa:%s' % ep_id, 'Children'))
extras.append(self._HashQueryLink('Post', ep_id, 'Posts'))
return [('Extras', '', ' · '.join(extras))]
class FmtIdentity(FmtDefault):
def _GetExtraViewFields(self, item):
id_id = item.get('ke')
extras = []
extras.append(self._HashQueryLink('Index', 'co:id:%s' % id_id, 'In-contacts'))
return [('Extras', '', ' · '.join(extras))]
class FmtIndex(FmtDefault):
def _FormatAllAttributes(self, item):
"""Build list of (column, key, value, pretty_value). We need a list to keep the columns ordered.
The interpretation of the 'key' column depends on the beginning of the 'term' column."""
attrs = []
term = item.get('t', None)
key = item.get('k', None)
data = item.get('d', None)
split = term.split(':')
table = split[0]
key_pretty = key
if table == 'co':
db_key = Contact._ParseIndexKey(key)
key_pretty = self._SortQueryLink('Contact', db_key.hash_key, db_key.range_key)
elif table == 'ev':
key_pretty = self._EpisodeLink(key)
elif table == 'fo':
db_key = Follower._ParseIndexKey(key)
key_pretty = self._SortQueryLink('Follower', db_key.hash_key, db_key.range_key)
elif table == 'id':
key_pretty = self._HashQueryLink('Identity', key)
elif table == 'vp':
key_pretty = self._ViewpointLink(key)
attrs.append(('term', 't', term, term))
attrs.append(('key', 'k', key, key_pretty))
attrs.append(('data', 't', data, data))
attrs.append(('_version', '_ve', data, data))
return attrs
class FmtLock(FmtDefault):
def _FormatAttribute(self, name, value):
"""Formats 'expiration' as human readable date/times.
"""
if name == 'expiration':
if value < time.time():
return '<i>Expired</i>'
else:
return self._FormatTimestamp(value)
else:
return FmtDefault._FormatAttribute(self, name, value)
class FmtOperation(FmtDefault):
def _FormatAttribute(self, name, value):
"""Formats 'timestamp' as human readable date/time, {'json',
'first_exception', 'last_exception'} as <pre/> blocks for readability.
"""
if name in ('json', 'first_exception', 'last_exception'):
return '<pre>%s</pre>' % self._XEscape(value)
elif name == 'backoff':
if value < time.time():
return '<i>Expired</i>'
else:
return self._FormatTimestamp(value)
else:
return FmtDefault._FormatAttribute(self, name, value)
class FmtUser(FmtDefault):
def _GetExtraViewFields(self, item):
user_id = item.get('ui')
extras = []
extras.append(self._HashQueryLink('Accounting', 'us:%s' % user_id, 'Accounting'))
extras.append(self._HashQueryLink('Contact', user_id, 'Contacts'))
extras.append(self._HashQueryLink('Device', user_id, 'Devices'))
extras.append(self._HashQueryLink('Index', 'ev:ui:%s' % user_id, 'Episodes'))
extras.append(self._HashQueryLink('Followed', user_id, 'Followed'))
extras.append(self._HashQueryLink('Follower', user_id, 'Follower'))
extras.append(self._HashQueryLink('Friend', user_id, 'Friends'))
extras.append(self._HashQueryLink('Index', 'id:ui:%s' % user_id, 'Identities'))
extras.append(self._HashQueryLink('Notification', user_id, 'Notifications'))
extras.append(self._HashQueryLink('Settings', 'us:%s' % user_id, 'Settings'))
extras.append(self._HashQueryLink('Subscription', user_id, 'Subscriptions'))
extras.append(self._HashQueryLink('Index', 'vp:ui:%s' % user_id, 'Viewpoints'))
return [('Extras', '', ' · '.join(extras))]
class FmtViewpoint(FmtDefault):
def _GetExtraViewFields(self, item):
vp_id = item.get('vi')
extras = []
extras.append(self._HashQueryLink('Accounting', 'vs:%s' % vp_id, 'Accounting'))
extras.append(self._HashQueryLink('Activity', vp_id, 'Activities'))
extras.append(self._HashQueryLink('Comment', vp_id, 'Comments'))
extras.append(self._HashQueryLink('Index', 'ev:vi:%s' % vp_id, 'Episodes'))
extras.append(self._HashQueryLink('Index', 'fo:vi:%s' % vp_id, 'Followers'))
return [('Extras', '', ' · '.join(extras))]
|
nested_admin/tests/nested_polymorphic/test_polymorphic_add_filtering/admin.py | jpic/django-nested-admin | 580 | 12647420 | <reponame>jpic/django-nested-admin<filename>nested_admin/tests/nested_polymorphic/test_polymorphic_add_filtering/admin.py<gh_stars>100-1000
from django.contrib import admin
from django.db import models
from django import forms
import nested_admin
from .models import (
FreeText, Poll, Question, MultipleChoiceGroup, MultipleChoice, Survey, Text, Textarea)
class TextInline(nested_admin.NestedTabularInline):
model = Text
extra = 1
min_num = 1
max_num = 1
sortable_field_name = "position"
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class TextareaInline(nested_admin.NestedTabularInline):
model = Textarea
extra = 1
min_num = 1
max_num = 1
sortable_field_name = "position"
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class RadioInline(nested_admin.NestedTabularInline):
model = MultipleChoice
sortable_field_name = "position"
extra = 0
min_num = 1
max_num = 8
radio_fields = {'style': admin.HORIZONTAL}
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class RadioGroupInline(nested_admin.NestedTabularInline):
model = MultipleChoiceGroup
inlines = (RadioInline,)
extra = 0
min_num = 1
max_num = 1
sortable_field_name = "position"
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class DropDownInline(nested_admin.NestedTabularInline):
model = MultipleChoice
sortable_field_name = "position"
extra = 0
min_num = 1
max_num = 8
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class DropDownGroupInline(nested_admin.NestedTabularInline):
model = MultipleChoiceGroup
inlines = (DropDownInline,)
extra = 0
min_num = 1
max_num = 1
sortable_field_name = "position"
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class QuestionInline(nested_admin.NestedStackedPolymorphicInline):
class FreeTextInline(nested_admin.NestedStackedPolymorphicInline.Child):
model = FreeText
inlines = (TextInline, TextareaInline, DropDownGroupInline)
sortable_field_name = "position"
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
class PollInline(nested_admin.NestedStackedPolymorphicInline.Child):
model = Poll
inlines = (TextInline, RadioGroupInline,)
sortable_field_name = "position"
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
model = Question
extra = 0
sortable_field_name = "position"
child_inlines = (FreeTextInline, PollInline,)
formfield_overrides = {
models.PositiveSmallIntegerField: {'widget': forms.HiddenInput},
}
@admin.register(Survey)
class SurveyAdmin(nested_admin.NestedPolymorphicModelAdmin):
inlines = (QuestionInline,)
|
stonesoup/updater/information.py | Red-Portal/Stone-Soup-1 | 157 | 12647428 | # -*- coding: utf-8 -*-
from functools import lru_cache
import numpy as np
from ..base import Property
from ..types.prediction import GaussianMeasurementPrediction
from ..types.update import Update
from ..models.measurement.linear import LinearGaussian
from ..updater.kalman import KalmanUpdater
class InformationKalmanUpdater(KalmanUpdater):
r"""A class which implements the update of information form of the Kalman filter. This is
conceptually very simple. The update proceeds as:
.. math::
Y_{k|k} = Y_{k|k-1} + H^{T}_k R^{-1}_k H_k
\mathbf{y}_{k|k} = \mathbf{y}_{k|k-1} + H^{T}_k R^{-1}_k \mathbf{z}_{k}
where :math:`\mathbf{y}_{k|k-1}` is the predicted information state and :math:`Y_{k|k-1}` the
predicted information matrix which form the :class:`~.InformationStatePrediction` object. The
measurement matrix :math:`H_k` and measurement covariance :math:`R_k` are those in the Kalman
filter (see tutorial 1). An :class:`~.InformationStateUpdate` object is returned.
Note
----
Analogously with the :class:`~.InformationKalmanPredictor`, the measurement model is queried
for the existence of an :meth:`inverse_covar()` property. If absent, the :meth:`covar()` is
inverted.
"""
measurement_model: LinearGaussian = Property(
default=None,
doc="A linear Gaussian measurement model. This need not be defined if "
"a measurement model is provided in the measurement. If no model "
"specified on construction, or in the measurement, then error "
"will be thrown.")
def _inverse_measurement_covar(self, measurement_model, **kwargs):
"""Return the inverse of the measurement covariance (or calculate it)
Parameters
----------
measurement_model
The measurement model to be queried
**kwargs : various, optional
These are passed to :meth:`~.LinearGaussian.covar()`
Returns
-------
: :class:`numpy.ndarray`
The inverse of the measurement covariance, :math:`R_k^{-1}`
"""
if hasattr(measurement_model, 'inverse_covar'):
inv_measurement_covar = measurement_model.inverse_covar(**kwargs)
else:
inv_measurement_covar = np.linalg.inv(measurement_model.covar(**kwargs))
return inv_measurement_covar
@lru_cache()
def predict_measurement(self, predicted_state, measurement_model=None, **kwargs):
r"""There's no direct analogue of a predicted measurement in the information form. This
method is therefore provided to return the predicted measurement as would the standard
Kalman updater. This is mainly for compatibility as it's not anticipated that it would
be used in the usual operation of the information filter.
Parameters
----------
        predicted_state : :class:`~.State`
The predicted state in information form :math:`\mathbf{y}_{k|k-1}`
measurement_model : :class:`~.MeasurementModel`
The measurement model. If omitted, the model in the updater object
is used
**kwargs : various
These are passed to :meth:`~.MeasurementModel.matrix()`
Returns
-------
: :class:`~.GaussianMeasurementPrediction`
The measurement prediction, :math:`H \mathbf{x}_{k|k-1}`
"""
# If a measurement model is not specified then use the one that's
# native to the updater
measurement_model = self._check_measurement_model(measurement_model)
hh = self._measurement_matrix(predicted_state=predicted_state,
measurement_model=measurement_model,
**kwargs)
predicted_covariance = np.linalg.inv(predicted_state.precision)
predicted_state_mean = predicted_covariance @ predicted_state.state_vector
predicted_measurement = hh @ predicted_state_mean
innovation_covariance = hh @ predicted_covariance @ hh.T + measurement_model.covar()
return GaussianMeasurementPrediction(predicted_measurement, innovation_covariance,
predicted_state.timestamp,
cross_covar=predicted_covariance @ hh.T)
def update(self, hypothesis, **kwargs):
r"""The Information filter update (corrector) method. Given a hypothesised association
between a predicted information state and an actual measurement, calculate the posterior
information state.
Parameters
----------
hypothesis : :class:`~.SingleHypothesis`
the prediction-measurement association hypothesis. This hypothesis
carries a predicted information state.
**kwargs : various
These are passed to :meth:`predict_measurement`
Returns
-------
: :class:`~.InformationStateUpdate`
The posterior information state with information state :math:`\mathbf{y}_{k|k}` and
precision :math:`Y_{k|k}`
"""
measurement_model = hypothesis.measurement.measurement_model
measurement_model = self._check_measurement_model(measurement_model)
pred_info_mean = hypothesis.prediction.state_vector
hh = measurement_model.matrix()
invr = self._inverse_measurement_covar(measurement_model)
posterior_precision = hypothesis.prediction.precision + hh.T @ invr @ hh
posterior_information_mean = pred_info_mean + hh.T @ invr @ \
hypothesis.measurement.state_vector
if self.force_symmetric_covariance:
posterior_precision = (posterior_precision + posterior_precision.T)/2
return Update.from_state(hypothesis.prediction, posterior_information_mean,
posterior_precision,
timestamp=hypothesis.measurement.timestamp, hypothesis=hypothesis)
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/partitions_schedules_sensors/schedules/preset_helper.py | dbatten5/dagster | 4,606 | 12647434 | from dagster import daily_schedule
# start_preset_helper
def daily_schedule_definition_from_pipeline_preset(pipeline, preset_name, start_date):
preset = pipeline.get_preset(preset_name)
if not preset:
raise Exception(
"Preset {preset_name} was not found "
"on pipeline {pipeline_name}".format(
preset_name=preset_name, pipeline_name=pipeline.name
)
)
@daily_schedule(
start_date=start_date,
pipeline_name=pipeline.name,
solid_selection=preset.solid_selection,
mode=preset.mode,
tags_fn_for_date=lambda _: preset.tags,
)
def my_schedule(_date):
return preset.run_config
return my_schedule
# end_preset_helper
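# Illustrative call (the pipeline object, preset name, and date below are assumptions,
# not values from this snippet):
#
#   my_schedule = daily_schedule_definition_from_pipeline_preset(
#       my_pipeline, preset_name="prod", start_date=datetime.datetime(2021, 1, 1)
#   )
#
# The returned schedule reuses the preset's solid selection, mode, tags, and run config.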
|
keras/test.py | desperado11/BraTs | 124 | 12647439 | <reponame>desperado11/BraTs
import argparse
from data import *
from unet import *
def test(args):
# Data Load
testset = dataset(args, mode='test')
# Model Load
model = unet(args)
model.load_weights(args.ckpt_path)
# Model Test
results = model.predict_generator(testset, steps=1, verbose=1)
# Save predictions
save_result(args, results)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--batch_size', type=int, default=155,
help='batch size.')
parser.add_argument('--data', type=str, default='complete',
help='MRI Label data to train')
parser.add_argument('--image_root', type=str, default='../data/train/image_FLAIR',
help='the root directory containing the image dataset.')
parser.add_argument('--label_root', type=str, default='../data/train/label',
help='the root directory containing the label dataset')
parser.add_argument('--image_folder1', type=str, default='BRATS_074',
help='the directory containing the image dataset.')
parser.add_argument('--label_folder1', type=str, default='BRATS_074',
help='the directory containing the label dataset.')
parser.add_argument('--output_root', type=str, default='./output',
help='the directory to save results')
parser.add_argument('--ckpt_path', type=str, default='./checkpoint/unet.hdf5',
help='The directory containing the segmentation model checkpoint.')
args = parser.parse_args()
test(args)
|
mimic/plugins/heat_plugin.py | ksheedlo/mimic | 141 | 12647468 | <gh_stars>100-1000
"""
Plugin for Rackspace Cloud Orchestration mock.
"""
from mimic.rest.heat_api import HeatApi
heat = HeatApi()
|
sdk/python/pulumi_aws/glue/get_connection.py | chivandikwa/pulumi-aws | 260 | 12647541 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetConnectionResult',
'AwaitableGetConnectionResult',
'get_connection',
'get_connection_output',
]
@pulumi.output_type
class GetConnectionResult:
"""
A collection of values returned by getConnection.
"""
def __init__(__self__, arn=None, catalog_id=None, connection_properties=None, connection_type=None, description=None, id=None, match_criterias=None, name=None, physical_connection_requirements=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if catalog_id and not isinstance(catalog_id, str):
raise TypeError("Expected argument 'catalog_id' to be a str")
pulumi.set(__self__, "catalog_id", catalog_id)
if connection_properties and not isinstance(connection_properties, dict):
raise TypeError("Expected argument 'connection_properties' to be a dict")
pulumi.set(__self__, "connection_properties", connection_properties)
if connection_type and not isinstance(connection_type, str):
raise TypeError("Expected argument 'connection_type' to be a str")
pulumi.set(__self__, "connection_type", connection_type)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if match_criterias and not isinstance(match_criterias, list):
raise TypeError("Expected argument 'match_criterias' to be a list")
pulumi.set(__self__, "match_criterias", match_criterias)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if physical_connection_requirements and not isinstance(physical_connection_requirements, list):
raise TypeError("Expected argument 'physical_connection_requirements' to be a list")
pulumi.set(__self__, "physical_connection_requirements", physical_connection_requirements)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
"""
The ARN of the Glue Connection.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
"""
The catalog ID of the Glue Connection.
"""
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="connectionProperties")
def connection_properties(self) -> Mapping[str, str]:
return pulumi.get(self, "connection_properties")
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> str:
"""
The type of Glue Connection.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the connection.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="matchCriterias")
def match_criterias(self) -> Sequence[str]:
"""
A list of criteria that can be used in selecting this connection.
"""
return pulumi.get(self, "match_criterias")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the Glue Connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="physicalConnectionRequirements")
def physical_connection_requirements(self) -> Sequence['outputs.GetConnectionPhysicalConnectionRequirementResult']:
"""
A map of physical connection requirements, such as VPC and SecurityGroup.
"""
return pulumi.get(self, "physical_connection_requirements")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
The tags assigned to the resource
"""
return pulumi.get(self, "tags")
class AwaitableGetConnectionResult(GetConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionResult(
arn=self.arn,
catalog_id=self.catalog_id,
connection_properties=self.connection_properties,
connection_type=self.connection_type,
description=self.description,
id=self.id,
match_criterias=self.match_criterias,
name=self.name,
physical_connection_requirements=self.physical_connection_requirements,
tags=self.tags)
def get_connection(id: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
"""
This data source can be used to fetch information about a specific Glue Connection.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_connection(id="123456789123:connection")
```
:param str id: A concatenation of the catalog ID and connection name. For example, if your account ID is
`123456789123` and the connection name is `conn` then the ID is `123456789123:conn`.
:param Mapping[str, str] tags: The tags assigned to the resource
"""
__args__ = dict()
__args__['id'] = id
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:glue/getConnection:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
return AwaitableGetConnectionResult(
arn=__ret__.arn,
catalog_id=__ret__.catalog_id,
connection_properties=__ret__.connection_properties,
connection_type=__ret__.connection_type,
description=__ret__.description,
id=__ret__.id,
match_criterias=__ret__.match_criterias,
name=__ret__.name,
physical_connection_requirements=__ret__.physical_connection_requirements,
tags=__ret__.tags)
@_utilities.lift_output_func(get_connection)
def get_connection_output(id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectionResult]:
"""
This data source can be used to fetch information about a specific Glue Connection.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_connection(id="123456789123:connection")
```
:param str id: A concatenation of the catalog ID and connection name. For example, if your account ID is
`123456789123` and the connection name is `conn` then the ID is `123456789123:conn`.
:param Mapping[str, str] tags: The tags assigned to the resource
"""
...
|
WebMirror/management/rss_parser_funcs/feed_parse_extractLilBlissNovels.py | fake-name/ReadableWebProxy | 193 | 12647560 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractLilBlissNovels.py
def extractLilBlissNovels(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if ':' in item['title'] and 'Side Story' in item['title'] and not postfix:
postfix = item['title'].split(':')[-1]
if '<NAME>' in item['tags']:
return buildReleaseMessageWithType(item, '<NAME>', vol, chp, frag=frag, postfix=postfix)
if 'Memory Lost' in item['tags']:
return buildReleaseMessageWithType(item, 'Memory Lost', vol, chp, frag=frag, postfix=postfix)
return False
|
text/src/autogluon/text/text_prediction/constants.py | zhiqiangdon/autogluon | 4,462 | 12647589 | <reponame>zhiqiangdon/autogluon<filename>text/src/autogluon/text/text_prediction/constants.py<gh_stars>1000+
# Column/Label Types
NULL = 'null'
CATEGORICAL = 'categorical'
TEXT = 'text'
NUMERICAL = 'numerical'
ENTITY = 'entity'
# Feature Types
ARRAY = 'array'
|
flambe/nn/mos.py | ethan-asapp/flambe | 148 | 12647607 | # type: ignore[override]
import torch
import torch.nn as nn
from torch import Tensor
from flambe.nn.mlp import MLPEncoder
from flambe.nn.module import Module
class MixtureOfSoftmax(Module):
"""Implement the MixtureOfSoftmax output layer.
Attributes
----------
pi: FullyConnected
softmax layer over the different softmax
layers: [FullyConnected]
list of the k softmax layers
"""
def __init__(self,
input_size: int,
output_size: int,
k: int = 1,
take_log: bool = True) -> None:
"""Initialize the MOS layer.
Parameters
----------
input_size: int
input dimension
output_size: int
output dimension
k: int (Default: 1)
number of softmax in the mixture
"""
super().__init__()
self.pi_w = MLPEncoder(input_size, k)
self.softmax = nn.Softmax()
self.layers = [MLPEncoder(input_size, output_size) for _ in range(k)]
self.tanh = nn.Tanh()
self.activation = nn.LogSoftmax() if take_log else nn.Softmax()
def forward(self, data: Tensor) -> Tensor:
"""Implement mixture of softmax for language modeling.
Parameters
----------
data: torch.Tensor
seq_len x batch_size x hidden_size
Return
-------
out: Variable
output matrix of shape seq_len x batch_size x out_size
"""
w = self.softmax(self.pi_w(data))
# Compute k softmax, and combine using above weights
out = [w[:, :, i] * self.tanh(W(data)) for i, W in enumerate(self.layers)]
out = torch.cat(out, dim=0).sum(dim=0)
return self.activation(out)
|
mrgeo-python/src/test/python/mrgeotest.py | ngageoint/mrgeo | 198 | 12647610 | <gh_stars>100-1000
import os
import shutil
import sys
from osgeo import gdal
from unittest import TestCase
from py4j.java_gateway import java_import
import gdaltest
from pymrgeo.instance import is_instance_of
from pymrgeo.mrgeo import MrGeo
class MrGeoTests(TestCase):
GENERATE_BASELINE_DATA = False
classname = None
mrgeo = None
gateway = None
_CWD = os.getcwd()
_OUTPUT = "output"
_OUTPUT_HDFS = None
_OUTPUT_BASE = "/mrgeo/test-files/output/"
_INPUT = "testFiles"
_INPUT_HDFS = None
_INPUT_BASE = "/mrgeo/test-files/"
inputdir = None
inputhdfs = None
outputdir = None
outputhdfs = None
def compareraster(self, raster, testname, nodata=-9999):
if self.GENERATE_BASELINE_DATA:
self.saveraster(raster, testname, nodata)
else:
# jvm = self.gateway.jvm
# test = raster.mapop.toDataset(False)
testimage = self.outputdir + testname
raster.export(testimage, singleFile=True, format="tiff", overridenodata=nodata)
testimage += ".tif"
test = gdal.Open(testimage)
golden = gdal.Open(self.inputdir + testname + ".tif")
# compare as GDAL Datasets.
gdaltest.compare_db(self, golden, test)
os.remove(testimage)
def comparelocalraster(self, testname):
if not self.GENERATE_BASELINE_DATA:
golden = gdal.Open(self.inputdir + testname + ".tif")
test = gdal.Open(self.outputdir + testname + ".tif")
# compare as GDAL Datasets.
gdaltest.compare_db(self, golden, test)
def saveraster(self, raster, testname, nodata=-9999):
name = self.inputdir + testname
raster.export(name, singleFile=True, format="tiff", overridenodata=nodata)
def savevector(self, vector, testname):
name = self.inputdir + testname + ".tsv"
vector.save(name)
def comparevector(self, vector, testname):
if self.GENERATE_BASELINE_DATA:
self.savevector(vector, str(testname))
else:
jvm = self.mrgeo._get_jvm()
# test = raster.mapop.toDataset(False)
java_import(jvm, "org.mrgeo.hdfs.vector.DelimitedVectorReader")
testvector = str(self.outputhdfs + testname + ".tsv")
vector.ssave(testvector)
expectedvector = str(self.inputdir + testname + ".tsv")
vdp_expected = jvm.DataProviderFactory.getVectorDataProvider(
expectedvector,
jvm.DataProviderFactory.AccessMode.READ,
jvm.HadoopUtils.createConfiguration())
expected_geom_reader = vdp_expected.getVectorReader().get()
vdp = jvm.DataProviderFactory.getVectorDataProvider(
testvector,
jvm.DataProviderFactory.AccessMode.READ,
jvm.HadoopUtils.createConfiguration())
self.assertTrue(vdp is not None)
vector_reader = vdp.getVectorReader()
self.assertTrue(vector_reader is not None)
self.assertTrue(is_instance_of(self.mrgeo.gateway, vector_reader, jvm.DelimitedVectorReader))
self.assertEquals(vdp_expected.getVectorReader().count(), vector_reader.count())
geom_reader = vector_reader.get()
self.assertTrue(geom_reader is not None)
while expected_geom_reader.hasNext():
expected_geom = expected_geom_reader.next()
geom = geom_reader.next()
self.assertTrue(geom is not None)
self.assertEquals(expected_geom.type(), geom.type())
self.assertAlmostEquals(float(expected_geom.getAttribute("COST_S")),
float(geom.getAttribute("COST_S")), delta=0.001)
self.assertAlmostEquals(float(expected_geom.getAttribute("DISTANCE_M")),
float(geom.getAttribute("DISTANCE_M")), delta=0.001)
self.assertAlmostEquals(float(expected_geom.getAttribute("MINSPEED_MPS")),
float(geom.getAttribute("MINSPEED_MPS")), delta=0.001)
self.assertAlmostEquals(float(expected_geom.getAttribute("MAXSPEED_MPS")),
float(geom.getAttribute("MAXSPEED_MPS")), delta=0.001)
self.assertAlmostEquals(float(expected_geom.getAttribute("AVGSPEED_MPS")),
float(geom.getAttribute("AVGSPEED_MPS")), delta=0.001)
# Should not be any more geometries in the actual output
self.assertFalse(geom_reader.hasNext())
jvm.HadoopFileUtils.delete(testvector)
@classmethod
def copy(cls, srcfile, srcpath=None, dstpath=None, dstfile=None):
jvm = cls.mrgeo._get_jvm()
java_import(jvm, "org.mrgeo.hdfs.utils.HadoopFileUtils")
java_import(jvm, "org.apache.hadoop.fs.Path")
if srcpath is not None:
src = srcpath
if not src.endswith('/'):
src += '/'
src += srcfile
else:
src = srcfile
if not os.path.exists(src):
if os.path.exists(cls.inputdir + src):
src = cls.inputdir + src
if not os.path.exists(src):
raise Exception("Source (" + src + ") is not a file or directory")
if dstfile is not None:
dst = dstfile
if not dst.endswith('/'):
dst += '/'
dst += dstfile
if not os.path.isfile(src):
raise Exception("Source (" + src + ") is must be a file")
if jvm.HadoopFileUtils.exists(dst):
jvm.HadoopFileUtils.delete(dst)
jvm.HadoopFileUtils.copyFileToHdfs(src, dst)
return dst
elif dstpath is not None:
dst = dstpath
else:
dst = cls.inputhdfs
basefile = os.path.basename(src)
dstfile = dst + basefile
if jvm.HadoopFileUtils.exists(dstfile):
jvm.HadoopFileUtils.delete(dstfile)
jvm.HadoopFileUtils.copyToHdfs(src, dst)
return dstfile
@classmethod
def setUpClass(cls):
cls.classname = cls.__name__
# print(cls.classname + " setup")
cls.mrgeo = MrGeo()
# cls.mrgeo = MrGeo(host="localhost", port=12345) # already running, remote mrgeo
jvm = cls.mrgeo._get_jvm()
java_import(jvm, "org.apache.hadoop.conf.Configuration")
java_import(jvm, "org.apache.hadoop.fs.Path")
java_import(jvm, "org.mrgeo.data.DataProviderFactory")
java_import(jvm, "org.mrgeo.data.vector.VectorDataProvider")
java_import(jvm, "org.mrgeo.data.vector.VectorReader")
java_import(jvm, "org.mrgeo.hdfs.vector.DelimitedVectorReader")
fs = jvm.HadoopFileUtils.getFileSystem()
p = jvm.Path(cls._INPUT_BASE).makeQualified(fs)
cls._INPUT_HDFS = p
p = jvm.Path(cls._OUTPUT_BASE).makeQualified(fs)
cls._OUTPUT_HDFS = p
basedir = os.getenv('BASEDIR', '.')
dirname = os.path.abspath(basedir)
try:
while True:
names = os.listdir(dirname)
if cls._INPUT in names:
break
dirname = os.path.abspath(os.path.join(dirname, os.pardir))
except OSError:
pass
basedir = os.path.abspath(dirname)
cls.inputdir = os.path.abspath(basedir + '/' + cls._INPUT + "/" + cls.classname) + '/'
cls.outputdir = os.path.abspath(basedir + '/' + cls._INPUT + '/' + cls._OUTPUT + "/" + cls.classname) + '/'
cls.inputhdfs = jvm.Path(cls._INPUT_HDFS, "python/" + cls.classname).makeQualified(fs).toString() + '/'
cls.outputhdfs = jvm.Path(cls._OUTPUT_HDFS, "python/" + cls.classname).makeQualified(fs).toString() + '/'
if not os.path.exists(cls.inputdir):
os.makedirs(cls.inputdir)
if os.path.exists(cls.outputdir):
shutil.rmtree(cls.outputdir, ignore_errors=True)
if not os.path.exists(cls.outputdir):
os.makedirs(cls.outputdir)
jvm.HadoopFileUtils.create(cls.inputhdfs)
if jvm.HadoopFileUtils.exists(cls.outputhdfs):
jvm.HadoopFileUtils.cleanDirectory(cls.outputhdfs)
jvm.HadoopFileUtils.create(cls.outputhdfs)
jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_IMAGE, cls.inputhdfs)
jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_VECTOR, cls.inputhdfs)
jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.ERROR)
@classmethod
def tearDownClass(cls):
cls.mrgeo.disconnect()
def setUp(self):
self.name = self._testMethodName
self._doublebox("Starting", self.classname + ":" + self.name)
self.mrgeo.usedebug()
self.mrgeo.start()
jvm = self.mrgeo._get_jvm()
jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_IMAGE, self.inputhdfs)
jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_VECTOR, self.inputhdfs)
def tearDown(self):
self.mrgeo.stop()
self._doublebox("Test Finished", self.classname + ":" + self.name)
def debug_logging(self):
jvm = self.mrgeo._get_jvm()
jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.DEBUG)
def info_logging(self):
jvm = self.mrgeo._get_jvm()
jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.INFO)
def warn_logging(self):
jvm = self.mrgeo._get_jvm()
jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.WARN)
def error_logging(self):
jvm = self.mrgeo._get_jvm()
jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.ERROR)
@staticmethod
def _doublebox(text, name):
sys.stdout.flush()
width = len(name)
if width < len(text):
width = len(text)
fmt = "{:*<" + str(width + 4) + "}"
print(fmt.format(""))
fmt = "{:<" + str(width + 2) + "}"
print(fmt.format("*") + " *")
fmt = "{:<" + str(width) + "}"
print("* " + fmt.format(text) + " *")
fmt = "{:<" + str(width + 2) + "}"
print(fmt.format("*") + " *")
fmt = "{:*<" + str(width + 4) + "}"
print(fmt.format(""))
fmt = "{:<" + str(width) + "}"
print("* " + fmt.format(name) + " *")
fmt = "{:*<" + str(width + 4) + "}"
print(fmt.format(""))
print("")
sys.stdout.flush()
class VectorTestExpectation:
def __init__(self, cost, distance, minSpeed, maxSpeed, avgSpeed):
self.cost = cost
self.distance = distance
self.minSpeed = minSpeed
self.maxSpeed = maxSpeed
self.avgSpeed = avgSpeed
|
fastrunner/apps.py | huanjoyous/FasterRunner20190716 | 227 | 12647628 | <filename>fastrunner/apps.py<gh_stars>100-1000
from django.apps import AppConfig
class FastrunnerConfig(AppConfig):
name = 'fastrunner'
|
boto3_type_annotations/boto3_type_annotations/amplify/paginator.py | cowboygneox/boto3_type_annotations | 119 | 12647698 | <reponame>cowboygneox/boto3_type_annotations<filename>boto3_type_annotations/boto3_type_annotations/amplify/paginator.py<gh_stars>100-1000
from typing import Dict
from botocore.paginate import Paginator
class ListApps(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class ListBranches(Paginator):
def paginate(self, appId: str, PaginationConfig: Dict = None) -> Dict:
pass
class ListDomainAssociations(Paginator):
def paginate(self, appId: str, PaginationConfig: Dict = None) -> Dict:
pass
class ListJobs(Paginator):
def paginate(self, appId: str, branchName: str, PaginationConfig: Dict = None) -> Dict:
pass
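# Usage sketch against the real boto3 client that these stubs describe (paginator names
# follow the standard boto3 convention; treat the example as illustrative):
#
#   import boto3
#   client = boto3.client('amplify')
#   for page in client.get_paginator('list_apps').paginate():
#       for app in page.get('apps', []):
#           print(app.get('appId'))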
|
src/oci/encryption/internal/serialization.py | Manny27nyc/oci-python-sdk | 249 | 12647699 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import struct
import json
from oci.encryption.internal.utils import (
convert_to_bytes,
convert_to_str,
convert_bytes_to_base64_encoded_string,
convert_base64_encoded_string_to_bytes,
)
from oci.encryption.internal.models import (
EncryptedDataHeader,
EncryptedDataHeaderDataEncryptionKey,
)
SERIALIZATION_FORMAT_VERSION = 0x0001
METADATA_KEY_ENCRYPTED_CONTENT_FORMAT = "encryptedContentFormat"
METADATA_KEY_ENCRYPTED_DATA_KEYS = "encryptedDataKeys"
METADATA_KEY_IV = "iv"
METADATA_KEY_ALGORITHM_ID = "algorithmId"
METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA = "additionalAuthenticatedData"
ENCRYPTED_DATA_KEY_MASTER_KEY_ID = "masterKeyId"
ENCRYPTED_DATA_KEY_VAULT_ID = "vaultId"
ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY = "encryptedDataKey"
ENCRYPTED_DATA_KEY_REGION = "region"
# docs: https://docs.python.org/3.8/library/struct.html
STRUCT_HEADER_FORMAT = (
">" # big endian
"H" # serialization format version ID
"I" # JSON metadata length
"{json_metadata_length}s" # JSON metadata
)
def serialize_header(encrypted_data_header):
encrypted_data_keys = []
for encrypted_data_key in encrypted_data_header.encrypted_data_keys:
encrypted_data_keys.append(
{
ENCRYPTED_DATA_KEY_MASTER_KEY_ID: encrypted_data_key.master_key_id,
ENCRYPTED_DATA_KEY_VAULT_ID: encrypted_data_key.vault_id,
ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY: convert_bytes_to_base64_encoded_string(
encrypted_data_key.encrypted_data_key_bytes
),
ENCRYPTED_DATA_KEY_REGION: encrypted_data_key.region,
}
)
metadata = {
METADATA_KEY_ENCRYPTED_CONTENT_FORMAT: encrypted_data_header.encrypted_content_format,
METADATA_KEY_ENCRYPTED_DATA_KEYS: encrypted_data_keys,
METADATA_KEY_IV: convert_bytes_to_base64_encoded_string(
encrypted_data_header.iv_bytes
),
METADATA_KEY_ALGORITHM_ID: encrypted_data_header.algorithm_id,
METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA: convert_to_str(
encrypted_data_header.additional_authenticated_data_bytes
),
}
json_header_as_string = json.dumps(metadata)
header_format = STRUCT_HEADER_FORMAT.format(
json_metadata_length=len(json_header_as_string)
)
packed_header = struct.pack(
header_format,
SERIALIZATION_FORMAT_VERSION,
len(json_header_as_string),
convert_to_bytes(json_header_as_string),
)
return packed_header
def deserialize_header_from_stream(ciphertext_stream):
short_format = ">H"
short_size_offset = struct.calcsize(short_format)
unsigned_int_format = ">I"
unsigned_int_size_offset = struct.calcsize(unsigned_int_format)
offset = 0
# get serialization format version
next_content = ciphertext_stream.read(short_size_offset)
(serialization_format_version,) = struct.unpack_from(
short_format, next_content, offset
)
offset = offset + short_size_offset
if serialization_format_version != SERIALIZATION_FORMAT_VERSION:
raise ValueError(
"Could not deserialize header with unrecognized serialization format version: {}".format(
serialization_format_version
)
)
# get json metadata length
next_content = ciphertext_stream.read(unsigned_int_size_offset)
(json_metadata_length,) = struct.unpack_from(unsigned_int_format, next_content)
offset = offset + short_size_offset
# get json metadata
chunk_format = "{}s".format(json_metadata_length)
next_content = ciphertext_stream.read(struct.calcsize(chunk_format))
(json_metadata_bytes,) = struct.unpack_from(chunk_format, next_content)
offset = offset + struct.calcsize(chunk_format)
json_metadata = convert_to_str(json_metadata_bytes)
try:
metadata = json.loads(json_metadata)
except ValueError as e:
raise ValueError(
"Could not parse metadata inside header. Error: {}".format(str(e))
)
required_top_level_keys = [
METADATA_KEY_IV,
METADATA_KEY_ALGORITHM_ID,
METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA,
]
required_encrypted_data_key_keys = [
ENCRYPTED_DATA_KEY_MASTER_KEY_ID,
ENCRYPTED_DATA_KEY_VAULT_ID,
ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY,
ENCRYPTED_DATA_KEY_REGION,
]
    missing_or_none_top_level_keys = [
        required_key for required_key in required_top_level_keys
        if (required_key not in metadata)
        or (metadata.get(required_key, None) is None)
        or (isinstance(metadata.get(required_key), list) and len(metadata.get(required_key)) == 0)
    ]
if missing_or_none_top_level_keys:
raise ValueError(
"Invalid header. The following metadata keys must be present and not null: {}.".format(
", ".join(missing_or_none_top_level_keys)
)
)
encrypted_data_keys_raw = metadata.get(METADATA_KEY_ENCRYPTED_DATA_KEYS)
encrypted_data_keys = []
for encrypted_data_key_raw in encrypted_data_keys_raw:
        missing_or_none_dek_keys = [
            required_key for required_key in required_encrypted_data_key_keys
            if (required_key not in encrypted_data_key_raw)
            or (encrypted_data_key_raw.get(required_key, None) is None)
        ]
if missing_or_none_dek_keys:
raise ValueError(
"Invalid header. The following metadata keys must be present and not null in each encrypted data key: {}.".format(
", ".join(missing_or_none_dek_keys)
)
)
encrypted_data_keys.append(
EncryptedDataHeaderDataEncryptionKey(
master_key_id=encrypted_data_key_raw.get(
ENCRYPTED_DATA_KEY_MASTER_KEY_ID
),
vault_id=encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_VAULT_ID),
encrypted_data_key_bytes=convert_base64_encoded_string_to_bytes(
encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY)
),
region=encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_REGION),
)
)
header = EncryptedDataHeader(
encrypted_data_keys=encrypted_data_keys,
iv_bytes=convert_base64_encoded_string_to_bytes(metadata.get(METADATA_KEY_IV)),
algorithm_id=metadata.get(METADATA_KEY_ALGORITHM_ID),
additional_authenticated_data_bytes=convert_to_bytes(
metadata.get(METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA)
),
)
return header
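# Round-trip sketch (field values are made up; only the constructor keywords already used
# above are assumed):
#
#   import io
#   dek = EncryptedDataHeaderDataEncryptionKey(
#       master_key_id="ocid1.key.oc1..example", vault_id="ocid1.vault.oc1..example",
#       encrypted_data_key_bytes=b"\x00" * 32, region="us-phoenix-1")
#   header = EncryptedDataHeader(
#       encrypted_data_keys=[dek], iv_bytes=b"\x00" * 12, algorithm_id=0,
#       additional_authenticated_data_bytes=b"{}")
#   parsed = deserialize_header_from_stream(io.BytesIO(serialize_header(header)))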
|
aioch/utils.py | maxmouchet/aioch | 119 | 12647720 | from functools import partial
def run_in_executor(executor, loop, func, *args, **kwargs):
if kwargs:
func = partial(func, **kwargs)
return loop.run_in_executor(executor, func, *args)
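# Minimal usage sketch (illustrative; assumes an executor and a running event loop):
#
#   import asyncio
#   from concurrent.futures import ThreadPoolExecutor
#
#   async def demo():
#       loop = asyncio.get_event_loop()
#       with ThreadPoolExecutor() as pool:
#           return await run_in_executor(pool, loop, pow, 2, 10)  # -> 1024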
|
datasets/mat2dic_maskrcnn.py | qianrusun1015/Disentangled-Person-Image-Generation | 165 | 12647725 | <filename>datasets/mat2dic_maskrcnn.py
import scipy.io
import numpy as np
import os, sys, pdb, pickle
######## Mask-RCNN keypoint order ########
# % 1: nose
# % 2: left eye
# % 3: right eye
# % 4: left ear
# % 5: right ear
# % 6: left shoulder
# % 7: right shoulder
# % 8: left elbow
# % 9: right elbow
# % 10: left wrist
# % 11: right wrist
# % 12: left hip
# % 13: right hip
# % 14: left knee
# % 15: right knee
# % 16: left ankle
# % 17: right ankle
######## OpenPose keypoint order ########
# MSCOCO Pose part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]
keyNum = 18
openPose_maskRCNN_trans_dic = {0:0, 1:None, 2:6, 3:8, 4:10, 5:5, 6:7, 7:9, 8:12, 9:14, 10:16, 11:11, 12:13, 13:15, 14:1, 15:2, 16:3, 17:4}
def mat2dic(img_dir, pose_mat_path):
pose_mat = scipy.io.loadmat(pose_mat_path)['joint2d']
N, _, _ = pose_mat.shape
img_name_list = sorted(os.listdir(img_dir))
assert N==len(img_name_list), 'number of pose and img are different'
pose_dic = {}
for idx, img_name in enumerate(img_name_list):
crs = pose_mat[idx,:,:]
RCV = np.zeros([keyNum, 3])
for k in range(keyNum):
k_idx = openPose_maskRCNN_trans_dic[k]
if k_idx is not None:
c,r = crs[:,k_idx]
if not (0==c and 0==r):
RCV[k,0] = r
RCV[k,1] = c
RCV[k,2] = 1 ## 1 means visible, 0 means invisible
## Makeup neck keypoint with leftShoulder and rightShoulder
r0, c0, v0 = RCV[2,:]
r1, c1, v1 = RCV[5,:]
if v0 and v1:
RCV[1,0] = (r0+r1)/2
RCV[1,1] = (c0+c1)/2
RCV[1,2] = 1
pose_dic[img_name] = RCV
save_path = os.path.join(os.path.dirname(pose_mat_path), os.path.basename(pose_mat_path).split('_')[-1].replace('.mat','.pickle'))
    with open(save_path, 'wb') as f:  # binary mode so pickle.dump works under Python 3 as well
pickle.dump(pose_dic, f)
img_dir = ''
pose_mat_path = ''
mat2dic(img_dir, pose_mat_path)
|
Data Structure/Array Or Vector/Sort An Array of 0s and 1s/SolutionByPK.py | dream-achiever/Programmers-Community | 261 | 12647744 | <filename>Data Structure/Array Or Vector/Sort An Array of 0s and 1s/SolutionByPK.py
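# In-place partition of a 0/1 array: scan once and, for every 1 found at index i, swap it
# with the next 0 at or after max(b, i); the pointer b only moves forward, so the pass is O(n).
# Illustrative run (not part of the original solution): input "5" then "1 0 1 0 1"
# prints "0 0 1 1 1".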
def next0(A,n,x):
while x<n and A[x]!=0:
x+=1
return x
n=int(input())
A=[int(j) for j in input().split()]
b=0
for i in range(n):
if A[i]==1:
b=next0(A,n,max(b,i))
if b==n:
break
A[i],A[b]=A[b],A[i]
for i in A:
print(i,end=" ")
|
pyhawkes/utils/utils.py | nticea/superhawkes | 221 | 12647760 | import os
import numpy as np
def initialize_pyrngs():
from gslrandom import PyRNG, get_omp_num_threads
if "OMP_NUM_THREADS" in os.environ:
        num_threads = int(os.environ["OMP_NUM_THREADS"])  # env vars are strings; cast before the comparison below
else:
num_threads = get_omp_num_threads()
assert num_threads > 0
# Choose random seeds
seeds = np.random.randint(2**16, size=num_threads)
return [PyRNG(seed) for seed in seeds]
def convert_discrete_to_continuous(S, dt):
# Convert S to continuous time
from pybasicbayes.util.general import ibincount
T = S.shape[0] * dt
S_ct = dt * np.concatenate([ibincount(Sk) for Sk in S.T]).astype(float)
S_ct += dt * np.random.rand(*S_ct.shape)
assert np.all(S_ct < T)
C_ct = np.concatenate([k*np.ones(Sk.sum()) for k,Sk in enumerate(S.T)]).astype(int)
# Sort the data
perm = np.argsort(S_ct)
S_ct = S_ct[perm]
C_ct = C_ct[perm]
return S_ct, C_ct, T
def convert_continuous_to_discrete(S, C, dt, T_min, T_max):
bins = np.arange(T_min, T_max, dt)
if bins[-1] != T_max:
bins = np.hstack((bins, [T_max]))
T = bins.size - 1
K = C.max()+1
S_dt = np.zeros((T,K))
for k in range(K):
S_dt[:,k] = np.histogram(S[C==k], bins)[0]
assert S_dt.sum() == len(S)
    return S_dt.astype(int)  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
def get_unique_file_name(filedir, filename):
"""
Get a unique filename by appending filename with .x, where x
is the next untaken number
"""
import fnmatch
# Get the number of conflicting log files
fnames = os.listdir(filedir)
conflicts = fnmatch.filter(fnames, "%s*" % filename)
nconflicts = len(conflicts)
if nconflicts > 0:
unique_name = "%s.%d" % (filename, nconflicts+1)
else:
unique_name = filename
return unique_name
def logistic(x,lam_max=1.0):
return lam_max*1.0/(1.0+np.exp(-x))
def logit(x,lam_max=1.0):
return np.log(x/lam_max)-np.log(1-(x/lam_max))
def sample_nig(mu0, lmbda0, alpha0, beta0):
mu0, lmbda0, alpha0, beta0 = np.broadcast_arrays(mu0, lmbda0, alpha0, beta0)
shp = mu0.shape
assert lmbda0.shape == alpha0.shape == beta0.shape == shp
tau = np.array(np.random.gamma(alpha0, 1./beta0)).reshape(shp)
mu = np.array(np.random.normal(mu0, np.sqrt(1./(lmbda0 * tau)))).reshape(shp)
return mu, tau
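# sample_nig draws from a normal-inverse-gamma prior in its precision parameterisation:
# tau ~ Gamma(alpha0, scale=1/beta0) and mu | tau ~ Normal(mu0, sqrt(1/(lmbda0 * tau))),
# after broadcasting all four hyperparameters to a common shape.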
|
dataviva/api/sc/models.py | joelvisroman/dataviva-site | 126 | 12647785 | <filename>dataviva/api/sc/models.py
from dataviva import db
from dataviva.utils.auto_serialize import AutoSerialize
from dataviva.api.attrs.models import Bra, Course_sc, School
class Sc(db.Model, AutoSerialize):
__abstract__ = True
year = db.Column(db.Integer(4), primary_key=True)
age = db.Column(db.Float())
classes = db.Column(db.Integer(11))
enrolled = db.Column(db.Integer(11))
enrolled_growth = db.Column(db.Float())
enrolled_growth_5 = db.Column(db.Float())
class Yb_sc(Sc):
__tablename__ = 'sc_yb'
bra_id = db.Column(db.String(9), db.ForeignKey(Bra.id), primary_key=True)
num_schools = db.Column(db.Integer(11))
bra_id_len = db.Column(db.Integer(1))
def __repr__(self):
return '<Yb {0}.{1}>'.format(self.year, self.bra_id)
class Ys(Sc):
__tablename__ = 'sc_ys'
school_id = db.Column(db.String(8), db.ForeignKey(School.id), primary_key=True)
def __repr__(self):
return '<Ys %d.%s>' % (self.year, self.school_id)
class Ybs(Sc):
__tablename__ = 'sc_ybs'
bra_id = db.Column(db.String(9), db.ForeignKey(Bra.id), primary_key=True)
school_id = db.Column(db.String(8), db.ForeignKey(School.id), primary_key=True)
bra_id_len = db.Column(db.Integer(1))
def __repr__(self):
return '<Ybs %d.%s.%s>' % (self.year, self.bra_id, self.school_id)
class Ybc_sc(Sc):
__tablename__ = 'sc_ybc'
bra_id = db.Column(db.String(9), db.ForeignKey(Bra.id), primary_key=True)
course_sc_id = db.Column(db.String(5), db.ForeignKey(Course_sc.id), primary_key=True)
bra_id_len = db.Column(db.Integer(1))
course_sc_id_len = db.Column(db.Integer(1))
def __repr__(self):
return '<Ybc %d.%s.%s>' % (self.year, self.bra_id, self.course_sc_id)
class Yc_sc(Sc):
__tablename__ = 'sc_yc'
course_sc_id = db.Column(db.String(5), db.ForeignKey(Course_sc.id), primary_key=True)
course_sc_id_len = db.Column(db.Integer(1))
def __repr__(self):
return '<Ybc %d.%s>' % (self.year, self.course_sc_id)
class Ysc(Sc):
__tablename__ = 'sc_ysc'
school_id = db.Column(db.String(8), db.ForeignKey(School.id), primary_key=True)
course_sc_id = db.Column(db.String(5), db.ForeignKey(Course_sc.id), primary_key=True)
course_sc_id_len = db.Column(db.Integer(1))
def __repr__(self):
return '<Ysc %d.%s>' % (self.year, self.school_id)
class Ybsc(Sc):
__tablename__ = 'sc_ybsc'
bra_id = db.Column(db.String(9), db.ForeignKey(Bra.id), primary_key=True)
school_id = db.Column(db.String(8), db.ForeignKey(School.id), primary_key=True)
course_sc_id = db.Column(db.String(5), db.ForeignKey(Course_sc.id), primary_key=True)
course_sc_id_len = db.Column(db.Integer(1))
bra_id_len = db.Column(db.Integer(1))
def __repr__(self):
return '<Ybsc %d.%s.%s.%s>' % (self.year, self.bra_id, self.school_id, self.course_sc_id)
|
ch6/string_matching.py | lyskevin/cpbook-code | 1,441 | 12647788 | import time, random
MAX_N = 200010
def naiveMatching(T, P):
n = len(T)
m = len(P)
freq = 0
for i in range(n):
found = True
for j in range(m):
if not found:
break
if i+j >= n or P[j] != T[i+j]:
found = False
if found:
freq += 1
return freq
b = [0] * MAX_N
def kmpPreprocess(P):
global b
m = len(P)
i, j = 0, -1
b[0] = -1
while i < m:
while j >= 0 and P[i] != P[j]:
j = b[j]
i += 1
j += 1
b[i] = j
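# b is the KMP border (failure) table: b[i] is the length of the longest proper prefix of
# P[:i] that is also a suffix of it, with b[0] = -1 as a sentinel.
# For example, P = "ABAB" yields b[0..4] = [-1, 0, 0, 1, 2].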
def kmpSearch(T, P):
global b
n = len(T)
m = len(P)
freq = 0
i, j = 0, 0
while i < n:
while j >= 0 and T[i] != P[j]:
j = b[j]
i += 1
j += 1
if j == m:
freq += 1
j = b[j]
return freq
p = 131
M = 10**9+7
Pow = [0] * MAX_N
h = [0] * MAX_N
def computeRollingHash(T):
n = len(T)
Pow[0] = 1
for i in range(1, n):
Pow[i] = (Pow[i-1]*p) % M
h[0] = 0
for i in range(n):
if i != 0:
h[i] = h[i-1];
h[i] = (h[i] + (ord(T[i])*Pow[i]) % M) % M
def extEuclid(a, b):
xx, yy = 0, 1
x, y = 1, 0
while b != 0:
q = a//b
a, b = b, a%b
x, xx = xx, x-q*xx
y, yy = yy, y-q*yy
return a, x, y
def modInverse(b, m):
d, x, y = extEuclid(b, m)
if d != 1:
return -1
return (x+m)%m
def hash_fast(L, R):
if L == 0:
return h[R]
ans = ((h[R] - h[L-1]) % M + M) % M
ans = (ans * modInverse(Pow[L], M)) % M
return ans
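# Rolling-hash convention used above: h[i] = sum_{j<=i} ord(T[j]) * p**j (mod M), so the
# hash of the substring T[L..R] is (h[R] - h[L-1]) * p**(-L) (mod M); hash_fast recovers it
# with a modular inverse and main() compares it against the pattern hash hP.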
def main():
extreme_limit = 100000
letters = ['A', 'B']
T = ''.join([random.choice(letters) for _ in range(extreme_limit-1)]) + 'B'
P = ''.join([random.choice(letters) for _ in range(10)])
n = len(T)
m = len(P)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8; use explicit start/stop timing
    freq = 0
    pos = T.find(P, 0)
    while pos != -1:
        freq += 1
        pos = T.find(P, pos+1)
    print('String Library, #match = %d' % freq)
    print('Runtime =', time.perf_counter() - start, 's')
    start = time.perf_counter()
    print('Naive Matching, #match = %d' % naiveMatching(T, P))
    print('Runtime =', time.perf_counter() - start, 's')
    start = time.perf_counter()
    computeRollingHash(T)
    hP = 0
    for i in range(m):
        hP = (hP + ord(P[i])*Pow[i]) % M
    freq = 0
    for i in range(n-m+1):
        if hash_fast(i, i+m-1) == hP:
            freq += 1
    print('Rabin-Karp, #match = %d' % freq)
    print('Runtime =', time.perf_counter() - start, 's')
    start = time.perf_counter()
    kmpPreprocess(P)
    print('Knuth-Morris-Pratt, #match = %d' % kmpSearch(T, P))
    print('Runtime =', time.perf_counter() - start, 's')
main()
|
maskrcnn_benchmark/solver/__init__.py | witwitchayakarn/6DVNET | 295 | 12647838 | <gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_optimizer
from .build import make_lr_scheduler
from .lr_scheduler import WarmupMultiStepLR
|
notebook/float_to_hex.py | vhn0912/python-snippets | 174 | 12647844 | import struct
import sys
f_max = sys.float_info.max
print(f_max)
# 1.7976931348623157e+308
print(struct.pack('>d', f_max))
# b'\x7f\xef\xff\xff\xff\xff\xff\xff'
print(type(struct.pack('>d', f_max)))
# <class 'bytes'>
print(struct.pack('<d', f_max))
# b'\xff\xff\xff\xff\xff\xff\xef\x7f'
print(struct.unpack('>Q', struct.pack('>d', f_max)))
# (9218868437227405311,)
print(type(struct.unpack('>Q', struct.pack('>d', f_max))))
# <class 'tuple'>
print(struct.unpack('>Q', struct.pack('>d', f_max))[0])
# 9218868437227405311
print(type(struct.unpack('>Q', struct.pack('>d', f_max))[0]))
# <class 'int'>
print(struct.unpack('>d', struct.pack('>d', f_max))[0])
# 1.7976931348623157e+308
print(hex(struct.unpack('>Q', struct.pack('>d', f_max))[0]))
# 0x7fefffffffffffff
print(type(hex(struct.unpack('>Q', struct.pack('>d', f_max))[0])))
# <class 'str'>
def double_to_hex(f):
return hex(struct.unpack('>Q', struct.pack('>d', f))[0])
print(double_to_hex(f_max))
# 0x7fefffffffffffff
print(double_to_hex(42.195))
# 0x404518f5c28f5c29
print(double_to_hex(1e500))
# 0x7ff0000000000000
print(double_to_hex(1e-500))
# 0x0
print(int(double_to_hex(f_max), 16))
# 9218868437227405311
print(bin(int(double_to_hex(f_max), 16)))
# 0b111111111101111111111111111111111111111111111111111111111111111
print(oct(int(double_to_hex(f_max), 16)))
# 0o777577777777777777777
def double_to_bin(f):
return bin(struct.unpack('>Q', struct.pack('>d', f))[0])
def double_to_oct(f):
return oct(struct.unpack('>Q', struct.pack('>d', f))[0])
print(double_to_bin(f_max))
# 0b111111111101111111111111111111111111111111111111111111111111111
print(double_to_oct(f_max))
# 0o777577777777777777777
def float_to_hex(f):
return hex(struct.unpack('>I', struct.pack('>f', f))[0])
print(float_to_hex(42.195))
# 0x4228c7ae
|
api/tests/integration/tests/basic/rsite.py | tsingdao-Tp/Indigo | 204 | 12647859 | <filename>api/tests/integration/tests/basic/rsite.py
import os
import sys
import errno
sys.path.append('../../common')
from env_indigo import *
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
if not os.path.exists(joinPathPy("out", __file__)):
try:
os.makedirs(joinPathPy("out", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
saver = indigo.createFileSaver(joinPathPy("out/rsite.sdf", __file__), "sdf")
mol = indigo.loadMolecule("CCNNCN")
mol.addRSite("R")
mol.addRSite("R")
mol.addRSite("R1")
mol.addRSite("")
a3 = mol.addRSite("R3")
print(mol.molfile())
saver.append(mol)
mol.addRSite("R1, R3")
print(mol.molfile())
saver.append(mol)
a3.resetAtom("N")
print(mol.molfile())
saver.append(mol)
a0 = mol.getAtom(0)
a0.setRSite("R4")
print(mol.molfile())
saver.append(mol)
a1 = mol.getAtom(1)
a1.resetAtom("O")
print(mol.molfile())
saver.append(mol)
a1.setRSite("R4")
a1.highlight()
print(mol.molfile())
saver.append(mol)
mol = indigo.loadMolecule("CCNNCN")
print(mol.checkRGroups())
mol.addRSite("R1")
print(mol.checkRGroups())
mol = indigo.loadMolecule('''
Ketcher 12091616232D 1 1.00000 0.00000 0
2 1 0 0 0 999 V2000
13.6750 -5.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.5410 -6.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0
M APO 1 2 1
M END
''')
print(mol.checkRGroups())
mol = indigo.loadMolecule('''$MDL REV 1
$MOL
$HDR
$END HDR
$CTAB
2 1 0 0 0 999 V2000
13.6750 -5.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.5410 -6.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0
M END
$END CTAB
$RGP
1
$CTAB
2 1 0 0 0 999 V2000
13.3500 -9.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.2160 -10.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.checkRGroups())
mol = indigo.loadMolecule('''$MDL REV 1 0209181741
$MOL
$HDR
Mrv0541 02091817412D
$END HDR
$CTAB
6 6 0 0 0 0 999 V2000
0.0000 0.8250 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7145 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.7145 -0.4125 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
-0.0000 -0.8250 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7145 -0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-0.7145 0.4125 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
5 6 1 0 0 0 0
1 6 1 0 0 0 0
M LOG 1 1 0 0
M LOG 1 2 0 0
M RGP 2 3 2 6 1
M END
$END CTAB
$RGP
1
$CTAB
1 0 0 0 0 0 999 V2000
3.8966 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$CTAB
1 0 0 0 0 0 999 V2000
6.2538 -2.4750 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$END RGP
$RGP
2
$CTAB
1 0 0 0 0 0 999 V2000
3.8966 -4.9500 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$CTAB
1 0 0 0 0 0 999 V2000
6.2538 -4.9500 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.molfile())
print(mol.smiles())
mol = indigo.loadMolecule('''$MDL REV 1
$MOL
$HDR
$END HDR
$CTAB
8 8 0 0 0 999 V2000
0.1786 1.3406 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
0.1786 0.5156 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.8931 0.1031 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.8931 -0.7219 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.1786 -1.1344 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.5359 -0.7219 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.5359 0.1031 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.2503 0.5156 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
5 6 1 0 0 0 0
6 7 1 0 0 0 0
2 7 1 0 0 0 0
7 8 1 0 0 0 0
M RGP 2 1 1 8 2
M LOG 1 2 1 1 0,1
M END
$END CTAB
$RGP
2
$CTAB
1 0 0 0 0 999 V2000
4.0752 -5.2594 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$END RGP
$RGP
1
$CTAB
3 2 0 0 0 999 V2000
4.0752 -2.3719 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.7897 -2.7844 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.5042 -2.3719 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.smiles())
mol = indigo.loadMolecule('''$MDL REV 1 0212181244
$MOL
$HDR
Mrv0541 02121812442D
$END HDR
$CTAB
4 3 0 0 0 0 999 V2000
0.4125 0.7145 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 -0.0000 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
0.4125 -0.7145 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.8250 -0.0000 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
2 4 1 0 0 0 0
M LOG 1 1 0 0
M RGP 1 2 1
M END
$END CTAB
$RGP
1
$CTAB
7 6 0 0 0 0 999 V2000
3.8304 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.5448 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.2593 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.9738 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.9738 -3.7125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
6.6882 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
7.4027 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
4 6 1 0 0 0 0
6 7 1 0 0 0 0
M APO 2 5 2 7 1
M END
$END CTAB
$CTAB
7 6 0 0 0 0 999 V2000
10.7100 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
11.4245 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
12.1390 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
12.8535 -2.8875 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
12.8535 -3.7125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
13.5679 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.2824 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
4 6 1 0 0 0 0
6 7 1 0 0 0 0
M APO 2 5 2 7 1
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.smiles())
m = indigo.loadMolecule("C1O[*]CO[*]1 |$;;_R2;;;_R1$,RG:_R1={C},{N},_R2={C},{N}|")
print(m.molfile())
m = indigo.loadMolecule("[*]C1CCCCC1[*] |$_R1;;;;;;;_R2$,RG:_R1={CCC},_R2={N},LOG={_R1:;;>0._R2:_R1;H;0,1}|")
print(m.molfile())
m = indigo.loadMolecule("|RG:_R1={CCCCCC}|")
print(m.molfile())
|
pyltr/metrics/tests/test_dcg.py | Haiga/pyltr | 432 | 12647865 | """
Testing for (Normalized) DCG metric.
"""
from . import helpers
import itertools
import numpy as np
import pyltr
class TestDCG(helpers.TestMetric):
def get_metric(self):
return pyltr.metrics.DCG(k=3)
def get_queries_with_values(self):
yield [], 0.0
yield [0], 0.0
yield [1], 1.0
yield [2], 3.0
yield [2, 1, 0], 3.6309297535714578
yield [0, 0, 0], 0.0
yield [2, 5, 1], 23.058822360715183
yield [2, 5, 1, 9], 23.058822360715183
def get_queries(self):
for i in range(0, 5):
for tup in itertools.product(*([(0, 1, 2.5)] * i)):
yield np.array(tup)
class TestNDCG(helpers.TestMetric):
def get_metric(self):
return pyltr.metrics.NDCG(k=3)
def get_queries_with_values(self):
yield [], 0.0
yield [0], 0.0
yield [1], 1.0
yield [2], 1.0
yield [2, 1, 0], 1.0
yield [1, 2, 0], 0.7967075809905066
yield [0, 0, 0], 0.0
yield [2, 5, 1], 0.6905329824556825
yield [2, 5, 1, 9], 0.04333885914794999
yield [3, 2, 1, 1], 1.0
def get_queries(self):
for i in range(0, 5):
for tup in itertools.product(*([(0, 1, 2.5)] * i)):
yield np.array(tup)
|
scripts/lib/clean_unused_caches.py | Pulkit007/zulip | 17,004 | 12647874 | <reponame>Pulkit007/zulip
#!/usr/bin/env python3
import argparse
import os
import sys
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib import clean_emoji_cache, clean_node_cache, clean_venv_cache, clean_yarn_cache
from scripts.lib.zulip_tools import parse_cache_script_args
def main(args: argparse.Namespace) -> None:
os.chdir(ZULIP_PATH)
clean_venv_cache.main(args)
clean_node_cache.main(args)
clean_yarn_cache.main(args)
clean_emoji_cache.main(args)
if __name__ == "__main__":
args = parse_cache_script_args("This script cleans unused Zulip caches.")
main(args)
|
examples/pgu/gui/theme.py | h-vetinari/pybox2d | 421 | 12647885 | <reponame>h-vetinari/pybox2d
# theme.py
"""
"""
import os, re
import pygame
from .const import *
from . import widget
from . import surface
from .basic import parse_color, is_color
__file__ = os.path.abspath(__file__)
def _list_themes(dir):
d = {}
for entry in os.listdir(dir):
if os.path.exists(os.path.join(dir, entry, 'config.txt')):
d[entry] = os.path.join(dir, entry)
return d
class Theme:
"""Theme interface.
If you wish to create your own theme, create a class with this interface, and
pass it to gui.App via gui.App(theme=MyTheme()).
"""
def __init__(self,dirs='default'):
"""Theme constructor.
Keyword arguments:
dirs -- Name of the theme dir to load a theme from. May be an
absolute path to a theme, if pgu is not installed, or if you
created your own theme. May include several dirs in a list if
data is spread across several themes.
Example:
theme = gui.Theme("default")
theme = gui.Theme(["mytheme","mytheme2"])
"""
self.config = {}
self._loaded = []
self.cache = {}
self._preload(dirs)
pygame.font.init()
def _preload(self,ds):
if not isinstance(ds, list):
ds = [ds]
for d in ds:
if d not in self._loaded:
self._load(d)
self._loaded.append(d)
def _load(self, name):
#theme_dir = themes[name]
#try to load the local dir, or absolute path
dnames = [name]
#if the package isn't installed and people are just
#trying out the scripts or examples
dnames.append(os.path.join(os.path.dirname(__file__),"..","..","data","themes",name))
#if the package is installed, and the package is installed
#in /usr/lib/python2.3/site-packages/pgu/
#or c:\python23\lib\site-packages\pgu\
#the data is in ... lib/../share/ ...
dnames.append(os.path.join(os.path.dirname(__file__),"..","..","..","..","share","pgu","themes",name))
dnames.append(os.path.join(os.path.dirname(__file__),"..","..","..","..","..","share","pgu","themes",name))
dnames.append(os.path.join(os.path.dirname(__file__),"..","..","share","pgu","themes",name))
for dname in dnames:
if os.path.isdir(dname): break
if not os.path.isdir(dname):
raise Exception('could not find theme '+name)
fname = os.path.join(dname,"config.txt")
if os.path.isfile(fname):
try:
f = open(fname)
for line in f.readlines():
args = line.strip().split()
if len(args) < 3:
continue
pcls = ""
(cls, attr, vals) = (args[0], args[1], args[2:])
if (":" in cls):
(cls, pcls) = cls.split(":")
self.config[cls, pcls, attr] = (dname, vals)
finally:
f.close()
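        # Example config.txt entry (illustrative): a line such as
        #   button:hover background #c0c0c0
        # is stored as self.config[("button", "hover", "background")] = (dname, ["#c0c0c0"]).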
fname = os.path.join(dname,"style.ini")
if os.path.isfile(fname):
import ConfigParser
cfg = ConfigParser.ConfigParser()
f = open(fname,'r')
cfg.readfp(f)
for section in cfg.sections():
cls = section
pcls = ''
if cls.find(":")>=0:
cls,pcls = cls.split(":")
for attr in cfg.options(section):
vals = cfg.get(section,attr).strip().split()
self.config[cls,pcls,attr] = (dname, vals)
image_extensions = (".gif", ".jpg", ".bmp", ".png", ".tga")
def _get(self, cls, pcls, attr):
key = (cls, pcls, attr)
if not key in self.config:
return
if key in self.cache:
# This property is already in the cache
return self.cache[key]
(dname, vals) = self.config[key]
if (os.path.splitext(vals[0].lower())[1] in self.image_extensions):
# This is an image attribute
v = pygame.image.load(os.path.join(dname, vals[0]))
elif (attr == "color" or attr == "background"):
# This is a color value
v = parse_color(vals[0])
elif (attr == "font"):
# This is a font value
name = vals[0]
size = int(vals[1])
if (name.endswith(".ttf")):
# Load the font from a file
v = pygame.font.Font(os.path.join(dname, name), size)
else:
# Must be a system font
v = pygame.font.SysFont(name, size)
else:
try:
v = int(vals[0])
except:
v = vals[0]
self.cache[key] = v
return v
def get(self,cls,pcls,attr):
"""Interface method -- get the value of a style attribute.
Arguments:
cls -- class, for example "checkbox", "button", etc.
pcls -- pseudo class, for example "hover", "down", etc.
attr -- attribute, for example "image", "background", "font", "color", etc.
This method is called from [[gui-style]]
"""
if not self._loaded:
# Load the default theme
self._preload("default")
o = (cls, pcls, attr)
#if o in self.cache:
# return self.cache[o]
v = self._get(cls, pcls, attr)
if v:
#self.cache[o] = v
return v
v = self._get(cls, "", attr)
if v:
#self.cache[o] = v
return v
v = self._get("default", "", attr)
if v:
#self.cache[o] = v
return v
self.cache[o] = 0
return 0
def box(self,w,s):
style = w.style
c = (0,0,0)
if style.border_color != 0: c = style.border_color
w,h = s.get_width(),s.get_height()
s.fill(c,(0,0,w,style.border_top))
s.fill(c,(0,h-style.border_bottom,w,style.border_bottom))
s.fill(c,(0,0,style.border_left,h))
s.fill(c,(w-style.border_right,0,style.border_right,h))
def getspacing(self,w):
# return the top, right, bottom, left spacing around the widget
if not hasattr(w,'_spacing'): #HACK: assume spacing doesn't change re pcls
s = w.style
xt = s.margin_top+s.border_top+s.padding_top
xr = s.padding_right+s.border_right+s.margin_right
xb = s.padding_bottom+s.border_bottom+s.margin_bottom
xl = s.margin_left+s.border_left+s.padding_left
w._spacing = xt,xr,xb,xl
return w._spacing
def resize(self,w,m):
# Returns the rectangle expanded in each direction
def expand_rect(rect, left, top, right, bottom):
return pygame.Rect(rect.x - left,
rect.y - top,
rect.w + left + right,
rect.h + top + bottom)
def func(width=None,height=None):
s = w.style
pt,pr,pb,pl = (s.padding_top,s.padding_right,
s.padding_bottom,s.padding_left)
bt,br,bb,bl = (s.border_top,s.border_right,
s.border_bottom,s.border_left)
mt,mr,mb,ml = (s.margin_top,s.margin_right,
s.margin_bottom,s.margin_left)
# Calculate the total space on each side
top = pt+bt+mt
right = pr+br+mr
bottom = pb+bb+mb
left = pl+bl+ml
ttw = left+right
tth = top+bottom
ww,hh = None,None
if width != None: ww = width-ttw
if height != None: hh = height-tth
ww,hh = m(ww,hh)
if width == None: width = ww
if height == None: height = hh
#if the widget hasn't respected the style.width,
#style height, we'll add in the space for it...
width = max(width-ttw, ww, w.style.width)
height = max(height-tth, hh, w.style.height)
#width = max(ww,w.style.width-tw)
#height = max(hh,w.style.height-th)
r = pygame.Rect(left,top,width,height)
w._rect_padding = expand_rect(r, pl, pt, pr, pb)
w._rect_border = expand_rect(w._rect_padding, bl, bt, br, bb)
w._rect_margin = expand_rect(w._rect_border, ml, mt, mr, mb)
            # align it within its zone of power.
rect = pygame.Rect(left, top, ww, hh)
dx = width-rect.w
dy = height-rect.h
rect.x += (w.style.align+1)*dx/2
rect.y += (w.style.valign+1)*dy/2
w._rect_content = rect
return (w._rect_margin.w, w._rect_margin.h)
return func
def paint(self,w,m):
def func(s):
# if w.disabled:
# if not hasattr(w,'_disabled_bkgr'):
# w._disabled_bkgr = s.convert()
# orig = s
# s = w._disabled_bkgr.convert()
# if not hasattr(w,'_theme_paint_bkgr'):
# w._theme_paint_bkgr = s.convert()
# else:
# s.blit(w._theme_paint_bkgr,(0,0))
#
# if w.disabled:
# orig = s
# s = w._theme_paint_bkgr.convert()
if w.disabled:
if (not (hasattr(w,'_theme_bkgr') and
w._theme_bkgr.get_width() == s.get_width() and
w._theme_bkgr.get_height() == s.get_height())):
w._theme_bkgr = s.copy()
orig = s
s = w._theme_bkgr
s.fill((0,0,0,0))
s.blit(orig,(0,0))
if w.background:
w.background.paint(surface.subsurface(s,w._rect_border))
self.box(w,surface.subsurface(s,w._rect_border))
r = m(surface.subsurface(s,w._rect_content))
if w.disabled:
s.set_alpha(128)
orig.blit(s,(0,0))
# if w.disabled:
# orig.blit(w._disabled_bkgr,(0,0))
# s.set_alpha(128)
# orig.blit(s,(0,0))
w._painted = True
return r
return func
def event(self,w,m):
def func(e):
rect = w._rect_content
if (not rect):
# This should never be the case, but it sometimes happens that _rect_content isn't
# set before a mouse event is received. In this case we'll ignore the event.
return m(e)
if e.type == MOUSEBUTTONUP or e.type == MOUSEBUTTONDOWN:
sub = pygame.event.Event(e.type,{
'button':e.button,
'pos':(e.pos[0]-rect.x,e.pos[1]-rect.y)})
elif e.type == CLICK:
sub = pygame.event.Event(e.type,{
'button':e.button,
'pos':(e.pos[0]-rect.x,e.pos[1]-rect.y)})
elif e.type == MOUSEMOTION:
sub = pygame.event.Event(e.type,{
'buttons':e.buttons,
'pos':(e.pos[0]-rect.x,e.pos[1]-rect.y),
'rel':e.rel})
else:
sub = e
return m(sub)
return func
def update(self,w,m):
def func(s):
if w.disabled: return []
r = m(surface.subsurface(s,w._rect_content))
if type(r) == list:
dx,dy = w._rect_content.topleft
for rr in r:
rr.x,rr.y = rr.x+dx,rr.y+dy
return r
return func
def open(self,w,m):
def func(widget=None,x=None,y=None):
if not hasattr(w,'_rect_content'):
# HACK: so that container.open won't resize again!
w.rect.w,w.rect.h = w.resize()
rect = w._rect_content
##print w.__class__.__name__, rect
if x != None: x += rect.x
if y != None: y += rect.y
return m(widget,x,y)
return func
#def open(self,w,m):
# def func(widget=None):
# return m(widget)
# return func
def decorate(self,widget,level):
"""Interface method -- decorate a widget.
The theme system is given the opportunity to decorate a widget
methods at the end of the Widget initializer.
Arguments:
widget -- the widget to be decorated
level -- the amount of decoration to do, False for none, True for
normal amount, 'app' for special treatment of App objects.
"""
w = widget
if level == False: return
if type(w.style.background) != int:
w.background = Background(w,self)
if level == 'app': return
for k,v in list(w.style.__dict__.items()):
if k in ('border','margin','padding'):
for kk in ('top','bottom','left','right'):
setattr(w.style,'%s_%s'%(k,kk),v)
w.paint = self.paint(w,w.paint)
w.event = self.event(w,w.event)
w.update = self.update(w,w.update)
w.resize = self.resize(w,w.resize)
w.open = self.open(w,w.open)
def render(self,s,box,r):
"""Interface method - render a special widget feature.
Arguments:
s -- a pygame surface
box -- box data, a value returned from Theme.get, typically a surface
r -- pygame.Rect with the size that the box data should be rendered
"""
if box == 0: return
if is_color(box):
s.fill(box,r)
return
x,y,w,h=r.x,r.y,r.w,r.h
ww,hh=int(box.get_width()/3),int(box.get_height()/3)
xx,yy=x+w,y+h
src = pygame.rect.Rect(0,0,ww,hh)
dest = pygame.rect.Rect(0,0,ww,hh)
s.set_clip(pygame.Rect(x+ww,y+hh,w-ww*2,h-hh*2))
src.x,src.y = ww,hh
for dest.y in range(y+hh,yy-hh,hh):
for dest.x in range(x+ww,xx-ww,ww): s.blit(box,dest,src)
s.set_clip(pygame.Rect(x+ww,y,w-ww*3,hh))
src.x,src.y,dest.y = ww,0,y
for dest.x in range(x+ww,xx-ww*2,ww): s.blit(box,dest,src)
dest.x = xx-ww*2
s.set_clip(pygame.Rect(x+ww,y,w-ww*2,hh))
s.blit(box,dest,src)
s.set_clip(pygame.Rect(x+ww,yy-hh,w-ww*3,hh))
src.x,src.y,dest.y = ww,hh*2,yy-hh
for dest.x in range(x+ww,xx-ww*2,ww): s.blit(box,dest,src)
dest.x = xx-ww*2
s.set_clip(pygame.Rect(x+ww,yy-hh,w-ww*2,hh))
s.blit(box,dest,src)
s.set_clip(pygame.Rect(x,y+hh,xx,h-hh*3))
src.y,src.x,dest.x = hh,0,x
for dest.y in range(y+hh,yy-hh*2,hh): s.blit(box,dest,src)
dest.y = yy-hh*2
s.set_clip(pygame.Rect(x,y+hh,xx,h-hh*2))
s.blit(box,dest,src)
s.set_clip(pygame.Rect(xx-ww,y+hh,xx,h-hh*3))
src.y,src.x,dest.x=hh,ww*2,xx-ww
for dest.y in range(y+hh,yy-hh*2,hh): s.blit(box,dest,src)
dest.y = yy-hh*2
s.set_clip(pygame.Rect(xx-ww,y+hh,xx,h-hh*2))
s.blit(box,dest,src)
s.set_clip(s.get_rect())
src.x,src.y,dest.x,dest.y = 0,0,x,y
s.blit(box,dest,src)
src.x,src.y,dest.x,dest.y = ww*2,0,xx-ww,y
s.blit(box,dest,src)
src.x,src.y,dest.x,dest.y = 0,hh*2,x,yy-hh
s.blit(box,dest,src)
src.x,src.y,dest.x,dest.y = ww*2,hh*2,xx-ww,yy-hh
s.blit(box,dest,src)
class Background(widget.Widget):
def __init__(self,value,theme,**params):
params['decorate'] = False
widget.Widget.__init__(self,**params)
self.value = value
self.theme = theme
def paint(self,s):
r = pygame.Rect(0,0,s.get_width(),s.get_height())
v = self.value.style.background
if is_color(v):
s.fill(v)
else:
self.theme.render(s,v,r)
|
holoviews/plotting/mpl/heatmap.py | pyviz/holoviews | 304 | 12648035 | from __future__ import absolute_import, division, unicode_literals
from itertools import product
import numpy as np
import param
from matplotlib.patches import Wedge, Circle
from matplotlib.collections import LineCollection, PatchCollection
from ...core.data import GridInterface
from ...core.util import dimension_sanitizer, is_nan
from ...core.spaces import HoloMap
from ..mixins import HeatMapMixin
from .element import ColorbarPlot
from .raster import QuadMeshPlot
from .util import filter_styles
class HeatMapPlot(HeatMapMixin, QuadMeshPlot):
clipping_colors = param.Dict(default={'NaN': 'white'}, doc="""
Dictionary to specify colors for clipped values, allows
setting color for NaN values and for values above and below
the min and max value. The min, max or NaN color may specify
an RGB(A) color as a color hex string of the form #FFFFFF or
#FFFFFFFF or a length 3 or length 4 tuple specifying values in
the range 0-1 or a named HTML color.""")
padding = param.ClassSelector(default=0, class_=(int, float, tuple))
radial = param.Boolean(default=False, doc="""
Whether the HeatMap should be radial""")
show_values = param.Boolean(default=False, doc="""
Whether to annotate each pixel with its value.""")
xmarks = param.Parameter(default=None, doc="""
Add separation lines to the heatmap for better readability. By
default, does not show any separation lines. If parameter is of type
        integer, draws the given number of separation lines spread across
heatmap. If parameter is of type list containing integers, show
separation lines at given indices. If parameter is of type tuple, draw
separation lines at given categorical values. If parameter is of type
function, draw separation lines where function returns True for passed
heatmap category.""")
ymarks = param.Parameter(default=None, doc="""
Add separation lines to the heatmap for better readability. By
default, does not show any separation lines. If parameter is of type
        integer, draws the given number of separation lines spread across
heatmap. If parameter is of type list containing integers, show
separation lines at given indices. If parameter is of type tuple, draw
separation lines at given categorical values. If parameter is of type
function, draw separation lines where function returns True for passed
heatmap category.""")
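    # Hedged usage sketch (assumes the usual HoloViews .opts mechanism with the
    # matplotlib backend; the sample data is made up). Note that xmarks/ymarks
    # only take effect on the radial variant (radial=True), see _draw_markers below.
    #   hm = hv.HeatMap([(chr(65 + i), j, i * j) for i in range(5) for j in range(5)])
    #   hm.opts(show_values=True)                   # annotate each quad
    #   hm.opts(radial=True, xmarks=2, ymarks=2)    # radial heatmap with marks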
xticks = param.Parameter(default=20, doc="""
Ticks along x-axis/segments specified as an integer, explicit list of
ticks or function. If `None`, no ticks are shown.""")
yticks = param.Parameter(default=20, doc="""
Ticks along y-axis/annulars specified as an integer, explicit list of
ticks or function. If `None`, no ticks are shown.""")
@classmethod
def is_radial(cls, heatmap):
heatmap = heatmap.last if isinstance(heatmap, HoloMap) else heatmap
opts = cls.lookup_options(heatmap, 'plot').options
return ((any(o in opts for o in ('start_angle', 'radius_inner', 'radius_outer'))
and not (opts.get('radial') == False)) or opts.get('radial', False))
def _annotate_plot(self, ax, annotations):
for a in self.handles.get('annotations', {}).values():
a.remove()
handles = {}
for plot_coord, text in annotations.items():
handles[plot_coord] = ax.annotate(text, xy=plot_coord,
xycoords='data',
horizontalalignment='center',
verticalalignment='center')
return handles
def _annotate_values(self, element, xvals, yvals):
val_dim = element.vdims[0]
vals = element.dimension_values(val_dim).flatten()
xpos = xvals[:-1] + np.diff(xvals)/2.
ypos = yvals[:-1] + np.diff(yvals)/2.
plot_coords = product(xpos, ypos)
annotations = {}
for plot_coord, v in zip(plot_coords, vals):
text = '-' if is_nan(v) else val_dim.pprint_value(v)
annotations[plot_coord] = text
return annotations
def _compute_ticks(self, element, xvals, yvals, xfactors, yfactors):
xdim, ydim = element.kdims
if self.invert_axes:
xdim, ydim = ydim, xdim
opts = self.lookup_options(element, 'plot').options
xticks = opts.get('xticks')
if xticks is None:
xpos = xvals[:-1] + np.diff(xvals)/2.
if not xfactors:
xfactors = element.gridded.dimension_values(xdim, False)
xlabels = [xdim.pprint_value(k) for k in xfactors]
xticks = list(zip(xpos, xlabels))
yticks = opts.get('yticks')
if yticks is None:
ypos = yvals[:-1] + np.diff(yvals)/2.
if not yfactors:
yfactors = element.gridded.dimension_values(ydim, False)
ylabels = [ydim.pprint_value(k) for k in yfactors]
yticks = list(zip(ypos, ylabels))
return xticks, yticks
def _draw_markers(self, ax, element, marks, values, factors, axis='x'):
if marks is None or self.radial:
return
        self.param.warning('Only radial HeatMaps support marks; to make the '
                           'HeatMap quads more distinguishable set linewidths '
                           'to a non-zero value.')
def init_artists(self, ax, plot_args, plot_kwargs):
xfactors = plot_kwargs.pop('xfactors')
yfactors = plot_kwargs.pop('yfactors')
annotations = plot_kwargs.pop('annotations', None)
prefixes = ['annular', 'xmarks', 'ymarks']
plot_kwargs = {k: v for k, v in plot_kwargs.items()
if not any(p in k for p in prefixes)}
artist = ax.pcolormesh(*plot_args, **plot_kwargs)
if self.show_values and annotations:
self.handles['annotations'] = self._annotate_plot(ax, annotations)
self._draw_markers(ax, self.current_frame, self.xmarks,
plot_args[0], xfactors, axis='x')
self._draw_markers(ax, self.current_frame, self.ymarks,
plot_args[1], yfactors, axis='y')
return {'artist': artist}
def get_data(self, element, ranges, style):
xdim, ydim = element.kdims
aggregate = element.gridded
if not element._unique:
self.param.warning('HeatMap element index is not unique, ensure you '
'aggregate the data before displaying it, e.g. '
'using heatmap.aggregate(function=np.mean). '
'Duplicate index values have been dropped.')
data = aggregate.dimension_values(2, flat=False)
data = np.ma.array(data, mask=np.logical_not(np.isfinite(data)))
if self.invert_axes:
xdim, ydim = ydim, xdim
data = data.T[::-1, ::-1]
xtype = aggregate.interface.dtype(aggregate, xdim)
if xtype.kind in 'SUO':
xvals = np.arange(data.shape[1]+1)-0.5
else:
xvals = aggregate.dimension_values(xdim, expanded=False)
xvals = GridInterface._infer_interval_breaks(xvals)
ytype = aggregate.interface.dtype(aggregate, ydim)
if ytype.kind in 'SUO':
yvals = np.arange(data.shape[0]+1)-0.5
else:
yvals = aggregate.dimension_values(ydim, expanded=False)
yvals = GridInterface._infer_interval_breaks(yvals)
xfactors = list(ranges.get(xdim.name, {}).get('factors', []))
yfactors = list(ranges.get(ydim.name, {}).get('factors', []))
xticks, yticks = self._compute_ticks(element, xvals, yvals, xfactors, yfactors)
style['xfactors'] = xfactors
style['yfactors'] = yfactors
if self.show_values:
style['annotations'] = self._annotate_values(element.gridded, xvals, yvals)
vdim = element.vdims[0]
self._norm_kwargs(element, ranges, style, vdim)
if 'vmin' in style:
style['clim'] = style.pop('vmin'), style.pop('vmax')
return (xvals, yvals, data), style, {'xticks': xticks, 'yticks': yticks}
class RadialHeatMapPlot(ColorbarPlot):
start_angle = param.Number(default=np.pi/2, doc="""
        Define the starting angle of the first annular. By default, begins
        at 12 o'clock.""")
max_radius = param.Number(default=0.5, doc="""
Define the maximum radius which is used for the x and y range extents.
""")
radius_inner = param.Number(default=0.1, bounds=(0, 0.5), doc="""
Define the radius fraction of inner, empty space.""")
radius_outer = param.Number(default=0.05, bounds=(0, 1), doc="""
Define the radius fraction of outer space including the labels.""")
radial = param.Boolean(default=True, doc="""
Whether the HeatMap should be radial""")
show_values = param.Boolean(default=False, doc="""
Whether to annotate each pixel with its value.""")
xmarks = param.Parameter(default=None, doc="""
Add separation lines between segments for better readability. By
default, does not show any separation lines. If parameter is of type
        integer, draws the given number of separation lines spread across
radial heatmap. If parameter is of type list containing integers, show
separation lines at given indices. If parameter is of type tuple, draw
separation lines at given segment values. If parameter is of type
function, draw separation lines where function returns True for passed
segment value.""")
ymarks = param.Parameter(default=None, doc="""
Add separation lines between annulars for better readability. By
default, does not show any separation lines. If parameter is of type
        integer, draws the given number of separation lines spread across
radial heatmap. If parameter is of type list containing integers, show
separation lines at given indices. If parameter is of type tuple, draw
separation lines at given annular values. If parameter is of type
function, draw separation lines where function returns True for passed
annular value.""")
xticks = param.Parameter(default=4, doc="""
Ticks along x-axis/segments specified as an integer, explicit list of
ticks or function. If `None`, no ticks are shown.""")
yticks = param.Parameter(default=4, doc="""
Ticks along y-axis/annulars specified as an integer, explicit list of
ticks or function. If `None`, no ticks are shown.""")
projection = param.ObjectSelector(default='polar', objects=['polar'])
_style_groups = ['annular', 'xmarks', 'ymarks']
style_opts = ['annular_edgecolors', 'annular_linewidth',
'xmarks_linewidth', 'xmarks_edgecolor', 'cmap',
'ymarks_linewidth', 'ymarks_edgecolor']
@staticmethod
def _map_order_to_ticks(start, end, order, reverse=False):
"""Map elements from given `order` array to bins ranging from `start`
to `end`.
"""
size = len(order)
bounds = np.linspace(start, end, size + 1)
if reverse:
bounds = bounds[::-1]
mapping = list(zip(bounds[:-1]%(np.pi*2), order))
return mapping
@staticmethod
def _compute_separations(inner, outer, angles):
"""Compute x and y positions for separation lines for given angles.
"""
return [np.array([[a, inner], [a, outer]]) for a in angles]
@staticmethod
def _get_markers(ticks, marker):
if callable(marker):
marks = [v for v, l in ticks if marker(l)]
elif isinstance(marker, int) and marker:
nth_mark = max([np.ceil(len(ticks) / marker).astype(int), 1])
marks = [v for v, l in ticks[::nth_mark]]
elif isinstance(marker, tuple):
marks = [v for v, l in ticks if l in marker]
else:
marks = []
return marks
@staticmethod
def _get_ticks(ticks, ticker):
if callable(ticker):
ticks = [(v, l) for v, l in ticks if ticker(l)]
elif isinstance(ticker, int):
nth_mark = max([np.ceil(len(ticks) / ticker).astype(int), 1])
ticks = ticks[::nth_mark]
elif isinstance(ticker, (tuple, list)):
nth_mark = max([np.ceil(len(ticks) / len(ticker)).astype(int), 1])
ticks = [(v, tl) for (v, l), tl in zip(ticks[::nth_mark], ticker)]
elif ticker:
ticks = list(ticker)
else:
ticks = []
return ticks
def get_extents(self, view, ranges, range_type='combined'):
if range_type == 'hard':
return (np.nan,)*4
return (0, 0, np.pi*2, self.max_radius+self.radius_outer)
def get_data(self, element, ranges, style):
# dimension labels
dim_labels = element.dimensions(label=True)[:3]
x, y, z = [dimension_sanitizer(d) for d in dim_labels]
if self.invert_axes: x, y = y, x
# get raw values
aggregate = element.gridded
xvals = aggregate.dimension_values(x, expanded=False)
yvals = aggregate.dimension_values(y, expanded=False)
zvals = aggregate.dimension_values(2, flat=False)
# pretty print x and y dimension values if necessary
def _pprint(dim_label, vals):
if vals.dtype.kind not in 'SU':
dim = aggregate.get_dimension(dim_label)
return [dim.pprint_value(v) for v in vals]
return vals
xvals = _pprint(x, xvals)
yvals = _pprint(y, yvals)
# annular wedges
start_angle = self.start_angle
end_angle = self.start_angle + 2 * np.pi
bins_segment = np.linspace(start_angle, end_angle, len(xvals)+1)
segment_ticks = self._map_order_to_ticks(start_angle, end_angle,
xvals, True)
radius_max = 0.5
radius_min = radius_max * self.radius_inner
bins_annular = np.linspace(radius_min, radius_max, len(yvals)+1)
radius_ticks = self._map_order_to_ticks(radius_min, radius_max,
yvals)
patches = []
for j in range(len(yvals)):
ybin = bins_annular[j:j+2]
for i in range(len(xvals))[::-1]:
xbin = np.rad2deg(bins_segment[i:i+2])
width = ybin[1]-ybin[0]
wedge = Wedge((0.5, 0.5), ybin[1], xbin[0], xbin[1], width)
patches.append(wedge)
angles = self._get_markers(segment_ticks, self.xmarks)
xmarks = self._compute_separations(radius_min, radius_max, angles)
radii = self._get_markers(radius_ticks, self.ymarks)
ymarks = [Circle((0.5, 0.5), r) for r in radii]
style['array'] = zvals.flatten()
self._norm_kwargs(element, ranges, style, element.vdims[0])
if 'vmin' in style:
style['clim'] = style.pop('vmin'), style.pop('vmax')
data = {'annular': patches, 'xseparator': xmarks, 'yseparator': ymarks}
xticks = self._get_ticks(segment_ticks, self.xticks)
if not isinstance(self.xticks, int):
xticks = [(v-((np.pi)/len(xticks)), l) for v, l in xticks]
yticks = self._get_ticks(radius_ticks, self.yticks)
ticks = {'xticks': xticks, 'yticks': yticks}
return data, style, ticks
def init_artists(self, ax, plot_args, plot_kwargs):
# Draw edges
color_opts = ['c', 'cmap', 'vmin', 'vmax', 'norm', 'array']
groups = [g for g in self._style_groups if g != 'annular']
edge_opts = filter_styles(plot_kwargs, 'annular', groups)
annuli = plot_args['annular']
edge_opts.pop('interpolation', None)
annuli = PatchCollection(annuli, transform=ax.transAxes, **edge_opts)
ax.add_collection(annuli)
artists = {'artist': annuli}
paths = plot_args['xseparator']
if paths:
groups = [g for g in self._style_groups if g != 'xmarks']
xmark_opts = filter_styles(plot_kwargs, 'xmarks', groups, color_opts)
xmark_opts.pop('edgecolors', None)
xseparators = LineCollection(paths, **xmark_opts)
ax.add_collection(xseparators)
artists['xseparator'] = xseparators
paths = plot_args['yseparator']
if paths:
groups = [g for g in self._style_groups if g != 'ymarks']
ymark_opts = filter_styles(plot_kwargs, 'ymarks', groups, color_opts)
ymark_opts.pop('edgecolors', None)
yseparators = PatchCollection(paths, facecolor='none',
transform=ax.transAxes, **ymark_opts)
ax.add_collection(yseparators)
artists['yseparator'] = yseparators
return artists
|
sdss/fields.py | juandesant/astrometry.net | 460 | 12648060 | <gh_stars>100-1000
#! /usr/bin/env python3
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
from __future__ import absolute_import
from astrometry.util.fits import *
from astrometry.util.starutil_numpy import *
from astrometry.util.find_data_file import *
from os.path import basename,dirname
import numpy as np
def get_photoobj_filename(photoobjdir, rr, run, camcol, field):
fn = os.path.join(photoobjdir, rr, '%i'%run, '%i'%camcol,
'photoObj-%06i-%i-%04i.fits' % (run, camcol, field))
return fn
def read_photoobjs_in_wcs(wcs, margin, cols=None,
cutToPrimary=True,
wfn=None,
sdss=None):
'''
Read photoObjs that are inside the given 'wcs', plus 'margin' in degrees.
'''
import logging
log = logging.getLogger('read_photoobjs_in_wcs')
ra,dec = wcs.radec_center()
rad = wcs.radius()
rad += np.hypot(14., 10.) / 2 / 60.
# a little extra margin
rad += margin
if sdss is None:
from astrometry.sdss import DR9
sdss = DR9()
if wfn is None:
wfn = sdss.filenames.get('window_flist', None)
if wfn is None:
wfn = 'window_flist.fits'
if not os.path.exists(wfn):
print('File does not exist:', wfn, '; downloading...')
wfn = sdss.retrieve('window_flist', None, None, None, rerun='xxx')
print('Retrieved', wfn)
else:
print('Using', wfn)
print('Searching for run,camcol,fields with radius', rad, 'deg')
RCF = radec_to_sdss_rcf(ra, dec, radius=rad*60., tablefn=wfn)
log.debug('Found %i fields possibly in range' % len(RCF))
pixmargin = margin * 3600. / wcs.pixel_scale()
W,H = wcs.get_width(), wcs.get_height()
TT = []
for run,camcol,field,r,d in RCF:
log.debug('RCF %i/%i/%i' % (run, camcol, field))
rr = sdss.get_rerun(run, field=field)
if rr in [None, '157']:
log.debug('Rerun 157')
continue
fn = sdss.retrieve('photoObj', run, camcol, field, rerun=rr)
#fn = get_photoobj_filename(rr, run, camcol, field)
T = fits_table(fn, columns=cols)
if T is None:
log.debug('read 0 from %s' % fn)
continue
log.debug('read %i from %s' % (len(T), fn))
# while we're reading it, record its length for later...
#get_photoobj_length(rr, run, camcol, field)
ok,x,y = wcs.radec2pixelxy(T.ra, T.dec)
x -= 1
y -= 1
T.cut((x > -pixmargin) * (x < (W + pixmargin)) *
(y > -pixmargin) * (y < (H + pixmargin)))
if cutToPrimary:
T.cut((T.resolve_status & 256) > 0)
log.debug('cut to %i within target area and PRIMARY.' % len(T))
else:
log.debug('cut to %i within target area.' % len(T))
if len(T) == 0:
continue
TT.append(T)
if not len(TT):
return None
T = merge_tables(TT)
return T
class RaDecToRcf(object):
def __init__(self, tablefn=None):
self.kd = None
self.sdssxyz = None
if tablefn is None:
tablefn = find_data_file('dr7fields.fits')
        self.tablefn = tablefn
        self.tab = fits_table(tablefn)
if self.tab is None:
raise Exception('Failed to read table of SDSS fields from file: "' + str(tablefn) + '"')
def __call__(self, ra, dec, spherematch=True, radius=0, contains=False):
T = self.tab
# HACK - magic 13x9 +1 arcmin.
if radius == 0:
radius = sqrt(14.**2 + 10.**2)/2.
d2 = arcmin2distsq(radius)
if self.sdssxyz is None:
self.sdssxyz = radectoxyz(T.ra, T.dec)
if not spherematch:
rcfs = []
for r,d in broadcast(ra,dec):
xyz = radectoxyz(r,d)
dist2s = sum((xyz - self.sdssxyz)**2, axis=1)
I = flatnonzero(dist2s < d2)
rcfs.append(zip(T[I].run, T[I].camcol,
T[I].field, T[I].ra, T[I].dec))
else:
from astrometry.libkd import spherematch
if self.kd is None:
self.kd = spherematch.tree_build(self.sdssxyz)
rds = array([x for x in broadcast(ra,dec)])
xyz = radectoxyz(rds[:,0], rds[:,1]).astype(double)
kd2 = spherematch.tree_build(xyz)
notself = False
inds,D = spherematch.trees_match(self.kd, kd2, np.sqrt(d2),
notself=notself, permuted=True)
if len(inds) == 0:
return []
I = np.argsort(D[:,0])
inds = inds[I]
rcfs = [[] for i in range(len(rds))]
cols = T.columns()
gotem = False
if contains:
if ('ramin' in cols and 'ramax' in cols and
'decmin' in cols and 'decmax' in cols):
gotem = True
for j,i in inds:
(r,d) = rds[i]
if (r >= T.ramin[j] and r <= T.ramax[j]
and d >= T.decmin[j] and d <= T.decmax[j]):
rcfs[i].append((T.run[j], T.camcol[j],
T.field[j], T.ra[j], T.dec[j]))
#print '%i fields contain the first query RA,Dec' % len(rcfs[0])
else:
                    print('you requested fields *containing* the query RA,Dec,')
                    print('but the fields list file \"%s\" doesn\'t contain RAMIN,RAMAX,DECMIN, and DECMAX columns' % self.tablefn)
if not gotem:
for j,i in inds:
rcfs[i].append((T.run[j], T.camcol[j], T.field[j],
T.ra[j], T.dec[j]))
if isscalar(ra) and isscalar(dec):
return rcfs[0]
return rcfs
# RA,Dec are either scalars or iterables.
# Radius is in *arcmin*. sigh.
# If scalars, returns a list of (run, camcol, field, ra, dec) tuples, one for each matching field.
# If iterable, returns a list containing one list per query (ra,dec) of the same tuple.
def radec_to_sdss_rcf(ra, dec, spherematch=True, radius=0, tablefn=None, contains=False):
RD = RaDecToRcf(tablefn=tablefn)
return RD(ra, dec, spherematch=spherematch, radius=radius, contains=contains)
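# Hedged usage sketch (coordinates are arbitrary; requires a fields table such
# as dr7fields.fits to be locatable by find_data_file):
#   rcfs = radec_to_sdss_rcf(180.0, 0.0, radius=15.)
#   for run, camcol, field, ra, dec in rcfs:
#       print('run %i camcol %i field %i' % (run, camcol, field))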
def OLD_radec_to_sdss_rcf(ra, dec, spherematch=True, radius=0, tablefn=None, contains=False):
# This file is generated by merging the files "dr7_e.fits", "dr7_g.fits", and "dr7_a.fits",
# whose construction is described in http://trac.astrometry.net/browser/trunk/projects/sdss-tests/README
# (and in comments below that I didn't notice before writing this)
if tablefn is None:
tablefn = find_data_file('dr7fields.fits')
sdss = table_fields(tablefn)
if sdss is None:
print('Failed to read table of SDSS fields from file', tablefn)
raise Exception('Failed to read table of SDSS fields from file: "' + str(tablefn) + '"')
sdssxyz = radectoxyz(sdss.ra, sdss.dec)
## HACK - magic 13x9 arcmin.
if radius == 0:
radius = sqrt(13.**2 + 9.**2)/2.
radius2 = arcmin2distsq(radius)
if not spherematch:
rcfs = []
for r,d in broadcast(ra,dec):
xyz = radectoxyz(r,d)
dist2s = sum((xyz - sdssxyz)**2, axis=1)
I = flatnonzero(dist2s < radius2)
if False:
print('I:', I)
print('fields:', sdss[I].run, sdss[I].field, sdss[I].camcol)
print('RA', sdss[I].ra)
print('Dec', sdss[I].dec)
rcfs.append(zip(sdss[I].run, sdss[I].camcol, sdss[I].field, sdss[I].ra, sdss[I].dec))
else:
from astrometry.libkd import spherematch
rds = array([x for x in broadcast(ra,dec)])
xyz = radectoxyz(rds[:,0], rds[:,1]).astype(double)
(inds,dists) = spherematch.match(xyz, sdssxyz, sqrt(radius2))
#print 'found %i matches' % len(inds)
if len(inds) == 0:
return []
#print 'inds:', inds.shape
I = np.argsort(dists[:,0])
#print 'dists:', dists.shape
inds = inds[I,:]
rcfs = [[] for i in range(len(rds))]
cols = sdss.columns()
gotem = False
if contains:
if 'ramin' in cols and 'ramax' in cols and 'decmin' in cols and 'decmax' in cols:
gotem = True
for i,j in inds:
(r,d) = rds[i]
if r >= sdss.ramin[j] and r <= sdss.ramax[j] and d >= sdss.decmin[j] and d <= sdss.decmax[j]:
rcfs[i].append((sdss.run[j], sdss.camcol[j], sdss.field[j], sdss.ra[j], sdss.dec[j]))
print('%i fields contain the first query RA,Dec' % len(rcfs[0]))
else:
print('you requested fields *containing* the query RA,Dec,')
print('but the fields list file \"%s\" doesn\'t contain RAMIN,RAMAX,DECMIN, and DECMAX columns' % tablefn)
if not gotem:
for i,j in inds:
rcfs[i].append((sdss.run[j], sdss.camcol[j], sdss.field[j], sdss.ra[j], sdss.dec[j]))
if isscalar(ra) and isscalar(dec):
return rcfs[0]
return rcfs
# The field list was created starting with dstn's list of fields in DR7:
# fitscopy dr7_e.fits"[col RUN;FIELD;CAMCOL;RA=(RAMIN+RAMAX)/2;DEC=(DECMIN+DECMAX)/2]" e.fits
# fitscopy dr7_g.fits"[col RUN;FIELD;CAMCOL;RA=(RAMIN+RAMAX)/2;DEC=(DECMIN+DECMAX)/2]" g.fits
# fitscopy dr7_a.fits"[col RUN;FIELD;CAMCOL;RA=(RAMIN+RAMAX)/2;DEC=(DECMIN+DECMAX)/2]" a.fits
# tabmerge g.fits e.fits
# tabmerge g.fits+1 e.fits+1
# tabmerge a.fits+1 e.fits+1
# mv e.fits dr7fields.fits
# rm g.fits a.fits
'''
cd ~/sdss-tests
casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @dr7_ngood.sql
casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @dr7_ngood2.sql
casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS outputdownloaddelete mydb.goodfields2 /tmp/dr7.fits
fitscopy /tmp/dr7.fits"[col RA=(ramin+ramax)/2;DEC=(decmin+decmax)/2;run;field;camcol;ngood;ramin;ramax;decmin;decmax]" dr7fields.fits
casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @s82_ngood.sql
# Stripe82 has no RunQA table.
casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @s82_ngood2.sql
casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS outputdownloaddelete mydb.s82goodfields2 s82.fits
fitscopy s82.fits"[col RA=(ramin+ramax)/2;DEC=(decmin+decmax)/2;run;field;camcol;ngood;ramin;ramax;decmin;decmax]" s82fields.fits
'''
def main():
import sys
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] <ra> <dec>')
parser.add_option('-f', dest='fields', help='FITS table of fields to use; default is astrometry/data/dr7fields.fits')
parser.add_option('-c', dest='contains', action='store_true', help='Print only fields that *contain* the given point; requires RAMIN,RAMAX,DECMIN,DECMAX fields.')
parser.add_option('-b', '--bands', dest='bands', help='Retrieve fpCs of the given bands; default "ugriz"')
parser.add_option('-t', dest='filetypes', help='Retrieve this file type (fpC, fpM, psField, tsField, tsObj, etc)', action='append', default=['fpC'])
parser.add_option('-r', dest='radius', type=float, default=15., help='Search radius (arcmin)')
parser.set_defaults(fields=None, contains=False, bands='ugriz')
(opt, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
print()
print('Got extra arguments:', args)
sys.exit(-1)
# parse RA,Dec.
try:
ra = float(args[0])
except ValueError:
ra = hmsstring2ra(args[0])
try:
dec = float(args[1])
except ValueError:
dec = dmsstring2dec(args[1])
tablefn = None
if opt.fields is not None:
if os.path.exists(opt.fields):
tablefn = opt.fields
else:
tablefn = find_data_file(opt.fields)
if tablefn is None:
print('Failed to find list of fields:', opt.fields)
sys.exit(-1)
# arcmin
radius = opt.radius
rcfs = radec_to_sdss_rcf(ra,dec,radius=radius, tablefn=tablefn, contains=opt.contains)
print('ra,dec', ra,dec)
print('rcfs:', rcfs)
print()
for (r,c,f,ra1,dec1) in rcfs:
print('%i %i %i (dist: %g arcmin)' % (r,c,f, deg2arcmin(degrees_between(ra,dec,ra1,dec1))))
print()
for (r,c,f,ra1,dec1) in rcfs:
print('http://cas.sdss.org/dr7/en/get/frameByRCFZ.asp?R=%i&C=%i&F=%i&Z=0&submit1=Get+Image' % (r,c,f))
print()
for (r,c,f,ra1,dec1) in rcfs:
print('wget "http://cas.sdss.org/dr7/en/get/frameByRCFZ.asp?R=%i&C=%i&F=%i&Z=0&submit1=Get+Image" -O sdss-%04i-%i-%04i.jpg' % (r,c,f,r,c,f))
from .sdss_das import sdss_das_get
for (r,c,f,ra1,dec1) in rcfs:
for t in opt.filetypes:
for b in opt.bands:
R = sdss_das_get(t, None, r, c, f, b)
if R is False:
continue
if t == 'fpC':
fpc = sdss_filename('fpC', r, c, f, b)
os.system('gunzip -cd %s.gz > %s' % (fpc,fpc))
wcs = Tan(filename=fpc)
x,y = wcs.radec2pixelxy(ra, dec)
x,y = int(x),int(y)
os.system('imcopy %s"[%i:%i,%i:%i]" !/tmp/cut-%s' % (fpc, max(0, x-100), x+100, max(0, y-100), y+100, fpc))
os.system('an-fitstopnm -i /tmp/cut-%s -N 1150 -X 1400 | pnmtopng > cut-%s.png' % (fpc, fpc))
print('R,C,F', r,c,f)
print('x,y', x,y)
if __name__ == '__main__':
main()
|
src/chime_dash/app/components/menu.py | covidcaremap/chime | 222 | 12648065 | """component/menu
Dropdown menu which appears on the navigation bar at the top of the screen
refactor incoming
"""
from typing import List
from dash.development.base_component import ComponentMeta
import dash_bootstrap_components as dbc
from chime_dash.app.components.base import Component
class Menu(Component):
"""
"""
def get_html(self) -> List[ComponentMeta]:
menu = dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Penn Medicine", header=True),
dbc.DropdownMenuItem(
"Predictive Healthcare",
href="http://predictivehealthcare.pennmedicine.org/",
external_link=True,
target="_blank",
),
dbc.DropdownMenuItem(
"How to Use CHIME",
href="https://code-for-philly.gitbook.io/chime/",
external_link=True,
target="_blank",
),
],
in_navbar=True,
label="Learn More",
color="light",
right=True
)
return [menu]
|
TopQuarkAnalysis/TopEventProducers/python/tqafSequences_old_cff.py | ckamtsikis/cmssw | 852 | 12648091 | <filename>TopQuarkAnalysis/TopEventProducers/python/tqafSequences_old_cff.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
## produce ttGenEvent
from TopQuarkAnalysis.TopEventProducers.sequences.ttGenEvent_cff import *
## produce event solution
from TopQuarkAnalysis.TopEventProducers.producers.TtSemiEvtSolProducer_cfi import *
## make tqaf layer2
tqafLayer2_ttSemiLeptonic_old = cms.Sequence(makeGenEvt * solutions)
## produce ttGenEvent
from TopQuarkAnalysis.TopEventProducers.sequences.ttGenEvent_cff import *
## produce event solution
from TopQuarkAnalysis.TopEventProducers.producers.TtDilepEvtSolProducer_cfi import *
## make tqaf layer2
tqafLayer2_ttFullLeptonic_old = cms.Sequence(makeGenEvt*solutions)
|
Analysis/TMAP/scripts/util/sam2hpstats.py | konradotto/TS | 125 | 12648109 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved
import os
import re
import sys
from optparse import OptionParser
import pysam
import string
# MATCH = 0
# INS = 1
# DEL = 2
# REF_SKIP = 3
# SOFT_CLIP = 4
# HARD_CLIP = 5
# PAD = 6
def get_qref(md, cigar, qseq):
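    """Reconstruct the aligned reference sequence from the MD tag, CIGAR and query sequence."""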
seq = ""
for i in cigar:
operation, count = i[0], int(i[1])
# print operation, count
if operation == 0:
seq += qseq[0:count]
# qseq = qseq.replace(qseq[0:count], '')
qseq = qseq[:0] + "" + qseq[0 + count :]
elif operation == 4 or operation == 1:
# print "beforeeeeeeeeeeeeeee = " + qseq
# qseq = qseq.replace(qseq[0:count], '')
qseq = qseq[:0] + "" + qseq[0 + count :]
# print "qseqqqqqqqqqqqqqqqqqqqqqqqqqqqq = " + qseq
# print "seq = " + seq
start = 0
result = ""
md_split = re.findall(
"(\d+)|\^([gatcnryswkmbdhv]+)|([gatcnryswkmbdhvn]+)", md, re.IGNORECASE
)
# print md_split
for i in md_split:
# print i
if i[0]:
end = start + int(i[0])
result += seq[start:end]
start += int(i[0])
# print result
elif i[1]:
result += i[1]
# print result
elif i[2]:
result += i[2]
start += len(i[2])
# print result
# print "result = " + result
return result
def translate(read, ref, match):
read = read[::-1]
read = read.translate(string.maketrans("ACGTacgt", "TGCAtgca"))
ref = ref[::-1]
ref = ref.translate(string.maketrans("ACGTacgtRYKMBVDH", "TGCAtgcaYRMKVBHD"))
match = match[::-1]
return (read, ref, match)
def alignment(strand, md, cigar, qseq):
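    """Build padded (read, ref, match) strings for one alignment from the
    strand, MD tag, CIGAR and query sequence; matches are marked '|',
    gaps '-', and soft-clipped bases 'S'."""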
ref, read, match = ("", "", "")
qref_i, qseq_i, n = (0, 0, 0)
iupac = {
"R": "AG",
"Y": "CT",
"S": "GC",
"W": "AT",
"K": "GT",
"M": "AC",
"B": "CGT",
"D": "AGT",
"H": "ACT",
"V": "ACG",
"N": "ACGT",
}
qref = get_qref(md, cigar, qseq)
# print qref
qref = qref.upper()
qseq = qseq.upper()
# make the alignment
# print "cigar = " + str(cigar)
for i in cigar:
# print i[0]
operation, count = i[0], int(i[1])
if operation == 0:
for i in range(0, count):
if qref[qref_i + i : qref_i + i + 1] in iupac:
if (
qseq[qseq_i + i : qseq_i + i + 1]
in iupac[qref[qref_i + i : qref_i + i + 1]]
):
match += "|"
else:
match += " "
elif (
qseq[qseq_i + i : qseq_i + i + 1]
== qref[qref_i + i : qref_i + i + 1]
):
match += "|"
else:
match += " "
read += qseq[qseq_i : qseq_i + count]
qseq_i += count
ref += qref[qref_i : qref_i + count]
qref_i += count
# print "readh = " + read
# print "match = " + match
# print "refdh = " + ref
elif operation == 1:
read += qseq[qseq_i : qseq_i + count]
qseq_i += count
ref += "-" * count
match += "-" * count
# print "readh = " + read
# print "match = " + match
# print "refdh = " + ref
elif operation == 2:
read += "-" * count
match += "-" * count
ref += qref[qref_i : qref_i + count]
qref_i += count
# print "readh = " + read
# print "match = " + match
# print "refdh = " + ref
elif operation == 4:
read += qseq[0:count]
match += "S" * count
ref += "-" * count
qseq = qseq[count:]
# print "readh = " + read
# print "match = " + match
# print "refdh = " + ref
n += 1
if strand == 1:
# print "readh = " + read
# print "match = " + match
# print "refdh = " + ref
(read, ref, match) = translate(read, ref, match)
return (read.upper(), ref.upper(), match)
class HPStats:
def __init__(self, maxhp, maxrl):
self._data = list()
self._maxhp = maxhp
self._maxrl = maxrl
def add(self, l, i, aln_type):
""" l is the hp length (one-based), i is the read index (zero-based), aln_type is (0 - non-indel, 1 - insertion, 2 - deletion) """
if self._maxhp < l:
l = self._maxhp
if self._maxrl <= i:
return # do not add
while len(self._data) < l:
self._data.append(list())
while len(self._data[l - 1]) <= i:
self._data[l - 1].append([0, 0, 0])
self._data[l - 1][i][0] += 1 # total
if 0 < aln_type:
self._data[l - 1][i][aln_type] += 1
def get(self, l, i, aln_type):
if self._maxhp < l:
l = self._maxhp
if len(self._data) < l:
return None
if len(self._data[l - 1]) <= i:
return None
if aln_type < 0 or 2 < aln_type:
return None
return self._data[l - 1][i][aln_type]
def dump(self):
for i in xrange(len(self._data)): # hp length
for j in xrange(len(self._data[i])): # read index
print(
"%d\t%d\t%d\t%d\t%d"
% (
i + 1,
j + 1,
self._data[i][j][0],
self._data[i][j][1],
self._data[i][j][2],
)
)
def dump_table(self, num_reads):
num_total = num_insertions = num_deletions = 0
for i in xrange(len(self._data)): # hp length
for j in xrange(len(self._data[i])): # read index
num_total += self._data[i][j][0]
num_insertions += self._data[i][j][1]
num_deletions += self._data[i][j][2]
print(
"%d\t%d\t%.2f\t%.2f"
% (
num_insertions,
num_deletions,
100.0 * (num_insertions + num_deletions) / num_total,
float(num_insertions + num_deletions) / num_reads,
)
)
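# Hedged usage sketch of HPStats (numbers are made up): record a reference
# homopolymer of length 3 seen with a deletion at read position 10, then dump.
#   stats = HPStats(maxhp=9, maxrl=100)
#   stats.add(3, 10, 2)   # l is 1-based hp length, i is 0-based read index, 2 = deletion
#   stats.dump()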
def main(options):
    if re.search(r"sam$", options.sam):
        sam = pysam.Samfile(options.sam, "r")
    elif re.search(r"bam$", options.sam):
        sam = pysam.Samfile(options.sam, "rb")
    else:
        sys.exit("input file must end in .sam or .bam: %s" % options.sam)
if options.hpstats:
hpstats = HPStats(options.hpstats_maxhp, options.hpstats_maxrl)
num_reads = 0
while True:
try:
read = sam.next()
except Exception:
break
if None == read:
break
if read.is_unmapped:
continue
num_reads += 1
md = read.opt("MD")
cigar = read.cigar
qseq = read.seq
strand = 0
if read.is_reverse:
strand = 1
# print str(strand), md, cigar, qseq
read, ref, match = alignment(strand, md, cigar, qseq)
if not options.hpstats:
print(read)
print(match)
print(ref)
else:
read_i = [0 for i in xrange(len(match))]
j = 0
for i in xrange(len(match)):
read_i[i] = j
if "-" != read[i]:
j += 1
# init
if 1 == strand:
(read, ref, match) = translate(read, ref, match)
i = 0
while i < len(match) and "S" == match[i]:
i += 1
i_end = i
while i_end < len(match) and "S" != match[i_end]:
i_end += 1
while i < i_end:
# go through an hp
start = end = i
read_l = 0
ref_l = 0
# find the end
if "-" == read[end]:
end = start
while (
end < i_end
and ("-" == read[end] or ref[start] == read[end])
and ref[start] == ref[end]
):
ref_l += 1
if "-" != read[end]:
read_l += 1
end += 1
else:
end = start
while (
end < i_end
and ("-" == read[end] or read[start] == read[end])
and ("-" == ref[end] or read[start] == ref[end])
):
if "-" != read[end]:
read_l += 1
if "-" != ref[end]:
ref_l += 1
end += 1
if 0 < read_l or 0 < ref_l:
if options.hpstats_verbose:
print(
"HP Found at:%d ref:%d read:%d"
% (read_i[start], ref_l, read_l)
)
if read_l < ref_l: # deletion
hpstats.add(ref_l, read_i[start], 2)
elif ref_l < read_l: # insertion
if options.hpstats_ins_by_ref:
hpstats.add(ref_l, read_i[start], 1)
else:
hpstats.add(read_l, read_i[start], 1)
else:
# normal
hpstats.add(ref_l, read_i[start], 0)
if end == start:
i += 1
else:
i = end
if options.hpstats_verbose:
print(read)
print(match)
print(ref)
if options.hpstats:
if options.hpstats_table:
hpstats.dump_table(num_reads)
else:
hpstats.dump()
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--sam", help="input sam file", dest="sam", default=None)
parser.add_option(
"--hpstats", help="hpstats", action="store_true", dest="hpstats", default=False
)
parser.add_option(
"--hpstats-ins-by-ref",
help="use the reference hp length for insertions",
action="store_true",
dest="hpstats_ins_by_ref",
default=False,
)
parser.add_option(
"--hpstats-maxhp",
type=int,
help="maximum homopolymer length for hpstats",
dest="hpstats_maxhp",
default=9,
)
parser.add_option(
"--hpstats-maxrl",
type=int,
help="maximum read length for hpstats",
dest="hpstats_maxrl",
default=100,
)
parser.add_option(
"--hpstats-table",
help="dump indel summary for hpstats",
dest="hpstats_table",
action="store_true",
default=False,
)
parser.add_option(
"--hpstats-verbose",
help="hpstats verbose",
dest="hpstats_verbose",
action="store_true",
default=False,
)
if len(sys.argv[1:]) < 1:
parser.print_help()
else:
options, args = parser.parse_args()
main(options)
|
varken/lidarr.py | hpalmete/Varken | 920 | 12648110 | from logging import getLogger
from requests import Session, Request
from datetime import datetime, timezone, date, timedelta
from varken.structures import LidarrQueue, LidarrAlbum
from varken.helpers import hashit, connection_handler
class LidarrAPI(object):
def __init__(self, server, dbmanager):
self.dbmanager = dbmanager
self.server = server
# Create session to reduce server web thread load, and globally define pageSize for all requests
self.session = Session()
self.session.headers = {'X-Api-Key': self.server.api_key}
self.logger = getLogger()
def __repr__(self):
return f"<lidarr-{self.server.id}>"
def get_calendar(self, query="Missing"):
endpoint = '/api/v1/calendar'
today = str(date.today())
last_days = str(date.today() - timedelta(days=self.server.missing_days))
future = str(date.today() + timedelta(days=self.server.future_days))
now = datetime.now(timezone.utc).astimezone().isoformat()
if query == "Missing":
params = {'start': last_days, 'end': today}
else:
params = {'start': today, 'end': future}
influx_payload = []
influx_albums = []
req = self.session.prepare_request(Request('GET', self.server.url + endpoint, params=params))
get = connection_handler(self.session, req, self.server.verify_ssl)
if not get:
return
# Iteratively create a list of LidarrAlbum Objects from response json
albums = []
for album in get:
try:
albums.append(LidarrAlbum(**album))
except TypeError as e:
self.logger.error('TypeError has occurred : %s while creating LidarrAlbum structure for album. Data '
'attempted is: %s', e, album)
# Add Album to missing list if album is not complete
for album in albums:
percent_of_tracks = album.statistics.get('percentOfTracks', 0)
if percent_of_tracks != 100:
influx_albums.append(
(album.title, album.releaseDate, album.artist['artistName'], album.id, percent_of_tracks,
f"{album.statistics.get('trackFileCount', 0)}/{album.statistics.get('trackCount', 0)}")
)
for title, release_date, artist_name, album_id, percent_complete, complete_count in influx_albums:
hash_id = hashit(f'{self.server.id}{title}{album_id}')
influx_payload.append(
{
"measurement": "Lidarr",
"tags": {
"type": query,
"sonarrId": album_id,
"server": self.server.id,
"albumName": title,
"artistName": artist_name,
"percentComplete": percent_complete,
"completeCount": complete_count,
"releaseDate": release_date
},
"time": now,
"fields": {
"hash": hash_id
}
}
)
self.dbmanager.write_points(influx_payload)
def get_queue(self):
endpoint = '/api/v1/queue'
now = datetime.now(timezone.utc).astimezone().isoformat()
influx_payload = []
params = {'pageSize': 1000}
req = self.session.prepare_request(Request('GET', self.server.url + endpoint, params=params))
get = connection_handler(self.session, req, self.server.verify_ssl)
if not get:
return
queue = []
for song in get['records']:
try:
queue.append(LidarrQueue(**song))
except TypeError as e:
self.logger.error('TypeError has occurred : %s while creating LidarrQueue structure for show. Data '
'attempted is: %s', e, song)
if not queue:
return
for song in queue:
if song.protocol.upper() == 'USENET':
protocol_id = 1
else:
protocol_id = 0
hash_id = hashit(f'{self.server.id}{song.title}{song.artistId}')
influx_payload.append(
{
"measurement": "Lidarr",
"tags": {
"type": "Queue",
"id": song.id,
"server": self.server.id,
"title": song.title,
"quality": song.quality['quality']['name'],
"protocol": song.protocol,
"protocol_id": protocol_id,
"indexer": song.indexer
},
"time": now,
"fields": {
"hash": hash_id
}
}
)
self.dbmanager.write_points(influx_payload)
|
pml/LibFFMClassifier.py | gatapia/py_ml_utils | 183 | 12648125 | from __future__ import print_function, absolute_import
import os, sys, subprocess, shlex, tempfile, time, sklearn.base, math
import numpy as np
import pandas as pd
from pandas_extensions import *
from ExeEstimator import *
class LibFFMClassifier(ExeEstimator, sklearn.base.ClassifierMixin):
'''
options:
-l <lambda>: set regularization parameter (default 0)
-k <factor>: set number of latent factors (default 4)
-t <iteration>: set number of iterations (default 15)
-r <eta>: set learning rate (default 0.1)
-s <nr_threads>: set number of threads (default 1)
-p <path>: set path to the validation set
--quiet: quiet model (no output)
--norm: do instance-wise normalization
--no-rand: disable random update
`--norm' helps you to do instance-wise normalization. When it is enabled,
you can simply assign `1' to `value' in the data.
'''
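  # Hedged usage sketch (assumes the ffm-train/ffm-predict binaries exist under
  # utils/lib and X is a DataFrame with the given columns; the data is made up):
  #   clf = LibFFMClassifier(columns=X.columns, factor=8, iteration=20, quiet=True)
  #   clf.fit(X, y)
  #   probs = clf.predict_proba(X_test)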
def __init__(self, columns, lambda_v=0, factor=4, iteration=15, eta=0.1,
nr_threads=1, quiet=False, normalize=None, no_rand=None):
ExeEstimator.__init__(self)
self.columns = columns.tolist() if hasattr(columns, 'tolist') else columns
self.lambda_v = lambda_v
self.factor = factor
self.iteration = iteration
self.eta = eta
self.nr_threads = nr_threads
self.quiet = quiet
self.normalize = normalize
self.no_rand = no_rand
def fit(self, X, y=None):
if type(X) is str: train_file = X
else:
if not hasattr(X, 'values'): X = pd.DataFrame(X, columns=self.columns)
train_file = self.save_reusable('_libffm_train', 'to_libffm', X, y)
# self._model_file = self.save_tmp_file(X, '_libffm_model', True)
self._model_file = self.tmpfile('_libffm_model')
    command = 'utils/lib/ffm-train.exe' + ' -l ' + repr(self.lambda_v) + \
      ' -k ' + repr(self.factor) + ' -t ' + repr(self.iteration) + ' -r ' + repr(self.eta) + \
      ' -s ' + repr(self.nr_threads)
if self.quiet: command += ' --quiet'
if self.normalize: command += ' --norm'
if self.no_rand: command += ' --no-rand'
command += ' ' + train_file
command += ' ' + self._model_file
running_process = self.make_subprocess(command)
self.close_process(running_process)
return self
def predict(self, X):
if type(X) is str: test_file = X
else:
if not hasattr(X, 'values'): X = pd.DataFrame(X, columns=self.columns)
test_file = self.save_reusable('_libffm_test', 'to_libffm', X)
output_file = self.tmpfile('_libffm_predictions')
command = 'utils/lib/ffm-predict.exe ' + test_file + ' ' + self._model_file + ' ' + output_file
running_process = self.make_subprocess(command)
self.close_process(running_process)
preds = list(self.read_predictions(output_file))
return preds
def predict_proba(self, X):
    predictions = np.asarray([1 / (1 + math.exp(-p)) for p in self.predict(X)])
return np.vstack([1 - predictions, predictions]).T
|
koku/api/report/ocp/view.py | rubik-ai/koku | 157 | 12648173 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""View for OpenShift Usage Reports."""
from api.common.permissions.openshift_access import OpenShiftAccessPermission
from api.models import Provider
from api.report.ocp.query_handler import OCPReportQueryHandler
from api.report.ocp.serializers import OCPCostQueryParamSerializer
from api.report.ocp.serializers import OCPInventoryQueryParamSerializer
from api.report.view import ReportView
from reporting.provider.ocp.models import OCPStorageVolumeLabelSummary
from reporting.provider.ocp.models import OCPUsagePodLabelSummary
class OCPView(ReportView):
"""OCP Base View."""
permission_classes = [OpenShiftAccessPermission]
provider = Provider.PROVIDER_OCP
serializer = OCPInventoryQueryParamSerializer
query_handler = OCPReportQueryHandler
tag_handler = [OCPUsagePodLabelSummary, OCPStorageVolumeLabelSummary]
class OCPMemoryView(OCPView):
"""Get OpenShift memory usage data."""
report = "memory"
class OCPCpuView(OCPView):
"""Get OpenShift compute usage data."""
report = "cpu"
class OCPCostView(OCPView):
"""Get OpenShift cost data."""
report = "costs"
serializer = OCPCostQueryParamSerializer
class OCPVolumeView(OCPView):
"""Get OpenShift volume usage data."""
report = "volume"
|
pyvgdlmaster/examples/gridphysics/missilecommand.py | LRHammond/pv4dsrl | 111 | 12648186 | '''
VGDL example: Missile Command.
@author: <NAME> and <NAME>
'''
missilecommand_level = """
w m m m m w
w w
w w
w w
w w
w w
w A w
w w
w w
w w
w c c c w
wwwwwwwwwwwwwwwwwwwwwwww
"""
missilecommand_game = """
BasicGame
SpriteSet
city > Immovable color=GREEN
incoming > Chaser stype=city color=ORANGE speed=0.1
explosion > Flicker limit=5
avatar > ShootAvatar stype=explosion
LevelMapping
c > city
m > incoming
InteractionSet
movable wall > stepBack
incoming city > killSprite
city incoming > killSprite
incoming explosion > killSprite
TerminationSet
SpriteCounter stype=city win=False
SpriteCounter stype=incoming win=True
"""
if __name__ == "__main__":
from vgdl.core import VGDLParser
VGDLParser.playGame(missilecommand_game, missilecommand_level) |
static/ppdet/modeling/architectures/input_helper.py | violetweir/PaddleDetection | 7,782 | 12648193 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def multiscale_def(image_shape, num_scale, use_flip=True):
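    """Build extra feed-field definitions for multi-scale (and optionally
    flipped) test images, returning the definition dict and the field names."""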
base_name_list = ['image']
multiscale_def = {}
ms_def_names = []
if use_flip:
num_scale //= 2
base_name_list.append('image_flip')
multiscale_def['image_flip'] = {
'shape': [None] + image_shape,
'dtype': 'float32',
'lod_level': 0
}
multiscale_def['im_info_image_flip'] = {
'shape': [None, 3],
'dtype': 'float32',
'lod_level': 0
}
ms_def_names.append('image_flip')
ms_def_names.append('im_info_image_flip')
for base_name in base_name_list:
for i in range(0, num_scale - 1):
name = base_name + '_scale_' + str(i)
multiscale_def[name] = {
'shape': [None] + image_shape,
'dtype': 'float32',
'lod_level': 0
}
im_info_name = 'im_info_' + name
multiscale_def[im_info_name] = {
'shape': [None, 3],
'dtype': 'float32',
'lod_level': 0
}
ms_def_names.append(name)
ms_def_names.append(im_info_name)
return multiscale_def, ms_def_names
|
.modules/.recon-ng/modules/reporting/csv.py | termux-one/EasY_HaCk | 1,103 | 12648219 | <filename>.modules/.recon-ng/modules/reporting/csv.py
from recon.core.module import BaseModule
import csv
import os
class Module(BaseModule):
meta = {
'name': 'CSV File Creator',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Creates a CSV file containing the specified harvested data.',
'options': (
('table', 'hosts', True, 'source table of data to export'),
('filename', os.path.join(BaseModule.workspace, 'results.csv'), True, 'path and filename for output'),
),
}
def module_run(self):
filename = self.options['filename']
# codecs module not used because the csv module converts to ascii
with open(filename, 'w') as outfile:
# build a list of table names
table = self.options['table']
rows = self.query('SELECT * FROM "%s" ORDER BY 1' % (table))
            csvwriter = csv.writer(outfile, quoting=csv.QUOTE_ALL)
            cnt = 0
            for row in rows:
                row = [x if x else '' for x in row]
                if any(row):
                    cnt += 1
                    csvwriter.writerow([s.encode("utf-8") for s in row])
self.output('%d records added to \'%s\'.' % (cnt, filename))
|
tools/third_party/h2/test/test_events.py | meyerweb/wpt | 2,479 | 12648225 | <filename>tools/third_party/h2/test/test_events.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""
test_events.py
~~~~~~~~~~~~~~
Specific tests for any function that is logically self-contained as part of
events.py.
"""
import inspect
import sys
from hypothesis import given
from hypothesis.strategies import (
integers, lists, tuples
)
import pytest
import h2.errors
import h2.events
import h2.settings
# We define a fairly complex Hypothesis strategy here. We want to build a list
# of two tuples of (Setting, value). For Setting we want to make sure we can
# handle settings that the rest of hyper knows nothing about, so we want to
# use integers from 0 to (2**16-1). For values, they're from 0 to (2**32-1).
# Define that strategy here for clarity.
SETTINGS_STRATEGY = lists(
tuples(
integers(min_value=0, max_value=2**16-1),
integers(min_value=0, max_value=2**32-1),
)
)
class TestRemoteSettingsChanged(object):
"""
Validate the function of the RemoteSettingsChanged event.
"""
@given(SETTINGS_STRATEGY)
def test_building_settings_from_scratch(self, settings_list):
"""
Missing old settings are defaulted to None.
"""
settings_dict = dict(settings_list)
e = h2.events.RemoteSettingsChanged.from_settings(
old_settings={},
new_settings=settings_dict,
)
for setting, new_value in settings_dict.items():
assert e.changed_settings[setting].setting == setting
assert e.changed_settings[setting].original_value is None
assert e.changed_settings[setting].new_value == new_value
@given(SETTINGS_STRATEGY, SETTINGS_STRATEGY)
def test_only_reports_changed_settings(self,
old_settings_list,
new_settings_list):
"""
Settings that were not changed are not reported.
"""
old_settings_dict = dict(old_settings_list)
new_settings_dict = dict(new_settings_list)
e = h2.events.RemoteSettingsChanged.from_settings(
old_settings=old_settings_dict,
new_settings=new_settings_dict,
)
assert len(e.changed_settings) == len(new_settings_dict)
assert (
sorted(list(e.changed_settings.keys())) ==
sorted(list(new_settings_dict.keys()))
)
@given(SETTINGS_STRATEGY, SETTINGS_STRATEGY)
def test_correctly_reports_changed_settings(self,
old_settings_list,
new_settings_list):
"""
Settings that are changed are correctly reported.
"""
old_settings_dict = dict(old_settings_list)
new_settings_dict = dict(new_settings_list)
e = h2.events.RemoteSettingsChanged.from_settings(
old_settings=old_settings_dict,
new_settings=new_settings_dict,
)
for setting, new_value in new_settings_dict.items():
original_value = old_settings_dict.get(setting)
assert e.changed_settings[setting].setting == setting
assert e.changed_settings[setting].original_value == original_value
assert e.changed_settings[setting].new_value == new_value
class TestEventReprs(object):
"""
Events have useful representations.
"""
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
example_informational_headers = [
(':status', '100'),
('server', 'fake-serv/0.1.0')
]
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
def test_requestreceived_repr(self):
"""
RequestReceived has a useful debug representation.
"""
e = h2.events.RequestReceived()
e.stream_id = 5
e.headers = self.example_request_headers
assert repr(e) == (
"<RequestReceived stream_id:5, headers:["
"(':authority', 'example.<EMAIL>'), "
"(':path', '/'), "
"(':scheme', 'https'), "
"(':method', 'GET')]>"
)
def test_responsereceived_repr(self):
"""
ResponseReceived has a useful debug representation.
"""
e = h2.events.ResponseReceived()
e.stream_id = 500
e.headers = self.example_response_headers
assert repr(e) == (
"<ResponseReceived stream_id:500, headers:["
"(':status', '200'), "
"('server', 'fake-serv/0.1.0')]>"
)
def test_trailersreceived_repr(self):
"""
TrailersReceived has a useful debug representation.
"""
e = h2.events.TrailersReceived()
e.stream_id = 62
e.headers = self.example_response_headers
assert repr(e) == (
"<TrailersReceived stream_id:62, headers:["
"(':status', '200'), "
"('server', 'fake-serv/0.1.0')]>"
)
def test_informationalresponsereceived_repr(self):
"""
InformationalResponseReceived has a useful debug representation.
"""
e = h2.events.InformationalResponseReceived()
e.stream_id = 62
e.headers = self.example_informational_headers
assert repr(e) == (
"<InformationalResponseReceived stream_id:62, headers:["
"(':status', '100'), "
"('server', 'fake-serv/0.1.0')]>"
)
def test_datareceived_repr(self):
"""
DataReceived has a useful debug representation.
"""
e = h2.events.DataReceived()
e.stream_id = 888
e.data = b"abcdefghijklmnopqrstuvwxyz"
e.flow_controlled_length = 88
assert repr(e) == (
"<DataReceived stream_id:888, flow_controlled_length:88, "
"data:6162636465666768696a6b6c6d6e6f7071727374>"
)
def test_windowupdated_repr(self):
"""
WindowUpdated has a useful debug representation.
"""
e = h2.events.WindowUpdated()
e.stream_id = 0
e.delta = 2**16
assert repr(e) == "<WindowUpdated stream_id:0, delta:65536>"
def test_remotesettingschanged_repr(self):
"""
RemoteSettingsChanged has a useful debug representation.
"""
e = h2.events.RemoteSettingsChanged()
e.changed_settings = {
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
h2.settings.ChangedSetting(
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, 2**16, 2**15
),
}
assert repr(e) == (
"<RemoteSettingsChanged changed_settings:{ChangedSetting("
"setting=SettingCodes.INITIAL_WINDOW_SIZE, original_value=65536, "
"new_value=32768)}>"
)
def test_pingreceived_repr(self):
"""
PingReceived has a useful debug representation.
"""
e = h2.events.PingReceived()
e.ping_data = b'abcdefgh'
assert repr(e) == "<PingReceived ping_data:6162636465666768>"
def test_pingackreceived_repr(self):
"""
PingAckReceived has a useful debug representation.
"""
e = h2.events.PingAckReceived()
e.ping_data = b'abcdefgh'
assert repr(e) == "<PingAckReceived ping_data:6162636465666768>"
def test_streamended_repr(self):
"""
StreamEnded has a useful debug representation.
"""
e = h2.events.StreamEnded()
e.stream_id = 99
assert repr(e) == "<StreamEnded stream_id:99>"
def test_streamreset_repr(self):
"""
        StreamReset has a useful debug representation.
"""
e = h2.events.StreamReset()
e.stream_id = 919
e.error_code = h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
e.remote_reset = False
assert repr(e) == (
"<StreamReset stream_id:919, "
"error_code:ErrorCodes.ENHANCE_YOUR_CALM, remote_reset:False>"
)
def test_pushedstreamreceived_repr(self):
"""
PushedStreamReceived has a useful debug representation.
"""
e = h2.events.PushedStreamReceived()
e.pushed_stream_id = 50
e.parent_stream_id = 11
e.headers = self.example_request_headers
assert repr(e) == (
"<PushedStreamReceived pushed_stream_id:50, parent_stream_id:11, "
"headers:["
"(':authority', 'example.com'), "
"(':path', '/'), "
"(':scheme', 'https'), "
"(':method', 'GET')]>"
)
def test_settingsacknowledged_repr(self):
"""
SettingsAcknowledged has a useful debug representation.
"""
e = h2.events.SettingsAcknowledged()
e.changed_settings = {
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
h2.settings.ChangedSetting(
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, 2**16, 2**15
),
}
assert repr(e) == (
"<SettingsAcknowledged changed_settings:{ChangedSetting("
"setting=SettingCodes.INITIAL_WINDOW_SIZE, original_value=65536, "
"new_value=32768)}>"
)
def test_priorityupdated_repr(self):
"""
PriorityUpdated has a useful debug representation.
"""
e = h2.events.PriorityUpdated()
e.stream_id = 87
e.weight = 32
e.depends_on = 8
e.exclusive = True
assert repr(e) == (
"<PriorityUpdated stream_id:87, weight:32, depends_on:8, "
"exclusive:True>"
)
@pytest.mark.parametrize("additional_data,data_repr", [
(None, "None"),
(b'some data', "736f6d652064617461")
])
def test_connectionterminated_repr(self, additional_data, data_repr):
"""
ConnectionTerminated has a useful debug representation.
"""
e = h2.events.ConnectionTerminated()
e.error_code = h2.errors.ErrorCodes.INADEQUATE_SECURITY
e.last_stream_id = 33
e.additional_data = additional_data
assert repr(e) == (
"<ConnectionTerminated error_code:ErrorCodes.INADEQUATE_SECURITY, "
"last_stream_id:33, additional_data:%s>" % data_repr
)
def test_alternativeserviceavailable_repr(self):
"""
AlternativeServiceAvailable has a useful debug representation.
"""
e = h2.events.AlternativeServiceAvailable()
e.origin = b"example.com"
e.field_value = b'h2=":8000"; ma=60'
assert repr(e) == (
'<AlternativeServiceAvailable origin:example.com, '
'field_value:h2=":8000"; ma=60>'
)
def test_unknownframereceived_repr(self):
"""
UnknownFrameReceived has a useful debug representation.
"""
e = h2.events.UnknownFrameReceived()
assert repr(e) == '<UnknownFrameReceived>'
def all_events():
"""
Generates all the classes (i.e., events) defined in h2.events.
"""
for _, obj in inspect.getmembers(sys.modules['h2.events']):
# We are only interested in objects that are defined in h2.events;
# objects that are imported from other modules are not of interest.
if hasattr(obj, '__module__') and (obj.__module__ != 'h2.events'):
continue
if inspect.isclass(obj):
yield obj
@pytest.mark.parametrize('event', all_events())
def test_all_events_subclass_from_event(event):
"""
Every event defined in h2.events subclasses from h2.events.Event.
"""
assert (event is h2.events.Event) or issubclass(event, h2.events.Event)
|
rl/replay_buffer.py | qbetterk/user-simulator | 534 | 12648238 | from collections import deque
import random
class ReplayBuffer(object):
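    """Bounded FIFO store of (state, action, reward, next_action, done) tuples:
    once buffer_size experiences are held, add() drops the oldest before appending."""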
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.num_experiences = 0
self.buffer = deque()
def getBatch(self, batch_size):
# random draw N
return random.sample(self.buffer, batch_size)
def size(self):
return self.buffer_size
def add(self, state, action, reward, next_action, done):
new_experience = (state, action, reward, next_action, done)
if self.num_experiences < self.buffer_size:
self.buffer.append(new_experience)
self.num_experiences += 1
else:
self.buffer.popleft()
self.buffer.append(new_experience)
def count(self):
# if buffer is full, return buffer size
# otherwise, return experience counter
return self.num_experiences
def erase(self):
self.buffer = deque()
self.num_experiences = 0 |
nbtutor/ipython/factories/trace_step_factory.py | vincentxavier/nbtutor | 423 | 12648239 | <filename>nbtutor/ipython/factories/trace_step_factory.py
from typing import Dict, List, Optional
from ..factories.stack_frame_factory import create_stack
from ..models.heap_object import HeapObject
from ..models.stack import StackFrame
from ..models.trace_step import TraceStep
def create_trace_step(
stack_frames: List[StackFrame],
heap: Dict[str, HeapObject],
line_numbers: List[int],
stdout: Optional[str] = None,
stderr: Optional[str] = None) -> TraceStep:
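    """Build a TraceStep from the given stack frames, heap objects, line numbers
    and optional captured stdout/stderr."""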
stack = create_stack(stack_frames)
trace_step = TraceStep(stack, heap)
trace_step.line_numbers = line_numbers
trace_step.stdout = stdout
trace_step.stderr = stderr
return trace_step
|