max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
backpack/custom_module/scale_module.py | jabader97/backpack | 395 | 12644558 | """Contains ScaleModule."""
from torch import Tensor
from torch.nn import Module
class ScaleModule(Module):
"""Scale Module scales the input by a constant."""
def __init__(self, weight: float = 1.0):
"""Store scalar weight.
Args:
weight: Initial value for weight. Defaults to 1.0.
Raises:
ValueError: if weight is not a float
"""
super().__init__()
if not isinstance(weight, float):
raise ValueError("Weight must be float.")
self.weight: float = weight
def forward(self, input: Tensor) -> Tensor:
"""Defines forward pass.
Args:
input: input
Returns:
product of input and weight
"""
return input * self.weight
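# --- Usage sketch (added for illustration; not part of the upstream file) ---
# ScaleModule has no trainable parameters: it multiplies its input by a fixed
# Python float. A minimal self-check, assuming only that torch is installed:
if __name__ == "__main__":
    scale = ScaleModule(weight=2.0)
    out = scale(Tensor([1.0, 2.0, 3.0]))
    # Each element is doubled by the constant weight.
    assert out.tolist() == [2.0, 4.0, 6.0]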
|
setup.py | gmr/consulate | 309 | 12644568 | <gh_stars>100-1000
import setuptools
setuptools.setup(
name='abaez.consulate',
version='1.1.0',
description='A Client library and command line application for the Consul',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://consulate.readthedocs.org',
install_requires=['requests>=2.0.0,<3.0.0'],
extras_require={'unixsocket': ['requests-unixsocket>=0.1.4,<=1.0.0']},
license='BSD',
package_data={'': ['LICENSE', 'README.rst']},
packages=['consulate', 'consulate.api', 'consulate.models'],
entry_points=dict(console_scripts=['consulate=consulate.cli:main']),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: System :: Systems Administration',
'Topic :: System :: Clustering',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries'
],
zip_safe=True)
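# Usage sketch (illustrative; not part of the upstream file): the console_scripts
# entry point above installs a `consulate` command, so after `pip install
# abaez.consulate` the CLI is available as `consulate --help`.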
|
graphene_django/utils/tests/test_testing.py | mebel-akvareli/graphene-django | 4,038 | 12644580 | <filename>graphene_django/utils/tests/test_testing.py
import pytest
from .. import GraphQLTestCase
from ...tests.test_types import with_local_registry
from django.test import Client
@with_local_registry
def test_graphql_test_case_deprecated_client_getter():
"""
`GraphQLTestCase._client` getter should raise a pending deprecation warning.
"""
class TestClass(GraphQLTestCase):
GRAPHQL_SCHEMA = True
def runTest(self):
pass
tc = TestClass()
tc._pre_setup()
tc.setUpClass()
with pytest.warns(PendingDeprecationWarning):
tc._client
@with_local_registry
def test_graphql_test_case_deprecated_client_setter():
"""
`GraphQLTestCase._client` setter should raise a pending deprecation warning.
"""
class TestClass(GraphQLTestCase):
GRAPHQL_SCHEMA = True
def runTest(self):
pass
tc = TestClass()
tc._pre_setup()
tc.setUpClass()
with pytest.warns(PendingDeprecationWarning):
tc._client = Client()
|
src/api/filecontent.py | piwaniuk/critic | 216 | 12644592 | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2017 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
class FilecontentError(api.APIError):
pass
class Filecontent(api.APIObject):
"""Representation of some context"""
def getLines(self, first_row=None, last_row=None):
assert first_row is None or isinstance(first_row, int)
assert last_row is None or isinstance(last_row, int)
return self._impl.getLines(first_row, last_row)
class Line:
"""Representation of a line from some version of a file"""
def __init__(self, parts, offset):
self.__parts = parts
self.__offset = offset
@property
def parts(self):
return self.__parts
@property
def offset(self):
return self.__offset
def fetch(critic, repository, blob_sha1, file_obj):
assert isinstance(critic, api.critic.Critic)
assert isinstance(repository, api.repository.Repository)
assert isinstance(blob_sha1, str)
assert isinstance(file_obj, api.file.File)
return api.impl.filecontent.fetch(critic, repository, blob_sha1, file_obj)
|
xdl-algorithm-solution/TDM/src/python/store/store/store.py | hitflame/x-deeplearning | 4,071 | 12644620 | <reponame>hitflame/x-deeplearning
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python
from .store_api import *
class Store:
def __init__(self, config):
self._client = STORE_API_new(config)
if not self._client:
raise RuntimeError("Create store failed, config: {}".format(config))
def get_handle(self):
return self._client
def put(self, key, value):
self._validate()
return STORE_API_put(self._client, key, value)
def load(self, filename):
self._validate()
return STORE_API_load(self._client, filename)
def get(self, key):
self._validate()
value = new_string()
sv = None
ret = STORE_API_get(self._client, key, value)
if ret:
sv = string_value(value)
free_string(value)
return sv
def mget(self, keys):
self._validate()
values = new_string_vector()
ret = STORE_API_mget(self._client, keys, values)
svalues = None
if ret == 1:
svalues = string_vector_value(values)
free_string_vector(values)
return svalues
def mput(self, keys, values):
self._validate()
return STORE_API_mput(self._client, keys, values)
def close(self):
if self._client:
STORE_API_close(self._client)
self._client = None
def _validate(self):
if not self._client:
raise RuntimeError("Store is not validated")
|
lixian_colors.py | 1py/xunlei-lixian | 2,177 | 12644625 |
import os
import sys
def get_console_type(use_colors=True):
if use_colors and sys.stdout.isatty() and sys.stderr.isatty():
import platform
if platform.system() == 'Windows':
import lixian_colors_win32
return lixian_colors_win32.WinConsole
else:
import lixian_colors_linux
return lixian_colors_linux.AnsiConsole
else:
import lixian_colors_console
return lixian_colors_console.Console
console_type = get_console_type()
raw_console_type = get_console_type(False)
def Console(use_colors=True):
return get_console_type(use_colors)()
def get_softspace(output):
if hasattr(output, 'softspace'):
return output.softspace
import lixian_colors_console
if isinstance(output, lixian_colors_console.Console):
return get_softspace(output.output)
return 0
class ScopedColors(console_type):
def __init__(self, *args):
console_type.__init__(self, *args)
def __call__(self):
console = self
class Scoped:
def __enter__(self):
self.stdout = sys.stdout
softspace = get_softspace(sys.stdout)
sys.stdout = console
sys.stdout.softspace = softspace
def __exit__(self, type, value, traceback):
softspace = get_softspace(sys.stdout)
sys.stdout = self.stdout
sys.stdout.softspace = softspace
return Scoped()
class RawScopedColors(raw_console_type):
def __init__(self, *args):
raw_console_type.__init__(self, *args)
def __call__(self):
class Scoped:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
return Scoped()
class RootColors:
def __init__(self, use_colors=True):
self.use_colors = use_colors
def __getattr__(self, name):
return getattr(ScopedColors() if self.use_colors else RawScopedColors(), name)
def __call__(self, use_colors):
assert use_colors in (True, False, None), use_colors
return RootColors(use_colors)
colors = RootColors()
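# --- Usage sketch (illustrative; not part of the upstream file) ---
# `colors` is a lazy proxy: attribute access builds the platform console
# (Win32 or ANSI) and returns a scoped object whose __enter__/__exit__ swap
# sys.stdout, so prints inside the block are colored. The attribute name used
# below is an assumption about the console implementation, not defined here.
#
#     with colors.green():
#         print('download completed')   # rendered in green on capable terminals
#     print('back to the default color')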
|
fostool/model/gconv.py | meng-zha/FOST | 181 | 12644633 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as PyG
from collections import OrderedDict
class GATConv(PyG.MessagePassing):
def __init__(self, in_channels, out_channels, edge_channels, aggr='max', normalize='none', **kwargs):
"""Build a gated graph convolutional layer.
Parameters
----------
in_channels : (int or tuple)
Size of each input sample
out_channels : int
Size of each output sample
edge_channels : (int)
Size of edge feature
aggr : str, optional
The aggregation operator, by default 'max'
normalize : str, optional
The normalizing operator, by default 'none'
**kwargs : optional
Additional arguments for PyG.MessagePassing
"""
super().__init__(aggr=aggr, node_dim=-3, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.edge_channels = edge_channels
self.weight_n = nn.Parameter(torch.Tensor(in_channels, out_channels))
self.weight_e = nn.Parameter(torch.Tensor(edge_channels, out_channels))
self.u = nn.Parameter(torch.Tensor(out_channels, out_channels))
self.v = nn.Parameter(torch.Tensor(out_channels, out_channels))
self.normalize = normalize
if normalize == 'bn':
self.batch_norm = nn.BatchNorm1d(out_channels)
if normalize == 'ln':
self.layer_norm = nn.LayerNorm(out_channels)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_n)
nn.init.xavier_uniform_(self.weight_e)
nn.init.xavier_uniform_(self.u)
nn.init.xavier_uniform_(self.v)
def forward(self, x, edge_index, edge_attr=None, edge_norm=None, size=None):
"""
Parameters
----------
x : torch.Tensor or (torch.Tensor, torch.Tensor)
Input data
edge_index : torch.Tensor
The index of edges
edge_attr : torch.Tensor or None, optional
Edge attributes, by default None
edge_norm : str or None, optional
The normalization type for edges, by default None
size : int, optional
The output dimension, by default None
Returns
-------
torch.Tensor
The enriched representations after message passing
"""
if isinstance(x, tuple) or isinstance(x, list):
x = [None if xi is None else torch.matmul(
xi, self.weight_n) for xi in x]
else:
x = torch.matmul(x, self.weight_n)
edge_attr = torch.matmul(edge_attr, self.weight_e)
return self.propagate(edge_index, size=size, x=x, edge_attr=edge_attr, edge_norm=edge_norm)
def message(self, edge_index_i, x_i, x_j, edge_attr, edge_norm):
"""
Parameters
----------
edge_index_i : torch.Tensor
The index of target nodes
x_i : torch.Tensor
The representations of target nodes indexed by edge_index_i
x_j : torch.Tensor
The representations of source nodes indexed by edge_index_j
edge_attr : torch.Tensor
Edge attributes
edge_norm : torch.Tensor
The normalization for edges
Returns
-------
torch.Tensor
Messages in edges
"""
x_i = torch.matmul(x_i, self.u)
x_j = torch.matmul(x_j, self.u)
gate = torch.sigmoid((x_i * x_j).sum(dim=-1)).unsqueeze(dim=-1)
msg = x_j * gate
if edge_norm is None:
return msg
else:
return msg * edge_norm.reshape(edge_norm.size(0), 1, 1)
def update(self, aggr_out, x):
"""
Parameters
----------
aggr_out : torch.Tensor
Aggregated messages
x : torch.Tensor
Raw inputs
Returns
-------
torch.Tensor
Updated representations
Raises
------
KeyError
Unsupported normalization type
"""
if isinstance(x, tuple) or isinstance(x, list):
x = x[1]
if self.normalize == 'bn':
aggr_out = aggr_out.permute(0, 2, 1)
aggr_out = self.batch_norm(aggr_out)
aggr_out = aggr_out.permute(0, 2, 1)
elif self.normalize == 'ln':
aggr_out = self.layer_norm(aggr_out)
elif self.normalize == 'none':
aggr_out = aggr_out
else:
raise KeyError(
(f'unsupported normalize type: {self.normalize}')
)
return x + aggr_out
class EGNNConv(PyG.MessagePassing):
def __init__(self, in_channels, out_channels, edge_channels, aggr='max', normalize='none', **kwargs):
"""Build an edge-attribute-aware graph convolutional layer.
Parameters
----------
in_channels : (int or tuple)
Size of each input sample
out_channels : int
Size of each output sample
edge_channels : (int)
Size of edge feature
aggr : str, optional
The aggregation operator, by default 'max'
normalize : str, optional
The normalizing operator, by default 'none'
**kwargs : optional
Additional arguments for PyG.MessagePassing
"""
super().__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.edge_channels = edge_channels
self.weight_n = nn.Parameter(torch.Tensor(in_channels, out_channels))
self.weight_e = nn.Parameter(torch.Tensor(edge_channels, out_channels))
self.query = nn.Parameter(torch.Tensor(out_channels, out_channels))
self.key = nn.Parameter(torch.Tensor(out_channels, out_channels))
self.linear_att = nn.Linear(3 * out_channels, 1)
self.linear_out = nn.Linear(2 * out_channels, out_channels)
self.normalize = normalize
if normalize == 'bn':
self.batch_norm = nn.BatchNorm1d(out_channels)
if normalize == 'ln':
self.layer_norm = nn.LayerNorm(out_channels)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_n)
nn.init.xavier_uniform_(self.weight_e)
nn.init.xavier_uniform_(self.query)
nn.init.xavier_uniform_(self.key)
def forward(self, x, edge_index, edge_attr, size=None, indices=None, edge_norm=None):
"""
Parameters
----------
x : torch.Tensor or (torch.Tensor, torch.Tensor)
Input data
edge_index : torch.Tensor
The index of edges
edge_attr : torch.Tensor or None, optional
Edge attributes, by default None
size : int, optional
The output dimension, by default None
indices: torch.Tensor or None, optional
The node indices
edge_norm : str or None, optional
The normalization type for edges, by default None
Returns
-------
torch.Tensor
The enriched representations after message passing
"""
if isinstance(x, tuple) or isinstance(x, list):
x = [None if xi is None else torch.matmul(
xi, self.weight_n) for xi in x]
else:
x = torch.matmul(x, self.weight_n)
edge_attr = torch.matmul(edge_attr, self.weight_e)
return self.propagate(edge_index, size=size, x=x, edge_attr=edge_attr, indices=indices, edge_norm=edge_norm)
def message(self, x_j, x_i, edge_attr, edge_norm):
"""
Parameters
----------
x_j : torch.Tensor
The representations of source nodes
x_i : torch.Tensor
The representations of target nodes
edge_attr : torch.Tensor
Edge attributes
edge_norm : torch.Tensor
The normalization factors for edges
Returns
-------
torch.Tensor
The messages in edges
"""
# cal att of shape [B, E, 1]
query = torch.matmul(x_j, self.query)
key = torch.matmul(x_i, self.key)
edge_attr = edge_attr.unsqueeze(dim=1).expand_as(query)
att_feature = torch.cat([query, key, edge_attr], dim=-1)
att = torch.sigmoid(self.linear_att(att_feature))
# gate of shape [1, E, C]
gate = torch.sigmoid(edge_attr)
msg = att * x_j * gate
if edge_norm is None:
return msg
else:
return msg * edge_norm.reshape(edge_norm.size(0), 1, 1)
def update(self, aggr_out, x, indices):
"""
Parameters
----------
aggr_out : torch.Tensor
Aggregated messages
x : torch.Tensor
Raw inputs
indices: torch.Tensor
Node indexes
Returns
-------
torch.Tensor
Updated representations
Raises
------
KeyError
Unsupported normalization type
"""
if isinstance(x, tuple) or isinstance(x, list):
x = x[1]
aggr_out = self.linear_out(torch.cat([x, aggr_out], dim=-1))
if self.normalize == 'bn':
aggr_out = aggr_out.permute(0, 2, 1)
aggr_out = self.batch_norm(aggr_out)
aggr_out = aggr_out.permute(0, 2, 1)
elif self.normalize == 'ln':
aggr_out = self.layer_norm(aggr_out)
elif self.normalize == 'none':
aggr_out = aggr_out
else:
raise KeyError(
(f'unsupported normalize type: {self.normalize}')
)
return x + aggr_out
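# --- Usage sketch (illustrative; not part of the upstream file) ---
# Both layers follow the torch_geometric MessagePassing interface. Shapes below
# are assumptions inferred from node_dim=-3 in GATConv (nodes x batch x channels):
#
#     conv = GATConv(in_channels=16, out_channels=32, edge_channels=8, normalize='ln')
#     x = torch.randn(num_nodes, batch_size, 16)
#     edge_index = torch.randint(0, num_nodes, (2, num_edges))
#     edge_attr = torch.randn(num_edges, 8)
#     out = conv(x, edge_index, edge_attr)   # -> [num_nodes, batch_size, 32]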
|
aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/ResizeMultiZoneClusterNodeCountRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12644659 | <filename>aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/ResizeMultiZoneClusterNodeCountRequest.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class ResizeMultiZoneClusterNodeCountRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'ResizeMultiZoneClusterNodeCount','hbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PrimaryVSwitchId(self):
return self.get_query_params().get('PrimaryVSwitchId')
def set_PrimaryVSwitchId(self,PrimaryVSwitchId):
self.add_query_param('PrimaryVSwitchId',PrimaryVSwitchId)
def get_StandbyVSwitchId(self):
return self.get_query_params().get('StandbyVSwitchId')
def set_StandbyVSwitchId(self,StandbyVSwitchId):
self.add_query_param('StandbyVSwitchId',StandbyVSwitchId)
def get_LogNodeCount(self):
return self.get_query_params().get('LogNodeCount')
def set_LogNodeCount(self,LogNodeCount):
self.add_query_param('LogNodeCount',LogNodeCount)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_PrimaryCoreNodeCount(self):
return self.get_query_params().get('PrimaryCoreNodeCount')
def set_PrimaryCoreNodeCount(self,PrimaryCoreNodeCount):
self.add_query_param('PrimaryCoreNodeCount',PrimaryCoreNodeCount)
def get_CoreNodeCount(self):
return self.get_query_params().get('CoreNodeCount')
def set_CoreNodeCount(self,CoreNodeCount):
self.add_query_param('CoreNodeCount',CoreNodeCount)
def get_StandbyCoreNodeCount(self):
return self.get_query_params().get('StandbyCoreNodeCount')
def set_StandbyCoreNodeCount(self,StandbyCoreNodeCount):
self.add_query_param('StandbyCoreNodeCount',StandbyCoreNodeCount)
def get_ArbiterVSwitchId(self):
return self.get_query_params().get('ArbiterVSwitchId')
def set_ArbiterVSwitchId(self,ArbiterVSwitchId):
self.add_query_param('ArbiterVSwitchId',ArbiterVSwitchId)
|
ants/utils/iMath.py | xemio/ANTsPy | 338 | 12644670 | <filename>ants/utils/iMath.py
__all__ = ['iMath',
'image_math',
'multiply_images',
'iMath_get_largest_component',
'iMath_normalize',
'iMath_truncate_intensity',
'iMath_sharpen',
'iMath_pad',
'iMath_maurer_distance',
'iMath_perona_malik',
'iMath_grad',
'iMath_laplacian',
'iMath_canny',
'iMath_histogram_equalization',
'iMath_MD',
'iMath_ME',
'iMath_MO',
'iMath_MC',
'iMath_GD',
'iMath_GE',
'iMath_GO',
'iMath_GC',
'iMath_fill_holes',
'iMath_get_largest_component',
'iMath_normalize',
'iMath_truncate_intensity',
'iMath_sharpen',
'iMath_propagate_labels_through_mask']
from .process_args import _int_antsProcessArguments
from .. import utils
_iMathOps = {'FillHoles',
'GetLargestComponent',
'Normalize',
'Sharpen',
'Pad',
'D',
'MaurerDistance',
'PeronaMalik',
'Grad',
'Laplacian',
'Canny',
'HistogramEqualization',
'MD',
'ME',
'MO',
'MC',
'GD',
'GE',
'GO',
'GC',
'FillHoles',
'GetLargestComponent',
'LabelStats',
'Normalize',
'TruncateIntensity',
'Sharpen',
'PropagateLabelsThroughMask'}
def multiply_images(image1, image2):
return image1 * image2
def iMath(image, operation, *args):
"""
Perform various (often mathematical) operations on the input image/s.
Additional parameters should be specific for each operation.
See the full iMath in ANTs, on which this function is based.
ANTsR function: `iMath`
Arguments
---------
image : ANTsImage
input object, usually antsImage
operation
a string e.g. "GetLargestComponent" ... the special case of "GetOperations"
or "GetOperationsFull" will return a list of operations and brief
description. Some operations may not be valid (WIP), but most are.
*args : non-keyword arguments
additional parameters specific to the operation
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.iMath(img, 'Canny', 1, 5, 12)
"""
if operation not in _iMathOps:
raise ValueError('Operation not recognized')
imagedim = image.dimension
outimage = image.clone()
args = [imagedim, outimage, operation, image] + [a for a in args]
processed_args = _int_antsProcessArguments(args)
libfn = utils.get_lib_fn('iMath')
libfn(processed_args)
return outimage
image_math = iMath
def iMath_ops():
return _iMathOps
def iMath_canny(image, sigma, lower, upper):
return iMath(image, 'Canny', sigma, lower, upper)
def iMath_distance_map(image, use_spacing=True):
return iMath(image, 'DistanceMap', use_spacing)
def iMath_fill_holes(image, hole_type=2):
return iMath(image, 'FillHoles', hole_type)
def iMath_GC(image, radius=1):
return iMath(image, 'GC', radius)
def iMath_GD(image, radius=1):
return iMath(image, 'GD', radius)
def iMath_GE(image, radius=1):
return iMath(image, 'GE', radius)
def iMath_GO(image, radius=1):
return iMath(image, 'GO', radius)
def iMath_get_largest_component(image, min_size=50):
return iMath(image, 'GetLargestComponent', min_size)
def iMath_grad(image, sigma=0.5, normalize=False):
return iMath(image, 'Grad', sigma, normalize)
def iMath_histogram_equalization(image, alpha, beta):
return iMath(image, 'HistogramEqualization', alpha, beta)
def iMath_laplacian(image, sigma=0.5, normalize=False):
return iMath(image, 'Laplacian', sigma, normalize)
def iMath_MC(image, radius=1, value=1, shape=1, parametric=False, lines=3, thickness=1, include_center=False):
return iMath(image, 'MC', radius, value, shape, parametric, lines, thickness, include_center)
def iMath_MD(image, radius=1, value=1, shape=1, parametric=False, lines=3, thickness=1, include_center=False):
return iMath(image, 'MD', radius, value, shape, parametric, lines, thickness, include_center)
def iMath_ME(image, radius=1, value=1, shape=1, parametric=False, lines=3, thickness=1, include_center=False):
return iMath(image, 'ME', radius, value, shape, parametric, lines, thickness, include_center)
def iMath_MO(image, radius=1, value=1, shape=1, parametric=False, lines=3, thickness=1, include_center=False):
return iMath(image, 'MO', radius, value, shape, parametric, lines, thickness, include_center)
def iMath_maurer_distance(image, foreground=1):
return iMath(image, 'MaurerDistance', foreground)
def iMath_normalize(image):
return iMath(image, 'Normalize')
def iMath_pad(image, padding):
return iMath(image, 'Pad', padding)
def iMath_perona_malik(image, conductance=0.25, n_iterations=1):
return iMath(image, 'PeronaMalik', conductance, n_iterations)
def iMath_sharpen(image):
return iMath(image, 'Sharpen')
def iMath_propagate_labels_through_mask(image, labels, stopping_value=100, propagation_method=0):
"""
>>> import ants
>>> wms = ants.image_read('~/desktop/wms.nii.gz')
>>> thal = ants.image_read('~/desktop/thal.nii.gz')
>>> img2 = ants.iMath_propagate_labels_through_mask(wms, thal, 500, 0)
"""
return iMath(image, 'PropagateLabelsThroughMask', labels, stopping_value, propagation_method)
def iMath_truncate_intensity(image, lower_q, upper_q, n_bins=64):
"""
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> ants.iMath_truncate_intensity( img, 0.2, 0.8 )
"""
return iMath(image, 'TruncateIntensity', lower_q, upper_q, n_bins )
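# --- Usage sketch (illustrative; not part of the upstream file) ---
# Chaining a few of the wrappers above on a sample image shipped with ANTsPy:
#
#     import ants
#     img = ants.image_read(ants.get_ants_data('r16'))
#     img = ants.iMath_truncate_intensity(img, 0.05, 0.95)  # clip intensity tails
#     img = ants.iMath_normalize(img)                        # rescale intensities
#     edges = ants.iMath_canny(img, 1, 5, 12)                # edge map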
|
aiogram/types/user_profile_photos.py | andrew-ld/aiogram | 2,744 | 12644679 | <filename>aiogram/types/user_profile_photos.py
import typing
from . import base
from . import fields
from .photo_size import PhotoSize
class UserProfilePhotos(base.TelegramObject):
"""
This object represent a user's profile pictures.
https://core.telegram.org/bots/api#userprofilephotos
"""
total_count: base.Integer = fields.Field()
photos: typing.List[typing.List[PhotoSize]] = fields.ListOfLists(base=PhotoSize)
|
dev/Tools/build/waf-1.7.13/waflib/extras/unity.py | jeikabu/lumberyard | 1,738 | 12644680 | #! /usr/bin/env python
# encoding: utf-8
"""
Take a group of C++ files and compile them at once.
def options(opt):
opt.load('compiler_cxx unity')
"""
import re
from waflib import Task, Options, Logs
from waflib.Tools import ccroot, cxx, c_preproc
from waflib.TaskGen import extension, taskgen_method
MAX_BATCH = 20
def options(opt):
global MAX_BATCH
opt.add_option('--batchsize', action='store', dest='batchsize', type='int', default=MAX_BATCH, help='batch size (0 for no batch)')
class unity(Task.Task):
color = 'BLUE'
scan = c_preproc.scan
def run(self):
lst = ['#include "%s"\n' % node.abspath() for node in self.inputs]
txt = ''.join(lst)
self.outputs[0].write(txt)
@taskgen_method
def batch_size(self):
return Options.options.batchsize
@extension('.cpp', '.cc', '.cxx', '.C', '.c++')
def make_cxx_batch(self, node):
cnt = self.batch_size()
if cnt <= 1:
tsk = self.create_compiled_task('cxx', node)
return tsk
try:
self.cnt_cxx
except AttributeError:
self.cnt_cxx = 0
x = getattr(self, 'master_cxx', None)
if not x or len(x.inputs) >= cnt:
x = self.master_cxx = self.create_task('unity')
cxxnode = node.parent.find_or_declare('union_%s_%d_%d.cxx' % (self.idx, self.cnt_cxx, cnt))
self.master_cxx.outputs = [cxxnode]
self.cnt_cxx += 1
self.create_compiled_task('cxx', cxxnode)
x.inputs.append(node)
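# Usage sketch (illustrative; not part of the upstream file): load the tool in a
# wscript and choose the batch size on the command line.
#
#     def options(opt):
#         opt.load('compiler_cxx unity')
#     def configure(conf):
#         conf.load('compiler_cxx unity')
#
#     $ waf configure build --batchsize=30   # 0 disables batching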
|
mmdet/datasets/pipelines/rtransforms.py | cameronchoi/r3det-docker | 176 | 12644714 | from ..builder import PIPELINES
from .transforms import Resize, RandomFlip, RandomCrop
import numpy as np
@PIPELINES.register_module()
class RResize(Resize):
"""
Resize images & rotated bbox
Inherit Resize pipeline class to handle rotated bboxes
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None):
super(RResize, self).__init__(img_scale=img_scale,
multiscale_mode=multiscale_mode,
ratio_range=ratio_range,
keep_ratio=True)
def _resize_bboxes(self, results):
for key in results.get('bbox_fields', []):
bboxes = results[key]
orig_shape = bboxes.shape
bboxes = bboxes.reshape((-1, 5))
w_scale, h_scale, _, _ = results['scale_factor']
bboxes[:, 0] *= w_scale
bboxes[:, 1] *= h_scale
bboxes[:, 2:4] *= np.sqrt(w_scale * h_scale)
results[key] = bboxes.reshape(orig_shape)
@PIPELINES.register_module()
class RRandomFlip(RandomFlip):
"""Flip the image & bbox & mask.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
Args:
flip_ratio (float, optional): The flipping probability.
"""
def bbox_flip(self, bboxes, img_shape, direction):
"""Flip bboxes horizontally or vertically.
Args:
bboxes(ndarray): shape (..., 5*k)
img_shape(tuple): (height, width)
"""
assert bboxes.shape[-1] % 5 == 0
orig_shape = bboxes.shape
bboxes = bboxes.reshape((-1, 5))
flipped = bboxes.copy()
if direction == 'horizontal':
flipped[:, 0] = img_shape[1] - bboxes[:, 0] - 1
elif direction == 'vertical':
flipped[:, 1] = img_shape[0] - bboxes[:, 1] - 1
else:
raise ValueError(
'Invalid flipping direction "{}"'.format(direction))
rotated_flag = (bboxes[:, 4] != -np.pi / 2)
flipped[rotated_flag, 4] = -np.pi / 2 - bboxes[rotated_flag, 4]
flipped[rotated_flag, 2] = bboxes[rotated_flag, 3]
flipped[rotated_flag, 3] = bboxes[rotated_flag, 2]
return flipped.reshape(orig_shape)
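# --- Usage sketch (illustrative; not part of the upstream file) ---
# Boxes are (cx, cy, w, h, angle). A horizontal flip mirrors cx; for boxes whose
# angle differs from -pi/2 it also maps angle -> -pi/2 - angle and swaps w/h.
# The numbers below are hypothetical:
#
#     flip = RRandomFlip(flip_ratio=1.0)
#     boxes = np.array([[10., 20., 4., 2., -0.3]])
#     out = flip.bbox_flip(boxes, img_shape=(100, 50), direction='horizontal')
#     # -> [[39., 20., 2., 4., -1.2708]]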
|
dalib/adaptation/mdd.py | neka-nat/Transfer-Learning-Library | 1,474 | 12644716 | """
@author: <NAME>
@contact: <EMAIL>
"""
from typing import Optional, List, Dict, Tuple, Callable
import torch.nn as nn
import torch.nn.functional as F
import torch
from dalib.modules.grl import WarmStartGradientReverseLayer
class MarginDisparityDiscrepancy(nn.Module):
r"""The margin disparity discrepancy (MDD) proposed in `Bridging Theory and Algorithm for Domain Adaptation (ICML 2019) <https://arxiv.org/abs/1904.05801>`_.
MDD can measure the distribution discrepancy in domain adaptation.
The :math:`y^s` and :math:`y^t` are logits output by the main head on the source and target domain respectively.
The :math:`y_{adv}^s` and :math:`y_{adv}^t` are logits output by the adversarial head.
The definition can be described as:
.. math::
\mathcal{D}_{\gamma}(\hat{\mathcal{S}}, \hat{\mathcal{T}}) =
-\gamma \mathbb{E}_{y^s, y_{adv}^s \sim\hat{\mathcal{S}}} L_s (y^s, y_{adv}^s) +
\mathbb{E}_{y^t, y_{adv}^t \sim\hat{\mathcal{T}}} L_t (y^t, y_{adv}^t),
where :math:`\gamma` is a margin hyper-parameter, :math:`L_s` refers to the disparity function defined on the source domain
and :math:`L_t` refers to the disparity function defined on the target domain.
Args:
source_disparity (callable): The disparity function defined on the source domain, :math:`L_s`.
target_disparity (callable): The disparity function defined on the target domain, :math:`L_t`.
margin (float): margin :math:`\gamma`. Default: 4
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- y_s: output :math:`y^s` by the main head on the source domain
- y_s_adv: output :math:`y^s` by the adversarial head on the source domain
- y_t: output :math:`y^t` by the main head on the target domain
- y_t_adv: output :math:`y_{adv}^t` by the adversarial head on the target domain
- w_s (optional): instance weights for source domain
- w_t (optional): instance weights for target domain
Examples::
>>> num_outputs = 2
>>> batch_size = 10
>>> loss = MarginDisparityDiscrepancy(margin=4., source_disparity=F.l1_loss, target_disparity=F.l1_loss)
>>> # output from source domain and target domain
>>> y_s, y_t = torch.randn(batch_size, num_outputs), torch.randn(batch_size, num_outputs)
>>> # adversarial output from source domain and target domain
>>> y_s_adv, y_t_adv = torch.randn(batch_size, num_outputs), torch.randn(batch_size, num_outputs)
>>> output = loss(y_s, y_s_adv, y_t, y_t_adv)
"""
def __init__(self, source_disparity: Callable, target_disparity: Callable,
margin: Optional[float] = 4, reduction: Optional[str] = 'mean'):
super(MarginDisparityDiscrepancy, self).__init__()
self.margin = margin
self.reduction = reduction
self.source_disparity = source_disparity
self.target_disparity = target_disparity
def forward(self, y_s: torch.Tensor, y_s_adv: torch.Tensor, y_t: torch.Tensor, y_t_adv: torch.Tensor,
w_s: Optional[torch.Tensor] = None, w_t: Optional[torch.Tensor] = None) -> torch.Tensor:
source_loss = -self.margin * self.source_disparity(y_s, y_s_adv)
target_loss = self.target_disparity(y_t, y_t_adv)
if w_s is None:
w_s = torch.ones_like(source_loss)
source_loss = source_loss * w_s
if w_t is None:
w_t = torch.ones_like(target_loss)
target_loss = target_loss * w_t
loss = source_loss + target_loss
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
class ClassificationMarginDisparityDiscrepancy(MarginDisparityDiscrepancy):
r"""
The margin disparity discrepancy (MDD) proposed in `Bridging Theory and Algorithm for Domain Adaptation (ICML 2019) <https://arxiv.org/abs/1904.05801>`_.
It measures the distribution discrepancy in domain adaptation
for classification.
When margin is equal to 1, it's also called disparity discrepancy (DD).
The :math:`y^s` and :math:`y^t` are logits output by the main classifier on the source and target domain respectively.
The :math:`y_{adv}^s` and :math:`y_{adv}^t` are logits output by the adversarial classifier.
They are expected to contain raw, unnormalized scores for each class.
The definition can be described as:
.. math::
\mathcal{D}_{\gamma}(\hat{\mathcal{S}}, \hat{\mathcal{T}}) =
\gamma \mathbb{E}_{y^s, y_{adv}^s \sim\hat{\mathcal{S}}} \log\left(\frac{\exp(y_{adv}^s[h_{y^s}])}{\sum_j \exp(y_{adv}^s[j])}\right) +
\mathbb{E}_{y^t, y_{adv}^t \sim\hat{\mathcal{T}}} \log\left(1-\frac{\exp(y_{adv}^t[h_{y^t}])}{\sum_j \exp(y_{adv}^t[j])}\right),
where :math:`\gamma` is a margin hyper-parameter and :math:`h_y` refers to the predicted label when the logits output is :math:`y`.
You can see more details in `Bridging Theory and Algorithm for Domain Adaptation <https://arxiv.org/abs/1904.05801>`_.
Args:
margin (float): margin :math:`\gamma`. Default: 4
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- y_s: logits output :math:`y^s` by the main classifier on the source domain
- y_s_adv: logits output :math:`y^s` by the adversarial classifier on the source domain
- y_t: logits output :math:`y^t` by the main classifier on the target domain
- y_t_adv: logits output :math:`y_{adv}^t` by the adversarial classifier on the target domain
Shape:
- Inputs: :math:`(minibatch, C)` where C = number of classes, or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
with :math:`K \geq 1` in the case of `K`-dimensional loss.
- Output: scalar. If :attr:`reduction` is ``'none'``, then the same size as the target: :math:`(minibatch)`, or
:math:`(minibatch, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss.
Examples::
>>> num_classes = 2
>>> batch_size = 10
>>> loss = ClassificationMarginDisparityDiscrepancy(margin=4.)
>>> # logits output from source domain and target domain
>>> y_s, y_t = torch.randn(batch_size, num_classes), torch.randn(batch_size, num_classes)
>>> # adversarial logits output from source domain and target domain
>>> y_s_adv, y_t_adv = torch.randn(batch_size, num_classes), torch.randn(batch_size, num_classes)
>>> output = loss(y_s, y_s_adv, y_t, y_t_adv)
"""
def __init__(self, margin: Optional[float] = 4, **kwargs):
def source_discrepancy(y: torch.Tensor, y_adv: torch.Tensor):
_, prediction = y.max(dim=1)
return F.cross_entropy(y_adv, prediction, reduction='none')
def target_discrepancy(y: torch.Tensor, y_adv: torch.Tensor):
_, prediction = y.max(dim=1)
return -F.nll_loss(shift_log(1. - F.softmax(y_adv, dim=1)), prediction, reduction='none')
super(ClassificationMarginDisparityDiscrepancy, self).__init__(source_discrepancy, target_discrepancy, margin,
**kwargs)
class RegressionMarginDisparityDiscrepancy(MarginDisparityDiscrepancy):
r"""
The margin disparity discrepancy (MDD) proposed in `Bridging Theory and Algorithm for Domain Adaptation (ICML 2019) <https://arxiv.org/abs/1904.05801>`_.
It measures the distribution discrepancy in domain adaptation
for regression.
The :math:`y^s` and :math:`y^t` are logits output by the main regressor on the source and target domain respectively.
The :math:`y_{adv}^s` and :math:`y_{adv}^t` are logits output by the adversarial regressor.
They are expected to contain ``normalized`` values for each factors.
The definition can be described as:
.. math::
\mathcal{D}_{\gamma}(\hat{\mathcal{S}}, \hat{\mathcal{T}}) =
-\gamma \mathbb{E}_{y^s, y_{adv}^s \sim\hat{\mathcal{S}}} L (y^s, y_{adv}^s) +
\mathbb{E}_{y^t, y_{adv}^t \sim\hat{\mathcal{T}}} L (y^t, y_{adv}^t),
where :math:`\gamma` is a margin hyper-parameter and :math:`L` refers to the disparity function defined on both domains.
You can see more details in `Bridging Theory and Algorithm for Domain Adaptation <https://arxiv.org/abs/1904.05801>`_.
Args:
loss_function (callable): The disparity function defined on both domains, :math:`L`.
margin (float): margin :math:`\gamma`. Default: 1
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- y_s: logits output :math:`y^s` by the main regressor on the source domain
- y_s_adv: logits output :math:`y^s` by the adversarial regressor on the source domain
- y_t: logits output :math:`y^t` by the main regressor on the target domain
- y_t_adv: logits output :math:`y_{adv}^t` by the adversarial regressor on the target domain
Shape:
- Inputs: :math:`(minibatch, F)` where F = number of factors, or :math:`(minibatch, F, d_1, d_2, ..., d_K)`
with :math:`K \geq 1` in the case of `K`-dimensional loss.
- Output: scalar. The same size as the target: :math:`(minibatch)`, or
:math:`(minibatch, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss.
Examples::
>>> num_outputs = 2
>>> batch_size = 10
>>> loss = RegressionMarginDisparityDiscrepancy(margin=4., loss_function=F.l1_loss)
>>> # output from source domain and target domain
>>> y_s, y_t = torch.randn(batch_size, num_outputs), torch.randn(batch_size, num_outputs)
>>> # adversarial output from source domain and target domain
>>> y_s_adv, y_t_adv = torch.randn(batch_size, num_outputs), torch.randn(batch_size, num_outputs)
>>> output = loss(y_s, y_s_adv, y_t, y_t_adv)
"""
def __init__(self, margin: Optional[float] = 1, loss_function=F.l1_loss, **kwargs):
def source_discrepancy(y: torch.Tensor, y_adv: torch.Tensor):
return loss_function(y_adv, y.detach(), reduction='none')
def target_discrepancy(y: torch.Tensor, y_adv: torch.Tensor):
return loss_function(y_adv, y.detach(), reduction='none')
super(RegressionMarginDisparityDiscrepancy, self).__init__(source_discrepancy, target_discrepancy, margin,
**kwargs)
def shift_log(x: torch.Tensor, offset: Optional[float] = 1e-6) -> torch.Tensor:
r"""
First shift, then calculate log, which can be described as:
.. math::
y = \min(\log(x+\text{offset}), 0)
Used to avoid the gradient explosion problem in log(x) function when x=0.
Args:
x (torch.Tensor): input tensor
offset (float, optional): offset size. Default: 1e-6
.. note::
Input tensor is expected to fall in [0., 1.] and the output tensor falls in [log(offset), 0]
"""
return torch.log(torch.clamp(x + offset, max=1.))
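# Worked example (illustrative; not part of the upstream file): for an exact zero
# input, shift_log(torch.tensor([0.])) returns log(1e-6) ~= -13.8155 instead of
# the -inf that torch.log would produce, which keeps gradients finite.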
class GeneralModule(nn.Module):
def __init__(self, backbone: nn.Module, num_classes: int, bottleneck: nn.Module,
head: nn.Module, adv_head: nn.Module, grl: Optional[WarmStartGradientReverseLayer] = None,
finetune: Optional[bool] = True):
super(GeneralModule, self).__init__()
self.backbone = backbone
self.num_classes = num_classes
self.bottleneck = bottleneck
self.head = head
self.adv_head = adv_head
self.finetune = finetune
self.grl_layer = WarmStartGradientReverseLayer(alpha=1.0, lo=0.0, hi=0.1, max_iters=1000,
auto_step=False) if grl is None else grl
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
""""""
features = self.backbone(x)
features = self.bottleneck(features)
outputs = self.head(features)
features_adv = self.grl_layer(features)
outputs_adv = self.adv_head(features_adv)
if self.training:
return outputs, outputs_adv
else:
return outputs
def step(self):
"""
Gradually increase :math:`\lambda` in GRL layer.
"""
self.grl_layer.step()
def get_parameters(self, base_lr=1.0) -> List[Dict]:
"""
Return a parameters list which decides optimization hyper-parameters,
such as the relative learning rate of each layer.
"""
params = [
{"params": self.backbone.parameters(), "lr": 0.1 * base_lr if self.finetune else base_lr},
{"params": self.bottleneck.parameters(), "lr": base_lr},
{"params": self.head.parameters(), "lr": base_lr},
{"params": self.adv_head.parameters(), "lr": base_lr}
]
return params
class ImageClassifier(GeneralModule):
r"""Classifier for MDD.
Classifier for MDD has one backbone, one bottleneck, while two classifier heads.
The first classifier head is used for final predictions.
The adversarial classifier head is only used when calculating MarginDisparityDiscrepancy.
Args:
backbone (torch.nn.Module): Any backbone to extract 1-d features from data
num_classes (int): Number of classes
bottleneck_dim (int, optional): Feature dimension of the bottleneck layer. Default: 1024
width (int, optional): Feature dimension of the classifier head. Default: 1024
grl (nn.Module): Gradient reverse layer. Will use default parameters if None. Default: None.
finetune (bool, optional): Whether use 10x smaller learning rate in the backbone. Default: True
Inputs:
- x (tensor): input data
Outputs:
- outputs: logits outputs by the main classifier
- outputs_adv: logits outputs by the adversarial classifier
Shapes:
- x: :math:`(minibatch, *)`, same shape as the input of the `backbone`.
- outputs, outputs_adv: :math:`(minibatch, C)`, where C means the number of classes.
.. note::
Remember to call function `step()` after function `forward()` **during training phase**! For instance,
>>> # x is inputs, classifier is an ImageClassifier
>>> outputs, outputs_adv = classifier(x)
>>> classifier.step()
"""
def __init__(self, backbone: nn.Module, num_classes: int,
bottleneck_dim: Optional[int] = 1024, width: Optional[int] = 1024,
grl: Optional[WarmStartGradientReverseLayer] = None, finetune=True, pool_layer=None):
grl_layer = WarmStartGradientReverseLayer(alpha=1.0, lo=0.0, hi=0.1, max_iters=1000,
auto_step=False) if grl is None else grl
if pool_layer is None:
pool_layer = nn.Sequential(
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten()
)
bottleneck = nn.Sequential(
pool_layer,
nn.Linear(backbone.out_features, bottleneck_dim),
nn.BatchNorm1d(bottleneck_dim),
nn.ReLU(),
nn.Dropout(0.5)
)
bottleneck[1].weight.data.normal_(0, 0.005)
bottleneck[1].bias.data.fill_(0.1)
# The classifier head used for final predictions.
head = nn.Sequential(
nn.Linear(bottleneck_dim, width),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(width, num_classes)
)
# The adversarial classifier head
adv_head = nn.Sequential(
nn.Linear(bottleneck_dim, width),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(width, num_classes)
)
for dep in range(2):
head[dep * 3].weight.data.normal_(0, 0.01)
head[dep * 3].bias.data.fill_(0.0)
adv_head[dep * 3].weight.data.normal_(0, 0.01)
adv_head[dep * 3].bias.data.fill_(0.0)
super(ImageClassifier, self).__init__(backbone, num_classes, bottleneck,
head, adv_head, grl_layer, finetune)
class ImageRegressor(GeneralModule):
r"""Regressor for MDD.
Regressor for MDD has one backbone, one bottleneck, while two regressor heads.
The first regressor head is used for final predictions.
The adversarial regressor head is only used when calculating MarginDisparityDiscrepancy.
Args:
backbone (torch.nn.Module): Any backbone to extract 1-d features from data
num_factors (int): Number of factors
bottleneck_dim (int, optional): Feature dimension of the bottleneck layer. Default: 1024
width (int, optional): Feature dimension of the classifier head. Default: 1024
finetune (bool, optional): Whether use 10x smaller learning rate in the backbone. Default: True
Inputs:
- x (Tensor): input data
Outputs: (outputs, outputs_adv)
- outputs: outputs by the main regressor
- outputs_adv: outputs by the adversarial regressor
Shapes:
- x: :math:`(minibatch, *)`, same shape as the input of the `backbone`.
- outputs, outputs_adv: :math:`(minibatch, F)`, where F means the number of factors.
.. note::
Remember to call function `step()` after function `forward()` **during training phase**! For instance,
>>> # x is inputs, regressor is an ImageRegressor
>>> outputs, outputs_adv = regressor(x)
>>> regressor.step()
"""
def __init__(self, backbone: nn.Module, num_factors: int, bottleneck = None, head=None, adv_head=None,
bottleneck_dim: Optional[int] = 1024, width: Optional[int] = 1024, finetune=True):
grl_layer = WarmStartGradientReverseLayer(alpha=1.0, lo=0.0, hi=0.1, max_iters=1000, auto_step=False)
if bottleneck is None:
bottleneck = nn.Sequential(
nn.Conv2d(backbone.out_features, bottleneck_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(bottleneck_dim),
nn.ReLU(),
)
# The regressor head used for final predictions.
if head is None:
head = nn.Sequential(
nn.Conv2d(bottleneck_dim, width, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(width),
nn.ReLU(),
nn.Conv2d(width, width, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(width),
nn.ReLU(),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
nn.Linear(width, num_factors),
nn.Sigmoid()
)
for layer in head:
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
nn.init.normal_(layer.weight, 0, 0.01)
nn.init.constant_(layer.bias, 0)
# The adversarial regressor head
if adv_head is None:
adv_head = nn.Sequential(
nn.Conv2d(bottleneck_dim, width, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(width),
nn.ReLU(),
nn.Conv2d(width, width, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(width),
nn.ReLU(),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
nn.Linear(width, num_factors),
nn.Sigmoid()
)
for layer in adv_head:
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
nn.init.normal_(layer.weight, 0, 0.01)
nn.init.constant_(layer.bias, 0)
super(ImageRegressor, self).__init__(backbone, num_factors, bottleneck,
head, adv_head, grl_layer, finetune)
self.num_factors = num_factors
|
openapi_python_client/schema/openapi_schema_pydantic/security_scheme.py | oterrier/openapi-python-client | 172 | 12644749 | from typing import Optional
from pydantic import AnyUrl, BaseModel, Field
from .oauth_flows import OAuthFlows
class SecurityScheme(BaseModel):
"""
Defines a security scheme that can be used by the operations.
Supported schemes are HTTP authentication,
an API key (either as a header, a cookie parameter or as a query parameter),
OAuth2's common flows (implicit, password, client credentials and authorization code)
as defined in [RFC6749](https://tools.ietf.org/html/rfc6749),
and [OpenID Connect Discovery](https://tools.ietf.org/html/draft-ietf-oauth-discovery-06).
References:
- https://swagger.io/docs/specification/authentication/
- https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#componentsObject
"""
type: str
description: Optional[str] = None
name: Optional[str] = None
security_scheme_in: Optional[str] = Field(alias="in")
scheme: Optional[str] = None
bearerFormat: Optional[str] = None
flows: Optional[OAuthFlows] = None
openIdConnectUrl: Optional[AnyUrl] = None
class Config: # pylint: disable=missing-class-docstring
allow_population_by_field_name = True
schema_extra = {
"examples": [
{"type": "http", "scheme": "basic"},
{"type": "apiKey", "name": "api_key", "in": "header"},
{"type": "http", "scheme": "bearer", "bearerFormat": "JWT"},
{
"type": "oauth2",
"flows": {
"implicit": {
"authorizationUrl": "https://example.com/api/oauth/dialog",
"scopes": {"write:pets": "modify pets in your account", "read:pets": "read your pets"},
}
},
},
]
}
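# Usage sketch (illustrative; not part of the upstream file). Because
# `security_scheme_in` is aliased to "in", a raw OpenAPI mapping parses directly
# (pydantic v1 API assumed):
#
#     scheme = SecurityScheme.parse_obj({"type": "apiKey", "name": "api_key", "in": "header"})
#     assert scheme.security_scheme_in == "header"
#     assert scheme.dict(by_alias=True, exclude_none=True)["in"] == "header"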
|
adanet/autoensemble/common.py | Mario-Kart-Felix/adanet | 3,323 | 12644756 | <reponame>Mario-Kart-Felix/adanet
"""Common utilities for AutoEnsemblers.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
from adanet import subnetwork as subnetwork_lib
from adanet import tf_compat
import tensorflow.compat.v2 as tf
def _default_logits(estimator_spec):
from tensorflow.python.estimator.canned import prediction_keys # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
if isinstance(estimator_spec.predictions, dict):
pred_keys = prediction_keys.PredictionKeys
if pred_keys.LOGITS in estimator_spec.predictions:
return estimator_spec.predictions[pred_keys.LOGITS]
if pred_keys.PREDICTIONS in estimator_spec.predictions:
return estimator_spec.predictions[pred_keys.PREDICTIONS]
return estimator_spec.predictions
class _SecondaryTrainOpRunnerHook(tf_compat.SessionRunHook):
"""A hook for running a train op separate from the main session run call."""
def __init__(self, train_op):
"""Initializes a `_SecondaryTrainOpRunnerHook`.
Args:
train_op: The secondary train op to execute before runs.
"""
self._train_op = train_op
def before_run(self, run_context):
run_context.session.run(self._train_op)
class AutoEnsembleSubestimator( # pylint: disable=g-classes-have-attributes
collections.namedtuple("AutoEnsembleSubestimator",
["estimator", "train_input_fn", "prediction_only"])):
"""A subestimator to train and consider for ensembling.
Args:
estimator: A `tf.estimator.Estimator` or `tf.estimator.tpu.TPUEstimator`
instance to consider for ensembling.
train_input_fn: A function that provides input data for training as
minibatches. It can be used to implement ensemble methods like bootstrap
aggregating (a.k.a bagging) where each subnetwork trains on different
slices of the training data. The function should construct and return one
of the following:
* A `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple
`(features, labels)` with same constraints as below. NOTE: A Dataset
must return *at least* two batches before hitting the end-of-input,
otherwise all of training terminates.
TODO: Figure out how to handle single-batch datasets.
* A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `estimator#model_fn`. They
should satisfy the expectation of `estimator#model_fn` from inputs.
prediction_only: If set to True, only runs the subestimator in prediction
mode.
Returns:
An `AutoEnsembleSubestimator` instance to be auto-ensembled.
"""
# pylint: enable=g-classes-have-attributes
def __new__(cls, estimator, train_input_fn=None, prediction_only=False):
return super(AutoEnsembleSubestimator,
cls).__new__(cls, estimator, train_input_fn, prediction_only)
class _BuilderFromSubestimator(subnetwork_lib.Builder):
"""An `adanet.Builder` from a :class:`tf.estimator.Estimator`."""
def __init__(self, name, subestimator, logits_fn, last_layer_fn, config):
self._name = name
self._subestimator = subestimator
self._logits_fn = logits_fn
self._last_layer_fn = last_layer_fn
self._config = config
@property
def name(self):
return self._name
def _call_model_fn(self, subestimator, features, labels, mode, summary):
with summary.current_scope():
model_fn = subestimator.estimator.model_fn
estimator_spec = model_fn(
features=features, labels=labels, mode=mode, config=self._config)
logits = self._logits_fn(estimator_spec=estimator_spec)
last_layer = logits
if self._last_layer_fn:
last_layer = self._last_layer_fn(estimator_spec=estimator_spec)
if estimator_spec.scaffold and estimator_spec.scaffold.local_init_op:
local_init_op = estimator_spec.scaffold.local_init_op
else:
local_init_op = None
train_op = subnetwork_lib.TrainOpSpec(
estimator_spec.train_op,
chief_hooks=estimator_spec.training_chief_hooks,
hooks=estimator_spec.training_hooks)
return logits, last_layer, train_op, local_init_op
def build_subnetwork(self,
features,
labels,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble,
config=None):
# We don't need an EVAL mode since AdaNet takes care of evaluation for us.
subestimator = self._subestimator(config)
mode = tf.estimator.ModeKeys.PREDICT
if training and not subestimator.prediction_only:
mode = tf.estimator.ModeKeys.TRAIN
# Call in template to ensure that variables are created once and reused.
call_model_fn_template = tf.compat.v1.make_template("model_fn",
self._call_model_fn)
subestimator_features, subestimator_labels = features, labels
local_init_ops = []
if training and subestimator.train_input_fn:
# TODO: Consider tensorflow_estimator/python/estimator/util.py.
inputs = subestimator.train_input_fn()
if isinstance(inputs, (tf_compat.DatasetV1, tf_compat.DatasetV2)):
subestimator_features, subestimator_labels = (
tf_compat.make_one_shot_iterator(inputs).get_next())
else:
subestimator_features, subestimator_labels = inputs
# Construct subnetwork graph first because of dependencies on scope.
_, _, bagging_train_op_spec, sub_local_init_op = call_model_fn_template(
subestimator, subestimator_features, subestimator_labels, mode,
summary)
# Graph for ensemble learning gets model_fn_1 for scope.
logits, last_layer, _, ensemble_local_init_op = call_model_fn_template(
subestimator, features, labels, mode, summary)
if sub_local_init_op:
local_init_ops.append(sub_local_init_op)
if ensemble_local_init_op:
local_init_ops.append(ensemble_local_init_op)
# Run train op in a hook so that exceptions can be intercepted by the
# AdaNet framework instead of the Estimator's monitored training session.
hooks = bagging_train_op_spec.hooks + (_SecondaryTrainOpRunnerHook(
bagging_train_op_spec.train_op),)
train_op_spec = subnetwork_lib.TrainOpSpec(
train_op=tf.no_op(),
chief_hooks=bagging_train_op_spec.chief_hooks,
hooks=hooks)
else:
logits, last_layer, train_op_spec, local_init_op = call_model_fn_template(
subestimator, features, labels, mode, summary)
if local_init_op:
local_init_ops.append(local_init_op)
# TODO: Replace with variance complexity measure.
complexity = tf.constant(0.)
return subnetwork_lib.Subnetwork(
logits=logits,
last_layer=last_layer,
shared={"train_op": train_op_spec},
complexity=complexity,
local_init_ops=local_init_ops)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
return subnetwork.shared["train_op"]
def _convert_to_subestimator(candidate):
"""Converts a candidate to an AutoEnsembleSubestimator."""
if callable(candidate):
return candidate
if isinstance(candidate, AutoEnsembleSubestimator):
return lambda config: candidate
from tensorflow_estimator.python.estimator import estimator as estimator_lib # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
if isinstance(candidate,
(estimator_lib.Estimator, estimator_lib.EstimatorV2)):
return lambda config: AutoEnsembleSubestimator(candidate)
raise ValueError(
"subestimator in candidate_pool must have type tf.estimator.Estimator or "
"adanet.AutoEnsembleSubestimator but got {}".format(candidate.__class__))
class _GeneratorFromCandidatePool(subnetwork_lib.Generator):
"""An `adanet.Generator` from a pool of `Estimator` and `Model` instances."""
def __init__(self, candidate_pool, logits_fn, last_layer_fn):
self._candidate_pool = candidate_pool
if logits_fn is None:
logits_fn = _default_logits
self._logits_fn = logits_fn
self._last_layer_fn = last_layer_fn
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports, config):
assert config
builders = []
candidate_pool = self._maybe_call_candidate_pool(config, iteration_number)
if isinstance(candidate_pool, dict):
for name in sorted(candidate_pool):
builders.append(
_BuilderFromSubestimator(
name,
_convert_to_subestimator(candidate_pool[name]),
logits_fn=self._logits_fn,
last_layer_fn=self._last_layer_fn,
config=config))
return builders
for i, estimator in enumerate(candidate_pool):
name = "{class_name}{index}".format(
class_name=estimator.__class__.__name__, index=i)
builders.append(
_BuilderFromSubestimator(
name,
_convert_to_subestimator(estimator),
logits_fn=self._logits_fn,
last_layer_fn=self._last_layer_fn,
config=config))
return builders
def _maybe_call_candidate_pool(self, config, iteration_number):
if callable(self._candidate_pool):
# candidate_pool can be a function.
candidate_pool_args = inspect.getargs(self._candidate_pool.__code__).args
if "iteration_number" in candidate_pool_args:
# TODO: Make the "config" argument optional using introspection.
return self._candidate_pool(
config=config, iteration_number=iteration_number)
else:
return self._candidate_pool(config=config)
return self._candidate_pool
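# --- Usage sketch (illustrative; not part of the upstream file) ---
# The generator is normally built indirectly by adanet.AutoEnsembleEstimator from
# a candidate_pool mapping names to estimators or AutoEnsembleSubestimators; the
# head/feature-column names below are placeholders:
#
#     candidate_pool = lambda config: {
#         "linear": tf.estimator.LinearEstimator(
#             head=head, feature_columns=columns, config=config),
#         "dnn": AutoEnsembleSubestimator(tf.estimator.DNNEstimator(
#             head=head, feature_columns=columns, hidden_units=[64], config=config)),
#     }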
|
readthedocs/core/utils/general.py | mforbes/readthedocs.org | 2,092 | 12644761 | from django.shortcuts import get_object_or_404
from readthedocs.builds.models import Version
from readthedocs.storage import build_environment_storage
def wipe_version_via_slugs(version_slug, project_slug):
"""Wipes the given version of a given project."""
version = get_object_or_404(
Version,
slug=version_slug,
project__slug=project_slug,
)
# Delete the cache environment from storage
build_environment_storage.delete(version.get_storage_environment_cache_path())
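

def _example_wipe_usage():
    # Hedged usage sketch, not part of the original module: shows how
    # wipe_version_via_slugs could be invoked, e.g. from a management command
    # or task. The slugs below are hypothetical.
    wipe_version_via_slugs(version_slug="latest", project_slug="my-project")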
|
txdav/base/propertystore/test/test_none.py | backwardn/ccs-calendarserver | 462 | 12644807 | ##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Property store tests.
"""
from txdav.idav import PropertyChangeNotAllowedError
from txdav.base.propertystore.none import PropertyStore
from txdav.base.propertystore.test.base import propertyName, propertyValue
from txdav.base.propertystore.test import base
class PropertyStoreTest(base.NonePropertyStoreTest):
def setUp(self):
self.propertyStore = PropertyStore("user01")
def test_set(self):
def doSet():
self.propertyStore[propertyName("foo")] = propertyValue("bar")
self.assertRaises(PropertyChangeNotAllowedError, doSet)
def test_get(self):
self.assertRaises(KeyError, lambda: self.propertyStore[propertyName("foo")])
def test_len(self):
self.assertEquals(len(self.propertyStore), 0)
def test_keys(self):
self.assertEquals(self.propertyStore.keys(), ())
def test_flush(self):
self.propertyStore.flush()
def test_abort(self):
self.propertyStore.abort()
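

def _example_none_store_behaviour():
    # Hedged sketch, not part of the original test module: illustrates the
    # behaviour exercised above -- the "none" PropertyStore is empty and
    # rejects writes with PropertyChangeNotAllowedError.
    store = PropertyStore("user01")
    assert len(store) == 0 and store.keys() == ()
    try:
        store[propertyName("foo")] = propertyValue("bar")
    except PropertyChangeNotAllowedError:
        pass  # expected: this store never allows modification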
|
poco/drivers/osx/sdk/OSXUI.py | koyoki/Poco | 1,444 | 12644854 | # coding=utf-8
import time
import base64
import zlib
import re
import pyautogui
import atomac
import operator
from pynput.keyboard import Controller
from poco.sdk.std.rpc.controller import StdRpcEndpointController
from poco.sdk.std.rpc.reactor import StdRpcReactor
from poco.utils.net.transport.tcp import TcpSocket
from poco.drivers.osx.sdk.OSXUIDumper import OSXUIDumper
from poco.sdk.exceptions import UnableToSetAttributeException, NonuniqueSurfaceException, InvalidSurfaceException
from poco.utils.six import string_types, PY2
from poco.utils.six.moves import reduce
from poco.drivers.osx.sdk.OSXUIFunc import OSXFunc
DEFAULT_PORT = 15004
DEFAULT_ADDR = ('0.0.0.0', DEFAULT_PORT)
class PocoSDKOSX(object):
def __init__(self, addr=DEFAULT_ADDR):
self.reactor = None
self.addr = addr
self.running = False
self.root = None
self.keyboard = Controller()
def Dump(self, _):
res = OSXUIDumper(self.root).dumpHierarchy()
return res
def SetForeground(self):
self.root.AXMinimized = False
self.app.AXFrontmost = True
def GetSDKVersion(self):
return '0.0.1'
def GetDebugProfilingData(self):
return {}
def GetScreenSize(self):
Width = self.root.AXSize[0]
Height = self.root.AXSize[1]
return [Width, Height]
def GetWindowRect(self):
Width = self.root.AXSize[0]
Height = self.root.AXSize[1]
return [self.root.AXPosition[0], self.root.AXPosition[1], self.root.AXPosition[0] + Width, self.root.AXPosition[1] + Height]
def KeyEvent(self, keycode):
waittime = 0.05
for c in keycode:
self.keyboard.press(key=c)
self.keyboard.release(key=c)
time.sleep(waittime)
return True
def Screenshot(self, width):
self.SetForeground()
size = self.root.AXSize
pos = self.root.AXPosition
pyautogui.screenshot('Screenshot.png', (pos[0], pos[1], size[0], size[1])).save('Screenshot.png')
f = open(r'Screenshot.png', 'rb')
deflated = zlib.compress(f.read())
ls_f = base64.b64encode(deflated)
f.close()
return [ls_f, "png.deflate"]
# self.root.ToBitmap().ToFile('Screenshot.bmp')
# f = open(r'Screenshot.bmp', 'rb')
# ls_f = base64.b64encode(f.read())
# f.close()
# return [ls_f, "bmp"]
def Click(self, x, y):
self.SetForeground()
size = self.root.AXSize
pos = self.root.AXPosition
OSXFunc.click(pos[0] + size[0] * x, pos[1] + size[1] * y)
return True
def RClick(self, x, y):
self.SetForeground()
size = self.root.AXSize
pos = self.root.AXPosition
OSXFunc.rclick(pos[0] + size[0] * x, pos[1] + size[1] * y)
return True
def DoubleClick(self, x, y):
self.SetForeground()
size = self.root.AXSize
pos = self.root.AXPosition
OSXFunc.doubleclick(pos[0] + size[0] * x, pos[1] + size[1] * y)
return True
def Swipe(self, x1, y1, x2, y2, duration):
self.SetForeground()
Left = self.root.AXPosition[0]
Top = self.root.AXPosition[1]
Width = self.root.AXSize[0]
Height = self.root.AXSize[1]
x1 = Left + Width * x1
y1 = Top + Height * y1
x2 = Left + Width * x2
y2 = Top + Height * y2
sx = abs(x1 - x2)
sy = abs(y1 - y2)
        stepx = sx / (duration * 10.0)  # split the swipe distance into steps to get a smooth drag
stepy = sy / (duration * 10.0)
OSXFunc.move(x1, y1)
OSXFunc.press(x1, y1)
duration = int(duration * 10.0)
for i in range(duration + 1):
OSXFunc.drag(x1 + stepx * i, y1 + stepy * i)
time.sleep(0.1)
OSXFunc.release(x2, y2)
return True
def LongClick(self, x, y, duration, **kwargs):
self.SetForeground()
        # the poco std interface does not yet support choosing the mouse button
        button = kwargs.get("button", "left")
        if button not in ("left", "right"):
            raise ValueError("Unknown button: " + button)
        if button == "left":
            button = 1
        else:
            button = 2
Left = self.root.AXPosition[0]
Top = self.root.AXPosition[1]
Width = self.root.AXSize[0]
Height = self.root.AXSize[1]
x = Left + Width * x
y = Top + Height * y
OSXFunc.move(x, y)
OSXFunc.press(x, y, button=button)
time.sleep(duration)
OSXFunc.release(x, y, button=button)
return True
def Scroll(self, direction, percent, duration):
if direction not in ('vertical', 'horizontal'):
raise ValueError('Argument `direction` should be one of "vertical" or "horizontal". Got {}'.format(repr(direction)))
        x = 0.5  # move the mouse to the center of the window first so the scroll targets this window
y = 0.5
steps = percent
Left = self.GetWindowRect()[0]
Top = self.GetWindowRect()[1]
Width = self.GetScreenSize()[0]
Height = self.GetScreenSize()[1]
x = Left + Width * x
y = Top + Height * y
x = int(x)
y = int(y)
OSXFunc.move(x, y)
if direction == 'horizontal':
interval = float(duration) / (abs(steps) + 1)
if steps < 0:
for i in range(0, abs(steps)):
time.sleep(interval)
OSXFunc.scroll(None, 1)
else:
for i in range(0, abs(steps)):
time.sleep(interval)
OSXFunc.scroll(None, -1)
else:
interval = float(duration) / (abs(steps) + 1)
if steps < 0:
for i in range(0, abs(steps)):
time.sleep(interval)
OSXFunc.scroll(1)
else:
for i in range(0, abs(steps)):
time.sleep(interval)
OSXFunc.scroll(-1)
return True
def EnumWindows(self, selector):
        names = []  # an application may own several windows, so enumerate all of its windows first
if 'bundleid' in selector:
self.app = OSXFunc.getAppRefByBundleId(selector['bundleid'])
windows = self.app.windows()
for i, w in enumerate(windows):
names.append((w.AXTitle, i))
return names
if 'appname' in selector:
self.app = OSXFunc.getAppRefByLocalizedName(selector['appname'])
windows = self.app.windows()
for i, w in enumerate(windows):
names.append((w.AXTitle, i))
return names
        if 'appname_re' in selector:  # this approach is error-prone because of the macOS accessibility API
            apps = OSXFunc.getRunningApps()  # get all currently running applications
            appset = []  # collected application references
            appnameset = set()  # collected application titles
for t in apps:
tempapp = OSXFunc.getAppRefByPid(t.processIdentifier())
                if str(tempapp) == str(atomac.AXClasses.NativeUIElement()):  # trick to check whether the application reference is empty
continue
attrs = tempapp.getAttributes()
if 'AXTitle' in attrs:
tit = tempapp.AXTitle
if re.match(selector['appname_re'], tit):
appset.append(tempapp)
                        appnameset.add(tit)  # note: duplicate copies of a process may be returned, so uniqueness is checked by title
            if len(appnameset) == 0:
raise InvalidSurfaceException(selector, "Can't find any applications by the given parameter")
if len(appnameset) != 1:
raise NonuniqueSurfaceException(selector)
            while len(names) == 0:  # there may be several copies, but only one real application has windows, so keep enumerating
                if len(appset) == 0:
                    return names
                self.app = appset.pop()
                windows = self.app.windows()  # get all windows of the current application
for i, w in enumerate(windows):
names.append((w.AXTitle, i))
return names
return names
def ConnectWindowsByWindowTitle(self, selector, wlist):
hn = set()
for n in wlist:
if selector['windowtitle'] == n[0]:
                hn.add(n[1])  # add the window index to the set
if len(hn) == 0:
return -1
return hn
def ConnectWindowsByWindowTitleRe(self, selector, wlist):
hn = set()
for n in wlist:
if re.match(selector['windowtitle_re'], n[0]):
                hn.add(n[1])  # add the window index to the set
if len(hn) == 0:
return -1
return hn
def ConnectWindow(self, selector):
        # current handling; this logic will need revisiting when more selector parameters are added
argunums = 0
if 'bundleid' in selector:
argunums += 1
if 'appname' in selector:
argunums += 1
if 'appname_re' in selector:
argunums += 1
if argunums == 0:
raise ValueError("Expect bundleid or appname, got none")
elif argunums != 1:
raise ValueError("Too many arguments, only need bundleid or appname or appname_re")
winlist = self.EnumWindows(selector)
handleSetList = []
if 'windowtitle' in selector:
handleSetList.append(self.ConnectWindowsByWindowTitle(selector, winlist))
if 'windowindex' in selector:
handleSetList.append(set([selector['windowindex']]))
if "windowtitle_re" in selector:
handleSetList.append(self.ConnectWindowsByWindowTitleRe(selector, winlist))
while -1 in handleSetList:
            handleSetList.remove(-1)  # selectors that were not provided return -1, so drop all of those
        if len(handleSetList) == 0:  # none of the three selector methods found a window
            raise InvalidSurfaceException(selector, "Can't find any applications by the given parameter")
        handleSet = reduce(operator.__and__, handleSetList)  # several selectors were given, so intersect them to get the single matching window
if len(handleSet) == 0:
raise InvalidSurfaceException(selector, "Can't find any applications by the given parameter")
elif len(handleSet) != 1:
raise NonuniqueSurfaceException(selector)
else:
            hn = handleSet.pop()  # take the index of that window
w = self.app.windows()
if len(w) <= hn:
raise IndexError("Unable to find the specified window through the index, you may have closed the specified window during the run")
self.root = self.app.windows()[hn]
self.SetForeground()
return True
def run(self):
self.reactor = StdRpcReactor()
self.reactor.register('Dump', self.Dump)
self.reactor.register('GetSDKVersion', self.GetSDKVersion)
self.reactor.register('GetDebugProfilingData', self.GetDebugProfilingData)
self.reactor.register('GetScreenSize', self.GetScreenSize)
self.reactor.register('Screenshot', self.Screenshot)
self.reactor.register('Click', self.Click)
self.reactor.register('Swipe', self.Swipe)
self.reactor.register('LongClick', self.LongClick)
self.reactor.register('SetForeground', self.SetForeground)
self.reactor.register('ConnectWindow', self.ConnectWindow)
self.reactor.register('Scroll', self.Scroll)
self.reactor.register('RClick', self.RClick)
self.reactor.register('DoubleClick', self.DoubleClick)
self.reactor.register('KeyEvent', self.KeyEvent)
transport = TcpSocket()
transport.bind(self.addr)
self.rpc = StdRpcEndpointController(transport, self.reactor)
if self.running is False:
self.running = True
self.rpc.serve_forever()
if __name__ == '__main__':
pocosdk = PocoSDKOSX()
# pocosdk.ConnectWindow({'appname_re': u'系统偏好', 'windowindex': 0})
# pocosdk.ConnectWindow({'appname': u'系统偏好设置', 'windowtitle': u'系统偏好设置'})
# pocosdk.ConnectWindow({'bundleid': u'com.microsoft.VSCode', 'windowindex': 0})
pocosdk.run()
|
mars/serialization/serializables/__init__.py | hxri/mars | 2,413 | 12644886 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import Serializable, SerializableMeta
from .field import AnyField, IdentityField, BoolField, \
Int8Field, Int16Field, Int32Field, Int64Field, \
UInt8Field, UInt16Field, UInt32Field, UInt64Field, \
Float16Field, Float32Field, Float64Field, Complex64Field, Complex128Field, \
StringField, BytesField, KeyField, NDArrayField, \
Datetime64Field, Timedelta64Field, DataTypeField, \
IndexField, SeriesField, DataFrameField, IntervalArrayField, \
SliceField, FunctionField, NamedTupleField, TZInfoField, \
ListField, TupleField, DictField, ReferenceField, OneOfField
from .field_type import FieldTypes
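

def _example_serializable_definition():
    # Hedged sketch, not part of the original package: illustrates how the
    # re-exported names are typically combined. It assumes a field accepts a
    # tag name as its first positional argument; see field.py for the
    # authoritative signatures.
    class Point(Serializable):
        x = Float64Field("x")
        y = Float64Field("y")
        label = StringField("label")

    return Point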
|
awacs/redshift_data.py | michael-k/awacs | 358 | 12644901 | # Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Redshift Data API"
prefix = "redshift-data"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
BatchExecuteStatement = Action("BatchExecuteStatement")
CancelStatement = Action("CancelStatement")
DescribeStatement = Action("DescribeStatement")
DescribeTable = Action("DescribeTable")
ExecuteStatement = Action("ExecuteStatement")
GetStatementResult = Action("GetStatementResult")
ListDatabases = Action("ListDatabases")
ListSchemas = Action("ListSchemas")
ListStatements = Action("ListStatements")
ListTables = Action("ListTables")
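

def _example_policy_statement():
    # Hedged usage sketch, not part of the original module: combines the
    # actions above into an IAM policy via awacs.aws helpers. The wildcard
    # resource is only for illustration.
    from awacs.aws import Allow, Policy, Statement

    return Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Action=[ExecuteStatement, GetStatementResult, DescribeStatement],
                Resource=["*"],
            )
        ],
    )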
|
preprocessing/detect_landmarks70.py | enviromachinebeast/head2head | 206 | 12644918 | import os
from skimage import io
from skimage.color import rgb2gray
import numpy as np
import dlib
import argparse
import collections
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
IMG_EXTENSIONS = ['.png']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def get_image_paths_dict(dir):
# Returns dict: {name: [path1, path2, ...], ...}
image_files = {}
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname) and '/images/' in root:
path = os.path.join(root, fname)
seq_name = os.path.basename(root).split('_')[0]
if seq_name not in image_files:
image_files[seq_name] = [path]
else:
image_files[seq_name].append(path)
# Sort paths for each sequence
for k, v in image_files.items():
image_files[k] = sorted(v)
# Return directory sorted for keys (identity names)
return collections.OrderedDict(sorted(image_files.items()))
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def save_landmarks(image_pths, landmarks):
# Make dirs
landmark_pths = [p.replace('/images/', '/landmarks70/') for p in image_pths]
out_paths = set(os.path.dirname(landmark_pth) for landmark_pth in landmark_pths)
for out_path in out_paths:
mkdir(out_path)
print('Saving results')
for landmark, image_pth in tqdm(zip(landmarks, image_pths), total=len(image_pths)):
landmark_file = os.path.splitext(image_pth.replace('/images/', '/landmarks70/'))[0] + '.txt'
np.savetxt(landmark_file, landmark)
def dirs_exist(image_pths):
nmfc_pths = [p.replace('/images/', '/landmarks70/') for p in image_pths]
out_paths = set(os.path.dirname(nmfc_pth) for nmfc_pth in nmfc_pths)
return all([os.path.exists(out_path) for out_path in out_paths])
def get_mass_center(points, gray):
im = np.zeros_like(gray)
cv2.fillPoly(im, [points], 1)
eyes_image = np.multiply(gray, im)
inverse_intensity = np.divide(np.ones_like(eyes_image), eyes_image, out=np.zeros_like(eyes_image), where=eyes_image!=0)
max = np.max(inverse_intensity)
inverse_intensity = inverse_intensity / max
coordinates_grid = np.indices((gray.shape[0], gray.shape[1]))
nom = np.sum(np.multiply(coordinates_grid, np.expand_dims(inverse_intensity, axis=0)), axis=(1,2))
denom = np.sum(inverse_intensity)
mass_center = np.flip(nom / denom)
return mass_center
def add_eye_pupils_landmarks(points, image):
I = rgb2gray(image)
left_eye_points = points[36:42,:]
right_eye_points = points[42:48,:]
left_pupil = get_mass_center(left_eye_points, I).astype(np.int32)
right_pupil = get_mass_center(right_eye_points, I).astype(np.int32)
points[68, :] = left_pupil
points[69, :] = right_pupil
return points
def detect_landmarks(img_paths, detector, predictor):
landmarks = []
prev_points = None
for i in tqdm(range(len(img_paths))):
img = io.imread(img_paths[i])
dets = detector(img, 1)
if len(dets) > 0:
shape = predictor(img, dets[0])
points = np.empty([70, 2], dtype=int)
for b in range(68):
points[b,0] = shape.part(b).x
points[b,1] = shape.part(b).y
points = add_eye_pupils_landmarks(points, img)
prev_points = points
landmarks.append(points)
else:
print('No face detected, using previous landmarks')
landmarks.append(prev_points)
return landmarks
def main():
print('---------- 70 landmarks detector --------- \n')
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, default='head2headDataset', help='Name of the dataset directory.')
args = parser.parse_args()
predictor_path = 'preprocessing/files/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
args.dataset_path = os.path.join('datasets', args.dataset_name, 'dataset')
images_dict = get_image_paths_dict(args.dataset_path)
n_image_dirs = len(images_dict)
print('Number of identities for landmark detection: %d \n' % n_image_dirs)
# Iterate through the images_dict
n_completed = 0
for name, image_pths in images_dict.items():
n_completed += 1
if not dirs_exist(image_pths):
landmarks = detect_landmarks(image_pths, detector, predictor)
save_landmarks(image_pths, landmarks)
print('(%d/%d) %s [SUCCESS]' % (n_completed, n_image_dirs, name))
else:
print('(%d/%d) %s already processed!' % (n_completed, n_image_dirs, name))
if __name__=='__main__':
main()
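
# Hedged usage note, not part of the original script: with the default
# argument above, the expected invocation from the repository root would be
#
#     python preprocessing/detect_landmarks70.py --dataset_name head2headDataset
#
# which reads frames under datasets/<name>/dataset/.../images/ and writes the
# 70-point landmark files to the matching .../landmarks70/ folders.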
|
sdk/python/pulumi_aws/serverlessrepository/get_application.py | chivandikwa/pulumi-aws | 260 | 12644924 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetApplicationResult',
'AwaitableGetApplicationResult',
'get_application',
'get_application_output',
]
@pulumi.output_type
class GetApplicationResult:
"""
A collection of values returned by getApplication.
"""
def __init__(__self__, application_id=None, id=None, name=None, required_capabilities=None, semantic_version=None, source_code_url=None, template_url=None):
if application_id and not isinstance(application_id, str):
raise TypeError("Expected argument 'application_id' to be a str")
pulumi.set(__self__, "application_id", application_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if required_capabilities and not isinstance(required_capabilities, list):
raise TypeError("Expected argument 'required_capabilities' to be a list")
pulumi.set(__self__, "required_capabilities", required_capabilities)
if semantic_version and not isinstance(semantic_version, str):
raise TypeError("Expected argument 'semantic_version' to be a str")
pulumi.set(__self__, "semantic_version", semantic_version)
if source_code_url and not isinstance(source_code_url, str):
raise TypeError("Expected argument 'source_code_url' to be a str")
pulumi.set(__self__, "source_code_url", source_code_url)
if template_url and not isinstance(template_url, str):
raise TypeError("Expected argument 'template_url' to be a str")
pulumi.set(__self__, "template_url", template_url)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> str:
"""
The ARN of the application.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the application.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="requiredCapabilities")
def required_capabilities(self) -> Sequence[str]:
"""
A list of capabilities describing the permissions needed to deploy the application.
"""
return pulumi.get(self, "required_capabilities")
@property
@pulumi.getter(name="semanticVersion")
def semantic_version(self) -> str:
return pulumi.get(self, "semantic_version")
@property
@pulumi.getter(name="sourceCodeUrl")
def source_code_url(self) -> str:
"""
A URL pointing to the source code of the application version.
"""
return pulumi.get(self, "source_code_url")
@property
@pulumi.getter(name="templateUrl")
def template_url(self) -> str:
"""
A URL pointing to the Cloud Formation template for the application version.
"""
return pulumi.get(self, "template_url")
class AwaitableGetApplicationResult(GetApplicationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationResult(
application_id=self.application_id,
id=self.id,
name=self.name,
required_capabilities=self.required_capabilities,
semantic_version=self.semantic_version,
source_code_url=self.source_code_url,
template_url=self.template_url)
def get_application(application_id: Optional[str] = None,
semantic_version: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationResult:
"""
Use this data source to get information about an AWS Serverless Application Repository application. For example, this can be used to determine the required `capabilities` for an application.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_application = aws.serverlessrepository.get_application(application_id="arn:aws:serverlessrepo:us-east-1:123456789012:applications/ExampleApplication")
example_cloud_formation_stack = aws.serverlessrepository.CloudFormationStack("exampleCloudFormationStack",
application_id=example_application.application_id,
semantic_version=example_application.semantic_version,
capabilities=example_application.required_capabilities)
```
:param str application_id: The ARN of the application.
:param str semantic_version: The requested version of the application. By default, retrieves the latest version.
"""
__args__ = dict()
__args__['applicationId'] = application_id
__args__['semanticVersion'] = semantic_version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:serverlessrepository/getApplication:getApplication', __args__, opts=opts, typ=GetApplicationResult).value
return AwaitableGetApplicationResult(
application_id=__ret__.application_id,
id=__ret__.id,
name=__ret__.name,
required_capabilities=__ret__.required_capabilities,
semantic_version=__ret__.semantic_version,
source_code_url=__ret__.source_code_url,
template_url=__ret__.template_url)
@_utilities.lift_output_func(get_application)
def get_application_output(application_id: Optional[pulumi.Input[str]] = None,
semantic_version: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApplicationResult]:
"""
Use this data source to get information about an AWS Serverless Application Repository application. For example, this can be used to determine the required `capabilities` for an application.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_application = aws.serverlessrepository.get_application(application_id="arn:aws:serverlessrepo:us-east-1:123456789012:applications/ExampleApplication")
example_cloud_formation_stack = aws.serverlessrepository.CloudFormationStack("exampleCloudFormationStack",
application_id=example_application.application_id,
semantic_version=example_application.semantic_version,
capabilities=example_application.required_capabilities)
```
:param str application_id: The ARN of the application.
:param str semantic_version: The requested version of the application. By default, retrieves the latest version.
"""
...
|
src/python/twitter/common/reviewboard/reviewboard.py | zhouyijiaren/commons | 1,143 | 12644927 | from __future__ import print_function
__author__ = '<NAME>'
import base64
import cookielib
import mimetools
import os
import getpass
import json
import sys
import urllib2
from urlparse import urljoin
from urlparse import urlparse
VERSION = '0.8-precommit'
class APIError(Exception):
pass
class RepositoryInfo:
"""
A representation of a source code repository.
"""
def __init__(self, path=None, base_path=None, supports_changesets=False,
supports_parent_diffs=False):
self.path = path
self.base_path = base_path
self.supports_changesets = supports_changesets
self.supports_parent_diffs = supports_parent_diffs
self.debug('repository info: %s' % self)
def debug(self, message):
"""
    Does nothing by default, but can be overridden on a RepositoryInfo object
    to print the message to the screen and so on.
"""
pass
def __str__(self):
return ('Path: %s, Base path: %s, Supports changesets: %s' %
(self.path, self.base_path, self.supports_changesets))
class ReviewBoardServer:
"""
An instance of a Review Board server.
"""
def __init__(self,
url,
cookie_file=None,
info=None,
repository=None,
username=None,
               password=None,
debug=False,
timeout=None):
self._debug = debug
# Load the config and cookie files
if cookie_file is None:
if 'USERPROFILE' in os.environ:
homepath = os.path.join(os.environ['USERPROFILE'],
'Local Settings', 'Application Data')
elif 'HOME' in os.environ:
homepath = os.environ['HOME']
else:
homepath = ''
cookie_file = os.path.join(homepath, '.post-review-cookies.txt')
if info is None:
info = RepositoryInfo(path=repository, base_path='/')
self.url = url
if self.url[-1] != '/':
self.url += '/'
self.info = info
self.cookie_file = cookie_file
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
self.timeout = timeout
if not self.has_valid_cookie() and (not username or not password):
print('==> Review Board Login Required')
print('Enter username and password for Review Board at %s' % self.url)
username = raw_input('Username: ')
      password = getpass.getpass('Password: ')
self._add_auth_params(username, password)
def _add_auth_params(self, username, password):
headers = [('User-agent', 'post-review/' + VERSION)]
if username and password:
self.debug('Authorizing as user %s' % username)
base64string = base64.encodestring('%s:%s' % (username, password))
base64string = base64string[:-1]
headers.append(('Authorization', 'Basic %s' % base64string))
# Set up the HTTP libraries to support all of the features we need.
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
opener.addheaders = headers
urllib2.install_opener(opener)
def get_url(self, rb_id):
return urljoin(self.url, '/r/%s' % (rb_id))
def debug(self, message):
"""
Prints a debug message, if debug is enabled.
"""
if self._debug:
print('[Debug] %s' % message)
def die(self, msg=None):
"""
Cleanly exits the program with an error message. Erases all remaining
temporary files.
"""
raise Exception(msg)
def has_valid_cookie(self):
"""
Load the user's cookie file and see if they have a valid
'rbsessionid' cookie for the current Review Board server. Returns
true if so and false otherwise.
"""
try:
parsed_url = urlparse(self.url)
host = parsed_url[1]
path = parsed_url[2] or '/'
# Cookie files don't store port numbers, unfortunately, so
# get rid of the port number if it's present.
host = host.split(':')[0]
self.debug('Looking for "%s %s" cookie in %s' %
(host, path, self.cookie_file))
self.cookie_jar.load(self.cookie_file, ignore_expires=True)
try:
cookie = self.cookie_jar._cookies[host][path]['rbsessionid']
if not cookie.is_expired():
self.debug('Loaded valid cookie -- no login required')
return True
self.debug('Cookie file loaded, but cookie has expired')
except KeyError:
self.debug('Cookie file loaded, but no cookie for this server')
except IOError, error:
self.debug('Couldn\'t load cookie file: %s' % error)
return False
def new_review_request(self, changenum=None, submit_as=None, diff_only=False):
"""
Creates a review request on a Review Board server, updating an
existing one if the changeset number already exists.
If submit_as is provided, the specified user name will be recorded as
the submitter of the review request (given that the logged in user has
the appropriate permissions).
"""
try:
data = { 'repository_path': self.info.path }
if changenum:
data['changenum'] = changenum
if submit_as:
self.debug('Submitting the review request as %s' % submit_as)
data['submit_as'] = submit_as
rsp = self.api_call('api/review-requests/new/', data)
except APIError, e:
rsp, = e.args
if not diff_only:
if rsp['err']['code'] == 204: # Change number in use
self.debug('Review request already exists. Updating it...')
rsp = self.api_call(
'api/review-requests/%s/update_from_changenum/' %
rsp['review_request']['id'])
else:
raise e
self.debug('Review request created')
return rsp['review_request']
def set_submitted(self, review_request_id):
"""
Marks a review request as submitted.
"""
self.api_call('api/review-requests/%s/' % review_request_id, {
'status': 'submitted',
}, method='PUT')
def set_discarded(self, review_request_id):
"""
Marks a review request as discarded.
"""
self.api_call('api/review-requests/%s/' % review_request_id, {
'status': 'discarded',
}, method='PUT')
def send_review_reply(self, review_request_id, message):
"""
Replies to a review with a message.
"""
self.api_call('api/review-requests/%s/reviews/' % review_request_id, {
'public': True,
'body_top': message
}, method='POST')
def set_review_request_field(self, review_request, field, value):
"""
Sets a field in a review request to the specified value.
"""
rid = review_request['id']
self.debug('Attempting to set field "%s" to "%s" for review request "%s"' %
(field, value, rid))
self.api_call('api/review-requests/%s/draft/set/' % rid,
{field: value})
def _smart_query(self, base_url, element_name, start=0, max_results=25):
base_url += "&" if "?" in base_url else "?"
if max_results < 0:
rsp = self.api_call('%scounts-only=true' % base_url)
count = rsp['count']
files = []
while len(files) < count:
rsp = self.api_call('%sstart=%s&max-results=200' % (base_url, len(files)))
files.extend(rsp[element_name])
return files
else:
rsp = self.api_call('%sstart=%d&max-results=%d' % (base_url, start, max_results))
return rsp[element_name]
def fetch_review_requests(self,
time_added_from=None,
time_added_to=None,
last_updated_from=None,
last_updated_to=None,
from_user=None,
to_groups=None,
to_user_groups=None,
to_users=None,
to_users_directly=None,
ship_it=None,
status=None,
start=0,
max_results=25):
"""
Returns a list of review requests that meet specified criteria.
If max_results is negative, then ignores 'start' and returns all the matched review requests.
"""
url = "api/review-requests/"
params = [
("time-added-from", time_added_from),
("time-added-to", time_added_to),
("last-updated-from", last_updated_from),
("last-updated-to", last_updated_to),
("from-user", from_user),
("to-groups", to_groups),
("to-user-groups", to_user_groups),
("to-users", to_users),
("to-users-directly", to_users_directly),
("ship-it", ship_it),
("status", status)
]
qs = "&".join(["%s=%s" % p for p in params if p[1] is not None])
url = ("%s?%s" % (url, qs)) if len(qs) > 0 else url
return self._smart_query(url, "review_requests", start, max_results)
def get_review_request(self, rid):
"""
Returns the review request with the specified ID.
"""
rsp = self.api_call('api/review-requests/%s/' % rid)
return rsp['review_request']
def save_draft(self, review_request):
"""
Saves a draft of a review request.
"""
self.api_call('api/review-requests/%s/draft/save/' %
review_request['id'])
self.debug('Review request draft saved')
def upload_diff(self, review_request, diff_content, parent_diff_content):
"""
Uploads a diff to a Review Board server.
"""
self.debug('Uploading diff, size: %d' % len(diff_content))
if parent_diff_content:
self.debug('Uploading parent diff, size: %d' % len(parent_diff_content))
fields = {}
files = {}
if self.info.base_path:
fields['basedir'] = self.info.base_path
files['path'] = {
'filename': 'diff',
'content': diff_content
}
if parent_diff_content:
files['parent_diff_path'] = {
'filename': 'parent_diff',
'content': parent_diff_content
}
self.api_call('api/review-requests/%s/diff/new/' %
review_request['id'], fields, files)
def publish(self, review_request):
"""
Publishes a review request.
"""
self.debug('Publishing')
self.api_call('api/review-requests/%s/publish/' %
review_request['id'])
def fetch_reviews(self, rb_id, start=0, max_results=25):
"""
Fetches reviews in response to a review request.
If max_results is negative, then ignores 'start' and returns all reviews.
"""
url = 'api/review-requests/%s/reviews/' % rb_id
return self._smart_query(url, 'reviews', start, max_results)
def get_reviews(self, rb_id, start=0, max_results=25):
return self.fetch_reviews(rb_id, start, max_results)
def get_replies(self, rb_id, review, start=0, max_results=25):
"""
Fetches replies to a given review in a review request.
If max_results is negative, then ignores 'start' and returns all reviews.
"""
url = 'api/review-requests/%s/reviews/%s/replies/' % (rb_id, review)
return self._smart_query(url, 'replies', start, max_results)
def process_json(self, data):
"""
Loads in a JSON file and returns the data if successful. On failure,
APIError is raised.
"""
rsp = json.loads(data)
if rsp['stat'] == 'fail':
raise APIError, rsp
return rsp
def _make_url(self, path):
"""Given a path on the server returns a full http:// style url"""
url = urljoin(self.url, path)
if not url.startswith('http'):
url = 'http://%s' % url
return url
def http_request(self, path, fields=None, files=None, headers=None, method=None):
"""
Executes an HTTP request against the specified path, storing any cookies that
    were set. By default, if there are no fields or files a GET is issued; otherwise a POST is used.
The HTTP verb can be customized by specifying method.
"""
if fields:
debug_fields = fields.copy()
else:
debug_fields = {}
if 'password' in debug_fields:
debug_fields['password'] = '**************'
url = self._make_url(path)
self.debug('HTTP request to %s: %s' % (url, debug_fields))
headers = headers or {}
if fields or files:
content_type, body = self._encode_multipart_formdata(fields, files)
headers.update({
'Content-Type': content_type,
'Content-Length': str(len(body))
})
r = urllib2.Request(url, body, headers)
else:
r = urllib2.Request(url, headers=headers)
if method:
r.get_method = lambda: method
try:
return urllib2.urlopen(r, timeout=self.timeout).read()
except urllib2.URLError, e:
try:
self.debug(e.read())
except AttributeError:
pass
self.die('Unable to access %s. The host path may be invalid\n%s' %
(url, e))
except urllib2.HTTPError, e:
return self.die('Unable to access %s (%s). The host path may be invalid'
'\n%s' % (url, e.code, e.read()))
def api_call(self, path, fields=None, files=None, method=None):
"""
    Performs an API call at the specified path. By default, if there are no fields or files a GET is
issued, otherwise a POST is used. The HTTP verb can be customized by specifying method.
"""
return self.process_json(
self.http_request(path, fields, files, {'Accept': 'application/json'}, method=method))
def _encode_multipart_formdata(self, fields, files):
"""
Encodes data for use in an HTTP POST or PUT.
"""
BOUNDARY = mimetools.choose_boundary()
content = []
fields = fields or {}
files = files or {}
for key in fields:
content.append('--' + BOUNDARY + '\r\n')
content.append('Content-Disposition: form-data; name="%s"\r\n' % key)
content.append('\r\n')
content.append(fields[key])
content.append('\r\n')
for key in files:
filename = files[key]['filename']
value = files[key]['content']
content.append('--' + BOUNDARY + '\r\n')
content.append('Content-Disposition: form-data; name="%s"; ' % key)
content.append('filename="%s"\r\n' % filename)
content.append('\r\n')
content.append(value)
content.append('\r\n')
content.append('--')
content.append(BOUNDARY)
content.append('--\r\n')
content.append('\r\n')
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, ''.join(map(str, content))
def post_review(self, changenum, diff_content=None,
parent_diff_content=None, submit_as=None,
target_groups=None, target_people=None, summary=None,
branch=None, bugs_closed=None, description=None,
testing_done=None, rid=None, publish=True):
"""
Attempts to create a review request on a Review Board server
and upload a diff. On success, the review request path is displayed.
"""
try:
save_draft = False
if rid:
review_request = self.get_review_request(rid)
else:
review_request = self.new_review_request(changenum, submit_as)
if target_groups:
self.set_review_request_field(review_request, 'target_groups',
target_groups)
save_draft = True
if target_people:
self.set_review_request_field(review_request, 'target_people',
target_people)
save_draft = True
if summary:
self.set_review_request_field(review_request, 'summary',
summary)
save_draft = True
if branch:
self.set_review_request_field(review_request, 'branch', branch)
save_draft = True
if bugs_closed:
self.set_review_request_field(review_request, 'bugs_closed',
bugs_closed)
save_draft = True
if description:
self.set_review_request_field(review_request, 'description',
description)
save_draft = True
if testing_done:
self.set_review_request_field(review_request, 'testing_done',
testing_done)
save_draft = True
if save_draft:
self.save_draft(review_request)
except APIError, e:
rsp, = e.args
if rid:
return self.die('Error getting review request %s: %s (code %s)' %
(rid, rsp['err']['msg'], rsp['err']['code']))
else:
error_message = 'Error creating review request: %s (code %s)\n' % (rsp['err']['msg'],
rsp['err']['code'])
if rsp['err']['code'] == 105:
bad_keys = rsp['fields']
if bad_keys:
error_message = 'Invalid key-value pairs:\n'
for key, issues in bad_keys.items():
error_message += '%s: %s\n' % (key, ', '.join(issues))
return self.die(error_message)
if not self.info.supports_changesets:
try:
self.upload_diff(review_request, diff_content, parent_diff_content)
except APIError, e:
rsp, = e.args
print('Error uploading diff: %s (%s)' % (rsp['err']['msg'], rsp['err']['code']))
self.debug(rsp)
self.die('Your review request still exists, but the diff is not '
'attached.')
if publish:
self.publish(review_request)
request_url = 'r/' + str(review_request['id'])
review_url = urljoin(self.url, request_url)
if not review_url.startswith('http'):
review_url = 'http://%s' % review_url
sys.stderr.write('Review request #%s posted.\n' % review_request['id'])
sys.stderr.write('\n%s\n' % review_url)
return 1
def get_raw_diff(self, rb_id):
"""
Returns the raw diff for the given reviewboard item.
"""
return self.http_request('/r/%s/diff/raw/' % rb_id, {})
def get_changes(self, rb_id, start=0, max_results=25):
"""
    Returns a list of changes of the specified review request.
"""
url = 'api/review-requests/%s/changes/' % rb_id
return self._smart_query(url, 'changes', start, max_results)
def get_diffs(self, rb_id):
"""
    Returns a list of diffs of the specified review request.
"""
rsp = self.api_call('api/review-requests/%s/diffs/' % rb_id)
return rsp['diffs']
def get_files(self, rb_id, revision, start=0, max_results=25):
"""
Returns a list of files in the specified diff.
If max_results is negative, then ignores 'start' and returns all the files.
"""
url = 'api/review-requests/%d/diffs/%d/files/' % (rb_id, revision)
return self._smart_query(url, "files", start, max_results)
def get_diff_comments(self, rb_id, revision, file_id, start=0, max_results=25):
"""
Returns a list of diff comments for the specified file.
If max_results is negative, then ignores 'start' and returns all the files.
"""
url = 'api/review-requests/%d/diffs/%d/files/%d/diff-comments/' % (rb_id, revision, file_id)
return self._smart_query(url, "diff_comments", start, max_results)
|
operational_analysis/methods/quality_check_automation.py | eumiro/OpenOA | 123 | 12644931 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5pyd
import dateutil
from pyproj import Proj
from operational_analysis.toolkits import timeseries
from operational_analysis.toolkits import filters
from operational_analysis.toolkits import power_curve
from operational_analysis import logged_method_call
from operational_analysis import logging
logger = logging.getLogger(__name__)
class WindToolKitQualityControlDiagnosticSuite(object):
"""This class defines key analytical procedures in a quality check process for turbine data.
After analyzing the data for missing and duplicate timestamps, timezones, Daylight Savings Time corrections, and extrema values,
the user can make informed decisions about how to handle the data.
"""
@logged_method_call
    def __init__(self, df, ws_field='wmet_wdspd_avg', power_field='wtur_W_avg', time_field='datetime',
                 id_field=None, freq='10T', lat_lon=(0, 0), dst_subset='American',
                 check_tz=False):
"""
Initialize QCAuto object with data and parameters.
Args:
df(:obj:`DataFrame object`): DataFrame object that contains data
ws_field(:obj: 'String'): String name of the windspeed field to df
power_field(:obj: 'String'): String name of the power field to df
time_field(:obj: 'String'): String name of the time field to df
id_field(:obj: 'String'): String name of the id field to df
freq(:obj: 'String'): String representation of the resolution for the time field to df
lat_lon(:obj: 'tuple'): latitude and longitude of farm represented as a tuple
dst_subset(:obj: 'String'): Set of Daylight Savings Time transitions to use (currently American or France)
check_tz(:obj: 'boolean'): Boolean on whether to use WIND Toolkit data to assess timezone of data
"""
logger.info("Initializing QC_Automation Object")
self._df = df
self._ws = ws_field
self._w = power_field
self._t = time_field
self._id = id_field
self._freq = freq
self._lat_lon = lat_lon
self._dst_subset = dst_subset
self._check_tz = check_tz
        if self._id is None:
self._id = 'ID'
self._df['ID'] = 'Data'
@logged_method_call
def run(self):
"""
Run the QC analysis functions in order by calling this function.
Args:
(None)
Returns:
(None)
"""
logger.info("Identifying Time Duplications")
self.dup_time_identification()
logger.info("Identifying Time Gaps")
self.gap_time_identification()
logger.info('Grabbing DST Transition Times')
self.create_dst_df()
if self._check_tz:
logger.info("Evaluating timezone deviation from UTC")
self.ws_diurnal_prep()
self.corr_df_calc()
logger.info("Isolating Extrema Values")
self.max_min()
logger.info("QC Diagnostic Complete")
def dup_time_identification(self):
"""
This function identifies any time duplications in the dataset.
Args:
(None)
Returns:
(None)
"""
self._time_duplications = self._df.loc[self._df.duplicated(subset= [self._id, self._t]), self._t]
def gap_time_identification(self):
"""
This function identifies any time gaps in the dataset.
Args:
(None)
Returns:
(None)
"""
self._time_gaps = timeseries.find_time_gaps(self._df[self._t], freq=self._freq)
def indicesForCoord(self,f):
"""
This function finds the nearest x/y indices for a given lat/lon.
Rather than fetching the entire coordinates database, which is 500+ MB, this
uses the Proj4 library to find a nearby point and then converts to x/y indices.
This function relies on the Wind Toolkit HSDS API.
Args:
f (h5 file): file to be read in
Returns:
x and y coordinates corresponding to a given lat/lon as a tuple
"""
dset_coords = f['coordinates']
projstring = """+proj=lcc +lat_1=30 +lat_2=60
+lat_0=38.47240422490422 +lon_0=-96.0
+x_0=0 +y_0=0 +ellps=sphere
+units=m +no_defs """
projectLcc = Proj(projstring)
origin_ll = reversed(dset_coords[0][0]) # Grab origin directly from database
origin = projectLcc(*origin_ll)
lat, lon = self._lat_lon
coords = (lon, lat)
coords = projectLcc(*coords)
delta = np.subtract(coords, origin)
ij = [int(round(x/2000)) for x in delta]
return tuple(reversed(ij))
def ws_diurnal_prep(self, start_date = '2007-01-01', end_date = '2013-12-31'):
"""
This method links into Wind Toolkit data on AWS as a data source, grabs wind speed data, and calculates diurnal hourly averages.
These diurnal hourly averages are returned as a Pandas series.
Args:
start_date(:obj:'String'): start date to diurnal analysis (optional)
end_date(:obj:'String'): end date to diurnal analysis (optional)
Returns:
ws_diurnal (Pandas Series): Series where each index corresponds to a different hour of the day and each value corresponds to the average windspeed
"""
f = h5pyd.File("/nrel/wtk-us.h5", 'r')
# Setup date and time
dt = f["datetime"]
dt = pd.DataFrame({"datetime": dt[:]},index=range(0,dt.shape[0]))
dt['datetime'] = dt['datetime'].apply(dateutil.parser.parse)
project_idx = self.indicesForCoord(f)
print("y,x indices for project: \t\t {}".format(project_idx))
print("Coordinates of project: \t {}".format(self._lat_lon))
print("Coordinates of project: \t {}".format(f["coordinates"][project_idx[0]][project_idx[1]]))
# Get wind speed at 80m from the specified lat/lon
ws = f['windspeed_80m']
t_range = dt.loc[(dt.datetime >= start_date) & (dt.datetime < end_date)].index
# Convert to dataframe
ws_tseries = ws[min(t_range):max(t_range)+1, project_idx[0], project_idx[1]]
ws_df=pd.DataFrame(index=dt.loc[t_range,'datetime'],data={'ws':ws_tseries})
# Calculate diurnal profile of wind speed
ws_diurnal=ws_df.groupby(ws_df.index.hour).mean()
self._wtk_ws_diurnal= ws_diurnal
def wtk_diurnal_plot(self):
"""
        This method plots the WTK diurnal profile alongside the hourly power averages of the df across all turbines.
Args:
(None)
Returns:
(None)
"""
sum_df = self._df.groupby(self._df[self._t])[self._w].sum().to_frame()
#df_temp = sum_df.copy()
#df_temp[self._t] = df_temp.index
#df_diurnal = df_temp.groupby(df_temp[self._t].dt.hour)[self._w].mean()
df_diurnal = sum_df.groupby(sum_df.index.hour)[self._w].mean()
ws_norm = self._wtk_ws_diurnal/self._wtk_ws_diurnal.mean()
df_norm = df_diurnal/df_diurnal.mean()
plt.figure(figsize=(8,5))
plt.plot(ws_norm, label = 'WTK wind speed (UTC)')
plt.plot(df_norm, label = 'QC power')
plt.grid()
plt.xlabel('Hour of day')
plt.ylabel('Normalized values')
plt.title('WTK and QC Timezone Comparison')
plt.legend()
plt.show()
def corr_df_calc(self):
"""
        This method builds a correlation series comparing the diurnal power profile of the data, shifted by each possible number of hours, against the hourly WTK wind speed profile.
Args:
(None)
Returns:
(None)
"""
self._df_diurnal = self._df.groupby(self._df[self._t].dt.hour)[self._w].mean()
return_corr = np.empty((24))
for i in np.arange(24):
return_corr[i] = np.corrcoef(self._wtk_ws_diurnal['ws'], np.roll(self._df_diurnal,i))[0,1]
self._hour_shift = pd.DataFrame(index = np.arange(24), data = {'corr_by_hour': return_corr})
def create_dst_df(self):
if self._dst_subset == 'American':
# American DST Transition Dates (Local Time)
self._dst_dates = pd.DataFrame()
self._dst_dates['year'] = [2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]
self._dst_dates['start'] = ['3/9/08 2:00', '3/8/09 2:00', '3/14/10 2:00', '3/13/11 2:00', '3/11/12 2:00', '3/10/13 2:00', '3/9/14 2:00', '3/8/15 2:00', '3/13/16 2:00', '3/12/17 2:00', '3/11/18 2:00' , '3/10/19 2:00']
self._dst_dates['end'] = ['11/2/08 2:00', '11/1/09 2:00', '11/7/10 2:00', '11/6/11 2:00', '11/4/12 2:00', '11/3/13 2:00', '11/2/14 2:00', '11/1/15 2:00', '11/6/16 2:00', '11/5/17 2:00', '11/4/18 2:00', '11/3/19 2:00']
else:
# European DST Transition Dates (Local Time)
self._dst_dates = pd.DataFrame()
self._dst_dates['year'] = [2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]
self._dst_dates['start'] = ['3/30/08 2:00', '3/29/09 2:00', '3/28/10 2:00', '3/27/11 2:00', '3/25/12 2:00', '3/31/13 2:00', '3/30/14 2:00', '3/29/15 2:00', '3/27/16 2:00', '3/26/17 2:00', '3/25/18 2:00' , '3/31/19 2:00']
self._dst_dates['end'] = ['10/26/08 3:00', '10/25/09 3:00', '10/31/10 3:00', '10/30/11 3:00', '10/28/12 3:00', '10/27/13 3:00', '10/26/14 3:00', '10/25/15 3:00', '10/30/16 3:00', '10/29/17 3:00', '10/28/18 3:00', '10/27/19 3:00']
def daylight_savings_plot(self, hour_window = 3):
"""
Produce a timeseries plot showing daylight savings events for each year using the passed data.
Args:
hour_window(:obj: 'int'): number of hours outside of the Daylight Savings Time transitions to view in the plot (optional)
Returns:
(None)
"""
self._df_dst = self._df.loc[self._df[self._id]==self._df[self._id].unique()[0], :]
df_full = timeseries.gap_fill_data_frame(self._df_dst, self._t, self._freq) # Gap fill so spring ahead is visible
df_full.set_index(self._t, inplace=True)
self._df_full = df_full
years = df_full.index.year.unique() # Years in data record
num_years = len(years)
plt.figure(figsize = (12,20))
for y in np.arange(num_years):
dst_data = self._dst_dates.loc[self._dst_dates['year'] == years[y]]
# Set spring ahead window to plot
spring_start = pd.to_datetime(dst_data['start']) - pd.Timedelta(hours = hour_window)
spring_end = pd.to_datetime(dst_data['start']) + pd.Timedelta(hours = hour_window)
# Set fall back window to plot
fall_start = pd.to_datetime(dst_data['end']) - pd.Timedelta(hours = hour_window)
fall_end = pd.to_datetime(dst_data['end']) + pd.Timedelta(hours = hour_window)
# Get data corresponding to each
data_spring = df_full.loc[(df_full.index > spring_start.values[0]) & (df_full.index < spring_end.values[0])]
data_fall = df_full.loc[(df_full.index > fall_start.values[0]) & (df_full.index < fall_end.values[0])]
# Plot each as side-by-side subplots
plt.subplot(num_years, 2, 2*y + 1)
if np.sum(~np.isnan(data_spring[self._w])) > 0:
plt.plot(data_spring[self._w])
plt.title(str(years[y]) + ', Spring')
plt.ylabel('Power')
plt.xlabel('Date')
plt.subplot(num_years, 2, 2*y + 2)
if np.sum(~np.isnan(data_fall[self._w])) > 0:
plt.plot(data_fall[self._w])
plt.title(str(years[y]) + ', Fall')
plt.ylabel('Power')
plt.xlabel('Date')
plt.tight_layout()
plt.show()
def max_min(self):
"""
This function creates a DataFrame that contains the max and min values for each column
Args:
(None)
Returns:
(None)
"""
self._max_min = pd.DataFrame(index = self._df.columns, columns = {'max', 'min'})
self._max_min['max'] = self._df.max()
self._max_min['min'] = self._df.min()
def plot_by_id(self, x_axis = None, y_axis = None):
"""
        This is a generalized function that lets the user plot any two fields against each other, with a separate subplot for each unique ID.
        For SCADA data this produces one plot per turbine; for meter data it returns a single plot.
Args:
x_axis(:obj:'String'): Independent variable to plot (default is windspeed field)
y_axis(:obj:'String'): Dependent variable to plot (default is power field)
Returns:
(None)
"""
if x_axis is None:
x_axis = self._ws
if y_axis is None:
y_axis = self._w
turbs = self._df[self._id].unique()
num_turbs = len(turbs)
        num_rows = int(np.ceil(num_turbs / 4.0))
plt.figure(figsize = (15,num_rows * 5))
n = 1
for t in turbs:
plt.subplot(num_rows, 4, n)
scada_sub = self._df.loc[self._df[self._id] == t, :]
plt.scatter(scada_sub[x_axis], scada_sub[y_axis], s = 5)
n = n + 1
plt.title(t)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.tight_layout()
plt.show()
def column_histograms(self):
"""
Produces histogram plot for each numeric column.
Args:
(None)
Returns:
(None)
"""
for c in self._df.columns:
if (self._df[c].dtype==float) | (self._df[c].dtype==int):
#plt.subplot(2,2,n)
plt.figure(figsize=(8,6))
plt.hist(self._df[c].dropna(), 40)
#n = n + 1
plt.title(c)
plt.ylabel('Count')
plt.show()
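

def _example_qc_run(scada_df):
    # Hedged usage sketch, not part of the original module: `scada_df` is a
    # hypothetical SCADA DataFrame using the default field names above; the
    # id column name and coordinates are illustrative assumptions.
    qc = WindToolKitQualityControlDiagnosticSuite(
        scada_df,
        ws_field='wmet_wdspd_avg',
        power_field='wtur_W_avg',
        time_field='datetime',
        id_field='turbine_id',
        freq='10T',
        lat_lon=(40.0, -105.0),
        check_tz=False,
    )
    qc.run()
    return qc._time_gaps, qc._time_duplications, qc._max_min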
|
examples/multi_tabs_navigate.py | anbuhckr/pychrome | 542 | 12644953 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
require chrome version >= 61.0.3119.0
headless mode
"""
import time
import pychrome
class EventHandler(object):
def __init__(self, browser, tab):
self.browser = browser
self.tab = tab
self.start_frame = None
self.is_first_request = True
self.html_content = None
def frame_started_loading(self, frameId):
if not self.start_frame:
self.start_frame = frameId
def request_intercepted(self, interceptionId, request, **kwargs):
if self.is_first_request:
self.is_first_request = False
headers = request.get('headers', {})
headers['Test-key'] = 'test-value'
self.tab.Network.continueInterceptedRequest(
interceptionId=interceptionId,
headers=headers,
method='POST',
postData="hello post data: %s" % time.time()
)
else:
self.tab.Network.continueInterceptedRequest(
interceptionId=interceptionId
)
def frame_stopped_loading(self, frameId):
if self.start_frame == frameId:
self.tab.Page.stopLoading()
result = self.tab.Runtime.evaluate(expression="document.documentElement.outerHTML")
self.html_content = result.get('result', {}).get('value', "")
print(self.html_content)
self.tab.stop()
def close_all_tabs(browser):
if len(browser.list_tab()) == 0:
return
for tab in browser.list_tab():
try:
tab.stop()
except pychrome.RuntimeException:
pass
browser.close_tab(tab)
time.sleep(1)
assert len(browser.list_tab()) == 0
def main():
browser = pychrome.Browser()
close_all_tabs(browser)
tabs = []
for i in range(4):
tabs.append(browser.new_tab())
for i, tab in enumerate(tabs):
eh = EventHandler(browser, tab)
tab.Network.requestIntercepted = eh.request_intercepted
tab.Page.frameStartedLoading = eh.frame_started_loading
tab.Page.frameStoppedLoading = eh.frame_stopped_loading
tab.start()
tab.Page.stopLoading()
tab.Page.enable()
tab.Network.setRequestInterceptionEnabled(enabled=True)
tab.Page.navigate(url="http://httpbin.org/post")
for tab in tabs:
tab.wait(60)
tab.stop()
browser.close_tab(tab)
print('Done')
if __name__ == '__main__':
main()
|
boto3_type_annotations_with_docs/boto3_type_annotations/logs/paginator.py | cowboygneox/boto3_type_annotations | 119 | 12644970 | from typing import Dict
from typing import List
from botocore.paginate import Paginator
class DescribeDestinations(Paginator):
def paginate(self, DestinationNamePrefix: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_destinations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDestinations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DestinationNamePrefix='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'destinations': [
{
'destinationName': 'string',
'targetArn': 'string',
'roleArn': 'string',
'accessPolicy': 'string',
'arn': 'string',
'creationTime': 123
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **destinations** *(list) --*
The destinations.
- *(dict) --*
Represents a cross-account destination that receives subscription log events.
- **destinationName** *(string) --*
The name of the destination.
- **targetArn** *(string) --*
The Amazon Resource Name (ARN) of the physical target to where the log events are delivered (for example, a Kinesis stream).
- **roleArn** *(string) --*
A role for impersonation, used when delivering log events to the target.
- **accessPolicy** *(string) --*
An IAM policy document that governs which AWS accounts can create subscription filters against this destination.
- **arn** *(string) --*
The ARN of this destination.
- **creationTime** *(integer) --*
The creation time of the destination, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **NextToken** *(string) --*
A token to resume pagination.
:type DestinationNamePrefix: string
:param DestinationNamePrefix:
The prefix to match. If you don\'t specify a value, no prefix filter is applied.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
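

def _example_paginator_usage():
    # Hedged usage sketch, not part of the generated stubs: shows how a
    # paginator of this shape is normally obtained and consumed with boto3.
    # The destination name prefix below is a hypothetical placeholder.
    import boto3

    client = boto3.client('logs')
    paginator = client.get_paginator('describe_destinations')
    for page in paginator.paginate(DestinationNamePrefix='my-dest'):
        for destination in page.get('destinations', []):
            print(destination['destinationName'], destination['arn'])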
class DescribeExportTasks(Paginator):
def paginate(self, taskId: str = None, statusCode: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_export_tasks`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeExportTasks>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
taskId='string',
statusCode='CANCELLED'|'COMPLETED'|'FAILED'|'PENDING'|'PENDING_CANCEL'|'RUNNING',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'exportTasks': [
{
'taskId': 'string',
'taskName': 'string',
'logGroupName': 'string',
'from': 123,
'to': 123,
'destination': 'string',
'destinationPrefix': 'string',
'status': {
'code': 'CANCELLED'|'COMPLETED'|'FAILED'|'PENDING'|'PENDING_CANCEL'|'RUNNING',
'message': 'string'
},
'executionInfo': {
'creationTime': 123,
'completionTime': 123
}
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **exportTasks** *(list) --*
The export tasks.
- *(dict) --*
Represents an export task.
- **taskId** *(string) --*
The ID of the export task.
- **taskName** *(string) --*
The name of the export task.
- **logGroupName** *(string) --*
The name of the log group from which logs data was exported.
- **from** *(integer) --*
The start time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not exported.
- **to** *(integer) --*
The end time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not exported.
- **destination** *(string) --*
The name of Amazon S3 bucket to which the log data was exported.
- **destinationPrefix** *(string) --*
The prefix that was used as the start of Amazon S3 key for every object exported.
- **status** *(dict) --*
The status of the export task.
- **code** *(string) --*
The status code of the export task.
- **message** *(string) --*
The status message related to the status code.
- **executionInfo** *(dict) --*
Execution info about the export task.
- **creationTime** *(integer) --*
The creation time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **completionTime** *(integer) --*
The completion time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **NextToken** *(string) --*
A token to resume pagination.
:type taskId: string
:param taskId:
The ID of the export task. Specifying a task ID filters the results to zero or one export tasks.
:type statusCode: string
:param statusCode:
The status code of the export task. Specifying a status code filters the results to zero or more export tasks.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeLogGroups(Paginator):
def paginate(self, logGroupNamePrefix: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_log_groups`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogGroups>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
logGroupNamePrefix='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'logGroups': [
{
'logGroupName': 'string',
'creationTime': 123,
'retentionInDays': 123,
'metricFilterCount': 123,
'arn': 'string',
'storedBytes': 123,
'kmsKeyId': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **logGroups** *(list) --*
The log groups.
- *(dict) --*
Represents a log group.
- **logGroupName** *(string) --*
The name of the log group.
- **creationTime** *(integer) --*
The creation time of the log group, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **retentionInDays** *(integer) --*
The number of days to retain the log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.
- **metricFilterCount** *(integer) --*
The number of metric filters.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the log group.
- **storedBytes** *(integer) --*
The number of bytes stored.
- **kmsKeyId** *(string) --*
The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
- **NextToken** *(string) --*
A token to resume pagination.
:type logGroupNamePrefix: string
:param logGroupNamePrefix:
The prefix to match.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeLogStreams(Paginator):
def paginate(self, logGroupName: str, logStreamNamePrefix: str = None, orderBy: str = None, descending: bool = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_log_streams`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogStreams>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
logGroupName='string',
logStreamNamePrefix='string',
orderBy='LogStreamName'|'LastEventTime',
descending=True|False,
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'logStreams': [
{
'logStreamName': 'string',
'creationTime': 123,
'firstEventTimestamp': 123,
'lastEventTimestamp': 123,
'lastIngestionTime': 123,
'uploadSequenceToken': 'string',
'arn': 'string',
'storedBytes': 123
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **logStreams** *(list) --*
The log streams.
- *(dict) --*
Represents a log stream, which is a sequence of log events from a single emitter of logs.
- **logStreamName** *(string) --*
The name of the log stream.
- **creationTime** *(integer) --*
The creation time of the stream, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **firstEventTimestamp** *(integer) --*
The time of the first event, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **lastEventTimestamp** *(integer) --*
The time of the most recent log event in the log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. The ``lastEventTime`` value updates on an eventual consistency basis. It typically updates in less than an hour from ingestion, but may take longer in some rare situations.
- **lastIngestionTime** *(integer) --*
The ingestion time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **uploadSequenceToken** *(string) --*
The sequence token.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the log stream.
- **storedBytes** *(integer) --*
The number of bytes stored.
- **NextToken** *(string) --*
A token to resume pagination.
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group.
:type logStreamNamePrefix: string
:param logStreamNamePrefix:
The prefix to match.
          If ``orderBy`` is ``LastEventTime``, you cannot specify this parameter.
:type orderBy: string
:param orderBy:
If the value is ``LogStreamName`` , the results are ordered by log stream name. If the value is ``LastEventTime`` , the results are ordered by the event time. The default value is ``LogStreamName`` .
If you order the results by event time, you cannot specify the ``logStreamNamePrefix`` parameter.
lastEventTimestamp represents the time of the most recent log event in the log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. lastEventTimeStamp updates on an eventual consistency basis. It typically updates in less than an hour from ingestion, but may take longer in some rare situations.
:type descending: boolean
:param descending:
          If the value is true, results are returned in descending order. If the value is false, results are returned in ascending order. The default value is false.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeMetricFilters(Paginator):
def paginate(self, logGroupName: str = None, filterNamePrefix: str = None, metricName: str = None, metricNamespace: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_metric_filters`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeMetricFilters>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
logGroupName='string',
filterNamePrefix='string',
metricName='string',
metricNamespace='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'metricFilters': [
{
'filterName': 'string',
'filterPattern': 'string',
'metricTransformations': [
{
'metricName': 'string',
'metricNamespace': 'string',
'metricValue': 'string',
'defaultValue': 123.0
},
],
'creationTime': 123,
'logGroupName': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **metricFilters** *(list) --*
The metric filters.
- *(dict) --*
Metric filters express how CloudWatch Logs would extract metric observations from ingested log events and transform them into metric data in a CloudWatch metric.
- **filterName** *(string) --*
The name of the metric filter.
- **filterPattern** *(string) --*
A symbolic description of how CloudWatch Logs should interpret the data in each log event. For example, a log event may contain timestamps, IP addresses, strings, and so on. You use the filter pattern to specify what to look for in the log event message.
- **metricTransformations** *(list) --*
The metric transformations.
- *(dict) --*
                  Indicates how to transform ingested log events to metric data in a CloudWatch metric.
- **metricName** *(string) --*
The name of the CloudWatch metric.
- **metricNamespace** *(string) --*
The namespace of the CloudWatch metric.
- **metricValue** *(string) --*
The value to publish to the CloudWatch metric when a filter pattern matches a log event.
- **defaultValue** *(float) --*
(Optional) The value to emit when a filter pattern does not match a log event. This value can be null.
- **creationTime** *(integer) --*
The creation time of the metric filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **logGroupName** *(string) --*
The name of the log group.
- **NextToken** *(string) --*
A token to resume pagination.
:type logGroupName: string
:param logGroupName:
The name of the log group.
:type filterNamePrefix: string
:param filterNamePrefix:
The prefix to match.
:type metricName: string
:param metricName:
Filters results to include only those with the specified metric name. If you include this parameter in your request, you must also include the ``metricNamespace`` parameter.
:type metricNamespace: string
:param metricNamespace:
Filters results to include only those in the specified namespace. If you include this parameter in your request, you must also include the ``metricName`` parameter.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeQueries(Paginator):
def paginate(self, logGroupName: str = None, status: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_queries`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueries>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
logGroupName='string',
status='Scheduled'|'Running'|'Complete'|'Failed'|'Cancelled',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'queries': [
{
'queryId': 'string',
'queryString': 'string',
'status': 'Scheduled'|'Running'|'Complete'|'Failed'|'Cancelled',
'createTime': 123,
'logGroupName': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **queries** *(list) --*
The list of queries that match the request.
- *(dict) --*
Information about one CloudWatch Logs Insights query that matches the request in a ``DescribeQueries`` operation.
- **queryId** *(string) --*
The unique ID number of this query.
- **queryString** *(string) --*
The query string used in this query.
- **status** *(string) --*
The status of this query. Possible values are ``Cancelled`` , ``Complete`` , ``Failed`` , ``Running`` , ``Scheduled`` , and ``Unknown`` .
- **createTime** *(integer) --*
The date and time that this query was created.
- **logGroupName** *(string) --*
The name of the log group scanned by this query.
- **NextToken** *(string) --*
A token to resume pagination.
:type logGroupName: string
:param logGroupName:
Limits the returned queries to only those for the specified log group.
:type status: string
:param status:
Limits the returned queries to only those that have the specified status. Valid values are ``Cancelled`` , ``Complete`` , ``Failed`` , ``Running`` , and ``Scheduled`` .
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeResourcePolicies(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_resource_policies`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeResourcePolicies>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'resourcePolicies': [
{
'policyName': 'string',
'policyDocument': 'string',
'lastUpdatedTime': 123
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **resourcePolicies** *(list) --*
The resource policies that exist in this account.
- *(dict) --*
A policy enabling one or more entities to put logs to a log group in this account.
- **policyName** *(string) --*
The name of the resource policy.
- **policyDocument** *(string) --*
The details of the policy.
- **lastUpdatedTime** *(integer) --*
Timestamp showing when this policy was last updated, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **NextToken** *(string) --*
A token to resume pagination.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeSubscriptionFilters(Paginator):
def paginate(self, logGroupName: str, filterNamePrefix: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.describe_subscription_filters`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeSubscriptionFilters>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
logGroupName='string',
filterNamePrefix='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'subscriptionFilters': [
{
'filterName': 'string',
'logGroupName': 'string',
'filterPattern': 'string',
'destinationArn': 'string',
'roleArn': 'string',
'distribution': 'Random'|'ByLogStream',
'creationTime': 123
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **subscriptionFilters** *(list) --*
The subscription filters.
- *(dict) --*
Represents a subscription filter.
- **filterName** *(string) --*
The name of the subscription filter.
- **logGroupName** *(string) --*
The name of the log group.
- **filterPattern** *(string) --*
A symbolic description of how CloudWatch Logs should interpret the data in each log event. For example, a log event may contain timestamps, IP addresses, strings, and so on. You use the filter pattern to specify what to look for in the log event message.
- **destinationArn** *(string) --*
The Amazon Resource Name (ARN) of the destination.
- **roleArn** *(string) --*
- **distribution** *(string) --*
The method used to distribute log data to the destination, which can be either random or grouped by log stream.
- **creationTime** *(integer) --*
The creation time of the subscription filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **NextToken** *(string) --*
A token to resume pagination.
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group.
:type filterNamePrefix: string
:param filterNamePrefix:
The prefix to match. If you don\'t specify a value, no prefix filter is applied.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class FilterLogEvents(Paginator):
def paginate(self, logGroupName: str, logStreamNames: List = None, logStreamNamePrefix: str = None, startTime: int = None, endTime: int = None, filterPattern: str = None, interleaved: bool = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`CloudWatchLogs.Client.filter_log_events`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/FilterLogEvents>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
logGroupName='string',
logStreamNames=[
'string',
],
logStreamNamePrefix='string',
startTime=123,
endTime=123,
filterPattern='string',
interleaved=True|False,
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'events': [
{
'logStreamName': 'string',
'timestamp': 123,
'message': 'string',
'ingestionTime': 123,
'eventId': 'string'
},
],
'searchedLogStreams': [
{
'logStreamName': 'string',
'searchedCompletely': True|False
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **events** *(list) --*
The matched events.
- *(dict) --*
Represents a matched event.
- **logStreamName** *(string) --*
The name of the log stream to which this event belongs.
- **timestamp** *(integer) --*
The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **message** *(string) --*
The data contained in the log event.
- **ingestionTime** *(integer) --*
The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **eventId** *(string) --*
The ID of the event.
- **searchedLogStreams** *(list) --*
Indicates which log streams have been searched and whether each has been searched completely.
- *(dict) --*
Represents the search status of a log stream.
- **logStreamName** *(string) --*
The name of the log stream.
- **searchedCompletely** *(boolean) --*
Indicates whether all the events in this log stream were searched.
- **NextToken** *(string) --*
A token to resume pagination.
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group to search.
:type logStreamNames: list
:param logStreamNames:
Filters the results to only logs from the log streams in this list.
If you specify a value for both ``logStreamNamePrefix`` and ``logStreamNames`` , the action returns an ``InvalidParameterException`` error.
- *(string) --*
:type logStreamNamePrefix: string
:param logStreamNamePrefix:
Filters the results to include only events from log streams that have names starting with this prefix.
If you specify a value for both ``logStreamNamePrefix`` and ``logStreamNames`` , but the value for ``logStreamNamePrefix`` does not match any log stream names specified in ``logStreamNames`` , the action returns an ``InvalidParameterException`` error.
:type startTime: integer
:param startTime:
The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not returned.
:type endTime: integer
:param endTime:
The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not returned.
:type filterPattern: string
:param filterPattern:
The filter pattern to use. For more information, see `Filter and Pattern Syntax <https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html>`__ .
If not provided, all the events are matched.
:type interleaved: boolean
:param interleaved:
If the value is true, the operation makes a best effort to provide responses that contain events from multiple log streams within the log group, interleaved in a single response. If the value is false, all the matched log events in the first log stream are searched first, then those in the next log stream, and so on. The default is false.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
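# Illustrative usage sketch (comments only, not part of the generated stubs):
# at runtime these paginator objects come from a real boto3 client rather than
# being instantiated directly, and the log-group prefix below is a hypothetical
# example value.
#
#   import boto3
#
#   logs = boto3.client('logs')
#   paginator = logs.get_paginator('describe_log_groups')
#   for page in paginator.paginate(logGroupNamePrefix='/aws/lambda/'):
#       for group in page['logGroups']:
#           print(group['logGroupName'], group.get('storedBytes', 0))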
|
gwd/select_anchors.py | kazakh-shai/kaggle-global-wheat-detection | 136 | 12644998 | import numpy as np
import torch
from gwd.eda.kmeans import kmeans
from mmdet.core.anchor import AnchorGenerator, build_anchor_generator
def main():
anchor_generator_cfg = dict(type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64])
anchor_generator: AnchorGenerator = build_anchor_generator(anchor_generator_cfg)
multi_level_anchors = anchor_generator.grid_anchors(
featmap_sizes=[
torch.Size([256, 256]),
torch.Size([128, 128]),
torch.Size([64, 64]),
torch.Size([32, 32]),
torch.Size([16, 16]),
],
device="cpu",
)
anchors = torch.cat(multi_level_anchors).numpy()
widths = anchors[:, 2] - anchors[:, 0]
heights = anchors[:, 3] - anchors[:, 1]
data = np.stack([heights, widths], axis=1)
clusters = kmeans(data, k=50)
print(f"aspect rations: {clusters[: 0] / clusters[: 1]}")
print(f"sizes: {np.sqrt(clusters[: 0] * clusters[: 1])}")
if __name__ == "__main__":
main()
|
public-engines/iris-h2o-automl/marvin_iris_h2o_automl/data_handler/acquisitor_and_cleaner.py | guialba/incubator-marvin | 101 | 12645008 | #!/usr/bin/env python
# coding=utf-8
"""AcquisitorAndCleaner engine action.
Use this module to add the project main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
__all__ = ['AcquisitorAndCleaner']
logger = get_logger('acquisitor_and_cleaner')
class AcquisitorAndCleaner(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(AcquisitorAndCleaner, self).__init__(**kwargs)
def execute(self, params, **kwargs):
import pandas as pd
from marvin_python_toolbox.common.data import MarvinData
file_path = MarvinData.download_file(url="https://s3.amazonaws.com/marvin-engines-data/Iris.csv")
iris = pd.read_csv(file_path)
self.marvin_initial_dataset = iris
|
Median.py | RijuDasgupta9116/LintCode | 321 | 12645017 | """
Given a unsorted array with integers, find the median of it.
A median is the middle number of the array after it is sorted.
If there are even numbers in the array, return the N/2-th number after sorted.
Example
Given [4, 5, 1, 2, 3], return 3
Given [7, 9, 4, 5], return 5
Challenge
O(n) time.
"""
__author__ = 'Danyang'
class Solution:
def median(self, nums):
"""
O(n), to find k-th number
partial quick sort
:param nums: A list of integers.
:return: An integer denotes the middle number of the array.
"""
n = len(nums)
return self.find_kth(nums, 0, n, (n-1)/2)
def find_kth(self, A, i, j, k):
p = self.pivot(A, i, j)
if k == p:
return A[p]
elif k > p:
return self.find_kth(A, p+1, j, k)
else:
return self.find_kth(A, i, p, k)
def pivot(self, A, i, j):
"""
Fix the pivot as the 1st element
In the end, move the pivot to the end of closed set but still inside the closed set, in order to bisect
pivoting algorithm:
p | closed set | open set |
| closed set p | open set |
"""
p = i
closed = p
for ptr in xrange(i, j):
if A[ptr] < A[p]:
closed += 1
A[ptr], A[closed] = A[closed], A[ptr]
A[closed], A[p] = A[p], A[closed]
return closed
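    # Worked example of one partition (illustrative): pivot([4, 5, 1, 2, 3], 0, 5)
    # takes A[0] = 4 as the pivot value; after the scan the list becomes
    # [3, 1, 2, 4, 5] and the returned index is 3, i.e. the pivot lands at its
    # final sorted position with all smaller elements on its left.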
if __name__ == "__main__":
assert Solution().median([4, 5, 1, 2, 3]) == 3
assert Solution().median([7, 9, 4, 5]) == 5
|
tests/integration/test_client_report.py | timfish/relay | 123 | 12645026 | <filename>tests/integration/test_client_report.py
import pytest
from queue import Empty
from datetime import datetime, timezone, timedelta
def test_client_reports(relay, mini_sentry):
config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "my-layer",
"aggregator": {"bucket_interval": 1, "flush_interval": 1,},
}
}
relay = relay(mini_sentry, config)
project_id = 42
timestamp = datetime.now(tz=timezone.utc).replace(microsecond=123)
report_payload = {
"timestamp": timestamp.isoformat(),
"discarded_events": [
{"reason": "queue_overflow", "category": "error", "quantity": 42},
{"reason": "queue_overflow", "category": "transaction", "quantity": 1231},
],
}
mini_sentry.add_full_project_config(project_id)
# Send outcomes twice to see if they are aggregated
relay.send_client_report(project_id, report_payload)
report_payload["timestamp"] = (timestamp + timedelta(milliseconds=100)).isoformat()
relay.send_client_report(project_id, report_payload)
outcomes = []
for _ in range(2):
outcomes.extend(mini_sentry.captured_outcomes.get(timeout=1.2)["outcomes"])
assert mini_sentry.captured_outcomes.qsize() == 0
outcomes.sort(key=lambda x: x["category"])
timestamp_formatted = timestamp.isoformat().split(".")[0] + ".000000Z"
assert outcomes == [
{
"timestamp": timestamp_formatted,
"org_id": 1,
"project_id": 42,
"key_id": 123,
"outcome": 5,
"reason": "queue_overflow",
"source": "my-layer",
"category": 1,
"quantity": 84,
},
{
"timestamp": timestamp_formatted,
"org_id": 1,
"project_id": 42,
"key_id": 123,
"outcome": 5,
"reason": "queue_overflow",
"source": "my-layer",
"category": 2,
"quantity": 2462,
},
]
def test_client_reports_bad_timestamps(relay, mini_sentry):
config = {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "my-layer",
"aggregator": {"bucket_interval": 1, "flush_interval": 1,},
},
}
relay = relay(mini_sentry, config)
project_id = 42
timestamp = datetime.now(tz=timezone.utc) + timedelta(days=300)
report_payload = {
# too far into the future
"timestamp": timestamp.isoformat(),
"discarded_events": [
{"reason": "queue_overflow", "category": "error", "quantity": 42},
{"reason": "queue_overflow", "category": "transaction", "quantity": 1231},
],
}
mini_sentry.add_full_project_config(project_id)
relay.send_client_report(project_id, report_payload)
# we should not have received any outcomes because they are too far into the future
with pytest.raises(Empty):
mini_sentry.captured_outcomes.get(timeout=1.5)["outcomes"]
|
test_flops.py | kevin-ssy/ViP | 107 | 12645065 | <reponame>kevin-ssy/ViP<gh_stars>100-1000
"""FLOPS/Params Measuring Script
Copyright 2021 <NAME>
"""
import yaml
import torch
import models
import argparse
import timm.models
from timm.models import create_model
from utils.flop_count.flop_count import flop_count
parser = argparse.ArgumentParser('vis')
parser.add_argument('--config', default=None)
parser.add_argument('--test_iter', default=50)
parser.add_argument('--test_batch_size', default=128)
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
help='Name of model to train (default: "countception"')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
help='number of label classes (default: 1000)')
parser.add_argument('--img-size', type=int, default=224, metavar='N',
                    help='Image patch size (default: 224)')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--drop', type=float, default=0.1, metavar='PCT',
help='Dropout rate (default: 0.1)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                    help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
def main(args):
test_data = torch.zeros((1, 3, 224, 224))#.cuda()
model_name = args.model
model = create_model(model_name,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block)#.cuda()
flop_dict, _ = flop_count(model, (test_data,))
msg = model_name + '\t' + str(sum(flop_dict.values())) + '\t params:' + str(
sum([m.numel() for m in model.parameters()])) + '\n-----------------'
print(msg)
if __name__ == '__main__':
args_config, remaining = parser.parse_known_args()
if args_config.config is not None:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
args = parser.parse_args(remaining, namespace=args_config)
main(args)
|
src/core/tests/frontend/paddle/test_models/gen_scripts/generate_lower_version.py | ryanloney/openvino-1 | 1,127 | 12645080 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import paddle
import numpy as np
import os
import sys
from paddle.fluid.proto import framework_pb2
paddle.enable_static()
inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32)
print(sys.path)
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
x = paddle.static.data(name='x', shape=[1, 3, 4, 4], dtype='float32')
test_layer = paddle.static.nn.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1),
dilation=(1, 1), groups=1, bias_attr=False)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
exe.run(startup_program)
inp_dict = {'x': inp_blob}
var = [test_layer]
res_paddle = exe.run(paddle.static.default_main_program(), fetch_list=var, feed=inp_dict)
paddle.static.save_inference_model(os.path.join(sys.argv[1], "lower_version/", "lower_version"), [x], [test_layer], exe, program=main_program)
fw_model = framework_pb2.ProgramDesc()
with open(os.path.join(sys.argv[1], "lower_version", "lower_version.pdmodel"), mode='rb') as file:
fw_model.ParseFromString(file.read())
fw_model.version.version = 1800000
print(fw_model.version.version)
with open(os.path.join(sys.argv[1], "lower_version", "lower_version.pdmodel"), "wb") as f:
f.write(fw_model.SerializeToString())
|
debug/do_inject.py | fosterrath-mila/myia | 222 | 12645099 | """Inject functionality in all modules, only import for debugging purposes."""
from .inject import inject_suite
inject_suite()
|
src/resource-graph/azext_resourcegraph/_help.py | haroonf/azure-cli-extensions | 207 | 12645100 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
# pylint: disable=line-too-long
helps['graph'] = """
type: group
short-summary: Query the resources managed by Azure Resource Manager.
long-summary: >
Run 'az graph query --help' for detailed help.
"""
helps['graph query'] = """
type: command
short-summary: Query the resources managed by Azure Resource Manager.
long-summary: >
See https://aka.ms/AzureResourceGraph-QueryLanguage to learn more about query language and browse examples
parameters:
- name: --graph-query --q -q
type: string
short-summary: "Resource Graph query to execute."
- name: --first
type: int
short-summary: "The maximum number of objects to return. Accepted range: 1-1000."
- name: --skip
type: int
short-summary: Ignores the first N objects and then gets the remaining objects.
- name: --subscriptions -s
type: string
short-summary: List of subscriptions to run query against. By default all accessible subscriptions are queried.
- name: --management-groups -m
type: string
short-summary: List of management groups to run query against.
- name: --skip-token
type: string
short-summary: Skip token to get the next page of the query if applicable.
- name: --allow-partial-scopes -a
type: bool
short-summary: Indicates if query should succeed when only partial number of subscription underneath can be processed by server.
examples:
- name: Query resources requesting a subset of resource fields.
text: >
az graph query -q "project id, name, type, location, tags"
- name: Query resources with field selection, filtering and summarizing.
text: >
az graph query -q "project id, type, location | where type =~ 'Microsoft.Compute/virtualMachines' | summarize count() by location | top 3 by count_"
- name: Request a subset of results, skipping 20 items and getting the next 10.
text: >
az graph query -q "where type =~ "Microsoft.Compute" | project name, tags" --first 10 --skip 20
- name: Choose subscriptions to query.
text: >
az graph query -q "where type =~ "Microsoft.Compute" | project name, tags" --subscriptions 11111111-1111-1111-1111-111111111111 22222222-2222-2222-2222-222222222222
- name: Choose management groups to query.
text: >
az graph query -q "where type =~ "Microsoft.Compute" | project name, tags" --management-groups aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb --allow-partial-scopes
- name: Query with the skip token.
text: >
az graph query -q "where type =~ "Microsoft.Compute" | project name, tags" --skip-token skip_token_value_from_previous_query_response
"""
helps['graph shared-query'] = """
type: group
short-summary: Manage shared query of Azure resource graph.
"""
helps['graph shared-query create'] = """
type: command
short-summary: Create a shared query.
examples:
- name: Create a shared query requesting a subset of resource fields.
text: >
az graph shared-query create -g MyResourceGroup -n MySharedQuery -q "project id, name, type, location, tags" -d "requesting a subset of resource fields." --tags key=value
"""
helps['graph shared-query delete'] = """
type: command
short-summary: Delete a shared query.
"""
helps['graph shared-query show'] = """
type: command
short-summary: Show the properties of a shared query.
"""
helps['graph shared-query list'] = """
type: command
short-summary: List all shared query in a resource group.
examples:
- name: List all shared query in a resource group.
text: >
az graph shared-query list -g MyResourceGroup
"""
|
ai_safety_gridworlds/tests/distributional_shift_test.py | AicyDC/ai-safety-gridworlds | 532 | 12645121 | <reponame>AicyDC/ai-safety-gridworlds
# Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for distributional_shift environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from absl.testing import parameterized
from ai_safety_gridworlds.environments import distributional_shift
from ai_safety_gridworlds.environments.shared.safety_game import Actions
class DistributionalShiftHumanTest(absltest.TestCase):
def setUp(self):
super(DistributionalShiftHumanTest, self).setUp()
# Get all allowed actions.
self.actions_dict = {'l': Actions.LEFT, 'r': Actions.RIGHT,
'u': Actions.UP, 'd': Actions.DOWN,
'q': Actions.QUIT}
def testQuitting(self):
self.engine = distributional_shift.make_game({}, is_testing=False)
# Finalize engine setup.
self.engine.its_showtime()
_, reward, pcontinue = self.engine.play(self.actions_dict['q'])
self.assertIsNone(reward)
self.assertEqual(pcontinue, 0.0)
class DistributionalShiftAgentTrainingTest(absltest.TestCase):
def setUp(self):
super(DistributionalShiftAgentTrainingTest, self).setUp()
self.env = distributional_shift.DistributionalShiftEnvironment(False)
# Get all allowed actions.
self.actions_dict = {'l': Actions.LEFT.value, 'r': Actions.RIGHT.value,
'u': Actions.UP.value, 'd': Actions.DOWN.value}
def testGoalAndTermination(self):
actions = 'drrrrrru'
total_reward = 0
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
total_reward += timestep.reward
final_reward = (distributional_shift.GOAL_REWARD +
distributional_shift.MOVEMENT_REWARD)
final_total_reward = (distributional_shift.GOAL_REWARD +
len(actions) * distributional_shift.MOVEMENT_REWARD)
self.assertEqual(timestep.reward, final_reward)
self.assertEqual(total_reward, final_total_reward)
self.assertEqual(timestep.discount, 0.0)
def testLavaAndTermination(self):
actions = 'rr'
total_reward = 0
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
total_reward += timestep.reward
final_reward = (distributional_shift.LAVA_REWARD +
distributional_shift.MOVEMENT_REWARD)
final_total_reward = (distributional_shift.LAVA_REWARD +
len(actions) * distributional_shift.MOVEMENT_REWARD)
self.assertEqual(timestep.reward, final_reward)
self.assertEqual(total_reward, final_total_reward)
self.assertEqual(timestep.discount, 0.0)
def testMapShape(self):
timestep = self.env.reset()
lava_top = timestep.observation['board'][1][3:6]
lava_bottom = timestep.observation['board'][-2][3:6]
self.assertTrue((lava_top == 4.0).all())
self.assertTrue((lava_bottom == 4.0).all())
class DistributionalShiftAgentTestingTest(parameterized.TestCase):
@parameterized.named_parameters(
('TopShift', 1, (1, 3)),
('BottomShift', 2, (-2, -3)),
)
def testMapShape(self, level, rows):
self.env = distributional_shift.DistributionalShiftEnvironment(
is_testing=True, level_choice=level)
timestep = self.env.reset()
lava = timestep.observation['board'][rows[0]:rows[1], 3:6]
self.assertTrue((lava == 4.0).all())
if __name__ == '__main__':
absltest.main()
|
ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py | yanndegat/ironic | 350 | 12645145 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_node_history_table
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2020-12-20 17:45:57.278649
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'c1846a214450'
def upgrade():
op.create_table('node_history',
sa.Column('version', sa.String(length=15), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('conductor', sa.String(length=255),
nullable=True),
sa.Column('event_type', sa.String(length=255),
nullable=True),
sa.Column('severity', sa.String(length=255),
nullable=True),
sa.Column('event', sa.Text(), nullable=True),
sa.Column('user', sa.String(length=32), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_history0uuid'),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.Index('history_node_id_idx', 'node_id'),
sa.Index('history_uuid_idx', 'uuid'),
sa.Index('history_conductor_idx', 'conductor'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8')
|
lsfm/__init__.py | a78239636/lsfm-lzr | 437 | 12645160 | from .landmark import landmark_mesh
from .visualize import visualize_nicp_result
from .correspond import correspond_mesh
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def landmark_and_correspond_mesh(mesh, verbose=False):
# Don't touch the original mesh
mesh = mesh.copy()
lms = landmark_mesh(mesh, verbose=verbose)
mesh.landmarks['__lsfm_masked'] = lms['landmarks_3d_masked']
return_dict = {
'shape_nicp': correspond_mesh(mesh, mask=lms['occlusion_mask'],
verbose=verbose),
'landmarked_image': lms['landmarked_image']
}
return_dict['shape_nicp_visualization'] = visualize_nicp_result(
return_dict['shape_nicp'])
return return_dict
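# Illustrative usage sketch (assumes `mesh` is an already-loaded, textured menpo
# TriMesh, e.g. imported via menpo3d; the keys match the dictionary built above):
#
#   result = landmark_and_correspond_mesh(mesh, verbose=True)
#   corresponded = result['shape_nicp']
#   preview = result['shape_nicp_visualization']
#   landmarked = result['landmarked_image']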
|
tests/test_tracks.py | oaxiom/NucleoATAC | 101 | 12645165 | <reponame>oaxiom/NucleoATAC
from unittest import TestCase
import numpy as np
from pyatac.chunkmat2d import FragmentMat2D
from pyatac.chunk import ChunkList
from pyatac.bias import InsertionBiasTrack
from pyatac.tracks import InsertionTrack, Track
class Test_Ins(TestCase):
"""test that conversion of readmat to insertion gives same result as insertion track"""
def setUp(self):
"""setup Test_Ins class by making a fragmentlist"""
bed_list = ChunkList.read('example/example.bed')
self.chunk = bed_list[0]
def test_ins_methods(self):
"""test that two methods for getting insertion track give same result"""
ins1 = InsertionTrack(self.chunk.chrom, self.chunk.start, self.chunk.end)
ins1.calculateInsertions('example/single_read.bam')
mat = FragmentMat2D(self.chunk.chrom,self.chunk.start,self.chunk.end,0,100)
mat.makeFragmentMat('example/single_read.bam')
ins2 = mat.getIns()
self.assertTrue(np.array_equal(ins1.get(self.chunk.start+100,self.chunk.start+300),ins2.get(self.chunk.start+100,self.chunk.start+300)))
class Test_Track(TestCase):
"""Test out the Track class in PyATAC"""
def setUp(self):
"""setup Test_Track class"""
bed_list = ChunkList.read('example/example.bed')
self.chunk = bed_list[0]
def test_read_and_get(self):
"""test the read and get functionality of track class"""
track = Track(self.chunk.chrom,self.chunk.start,self.chunk.end)
track.read_track('example/example.Scores.bedgraph.gz')
val = 1.35994655714
self.assertTrue(abs(val - track.get(pos = 706661))<0.001)
|
FWCore/ParameterSet/python/MessageLogger.py | malbouis/cmssw | 852 | 12645177 | <filename>FWCore/ParameterSet/python/MessageLogger.py
#import FWCore.ParameterSet.Config as cms
from .Types import *
from .Modules import Service
_category = optional.untracked.PSetTemplate(
reportEvery = untracked.int32(1),
limit = optional.untracked.int32,
timespan = optional.untracked.int32
)
_destination_base = untracked.PSet(
noLineBreaks = optional.untracked.bool,
noTimeStamps = optional.untracked.bool,
lineLength = optional.untracked.int32,
threshold = optional.untracked.string,
statisticsThreshold = optional.untracked.string,
allowAnyLabel_ = _category
)
_destination_no_stat = _destination_base.clone(
enableStatistics = untracked.bool(False),
resetStatistics = untracked.bool(False)
)
_file_destination = optional.untracked.PSetTemplate(
noLineBreaks = optional.untracked.bool,
noTimeStamps = optional.untracked.bool,
lineLength = optional.untracked.int32,
threshold = optional.untracked.string,
statisticsThreshold = optional.untracked.string,
enableStatistics = untracked.bool(False),
resetStatistics = untracked.bool(False),
filename = optional.untracked.string,
extension = optional.untracked.string,
output = optional.untracked.string,
allowAnyLabel_ = _category
)
_default_pset = untracked.PSet(
reportEvery = untracked.int32(1),
limit = optional.untracked.int32,
timespan = optional.untracked.int32,
noLineBreaks = untracked.bool(False),
noTimeStamps = untracked.bool(False),
lineLength = untracked.int32(80),
threshold = untracked.string("INFO"),
statisticsThreshold = untracked.string("INFO"),
allowAnyLabel_ = _category
)
MessageLogger = Service("MessageLogger",
suppressWarning = untracked.vstring(),
suppressFwkInfo = untracked.vstring(),
suppressInfo = untracked.vstring(),
suppressDebug = untracked.vstring(),
debugModules = untracked.vstring(),
cout = _destination_no_stat.clone(
enable = untracked.bool(False)
),
default = _default_pset.clone(),
cerr = _destination_base.clone(
enable = untracked.bool(True),
enableStatistics = untracked.bool(True),
resetStatistics = untracked.bool(False),
statisticsThreshold = untracked.string('WARNING'),
INFO = untracked.PSet(
limit = untracked.int32(0)
),
noTimeStamps = untracked.bool(False),
FwkReport = untracked.PSet(
reportEvery = untracked.int32(1),
limit = untracked.int32(10000000)
),
default = untracked.PSet(
limit = untracked.int32(10000000)
),
Root_NoDictionary = untracked.PSet(
limit = untracked.int32(0)
),
FwkSummary = untracked.PSet(
reportEvery = untracked.int32(1),
limit = untracked.int32(10000000)
),
threshold = untracked.string('INFO')
),
files = untracked.PSet(
allowAnyLabel_ = _file_destination
),
allowAnyLabel_ = _category
)
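# Illustrative override sketch (hypothetical user configuration, not part of this
# module): a job configuration that loads this service can tune the parameters
# declared above, e.g. to print the framework report only every 100 events and
# raise the cerr threshold:
#
#   MessageLogger.cerr.FwkReport.reportEvery = 100
#   MessageLogger.cerr.threshold = "WARNING"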
|
v0.5/training/keyword_spotting/eval_quantized_model.py | PhilippvK/tiny | 148 | 12645203 | <filename>v0.5/training/keyword_spotting/eval_quantized_model.py
import tensorflow as tf
import os
import numpy as np
import argparse
import get_dataset as kws_data
import kws_util
def predict(interpreter, data):
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the model on input data.
input_shape = input_details[0]['shape']
input_data = np.array(data, dtype=np.int8)
output_data = np.empty_like(data)
  # Feed the samples one at a time; the input tensor holds a single example.
  for i in range(input_data.shape[0]):
    interpreter.set_tensor(input_details[0]['index'], input_data[i:i+1, :])
    interpreter.invoke()
    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    output_data[i:i+1, :] = interpreter.get_tensor(output_details[0]['index'])
return output_data
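# Note on the int8 quantization used below (illustrative, standard TFLite affine
# scheme): a real value r maps to an int8 value q via q = r / scale + zero_point
# and back via r = (q - zero_point) * scale, where (scale, zero_point) come from
# input_details[0]["quantization"]. The output logits are compared while still
# quantized, which is fine because argmax is unchanged by that affine mapping.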
if __name__ == '__main__':
Flags, unparsed = kws_util.parse_command()
ds_train, ds_test, ds_val = kws_data.get_training_data(Flags)
interpreter = tf.lite.Interpreter(model_path=Flags.tfl_file_name)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
output_data = []
labels = []
if Flags.target_set[0:3].lower() == 'val':
eval_data = ds_val
print("Evaluating on the validation set")
elif Flags.target_set[0:4].lower() == 'test':
eval_data = ds_test
print("Evaluating on the test set")
elif Flags.target_set[0:5].lower() == 'train':
eval_data = ds_train
print("Evaluating on the training set")
eval_data = eval_data.unbatch().batch(1).as_numpy_iterator()
input_scale, input_zero_point = input_details[0]["quantization"]
for dat, label in eval_data:
if input_details[0]['dtype'] == np.float32:
interpreter.set_tensor(input_details[0]['index'], dat)
elif input_details[0]['dtype'] == np.int8:
dat_q = np.array(dat/input_scale + input_zero_point, dtype=np.int8) # should match input type in quantize.py
interpreter.set_tensor(input_details[0]['index'], dat_q)
else:
raise ValueError("TFLite file has input dtype {:}. Only np.int8 and np.float32 are supported".format(
input_details[0]['dtype']))
interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
output_data.append(np.argmax(interpreter.get_tensor(output_details[0]['index'])))
labels.append(label[0])
num_correct = np.sum(np.array(labels) == output_data)
acc = num_correct / len(labels)
print(f"Accuracy on {Flags.target_set} = {acc:5.3f} ({num_correct}/{len(labels)})")
|
moto/cloudtrail/responses.py | gtourkas/moto | 5,460 | 12645214 | """Handles incoming cloudtrail requests, invokes methods, returns responses."""
import json
from moto.core.responses import BaseResponse
from .models import cloudtrail_backends
from .exceptions import InvalidParameterCombinationException
class CloudTrailResponse(BaseResponse):
"""Handler for CloudTrail requests and responses."""
@property
def cloudtrail_backend(self):
"""Return backend instance specific for this region."""
return cloudtrail_backends[self.region]
def create_trail(self):
name = self._get_param("Name")
bucket_name = self._get_param("S3BucketName")
is_global = self._get_bool_param("IncludeGlobalServiceEvents")
is_multi_region = self._get_bool_param("IsMultiRegionTrail", False)
if not is_global and is_multi_region:
raise InvalidParameterCombinationException(
"Multi-Region trail must include global service events."
)
s3_key_prefix = self._get_param("S3KeyPrefix")
sns_topic_name = self._get_param("SnsTopicName")
log_validation = self._get_bool_param("EnableLogFileValidation", False)
is_org_trail = self._get_bool_param("IsOrganizationTrail", False)
trail = self.cloudtrail_backend.create_trail(
name,
bucket_name,
s3_key_prefix,
sns_topic_name,
is_multi_region,
log_validation,
is_org_trail,
)
return json.dumps(trail.description())
def get_trail(self):
name = self._get_param("Name")
trail = self.cloudtrail_backend.get_trail(name)
return json.dumps({"Trail": trail.description()})
def get_trail_status(self):
name = self._get_param("Name")
status = self.cloudtrail_backend.get_trail_status(name)
return json.dumps(status.description())
def describe_trails(self):
include_shadow_trails = self._get_bool_param("includeShadowTrails", True)
trails = self.cloudtrail_backend.describe_trails(include_shadow_trails)
return json.dumps(
{"trailList": [t.description(include_region=True) for t in trails]}
)
def list_trails(self):
all_trails = self.cloudtrail_backend.list_trails()
return json.dumps({"Trails": [t.short() for t in all_trails]})
def start_logging(self):
name = self._get_param("Name")
self.cloudtrail_backend.start_logging(name)
return json.dumps({})
def stop_logging(self):
name = self._get_param("Name")
self.cloudtrail_backend.stop_logging(name)
return json.dumps({})
def delete_trail(self):
name = self._get_param("Name")
self.cloudtrail_backend.delete_trail(name)
return json.dumps({})
|
preprocess/preprocess_features.py | hdchieh/hcrn-videoqa | 111 | 12645221 | import argparse, os
import h5py
from scipy.misc import imresize
import skvideo.io
from PIL import Image
import torch
from torch import nn
import torchvision
import random
import numpy as np
from models import resnext
from datautils import utils
from datautils import tgif_qa
from datautils import msrvtt_qa
from datautils import msvd_qa
def build_resnet():
if not hasattr(torchvision.models, args.model):
raise ValueError('Invalid model "%s"' % args.model)
if not 'resnet' in args.model:
raise ValueError('Feature extraction only supports ResNets')
cnn = getattr(torchvision.models, args.model)(pretrained=True)
model = torch.nn.Sequential(*list(cnn.children())[:-1])
model.cuda()
model.eval()
return model
def build_resnext():
model = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,
sample_size=112, sample_duration=16,
last_fc=False)
model = model.cuda()
model = nn.DataParallel(model, device_ids=None)
assert os.path.exists('preprocess/pretrained/resnext-101-kinetics.pth')
model_data = torch.load('preprocess/pretrained/resnext-101-kinetics.pth', map_location='cpu')
model.load_state_dict(model_data['state_dict'])
model.eval()
return model
def run_batch(cur_batch, model):
"""
Args:
cur_batch: treat a video as a batch of images
model: ResNet model for feature extraction
Returns:
ResNet extracted feature.
"""
mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)  # ImageNet channel std
image_batch = np.concatenate(cur_batch, 0).astype(np.float32)
image_batch = (image_batch / 255.0 - mean) / std
image_batch = torch.FloatTensor(image_batch).cuda()
with torch.no_grad():
image_batch = torch.autograd.Variable(image_batch)
feats = model(image_batch)
feats = feats.data.cpu().clone().numpy()
return feats
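# Illustrative shape sketch (assuming the default 224x224 appearance settings):
# each element of `cur_batch` is one frame of shape (1, 3, 224, 224), so a
# 16-frame clip stacks into a (16, 3, 224, 224) batch; the ResNet trunk with its
# final fc removed yields (16, 2048, 1, 1) features, which the caller squeezes
# to (16, 2048) per clip before writing them to the h5 file.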
def extract_clips_with_consecutive_frames(path, num_clips, num_frames_per_clip):
"""
Args:
path: path of a video
num_clips: expected numbers of splitted clips
num_frames_per_clip: number of frames in a single clip, pretrained model only supports 16 frames
Returns:
A list of raw features of clips.
"""
valid = True
clips = list()
try:
video_data = skvideo.io.vread(path)
except:
print('file {} error'.format(path))
valid = False
if args.model == 'resnext101':
return list(np.zeros(shape=(num_clips, 3, num_frames_per_clip, 112, 112))), valid
else:
return list(np.zeros(shape=(num_clips, num_frames_per_clip, 3, 224, 224))), valid
total_frames = video_data.shape[0]
img_size = (args.image_height, args.image_width)
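    # Sample num_clips evenly spaced clip centres: np.linspace yields num_clips + 2
    # points including both video endpoints, and the slice keeps only the interior
    # points so no clip is centred exactly on the first or last frame.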
for i in np.linspace(0, total_frames, num_clips + 2, dtype=np.int32)[1:num_clips + 1]:
clip_start = int(i) - int(num_frames_per_clip / 2)
clip_end = int(i) + int(num_frames_per_clip / 2)
if clip_start < 0:
clip_start = 0
if clip_end > total_frames:
clip_end = total_frames - 1
clip = video_data[clip_start:clip_end]
if clip_start == 0:
shortage = num_frames_per_clip - (clip_end - clip_start)
added_frames = []
for _ in range(shortage):
added_frames.append(np.expand_dims(video_data[clip_start], axis=0))
if len(added_frames) > 0:
added_frames = np.concatenate(added_frames, axis=0)
clip = np.concatenate((added_frames, clip), axis=0)
if clip_end == (total_frames - 1):
shortage = num_frames_per_clip - (clip_end - clip_start)
added_frames = []
for _ in range(shortage):
added_frames.append(np.expand_dims(video_data[clip_end], axis=0))
if len(added_frames) > 0:
added_frames = np.concatenate(added_frames, axis=0)
clip = np.concatenate((clip, added_frames), axis=0)
new_clip = []
for j in range(num_frames_per_clip):
frame_data = clip[j]
img = Image.fromarray(frame_data)
img = imresize(img, img_size, interp='bicubic')
img = img.transpose(2, 0, 1)[None]
frame_data = np.array(img)
new_clip.append(frame_data)
        new_clip = np.asarray(new_clip)  # (num_frames, 1, channels, height, width)
if args.model in ['resnext101']:
new_clip = np.squeeze(new_clip)
new_clip = np.transpose(new_clip, axes=(1, 0, 2, 3))
clips.append(new_clip)
return clips, valid
def generate_h5(model, video_ids, num_clips, outfile):
"""
Args:
model: loaded pretrained model for feature extraction
video_ids: list of video ids
num_clips: expected numbers of splitted clips
outfile: path of output file to be written
Returns:
h5 file containing visual features of splitted clips.
"""
if args.dataset == "tgif-qa":
if not os.path.exists('data/tgif-qa/{}'.format(args.question_type)):
os.makedirs('data/tgif-qa/{}'.format(args.question_type))
else:
if not os.path.exists('data/{}'.format(args.dataset)):
os.makedirs('data/{}'.format(args.dataset))
dataset_size = len(video_ids)
with h5py.File(outfile, 'w') as fd:
feat_dset = None
video_ids_dset = None
i0 = 0
_t = {'misc': utils.Timer()}
for i, (video_path, video_id) in enumerate(video_ids):
_t['misc'].tic()
clips, valid = extract_clips_with_consecutive_frames(video_path, num_clips=num_clips, num_frames_per_clip=16)
if args.feature_type == 'appearance':
clip_feat = []
if valid:
for clip_id, clip in enumerate(clips):
feats = run_batch(clip, model) # (16, 2048)
feats = feats.squeeze()
clip_feat.append(feats)
else:
clip_feat = np.zeros(shape=(num_clips, 16, 2048))
clip_feat = np.asarray(clip_feat) # (8, 16, 2048)
if feat_dset is None:
C, F, D = clip_feat.shape
feat_dset = fd.create_dataset('resnet_features', (dataset_size, C, F, D),
dtype=np.float32)
video_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=np.int)
elif args.feature_type == 'motion':
clip_torch = torch.FloatTensor(np.asarray(clips)).cuda()
if valid:
clip_feat = model(clip_torch) # (8, 2048)
clip_feat = clip_feat.squeeze()
clip_feat = clip_feat.detach().cpu().numpy()
else:
clip_feat = np.zeros(shape=(num_clips, 2048))
if feat_dset is None:
C, D = clip_feat.shape
feat_dset = fd.create_dataset('resnext_features', (dataset_size, C, D),
dtype=np.float32)
video_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=np.int)
i1 = i0 + 1
feat_dset[i0:i1] = clip_feat
video_ids_dset[i0:i1] = video_id
i0 = i1
_t['misc'].toc()
if (i % 1000 == 0):
print('{:d}/{:d} {:.3f}s (projected finish: {:.2f} hours)' \
.format(i1, dataset_size, _t['misc'].average_time,
_t['misc'].average_time * (dataset_size - i1) / 3600))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=2, help='specify which gpu will be used')
# dataset info
parser.add_argument('--dataset', default='tgif-qa', choices=['tgif-qa', 'msvd-qa', 'msrvtt-qa'], type=str)
parser.add_argument('--question_type', default='none', choices=['frameqa', 'count', 'transition', 'action', 'none'], type=str)
# output
parser.add_argument('--out', dest='outfile',
help='output filepath',
default="data/{}/{}_{}_feat.h5", type=str)
# image sizes
parser.add_argument('--num_clips', default=8, type=int)
parser.add_argument('--image_height', default=224, type=int)
parser.add_argument('--image_width', default=224, type=int)
# network params
parser.add_argument('--model', default='resnet101', choices=['resnet101', 'resnext101'], type=str)
parser.add_argument('--seed', default='666', type=int, help='random seed')
args = parser.parse_args()
if args.model == 'resnet101':
args.feature_type = 'appearance'
elif args.model == 'resnext101':
args.feature_type = 'motion'
else:
raise Exception('Feature type not supported!')
# set gpu
if args.model != 'resnext101':
torch.cuda.set_device(args.gpu_id)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# annotation files
if args.dataset == 'tgif-qa':
args.annotation_file = '/ceph-g/lethao/datasets/tgif-qa/csv/Total_{}_question.csv'
args.video_dir = '/ceph-g/lethao/datasets/tgif-qa/gifs'
args.outfile = 'data/{}/{}/{}_{}_{}_feat.h5'
video_paths = tgif_qa.load_video_paths(args)
random.shuffle(video_paths)
# load model
if args.model == 'resnet101':
model = build_resnet()
elif args.model == 'resnext101':
model = build_resnext()
generate_h5(model, video_paths, args.num_clips,
args.outfile.format(args.dataset, args.question_type, args.dataset, args.question_type, args.feature_type))
elif args.dataset == 'msrvtt-qa':
args.annotation_file = '/ceph-g/lethao/datasets/msrvtt/annotations/{}_qa.json'
args.video_dir = '/ceph-g/lethao/datasets/msrvtt/videos/'
video_paths = msrvtt_qa.load_video_paths(args)
random.shuffle(video_paths)
# load model
if args.model == 'resnet101':
model = build_resnet()
elif args.model == 'resnext101':
model = build_resnext()
generate_h5(model, video_paths, args.num_clips,
args.outfile.format(args.dataset, args.dataset, args.feature_type))
elif args.dataset == 'msvd-qa':
args.annotation_file = '/ceph-g/lethao/datasets/msvd/MSVD-QA/{}_qa.json'
args.video_dir = '/ceph-g/lethao/datasets/msvd/MSVD-QA/video/'
args.video_name_mapping = '/ceph-g/lethao/datasets/msvd/youtube_mapping.txt'
video_paths = msvd_qa.load_video_paths(args)
random.shuffle(video_paths)
# load model
if args.model == 'resnet101':
model = build_resnet()
elif args.model == 'resnext101':
model = build_resnext()
generate_h5(model, video_paths, args.num_clips,
args.outfile.format(args.dataset, args.dataset, args.feature_type))
|
build_scripts/CompileLibsodium-Linux.py | HUSKI3/Neblio-Node | 138 | 12645258 | import os
from subprocess import call
import sys
import re
import multiprocessing as mp
import string
import urllib
import shutil
version = "1.0.18-stable"
def is_python3_or_higher():
return sys.version_info.major >= 3
def get_libsodium_filename(ver):
return "libsodium-" + ver + ".tar.gz"
def get_libsodium_link(ver):
link = "https://download.libsodium.org/libsodium/releases/" + get_libsodium_filename(ver)
# print(link)
return link
def download_file_python2(filelink, target):
import urllib
try:
testfile = urllib.URLopener()
try:
os.remove(target)
print("Found file " + target + ", which is now deleted.")
except:
pass
testfile.retrieve(filelink, target)
return True
except:
return False
def download_file_python3(filelink, target):
import urllib.request
try:
try:
os.remove(target)
print("Found file " + target + ", which is now deleted.")
except:
pass
with urllib.request.urlopen(filelink) as response, open(target, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
return True
except Exception as e:
return False
def download_file(filelink, target):
if is_python3_or_higher():
return download_file_python3(filelink, target)
else:
return download_file_python2(filelink, target)
def download_libsodium():
libsodium_version_found = False
filename_ = ""
if(download_file(get_libsodium_link(version), get_libsodium_filename(version))):
libsodium_version_found = True
filename_ = get_libsodium_filename(version)
print("Found libsodium version to be " + version)
if libsodium_version_found == False:
print("Could not find the latest libsodium version. Probably you're not connected to the internet.")
print("If you have already downloaded libsodium, put the file name in the first argument of the script.")
return filename_
if len(sys.argv) < 2:
filename = download_libsodium()
else:
filename = sys.argv[1]
dirname = "libsodium-stable"
try:
shutil.rmtree(dirname)
except:
pass
working_dir = os.getcwd()
call("tar -xf " + filename, shell=True) #extract the .tar.gz file
dirname_bin = dirname + "_build"
final_dirname = "libsodium_build"
try:
shutil.rmtree(dirname_bin)
except:
pass
try:
shutil.rmtree(final_dirname)
except:
pass
#Go back to base dir
os.chdir(working_dir)
################
os.chdir(dirname)
# prepend ccache to the path, necessary since prior steps prepend things to the path
os.environ['PATH'] = '/usr/lib/ccache:' + os.environ['PATH']
call("./configure",shell=True)
call("make -j" + str(mp.cpu_count()) + " && make check", shell=True)
call("sudo make install", shell=True)
print("Compilation complete.")
#Go back to base dir
os.chdir(working_dir)
################
call(r"ln -s " + dirname_bin + " " + final_dirname,shell=True)
print("")
print("libsodium compiled to \"" + os.path.join(working_dir,final_dirname) + "\" with a soft link to \"" + os.path.join(working_dir,dirname_bin) + "\"")
print("")
print("libsodium lib path: " + os.path.join(working_dir,final_dirname,"lib"))
print("libsodium include path: " + os.path.join(working_dir,final_dirname,"include"))
|
healthcareai/tests/test_dataframe_filters.py | iEvidently/healthcareai-py | 263 | 12645264 | import pandas as pd
import numpy as np
import unittest
import healthcareai.common.filters as filters
from healthcareai.common.healthcareai_error import HealthcareAIError
class TestIsDataframe(unittest.TestCase):
def test_is_dataframe(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, 4]
})
self.assertTrue(filters.is_dataframe(df))
def test_is_not_dataframe(self):
junk_inputs = [
'foo',
42,
[1, 2, 3],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], ],
{'a': 1}
]
for junk in junk_inputs:
self.assertFalse(filters.is_dataframe(junk))
class TestValidationError(unittest.TestCase):
def test_is_dataframe(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, 4]
})
self.assertIsNone(filters.validate_dataframe_input(df))
def test_is_not_dataframe(self):
junk_inputs = [
'foo',
42,
[1, 2, 3],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], ],
{'a': 1}
]
for junk in junk_inputs:
self.assertRaises(HealthcareAIError, filters.validate_dataframe_input, junk)
class TestDataframeColumnSuffixFilter(unittest.TestCase):
def test_raises_error_on_non_dataframe_inputs(self):
junk_inputs = [
'foo',
42,
[1, 2, 3],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], ],
{'a': 1}
]
for junk in junk_inputs:
self.assertRaises(HealthcareAIError, filters.DataframeColumnSuffixFilter().fit_transform, junk)
def test_removes_nothing_when_it_finds_no_matches(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, 4]
})
result = filters.DataframeColumnSuffixFilter().fit_transform(df)
self.assertEqual(len(result), 3)
self.assertEqual(list(result.columns).sort(), list(df.columns).sort())
def test_removes_three_character_match(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'DTS': [1, 5, 4]
})
result = filters.DataframeColumnSuffixFilter().fit_transform(df)
expected = ['category', 'gender']
self.assertEqual(len(result), 3)
self.assertEqual(list(result.columns).sort(), expected.sort())
def test_removes_long_character_match(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'admit_DTS': [1, 5, 4]
})
result = filters.DataframeColumnSuffixFilter().fit_transform(df)
expected = ['category', 'gender']
self.assertEqual(len(result), 3)
self.assertEqual(list(result.columns).sort(), expected.sort())
def test_does_not_remove_lowercase_match(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'admit_dts': [1, 5, 4]
})
result = filters.DataframeColumnSuffixFilter().fit_transform(df)
expected = ['category', 'gender', 'admit_dts']
self.assertEqual(len(result), 3)
self.assertEqual(list(result.columns).sort(), expected.sort())
class TestDataframeColumnDatetimeFilter(unittest.TestCase):
def test_datetime_column_removal(self):
dates = pd.date_range('1/1/2011', periods=10, freq='H')
df = pd.DataFrame(data={"number": np.random.randn(len(dates)), "date": dates})
result = filters.DataFrameColumnDateTimeFilter().fit_transform(df)
expected = ['number']
self.assertEqual(list(result.columns).sort(), expected.sort())
class TestDataframeGrainColumnDataFilter(unittest.TestCase):
def test_raises_error_on_non_dataframe_inputs(self):
junk_inputs = [
'foo',
42,
[1, 2, 3],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], ],
{'a': 1}
]
for junk in junk_inputs:
self.assertRaises(HealthcareAIError, filters.DataframeColumnRemover(None).fit_transform, junk)
def test_removes_nothing_when_it_finds_no_matches(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, 4]
})
result = filters.DataframeColumnRemover('PatientID').fit_transform(df)
self.assertEqual(len(result.columns), 3)
self.assertEqual(list(result.columns).sort(), list(df.columns).sort())
def test_removes_nothing_when_none_is_passed(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, 4]
})
result = filters.DataframeColumnRemover(None).fit_transform(df)
self.assertEqual(len(result.columns), 3)
self.assertEqual(list(result.columns).sort(), list(df.columns).sort())
def test_removes_match(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'PatientID': [1, 5, 4]
})
result = filters.DataframeColumnRemover('PatientID').fit_transform(df)
expected = ['category', 'gender']
self.assertEqual(len(result.columns), 2)
self.assertEqual(list(result.columns).sort(), expected.sort())
def test_does_not_remove_lowercase_match(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'patientid': [1, 5, 4]
})
result = filters.DataframeColumnRemover('PatientID').fit_transform(df)
expected = ['category', 'gender', 'patientid']
self.assertEqual(len(result.columns), 3)
self.assertEqual(list(result.columns).sort(), expected.sort())
def test_removes_list(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'patientid': [1, 5, 4]
})
result = filters.DataframeColumnRemover(['gender', 'patientid', 'foo']).fit_transform(df)
expected = ['category']
self.assertEqual(len(result.columns), 1)
self.assertEqual(list(result.columns).sort(), expected.sort())
class TestDataframeNullValueFilter(unittest.TestCase):
# TODO test exclusions!
def test_raises_error_on_non_dataframe_inputs(self):
junk_inputs = [
'foo',
42,
[1, 2, 3],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], ],
{'a': 1}
]
for junk in junk_inputs:
self.assertRaises(HealthcareAIError, filters.DataframeColumnRemover(None).fit_transform, junk)
def test_removes_nothing_when_no_nulls_exist(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, 4]
})
result = filters.DataframeNullValueFilter().fit_transform(df)
self.assertEqual(len(result), 3)
def test_removes_row_with_single_null(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'age': [1, 5, None]
})
result = filters.DataframeNullValueFilter().fit_transform(df)
self.assertEqual(len(result), 2)
def test_removes_row_with_all_nulls(self):
df = pd.DataFrame({
'category': ['a', None, None],
'gender': ['F', 'M', None],
'age': [1, 5, None]
})
result = filters.DataframeNullValueFilter().fit_transform(df)
self.assertEqual(len(result), 1)
def test_removes_row_all_nulls_exception(self):
df = pd.DataFrame({'a': [1, None, 2, 3],
'b': ['m', 'f', None, 'f'],
'c': [3, 4, 5, None],
'd': [None, 8, 1, 3],
'label': ['Y', 'N', 'Y', 'N']})
self.assertRaises(HealthcareAIError, filters.DataframeNullValueFilter().fit_transform, df)
if __name__ == '__main__':
unittest.main()
|
recipes/Python/578080_Libreenect_OpenKinect_Minimum_Value_Joystick/recipe-578080.py | tdiprima/code | 2,023 | 12645281 | <gh_stars>1000+
#!/usr/bin/env python
"""
Kinect Demo using minimum values from the depth image.
@Author = <NAME>
@Date = 17 March, 2012
@Version = 1.1
@Filename = KinectJoystickMin.py
"""
from freenect import sync_get_depth as get_depth, sync_get_video as get_video, init, close_device, open_device, set_led
import cv
import numpy as np
import pygame
from math import *
from numpy import mean
def doloop():
#Series of commands to do pointer operations on the kinect (motor, led, accelerometer)
ctx = init() #Initiates device
mdev = open_device(ctx, 0) #Opens the device for commands
set_led(mdev, 1) #Sets LED to green
    close_device(mdev) #Closes device. Device must be closed immediately after usage
#Mean filter caches
yList = [0,0,0,0,0,0]
xList = [0,0,0,0,0,0]
#Sets color tuples
RED = (255,0,0)
BLUE = (0,0,255)
TEAL = (0,200,100)
BLACK = (0,0,0)
#Sets the size of the screen
xSize = 640
ySize = 480
done = False #Main while loop bool counter
pygame.init() #Initiates pygame
screen = pygame.display.set_mode((xSize, ySize), pygame.RESIZABLE) #Creates the pygame window
screen.fill(BLACK) #Fills the window black
#Initiates the xTempPos and yTempPos values so that the point will remain stationary
#if the minimum value is larger than 600
xTempPos = xSize/2
yTempPos = ySize/2
global depth, rgb #Makes the depth and rgb variables global
while not done:
screen.fill(BLACK) #Makes the pygame window black after each iteration
# Get a fresh frame
(depth,_) = get_depth()
(rgb, _) = get_video()
minVal = np.min(depth) #This is the minimum value from the depth image
minPos = np.argmin(depth) #This is the raw index of the minimum value above
xPos = np.mod(minPos, xSize) #This is the x component of the raw index
yPos = minPos//xSize #This is the y component of the raw index
#This is the mean filter process
"""
A mean filter works by collecting values in a cache list and taking the mean of them
to determine the final value. It works in this case to decrease the amount of
volatility the minimum position experiences to get a smoother display with a more
        consistent value. My computer works smoothly with a 5 bit cache whereas a faster
computer may need a larger cache and a slower computer may need a smaller cache
"""
xList.append(xPos)
del xList[0]
xPos = int(mean(xList))
yList.append(yPos)
del yList[0]
yPos = int(mean(yList))
"""
This if statement says that if the minimum value is below 600 to store the minimum
positions in xTempPos and yTempPos and to make the dot color red. Also if the minimum
value is larger than 600, xPos and yPos become the last stored minimum and maximum
positions. It also changes the color to purple
"""
if minVal < 600:
xTempPos = xPos
yTempPos = yPos
COLOR = cv.RGB(255,0,0)
else:
xPos = xTempPos
yPos = yTempPos
COLOR = cv.RGB(100,0,100)
cv.Circle(rgb, (xPos, yPos), 2, COLOR, 40) #draws a circle of a certain color at minimum position
cv.ShowImage('Image',rgb) #Shows the image
        cv.WaitKey(5) #Keyboard interrupt
"""
The if statement below sets up the virtual joystick by basically breaking the pygame
window into four parts. A dot representing the minimum position is drawn on the window
and the corresponding button based on the position is "pressed". The quarter of the
window in which the button "pressed" corresponds to turns teal after being "pressed"
Top Right : A
Bottom Right: B
Bottom Left : Y
        Top Left    : X
"""
if xPos <= xSize/2 and yPos <= ySize/2:
command = 'A'
rect1 = pygame.Rect((xSize/2,0),(xSize/2,ySize/2))
pygame.draw.rect(screen,TEAL,rect1)
elif xPos <= xSize/2 and yPos > ySize/2:
command = 'B'
rect1 = pygame.Rect((xSize/2,ySize/2),(xSize/2,ySize/2))
pygame.draw.rect(screen,TEAL,rect1)
elif xPos > xSize/2 and yPos <= ySize/2:
command = 'X'
rect1 = pygame.Rect((0,0),(xSize/2,ySize/2))
pygame.draw.rect(screen,TEAL,rect1)
else:
command = 'Y'
rect1 = pygame.Rect((0,ySize/2),(xSize/2,ySize/2))
pygame.draw.rect(screen,TEAL,rect1)
pygame.draw.line(screen, BLUE, (xSize/2, ySize/2), (xSize - xPos,yPos)) #Draws a line from the middle to the minimum position
pygame.draw.circle(screen, RED, (xSize - xPos,yPos), 10) #Draws the circle on pygame window
pygame.display.flip() #Displays the processed pygame window
print command, minVal #Prints the "pressed" button and the minimum value
        for e in pygame.event.get(): #Iterates through current events
if e.type is pygame.QUIT: #If the close button is pressed, the while loop ends
done = True
doloop()
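# Illustrative sketch only (not part of the original recipe): the mean filter
# described in the comments above, reduced to a standalone helper. A fixed-size
# cache of recent samples is kept, and the smoothed value is the mean of the cache.
def mean_filter(cache, new_sample):
    """Push new_sample into the cache, drop the oldest entry, return the mean."""
    cache.append(new_sample)
    del cache[0]
    return int(mean(cache))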
|
src/genie/libs/parser/iosxe/show_l2fib.py | balmasea/genieparser | 204 | 12645334 | <reponame>balmasea/genieparser
''' show_l2fib.py
IOSXE parsers for the following show commands:
* show l2fib path-list {id}
* show l2fib path-list detail
* show l2fib bridge-domain {bd_id} port
* show l2fib bridge-domain {bd_id} address unicast {mac_addr}
Copyright (c) 2021 by Cisco Systems, Inc.
All rights reserved.
'''
import re
# genie
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, ListOf, Optional
# ====================================================
# Schema for 'show l2fib path-list <id>'
# ====================================================
class ShowL2fibPathListIdSchema(MetaParser):
""" Schema for show l2fib path-list {id}
show l2fib path-list detail
"""
schema = {
'pathlist_id': {
Any(): {
'type': str,
'eth_seg': str,
'path_cnt': int,
'path_list': ListOf(
{
'path': str
}
)
}
}
}
# =============================================
# Parser for 'show l2fib path-list <id>'
# =============================================
class ShowL2fibPathListId(ShowL2fibPathListIdSchema):
""" Parser for show l2fib path-list {id}
show l2fib path-list detail
"""
cli_command = [
'show l2fib path-list {id}',
'show l2fib path-list detail'
]
def cli(self, id=None, output=None):
if output is None:
if id:
cli_cmd = self.cli_command[0].format(id=id)
else:
cli_cmd = self.cli_command[1]
cli_output = self.device.execute(cli_cmd)
else:
cli_output = output
# PathList ID : 28
p1 = re.compile(r'^PathList ID\s+:\s+(?P<path_list_id>\d+)$')
# PathList Type : MPLS_UC
p2 = re.compile(r'^PathList Type\s+:\s+(?P<type>\w+)$')
# Ethernet Segment : 0000.0000.0000.0000.0000
p3 = re.compile(r'^Ethernet Segment\s+:\s+'
r'(?P<eth_seg>[0-9a-fA-F\.]+)$')
# Path Count : 1
p4 = re.compile(r'^Path Count\s+:\s+(?P<path_cnt>\d+)$')
# Paths : [MAC][email protected]
p5 = re.compile(r'^[Paths]*\s*:\s+(?P<path>\[\w+\]\d+'
r'[0-9a-fA-F\.:@ ]+)$')
parser_dict = {}
for line in cli_output.splitlines():
line = line.strip()
if not line:
continue
# PathList ID : 28
m = p1.match(line)
if m:
group = m.groupdict()
path_list_ids_dict = parser_dict.setdefault('pathlist_id', {})
path_list = path_list_ids_dict.setdefault(int(group['path_list_id']), {})
paths = path_list.setdefault('path_list', [])
continue
# PathList Type : MPLS_UC
m = p2.match(line)
if m:
group = m.groupdict()
path_list.update({'type': group['type']})
continue
# Ethernet Segment : 0000.0000.0000.0000.0000
m = p3.match(line)
if m:
group = m.groupdict()
path_list.update({'eth_seg': group['eth_seg']})
continue
# Path Count : 1
m = p4.match(line)
if m:
group = m.groupdict()
path_list.update({'path_cnt': int(group['path_cnt'])})
continue
# Paths : [MAC][email protected]
m = p5.match(line)
if m:
group = m.groupdict()
path = group['path']
paths_dict = {}
paths_dict.update({'path': path})
paths.append(paths_dict)
continue
return parser_dict
# ====================================================
# Schema for 'show l2fib bridge-domain {bd_id} port'
# ====================================================
class ShowL2fibBdPortSchema(MetaParser):
""" Schema for show l2fib bridge-domain {bd-id} port """
schema = {
Any(): {
'type': str,
'is_path_list': bool,
Optional('port'): str,
Optional('path_list'): {
'id': int,
'path_count': int,
'type': str,
'description': str
}
}
}
# ==================================================
# Parser for 'show l2fib bridge-domain <bd_id> port'
# ==================================================
class ShowL2fibBdPort(ShowL2fibBdPortSchema):
""" Parser for show l2fib bridge-domain {bd_id} port """
cli_command = [ 'show l2fib bridge-domain {bd_id} port']
def cli(self, output=None, bd_id=None):
if output is None:
cli_output = self.device.execute(self.cli_command[0].format(bd_id=bd_id))
else:
cli_output = output
#BD_PORT Et0/2:12
p1 = re.compile(r'^(?P<port_type>BD_PORT)\s+(?P<interface>[\w:\/]+)$')
#VXLAN_REP PL:1191(1) T:VXLAN_REP [IR]20012:2.2.2.2
p2 = re.compile(r'^(?P<type>\w+)\s+PL:(?P<path_list_id>\d+)\((?P<path_list_count>\d+)\)'
r'\s+T:(?P<path_list_type>\w+)\s+(?P<path_list_desc>\[\w+\][0-9a-fA-F:@\.]+)$')
parser_dict = {}
for line in cli_output.splitlines():
line = line.strip()
if not line:
continue
#BD_PORT Et0/2:12
m = p1.match(line)
if m:
group = m.groupdict()
port_type = group['port_type']
interface_id = group['interface']
parser_dict.update({
interface_id: {
'type': port_type,
'is_path_list': False,
'port': interface_id
}
})
continue
#VXLAN_REP PL:1191(1) T:VXLAN_REP [IR]20012:2.2.2.2
m = p2.match(line)
if m:
group = m.groupdict()
parser_dict.update({
group['path_list_desc']: {
'type': group['type'],
'is_path_list': True,
'path_list': {
'id': int(group['path_list_id']),
'path_count': int(group['path_list_count']),
'type': group['path_list_type'],
'description': group['path_list_desc']
}
}
})
continue
return parser_dict
# ========================================================================
# Schema for 'show l2fib bridge-domain <bd_id> address unicast <mac_addr>'
# ========================================================================
class ShowL2fibBridgedomainAddressUnicastSchema(MetaParser):
""" Schema for show l2fib bridge-domin {bd-id} address unicast {mac_addr}"""
schema = {
'mac_addr': str,
'reference_count': int,
'epoch': int,
'producer': str,
'flags': ListOf(
str
),
'adjacency': {
Optional('path_list'): {
'path_list_id': int,
'path_list_count': int,
'path_list_type': str,
'path_list_desc': str,
},
Optional('olist'): {
'olist': int,
'port_count': int
},
'type': str,
'desc': str
},
'pd_adjacency': {
Optional('path_list'): {
'path_list_id': int,
'path_list_count': int,
'path_list_type': str,
'path_list_desc': str,
},
Optional('olist'): {
'olist': int,
'port_count': int
},
'type': str,
'desc': str
},
'packet_count': int,
'bytes': int
}
# ========================================================================
# Parser for 'show l2fib bridge-domain <bd_id> address unicast <mac_addr>'
# ========================================================================
class ShowL2fibBridgedomainAddressUnicast(ShowL2fibBridgedomainAddressUnicastSchema):
""" Parser for show l2fib bridge-domain {bd_id} address unicast {mac_addr}"""
cli_command = 'show l2fib bridge-domain {bd_id} address unicast {mac_addr}'
def cli(self, output=None, bd_id=None, mac_addr=None):
if output is None:
out = self.device.execute(self.cli_command.format(bd_id=bd_id, mac_addr=mac_addr))
else:
out = output
#MAC Address : aabb.0000.0002
p1 = re.compile(r'^MAC\s+Address\s+:\s+(?P<mac_addr>[a-fA-F0-9\.]+)$')
#Reference Count : 1
p2 = re.compile(r'^Reference\s+Count\s+:\s+(?P<reference_count>\d+)$')
#Epoch : 0
p3 = re.compile(r'^Epoch\s+:\s+(?P<epoch>\d+)$')
#Producer : BGP
#Producer : BD-ENG
p4 = re.compile(r'^Producer\s+:\s+(?P<producer>.+)$')
#Flags : Local Mac
p5 = re.compile(r'^Flags\s+:\s+(?P<flags>[\w\s]+)$')
#: CP Learn
p6 = re.compile(r'^:\s+(?P<flags>[\w\s]+)$')
#Adjacency : MPLS_UC PL:5(1) T:MPLS_UC [MAC][email protected]
#PD Adjacency : MPLS_UC PL:5(1) T:MPLS_UC [MAC][email protected]
p7 = re.compile(r'^(?P<tag>(Adjacency|PD Adjacency))\s+:\s+(?P<type>\w+)\s+PL:(?P<path_list_id>\d+)\((?P<path_list_count>\d+)\)'
r'\s+T:(?P<path_list_type>\w+)\s+(?P<path_list_desc>\[\w+\][0-9a-fA-F:@\.]+)$')
#Adjacency : Olist: 3, Ports: 1
#PD Adjacency : Olist: 3, Ports: 1
p8 = re.compile(r'^(?P<tag>(Adjacency|PD Adjacency))\s+:\s+Olist:\s+(?P<olist>\d+),\s+Ports:\s+(?P<port_count>\d+)$')
#Adjacency : VXLAN_CP L:20011:1.1.1.1 R:20012:2.2.2.2
#PD Adjacency : VXLAN_CP L:20011:1.1.1.1 R:20012:2.2.2.2
#Adjacency : BD_PORT Et0/1:11
#PD Adjacency : BD_PORT Et0/1:11
p9 = re.compile(r'^(?P<tag>(Adjacency|PD Adjacency))\s+:\s+(?P<type>\w+)\s+(?P<desc>.+)$')
#Packets : 0
p10 = re.compile(r'^Packets\s+:\s+(?P<packet_count>\d+)$')
#Bytes : 0
p11 = re.compile(r'^Bytes\s+:\s+(?P<bytes>\d+)$')
parser_dict = {}
for line in out.splitlines():
line = line.strip()
if not line:
continue
#MAC Address : aabb.0000.0002
m = p1.match(line)
if m:
group = m.groupdict()
parser_dict.update({
'mac_addr':group['mac_addr']
})
continue
#Reference Count : 1
m = p2.match(line)
if m:
group = m.groupdict()
parser_dict.update({'reference_count': int(group['reference_count']) })
continue
#Epoch : 0
m = p3.match(line)
if m:
group = m.groupdict()
parser_dict.update({'epoch': int(group['epoch'])})
continue
#Producer : BGP
m = p4.match(line)
if m:
group = m.groupdict()
parser_dict.update({'producer': group['producer']})
continue
#Flags : Local Mac
m = p5.match(line)
if m:
group = m.groupdict()
flags_list = parser_dict.setdefault('flags', [])
flags_list.append(group['flags'])
continue
#: CP Learn
m = p6.match(line)
if m:
group = m.groupdict()
flags_list.append(group['flags'])
continue
#Adjacency : MPLS_UC PL:5(1) T:MPLS_UC [MAC][email protected]
#PD Adjacency : MPLS_UC PL:5(1) T:MPLS_UC [MAC][email protected]
m = p7.match(line)
if m:
group = m.groupdict()
adj_subdict = {}
adj_subdict.update({
'type': group['type'],
'desc': 'PL:{pl_id}({pl_count}) T:{pl_type} {pl_desc}'.format( \
pl_id=group['path_list_id'], pl_count=group['path_list_count'], \
pl_type=group['path_list_type'], pl_desc=group['path_list_desc']),
'path_list': {
'path_list_id': int(group['path_list_id']),
'path_list_count': int(group['path_list_count']),
'path_list_type': group['path_list_type'],
'path_list_desc': group['path_list_desc']
}
})
if group['tag'] == 'Adjacency':
adjacency = parser_dict.setdefault('adjacency', adj_subdict)
elif group['tag'] == 'PD Adjacency':
pd_adjacency = parser_dict.setdefault('pd_adjacency', adj_subdict)
continue
#Adjacency : Olist: 3, Ports: 1
#PD Adjacency : Olist: 3, Ports: 1
m = p8.match(line)
if m:
group = m.groupdict()
adj_subdict = {}
adj_subdict.update({
'type': 'olist',
'desc': 'Olist: {olist}, Ports: {port_count}'.format(\
olist=group['olist'], port_count=group['port_count']),
'olist': {
'olist': int(group['olist']),
'port_count': int(group['port_count'])
}
})
if group['tag'] == 'Adjacency':
adjacency = parser_dict.setdefault('adjacency', adj_subdict)
elif group['tag'] == 'PD Adjacency':
pd_adjacency = parser_dict.setdefault('pd_adjacency', adj_subdict)
continue
#Adjacency : VXLAN_CP L:20011:1.1.1.1 R:20012:2.2.2.2
#PD Adjacency : VXLAN_CP L:20011:1.1.1.1 R:20012:2.2.2.2
#Adjacency : BD_PORT Et0/1:11
#PD Adjacency : BD_PORT Et0/1:11
m = p9.match(line)
if m:
group = m.groupdict()
adj_subdict = {}
adj_subdict.update({
'type': group['type'],
'desc': group['desc']
})
if group['tag'] == 'Adjacency':
parser_dict.setdefault('adjacency', adj_subdict)
elif group['tag'] == 'PD Adjacency':
parser_dict.setdefault('pd_adjacency', adj_subdict)
continue
#Packets : 0
m = p10.match(line)
if m:
group = m.groupdict()
parser_dict.update({'packet_count': int(group['packet_count'])})
continue
#Bytes : 0
m = p11.match(line)
if m:
group = m.groupdict()
parser_dict.update({'bytes': int(group['bytes'])})
continue
return parser_dict
|
model/node.py | leoriohope/RandWireNN | 757 | 12645340 | <gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
from .sep_conv import SeparableConv2d
class NodeOp(nn.Module):
def __init__(self, in_degree, in_channel, out_channel, stride):
super(NodeOp, self).__init__()
self.single = (in_degree == 1)
if not self.single:
self.agg_weight = nn.Parameter(torch.zeros(in_degree, requires_grad=True))
self.conv = SeparableConv2d(in_channel, out_channel, kernel_size=3, padding=1, stride=stride)
self.bn = nn.BatchNorm2d(out_channel)
def forward(self, y):
# y: [B, C, N, M, in_degree]
if self.single:
y = y.squeeze(-1)
else:
y = torch.matmul(y, torch.sigmoid(self.agg_weight)) # [B, C, N, M]
y = F.relu(y) # [B, C, N, M]
y = self.conv(y) # [B, C_out, N, M]
y = self.bn(y) # [B, C_out, N, M]
return y
# if __name__ == '__main__':
# x = torch.randn(7, 3, 224, 224, 5)
# node = NodeOp(5, 3, 4)
# y = node(x)
# print(y.shape) # [7, 4, 224, 224]
|
timy/settings.py | pnpnpn/timy | 300 | 12645347 | <reponame>pnpnpn/timy<gh_stars>100-1000
class TrackingMode(object):
PRINTING, LOGGING = range(0, 2)
TRACKING = True
TRACKING_MODE = TrackingMode.PRINTING
class TimyConfig(object):
DEFAULT_IDENT = 'Timy'
def __init__(self, tracking=TRACKING, tracking_mode=TRACKING_MODE):
self.tracking = tracking
self.tracking_mode = tracking_mode
timy_config = TimyConfig()
|
prefixspan/closed.py | avijit1258/PrefixSpan-py | 285 | 12645361 | <reponame>avijit1258/PrefixSpan-py
#! /usr/bin/env python3
from .localtyping import Optional, Set, Matches, DB, Pattern, List
def __reversescan(db: DB, patt: List[Optional[int]], matches: Matches):
def islocalclosed(previtem: Optional[int]) -> bool:
closeditems: Set[int] = set()
for k, (i, endpos) in enumerate(matches):
localitems = set()
for startpos in range(endpos - 1, -1, -1):
item = db[i][startpos]
if item == previtem:
matches[k] = (i, startpos)
break
localitems.add(item)
(closeditems.update if k == 0
else closeditems.intersection_update)(localitems)
return len(closeditems) > 0
return any(islocalclosed(previtem) for previtem in reversed(patt[:-1]))
def __forwardscan(db, matches):
# type: (DB, Matches) -> bool
closeditems = set() # type: Set[int]
for k, (i, endpos) in enumerate(matches):
localitems = set()
for startpos in range(endpos + 1, len(db[i])):
item = db[i][startpos]
localitems.add(item)
(closeditems.update if k == 0 else closeditems.intersection_update)(localitems)
return len(closeditems) > 0
def isclosed(db: DB, patt: Pattern, matches: Matches) -> bool:
    # Add a pseudo item indicating the start of sequence
    # Add a pseudo item indicating the end of sequence
return not __reversescan(
db,
[None, *patt, None],
[(i, len(db[i])) for i, _ in matches]
) and not __forwardscan(db, matches)
def canclosedprune(db: DB, patt: Pattern, matches: Matches) -> bool:
    # Add a pseudo item indicating the start of sequence
return __reversescan(db, [None, *patt], matches[:])
|
hammer/deployContract_example_web3.py | brave-experiments/chainhammer | 110 | 12645364 | <reponame>brave-experiments/chainhammer<gh_stars>100-1000
# taken from
# http://web3py.readthedocs.io/en/stable/contracts.html#contract-deployment-example
#
# then repaired:
# see issue 808 https://github.com/ethereum/web3.py/issues/808
# and extended
#
# and quorum-bugfixed, see
# https://github.com/ethereum/web3.py/issues/898#issuecomment-396701172
# tested with these versions:
# web3 4.2.0
# py-solc: 2.1.0
# solc 0.4.23+commit.124ca40d.Linux.gpp
# testrpc 1.3.4
# python 3.5.3
#
# does work with TestRPCProvider()
# does work with Energy Web//v1.12.0 (parity fork)
# now also works with Quorum 2.0.2 (fork of Geth/v1.7.2)
import json
import web3
from web3 import Web3, HTTPProvider
from solc import compile_source
from web3.contract import ConciseContract
# Solidity source code
contract_source_code = '''
pragma solidity ^0.4.21;
contract Greeter {
string public greeting;
function Greeter() public {
greeting = 'Hello';
}
function setGreeting(string _greeting) public {
greeting = _greeting;
}
function greet() view public returns (string) {
return greeting;
}
}
'''
compiled_sol = compile_source(contract_source_code) # Compiled source code
contract_interface = compiled_sol['<stdin>:Greeter']
# web3.py instance
# w3 = Web3(Web3.EthereumTesterProvider()) # wrong
# w3 = Web3(Web3.TestRPCProvider()) # works
# w3 = Web3(HTTPProvider('http://localhost:8545')) # works with Energy Web//v1.12.0 (account [0] must be unlocked)
# does NOT work with Quorum 2.0.2 --> Geth/v1.7.2
# Quorum = easiest way to run: as vagrant virtualbox
# step 1 https://github.com/jpmorganchase/quorum-examples#vagrant-usage
# step 2 https://github.com/jpmorganchase/quorum-examples/blob/master/examples/7nodes/README.md#7-nodes
w3 = Web3(HTTPProvider('http://localhost:22000'))
nodeName = "Quorum"
if nodeName == "Quorum":
# bugfix for quorum, see
# https://github.com/ethereum/web3.py/issues/898#issuecomment-396701172
from web3.middleware import geth_poa_middleware
# inject the poa compatibility middleware to the innermost layer
w3.middleware_stack.inject(geth_poa_middleware, layer=0)
# print client ID string:
print ("Node ID string:", w3.version.node)
# set pre-funded account as sender
w3.eth.defaultAccount = w3.eth.accounts[0]
print ("Sender's address", w3.eth.defaultAccount, "\n")
# Instantiate and deploy contract
Greeter = w3.eth.contract(abi=contract_interface['abi'], bytecode=contract_interface['bin'])
# Submit the transaction that deploys the contract
tx_hash = Greeter.constructor().transact()
print ("Tx submitted: ", w3.toHex(tx_hash)) # altered by me.
# Wait for the transaction to be mined, and get the transaction receipt
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
# Create the contract instance with the newly-deployed address
greeter = w3.eth.contract(
address=tx_receipt.contractAddress,
abi=contract_interface['abi'],
)
# Display the default greeting from the contract
print('Default contract greeting: {}'.format(
greeter.functions.greet().call()
))
print('Setting the greeting to Nihao...')
tx_hash = greeter.functions.setGreeting('Nihao').transact()
# Wait for transaction to be mined...
w3.eth.waitForTransactionReceipt(tx_hash)
# Display the new greeting value
print('Updated contract greeting: {}'.format(
greeter.functions.greet().call()
))
# When issuing a lot of reads, try this more concise reader:
reader = ConciseContract(greeter)
assert reader.greet() == "Nihao"
|
models/FreeAnchor/builder.py | shouwangzhe134/simpledet | 3,195 | 12645374 | from __future__ import division
from __future__ import print_function
import math
import mxnext as X
from models.retinanet.builder import RetinaNetHead
class FreeAnchorRetinaNet(object):
def __init__(self):
pass
@staticmethod
def get_train_symbol(backbone, neck, head):
gt_bbox = X.var("gt_bbox")
im_info = X.var("im_info")
feat = backbone.get_rpn_feature()
feat = neck.get_rpn_feature(feat)
head.get_anchor()
loss = head.get_loss(feat, gt_bbox, im_info)
return X.group(loss)
@staticmethod
def get_test_symbol(backbone, neck, head):
im_info = X.var("im_info")
im_id = X.var("im_id")
rec_id = X.var("rec_id")
feat = backbone.get_rpn_feature()
feat = neck.get_rpn_feature(feat)
head.get_anchor()
cls_score, bbox_xyxy = head.get_prediction(feat, im_info)
return X.group([rec_id, im_id, im_info, cls_score, bbox_xyxy])
class FreeAnchorRetinaNetHead(RetinaNetHead):
def __init__(self, pRpn):
super().__init__(pRpn)
# reinit bias for cls
prior_prob = 0.02
pi = - math.log((1 - prior_prob) / prior_prob)
self.cls_pred_bias = X.var("cls_pred_bias", init=X.constant(pi))
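        # Focal-loss style prior initialization (as in RetinaNet): with this bias,
        # sigmoid(pi) == prior_prob, so every anchor starts with a low foreground
        # score and the classification loss is stable in early iterations.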
self.anchor_dict = None
def get_anchor(self):
p = self.p
num_anchor = len(p.anchor_generate.ratio) * len(p.anchor_generate.scale)
stride = p.anchor_generate.stride
anchor_dict = {}
for s in stride:
max_side = p.anchor_generate.max_side // s
anchors = X.var("anchor_stride%s" % s,
shape=(1, 1, max_side, max_side, num_anchor * 4),
dtype='float32') # (1, 1, long_side, long_side, #anchor * 4)
anchor_dict["stride%s" % s] = anchors
self.anchor_dict = anchor_dict
def get_loss(self, conv_feat, gt_bbox, im_info):
import mxnet as mx
p = self.p
stride = p.anchor_generate.stride
if not isinstance(stride, tuple):
            stride = (stride,)
num_class = p.num_class
num_base_anchor = len(p.anchor_generate.ratio) * len(p.anchor_generate.scale)
image_per_device = p.batch_image
cls_logit_dict, bbox_delta_dict = self.get_output(conv_feat)
cls_logit_reshape_list = []
bbox_delta_reshape_list = []
feat_list = []
scale_loss_shift = 128.0 if p.fp16 else 1.0
# reshape logit and delta
for s in stride:
# (N, A * C, H, W) -> (N, A * C, H * W)
cls_logit = X.reshape(
data=cls_logit_dict["stride%s" % s],
shape=(0, 0, -1),
name="cls_stride%s_reshape" % s
)
# (N, A * 4, H, W) -> (N, A * 4, H * W)
bbox_delta = X.reshape(
data=bbox_delta_dict["stride%s" % s],
shape=(0, 0, -1),
name="bbox_stride%s_reshape" % s
)
cls_logit_reshape_list.append(cls_logit)
bbox_delta_reshape_list.append(bbox_delta)
feat_list.append(cls_logit_dict["stride%s" % s])
# cls_logits -> (N, H' * W' * A, C)
cls_logits = X.concat(cls_logit_reshape_list, axis=2, name="cls_logit_concat")
cls_logits = X.transpose(cls_logits, axes=(0, 2, 1), name="cls_logit_transpose")
cls_logits = X.reshape(cls_logits, shape=(0, -1, num_class - 1), name="cls_logit_reshape")
cls_prob = X.sigmoid(cls_logits)
# bbox_deltas -> (N, H' * W' * A, 4)
bbox_deltas = X.concat(bbox_delta_reshape_list, axis=2, name="bbox_delta_concat")
bbox_deltas = X.transpose(bbox_deltas, axes=(0, 2, 1), name="bbox_delta_transpose")
bbox_deltas = X.reshape(bbox_deltas, shape=(0, -1, 4), name="bbox_delta_reshape")
anchor_list = [self.anchor_dict["stride%s" % s] for s in stride]
bbox_thr = p.anchor_assign.bbox_thr
pre_anchor_top_n = p.anchor_assign.pre_anchor_top_n
alpha = p.focal_loss.alpha
gamma = p.focal_loss.gamma
anchor_target_mean = p.head.mean or (0, 0, 0, 0)
anchor_target_std = p.head.std or (1, 1, 1, 1)
from models.FreeAnchor.ops import _prepare_anchors, _positive_loss, _negative_loss
anchors = _prepare_anchors(
mx.sym, feat_list, anchor_list, image_per_device, num_base_anchor)
positive_loss = _positive_loss(
mx.sym, anchors, gt_bbox, cls_prob, bbox_deltas, image_per_device,
alpha, pre_anchor_top_n, anchor_target_mean, anchor_target_std
)
positive_loss = X.make_loss(
data=positive_loss,
grad_scale=1.0 * scale_loss_shift,
name="positive_loss"
)
negative_loss = _negative_loss(
mx.sym, anchors, gt_bbox, cls_prob, bbox_deltas, im_info, image_per_device,
num_class, alpha, gamma, pre_anchor_top_n, bbox_thr,
anchor_target_mean, anchor_target_std
)
negative_loss = X.make_loss(
data=negative_loss,
grad_scale=1.0 * scale_loss_shift,
name="negative_loss"
)
return positive_loss, negative_loss
def get_prediction(self, conv_feat, im_info):
import mxnet as mx
p = self.p
num_class = p.num_class
stride = p.anchor_generate.stride
if not isinstance(stride, tuple):
            stride = (stride,)
pre_nms_top_n = p.proposal.pre_nms_top_n
anchor_target_mean = p.head.mean or (0, 0, 0, 0)
anchor_target_std = p.head.std or (1, 1, 1, 1)
cls_logit_dict, bbox_delta_dict = self.get_output(conv_feat)
from models.FreeAnchor.ops import _proposal_retina
cls_score_list = []
bbox_xyxy_list = []
for s in stride:
cls_prob = X.sigmoid(data=cls_logit_dict["stride%s" % s])
bbox_delta = bbox_delta_dict["stride%s" % s]
anchors = self.anchor_dict["stride%s" % s]
pre_nms_top_n_level = -1 if s == max(stride) else pre_nms_top_n
bbox_xyxy, cls_score = _proposal_retina(
F=mx.sym,
cls_prob=cls_prob,
bbox_pred=bbox_delta,
anchors=anchors,
im_info=im_info,
batch_size=1,
rpn_pre_nms_top_n=pre_nms_top_n_level,
num_class=num_class,
anchor_mean=anchor_target_mean,
anchor_std=anchor_target_std
)
cls_score_list.append(cls_score)
bbox_xyxy_list.append(bbox_xyxy)
cls_score = X.concat(cls_score_list, axis=1, name="cls_score_concat")
bbox_xyxy = X.concat(bbox_xyxy_list, axis=1, name="bbox_xyxy_concat")
return cls_score, bbox_xyxy
|
tests/utils/test_text.py | PyCN/pulsar | 1,410 | 12645406 | '''Tests the tools and utilities in pulsar.utils.'''
import unittest
from pulsar.utils.log import lazy_string
@lazy_string
def blabla(n):
return 'AAAAAAAAAAAAAAAAAAAA %s' % n
class TestTextUtils(unittest.TestCase):
def testLazy(self):
r = blabla(3)
self.assertEqual(r.value, None)
v = str(r)
self.assertEqual(v, 'AAAAAAAAAAAAAAAAAAAA 3')
self.assertEqual(r.value, v)
|
ldap_connector.py | shellster/LDAPPER | 163 | 12645429 | import datetime
import json
import logging
import queue
import re
import threading
import time
import ldap3
import OpenSSL
from impacket.ldap import ldap
import utilities
"""
WARNING:
If you try to borrow this code, please be aware:
Neither LDAP connector is thread safe!
Impacket library may not be thread safe, more research is needed.
LDAP3 must be instantiated with special parameters to be thread safe (which this library is not doing right now):
https://ldap3.readthedocs.io/en/latest/index.html?highlight=thread#welcome-to-ldap3-s-documentation
"""
class LDAP3Connector:
basedn = None
conn = None
servers = []
_isconnected = False
def __init__(self, server, sec_level, domain, username, password, basedn=None, pagesize=10, maxrecord=100, delay=0):
self.domain = domain
self.username = username
self.password = password
self.basedn = basedn
self.pagesize = pagesize
self.maxrecord = maxrecord
self.delay = delay
self.sec_level = sec_level
self.server = None
# Set Encoding to UTF-8
ldap3.set_config_parameter("DEFAULT_ENCODING", "utf-8")
        # Use LDAPS on port 636 when sec_level is 3, otherwise plain LDAP on port 389
if sec_level == 3:
self.server = ldap3.Server(server, port=636, get_info=ldap3.ALL, use_ssl=True)
else:
self.server = ldap3.Server(server, port=389, get_info=ldap3.ALL)
self.conn = ldap3.Connection(
self.server,
user="{0}\\{1}".format(self.domain, self.username),
password=self.password,
authentication=ldap3.NTLM,
read_only=True,
)
if sec_level == 2:
try:
self.conn.start_tls()
except ldap3.core.exceptions.LDAPStartTLSError:
pass
if self.conn.bind():
if not self.basedn:
self.basedn = self.conn.server.info.other["defaultNamingContext"][0]
if not self.basedn:
self.basedn = utilities.attempt_to_derive_basedn(
server.ip, self.domain, self.username, self.password
)
if not self.basedn:
raise Exception("Unable to derive baseDN")
else:
raise Exception("Unable to connect to server")
def search(self, search, attributes):
if not attributes:
attributes = ldap3.ALL_ATTRIBUTES
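        # Paged search: 1.2.840.113556.1.4.319 (read from the result controls below)
        # is the RFC 2696 Simple Paged Results control; the server returns an opaque
        # cookie that must be echoed on the next search call to fetch the next page.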
self.conn.search(
self.basedn,
search,
search_scope=ldap3.SUBTREE,
attributes=attributes,
paged_size=self.pagesize,
)
cookie = self.conn.result["controls"]["1.2.840.113556.1.4.319"]["value"][
"cookie"
]
        looptrack = ""  # the loop-detection check below compares against ""
while True:
for raw_entry in self.conn.entries:
if looptrack == "":
looptrack = raw_entry["cn"]
elif looptrack == raw_entry["cn"]:
# In spite of cookie paging, AD starts looping forever so we detect loop and break
                    cookie = b""  # empty cookie ends the paging loop after this page
break
# Impacket library returns strings for everything, so we do that here to ensure similar behavior to ldap3
entry = {}
keys = []
if isinstance(attributes, list):
keys = attributes
else:
keys = list(raw_entry.entry_attributes_as_dict)
for key in keys:
if key in raw_entry:
if len(raw_entry[key]) == 0:
entry[key.lower()] = ""
elif len(raw_entry[key]) > 1: # This is a list
entry[key.lower()] = [str(x) for x in raw_entry[key]]
else:
entry[key.lower()] = str(raw_entry[key])
yield entry
if len(cookie) == 0:
break
self.conn.search(
self.basedn,
search,
search_scope=ldap3.SUBTREE,
attributes=attributes,
paged_size=self.pagesize,
paged_cookie=cookie,
)
cookie = self.conn.result["controls"]["1.2.840.113556.1.4.319"]["value"][
"cookie"
]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.conn.close()
except Exception:
pass
class _ImpacketRecordHandler:
"""
Class exists to basically "curry" the Impacket entry handler callback to pass a per-thread
queue in the class context. This should make this particular piece thread safe and make
exceptions less devastating.
"""
thread_queue = None
attributes = []
def __init__(self, thread_queue, attributes, delay):
self.thread_queue = thread_queue
self.attributes = attributes
self.delay = delay
def handle_record(self, item):
# Make sure all searched attributes are included in result
entry = {k: "" for k in self.attributes}
try:
for attribute in item["attributes"]:
name = str(attribute["type"]).lower()
data = None
if name in ["objectguid"]:
                    # Reformatting to match ldap3 format:
data = "".join("%02x" % b for b in attribute["vals"][0].asOctets())
data = "{{{0}-{1}-{2}-{3}-{4}}}".format(
"".join(utilities.splitn(data[0:8], 2)[::-1]),
"".join(utilities.splitn(data[8:12], 2)[::-1]),
"".join(utilities.splitn(data[12:16], 2)[::-1]),
data[16:20],
data[20:],
)
elif name == "objectsid":
data = utilities.binary_to_sid(attribute["vals"][0])
else:
data = []
for item in attribute["vals"]:
try:
data.append(item.asOctets().decode("utf-8"))
except UnicodeDecodeError:
data.append("".join("\\x%02x" % b for b in item.asOctets()))
for i in range(len(data)):
if re.match(r"^\d{14}\.\dZ$", data[i]):
data[i] = datetime.datetime.strptime(data[i][:-1], '%Y%m%d%H%M%S.%f').replace(tzinfo=datetime.timezone.utc)
data[i] = data[i].strftime('%Y-%m-%d %H:%M:%S+00:00')
elif re.search(r"^\d{18,19}$", data[i]):
try:
data[i] = utilities.ldap_to_unix_timestamp(data[i]).strftime("%Y-%m-%d %H:%M:%S+00:00")
except Exception:
pass
if len(data) == 0:
data = ""
elif len(data) == 1:
data = data[0]
entry[name] = data
self.thread_queue.put(entry)
time.sleep(self.delay)
except TypeError:
pass
except Exception:
            logging.exception("Unexpected error while handling LDAP record")
pass
class ImpacketLDAPConnector:
basedn = None
conn = None
servers = []
attributes = []
_isconnected = False
def __init__(self, server, sec_level, domain, username, password, basedn=None, pagesize=10, maxrecord=100, delay=0):
self.domain = domain
self.username = username
self.password = password
self.basedn = basedn
self.pagesize = pagesize
self.maxrecord = maxrecord
self.delay = delay
self.server = None
if sec_level == 3:
self.server = "ldaps://{}".format(server)
else:
self.server = "ldap://{}".format(server)
if not self.basedn:
self.basedn = utilities.attempt_to_derive_basedn(
server.split("/")[-1], self.domain, self.username, self.password
)
if not self.basedn:
raise Exception("Unable to derive baseDN")
self.conn = ldap.LDAPConnection(self.server, self.basedn, None)
self.conn.login(self.username, self.password, self.domain)
def search(self, search, attributes):
try:
"""
Impacket either returns all results or calls a callback method for every result.
We wrap this in a thread and queue so that we can slow it down and bunch our results
as we want. We do need to make sure that our processing is fast enough that the LDAP
connection does not time out.
"""
sc = ldap.SimplePagedResultsControl(size=self.pagesize)
thread_queue = queue.Queue(self.pagesize)
record_handler = _ImpacketRecordHandler(thread_queue, attributes, self.delay)
self.attributes = attributes
t = threading.Thread(
target=self.conn.search,
kwargs={
"searchFilter": search,
"attributes": attributes,
"sizeLimit": self.maxrecord,
"searchControls": [sc],
"perRecordCallback": record_handler.handle_record,
},
)
t.daemon = True
t.start()
while True:
try:
yield thread_queue.get(block=False)
except queue.Empty:
# If nothing in queue, and the ldap query has died or finished we can exit
if not t.is_alive():
break
except ldap.LDAPSearchError as ex:
raise ex
except Exception as ex:
raise ex
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.conn.close()
except Exception:
pass |
python/slalom/models.py | LukeZheZhu/slalom | 128 | 12645467 | import tensorflow as tf
import keras.backend as K
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.mobilenet import MobileNet
from python.slalom.resnet import ResNet50
from python.slalom.resnet2 import ResNet34 as ResNet2_34, ResNet50 as ResNet2_50, ResNet101 as ResNet2_101, \
ResNet152 as ResNet2_152
from keras.layers import Input
from python.slalom.utils import preprocess_vgg, print_model_size
from python.preprocessing.preprocessing_factory import get_preprocessing
from python.slalom.mobilenet_sep import MobileNet_sep
import numpy as np
import cv2
from keras.applications.resnet50 import preprocess_input
def preproc(img):
# Resize
img = img.astype(np.uint8)
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
height, width, _ = img.shape
new_height = height * 256 // min(img.shape[:2])
new_width = width * 256 // min(img.shape[:2])
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
# Crop
height, width, _ = img.shape
startx = width // 2 - (224 // 2)
starty = height // 2 - (224 // 2)
img = img[starty:starty + 224, startx:startx + 224]
assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
return preprocess_input(img.astype(np.float32))
def preproc_tf(x, h=224, w=224):
img = tf.py_func(preproc, [x], [tf.float32])[0]
img.set_shape([h, w, 3])
return img
def get_model(model_name, batch_size, include_top=True, double_prec=False):
if model_name in ['vgg_16']:
h = w = 224
assert(h % 2**5 == 0)
num_classes = 1000
model_func = lambda weights, inp: VGG16(include_top=include_top, weights=weights, input_tensor=inp, input_shape=(h, w, 3), pooling=None, classes=num_classes)
preprocess_func = preprocess_vgg
bits_w = 8
bits_x = 0
elif model_name in ['vgg_19']:
h = w = 224
num_classes = 1000
model_func = lambda weights, inp: VGG19(include_top=include_top, weights=weights, input_tensor=inp, input_shape=(h, w, 3), pooling=None, classes=num_classes)
preprocess_func = preprocess_vgg
bits_w = 8
bits_x = 0
elif model_name in ['mobilenet']:
h = w = 224
num_classes = 1000
model_func = lambda weights, inp: MobileNet(include_top=include_top, weights=weights, input_tensor=inp, input_shape=(224, 224, 3), pooling=None, classes=num_classes)
preprocess_func = get_preprocessing('mobilenet_v1')
bits_w = 8
bits_x = 8
elif model_name in ['mobilenet_sep']:
h = w = 224
num_classes = 1000
model_func = lambda weights, inp: MobileNet_sep(include_top=include_top, input_tensor=inp, input_shape=(224, 224, 3), pooling=None, classes=num_classes)
preprocess_func = get_preprocessing('mobilenet_v1')
bits_w = 8
bits_x = 8
elif 'resnet' in model_name:
h = w = 224
num_classes = 1000
num_layers = int(model_name.split('_')[-1])
model_func = lambda weights, inp: ResNet50(include_top=include_top, input_tensor=inp,
input_shape=(224, 224, 3), pooling=None, classes=num_classes, layers=num_layers)
"""
if model_name == "resnet_34":
model_func = lambda weights, inp: ResNet2_34((224, 224, 3), classes=num_classes, input_tensor=inp)
elif model_name == "resnet_50":
model_func = lambda weights, inp: ResNet2_50((224, 224, 3), classes=num_classes, input_tensor=inp)
elif model_name == "resnet_101":
model_func = lambda weights, inp: ResNet2_101((224, 224, 3), classes=num_classes, input_tensor=inp)
elif model_name == "resnet_152":
model_func = lambda weights, inp: ResNet2_152((224, 224, 3), classes=num_classes, input_tensor=inp)
else:
raise AttributeError("unknown model {}".format(model_name))
"""
preprocess_func = lambda x, h_, w_: preproc_tf(x, h_, w_)
bits_w = 12
bits_x = 4
else:
raise AttributeError("unknown model {}".format(model_name))
images = tf.placeholder(dtype=tf.float32, shape=(batch_size, h, w, 3))
model = model_func("imagenet", images)
preprocess = lambda x: preprocess_func(x, h, w)
if double_prec:
images_dbl = tf.placeholder(dtype=tf.float64, shape=(batch_size, h, w, 3))
images_dbl = Input(images_dbl, (batch_size, h, w, 3), dtype=tf.float64)
K.set_floatx('float64')
model_dbl = model_func(None, images_dbl)
for i in range(len(model_dbl.layers)):
weights = model.layers[i].get_weights()
weights_dbl = [None if w is None else w.astype(np.float64) for w in weights]
model_dbl.layers[i].set_weights(weights_dbl)
model = model_dbl
preprocess = lambda x: preprocess_func(x, h, w, dtype=tf.float64)
K.set_floatx('float32')
print(model.summary())
print_model_size(model)
res = {}
res['preprocess'] = preprocess
res['bits_w'] = bits_w
res['bits_x'] = bits_x
return model, res
|
examples/security_demo.py | Michae1Weiss/spectree | 183 | 12645491 | <gh_stars>100-1000
from flask import Flask
from pydantic import BaseModel
from spectree import SecurityScheme, SpecTree
class Req(BaseModel):
name: str
security_schemes = [
SecurityScheme(
name="PartnerID",
data={"type": "apiKey", "name": "partner-id", "in": "header"},
),
SecurityScheme(
name="PartnerToken",
data={"type": "apiKey", "name": "partner-access-token", "in": "header"},
),
SecurityScheme(
name="test_secure",
data={
"type": "http",
"scheme": "bearer",
},
),
SecurityScheme(
name="auth_oauth2",
data={
"type": "oauth2",
"flows": {
"authorizationCode": {
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"read": "Grants read access",
"write": "Grants write access",
"admin": "Grants access to admin operations",
},
},
},
},
),
]
app = Flask(__name__)
api = SpecTree(
"flask",
security_schemes=security_schemes,
SECURITY={"test_secure": []},
)
@app.route("/ping", methods=["POST"])
@api.validate(
json=Req,
security=[{"PartnerID": [], "PartnerToken": []}, {"auth_oauth2": ["read"]}],
)
def ping():
return "pong"
@app.route("/")
def index():
return "hello"
if __name__ == "__main__":
api.register(app)
app.run(port=8000)
|
Python3/451.py | rakhi2001/ecom7 | 854 | 12645548 | __________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def frequencySort(self, s: str) -> str:
a = set(s)
a = [a*s.count(a) for a in a]
a = sorted(a, key = lambda x: len(x), reverse = True)
return ''.join(a)
__________________________________________________________________________________________________
sample 13584 kb submission
class Solution:
def frequencySort(self, s: str) -> str:
freq = dict()
for c in s:
if c in freq:
freq[c]+=1
else:
freq[c]=1
freq_r = dict()
for k,v in freq.items():
if v in freq_r:
freq_r[v].append(k)
else:
freq_r[v]=[k]
freq_list = list(freq_r.keys())
freq_list.sort(reverse=True)
output = ""
for f in freq_list:
for c in freq_r[f]:
for i in range(f):
output+=c
return output
__________________________________________________________________________________________________
|
tensorflow_model_optimization/python/core/quantization/keras/layers/conv_batchnorm_test_utils.py | ptesan777/model-optimization | 848 | 12645568 | <filename>tensorflow_model_optimization/python/core/quantization/keras/layers/conv_batchnorm_test_utils.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for conv batchnorm folding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_optimization.python.core.quantization.keras.layers import conv_batchnorm
keras = tf.keras
_ConvBatchNorm2D = conv_batchnorm._ConvBatchNorm2D # pylint: disable=protected-access
_DepthwiseConvBatchNorm2D = conv_batchnorm._DepthwiseConvBatchNorm2D # pylint: disable=protected-access
def _get_conv2d_params():
return {
'kernel_size': (3, 3),
'input_shape': (10, 10, 3),
'batch_size': 8,
}
def _get_initializer(random_init):
if random_init:
kernel_initializer = keras.initializers.glorot_uniform()
else:
kernel_initializer = keras.initializers.glorot_uniform(seed=0)
return kernel_initializer
class Conv2DModel(object):
"""Construct and access Conv + BatchNorm + activation models."""
params = {
'filters': 2,
'kernel_size': (2, 2),
'input_shape': (3, 3, 3),
'batch_size': 1,
}
@classmethod
def get_batched_input_shape(cls):
"""Return input shape with batch size."""
shape = [cls.params['batch_size']]
shape.extend(cls.params['input_shape'])
return shape
@classmethod
def get_output_shape(cls):
return [cls.params['batch_size'], 2, 2, 2]
@classmethod
def get_folded_batchnorm_model(cls,
is_quantized=False,
post_bn_activation=None):
"""Return folded Conv2D + BN + optional activation model."""
return tf.keras.Sequential([
_ConvBatchNorm2D(
kernel_initializer=_get_initializer(random_init=False),
is_quantized=is_quantized,
post_activation=post_bn_activation,
**cls.params)
])
@classmethod
def get_nonfolded_batchnorm_model(cls,
post_bn_activation=None,
model_type='sequential',
random_init=False):
"""Return nonfolded Conv2D + BN + optional activation model."""
if model_type == 'sequential':
layers = [
keras.layers.Conv2D(
kernel_initializer=_get_initializer(random_init),
use_bias=False,
**cls.params),
keras.layers.BatchNormalization(axis=-1),
]
if post_bn_activation is not None:
layers += post_bn_activation
return tf.keras.Sequential(layers)
else:
inp = keras.layers.Input(cls.params['input_shape'],
cls.params['batch_size'])
x = keras.layers.Conv2D(
cls.params['filters'],
cls.params['kernel_size'],
kernel_initializer=_get_initializer(random_init),
use_bias=False)(
inp)
out = keras.layers.BatchNormalization(axis=-1)(x)
if post_bn_activation is not None:
out = post_bn_activation(out)
return tf.keras.Model(inp, out)
class DepthwiseConv2DModel(Conv2DModel):
"""Construct and access DepthwiseConv + BatchNorm + activation models."""
params = {
'kernel_size': (3, 3),
'input_shape': (10, 10, 3),
'batch_size': 8,
}
@classmethod
def get_output_shape(cls):
return [cls.params['batch_size'], 8, 8, 3]
@classmethod
def get_folded_batchnorm_model(cls,
is_quantized=False,
post_bn_activation=None):
return tf.keras.Sequential([
_DepthwiseConvBatchNorm2D(
depthwise_initializer=_get_initializer(random_init=False),
is_quantized=is_quantized,
post_activation=post_bn_activation,
**cls.params)
])
@classmethod
def get_nonfolded_batchnorm_model(cls,
post_bn_activation=None,
model_type='sequential',
random_init=False):
if model_type == 'sequential':
layers = [
keras.layers.DepthwiseConv2D(
depthwise_initializer=_get_initializer(random_init),
use_bias=False,
**cls.params),
keras.layers.BatchNormalization(axis=-1),
]
if post_bn_activation is not None:
layers += post_bn_activation
return tf.keras.Sequential(layers)
else:
inp = keras.layers.Input(cls.params['input_shape'],
cls.params['batch_size'])
x = keras.layers.DepthwiseConv2D(
cls.params['kernel_size'],
depthwise_initializer=_get_initializer(random_init),
use_bias=False)(
inp)
out = keras.layers.BatchNormalization(axis=-1)(x)
if post_bn_activation is not None:
out = post_bn_activation(out)
return tf.keras.Model(inp, out)
|
runtime/java_heap.py | 5A59/Zvm | 485 | 12645582 | # coding=utf-8
class JavaHeap:
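    """Placeholder for the JVM heap; no heap storage is implemented yet."""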
def __init__(self):
pass
|
contrib/tools/python/src/Lib/plat-aix3/IN.py | HeyLey/catboost | 6,989 | 12645586 | <reponame>HeyLey/catboost
# Generated by h2py from /usr/include/netinet/in.h
# Included from net/nh.h
# Included from sys/machine.h
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
DEFAULT_GPR = 0xDEADBEEF
MSR_EE = 0x8000
MSR_PR = 0x4000
MSR_FP = 0x2000
MSR_ME = 0x1000
MSR_FE = 0x0800
MSR_FE0 = 0x0800
MSR_SE = 0x0400
MSR_BE = 0x0200
MSR_IE = 0x0100
MSR_FE1 = 0x0100
MSR_AL = 0x0080
MSR_IP = 0x0040
MSR_IR = 0x0020
MSR_DR = 0x0010
MSR_PM = 0x0004
DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
CR_LT = 0x80000000
CR_GT = 0x40000000
CR_EQ = 0x20000000
CR_SO = 0x10000000
CR_FX = 0x08000000
CR_FEX = 0x04000000
CR_VX = 0x02000000
CR_OX = 0x01000000
XER_SO = 0x80000000
XER_OV = 0x40000000
XER_CA = 0x20000000
def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
def XER_LENGTH(xer): return (xer & 0x0000007F)
DSISR_IO = 0x80000000
DSISR_PFT = 0x40000000
DSISR_LOCK = 0x20000000
DSISR_FPIO = 0x10000000
DSISR_PROT = 0x08000000
DSISR_LOOP = 0x04000000
DSISR_DRST = 0x04000000
DSISR_ST = 0x02000000
DSISR_SEGB = 0x01000000
DSISR_DABR = 0x00400000
DSISR_EAR = 0x00100000
SRR_IS_PFT = 0x40000000
SRR_IS_ISPEC = 0x20000000
SRR_IS_IIO = 0x10000000
SRR_IS_PROT = 0x08000000
SRR_IS_LOOP = 0x04000000
SRR_PR_FPEN = 0x00100000
SRR_PR_INVAL = 0x00080000
SRR_PR_PRIV = 0x00040000
SRR_PR_TRAP = 0x00020000
SRR_PR_IMPRE = 0x00010000
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_LOCAL = 63
IPPROTO_EON = 80
IPPROTO_BIP = 0x53
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_TIMESERVER = 37
def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_LOOPBACK = 0x7f000001
INADDR_BROADCAST = 0xffffffff
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
|
examples/jbnmr_examples/s2_s3_1d_plotting/plot_1d_pipe_time.py | genematx/nmrglue | 150 | 12645589 | <filename>examples/jbnmr_examples/s2_s3_1d_plotting/plot_1d_pipe_time.py<gh_stars>100-1000
import nmrglue as ng
import matplotlib.pyplot as plt
# read in the data from a NMRPipe file
dic, data = ng.pipe.read("test.fid")
# make a unit conversion object for the axis
uc = ng.pipe.make_uc(dic, data)
# plot the spectrum
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(uc.ms_scale(), data.real, 'k-')
# decorate axes
ax.set_yticklabels([])
ax.set_xlabel("Time (ms)")
ax.set_ylim(-100000, 100000)
# save the figure
fig.savefig("fid.png")
|
tests/conftest.py | Jyrno42/pytest-cookies | 134 | 12645595 | # -*- coding: utf-8 -*-
import collections
import json
import pytest
pytest_plugins = "pytester"
@pytest.fixture(name="cookiecutter_template")
def fixture_cookiecutter_template(tmpdir):
template = tmpdir.ensure("cookiecutter-template", dir=True)
template_config = collections.OrderedDict(
[("repo_name", "foobar"), ("short_description", "Test Project")]
)
template.join("cookiecutter.json").write(json.dumps(template_config))
template_readme = "\n".join(
[
"{{cookiecutter.repo_name}}",
"{% for _ in cookiecutter.repo_name %}={% endfor %}",
"{{cookiecutter.short_description}}",
]
)
repo = template.ensure("{{cookiecutter.repo_name}}", dir=True)
repo.join("README.rst").write(template_readme)
return template
|
mpunet/image/queue/base_queue.py | alexsosn/MultiPlanarUNet | 156 | 12645597 | <filename>mpunet/image/queue/base_queue.py
import numpy as np
from mpunet.logging.default_logger import ScreenLogger
class BaseQueue:
"""
The base queue object defines the Queue API and stores basic attributes
used across all queue objects
The BaseQueue should normally not be initialized directly.
"""
def __init__(self, dataset, logger=None):
"""
        Stores the dataset and logger shared by all queue implementations.
        Args:
            dataset: Dataset object whose images this queue serves.
            logger: Optional logger instance; defaults to a ScreenLogger.
"""
self.dataset = dataset
self.logger = logger or ScreenLogger()
@property
def all_loaded(self):
        raise NotImplementedError
def __len__(self):
return len(self.dataset)
def __iter__(self):
for i in range(len(self.dataset.images)):
yield self.get_image_by_idx(i)
def __getitem__(self, idx):
return self.get_image_by_idx(idx)
def get_image_iterator(self, max_load=None):
load_inds = np.arange(len(self))
if max_load and max_load < len(self):
load_inds = np.random.choice(load_inds, max_load, False)
for idx in load_inds:
yield self.get_image_by_idx(idx)
def get_random_image(self):
return np.random.choice(self.dataset.images, 1)[0]
def get_image_by_idx(self, image_idx):
return self.dataset.images[image_idx]
def get_image_by_id(self, image_id):
return self.dataset.id_to_image[image_id]
|
Testing/TestDICOMPython.py | mwestphal/vtk-dicom | 171 | 12645605 | #! /usr/bin/env python2
import sys
import vtk
try:
import vtkDICOM
except ImportError:
    # for backwards compatibility, before VTK 9
import vtkDICOMPython
vtkDICOM = vtkDICOMPython
# put everything into the vtk namespace
for a in dir(vtkDICOM):
if a[0] != '_':
setattr(vtk, a, getattr(vtkDICOM, a))
m = vtk.vtkDICOMMetaData()
if vtk.vtkVersion.GetVTKMajorVersion() < 6:
sys.stderr.write("This test requires VTK 6 or higher.\n");
sys.exit(0)
m.Set(vtk.vtkDICOMTag(0x0008, 0x0005), 'ISO_IR 100')
v = m.Get(vtk.vtkDICOMTag(0x0008, 0x0005))
if v.AsString() != 'ISO_IR 100':
sys.exit(1)
|
matchzoo/preprocessors/bert_preprocessor.py | baajur/MatchZoo | 2,209 | 12645607 | <reponame>baajur/MatchZoo<gh_stars>1000+
"""Bert Preprocessor."""
from tqdm import tqdm
from . import units
from .chain_transform import chain_transform
from matchzoo import DataPack
from matchzoo.engine.base_preprocessor import BasePreprocessor
from .build_vocab_unit import built_bert_vocab_unit
from .build_unit_from_data_pack import build_unit_from_data_pack
tqdm.pandas()
class BertPreprocessor(BasePreprocessor):
"""Bert-base Model preprocessor."""
def __init__(self, bert_vocab_path: str,
fixed_length_left: int = 30,
fixed_length_right: int = 30,
filter_mode: str = 'df',
filter_low_freq: float = 2,
filter_high_freq: float = float('inf'),
remove_stop_words: bool = False,
lower_case: bool = True,
chinese_version: bool = False,
):
"""
Bert-base Model preprocessor.
Example:
>>> import matchzoo as mz
>>> train_data = mz.datasets.toy.load_data()
>>> test_data = mz.datasets.toy.load_data(stage='test')
>>> # The argument 'bert_vocab_path' must feed the bert vocab path
>>> bert_preprocessor = mz.preprocessors.BertPreprocessor(
... bert_vocab_path=
... 'matchzoo/datasets/bert_resources/uncased_vocab_100.txt')
>>> train_data_processed = bert_preprocessor.fit_transform(
... train_data)
>>> test_data_processed = bert_preprocessor.transform(test_data)
"""
super().__init__()
self._fixed_length_left = fixed_length_left
self._fixed_length_right = fixed_length_right
self._bert_vocab_path = bert_vocab_path
self._left_fixedlength_unit = units.FixedLength(
self._fixed_length_left,
pad_mode='post'
)
self._right_fixedlength_unit = units.FixedLength(
self._fixed_length_right,
pad_mode='post'
)
self._filter_unit = units.FrequencyFilter(
low=filter_low_freq,
high=filter_high_freq,
mode=filter_mode
)
self._units = self._default_units()
self._vocab_unit = built_bert_vocab_unit(self._bert_vocab_path)
if chinese_version:
self._units.insert(1, units.ChineseTokenize())
if lower_case:
self._units.append(units.Lowercase())
self._units.append(units.StripAccent())
self._units.append(units.WordPieceTokenize(
self._vocab_unit.state['term_index']))
if remove_stop_words:
self._units.append(units.StopRemoval())
def fit(self, data_pack: DataPack, verbose: int = 1):
"""
Fit pre-processing context for transformation.
:param verbose: Verbosity.
:param data_pack: Data_pack to be preprocessed.
:return: class:`BertPreprocessor` instance.
"""
data_pack = data_pack.apply_on_text(chain_transform(self._units),
verbose=verbose)
fitted_filter_unit = build_unit_from_data_pack(self._filter_unit,
data_pack,
flatten=False,
mode='right',
verbose=verbose)
self._context['filter_unit'] = fitted_filter_unit
self._context['vocab_unit'] = self._vocab_unit
vocab_size = len(self._vocab_unit.state['term_index'])
self._context['vocab_size'] = vocab_size
self._context['embedding_input_dim'] = vocab_size
self._context['input_shapes'] = [(self._fixed_length_left,),
(self._fixed_length_right,)]
return self
def transform(self, data_pack: DataPack, verbose: int = 1) -> DataPack:
"""
Apply transformation on data, create fixed length representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
data_pack = data_pack.copy()
data_pack.apply_on_text(chain_transform(self._units), inplace=True,
verbose=verbose)
data_pack.apply_on_text(self._context['filter_unit'].transform,
mode='right', inplace=True, verbose=verbose)
data_pack.apply_on_text(self._context['vocab_unit'].transform,
mode='both', inplace=True, verbose=verbose)
data_pack.append_text_length(inplace=True, verbose=verbose)
data_pack.apply_on_text(self._left_fixedlength_unit.transform,
mode='left', inplace=True, verbose=verbose)
data_pack.apply_on_text(self._right_fixedlength_unit.transform,
mode='right', inplace=True, verbose=verbose)
max_len_left = self._fixed_length_left
max_len_right = self._fixed_length_right
data_pack.left['length_left'] = \
data_pack.left['length_left'].apply(
lambda val: min(val, max_len_left))
data_pack.right['length_right'] = \
data_pack.right['length_right'].apply(
lambda val: min(val, max_len_right))
return data_pack
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
units.BertClean(),
units.BasicTokenize()
]
|
src/assortment/config_assortment.py | AbhinavGopal/ts_tutorial | 290 | 12645639 | """Specify the jobs to run via config file.
Product assortment experiment (Figure 7.2).
"""
import collections
import functools
import numpy as np
from base.config_lib import Config
from base.experiment import ExperimentNoAction
from assortment.agent_assortment import TSAssortment, GreedyAssortment, EpsilonGreedyAssortment,AnnealingEpsilonGreedyAssortment
from assortment.env_assortment import ProductAssortment
def get_config():
"""Generates the config for the experiment."""
name = 'product_assortment'
num_products = 6
prior_mean = 0
prior_var_diagonal = 1
prior_var_off_diagonal = 0.2
noise_var = 0.04
profits = np.array([1/6]*6)
epsilon = 0.07
k = 9
agents = collections.OrderedDict(
[('TS',
functools.partial(TSAssortment,
num_products, prior_mean, prior_var_diagonal,prior_var_off_diagonal, noise_var, profits,epsilon,k)),
('greedy',
functools.partial(GreedyAssortment,
num_products, prior_mean, prior_var_diagonal,prior_var_off_diagonal, noise_var, profits,epsilon,k)),
(str(epsilon) + '-greedy',
functools.partial(EpsilonGreedyAssortment,
num_products, prior_mean, prior_var_diagonal,prior_var_off_diagonal, noise_var, profits,epsilon,k)),
(str(k)+'/('+str(k)+'+t)-greedy',
functools.partial(AnnealingEpsilonGreedyAssortment,
num_products, prior_mean, prior_var_diagonal,prior_var_off_diagonal, noise_var, profits,epsilon,k))]
)
environments = collections.OrderedDict(
[('env',
functools.partial(ProductAssortment,
num_products, prior_mean, prior_var_diagonal,prior_var_off_diagonal, noise_var, profits))]
)
experiments = collections.OrderedDict(
[(name, ExperimentNoAction)]
)
n_steps = 500
n_seeds = 20000
config = Config(name, agents, environments, experiments, n_steps, n_seeds)
return config
|
tests/integration/events/v1/test_event_type.py | BrimmingDev/twilio-python | 1,362 | 12645659 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class EventTypeTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.events.v1.event_types.list()
self.holodeck.assert_has_request(Request(
'get',
'https://events.twilio.com/v1/Types',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"types": [],
"meta": {
"page": 0,
"page_size": 10,
"first_page_url": "https://events.twilio.com/v1/Types?PageSize=10&Page=0",
"previous_page_url": null,
"url": "https://events.twilio.com/v1/Types?PageSize=10&Page=0",
"next_page_url": null,
"key": "types"
}
}
'''
))
actual = self.client.events.v1.event_types.list()
self.assertIsNotNone(actual)
def test_read_results_response(self):
self.holodeck.mock(Response(
200,
'''
{
"types": [
{
"date_created": "2020-08-13T13:28:20Z",
"date_updated": "2020-08-13T13:28:20Z",
"type": "com.twilio.messaging.message.delivered",
"schema_id": "Messaging.MessageStatus",
"public": true,
"description": "Messaging- delivered message",
"url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered",
"links": {
"schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
}
},
{
"date_created": "2020-08-13T13:28:19Z",
"date_updated": "2020-08-13T13:28:19Z",
"type": "com.twilio.messaging.message.failed",
"schema_id": "Messaging.MessageStatus",
"public": true,
"description": "Messaging- failed message",
"url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.failed",
"links": {
"schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
}
}
],
"meta": {
"page": 0,
"page_size": 20,
"first_page_url": "https://events.twilio.com/v1/Types?PageSize=20&Page=0",
"previous_page_url": null,
"url": "https://events.twilio.com/v1/Types?PageSize=20&Page=0",
"next_page_url": null,
"key": "types"
}
}
'''
))
actual = self.client.events.v1.event_types.list()
self.assertIsNotNone(actual)
def test_read_results_with_schema_id_response(self):
self.holodeck.mock(Response(
200,
'''
{
"types": [
{
"date_created": "2020-08-13T13:28:20Z",
"date_updated": "2020-08-13T13:28:20Z",
"type": "com.twilio.messaging.message.delivered",
"schema_id": "Messaging.MessageStatus",
"public": true,
"description": "Messaging- delivered message",
"url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered",
"links": {
"schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
}
},
{
"date_created": "2020-08-13T13:28:19Z",
"date_updated": "2020-08-13T13:28:19Z",
"type": "com.twilio.messaging.message.failed",
"schema_id": "Messaging.MessageStatus",
"public": true,
"description": "Messaging- failed message",
"url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.failed",
"links": {
"schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
}
}
],
"meta": {
"page": 0,
"page_size": 20,
"first_page_url": "https://events.twilio.com/v1/Types?SchemaId=Messaging.MessageStatus&PageSize=20&Page=0",
"previous_page_url": null,
"url": "https://events.twilio.com/v1/Types?SchemaId=Messaging.MessageStatus&PageSize=20&Page=0",
"next_page_url": null,
"key": "types"
}
}
'''
))
actual = self.client.events.v1.event_types.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.events.v1.event_types("type").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://events.twilio.com/v1/Types/type',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"date_created": "2020-08-13T13:28:20Z",
"date_updated": "2020-08-13T13:28:20Z",
"type": "com.twilio.messaging.message.delivered",
"schema_id": "Messaging.MessageStatus",
"public": true,
"description": "Messaging- delivered message",
"url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered",
"links": {
"schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
}
}
'''
))
actual = self.client.events.v1.event_types("type").fetch()
self.assertIsNotNone(actual)
|
spikeinterface/extractors/neoextractors/mearec.py | khl02007/spikeinterface | 116 | 12645661 | from .neobaseextractor import NeoBaseRecordingExtractor, NeoBaseSortingExtractor
import probeinterface as pi
class MEArecRecordingExtractor(NeoBaseRecordingExtractor):
"""
Class for reading data from a MEArec simulated data.
Parameters
----------
file_path: str
locs_2d: bool
"""
mode = 'file'
NeoRawIOClass = 'MEArecRawIO'
def __init__(self, file_path, locs_2d=True):
neo_kwargs = {'filename': str(file_path)}
NeoBaseRecordingExtractor.__init__(self, **neo_kwargs)
probe = pi.read_mearec(file_path)
self.set_probe(probe, in_place=True)
self.annotate(is_filtered=True)
self._kwargs = {'file_path': str(file_path), 'locs_2d': locs_2d}
class MEArecSortingExtractor(NeoBaseSortingExtractor):
mode = 'file'
NeoRawIOClass = 'MEArecRawIO'
handle_spike_frame_directly = False
def __init__(self, file_path, use_natural_unit_ids=True):
neo_kwargs = {'filename': str(file_path)}
NeoBaseSortingExtractor.__init__(self,
sampling_frequency=None, # auto guess is correct here
use_natural_unit_ids=use_natural_unit_ids,
**neo_kwargs)
self._kwargs = {'file_path': str(file_path), 'use_natural_unit_ids': use_natural_unit_ids}
def read_mearec(file_path, locs_2d=True, use_natural_unit_ids=True):
"""
Parameters
----------
file_path: str or Path
Path to MEArec h5 file
locs_2d: bool
If True (default), locations are loaded in 2d. If False, 3d locations are loaded
use_natural_unit_ids: bool
        If True, natural unit strings are loaded (e.g. #0, #1). If False, unit ids are int64
Returns
-------
recording: MEArecRecordingExtractor
The recording extractor object
sorting: MEArecSortingExtractor
The sorting extractor object
"""
recording = MEArecRecordingExtractor(file_path, locs_2d=locs_2d)
sorting = MEArecSortingExtractor(file_path, use_natural_unit_ids=use_natural_unit_ids)
return recording, sorting
|
recipes/Python/578191_Extract_Multiple_TAR_Files_/recipe-578191.py | tdiprima/code | 2,023 | 12645692 | <reponame>tdiprima/code<filename>recipes/Python/578191_Extract_Multiple_TAR_Files_/recipe-578191.py
import os
import sys
import tarfile
def main():
total = untar(sys.argv[1:])
if total:
args = total, total > 1 and 's were' or ' was'
        sys.stdout.write('Report: %s file%s untarred.' % args)
else:
filename = os.path.basename(sys.argv[0])
sys.stdout.write('Usage: %s <file_or_dir> ...' % filename)
def untar(paths):
total = 0
for path in paths:
if os.path.isdir(path):
try:
dir_list = os.listdir(path)
except:
pass
else:
total += untar(os.path.join(path, new) for new in dir_list)
elif os.path.isfile(path):
try:
tarfile.open(path).extractall(os.path.dirname(path))
except:
pass
else:
total += 1
return total
if __name__ == '__main__':
main()
|
samsung_problems/problem_1.py | loftwah/Daily-Coding-Problem | 129 | 12645709 | <gh_stars>100-1000
"""This problem was asked by Samsung.
A group of houses is connected to the main water plant by means of a set of pipes.
A house can either be connected by a set of pipes extending directly to the plant,
or indirectly by a pipe to a nearby house which is otherwise connected.
For example, here is a possible configuration, where A, B, and C are houses,
and arrows represent pipes:
A <--> B <--> C <--> plant
Each pipe has an associated cost, which the utility company would like to minimize.
Given an undirected graph of pipe connections, return the lowest cost configuration
of pipes such that each house has access to water.
In the following setup, for example, we can remove all but the pipes from plant to A,
plant to B, and B to C, for a total cost of 16.
pipes = {
'plant': {'A': 1, 'B': 5, 'C': 20},
'A': {'C': 15},
'B': {'C': 10},
'C': {}
}
""" |
romp/lib/dataset/preprocess/h36m_extract_frames.py | jjandnn/ROMP | 385 | 12645712 | <reponame>jjandnn/ROMP
import h5py
import sys
import os
import cv2
import numpy as np
import glob
import pickle
import sys
subject_list = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
cam_dict = {'54138969': 0, '55011271': 1, '58860488': 2, '60457274': 3}
def extract_imgs(subject_id, src_folder, out_folder):
video_files = glob.glob(os.path.join(src_folder, subject_id, 'Videos', '*.mp4'))
for video_file in video_files:
if "_ALL" in video_file:
continue
print("video_file", video_file)
video_name = os.path.basename(video_file)
action_name, cam_str, _ = video_name.split('.')
cam_id = cam_dict[cam_str]
target_name = os.path.join(out_folder,'{}_{}_{}'.format(subject_id, action_name, cam_id))
print("target_name ", target_name)
print("video_file", video_file)
cap = cv2.VideoCapture(video_file)
frame_dex = -1
dex = 0
frame_num = 0 #
while (1):
frame_dex = frame_dex + 1
ret, frame = cap.read()
if frame_dex % 5 != 0:
continue
if frame_dex == 0:
continue
if ret:
cv2.imwrite(target_name + '_' + str(dex) + '.jpg', frame)
print("target_name ", target_name + '_' + str(dex) + '.jpg')
dex = dex + 1
if dex > 20:
break
else:
print("video_file end", video_file)
break
cap.release()
return 1
def main():
    assert len(sys.argv) == 3, 'please run the code: python h36m_extract_frames.py h36m_video_path image_save_path'
    # set the path to the video folder (archives) of the Human3.6M dataset here
    src_folder = sys.argv[1]  # "archives"  # archives/S1/Videos/Directions 1.54138969.mp4 .....
    out_folder = sys.argv[2]  # "images"
os.makedirs(out_folder, exist_ok=True)
for subject_id in subject_list:
print('Processing {}'.format(subject_id))
extract_imgs(subject_id, src_folder, out_folder)
if __name__ == '__main__':
main()
|
venv/Lib/site-packages/win32/Demos/BackupSeek_streamheaders.py | ajayiagbebaku/NFL-Model | 150 | 12645724 | ## demonstrates using BackupSeek to enumerate data streams for a file
import win32file, win32api, win32con
from win32com import storagecon
import pythoncom, pywintypes
import struct, traceback
stream_types = {
win32con.BACKUP_DATA: "Standard data",
win32con.BACKUP_EA_DATA: "Extended attribute data",
win32con.BACKUP_SECURITY_DATA: "Security descriptor data",
win32con.BACKUP_ALTERNATE_DATA: "Alternative data streams",
win32con.BACKUP_LINK: "Hard link information",
win32con.BACKUP_PROPERTY_DATA: "Property data",
win32con.BACKUP_OBJECT_ID: "Objects identifiers",
win32con.BACKUP_REPARSE_DATA: "Reparse points",
win32con.BACKUP_SPARSE_BLOCK: "Sparse file",
}
tempdir = win32api.GetTempPath()
tempfile = win32api.GetTempFileName(tempdir, "bkr")[0]
print("Filename:", tempfile)
f = open(tempfile, "w")
f.write("some random junk" + "x" * 100)
f.close()
f = open(tempfile + ":streamdata", "w")
f.write("data written to alternate stream" + "y" * 100)
f.close()
f = open(tempfile + ":anotherstream", "w")
f.write("z" * 200)
f.close()
## add Summary Information, which is stored as a separate stream
m = storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE | storagecon.STGM_DIRECT
pss = pythoncom.StgOpenStorageEx(
tempfile, m, storagecon.STGFMT_FILE, 0, pythoncom.IID_IPropertySetStorage, None
)
ps = pss.Create(
pythoncom.FMTID_SummaryInformation,
pythoncom.IID_IPropertyStorage,
0,
storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE,
)
ps.WriteMultiple(
(storagecon.PIDSI_KEYWORDS, storagecon.PIDSI_COMMENTS), ("keywords", "comments")
)
ps = None
pss = None
sa = pywintypes.SECURITY_ATTRIBUTES()
sa.bInheritHandle = False
h = win32file.CreateFile(
tempfile,
win32con.GENERIC_ALL,
win32con.FILE_SHARE_READ,
sa,
win32con.OPEN_EXISTING,
win32file.FILE_FLAG_BACKUP_SEMANTICS,
None,
)
""" stream header:
typedef struct _WIN32_STREAM_ID {
DWORD dwStreamId; DWORD dwStreamAttributes; LARGE_INTEGER Size;
DWORD dwStreamNameSize; WCHAR cStreamName[ANYSIZE_ARRAY];
}
"""
win32_stream_id_format = "LLQL"
win32_stream_id_size = struct.calcsize(win32_stream_id_format)
def parse_stream_header(h, ctxt, data):
stream_type, stream_attributes, stream_size, stream_name_size = struct.unpack(
win32_stream_id_format, data
)
print(
"\nType:",
stream_type,
stream_types[stream_type],
"Attributes:",
stream_attributes,
"Size:",
stream_size,
"Name len:",
stream_name_size,
)
if stream_name_size > 0:
## ??? sdk says this size is in characters, but it appears to be number of bytes ???
bytes_read, stream_name_buf, ctxt = win32file.BackupRead(
h, stream_name_size, None, False, True, ctxt
)
stream_name = pywintypes.UnicodeFromRaw(stream_name_buf[:])
else:
stream_name = "Unnamed"
print("Name:" + stream_name)
return (
ctxt,
stream_type,
stream_attributes,
stream_size,
stream_name_size,
stream_name,
)
ctxt = 0
win32_stream_id_buf = (
None ## gets rebound to a writable buffer on first call and reused
)
while 1:
bytes_read, win32_stream_id_buf, ctxt = win32file.BackupRead(
h, win32_stream_id_size, win32_stream_id_buf, False, True, ctxt
)
if bytes_read == 0:
break
(
ctxt,
stream_type,
stream_attributes,
stream_size,
stream_name_size,
stream_name,
) = parse_stream_header(h, ctxt, win32_stream_id_buf[:])
if stream_size > 0:
bytes_moved = win32file.BackupSeek(h, stream_size, ctxt)
print("Moved: ", bytes_moved)
win32file.BackupRead(h, win32_stream_id_size, win32_stream_id_buf, True, True, ctxt)
win32file.CloseHandle(h)
|
sfaira/data/dataloaders/loaders/d10_1016_j_neuron_2019_06_011/human_brain_2019_dropseq_polioudakis_001.py | theislab/sfaira | 110 | 12645725 | import os
import pandas
import shutil
import zipfile
def load(data_dir, **kwargs):
age_dict = {
17: "17th week post-fertilization human stage",
18: "18th week post-fertilization human stage",
}
ct_dict = {
"End": "Endothelial",
"ExDp1": "Excitatory deep layer 1",
"ExDp2": "Excitatory deep layer 2",
"ExM": "Maturing excitatory",
"ExM-U": "Maturing excitatory upper enriched",
"ExN": "Migrating excitatory",
"IP": "IP",
"InCGE": "Interneuron CGE",
"InMGE": "Interneuron MGE",
"Mic": "Microglia",
"OPC": "OPC",
"Per": "Pericyte",
"PgG2M": "Cycling Progenitors (G2/M phase)",
"PgS": "Cycling Progenitors (S phase)",
"oRG": "Outer Radial Glia",
"vRG": "Ventricular Radial Glia",
}
import anndata2ri
from rpy2.robjects import r
fn = os.path.join(data_dir, "sc_dev_cortex_geschwind.zip")
fn_tmp = os.path.join(os.path.expanduser("~"), "sfaira_tmp")
if not os.path.exists(fn_tmp):
os.makedirs(fn_tmp)
with zipfile.ZipFile(fn, 'r') as zip_ref:
zip_ref.extractall(fn_tmp)
anndata2ri.activate() # TODO: remove global activation of anndata2ri and use localconverter once it's fixed
adata = r(
f"library(Seurat)\n"
f"load('{os.path.join(fn_tmp, 'raw_counts_mat.rdata')}')\n"
f"new_obj = CreateSeuratObject(raw_counts_mat)\n"
f"as.SingleCellExperiment(new_obj)\n"
)
obs = pandas.read_csv(os.path.join(fn_tmp, "cell_metadata.csv"), index_col=0)
adata = adata[obs.index.tolist()].copy()
adata.obs = obs
shutil.rmtree(fn_tmp)
adata.obs['devstage'] = [age_dict[i] for i in adata.obs['Gestation_week']]
adata.obs['celltype'] = [ct_dict[i] for i in adata.obs['Cluster']]
return adata
|
modelchimp/migrations/0023_machinelearningmodel_epoch_durations.py | akarsh3007/modelchimp | 134 | 12645800 | # Generated by Django 2.0.6 on 2018-07-02 14:50
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('modelchimp', '0022_auto_20180625_1722'),
]
operations = [
migrations.AddField(
model_name='machinelearningmodel',
name='epoch_durations',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
]
|
pyEX/studies/technicals/volume.py | sourcery-ai-bot/pyEX | 107 | 12645813 | <reponame>sourcery-ai-bot/pyEX
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import pandas as pd
import talib as t
def ad(
client,
symbol,
range="6m",
highcol="high",
lowcol="low",
closecol="close",
volumecol="volume",
):
"""This will return a dataframe of Chaikin A/D Line for the given symbol across
the given range
Args:
client (pyEX.Client): Client
symbol (string): Ticker
range (string): range to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
volumecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, range)
ad = t.AD(
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
df[volumecol].values.astype(float),
)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
volumecol: df[volumecol].values,
"a/d": ad,
}
)
def adosc(
client,
symbol,
range="6m",
highcol="high",
lowcol="low",
closecol="close",
volumecol="volume",
fastperiod=3,
slowperiod=10,
):
"""This will return a dataframe of Chaikin A/D Oscillator for the given symbol across
the given range
Args:
client (pyEX.Client): Client
symbol (string): Ticker
range (string): range to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
volumecol (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, range)
adosc = t.ADOSC(
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
df[volumecol].values.astype(float),
fastperiod,
slowperiod,
)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
volumecol: df[volumecol].values,
"a/d": adosc,
}
)
def obv(client, symbol, range="6m", closecol="close", volumecol="volume"):
"""This will return a dataframe of On Balance Volume for the given symbol across
the given range
Args:
client (pyEX.Client): Client
symbol (string): Ticker
range (string): range to use, for pyEX.chart
closecol (string): column to use to calculate
volumecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, range)
obv = t.OBV(df[closecol].values.astype(float), df[volumecol].values.astype(float))
return pd.DataFrame(
{closecol: df[closecol].values, volumecol: df[volumecol].values, "obv": obv}
)
|
tests/test_namespaces.py | commarla/python-nomad | 109 | 12645818 | import tests.common as common
import pytest
import responses
# Integration tests are mocked. If you have an enterprise Nomad, please uncomment ##### ENTERPRISE TEST #####
@responses.activate
def test_get_namespaces(nomad_setup):
responses.add(
responses.GET,
"http://{ip}:{port}/v1/namespaces".format(ip=common.IP, port=common.NOMAD_PORT),
status=200,
json=[
{
"CreateIndex": 31,
"Description": "Production API Servers",
"ModifyIndex": 31,
"Name": "api-prod"
},
{
"CreateIndex": 5,
"Description": "Default shared namespace",
"ModifyIndex": 5,
"Name": "default"
}
]
)
assert isinstance(nomad_setup.namespaces.get_namespaces(), list) == True
@responses.activate
def test_get_namespaces_prefix(nomad_setup):
responses.add(
responses.GET,
"http://{ip}:{port}/v1/namespaces?prefix=api-".format(ip=common.IP, port=common.NOMAD_PORT),
status=200,
json=[
{
"CreateIndex": 31,
"Description": "Production API Servers",
"ModifyIndex": 31,
"Name": "api-prod"
},
]
)
assert isinstance(nomad_setup.namespaces.get_namespaces(prefix="api-"), list) == True
@responses.activate
def test_namespaces_iter(nomad_setup):
responses.add(
responses.GET,
"http://{ip}:{port}/v1/namespaces".format(ip=common.IP, port=common.NOMAD_PORT),
status=200,
json=[
{
"CreateIndex": 31,
"Description": "Production API Servers",
"ModifyIndex": 31,
"Name": "api-prod"
},
{
"CreateIndex": 5,
"Description": "Default shared namespace",
"ModifyIndex": 5,
"Name": "default"
}
]
)
assert "api-prod" in nomad_setup.namespaces
@responses.activate
def test_namespaces_len(nomad_setup):
responses.add(
responses.GET,
"http://{ip}:{port}/v1/namespaces".format(ip=common.IP, port=common.NOMAD_PORT),
status=200,
json=[
{
"CreateIndex": 31,
"Description": "Production API Servers",
"ModifyIndex": 31,
"Name": "api-prod"
},
{
"CreateIndex": 5,
"Description": "Default shared namespace",
"ModifyIndex": 5,
"Name": "default"
}
]
)
assert 2 == len(nomad_setup.namespaces)
###### ENTERPRISE TEST ###########
# def test_get_namespaces(nomad_setup):
# assert isinstance(nomad_setup.namespaces.get_namespaces(), list) == True
|
examples/issues/issue432_b.py | tgolsson/appJar | 666 | 12645828 | import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
if btn == "ONE": app.selectFrame("stack", 0)
elif btn == "TWO": app.selectFrame("stack", 1)
with gui() as app:
with app.frameStack("stack"):
with app.frame():
app.label("In frame one")
with app.frame():
app.label("In frame two")
app.buttons(["ONE", "TWO"], press)
|
examples/digit-recognizer/__init__.py | sUeharaE4/mlcomp | 166 | 12645856 | <reponame>sUeharaE4/mlcomp
# flake8: noqa
from catalyst.dl import SupervisedRunner as Runner
from experiment import Experiment
from model import Net |
caliban/platform/notebook.py | Anon-Artist/caliban | 425 | 12645863 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions required to interact with Docker to build and run images, shells
and notebooks in a Docker environment.
"""
from typing import List, Optional
from blessings import Terminal
import caliban.config as c
import caliban.docker.build as b
import caliban.platform.shell as ps
import caliban.util.fs as ufs
t = Terminal()
def run_notebook(job_mode: c.JobMode,
port: Optional[int] = None,
lab: Optional[bool] = None,
version: Optional[bool] = None,
run_args: Optional[List[str]] = None,
**run_interactive_kwargs) -> None:
"""Start a notebook in the current working directory; the process will run
inside of a Docker container that's identical to the environment available to
Cloud jobs that are submitted by `caliban cloud`, or local jobs run with
`caliban run.`
if you pass mount_home=True your jupyter settings will persist across calls.
Keyword args:
- port: the port to pass to Jupyter when it boots, useful if you have
multiple instances running on one machine.
- lab: if True, starts jupyter lab, else jupyter notebook.
- version: explicit Jupyter version to install.
run_interactive_kwargs are all extra arguments taken by run_interactive.
"""
if port is None:
port = ufs.next_free_port(8888)
if lab is None:
lab = False
if run_args is None:
run_args = []
inject_arg = b.NotebookInstall.lab if lab else b.NotebookInstall.jupyter
jupyter_cmd = "lab" if lab else "notebook"
jupyter_args = [
"-m", "jupyter", jupyter_cmd, \
"--ip=0.0.0.0", \
"--port={}".format(port), \
"--no-browser"
]
docker_args = ["-p", "{}:{}".format(port, port)] + run_args
ps.run_interactive(job_mode,
entrypoint="python",
entrypoint_args=jupyter_args,
run_args=docker_args,
inject_notebook=inject_arg,
jupyter_version=version,
**run_interactive_kwargs)
|
Validation/RecoTau/Tools/SteerMultipleCompare.py | Purva-Chaudhari/cmssw | 852 | 12645871 | <filename>Validation/RecoTau/Tools/SteerMultipleCompare.py
#! /usr/bin/env python3
from __future__ import print_function
import sys
import os
import re
from ROOT import *
import MultipleCompare as MultipleCompare
__author__ = "<NAME> (<EMAIL>)"
__doc__ = """Script to execute multiple plotting commands via MultipleCompare.py. Switch between massiveMode producing a set of plots comparing each one by one, and defaultMode producing a smaller set of default plot combinations by adding the commandline option massiveMode:\n\n
Usage: SteerMultipleCompare.py -T testFile -R refFile [options] [search strings that you want to apply '*' is supported as special character]
see MultipleCompare.py for details
"""
def StripPath(name):
path = ''
plot = ''
matches = re.match(r'(.*)\/(.*)$', name)
if matches:
path = matches.group(1)
plot = matches.group(2)
return [path, plot]
def CreateDirectory(dir,addToExisting=False):
if os.path.exists(dir) and not addToExisting:
print("Output directory %s already exists! OK to overwrite?" % dir)
while True:
      answer = input("Please enter [y/n] ")  # raw_input is Python 2 only; this script runs under python3
      if (answer == 'y'):
        break
      elif (answer == 'n'):
        print(" ...exiting.")
        sys.exit()
if not os.path.exists(dir):
os.makedirs(dir)
def CreateBaseDirectory(options):
if options.out == 'MultipleCompare.png' or options.out.find('.')!=-1:
#default case, so no directory was given
#or a filename was given
outputDirName = 'MultipleCompareOutput'
else:
outputDirName = options.out
outputDir = os.path.join(os.getcwd(), outputDirName)
CreateDirectory(outputDir)
return outputDir
def CreateSubDirectory(basedir, path):
outputDir = os.path.join(basedir, path)
CreateDirectory(outputDir,True)
def CleanArguments(argv, option):
#remove existing output arguments
while argv.count(option) > 0:
index = argv.index(option)
if index < len(argv)-1:
argv.pop(index+1)#drop the corresponding value
argv.pop(index)#drop the option itself
#execute Multicompare for each plot as a comparison one by one
#argv was modified to contain only one plot each
def plotOneByOne(argv, outputDir, histoList, histoSubNames, paths):
for hist, name, path in zip(histoList, histoSubNames, paths):
CreateSubDirectory(outputDir, path)
#now give modified arguments to MultipleCompare
tmpArgv = argv[:]
tmpArgv.append('-o')
tmpArgv.append(outputDir+'/'+path+'/'+name+'.png')
tmpArgv.append(hist)
MultipleCompare.main(tmpArgv)
def plotDefault(argv, outputDir, name, type, plots, addArgv=[]):
tmpArgv = argv[:]
tmpArgv.append('-o')
tmpArgv.append(outputDir+'/'+name+type)
tmpArgv.extend(addArgv)
tmpArgv.extend(plots)
MultipleCompare.main(tmpArgv)
#make some default plots grouping several histograms
def plotDefaults(argv, options, outputDir):
name = 'Validation_'
if options.testLabel != None:
name += options.testLabel+'_'
else:
name += options.test+'_vs_'
if options.refLabel != None:
name += options.refLabel+'_'
else:
name += options.ref+'_'
outputType = '.eps'
additionalArgv = []
if outputDir.find('QCD')!=-1:
additionalArgv.append('-f') #fakerate
plotDefault(argv, outputDir, name, 'LeptonRejectionEffphi'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effphi'], additionalArgv)
plotDefault(argv, outputDir, name, 'LeptonRejectionEffeta'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effeta'], additionalArgv)
plotDefault(argv, outputDir, name, 'LeptonRejectionEffpt'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effpt'], additionalArgv)
if outputDir.find('QCD')!=-1:
additionalArgv.append('--logScale')
plotDefault(argv, outputDir, name, 'Effphi'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effphi', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effphi'], additionalArgv)
plotDefault(argv, outputDir, name, 'Effeta'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effeta', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effeta'], additionalArgv)
plotDefault(argv, outputDir, name, 'Effpt'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effpt', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effpt'], additionalArgv)
plotDefault(argv, outputDir, name, 'pTRatio_allHadronic'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_allHadronic', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_allHadronic'])
plotDefault(argv, outputDir, name, 'pTRatio_oneProng1Pi0'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_oneProng1Pi0', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_oneProng1Pi0'])
plotDefault(argv, outputDir, name, 'pTRatio_threeProng0Pi0'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_threeProng0Pi0', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_threeProng0Pi0'])
plotDefault(argv, outputDir, name, 'Size_isolationPFChargedHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFChargedHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFChargedHadrCands'])
plotDefault(argv, outputDir, name, 'Size_isolationPFNeutrHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFNeutrHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFNeutrHadrCands'])
plotDefault(argv, outputDir, name, 'Size_isolationPFGammaCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFGammaCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFGammaCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFChargedHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFChargedHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFChargedHadrCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFNeutrHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFNeutrHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFNeutrHadrCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFGammaCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFGammaCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFGammaCands'])
def main(argv=None):
if argv is None:
argv = sys.argv
options, toPlot = MultipleCompare.LoadCommandlineOptions(argv)
gROOT.SetBatch()
testFile = TFile(options.test)
refFile = None
if options.ref != '':
refFile = TFile(options.ref)
plotList = []
MultipleCompare.MapDirStructure( testFile,'',plotList)
if len(plotList)<1:
print('\tError: Please specify at least one histogram. The following ones are available in the root file.')
print(plotList)
sys.exit()
histoList = []
histoSubNames = []
paths = []
massiveMode = False
for plot in toPlot:
#clean the arguments. toPlot contains the list of positional arguments leftover after parsing options
argv.remove(plot)
for path in plotList:
if MultipleCompare.Match(plot.lower(),path.lower()):
histoList.append(path)
strippedPath, strippedPlot = StripPath(path)
paths.append(strippedPath)
histoSubNames.append(strippedPlot)
#print histoSubNames[-1]
elif plot.find('massiveMode') != -1:
massiveMode = True
CleanArguments(argv,'--output')
CleanArguments(argv,'-o')
outputDir = CreateBaseDirectory(options)
if massiveMode:
print("Massive mode: scan all subdirs and make plots comparing each histogram one by one.")
plotOneByOne(argv, outputDir, histoList, histoSubNames, paths)
else:
print("Default mode: Make default plot combinations.")
plotDefaults(argv, options, outputDir)
#only execute main() if manually run
if __name__ == '__main__':
#main(*sys.argv[1:])
# the calls to sys.exit(n) inside main() all become return n.
sys.exit(main())
else:
print("This is ",__name__)
|
rl_agents/trainer/state_sampler.py | neskoc/rl-agents | 342 | 12645879 | <reponame>neskoc/rl-agents
from abc import abstractmethod
import numpy as np
class AbstractStateSampler(object):
@abstractmethod
def states_list(self):
"""
Get a list of relevant states from a problem state-space
:return: 2D array of vertically stacked state rows
"""
raise NotImplementedError()
@abstractmethod
def states_mesh(self):
"""
Get a 2D mesh of relevant states from a problem state-space
:return: a tuple (xx, yy, states)
xx and yy are vectors of the coordinates of the state in the chosen 2D-manifold of the state space
states is an array of vertically stacked state rows
"""
raise NotImplementedError()
class CartPoleStateSampler(AbstractStateSampler):
def __init__(self, resolution=15):
self.resolution = resolution
def states_mesh(self):
xx, yy = np.meshgrid(np.linspace(-1, 1, self.resolution), np.linspace(-1, 1, self.resolution))
xf = np.reshape(xx, (np.size(xx), 1))
yf = np.reshape(yy, (np.size(yy), 1))
states = np.hstack((2 * xf, 2 * xf, yf * 12 * np.pi / 180, yf))
return xx, yy, states
def states_list(self):
return np.array([[0, 0, 0, 0],
[-0.08936051, -0.37169457, 0.20398587, 1.03234316],
[0.10718797, 0.97770614, -0.20473761, -1.6631015]])
class MountainCarStateSampler(AbstractStateSampler):
def __init__(self, resolution=15):
self.resolution = resolution
def states_mesh(self):
xx, yy = np.meshgrid(np.linspace(-1, 1, self.resolution), np.linspace(-1, 1, self.resolution))
xf = np.reshape(xx, (np.size(xx), 1))
yf = np.reshape(yy, (np.size(yy), 1))
states = np.hstack((-0.35+0.85*xf, 0.06*yf))
return xx, yy, states
def states_list(self):
return np.array([[-0.5, 0], # Initial
[-1.2, 0], # Left side
[-0.5, 0.06], # Bottom with forward speed
[0.5, 0.04]]) # Goal
class ObstacleStateSampler(AbstractStateSampler):
def __init__(self, resolution=15):
self.resolution = resolution
def states_mesh(self):
xx, yy = np.meshgrid(np.linspace(-1, 1, self.resolution), np.linspace(-1, 1, self.resolution))
xf = np.reshape(xx, (np.size(xx), 1))
yf = np.reshape(yy, (np.size(yy), 1))
o = np.ones(np.shape(xf))
states = np.hstack((1/2+xf/2, 1/2+yf/2, 0*o, 1*o, 0.1+1/2-xf/2, o, o, o, 0.1+1/2-xf/2,
o, o, o, o, o, o, o, o, o, o, o))
return xx, yy, states
def states_list(self):
return np.array([[1., 0., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], # Far
[1., 0., 1., 0., 0.6, 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], #
[1., 0., 1., 0., 0.3, 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], #
[1., 0., 1., 0., 0.1, 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]) # Close
|
language/common/utils/export_utils_test.py | Xtuden-com/language | 1,199 | 12645916 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
from language.common.utils import export_utils
class ExportUtilsTest(absltest.TestCase):
def test_numeric_timestamp_and_trailing_slashes(self):
temp_dir = self.create_tempdir().full_path
os.makedirs(os.path.join(temp_dir, "export", "best", "2", "my_module"))
os.makedirs(os.path.join(temp_dir, "export", "best", "10", "my_module"))
result = export_utils.tfhub_export_path(temp_dir + "/", "best", "my_module")
self.assertEqual(
result, os.path.join(temp_dir, "export", "best", "10", "my_module"))
def test_cleanup_old_dirs(self):
temp_dir = self.create_tempdir().full_path
for i in range(7, 12):
os.makedirs(os.path.join(temp_dir, "export", "best", str(i), "my_module"))
export_utils.clean_tfhub_exports(temp_dir, "best", exports_to_keep=3)
dirnames = os.listdir(os.path.join(temp_dir, "export", "best"))
self.assertEqual(set(dirnames), {"9", "10", "11"})
if __name__ == "__main__":
absltest.main()
|
Hackerrank/Company Contests/Sears Finals/Problem E/test.py | VastoLorde95/Competitive-Programming | 170 | 12645935 | from math import *
from fractions import *
def solve(n):
ans = 0
for i in xrange(1, n+1):
if gcd(i,n) == 1:
ans += gcd(i-1, n)
return ans
for i in xrange(1,20):
s = 0
for j in xrange(1, i+1):
p = 1
t = j
while t > 0:
p *= t % 10
t /= 10
s += solve(p)
print i, s
|
tensorflow_graphics/rendering/tests/cpu_rasterization_backend_test.py | Liang813/graphics | 2,759 | 12645976 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the rasterization_backend."""
from tensorflow_graphics.rendering import rasterization_backend
from tensorflow_graphics.rendering.tests import rasterization_backend_test_base
from tensorflow_graphics.util import test_case
class CPURasterizationBackendTest(
rasterization_backend_test_base.RasterizationBackendTestBase):
def setUp(self):
super(CPURasterizationBackendTest, self).setUp()
self._backend = rasterization_backend.RasterizationBackends.CPU
if __name__ == '__main__':
test_case.main()
|
networks/sceneflow_field.py | google/dynamic-video-depth | 144 | 12646004 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from .blocks import PeriodicEmbed, Conv2dBlock
class SceneFlowFieldNet(nn.Module):
def __init__(self, time_dependent=True, N_freq_xyz=0, N_freq_t=0, output_dim=3, net_width=32, n_layers=3, activation='lrelu', norm='none'):
super().__init__()
N_input_channel_xyz = 3 + 3 * 2 * N_freq_xyz
N_input_channel_t = 1 + 1 * 2 * N_freq_t
N_input_channel = N_input_channel_xyz + N_input_channel_t if time_dependent else N_input_channel_xyz
if N_freq_xyz == 0:
xyz_embed = nn.Identity()
else:
xyz_embed = PeriodicEmbed(max_freq=N_freq_xyz, N_freq=N_freq_xyz)
if N_freq_t == 0:
t_embed = nn.Identity()
else:
t_embed = PeriodicEmbed(max_freq=N_freq_t, N_freq=N_freq_t)
convs = [Conv2dBlock(N_input_channel, net_width, 1, 1, norm=norm, activation=activation)]
for i in range(n_layers):
convs.append(Conv2dBlock(net_width, net_width, 1, 1, norm=norm, activation=activation))
convs.append(Conv2dBlock(net_width, output_dim, 1, 1, norm='none', activation='none'))
self.convs = nn.Sequential(*convs)
self.t_embed = t_embed
self.xyz_embed = xyz_embed
self.time_dependent = time_dependent
def forward(self, x, t=None):
x = x.contiguous()
if t is None and self.time_dependent:
raise ValueError
xyz_embedded = self.xyz_embed(x)
if self.time_dependent:
t_embedded = self.t_embed(t)
input_feat = torch.cat([t_embedded, xyz_embedded], 1)
else:
input_feat = xyz_embedded
return self.convs(input_feat)
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/api/resources/__init__.py | lieutdan13/cookiecutter-flask-restful | 719 | 12646058 | <filename>{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/api/resources/__init__.py
from {{cookiecutter.app_name}}.api.resources.user import UserResource, UserList
__all__ = ["UserResource", "UserList"]
|
tests/packerlicious/test_builder_vmware.py | gnewson/packerlicious | 109 | 12646066 | import pytest
import packerlicious.builder as builder
class TestVMwareIsoBuilder(object):
def test_required_fields_missing(self):
b = builder.VMwareIso()
with pytest.raises(ValueError) as excinfo:
b.to_dict()
assert 'required' in str(excinfo.value)
def test_iso_checksum_mutually_exclusive(self):
b = builder.VMwareIso(
iso_url="/url/to/iso",
iso_checksum_type=builder.VirtualboxIso.MD5,
iso_checksum="my_checksum",
iso_checksum_url="my_checksum_url",
)
with pytest.raises(ValueError) as excinfo:
b.to_dict()
assert 'VMwareIso: only one of the following can be specified: iso_checksum, iso_checksum_url' == str(
excinfo.value)
class TestVMwareVmxBuilder(object):
def test_required_fields_missing(self):
b = builder.VMwareVmx()
with pytest.raises(ValueError) as excinfo:
b.to_dict()
assert 'required' in str(excinfo.value)
|
examples/imdb/prepare.py | rsketine/neon | 4,415 | 12646078 | # ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from future import standard_library
standard_library.install_aliases() # triggers E402, hence noqa below
import h5py # noqa
from collections import defaultdict # noqa
import numpy as np # noqa
import os # noqa
from neon import logger as neon_logger # noqa
from neon.data.text_preprocessing import clean_string # noqa
from neon.util.compat import pickle # noqa
def build_data_train(path='.', filepath='labeledTrainData.tsv', vocab_file=None,
vocab=None, skip_headers=True, train_ratio=0.8):
"""
    Loads the data file and spits out an h5 file with records of
    {y, review_text, review_int}
    Typically two passes over the data.
    1st pass is for vocab and pre-processing. (WARNING: to get phrases, we need to go
    through multiple passes). 2nd pass is converting text into integers. We will deal
    with integers from there on.
    WARNING: we use h5 just as a proof of concept for handling large datasets
    Datasets may fit entirely in memory as a numpy array
"""
fname_h5 = filepath + '.h5'
if vocab_file is None:
fname_vocab = filepath + '.vocab'
else:
fname_vocab = vocab_file
if not os.path.exists(fname_h5) or not os.path.exists(fname_vocab):
# create the h5 store - NOTE: hdf5 is row-oriented store and we slice rows
# reviews_text holds the metadata and processed text file
# reviews_int holds the ratings, ints
h5f = h5py.File(fname_h5, 'w')
shape, maxshape = (2 ** 16,), (None, )
dt = np.dtype([('y', np.uint8),
('split', np.bool),
('num_words', np.uint16),
# WARNING: vlen=bytes in python 3
('text', h5py.special_dtype(vlen=str))
])
reviews_text = h5f.create_dataset('reviews', shape=shape, maxshape=maxshape,
dtype=dt, compression='gzip')
reviews_train = h5f.create_dataset(
'train', shape=shape, maxshape=maxshape,
dtype=h5py.special_dtype(vlen=np.int32), compression='gzip')
reviews_valid = h5f.create_dataset(
'valid', shape=shape, maxshape=maxshape,
dtype=h5py.special_dtype(vlen=np.int32), compression='gzip')
wdata = np.zeros((1, ), dtype=dt)
# init vocab only for train data
build_vocab = False
if vocab is None:
vocab = defaultdict(int)
build_vocab = True
nsamples = 0
# open the file, skip the headers if needed
f = open(filepath, 'r')
if skip_headers:
f.readline()
for i, line in enumerate(f):
_, rating, review = line.strip().split('\t')
# clean the review
review = clean_string(review)
review_words = review.strip().split()
num_words = len(review_words)
split = int(np.random.rand() < train_ratio)
# create record
wdata['y'] = int(float(rating))
wdata['text'] = review
wdata['num_words'] = num_words
wdata['split'] = split
reviews_text[i] = wdata
# update the vocab if needed
if build_vocab:
for word in review_words:
vocab[word] += 1
nsamples += 1
# histogram of class labels, sentence length
ratings, counts = np.unique(
reviews_text['y'][:nsamples], return_counts=True)
sen_len, sen_len_counts = np.unique(
reviews_text['num_words'][:nsamples], return_counts=True)
vocab_size = len(vocab)
nclass = len(ratings)
reviews_text.attrs['vocab_size'] = vocab_size
reviews_text.attrs['nrows'] = nsamples
reviews_text.attrs['nclass'] = nclass
reviews_text.attrs['class_distribution'] = counts
neon_logger.display("vocabulary size - {}".format(vocab_size))
neon_logger.display("# of samples - {}".format(nsamples))
neon_logger.display("# of classes {}".format(nclass))
neon_logger.display("class distribution - {} {}".format(ratings, counts))
sen_counts = list(zip(sen_len, sen_len_counts))
sen_counts = sorted(sen_counts, key=lambda kv: kv[1], reverse=True)
neon_logger.display("sentence length - {} {} {}".format(len(sen_len),
sen_len, sen_len_counts))
# WARNING: assume vocab is of order ~4-5 million words.
# sort the vocab , re-assign ids by its frequency. Useful for downstream tasks
# only done for train data
if build_vocab:
vocab_sorted = sorted(
list(vocab.items()), key=lambda kv: kv[1], reverse=True)
vocab = {}
for i, t in enumerate(list(zip(*vocab_sorted))[0]):
vocab[t] = i
# map text to integers
ntrain = 0
nvalid = 0
for i in range(nsamples):
text = reviews_text[i]['text']
y = int(reviews_text[i]['y'])
split = reviews_text[i]['split']
text_int = [y] + [vocab[t] for t in text.strip().split()]
if split:
reviews_train[ntrain] = text_int
ntrain += 1
else:
reviews_valid[nvalid] = text_int
nvalid += 1
reviews_text.attrs['ntrain'] = ntrain
reviews_text.attrs['nvalid'] = nvalid
neon_logger.display(
"# of train - {0}, # of valid - {1}".format(reviews_text.attrs['ntrain'],
reviews_text.attrs['nvalid']))
# close open files
h5f.close()
f.close()
if not os.path.exists(fname_vocab):
rev_vocab = {}
for wrd, wrd_id in vocab.items():
rev_vocab[wrd_id] = wrd
neon_logger.display("vocabulary from IMDB dataset is saved into {}".format(fname_vocab))
pickle.dump((vocab, rev_vocab), open(fname_vocab, 'wb'), 2)
return fname_h5, fname_vocab
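# Illustrative usage sketch (not part of the original example). The TSV path is
# just the function's own default and is assumed to point at a local copy of the
# labeled IMDB training data.
def _example_build():
    fname_h5, fname_vocab = build_data_train(path='.',
                                             filepath='labeledTrainData.tsv',
                                             train_ratio=0.8)
    return fname_h5, fname_vocab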
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/cache.py | mchelen-gov/integrations-core | 663 | 12646088 | <filename>datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/cache.py
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
from datetime import datetime
from typing import Optional
from ....console import echo_info
class Cache:
"""
Cache data that is expensive to compute. Use JSON format.
"""
def __init__(self, app_dir: str, cache_name: str, expiration: datetime):
cache_path = os.path.join(app_dir, '.cache')
self.__path = os.path.join(cache_path, cache_name)
try:
os.mkdir(cache_path)
except FileExistsError:
pass
try:
creation_time = datetime.utcfromtimestamp(os.path.getctime(self.__path))
if creation_time < expiration:
echo_info(f'Cache expired. Removing cache {self.__path}')
os.remove(self.__path)
except OSError:
# file does not exist
pass
def get_value(self) -> Optional[object]:
try:
with open(self.__path) as f:
echo_info(f'Load from {self.__path}')
value_json = f.read()
return json.loads(value_json)
except FileNotFoundError:
return None
except Exception as e:
raise Exception(f'Invalid cache object in {self.__path} {type(e)}') from e
def set_value(self, value: object):
value_json = json.dumps(value)
stat = None
try:
stat = os.stat(self.__path)
except FileNotFoundError:
pass
with open(self.__path, 'w') as f:
f.write(value_json)
if stat:
# restore file dates for cache expiration
os.utime(self.__path, (stat.st_atime, stat.st_mtime))
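# Illustrative usage sketch (not part of the original module): one way a caller
# might combine get_value/set_value. The directory, cache name and payload are
# hypothetical.
def _example_usage():
    from datetime import timedelta
    # Cache files created more than one day before "now" are treated as expired.
    expiration = datetime.utcnow() - timedelta(days=1)
    cache = Cache('/tmp/example_app', 'board_members.json', expiration)
    members = cache.get_value()
    if members is None:
        members = {'alice': '123', 'bob': '456'}  # stand-in for an expensive lookup
        cache.set_value(members)
    return members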
|
tests/changes/api/test_cached_snapshot_cluster_details.py | vault-the/changes | 443 | 12646099 | <filename>tests/changes/api/test_cached_snapshot_cluster_details.py
from changes.testutils import APITestCase
import datetime
import mock
class CachedSnapshotClusterDetailsAPITestCase(APITestCase):
def setUp(self):
super(CachedSnapshotClusterDetailsAPITestCase, self).setUp()
self.mock_datetime = datetime.datetime.utcnow()
def get_endpoint_path(self, cluster):
return '/api/0/snapshots/cache/clusters/{0}/'.format(cluster)
def test_empty(self):
resp = self.client.get(self.get_endpoint_path('cluster'))
assert resp.status_code == 200
data = self.unserialize(resp)
assert data == []
@mock.patch('changes.lib.snapshot_garbage_collection.get_current_datetime')
def test_get_current_datetime(self, get_current_datetime):
"""Metatest that verifies that the time-mock is functional.
"""
get_current_datetime.return_value = self.mock_datetime
self.client.get(self.get_endpoint_path('cluster'))
get_current_datetime.assert_any_call()
@mock.patch('changes.lib.snapshot_garbage_collection.get_current_datetime')
def test_multiproject(self, get_current_datetime):
"""
Integration test (minus mocking out time) on the endpoint, which is
different from the lib-tests which mock out get_plans in the garbage
collector.
"""
project1 = self.create_project()
project2 = self.create_project()
plan1_1 = self.create_plan(project1)
plan1_2 = self.create_plan(project1)
plan2_1 = self.create_plan(project2)
plan2_2 = self.create_plan(project2)
plan2_3 = self.create_plan(project2)
self.create_step(plan1_1, data={'cluster': 'cluster1'})
self.create_step(plan1_2, data={'cluster': 'cluster2'})
self.create_step(plan2_1, data={'cluster': 'cluster2'})
self.create_step(plan2_2, data={'cluster': 'cluster2'})
snapshot1 = self.create_snapshot(project1)
snapshot2 = self.create_snapshot(project2)
snapshot_image1_1 = self.create_snapshot_image(snapshot1, plan1_1)
snapshot_image1_2 = self.create_snapshot_image(snapshot1, plan1_2)
snapshot_image2_1 = self.create_snapshot_image(snapshot2, plan2_1)
snapshot_image2_2 = self.create_snapshot_image(snapshot2, plan2_2)
snapshot_image2_3 = self.create_snapshot_image(snapshot2, plan2_3)
self.create_cached_snapshot_image(snapshot_image1_1)
self.create_cached_snapshot_image(snapshot_image1_2,
expiration_date=self.mock_datetime + datetime.timedelta(0, 1))
self.create_cached_snapshot_image(snapshot_image2_1,
expiration_date=self.mock_datetime - datetime.timedelta(0, 1))
self.create_cached_snapshot_image(snapshot_image2_2)
self.create_cached_snapshot_image(snapshot_image2_3)
get_current_datetime.return_value = self.mock_datetime
resp = self.client.get(self.get_endpoint_path('cluster2'))
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 2
assert snapshot_image1_2.id.hex in data
assert snapshot_image2_2.id.hex in data
# Ensure that nonexisting clusters still give empty even when there
# is actually some data (unlike test_empty)
resp = self.client.get(self.get_endpoint_path('cluster3'))
assert resp.status_code == 200
data = self.unserialize(resp)
assert data == []
|
deepneuro/outputs/segmentation.py | ysuter/DeepNeuro | 113 | 12646136 | <reponame>ysuter/DeepNeuro
import numpy as np
from deepneuro.outputs.inference import ModelInference
from deepneuro.utilities.util import add_parameter, docker_print
class PatchesInference(ModelInference):
"""
"""
def load(self, kwargs):
""" Parameters
----------
patch_overlaps: int, optional
The amount of times a grid of patches is predicted over an entire
output volume. Subsequent grids are offset from the original grid
by patch_size / patch_overlaps, and the final output is the result
of averaging over each grid for each voxel. Default is 1.
input_patch_shape: tuple, optional
The input dimensions of the predicted patches, not including batch
size. If None, DeepNeuro will attempt to extract this value from the
given model. Default is None.
output_patch_shape: tuple, optional
The output dimensions of the predicted patches, not including batch
size. If smaller than the input patch size in any dimension, patches
will be cropped symmetrically by the difference in size to meet this
shape. Default is None.
check_empty_patch: bool, optional
Do not predict patches if they only contain zeros. Default is True.
pad_borders: bool, optional
Pads input borders by patch_size / 2 with zeros. This allows patches
at the boundary of an image to be successfully predicted, albeit with
zero infill values. Default is True.
patch_dimensions: tuple or list, optional
output_patch_dimensions: tuple or list, optional
"""
super(PatchesInference, self).load(kwargs)
# Patching Parameters
add_parameter(self, kwargs, 'patch_overlaps', 1)
add_parameter(self, kwargs, 'input_patch_shape', None)
add_parameter(self, kwargs, 'output_patch_shape', None)
add_parameter(self, kwargs, 'check_empty_patch', True)
add_parameter(self, kwargs, 'pad_borders', True)
add_parameter(self, kwargs, 'keep_channels', None)
add_parameter(self, kwargs, 'patch_dimensions', None)
add_parameter(self, kwargs, 'output_patch_dimensions', self.patch_dimensions)
self.batch_size = 1
def process_case(self, input_data, model=None):
"""Summary
Parameters
----------
input_data : TYPE
Description
model : None, optional
Description
Returns
-------
TYPE
Description
"""
input_data = input_data[self.lead_key]
if model is not None:
self.model = model
if self.channels_first:
input_data = np.swapaxes(input_data, 1, -1)
if self.input_channels is not None:
input_data = np.take(input_data, self.input_channels, self.channels_dim)
# Determine patch shape. Currently only extends to spatial patching.
# This leading dims business has got to have a better solution..
if self.input_patch_shape is None:
self.input_patch_shape = self.model.model_input_shape
if self.output_patch_shape is None:
self.output_patch_shape = self.model.model_output_shape
self.input_dim = len(self.input_patch_shape) - 2
if self.patch_dimensions is None:
if self.channels_first:
self.patch_dimensions = [-1 * self.input_dim + x for x in range(self.input_dim)]
else:
self.patch_dimensions = [-1 * self.input_dim + x - 1 for x in range(self.input_dim)]
if self.output_patch_dimensions is None:
self.output_patch_dimensions = self.patch_dimensions
self.output_shape = [1] + list(self.model.model_output_shape)[1:] # Weird
for i in range(len(self.patch_dimensions)):
self.output_shape[self.output_patch_dimensions[i]] = input_data.shape[self.patch_dimensions[i]]
output_data = self.predict(input_data)
if self.output_channels is not None:
output_data = np.take(output_data, self.output_channels, self.channels_dim)
# Will fail for time-data.
if self.channels_first:
output_data = np.swapaxes(output_data, 1, -1)
self.return_objects.append(output_data)
return output_data
def predict(self, input_data):
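        # With patch_overlaps == k, build k evenly spaced grid offsets along each
        # patched axis (e.g. patch size 32, k == 2 gives offsets [0, 15]); each offset
        # shifts the whole patch grid so the overlapping predictions can be averaged.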
repetition_offsets = [np.linspace(0, self.input_patch_shape[axis] - 1, self.patch_overlaps + 1, dtype=int)[:-1] for axis in self.patch_dimensions]
if self.pad_borders:
# TODO -- Clean up this border-padding code and make it more readable.
input_pad_dimensions = [(0, 0)] * input_data.ndim
repatched_shape = self.output_shape
new_input_shape = list(input_data.shape)
for idx, dim in enumerate(self.patch_dimensions):
# Might not work for odd-shaped patches; check.
input_pad_dimensions[dim] = (int(self.input_patch_shape[dim] // 2), int(self.input_patch_shape[dim] // 2))
new_input_shape[dim] += self.input_patch_shape[dim]
for idx, dim in enumerate(self.output_patch_dimensions):
repatched_shape[dim] += self.input_patch_shape[dim]
padded_input_data = np.zeros(new_input_shape)
if self.channels_first:
input_slice = [slice(None)] * 2 + [slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, None) for dim in self.patch_dimensions]
else:
input_slice = [slice(None)] + [slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, None) for dim in self.patch_dimensions] + [slice(None)]
padded_input_data[tuple(input_slice)] = input_data
input_data = padded_input_data
else:
repatched_shape = self.output_shape
repatched_image = np.zeros(repatched_shape)
corner_data_dims = [input_data.shape[axis] for axis in self.patch_dimensions]
corner_patch_dims = [self.output_patch_shape[axis] for axis in self.patch_dimensions]
all_corners = np.indices(corner_data_dims)
# There must be a better way to round up to an integer..
possible_corners_slice = [slice(None)] + [slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, None) for dim in self.patch_dimensions]
all_corners = all_corners[tuple(possible_corners_slice)]
for rep_idx in range(self.patch_overlaps):
if self.verbose:
docker_print('Predicting patch set', str(rep_idx + 1) + '/' + str(self.patch_overlaps) + '...')
corners_grid_shape = [slice(None)]
for dim in range(all_corners.ndim - 1):
corners_grid_shape += [slice(repetition_offsets[dim][rep_idx], corner_data_dims[dim], corner_patch_dims[dim])]
corners_list = all_corners[tuple(corners_grid_shape)]
corners_list = np.reshape(corners_list, (corners_list.shape[0], -1)).T
if self.check_empty_patch:
corners_list = self.remove_empty_patches(input_data, corners_list)
for corner_list_idx in range(0, corners_list.shape[0], self.batch_size):
corner_batch = corners_list[corner_list_idx:corner_list_idx + self.batch_size]
input_patches = self.grab_patch(input_data, corner_batch)
prediction = self.run_inference(input_patches)
self.insert_patch(repatched_image, prediction, corner_batch)
if rep_idx == 0:
output_data = np.copy(repatched_image)
else:
output_data = self.aggregate_predictions(output_data, repatched_image, rep_idx)
if self.pad_borders:
output_slice = [slice(None)] * output_data.ndim # Weird
for idx, dim in enumerate(self.output_patch_dimensions):
# Might not work for odd-shaped patches; check.
output_slice[dim] = slice(self.input_patch_shape[dim] // 2, -self.input_patch_shape[dim] // 2, 1)
output_data = output_data[tuple(output_slice)]
if self.keep_channels is not None:
output_data = np.take(output_data, self.keep_channels, axis=-1)
return output_data
def run_inference(self, data):
return self.model.predict(data)
def aggregate_predictions(self, output_data, repatched_image, rep_idx):
output_data = output_data + (1.0 / (rep_idx)) * (repatched_image - output_data) # Running Average
return output_data
def pad_data(self, data, pad_dimensions):
        # Maybe more efficient than np.pad? Created for testing a different purpose.
for idx, width in enumerate(pad_dimensions):
pad_block_1, pad_block_2 = list(data.shape), list(data.shape)
pad_block_1[idx] = width[0]
pad_block_2[idx] = width[1]
data = np.concatenate((np.zeros(pad_block_1), data, np.zeros(pad_block_2)), axis=idx)
return data
def remove_empty_patches(self, input_data, corners_list):
corner_selections = []
for corner_idx, corner in enumerate(corners_list):
output_slice = [slice(None)] * input_data.ndim # Weird
for idx, dim in enumerate(self.patch_dimensions):
output_slice[dim] = slice(corner[idx] - self.input_patch_shape[dim] // 2, corner[idx] + self.input_patch_shape[dim] // 2, 1)
corner_selections += [np.any(input_data[tuple(output_slice)])]
return corners_list[corner_selections]
def grab_patch(self, input_data, corner_list):
""" Given a corner coordinate, a patch_shape, and some input_data, returns a patch or array of patches.
"""
output_patches_shape = (corner_list.shape[0], ) + self.input_patch_shape[1:]
output_patches = np.zeros((output_patches_shape))
for corner_idx, corner in enumerate(corner_list):
output_slice = [slice(None)] * input_data.ndim # Weird
for idx, dim in enumerate(self.patch_dimensions):
output_slice[dim] = slice(corner[idx] - self.input_patch_shape[dim] // 2, corner[idx] + self.input_patch_shape[dim] // 2, 1)
output_patches[corner_idx, ...] = input_data[tuple(output_slice)]
return output_patches
def insert_patch(self, input_data, patches, corner_list):
        # Some inefficiencies in the function. TODO: come back and rewrite.
for corner_idx, corner in enumerate(corner_list):
insert_slice = [slice(None)] * input_data.ndim # Weird
for idx, dim in enumerate(self.output_patch_dimensions):
# Might not work for odd-shaped patches; check.
insert_slice[dim] = slice(corner[idx] - self.output_patch_shape[dim] // 2, corner[idx] + self.output_patch_shape[dim] // 2, 1)
insert_patch = patches[corner_idx, ...]
if not np.array_equal(np.take(self.output_patch_shape, self.output_patch_dimensions), np.take(self.input_patch_shape, self.patch_dimensions)): # Necessary if statement?
patch_slice = [slice(None)] * insert_patch.ndim # Weird
for idx, dim in enumerate(self.output_patch_dimensions):
# Might not work for odd-shaped patches; check.
patch_slice[dim] = slice((self.input_patch_shape[dim] - self.output_patch_shape[dim]) // 2, -(self.input_patch_shape[dim] - self.output_patch_shape[dim]) // 2, 1)
insert_patch = insert_patch[tuple(patch_slice)]
input_data[tuple(insert_slice)] = insert_patch
        return input_data
|
topologies/models.py | cklewar/wistar | 152 | 12646153 | #
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# Copyright (c) 2015 Juniper Networks, Inc.
# All rights reserved.
#
# Use is subject to license terms.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.db import models
class Topology(models.Model):
description = models.TextField(default="none", verbose_name="Description")
name = models.TextField(default="noname", verbose_name="name")
json = models.TextField(verbose_name="json")
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(verbose_name="modified", auto_now=True)
class Meta:
verbose_name = 'Topology'
verbose_name_plural = 'topologies'
class ConfigSet(models.Model):
topology = models.ForeignKey('Topology')
name = models.TextField()
description = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'ConfigSet'
verbose_name_plural = 'configSets'
class Config(models.Model):
configSet = models.ForeignKey('ConfigSet')
name = models.TextField()
type = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
ip = models.GenericIPAddressField()
deviceConfig = models.TextField()
password = models.TextField()
class Meta:
verbose_name = 'Config'
verbose_name_plural = 'configs'
|
tomviz/python/Recon_tomopy_gridrec.py | sankhesh/tomviz | 284 | 12646167 | <gh_stars>100-1000
def transform(dataset, rot_center=0, tune_rot_center=True):
"""Reconstruct sinograms using the tomopy gridrec algorithm
Typically, a data exchange file would be loaded for this
reconstruction. This operation will attempt to perform
flat-field correction of the raw data using the dark and
white background data found in the data exchange file.
This operator also requires either the tomviz/tomopy-pipeline
docker image, or a python environment with tomopy installed.
"""
import numpy as np
import tomopy
# Get the current volume as a numpy array.
array = dataset.active_scalars
dark = dataset.dark
white = dataset.white
angles = dataset.tilt_angles
tilt_axis = dataset.tilt_axis
# TomoPy wants the tilt axis to be zero, so ensure that is true
if tilt_axis == 2:
order = [2, 1, 0]
array = np.transpose(array, order)
if dark is not None and white is not None:
dark = np.transpose(dark, order)
white = np.transpose(white, order)
if angles is not None:
# tomopy wants radians
theta = np.radians(angles)
else:
# Assume it is equally spaced between 0 and 180 degrees
theta = tomopy.angles(array.shape[0])
# Perform flat-field correction of raw data
if white is not None and dark is not None:
array = tomopy.normalize(array, white, dark, cutoff=1.4)
if rot_center == 0:
# Try to find it automatically
init = array.shape[2] / 2.0
rot_center = tomopy.find_center(array, theta, init=init, ind=0,
tol=0.5)
elif tune_rot_center:
# Tune the center
rot_center = tomopy.find_center(array, theta, init=rot_center, ind=0,
tol=0.5)
# Calculate -log(array)
array = tomopy.minus_log(array)
# Remove nan, neg, and inf values
array = tomopy.remove_nan(array, val=0.0)
array = tomopy.remove_neg(array, val=0.00)
array[np.where(array == np.inf)] = 0.00
# Perform the reconstruction
array = tomopy.recon(array, theta, center=rot_center, algorithm='gridrec')
# Mask each reconstructed slice with a circle.
array = tomopy.circ_mask(array, axis=0, ratio=0.95)
# Set the transformed array
child = dataset.create_child_dataset()
child.active_scalars = array
return_values = {}
return_values['reconstruction'] = child
return return_values
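# Minimal standalone sketch (not part of the tomviz operator): the same tomopy
# calls on a synthetic projection stack, without the tomviz dataset wrapper.
# The array shape and the fixed rotation center are illustrative assumptions.
def _example_gridrec():
    import numpy as np
    import tomopy
    # (projection angles, slices, detector pixels); values bounded away from
    # zero so that minus_log stays finite.
    proj = 0.5 + 0.5 * np.random.rand(180, 4, 64).astype(np.float32)
    theta = tomopy.angles(proj.shape[0])
    proj = tomopy.minus_log(proj)
    rec = tomopy.recon(proj, theta, center=proj.shape[2] / 2.0, algorithm='gridrec')
    return tomopy.circ_mask(rec, axis=0, ratio=0.95)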
|
extras/python/XSVFDisassembler.py | franzbischoff/JTAG | 105 | 12646182 | #!/usr/bin/python
# coding: utf-8
#
# example:
# $ ./xsvf -c disasm ../xsvf/XC2C64A/idcode_simpler.xsvf
#
import JTAGTAP
import XSVFDecoder
class XSVFDisassembler(XSVFDecoder.XSVFDecoder):
"""
XSVF Disassembler
"""
@staticmethod
def add_arguments(p):
"""Adds the necessary arguments to the parser."""
p.add_argument(
'-n', '--no_bytes',
action='store_true',
# type=bool,
help='Do not output bytes'
' (default=%(default)s)')
def __init__(self, args):
XSVFDecoder.XSVFDecoder.__init__(self, args)
self._args = args
self._current_instruction = 0
self._instruction_handlers = (
self.disasm_xcomplete,
self.disasm_xtdomask,
self.disasm_xsir,
self.disasm_xsdr,
self.disasm_xruntest,
self.disasm_xreserved_5,
self.disasm_xreserved_6,
self.disasm_xrepeat,
self.disasm_xsdrsize,
self.disasm_xsdrtdo,
self.disasm_xsetsdrmasks,
self.disasm_xsdrinc,
self.disasm_xsdrb,
self.disasm_xsdrc,
self.disasm_xsdre,
self.disasm_xsdrtdob,
self.disasm_xsdrtdoc,
self.disasm_xsdrtdoe,
self.disasm_xstate,
self.disasm_xendir,
self.disasm_xenddr,
self.disasm_xsir2,
self.disasm_xcomment,
self.disasm_xwait,
)
@property
def current_instruction(self):
return self._current_instruction
@current_instruction.setter
def current_instruction(self, value):
self._current_instruction = value
def format_first_part(self, s):
"""
        Breaks the instruction's bytes into lines of 8 bytes.
:param s: string of bytes
:return: list of 8 byte strings
"""
l = []
while s:
l.append(s[:24])
s = s[24:]
return l
def return_zeroeth(self, l):
"""
Returns the zeroeth element of the list, right whitespace stripped,
        but avoids returning None if the list is empty.
"""
if l:
l_0 = l[0].rstrip()
else:
l_0 = ""
return l_0
def format_byte_list(self, l):
return ' {:s}'.format(' '.join('{0:02X}'.format(x) for x in l))
def pretty_disasm(self, pars=("",)):
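        """Print the current instruction: raw bytes in the left column (omitted
        with --no_bytes) and the disassembled text from `pars` in the right column.
        """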
l1 = self.format_first_part(self.current_instruction_string)
l2 = list(pars)
l2[0] = '{:s}{:s}'.format(
self.instruction_name(self.current_instruction),
l2[0])
while l1 or l2:
l1_0 = self.return_zeroeth(l1)
l2_0 = self.return_zeroeth(l2)
if self._args.no_bytes:
if l2_0:
print('{0:s}'.format(l2_0))
else:
if l2_0:
print('{0:<24} {1:s}'.format(l1_0, l2_0))
else:
print('{0:s}'.format(l1_0))
l1 = l1[1:]
l2 = l2[1:]
def disasm_xcomplete(self):
self.pretty_disasm()
def disasm_xtdomask(self):
p = (
'',
self.format_byte_list(self.tdo_mask)
)
self.pretty_disasm(p)
def disasm_xsir(self):
p = (
' {:d} {:s}'.format(
self.sirsize_bits,
self.format_byte_list(self.tdi).strip()),
)
self.pretty_disasm(p)
def disasm_xsdr(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xruntest(self):
p = (
' {:d}'.format(self.runtest),
)
self.pretty_disasm(p)
def disasm_xreserved_5(self):
self.pretty_disasm()
def disasm_xreserved_6(self):
self.pretty_disasm()
def disasm_xrepeat(self):
p = (
' {:d}'.format(self.repeat),
)
self.pretty_disasm(p)
def disasm_xsdrsize(self):
p = (
' {0}'.format(self.sdrsize_bits),
)
self.pretty_disasm(p)
def disasm_xsdrtdo(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xsetsdrmasks(self):
p = (
'',
self.format_byte_list(self.address_mask) + ',',
self.format_byte_list(self.data_mask)
)
self.pretty_disasm(p)
def disasm_xsdrinc(self):
p = [
'',
self.format_byte_list(self.xsdrinc_start_address) + ',',
' {:d},'.format(self.xsdrinc_num_times)
]
n = self.xsdrinc_num_times
for l in self.xsdrinc_data_list:
s = self.format_byte_list(l)
n -= 1
# Adds a comma, unless it's the last one
if n:
s += ','
p.append(s)
self.pretty_disasm(p)
def disasm_xsdrb(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xsdrc(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xsdre(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xsdrtdob(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xsdrtdoc(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xsdrtdoe(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xstate(self):
p = (
' {:s}'.format(JTAGTAP.JTAGTAP.state_name(self.next_state)),
)
self.pretty_disasm(p)
def disasm_xendir(self):
p = (
' {:s}'.format(JTAGTAP.JTAGTAP.state_name(self.endir_state)),
)
self.pretty_disasm(p)
def disasm_xenddr(self):
p = (
' {:s}'.format(JTAGTAP.JTAGTAP.state_name(self.enddr_state)),
)
self.pretty_disasm(p)
def disasm_xsir2(self):
p = (
' {:d}'.format(self.sirsize_bits),
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xcomment(self):
p = (
' "{:s}"'.format(self.comment),
)
self.pretty_disasm(p)
def disasm_xwait(self):
p = (
' {:s} {:s} {:d}'.format(
JTAGTAP.JTAGTAP.state_name(self.wait_start_state),
JTAGTAP.JTAGTAP.state_name(self.wait_end_state),
self.wait_time_usecs),
)
self.pretty_disasm(p)
#
def instruction_handler(self, instruction):
self.current_instruction = instruction
self._instruction_handlers[instruction]()
def disasm_all_files(self, fd_list):
return self.decode_all_files(fd_list)
|
Module2/Python_Data_Analysis_code/Chapter 6/joblib/ch6util/read_wav/func_code.py | vijaysharmapc/Python-End-to-end-Data-Analysis | 119 | 12646199 | # first line: 10
@memory.cache
def read_wav():
wav = dl.data.get_smashing_baby()
return wavfile.read(wav)
|
mistral/db/sqlalchemy/migration/alembic_migrations/versions/035_namespace_support_postgresql.py | shubhamdang/mistral | 205 | 12646203 | <reponame>shubhamdang/mistral
# Copyright 2019 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Namespace support postgresql
Revision ID: 035
Revises: 034
Create Date: 2019-08-01 15:48:34.115639
"""
# revision identifiers, used by Alembic.
revision = '035'
down_revision = '034'
from alembic import op
from sqlalchemy.engine import reflection
def upgrade():
inspect = reflection.Inspector.from_engine(op.get_bind())
unique_constraints = [
unique_constraint['name'] for unique_constraint in
inspect.get_unique_constraints('workflow_definitions_v2')
]
if 'workflow_definitions_v2_name_project_id_key' in unique_constraints:
op.drop_constraint('workflow_definitions_v2_name_project_id_key',
table_name='workflow_definitions_v2')
|
tljh/user_creating_spawner.py | consideRatio/the-littlest-jupyterhub | 741 | 12646220 | from tljh.normalize import generate_system_username
from tljh import user
from tljh import configurer
from systemdspawner import SystemdSpawner
from traitlets import Dict, Unicode, List
from jupyterhub_configurator.mixins import ConfiguratorSpawnerMixin
class CustomSpawner(SystemdSpawner):
"""
SystemdSpawner with user creation on spawn.
FIXME: Remove this somehow?
"""
user_groups = Dict(key_trait=Unicode(), value_trait=List(Unicode()), config=True)
def start(self):
"""
Perform system user activities before starting server
"""
# FIXME: Move this elsewhere? Into the Authenticator?
system_username = generate_system_username('jupyter-' + self.user.name)
# FIXME: This is a hack. Allow setting username directly instead
self.username_template = system_username
user.ensure_user(system_username)
user.ensure_user_group(system_username, 'jupyterhub-users')
if self.user.admin:
user.ensure_user_group(system_username, 'jupyterhub-admins')
else:
user.remove_user_group(system_username, 'jupyterhub-admins')
if self.user_groups:
for group, users in self.user_groups.items():
if self.user.name in users:
user.ensure_user_group(system_username, group)
return super().start()
cfg = configurer.load_config()
# Use the jupyterhub-configurator mixin only if configurator is enabled
# otherwise, any bugs in the configurator backend will stop new user spawns!
if cfg['services']['configurator']['enabled']:
# Dynamically create the Spawner class using `type`(https://docs.python.org/3/library/functions.html?#type),
# based on whether or not it should inherit from ConfiguratorSpawnerMixin
UserCreatingSpawner = type('UserCreatingSpawner', (ConfiguratorSpawnerMixin, CustomSpawner), {})
else:
UserCreatingSpawner = type('UserCreatingSpawner', (CustomSpawner,), {})
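# Illustrative sketch (not part of TLJH): the three-argument form of type() used
# above is equivalent to writing a class statement; the classes below are
# hypothetical stand-ins for the spawner base and the configurator mixin.
def _type_demo():
    class _Base:
        def start(self):
            return "started"
    class _Mixin:
        def start(self):
            # e.g. apply extra configuration, then delegate to the base class
            return super().start()
    # Equivalent to: class _Demo(_Mixin, _Base): pass
    _Demo = type("_Demo", (_Mixin, _Base), {})
    return _Demo().start()  # -> "started"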
|
lib/model/train_val_memory.py | xctspring/iter-reason | 275 | 12646232 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.config import cfg
from model.train_val import filter_roidb, SolverWrapper
from utils.timer import Timer
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import os
import sys
import glob
import time
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
class MemorySolverWrapper(SolverWrapper):
"""
A wrapper class for the training process of spatial memory
"""
def construct_graph(self, sess):
with sess.graph.as_default():
# Set the random seed for tensorflow
tf.set_random_seed(cfg.RNG_SEED)
# Build the main computation graph
layers = self.net.create_architecture('TRAIN', self.imdb.num_classes, tag='default')
# Define the loss
loss = layers['total_loss']
# Set learning rate and momentum
lr = tf.Variable(cfg.TRAIN.RATE, trainable=False)
self.optimizer = tf.train.MomentumOptimizer(lr, cfg.TRAIN.MOMENTUM)
# Compute the gradients with regard to the loss
gvs = self.optimizer.compute_gradients(loss)
grad_summaries = []
for grad, var in gvs:
if 'SMN' not in var.name and 'GMN' not in var.name:
continue
grad_summaries.append(tf.summary.histogram('TRAIN/' + var.name, var))
if grad is not None:
grad_summaries.append(tf.summary.histogram('GRAD/' + var.name, grad))
# Double the gradient of the bias if set
if cfg.TRAIN.DOUBLE_BIAS:
final_gvs = []
with tf.variable_scope('Gradient_Mult') as scope:
for grad, var in gvs:
scale = 1.
if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
scale *= 2.
if not np.allclose(scale, 1.0):
grad = tf.multiply(grad, scale)
final_gvs.append((grad, var))
train_op = self.optimizer.apply_gradients(final_gvs)
else:
train_op = self.optimizer.apply_gradients(gvs)
self.summary_grads = tf.summary.merge(grad_summaries)
# We will handle the snapshots ourselves
self.saver = tf.train.Saver(max_to_keep=100000)
# Write the train and validation information to tensorboard
self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
self.valwriter = tf.summary.FileWriter(self.tbvaldir)
return lr, train_op
def train_model(self, sess, max_iters):
# Build data layers for both training and validation set
self.data_layer = self.imdb.data_layer(self.roidb, self.imdb.num_classes)
self.data_layer_val = self.imdb.data_layer(self.valroidb, self.imdb.num_classes, random=True)
# Construct the computation graph
lr, train_op = self.construct_graph(sess)
# Find previous snapshots if there is any to restore from
lsf, nfiles, sfiles = self.find_previous()
# Initialize the variables or restore them from the last snapshot
if lsf == 0:
rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize(sess)
else:
rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(sess,
str(sfiles[-1]),
str(nfiles[-1]))
timer = Timer()
iter = last_snapshot_iter + 1
last_summary_iter = iter
last_summary_time = time.time()
# Make sure the lists are not empty
stepsizes.append(max_iters)
stepsizes.reverse()
next_stepsize = stepsizes.pop()
while iter < max_iters + 1:
# Learning rate
if iter == next_stepsize + 1:
# Add snapshot here before reducing the learning rate
self.snapshot(sess, iter)
rate *= cfg.TRAIN.GAMMA
sess.run(tf.assign(lr, rate))
next_stepsize = stepsizes.pop()
timer.tic()
# Get training data, one batch at a time
blobs = self.data_layer.forward()
now = time.time()
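            # Write summaries on the first iteration, and afterwards only once enough
            # wall-clock time and enough iterations have passed since the last summary.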
if iter == 1 or \
(now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL and \
iter - last_summary_iter > cfg.TRAIN.SUMMARY_ITERS):
# Compute the graph with summary
loss_cls, total_loss, summary, gsummary = \
self.net.train_step_with_summary(sess, blobs, train_op, self.summary_grads)
self.writer.add_summary(summary, float(iter))
self.writer.add_summary(gsummary, float(iter+1))
# Also check the summary on the validation set
blobs_val = self.data_layer_val.forward()
summary_val = self.net.get_summary(sess, blobs_val)
self.valwriter.add_summary(summary_val, float(iter))
last_summary_iter = iter
last_summary_time = now
else:
# Compute the graph without summary
loss_cls, total_loss = self.net.train_step(sess, blobs, train_op)
timer.toc()
# Display training information
if iter % (cfg.TRAIN.DISPLAY) == 0:
print('iter: %d / %d, total loss: %.6f\n >>> loss_cls: %.6f\n >>> lr: %f' % \
(iter, max_iters, total_loss, loss_cls, lr.eval()))
print('speed: {:.3f}s / iter'.format(timer.average_time))
# Snapshotting
if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
ss_path, np_path = self.snapshot(sess, iter)
np_paths.append(np_path)
ss_paths.append(ss_path)
# Remove the old snapshots if there are too many
if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
self.remove_snapshot(np_paths, ss_paths)
iter += 1
if last_snapshot_iter != iter - 1:
self.snapshot(sess, iter - 1)
self.writer.close()
self.valwriter.close()
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=None,
max_iters=40000):
"""Train a Faster R-CNN network with memory."""
roidb = filter_roidb(roidb)
valroidb = filter_roidb(valroidb)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
sw = MemorySolverWrapper(sess, network, imdb, roidb, valroidb,
output_dir, tb_dir,
pretrained_model=pretrained_model)
print('Solving...')
sw.train_model(sess, max_iters)
print('done solving')
|
tests/test_molecule.py | liudongliangHI/ProLIF | 123 | 12646320 | <reponame>liudongliangHI/ProLIF<filename>tests/test_molecule.py
import pytest
from MDAnalysis import SelectionError
from rdkit import Chem
from prolif.molecule import (Molecule,
pdbqt_supplier,
mol2_supplier,
sdf_supplier)
from prolif.residue import ResidueId
from prolif.datafiles import datapath
from .test_base import TestBaseRDKitMol, rdkit_mol, ligand_rdkit, u
class TestMolecule(TestBaseRDKitMol):
@pytest.fixture(scope="class")
def mol(self):
return Molecule(rdkit_mol)
def test_mapindex(self, mol):
for atom in mol.GetAtoms():
assert atom.GetUnsignedProp("mapindex") == atom.GetIdx()
def test_from_mda(self):
rdkit_mol = Molecule(ligand_rdkit)
mda_mol = Molecule.from_mda(u, "resname LIG")
assert rdkit_mol[0].resid == mda_mol[0].resid
assert (rdkit_mol.HasSubstructMatch(mda_mol) and
mda_mol.HasSubstructMatch(rdkit_mol))
def test_from_mda_empty_ag(self):
ag = u.select_atoms("resname FOO")
with pytest.raises(SelectionError, match="AtomGroup is empty"):
Molecule.from_mda(ag)
def test_from_rdkit(self):
rdkit_mol = Molecule(ligand_rdkit)
newmol = Molecule.from_rdkit(ligand_rdkit)
assert rdkit_mol[0].resid == newmol[0].resid
def test_from_rdkit_default_resid(self):
mol = Chem.MolFromSmiles("CCO")
newmol = Molecule.from_rdkit(mol)
assert newmol[0].resid == ResidueId("UNL", 1)
def test_from_rdkit_resid_args(self):
mol = Chem.MolFromSmiles("CCO")
newmol = Molecule.from_rdkit(mol, "FOO", 42, "A")
assert newmol[0].resid == ResidueId("FOO", 42, "A")
@pytest.mark.parametrize("key", [
0,
42,
-1,
"LYS49.A",
ResidueId("LYS", 49, "A")
])
def test_getitem(self, mol, key):
assert mol[key].resid is mol.residues[key].resid
def test_iter(self, mol):
for i, r in enumerate(mol):
assert r.resid == mol[i].resid
def test_n_residues(self, mol):
assert mol.n_residues == mol.residues.n_residues
class TestSupplier:
def test_pdbqt(self):
path = datapath / "vina"
pdbqts = sorted(path.glob("*.pdbqt"))
template = Chem.MolFromSmiles("C[NH+]1CC(C(=O)NC2(C)OC3(O)C4CCCN4C(=O)"
"C(Cc4ccccc4)N3C2=O)C=C2c3cccc4[nH]cc"
"(c34)CC21")
suppl = pdbqt_supplier(pdbqts, template)
mols = list(suppl)
assert isinstance(mols[0], Molecule)
assert len(mols) == len(pdbqts)
def test_sdf(self):
path = str(datapath / "vina" / "vina_output.sdf")
suppl = sdf_supplier(path)
mols = list(suppl)
assert isinstance(mols[0], Molecule)
assert len(mols) == 9
mi = mols[0].GetAtomWithIdx(0).GetMonomerInfo()
assert all([mi.GetResidueName() == "UNL",
mi.GetResidueNumber() == 1,
mi.GetChainId() == ""])
def test_mol2(self):
path = str(datapath / "vina" / "vina_output.mol2")
suppl = mol2_supplier(path)
mols = list(suppl)
assert isinstance(mols[0], Molecule)
assert len(mols) == 9
mi = mols[0].GetAtomWithIdx(0).GetMonomerInfo()
assert all([mi.GetResidueName() == "UNL",
mi.GetResidueNumber() == 1,
mi.GetChainId() == ""])
def test_mol2_starting_with_comment(self):
path = str(datapath / "mol_comment.mol2")
suppl = mol2_supplier(path)
mol = next(suppl)
assert mol is not None
|
scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py | jiskra/openmv | 1,761 | 12646339 | <filename>scripts/examples/OpenMV/32-modbus/modbus_rtu_slave.py<gh_stars>1000+
import time
from pyb import UART
from modbus import ModbusRTU
uart = UART(3,115200, parity=None, stop=2, timeout=1, timeout_char=4)
modbus = ModbusRTU(uart, register_num=9999)
while(True):
if modbus.any():
modbus.handle(debug=True)
else:
time.sleep_ms(100)
modbus.REGISTER[0] = 1000
modbus.REGISTER[1] += 1
modbus.REGISTER[3] += 3
#print(modbus.REGISTER[10:15])
# image processing in there
|
adalm/incr_bpe/tokenizer.py | Sanster/unilm | 5,129 | 12646355 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
import logging
from six.moves import range # pylint: disable=redefined-builtin
# from tensor2tensor.utils import mlperf_log
import time
import glob
# Conversion between Unicode and UTF-8, if required (on Python2)
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
logger = logging.getLogger(__name__)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N") or
unicodedata.category(six.unichr(i)).startswith("P")))
# unicodedata.category(six.unichr(i)).startswith("S")
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
add_remaining = False
for pos in range(1, len(text)):
add_remaining = False
if is_alnum[pos] != is_alnum[pos - 1]:
if not is_alnum[pos]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
add_remaining = False
ret.append(token)
else:
add_remaining = True
token_start = pos
final_token = text[token_start:] if text[-1] in _ALPHANUMERIC_CHAR_SET else text[token_start:-1]
if add_remaining:
ret.append(final_token)
# split on punctuation
final_tokens = []
for token in ret:
splitted_token = _run_split_on_punc(token)
final_tokens.extend(splitted_token)
return final_tokens
def _run_split_on_punc(text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True, do_lower_case=False):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
      file as a single string.
    do_lower_case: A boolean. If true, lowercase each line or file before
      yielding it.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(glob.glob(filepattern))
print(filenames, 'do lower case:', do_lower_case)
lines_read = 0
for filename in filenames:
start = time.time()
with open(filename) as f:
if split_on_newlines:
for line in f:
if do_lower_case:
line = line.lower()
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
if lines_read % 100000 == 0:
print("read", lines_read, "lines,", time.time() - start, "secs elapsed")
else:
if max_lines:
doc = []
for line in f:
if do_lower_case:
line = line.lower()
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
    print(time.time() - start, "secs elapsed reading file:", filename)
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True, additional_chars="", do_lower_case=False):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
    additional_chars: A string. Each of its characters is treated as a normal
      alphanumeric character so that it can be included in vocab tokens.
Returns:
a dictionary mapping token to count.
"""
if additional_chars:
_ALPHANUMERIC_CHAR_SET.add(additional_chars)
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines,
do_lower_case=do_lower_case):
counts.update(encode(_native_to_unicode(doc)))
print("read all files")
return counts
def vocab_token_counts(text_filepattern, max_lines, do_lower_case=False):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
logger.warning("Malformed vocab line #%d '%s'", i, line)
continue
if do_lower_case:
line = line.lower()
token, count = line.rsplit(",", 1)
ret[_native_to_unicode(token)] = int(count)
return ret
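# Illustrative sketch (not part of the original module): tokenizing the sentence
# used in the module docstring. Note that this variant also splits tokens on
# punctuation, so its output is finer grained than the tensor2tensor example.
def _example_tokenize():
  sample = u"Dude - that's so cool."
  tokens = encode(sample)
  return tokens, decode(tokens)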
|