max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
venv/Lib/site-packages/zope/interface/common/io.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 275 | 12624670 | <gh_stars>100-1000
##############################################################################
# Copyright (c) 2020 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""
Interface definitions paralleling the abstract base classes defined in
:mod:`io`.
After this module is imported, the standard library types will declare
that they implement the appropriate interface.
.. versionadded:: 5.0.0
"""
from __future__ import absolute_import
import io as abc
from zope.interface.common import ABCInterface
# pylint:disable=inherit-non-class,
# pylint:disable=no-member
class IIOBase(ABCInterface):
abc = abc.IOBase
class IRawIOBase(IIOBase):
abc = abc.RawIOBase
class IBufferedIOBase(IIOBase):
abc = abc.BufferedIOBase
try:
import cStringIO
except ImportError:
# Python 3
extra_classes = ()
else:
import StringIO
extra_classes = (StringIO.StringIO, cStringIO.InputType, cStringIO.OutputType)
del cStringIO
del StringIO
class ITextIOBase(IIOBase):
abc = abc.TextIOBase
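# Usage sketch (added illustration, not part of the original file): after this
# module is imported, standard-library stream types should provide the
# corresponding interfaces via zope.interface's declaration machinery. The
# helper below is never called; it only shows the expected check.
def _usage_sketch_io_interfaces():
    import io
    return ITextIOBase.providedBy(io.StringIO())  # expected to be True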
|
Python/Numpy/fft.py | Gjacquenot/training-material | 115 | 12624674 | <reponame>Gjacquenot/training-material
#!/usr/bin/env python
from argparse import ArgumentParser
from math import ceil
import numpy as np
import matplotlib.pyplot as plt
arg_parser = ArgumentParser(description='plot function and frequency '
'spectrum')
arg_parser.add_argument('output', nargs='?', help='name of output file')
arg_parser.add_argument('--noise', type=float, default=0.05,
help='amplitude of normally distributed noise')
options = arg_parser.parse_args()
# amplitudes and frequencies of signals
ampl = np.array([1.0, 0.75, 0.5, 0.25])
freq = np.array([1.1, 1.9, 3.1, 4.5])
# total number of samples, and number of samples/second
n = 2**12
sample_freq = 2**7
# create figure environment
plt.figure(1)
# compute signal
t = np.arange(n, dtype=np.float64)
y = np.zeros(len(t))
for i in range(len(ampl)):
y += ampl[i]*np.cos(2.0*np.pi*freq[i]*t/sample_freq)
noise_ampl = options.noise
y += noise_ampl*np.random.randn(len(t))
# plot signal
plt.subplot(2, 1, 1)
plt.axis([0.0, 4000, -3.0, 3.0])
plt.plot(t, y)
plt.xlabel(r'$t$')
plt.ylabel(r'signal')
# create a periodic signal for FFT, and compute
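# (added note: mirroring the samples below makes the doubled signal continuous
# at the boundaries, so its periodic extension has no jump and spectral leakage
# in the FFT is reduced)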
t = np.arange(2*n, dtype=np.float64)
y = np.concatenate((y, y[::-1]))
f = np.fft.fftfreq(len(t), 1.0/sample_freq)
ty = np.fft.fft(y)
rnorm = np.max(np.abs(ty.real))
ty = ty/rnorm
# plot normalized frequency spectrum
max_freq = ceil(max(freq))
plt.subplot(2, 1, 2)
plt.axis([0.0, max_freq, 0.0, 1.0])
plt.plot(f[:n], np.abs(ty[:n]))
plt.xlabel(r'$f$')
plt.ylabel(r'spectrum')
# fix layout problems caused by default axes of subplot
plt.tight_layout()
# show plot, or save it to a file, if specified
if options.output:
plt.savefig(options.output)
else:
plt.show()
|
third_party/gsutil/test/gsutil_measure_imports.py | tingshao/catapult | 2,151 | 12624676 | # -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom importer to handle module load times. Wraps gsutil."""
from __future__ import absolute_import
import __builtin__
import atexit
from collections import OrderedDict
from operator import itemgetter
import os
import sys
import timeit
# This file will function in an identical manner to 'gsutil'. Instead of calling
# 'gsutil some_command' run 'gsutil_measure_imports some_command' from this test
# directory. When used, this file will change the MEASURING_TIME_ACTIVE variable
# in the gsutil.py file to True, signaling that we are measuring import
# times. In all other cases, this variable will be set to False. This behavior
# will allow gsutil developers to analyze which modules are taking the most time
# to initialize. This is especially important because not all of those modules
# will be used. Therefore it is important to speed up the ones which are not
# used and take a significant amount of time to initialize (e.g. 100ms).
INITIALIZATION_TIMES = {}
real_importer = __builtin__.__import__
def get_sorted_initialization_times(items=10):
"""Returns a sorted OrderedDict.
The keys are module names and the values are the corresponding times taken to
import.
Args:
items: The number of items to return in the list.
Returns:
An OrderedDict object, sorting initialization times in increasing order.
"""
return OrderedDict(sorted(INITIALIZATION_TIMES.items(),
key=itemgetter(1), reverse=True)[:items])
def print_sorted_initialization_times():
"""Prints the most expensive imports in descending order."""
print '\n***Most expensive imports***'
for item in get_sorted_initialization_times().iteritems():
print item
def timed_importer(name, *args, **kwargs):
"""Wrapper for the default Python import function.
Args:
name: The name of the module.
*args: A list of arguments passed to import.
**kwargs: A dictionary of arguments to pass to import.
Returns:
The value provided by the default import function.
"""
# TODO: Build an import tree to better understand which areas need more
# attention.
import_start_time = timeit.default_timer()
import_value = real_importer(name, *args, **kwargs)
import_end_time = timeit.default_timer()
INITIALIZATION_TIMES[name] = import_end_time - import_start_time
return import_value
__builtin__.__import__ = timed_importer
def initialize():
"""Initializes gsutil."""
sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], '..')))
import gsutil # pylint: disable=g-import-not-at-top
atexit.register(print_sorted_initialization_times)
gsutil.MEASURING_TIME_ACTIVE = True
gsutil.RunMain()
|
tests/components/overland_flow/test_kinwave_implicit.py | amanaster2/landlab | 257 | 12624682 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Unit tests for KinwaveImplicitOverlandFlowModel.
Created on Sat Apr 1 10:49:33 2017
@author: gtucker
"""
import numpy as np
from landlab import RasterModelGrid
from landlab.components import KinwaveImplicitOverlandFlow
def test_initialization():
"""Test initialization with various parameters."""
rg = RasterModelGrid((3, 4), xy_spacing=2.0)
rg.add_zeros("topographic__elevation", at="node")
kw = KinwaveImplicitOverlandFlow(rg)
# Make sure fields have been created
for field_name in kw._info:
if kw._info[field_name]["mapping"] == "node":
assert field_name in kw.grid.at_node
elif kw._info[field_name]["mapping"] == "link":
assert field_name in kw.grid.at_link
# Re-initialize, this time with fields already existing in the grid
# (this triggers the "if" instead of "else" in the field setup in init)
kw = KinwaveImplicitOverlandFlow(rg)
def test_first_iteration():
"""Test stuff that happens only on first iteration"""
# Create a basic ramp
rg = RasterModelGrid((10, 10), xy_spacing=(2, 2))
rg.add_field("topographic__elevation", 0.1 * rg.node_y, at="node")
# Create component and run it
kw = KinwaveImplicitOverlandFlow(rg)
kw.run_one_step(1.0)
# Max gradient should be 0.1, and min should be zero
assert round(np.amax(kw.grid.at_link["topographic__gradient"]), 2) == 0.1
assert round(np.amin(kw.grid.at_link["topographic__gradient"]), 2) == 0.0
assert round(np.amax(kw._sqrt_slope), 3) == 0.316
assert round(np.amax(kw._grad_width_sum), 3) == 0.632
assert round(np.amax(kw._alpha), 3) == 15.811
def test_steady_basic_ramp():
"""Run to steady state with basic ramp"""
# Create a basic ramp
rg = RasterModelGrid((10, 10), xy_spacing=(2, 2))
rg.add_field("topographic__elevation", 0.1 * rg.node_y, at="node")
# Create component and run it
kw = KinwaveImplicitOverlandFlow(rg, runoff_rate=0.001 * 3600000.0)
for i in range(12):
kw.run_one_step(1.0)
# Look at a column of nodes down the middle. The inflow from uphill should
# be, from top to bottom: 0, 0.004, 0.008, 0.012, 0.016, 0.02, 0.024, 0.028
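# (added note: the runoff_rate argument, apparently in mm/hr, is 3600 mm/hr here,
# i.e. 0.001 m/s; each 2 m x 2 m cell then contributes 0.001 * 4 = 0.004 m**3/s,
# so the inflow grows by 0.004 per row moving downhill)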
assert kw._disch_in[85] == 0.0
assert round(kw._disch_in[75], 3) == 0.004
assert round(kw._disch_in[65], 3) == 0.008
assert round(kw._disch_in[55], 3) == 0.012
assert round(kw._disch_in[45], 3) == 0.016
assert round(kw._disch_in[35], 3) == 0.020
assert round(kw._disch_in[25], 3) == 0.024
assert round(kw._disch_in[15], 3) == 0.028
# Try with passing in runoff
kw = KinwaveImplicitOverlandFlow(rg, runoff_rate=360.0)
kw.depth[:] = 0.0
for i in range(22):
kw.run_one_step(1.0)
# Again, look at a column of nodes down the middle. The inflow from uphill
# should now be 1/10 of the prior example.
assert round(kw._disch_in[75], 4) == 0.0004
assert round(kw._disch_in[65], 4) == 0.0008
assert round(kw._disch_in[55], 4) == 0.0012
assert round(kw._disch_in[45], 4) == 0.0016
assert round(kw._disch_in[35], 4) == 0.0020
assert round(kw._disch_in[25], 4) == 0.0024
assert round(kw._disch_in[15], 4) == 0.0028
# Try with default runoff rate of 1 mm/hr = 2.78e-7 m/s
kw = KinwaveImplicitOverlandFlow(rg)
assert round(kw.runoff_rate * 1.0e7, 2) == 2.78
kw.depth[:] = 0.0
for i in range(18):
kw.run_one_step(10.0)
# Look at a column of nodes down the middle. With the much smaller default
# runoff rate, the inflow from uphill should be, from top to bottom:
# 0, 1.1e-6, 2.2e-6, ..., 7.7e-6
assert kw._disch_in[85] == 0.0
assert round(kw._disch_in[75], 7) == 0.0000011
assert round(kw._disch_in[65], 7) == 0.0000022
assert round(kw._disch_in[55], 7) == 0.0000033
assert round(kw._disch_in[45], 7) == 0.0000044
assert round(kw._disch_in[35], 7) == 0.0000055
assert round(kw._disch_in[25], 7) == 0.0000066
assert round(kw._disch_in[15], 7) == 0.0000077
def test_curved_surface():
"""Test flow across a curved surface."""
# Create a grid
rg = RasterModelGrid((10, 10), xy_spacing=(2, 2))
rg.add_field(
"topographic__elevation", 3.0 * rg.node_x ** 2 + rg.node_y ** 2, at="node"
)
# Create component and run it
kw = KinwaveImplicitOverlandFlow(rg, runoff_rate=0.001 * 3600000.0)
for i in range(8):
kw.run_one_step(1.0)
# The inflow discharge to each cell at steady state should equal the
# runoff rate times the "inflow" drainage area, which is the total drainage
# area minus the area of the cell itself. Here we'll test a column of core
# nodes across the middle of the domain.
area = rg.at_node["drainage_area"]
runoff_rate = 0.001
unit_area = 4.0
for i in range(15, 95, 10):
assert round(kw._disch_in[i], 6) == round(
runoff_rate * (area[i] - unit_area), 6
)
if __name__ == "__main__":
test_initialization()
test_first_iteration()
test_steady_basic_ramp()
test_curved_surface()
|
diffxpy/unit_test/test_partition.py | adkinsrs/diffxpy | 111 | 12624692 | <gh_stars>100-1000
import unittest
import logging
import numpy as np
import pandas as pd
import scipy.stats as stats
from batchglm.api.models.numpy.glm_nb import Simulator
import diffxpy.api as de
class TestPartitionNull(unittest.TestCase):
def test_null_distribution_wald(self, n_cells: int = 4000, n_genes: int = 200):
"""
Test if Partition.wald() generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=2)
sim.generate()
sample_description = pd.DataFrame({
"covar1": np.random.randint(2, size=sim.nobs),
"covar2": np.random.randint(2, size=sim.nobs)
})
sample_description["cond"] = sim.sample_description["condition"].values
partition = de.test.partition(
data=sim.x,
parts="cond",
sample_description=sample_description
)
det = partition.wald(
factor_loc_totest="covar1",
formula_loc="~ 1 + covar1 + covar2",
training_strategy="DEFAULT",
dtype="float64"
)
_ = det.summary()
# Compare p-value distribution under null model against uniform distribution.
pval_h0 = stats.kstest(det.pval.flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
def test_null_distribution_wald_multi(self, n_cells: int = 4000, n_genes: int = 200):
"""
Test if de.wald() (multivariate mode) generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=2)
sim.generate()
sample_description = pd.DataFrame({
"covar1": np.random.randint(4, size=sim.nobs),
"covar2": np.random.randint(2, size=sim.nobs)
})
sample_description["cond"] = sim.sample_description["condition"].values
partition = de.test.partition(
data=sim.x,
parts="cond",
sample_description=sample_description
)
det = partition.wald(
factor_loc_totest="covar1",
formula_loc="~ 1 + covar1 + covar2",
training_strategy="DEFAULT",
dtype="float64"
)
_ = det.summary()
# Compare p-value distribution under null model against uniform distribution.
pval_h0 = stats.kstest(det.pval.flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
def test_null_distribution_lrt(self, n_cells: int = 4000, n_genes: int = 200):
"""
Test if de.lrt() generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=2)
sim.generate()
sample_description = pd.DataFrame({
"covar1": np.random.randint(2, size=sim.nobs),
"covar2": np.random.randint(2, size=sim.nobs)
})
sample_description["cond"] = sim.sample_description["condition"].values
partition = de.test.partition(
data=sim.x,
parts="cond",
sample_description=sample_description
)
det = partition.lrt(
full_formula_loc="~ 1 + covar1",
full_formula_scale="~ 1",
reduced_formula_loc="~ 1",
reduced_formula_scale="~ 1",
training_strategy="DEFAULT",
dtype="float64"
)
_ = det.summary()
# Compare p-value distribution under null model against uniform distribution.
pval_h0 = stats.kstest(det.pval.flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of lrt(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
def test_null_distribution_ttest(self, n_cells: int = 4000, n_genes: int = 200):
"""
Test if de.t_test() generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=2)
sim.generate()
sample_description = pd.DataFrame({
"covar1": np.random.randint(2, size=sim.nobs)
})
sample_description["cond"] = sim.sample_description["condition"].values
partition = de.test.partition(
data=sim.x,
parts="cond",
sample_description=sample_description
)
det = partition.t_test(
grouping="covar1",
is_logged=False,
dtype="float64"
)
summary = det.summary()
# Compare p-value distribution under null model against uniform distribution.
pval_h0 = stats.kstest(det.pval.flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of t_test(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
def test_null_distribution_rank(self, n_cells: int = 4000, n_genes: int = 200):
"""
Test if rank_test() generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=2)
sim.generate()
sample_description = pd.DataFrame({
"covar1": np.random.randint(2, size=sim.nobs)
})
sample_description["cond"] = sim.sample_description["condition"].values
partition = de.test.partition(
data=sim.x,
parts="cond",
sample_description=sample_description
)
det = partition.rank_test(
grouping="covar1",
dtype="float64"
)
summary = det.summary()
# Compare p-value distribution under null model against uniform distribution.
pval_h0 = stats.kstest(det.pval.flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of rank_test(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
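# Illustrative sketch (added, not part of diffxpy): the tests above rely on the
# fact that p-values are approximately Uniform(0, 1) under the null hypothesis,
# so a KS test of the observed p-values against "uniform" should itself return
# a large p-value. The helper below is never called by the test suite.
def _uniform_pvalue_sanity_sketch(n_tests: int = 200):
    pvals = np.random.uniform(size=n_tests)
    return stats.kstest(pvals, 'uniform').pvalue  # typically well above 0.05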
if __name__ == '__main__':
unittest.main()
|
data/synthia_dataset.py | Jo-wang/ProDA | 193 | 12624693 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import torch
import numpy as np
from PIL import Image
import random
import imageio
from data import BaseDataset
from data.randaugment import RandAugmentMC
class Synthia_loader(BaseDataset):
"""
Synthia synthetic dataset
for domain adaptation to Cityscapes
"""
def __init__(self, opt, logger, augmentations=None):
self.opt = opt
self.root = opt.src_rootpath
self.augmentations = augmentations
self.randaug = RandAugmentMC(2, 10)
self.n_classes = opt.n_class
self.img_size = (1280, 760)
self.mean = [0.0, 0.0, 0.0] #TODO: calculating the mean value of rgb channels on GTA5
self.image_base_path = os.path.join(self.root, 'RGB')
self.label_base_path = os.path.join(self.root, 'GT/LABELS')
self.distribute = np.zeros(self.n_classes, dtype=float)
ids = os.listdir(self.image_base_path)
self.ids = []
for i in range(len(ids)):
self.ids.append(os.path.join(self.label_base_path, ids[i]))
if self.n_classes == 19:
self.valid_classes = [3,4,2,21,5,7,15,9,6,16,1,10,17,8,18,19,20,12,11,]
self.class_names = ["unlabelled","Road","Sidewalk","Building","Wall",
"Fence","Pole","Traffic_light","Traffic_sign","Vegetation",
"Terrain","sky","Pedestrian","Rider","Car",
"Truck","Bus","Train","Motorcycle","Bicycle",
]
elif self.n_classes == 16:
self.valid_classes = [3,4,2,21,5,7,15,9,6,1,10,17,8,19,12,11,]
self.class_names = ["unlabelled","Road","Sidewalk","Building","Wall",
"Fence","Pole","Traffic_light","Traffic_sign","Vegetation",
"sky","Pedestrian","Rider","Car","Bus",
"Motorcycle","Bicycle",
]
elif self.n_classes == 13:
self.valid_classes = [3,4,2,15,9,6,1,10,17,8,19,12,11,]
self.class_names = ["unlabelled","Road","Sidewalk","Building","Traffic_light",
"Traffic_sign","Vegetation","sky","Pedestrian","Rider",
"Car","Bus","Motorcycle","Bicycle",
]
self.ignore_index = 250
self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))
imageio.plugins.freeimage.download()
if len(self.ids) == 0:
raise Exception(
"No files found in %s" % (self.image_base_path)
)
print("Found {} images".format(len(self.ids)))
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
"""__getitem__
param: index
"""
id = self.ids[index]
img_path = os.path.join(self.image_base_path, id.split('/')[-1])
lbl_path = id
img = Image.open(img_path)
lbl = np.asarray(imageio.imread(lbl_path, format='PNG-FI'))[:,:,0]
lbl = Image.fromarray(lbl)
img = img.resize(self.img_size, Image.BILINEAR)
lbl = lbl.resize(self.img_size, Image.NEAREST)
img = np.asarray(img, dtype=np.uint8)
# lbl = lbl.convert('L')
lbl = np.asarray(lbl, dtype=np.uint8)
lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
input_dict = {}
if self.augmentations!=None:
img, lbl, _, _, _ = self.augmentations(img, lbl)
img_strong, params = self.randaug(Image.fromarray(img))
img_strong, _ = self.transform(img_strong, lbl)
input_dict['img_strong'] = img_strong
input_dict['params'] = params
img, lbl = self.transform(img, lbl)
input_dict['img'] = img
input_dict['label'] = lbl
input_dict['img_path'] = self.ids[index]
return input_dict
def encode_segmap(self, lbl):
label_copy = 250 * np.ones(lbl.shape, dtype=np.uint8)
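# class_map (built in __init__) sends each raw SYNTHIA id listed in
# valid_classes to its train id, e.g. raw id 3 -> train id 0 in the 19-class
# setup; ids not listed keep the ignore value 250 set above.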
for k, v in list(self.class_map.items()):
label_copy[lbl == k] = v
return label_copy
# def decode_segmap(self, temp):
# r = temp.copy()
# g = temp.copy()
# b = temp.copy()
# for l in range(0, self.n_classes):
# r[temp == l] = self.label_colours[l][0]
# g[temp == l] = self.label_colours[l][1]
# b[temp == l] = self.label_colours[l][2]
# rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
# rgb[:, :, 0] = r / 255.0
# rgb[:, :, 1] = g / 255.0
# rgb[:, :, 2] = b / 255.0
# return rgb
def transform(self, img, lbl):
"""transform
img, lbl
"""
# img = m.imresize(
# img, self.img_size,
# )
img = np.array(img)
# img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
img = img.astype(float) / 255.0
img = img.transpose(2, 0, 1)
classes = np.unique(lbl)
lbl = np.array(lbl)
lbl = lbl.astype(float)
# lbl = m.imresize(lbl, self.img_size, "nearest", mode='F')
lbl = lbl.astype(int)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes") #TODO: compare the original and processed ones
if not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def get_cls_num_list(self):
return None
|
Tilemap_Game_With_CircuitPython/tilegame_assets/text_helper.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12624714 | # SPDX-FileCopyrightText: 2020 FoamyGuy for Adafruit Industries
#
# SPDX-License-Identifier: MIT
def wrap_nicely(string, max_chars):
""" From: https://www.richa1.com/RichardAlbritton/circuitpython-word-wrap-for-label-text/
A helper that will return the string with word-break wrapping.
:param str string: The text to be wrapped.
:param int max_chars: The maximum number of characters on a line before wrapping.
"""
string = string.replace("\n", "").replace("\r", "") # strip confusing newlines
words = string.split(" ")
the_lines = []
the_line = ""
for w in words:
if len(the_line + " " + w) <= max_chars:
the_line += " " + w
else:
the_lines.append(the_line)
the_line = w
if the_line:
the_lines.append(the_line)
the_lines[0] = the_lines[0][1:]
the_newline = ""
for w in the_lines:
the_newline += "\n" + w
return the_newline
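# Example (added illustration): wrapping a sentence at 20 characters per line
# should give a string that starts with a newline and breaks between words:
#
#   wrap_nicely("The quick brown fox jumps over the lazy dog", 20)
#   -> "\nThe quick brown fox\njumps over the lazy\ndog"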
|
pyretri/utils/module_base.py | dongan-beta/PyRetri | 1,063 | 12624759 | <reponame>dongan-beta/PyRetri
# -*- coding: utf-8 -*-
from abc import ABCMeta
from copy import deepcopy
from typing import Dict
class ModuleBase:
"""
The base class of all classes. Default hyper-parameters can be accessed on the
class, and hyper-parameters for each instance can be set at initialization.
"""
__metaclass__ = ABCMeta
default_hyper_params = dict()
def __init__(self, hps: Dict or None = None):
"""
Args:
hps (dict): hyper-parameter overrides for this instance (keys, values).
"""
# copy hyper_params from class attribute.
self._hyper_params = deepcopy(self.default_hyper_params)
if hps is not None:
self._set_hps(hps)
def __setattr__(self, key, value) -> None:
assert key != "hyper_params", "default Hyper-Parameters can not be set in each instance"
self.__dict__[key] = value
def get_hps(self) -> Dict:
return self._hyper_params
def _set_hps(self, hps: Dict or None = None):
for key in hps:
if key not in self._hyper_params:
raise KeyError
self._hyper_params[key] = hps[key]
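# Usage sketch (added illustration, not part of PyRetri): a subclass declares
# class-level defaults, and individual instances may override selected keys.
class _ExampleModule(ModuleBase):
    default_hyper_params = {"top_k": 10, "metric": "cosine"}

# _ExampleModule().get_hps()              -> {"top_k": 10, "metric": "cosine"}
# _ExampleModule({"top_k": 5}).get_hps()  -> {"top_k": 5, "metric": "cosine"}
# Passing an unknown key raises KeyError in _set_hps().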
|
sympy/solvers/tests/test_polysys.py | bigfooted/sympy | 603 | 12624761 | """Tests for solvers of systems of polynomial equations. """
from sympy import (flatten, I, Integer, Poly, QQ, Rational, S, sqrt,
solve, symbols)
from sympy.abc import x, y, z
from sympy.polys import PolynomialError
from sympy.solvers.polysys import (solve_poly_system,
solve_triangulated, solve_biquadratic, SolveFailed)
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.testing.pytest import raises
def test_solve_poly_system():
assert solve_poly_system([x - 1], x) == [(S.One,)]
assert solve_poly_system([y - x, y - x - 1], x, y) is None
assert solve_poly_system([y - x**2, y + x**2], x, y) == [(S.Zero, S.Zero)]
assert solve_poly_system([2*x - 3, y*Rational(3, 2) - 2*x, z - 5*y], x, y, z) == \
[(Rational(3, 2), Integer(2), Integer(10))]
assert solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y) == \
[(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
assert solve_poly_system([y - x**2, y + x**2 + 1], x, y) == \
[(-I*sqrt(S.Half), Rational(-1, 2)), (I*sqrt(S.Half), Rational(-1, 2))]
f_1 = x**2 + y + z - 1
f_2 = x + y**2 + z - 1
f_3 = x + y + z**2 - 1
a, b = sqrt(2) - 1, -sqrt(2) - 1
assert solve_poly_system([f_1, f_2, f_3], x, y, z) == \
[(0, 0, 1), (0, 1, 0), (1, 0, 0), (a, a, a), (b, b, b)]
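# (the symmetric roots follow from setting x = y = z, which reduces each
# equation to x**2 + 2*x - 1 = 0 with roots -1 + sqrt(2) and -1 - sqrt(2),
# i.e. the a and b defined above)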
solution = [(1, -1), (1, 1)]
assert solve_poly_system([Poly(x**2 - y**2), Poly(x - 1)]) == solution
assert solve_poly_system([x**2 - y**2, x - 1], x, y) == solution
assert solve_poly_system([x**2 - y**2, x - 1]) == solution
assert solve_poly_system(
[x + x*y - 3, y + x*y - 4], x, y) == [(-3, -2), (1, 2)]
raises(NotImplementedError, lambda: solve_poly_system([x**3 - y**3], x, y))
raises(NotImplementedError, lambda: solve_poly_system(
[z, -2*x*y**2 + x + y**2*z, y**2*(-z - 4) + 2]))
raises(PolynomialError, lambda: solve_poly_system([1/x], x))
def test_solve_biquadratic():
x0, y0, x1, y1, r = symbols('x0 y0 x1 y1 r')
f_1 = (x - 1)**2 + (y - 1)**2 - r**2
f_2 = (x - 2)**2 + (y - 2)**2 - r**2
s = sqrt(2*r**2 - 1)
a = (3 - s)/2
b = (3 + s)/2
assert solve_poly_system([f_1, f_2], x, y) == [(a, b), (b, a)]
f_1 = (x - 1)**2 + (y - 2)**2 - r**2
f_2 = (x - 1)**2 + (y - 1)**2 - r**2
assert solve_poly_system([f_1, f_2], x, y) == \
[(1 - sqrt((2*r - 1)*(2*r + 1))/2, Rational(3, 2)),
(1 + sqrt((2*r - 1)*(2*r + 1))/2, Rational(3, 2))]
query = lambda expr: expr.is_Pow and expr.exp is S.Half
f_1 = (x - 1 )**2 + (y - 2)**2 - r**2
f_2 = (x - x1)**2 + (y - 1)**2 - r**2
result = solve_poly_system([f_1, f_2], x, y)
assert len(result) == 2 and all(len(r) == 2 for r in result)
assert all(r.count(query) == 1 for r in flatten(result))
f_1 = (x - x0)**2 + (y - y0)**2 - r**2
f_2 = (x - x1)**2 + (y - y1)**2 - r**2
result = solve_poly_system([f_1, f_2], x, y)
assert len(result) == 2 and all(len(r) == 2 for r in result)
assert all(len(r.find(query)) == 1 for r in flatten(result))
s1 = (x*y - y, x**2 - x)
assert solve(s1) == [{x: 1}, {x: 0, y: 0}]
s2 = (x*y - x, y**2 - y)
assert solve(s2) == [{y: 1}, {x: 0, y: 0}]
gens = (x, y)
for seq in (s1, s2):
(f, g), opt = parallel_poly_from_expr(seq, *gens)
raises(SolveFailed, lambda: solve_biquadratic(f, g, opt))
seq = (x**2 + y**2 - 2, y**2 - 1)
(f, g), opt = parallel_poly_from_expr(seq, *gens)
assert solve_biquadratic(f, g, opt) == [
(-1, -1), (-1, 1), (1, -1), (1, 1)]
ans = [(0, -1), (0, 1)]
seq = (x**2 + y**2 - 1, y**2 - 1)
(f, g), opt = parallel_poly_from_expr(seq, *gens)
assert solve_biquadratic(f, g, opt) == ans
seq = (x**2 + y**2 - 1, x**2 - x + y**2 - 1)
(f, g), opt = parallel_poly_from_expr(seq, *gens)
assert solve_biquadratic(f, g, opt) == ans
def test_solve_triangulated():
f_1 = x**2 + y + z - 1
f_2 = x + y**2 + z - 1
f_3 = x + y + z**2 - 1
a, b = sqrt(2) - 1, -sqrt(2) - 1
assert solve_triangulated([f_1, f_2, f_3], x, y, z) == \
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
dom = QQ.algebraic_field(sqrt(2))
assert solve_triangulated([f_1, f_2, f_3], x, y, z, domain=dom) == \
[(0, 0, 1), (0, 1, 0), (1, 0, 0), (a, a, a), (b, b, b)]
def test_solve_issue_3686():
roots = solve_poly_system([((x - 5)**2/250000 + (y - Rational(5, 10))**2/250000) - 1, x], x, y)
assert roots == [(0, S.Half - 15*sqrt(1111)), (0, S.Half + 15*sqrt(1111))]
roots = solve_poly_system([((x - 5)**2/250000 + (y - 5.0/10)**2/250000) - 1, x], x, y)
# TODO: does this really have to be so complicated?!
assert len(roots) == 2
assert roots[0][0] == 0
assert roots[0][1].epsilon_eq(-499.474999374969, 1e12)
assert roots[1][0] == 0
assert roots[1][1].epsilon_eq(500.474999374969, 1e12)
|
tick/survival/simu_coxreg.py | sumau/tick | 411 | 12624794 | <filename>tick/survival/simu_coxreg.py
# License: BSD 3 clause
import numpy as np
from tick.base.simulation import SimuWithFeatures
from tick.preprocessing.features_binarizer import FeaturesBinarizer
# TODO: something better to tune the censoring level than this censoring factor
class SimuCoxReg(SimuWithFeatures):
"""Simulation of a Cox regression for proportional hazards
Parameters
----------
coeffs : `numpy.ndarray`, shape=(n_coeffs,)
The array of coefficients of the model
features : `numpy.ndarray`, shape=(n_samples, n_features), default=`None`
The features matrix to use. If None, it is simulated
n_samples : `int`, default=200
Number of samples
times_distribution : `str`, default="weibull"
The distribution of times. Only ``"weibull"``
is implemented for now
scale : `float`, default=1.0
Scaling parameter to use in the distribution of times
shape : `float`, default=1.0
Shape parameter to use in the distribution of times
censoring_factor : `float`, default=2.0
Level of censoring. Increasing censoring_factor leads
to fewer censored times, and conversely.
features_type : `str`, default="cov_toeplitz"
The type of features matrix to simulate
* If ``"cov_toeplitz"`` : a Gaussian distribution with
Toeplitz correlation matrix
* If ``"cov_uniform"`` : a Gaussian distribution with
correlation matrix given by 0.5 * (U + U.T), where U is
uniform on [0, 1] and diagonal filled with ones.
cov_corr : `float`, default=0.5
Correlation to use in the Toeplitz correlation matrix
features_scaling : `str`, default="none"
The way the features matrix is scaled after simulation
* If ``"standard"`` : the columns are centered and
normalized
* If ``"min-max"`` : remove the minimum and divide by
max-min
* If ``"norm"`` : the columns are normalized but not centered
* If ``"none"`` : nothing is done to the features
seed : `int`, default=None
The seed of the random number generator. If `None` it is not
seeded
verbose : `bool`, default=True
If True, print things
Attributes
----------
features : `numpy.ndarray`, shape=(n_samples, n_features)
The simulated (or given) features matrix
times : `numpy.ndarray`, shape=(n_samples,)
Simulated times
censoring : `numpy.ndarray`, shape=(n_samples,)
Simulated censoring indicator, where ``censoring[i] == 1``
indicates that the time of the i-th individual is a failure
time, and where ``censoring[i] == 0`` means that the time of
the i-th individual is a censoring time
time_start : `str`
Start date of the simulation
time_elapsed : `int`
Duration of the simulation, in seconds
time_end : `str`
End date of the simulation
dtype : `{'float64', 'float32'}`, default='float64'
Type of the generated arrays.
Used in the case features is None
Notes
-----
There is no intercept in this model
"""
_attrinfos = {
"times": {
"writable": False
},
"censoring": {
"writable": False
},
"_times_distribution": {
"writable": False
},
"_scale": {
"writable": False
},
"_shape": {
"writable": False
}
}
def __init__(self, coeffs: np.ndarray,
features: np.ndarray = None, n_samples: int = 200,
times_distribution: str = "weibull",
shape: float = 1., scale: float = 1.,
censoring_factor: float = 2.,
features_type: str = "cov_toeplitz",
cov_corr: float = 0.5, features_scaling: str = "none",
seed: int = None, verbose: bool = True, dtype="float64"):
n_features = coeffs.shape[0]
# intercept=None in this model
SimuWithFeatures.__init__(self, None, features, n_samples,
n_features, features_type, cov_corr,
features_scaling, seed, verbose, dtype=dtype)
self.coeffs = coeffs
self.shape = shape
self.scale = scale
self.censoring_factor = censoring_factor
self.times_distribution = times_distribution
self.features = None
self.times = None
self.censoring = None
def simulate(self):
"""Launch simulation of the data
Returns
-------
features : `numpy.ndarray`, shape=(n_samples, n_features)
The simulated (or given) features matrix
times : `numpy.ndarray`, shape=(n_samples,)
Simulated times
censoring : `numpy.ndarray`, shape=(n_samples,)
Simulated censoring indicator, where ``censoring[i] == 1``
indicates that the time of the i-th individual is a failure
time, and where ``censoring[i] == 0`` means that the time of
the i-th individual is a censoring time
"""
return SimuWithFeatures.simulate(self)
@property
def times_distribution(self):
return self._times_distribution
@times_distribution.setter
def times_distribution(self, val):
if val != "weibull":
raise ValueError("``times_distribution`` was not "
"understood, try using 'weibull' instead")
self._set("_times_distribution", val)
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, val):
if val <= 0:
raise ValueError("``shape`` must be strictly positive")
self._set("_shape", val)
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, val):
if val <= 0:
raise ValueError("``scale`` must be strictly positive")
self._set("_scale", val)
def _simulate(self):
# The features matrix already exists, and is created by the
# super class
features = self.features
n_samples, n_features = features.shape
u = features.dot(self.coeffs)
# Simulation of true times
E = np.random.exponential(scale=1., size=n_samples)
E *= np.exp(-u)
scale = self.scale
shape = self.shape
if self.times_distribution == "weibull":
T = 1. / scale * E ** (1. / shape)
else:
# There is no point in this test, but let's do it like that
# since we're likely to implement other distributions
T = 1. / scale * E ** (1. / shape)
m = T.mean()
# Simulation of the censoring
c = self.censoring_factor
C = np.random.exponential(scale=c * m, size=n_samples)
# Observed time
self._set("times", np.minimum(T, C).astype(self.dtype))
# Censoring indicator: 1 if it is a time of failure, 0 if it's
# censoring. It is as int8 and not bool as we might need to
# construct a memory access on it later
censoring = (T <= C).astype(np.ushort)
self._set("censoring", censoring)
return self.features, self.times, self.censoring
def _as_dict(self):
dd = SimuWithFeatures._as_dict(self)
dd.pop("features", None)
dd.pop("times", None)
dd.pop("censoring", None)
return dd
class SimuCoxRegWithCutPoints(SimuWithFeatures):
"""Simulation of a Cox regression for proportional hazards with cut-points
effects in the features
Parameters
----------
features : `numpy.ndarray`, shape=(n_samples, n_features), default=`None`
The features matrix to use. If None, it is simulated
n_samples : `int`, default=200
Number of samples
n_features : `int`, default=5
Number of features
times_distribution : `str`, default="weibull"
The distribution of times. Only ``"weibull"``
is implemented for now
scale : `float`, default=1.0
Scaling parameter to use in the distribution of times
shape : `float`, default=1.0
Shape parameter to use in the distribution of times
censoring_factor : `float`, default=2.0
Level of censoring. Increasing censoring_factor leads
to fewer censored times, and conversely.
features_type : `str`, default="cov_toeplitz"
The type of features matrix to simulate
* If ``"cov_toeplitz"`` : a Gaussian distribution with
Toeplitz correlation matrix
* If ``"cov_uniform"`` : a Gaussian distribution with
correlation matrix given by 0.5 * (U + U.T), where U is
uniform on [0, 1] and diagonal filled with ones.
cov_corr : `float`, default=0.5
Correlation to use in the Toeplitz correlation matrix
features_scaling : `str`, default="none"
The way the features matrix is scaled after simulation
* If ``"standard"`` : the columns are centered and
normalized
* If ``"min-max"`` : remove the minimum and divide by
max-min
* If ``"norm"`` : the columns are normalized but not centered
* If ``"none"`` : nothing is done to the features
seed : `int`, default=None
The seed of the random number generator. If `None` it is not
seeded
verbose : `bool`, default=True
If True, print things
n_cut_points : `int`, default=`None`
Number of cut-points generated per feature. If `None` it is sampled from
a geometric distribution of parameter n_cut_points_factor.
n_cut_points_factor : `float`, default=0.7
Parameter of the geometric distribution used to generate the number of
cut-points when n_cut_points is `None`. Increasing n_cut_points_factor
leads to less cut-points per feature on average.
sparsity : `float`, default=0
Percentage of block sparsity induced in the coefficient vector. Must be
in [0, 1].
Attributes
----------
features : `numpy.ndarray`, shape=(n_samples, n_features)
The simulated (or given) features matrix
times : `numpy.ndarray`, shape=(n_samples,)
Simulated times
censoring : `numpy.ndarray`, shape=(n_samples,)
Simulated censoring indicator, where ``censoring[i] == 1``
indicates that the time of the i-th individual is a failure
time, and where ``censoring[i] == 0`` means that the time of
the i-th individual is a censoring time
Notes
-----
There is no intercept in this model
"""
_attrinfos = {
"times": {
"writable": False
},
"censoring": {
"writable": False
},
"_times_distribution": {
"writable": False
},
"_scale": {
"writable": False
},
"_shape": {
"writable": False
},
"_sparsity": {
"writable": False
}
}
def __init__(self, features: np.ndarray = None, n_samples: int = 200,
n_features: int = 5, n_cut_points: int = None,
n_cut_points_factor: float = .7,
times_distribution: str = "weibull",
shape: float = 1., scale: float = 1.,
censoring_factor: float = 2.,
features_type: str = "cov_toeplitz",
cov_corr: float = 0.5, features_scaling: str = "none",
seed: int = None, verbose: bool = True, sparsity=0):
# intercept=None in this model
SimuWithFeatures.__init__(self, None, features, n_samples,
n_features, features_type, cov_corr,
features_scaling, seed, verbose)
self.shape = shape
self.scale = scale
self.censoring_factor = censoring_factor
self.times_distribution = times_distribution
self.n_cut_points = n_cut_points
self.n_cut_points_factor = n_cut_points_factor
self.sparsity = sparsity
self.features = None
self.times = None
self.censoring = None
def simulate(self):
"""Launch simulation of the data
Returns
-------
features : `numpy.ndarray`, shape=(n_samples, n_features)
The simulated (or given) features matrix
times : `numpy.ndarray`, shape=(n_samples,)
Simulated times
censoring : `numpy.ndarray`, shape=(n_samples,)
Simulated censoring indicator, where ``censoring[i] == 1``
indicates that the time of the i-th individual is a failure
time, and where ``censoring[i] == 0`` means that the time of
the i-th individual is a censoring time
"""
return SimuWithFeatures.simulate(self)
@property
def times_distribution(self):
return self._times_distribution
@times_distribution.setter
def times_distribution(self, val):
if val != "weibull":
raise ValueError("``times_distribution`` was not "
"understood, try using 'weibull' instead")
self._set("_times_distribution", val)
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, val):
if val <= 0:
raise ValueError("``shape`` must be strictly positive")
self._set("_shape", val)
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, val):
if val <= 0:
raise ValueError("``scale`` must be strictly positive")
self._set("_scale", val)
@property
def sparsity(self):
return self._sparsity
@sparsity.setter
def sparsity(self, val):
if not 0 <= val <= 1:
raise ValueError("``sparsity`` must be in [0, 1]")
self._set("_sparsity", val)
def _simulate(self):
# The features matrix already exists, and is created by the
# super class
features = self.features
n_samples, n_features = features.shape
# Simulation of cut-points
n_cut_points = self.n_cut_points
n_cut_points_factor = self.n_cut_points_factor
sparsity = self.sparsity
s = round(n_features * sparsity)
# sparsity index set
S = np.random.choice(n_features, s, replace=False)
if n_cut_points is None:
n_cut_points = np.random.geometric(n_cut_points_factor, n_features)
else:
n_cut_points = np.repeat(n_cut_points, n_features)
cut_points = {}
coeffs_binarized = np.array([])
for j in range(n_features):
feature_j = features[:, j]
quantile_cuts = np.linspace(10, 90, 10)
candidates = np.percentile(feature_j, quantile_cuts,
interpolation="nearest")
cut_points_j = np.random.choice(candidates, n_cut_points[j],
replace=False)
cut_points_j = np.sort(cut_points_j)
cut_points_j = np.insert(cut_points_j, 0, -np.inf)
cut_points_j = np.append(cut_points_j, np.inf)
cut_points[str(j)] = cut_points_j
# generate beta star
if j in S:
coeffs_block = np.zeros(n_cut_points[j] + 1)
else:
coeffs_block = np.random.normal(1, .5, n_cut_points[j] + 1)
# make sure 2 consecutive coeffs are different enough
coeffs_block = np.abs(coeffs_block)
coeffs_block[::2] *= -1
# sum-to-zero constraint in each block
coeffs_block = coeffs_block - coeffs_block.mean()
coeffs_binarized = np.append(coeffs_binarized, coeffs_block)
binarizer = FeaturesBinarizer(method='given',
bins_boundaries=cut_points)
binarized_features = binarizer.fit_transform(features)
u = binarized_features.dot(coeffs_binarized)
# Simulation of true times
E = np.random.exponential(scale=1., size=n_samples)
E *= np.exp(-u)
scale = self.scale
shape = self.shape
if self.times_distribution == "weibull":
T = 1. / scale * E ** (1. / shape)
else:
# There is no point in this test, but let's do it like that
# since we're likely to implement other distributions
T = 1. / scale * E ** (1. / shape)
m = T.mean()
# Simulation of the censoring
c = self.censoring_factor
C = np.random.exponential(scale=c * m, size=n_samples)
# Observed time
self._set("times", np.minimum(T, C).astype(self.dtype))
# Censoring indicator: 1 if it is a time of failure, 0 if censoring.
censoring = (T <= C).astype(np.ushort)
self._set("censoring", censoring)
return self.features, self.times, self.censoring, cut_points, \
coeffs_binarized, S
def _as_dict(self):
dd = SimuWithFeatures._as_dict(self)
dd.pop("features", None)
dd.pop("times", None)
dd.pop("censoring", None)
return dd
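# Usage sketch (added illustration; the argument values are arbitrary and not
# taken from the library docs). The helper below is never called; it only shows
# the intended call pattern for SimuCoxReg.
def _usage_sketch_simu_cox():
    coeffs = np.array([0.5, -0.5, 1.0])
    simu = SimuCoxReg(coeffs, n_samples=500, seed=123, verbose=False)
    features, times, censoring = simu.simulate()
    return features, times, censoring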
|
awsiot/iotshadow.py | vietmaiquoc/aws-iot-device-sdk-python-v2 | 224 | 12624830 | <reponame>vietmaiquoc/aws-iot-device-sdk-python-v2
# Copyright Amazon.com, Inc. or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0.
# This file is generated
import awsiot
import concurrent.futures
import datetime
import typing
class IotShadowClient(awsiot.MqttServiceClient):
def publish_delete_named_shadow(self, request, qos):
# type: (DeleteNamedShadowRequest, int) -> concurrent.futures.Future
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#delete-pub-sub-topic
Args:
request: `DeleteNamedShadowRequest` instance.
qos: The Quality of Service guarantee of this message
Returns:
A Future whose result will be None if the
request is successfully published. The Future's result will be an
exception if the request cannot be published.
"""
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not request.thing_name:
raise ValueError("request.thing_name is required")
return self._publish_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/delete'.format(request),
qos=qos,
payload=request.to_payload())
def publish_delete_shadow(self, request, qos):
# type: (DeleteShadowRequest, int) -> concurrent.futures.Future
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#delete-pub-sub-topic
Args:
request: `DeleteShadowRequest` instance.
qos: The Quality of Service guarantee of this message
Returns:
A Future whose result will be None if the
request is successfully published. The Future's result will be an
exception if the request cannot be published.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
return self._publish_operation(
topic='$aws/things/{0.thing_name}/shadow/delete'.format(request),
qos=qos,
payload=request.to_payload())
def publish_get_named_shadow(self, request, qos):
# type: (GetNamedShadowRequest, int) -> concurrent.futures.Future
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#get-pub-sub-topic
Args:
request: `GetNamedShadowRequest` instance.
qos: The Quality of Service guarantee of this message
Returns:
A Future whose result will be None if the
request is successfully published. The Future's result will be an
exception if the request cannot be published.
"""
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not request.thing_name:
raise ValueError("request.thing_name is required")
return self._publish_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/get'.format(request),
qos=qos,
payload=request.to_payload())
def publish_get_shadow(self, request, qos):
# type: (GetShadowRequest, int) -> concurrent.futures.Future
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#get-pub-sub-topic
Args:
request: `GetShadowRequest` instance.
qos: The Quality of Service guarantee of this message
Returns:
A Future whose result will be None if the
request is successfully published. The Future's result will be an
exception if the request cannot be published.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
return self._publish_operation(
topic='$aws/things/{0.thing_name}/shadow/get'.format(request),
qos=qos,
payload=request.to_payload())
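# Usage sketch (added illustration; names other than this client's own methods,
# such as the mqtt module and the connection object, are assumptions): with an
# established MQTT connection one would typically do something like
#
#   shadow_client = IotShadowClient(mqtt_connection)
#   future = shadow_client.publish_get_shadow(
#       request=GetShadowRequest(thing_name="my-thing"),
#       qos=mqtt.QoS.AT_LEAST_ONCE)
#   future.result()  # raises if the publish could not be performed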
def publish_update_named_shadow(self, request, qos):
# type: (UpdateNamedShadowRequest, int) -> concurrent.futures.Future
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-pub-sub-topic
Args:
request: `UpdateNamedShadowRequest` instance.
qos: The Quality of Service guarantee of this message
Returns:
A Future whose result will be None if the
request is successfully published. The Future's result will be an
exception if the request cannot be published.
"""
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not request.thing_name:
raise ValueError("request.thing_name is required")
return self._publish_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/update'.format(request),
qos=qos,
payload=request.to_payload())
def publish_update_shadow(self, request, qos):
# type: (UpdateShadowRequest, int) -> concurrent.futures.Future
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-pub-sub-topic
Args:
request: `UpdateShadowRequest` instance.
qos: The Quality of Service guarantee of this message
Returns:
A Future whose result will be None if the
request is successfully published. The Future's result will be an
exception if the request cannot be published.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
return self._publish_operation(
topic='$aws/things/{0.thing_name}/shadow/update'.format(request),
qos=qos,
payload=request.to_payload())
def subscribe_to_delete_named_shadow_accepted(self, request, qos, callback):
# type: (DeleteNamedShadowSubscriptionRequest, int, typing.Callable[[DeleteShadowResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#delete-accepted-pub-sub-topic
Args:
request: `DeleteNamedShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `DeleteShadowResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/delete/accepted'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=DeleteShadowResponse.from_payload)
def subscribe_to_delete_named_shadow_rejected(self, request, qos, callback):
# type: (DeleteNamedShadowSubscriptionRequest, int, typing.Callable[[ErrorResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#delete-rejected-pub-sub-topic
Args:
request: `DeleteNamedShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ErrorResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/delete/rejected'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ErrorResponse.from_payload)
def subscribe_to_delete_shadow_accepted(self, request, qos, callback):
# type: (DeleteShadowSubscriptionRequest, int, typing.Callable[[DeleteShadowResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#delete-accepted-pub-sub-topic
Args:
request: `DeleteShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `DeleteShadowResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/delete/accepted'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=DeleteShadowResponse.from_payload)
def subscribe_to_delete_shadow_rejected(self, request, qos, callback):
# type: (DeleteShadowSubscriptionRequest, int, typing.Callable[[ErrorResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#delete-rejected-pub-sub-topic
Args:
request: `DeleteShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ErrorResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/delete/rejected'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ErrorResponse.from_payload)
def subscribe_to_get_named_shadow_accepted(self, request, qos, callback):
# type: (GetNamedShadowSubscriptionRequest, int, typing.Callable[[GetShadowResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#get-accepted-pub-sub-topic
Args:
request: `GetNamedShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `GetShadowResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/get/accepted'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=GetShadowResponse.from_payload)
def subscribe_to_get_named_shadow_rejected(self, request, qos, callback):
# type: (GetNamedShadowSubscriptionRequest, int, typing.Callable[[ErrorResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#get-rejected-pub-sub-topic
Args:
request: `GetNamedShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ErrorResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/get/rejected'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ErrorResponse.from_payload)
def subscribe_to_get_shadow_accepted(self, request, qos, callback):
# type: (GetShadowSubscriptionRequest, int, typing.Callable[[GetShadowResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#get-accepted-pub-sub-topic
Args:
request: `GetShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `GetShadowResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/get/accepted'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=GetShadowResponse.from_payload)
def subscribe_to_get_shadow_rejected(self, request, qos, callback):
# type: (GetShadowSubscriptionRequest, int, typing.Callable[[ErrorResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#get-rejected-pub-sub-topic
Args:
request: `GetShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ErrorResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/get/rejected'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ErrorResponse.from_payload)
def subscribe_to_named_shadow_delta_updated_events(self, request, qos, callback):
# type: (NamedShadowDeltaUpdatedSubscriptionRequest, int, typing.Callable[[ShadowDeltaUpdatedEvent], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-delta-pub-sub-topic
Args:
request: `NamedShadowDeltaUpdatedSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ShadowDeltaUpdatedEvent`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/update/delta'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ShadowDeltaUpdatedEvent.from_payload)
def subscribe_to_named_shadow_updated_events(self, request, qos, callback):
# type: (NamedShadowUpdatedSubscriptionRequest, int, typing.Callable[[ShadowUpdatedEvent], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-documents-pub-sub-topic
Args:
request: `NamedShadowUpdatedSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ShadowUpdatedEvent`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/update/documents'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ShadowUpdatedEvent.from_payload)
def subscribe_to_shadow_delta_updated_events(self, request, qos, callback):
# type: (ShadowDeltaUpdatedSubscriptionRequest, int, typing.Callable[[ShadowDeltaUpdatedEvent], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-delta-pub-sub-topic
Args:
request: `ShadowDeltaUpdatedSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ShadowDeltaUpdatedEvent`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/update/delta'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ShadowDeltaUpdatedEvent.from_payload)
def subscribe_to_shadow_updated_events(self, request, qos, callback):
# type: (ShadowUpdatedSubscriptionRequest, int, typing.Callable[[ShadowUpdatedEvent], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-documents-pub-sub-topic
Args:
request: `ShadowUpdatedSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ShadowUpdatedEvent`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/update/documents'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ShadowUpdatedEvent.from_payload)
def subscribe_to_update_named_shadow_accepted(self, request, qos, callback):
# type: (UpdateNamedShadowSubscriptionRequest, int, typing.Callable[[UpdateShadowResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-accepted-pub-sub-topic
Args:
request: `UpdateNamedShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `UpdateShadowResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/update/accepted'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=UpdateShadowResponse.from_payload)
def subscribe_to_update_named_shadow_rejected(self, request, qos, callback):
# type: (UpdateNamedShadowSubscriptionRequest, int, typing.Callable[[ErrorResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-rejected-pub-sub-topic
Args:
request: `UpdateNamedShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ErrorResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not request.shadow_name:
raise ValueError("request.shadow_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/name/{0.shadow_name}/update/rejected'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ErrorResponse.from_payload)
def subscribe_to_update_shadow_accepted(self, request, qos, callback):
# type: (UpdateShadowSubscriptionRequest, int, typing.Callable[[UpdateShadowResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-accepted-pub-sub-topic
Args:
request: `UpdateShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `UpdateShadowResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/update/accepted'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=UpdateShadowResponse.from_payload)
def subscribe_to_update_shadow_rejected(self, request, qos, callback):
# type: (UpdateShadowSubscriptionRequest, int, typing.Callable[[ErrorResponse], None]) -> typing.Tuple[concurrent.futures.Future, str]
"""
API Docs: https://docs.aws.amazon.com/iot/latest/developerguide/device-shadow-mqtt.html#update-rejected-pub-sub-topic
Args:
request: `UpdateShadowSubscriptionRequest` instance.
qos: The Quality of Service guarantee of this message
callback: Callback to invoke each time the event is received.
The callback should take 1 argument of type `ErrorResponse`.
The callback is not expected to return anything.
Returns:
Tuple with two values. The first is a Future
which will contain a result of `None` when the server has acknowledged
the subscription, or an exception if the subscription fails. The second
value is a topic which may be passed to `unsubscribe()` to stop
receiving messages. Note that messages may arrive before the
subscription is acknowledged.
"""
if not request.thing_name:
raise ValueError("request.thing_name is required")
if not callable(callback):
raise ValueError("callback is required")
return self._subscribe_operation(
topic='$aws/things/{0.thing_name}/shadow/update/rejected'.format(request),
qos=qos,
callback=callback,
payload_to_class_fn=ErrorResponse.from_payload)
class DeleteNamedShadowRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
shadow_name (str)
thing_name (str)
Attributes:
client_token (str)
shadow_name (str)
thing_name (str)
"""
__slots__ = ['client_token', 'shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['client_token', 'shadow_name', 'thing_name'], args):
setattr(self, key, val)
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.client_token is not None:
payload['clientToken'] = self.client_token
return payload
class DeleteNamedShadowSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
shadow_name (str)
thing_name (str)
Attributes:
shadow_name (str)
thing_name (str)
"""
__slots__ = ['shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['shadow_name', 'thing_name'], args):
setattr(self, key, val)
class DeleteShadowRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
thing_name (str)
Attributes:
client_token (str)
thing_name (str)
"""
__slots__ = ['client_token', 'thing_name']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.client_token is not None:
payload['clientToken'] = self.client_token
return payload
class DeleteShadowResponse(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
timestamp (datetime.datetime)
version (int)
Attributes:
client_token (str)
timestamp (datetime.datetime)
version (int)
"""
__slots__ = ['client_token', 'timestamp', 'version']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.timestamp = kwargs.get('timestamp')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['timestamp', 'version'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> DeleteShadowResponse
new = cls()
val = payload.get('clientToken')
if val is not None:
new.client_token = val
val = payload.get('timestamp')
if val is not None:
new.timestamp = datetime.datetime.fromtimestamp(val)
val = payload.get('version')
if val is not None:
new.version = val
return new
class DeleteShadowSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
thing_name (str)
Attributes:
thing_name (str)
"""
__slots__ = ['thing_name']
def __init__(self, *args, **kwargs):
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
class ErrorResponse(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
code (int)
message (str)
timestamp (datetime.datetime)
Attributes:
client_token (str)
code (int)
message (str)
timestamp (datetime.datetime)
"""
__slots__ = ['client_token', 'code', 'message', 'timestamp']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.code = kwargs.get('code')
self.message = kwargs.get('message')
self.timestamp = kwargs.get('timestamp')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['client_token', 'code', 'message', 'timestamp'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ErrorResponse
new = cls()
val = payload.get('clientToken')
if val is not None:
new.client_token = val
val = payload.get('code')
if val is not None:
new.code = val
val = payload.get('message')
if val is not None:
new.message = val
val = payload.get('timestamp')
if val is not None:
new.timestamp = datetime.datetime.fromtimestamp(val)
return new
class GetNamedShadowRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
shadow_name (str)
thing_name (str)
Attributes:
client_token (str)
shadow_name (str)
thing_name (str)
"""
__slots__ = ['client_token', 'shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['client_token', 'shadow_name', 'thing_name'], args):
setattr(self, key, val)
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.client_token is not None:
payload['clientToken'] = self.client_token
return payload
class GetNamedShadowSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
shadow_name (str)
thing_name (str)
Attributes:
shadow_name (str)
thing_name (str)
"""
__slots__ = ['shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['shadow_name', 'thing_name'], args):
setattr(self, key, val)
class GetShadowRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
thing_name (str)
Attributes:
client_token (str)
thing_name (str)
"""
__slots__ = ['client_token', 'thing_name']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.client_token is not None:
payload['clientToken'] = self.client_token
return payload
class GetShadowResponse(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
metadata (ShadowMetadata)
state (ShadowStateWithDelta)
timestamp (datetime.datetime)
version (int)
Attributes:
client_token (str)
metadata (ShadowMetadata)
state (ShadowStateWithDelta)
timestamp (datetime.datetime)
version (int)
"""
__slots__ = ['client_token', 'metadata', 'state', 'timestamp', 'version']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.metadata = kwargs.get('metadata')
self.state = kwargs.get('state')
self.timestamp = kwargs.get('timestamp')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['metadata', 'state', 'timestamp', 'version'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> GetShadowResponse
new = cls()
val = payload.get('clientToken')
if val is not None:
new.client_token = val
val = payload.get('metadata')
if val is not None:
new.metadata = ShadowMetadata.from_payload(val)
val = payload.get('state')
if val is not None:
new.state = ShadowStateWithDelta.from_payload(val)
val = payload.get('timestamp')
if val is not None:
new.timestamp = datetime.datetime.fromtimestamp(val)
val = payload.get('version')
if val is not None:
new.version = val
return new
class GetShadowSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
thing_name (str)
Attributes:
thing_name (str)
"""
__slots__ = ['thing_name']
def __init__(self, *args, **kwargs):
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
class NamedShadowDeltaUpdatedSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
shadow_name (str)
thing_name (str)
Attributes:
shadow_name (str)
thing_name (str)
"""
__slots__ = ['shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['shadow_name', 'thing_name'], args):
setattr(self, key, val)
class NamedShadowUpdatedSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
shadow_name (str)
thing_name (str)
Attributes:
shadow_name (str)
thing_name (str)
"""
__slots__ = ['shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['shadow_name', 'thing_name'], args):
setattr(self, key, val)
class ShadowDeltaUpdatedEvent(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
metadata (typing.Dict[str, typing.Any])
state (typing.Dict[str, typing.Any])
timestamp (datetime.datetime)
version (int)
Attributes:
metadata (typing.Dict[str, typing.Any])
state (typing.Dict[str, typing.Any])
timestamp (datetime.datetime)
version (int)
"""
__slots__ = ['metadata', 'state', 'timestamp', 'version']
def __init__(self, *args, **kwargs):
self.metadata = kwargs.get('metadata')
self.state = kwargs.get('state')
self.timestamp = kwargs.get('timestamp')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['metadata', 'state', 'timestamp', 'version'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ShadowDeltaUpdatedEvent
new = cls()
val = payload.get('metadata')
if val is not None:
new.metadata = val
val = payload.get('state')
if val is not None:
new.state = val
val = payload.get('timestamp')
if val is not None:
new.timestamp = datetime.datetime.fromtimestamp(val)
val = payload.get('version')
if val is not None:
new.version = val
return new
class ShadowDeltaUpdatedSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
thing_name (str)
Attributes:
thing_name (str)
"""
__slots__ = ['thing_name']
def __init__(self, *args, **kwargs):
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
class ShadowMetadata(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
desired (typing.Dict[str, typing.Any])
reported (typing.Dict[str, typing.Any])
Attributes:
desired (typing.Dict[str, typing.Any])
reported (typing.Dict[str, typing.Any])
"""
__slots__ = ['desired', 'reported']
def __init__(self, *args, **kwargs):
self.desired = kwargs.get('desired')
self.reported = kwargs.get('reported')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['desired', 'reported'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ShadowMetadata
new = cls()
val = payload.get('desired')
if val is not None:
new.desired = val
val = payload.get('reported')
if val is not None:
new.reported = val
return new
class ShadowState(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
desired (typing.Dict[str, typing.Any])
reported (typing.Dict[str, typing.Any])
Attributes:
desired (typing.Dict[str, typing.Any])
reported (typing.Dict[str, typing.Any])
"""
__slots__ = ['desired', 'reported']
def __init__(self, *args, **kwargs):
self.desired = kwargs.get('desired')
self.reported = kwargs.get('reported')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['desired', 'reported'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ShadowState
new = cls()
val = payload.get('desired')
if val is not None:
new.desired = val
val = payload.get('reported')
if val is not None:
new.reported = val
return new
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.desired is not None:
payload['desired'] = self.desired
if self.reported is not None:
payload['reported'] = self.reported
return payload
class ShadowStateWithDelta(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
delta (typing.Dict[str, typing.Any])
desired (typing.Dict[str, typing.Any])
reported (typing.Dict[str, typing.Any])
Attributes:
delta (typing.Dict[str, typing.Any])
desired (typing.Dict[str, typing.Any])
reported (typing.Dict[str, typing.Any])
"""
__slots__ = ['delta', 'desired', 'reported']
def __init__(self, *args, **kwargs):
self.delta = kwargs.get('delta')
self.desired = kwargs.get('desired')
self.reported = kwargs.get('reported')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['delta', 'desired', 'reported'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ShadowStateWithDelta
new = cls()
val = payload.get('delta')
if val is not None:
new.delta = val
val = payload.get('desired')
if val is not None:
new.desired = val
val = payload.get('reported')
if val is not None:
new.reported = val
return new
class ShadowUpdatedEvent(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
current (ShadowUpdatedSnapshot)
previous (ShadowUpdatedSnapshot)
timestamp (datetime.datetime)
Attributes:
current (ShadowUpdatedSnapshot)
previous (ShadowUpdatedSnapshot)
timestamp (datetime.datetime)
"""
__slots__ = ['current', 'previous', 'timestamp']
def __init__(self, *args, **kwargs):
self.current = kwargs.get('current')
self.previous = kwargs.get('previous')
self.timestamp = kwargs.get('timestamp')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['current', 'previous', 'timestamp'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ShadowUpdatedEvent
new = cls()
val = payload.get('current')
if val is not None:
new.current = ShadowUpdatedSnapshot.from_payload(val)
val = payload.get('previous')
if val is not None:
new.previous = ShadowUpdatedSnapshot.from_payload(val)
val = payload.get('timestamp')
if val is not None:
new.timestamp = datetime.datetime.fromtimestamp(val)
return new
class ShadowUpdatedSnapshot(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
metadata (ShadowMetadata)
state (ShadowState)
version (int)
Attributes:
metadata (ShadowMetadata)
state (ShadowState)
version (int)
"""
__slots__ = ['metadata', 'state', 'version']
def __init__(self, *args, **kwargs):
self.metadata = kwargs.get('metadata')
self.state = kwargs.get('state')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['metadata', 'state', 'version'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> ShadowUpdatedSnapshot
new = cls()
val = payload.get('metadata')
if val is not None:
new.metadata = ShadowMetadata.from_payload(val)
val = payload.get('state')
if val is not None:
new.state = ShadowState.from_payload(val)
val = payload.get('version')
if val is not None:
new.version = val
return new
class ShadowUpdatedSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
thing_name (str)
Attributes:
thing_name (str)
"""
__slots__ = ['thing_name']
def __init__(self, *args, **kwargs):
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
class UpdateNamedShadowRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
shadow_name (str)
state (ShadowState)
thing_name (str)
version (int)
Attributes:
client_token (str)
shadow_name (str)
state (ShadowState)
thing_name (str)
version (int)
"""
__slots__ = ['client_token', 'shadow_name', 'state', 'thing_name', 'version']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.shadow_name = kwargs.get('shadow_name')
self.state = kwargs.get('state')
self.thing_name = kwargs.get('thing_name')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['client_token', 'shadow_name', 'state', 'thing_name', 'version'], args):
setattr(self, key, val)
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.client_token is not None:
payload['clientToken'] = self.client_token
if self.state is not None:
payload['state'] = self.state.to_payload()
if self.version is not None:
payload['version'] = self.version
return payload
class UpdateNamedShadowSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
shadow_name (str)
thing_name (str)
Attributes:
shadow_name (str)
thing_name (str)
"""
__slots__ = ['shadow_name', 'thing_name']
def __init__(self, *args, **kwargs):
self.shadow_name = kwargs.get('shadow_name')
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['shadow_name', 'thing_name'], args):
setattr(self, key, val)
class UpdateShadowRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
state (ShadowState)
thing_name (str)
version (int)
Attributes:
client_token (str)
state (ShadowState)
thing_name (str)
version (int)
"""
__slots__ = ['client_token', 'state', 'thing_name', 'version']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.state = kwargs.get('state')
self.thing_name = kwargs.get('thing_name')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['client_token', 'state', 'thing_name', 'version'], args):
setattr(self, key, val)
def to_payload(self):
# type: () -> typing.Dict[str, typing.Any]
payload = {} # type: typing.Dict[str, typing.Any]
if self.client_token is not None:
payload['clientToken'] = self.client_token
if self.state is not None:
payload['state'] = self.state.to_payload()
if self.version is not None:
payload['version'] = self.version
return payload
class UpdateShadowResponse(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
client_token (str)
metadata (ShadowMetadata)
state (ShadowState)
timestamp (datetime.datetime)
version (int)
Attributes:
client_token (str)
metadata (ShadowMetadata)
state (ShadowState)
timestamp (datetime.datetime)
version (int)
"""
__slots__ = ['client_token', 'metadata', 'state', 'timestamp', 'version']
def __init__(self, *args, **kwargs):
self.client_token = kwargs.get('client_token')
self.metadata = kwargs.get('metadata')
self.state = kwargs.get('state')
self.timestamp = kwargs.get('timestamp')
self.version = kwargs.get('version')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['client_token', 'metadata', 'state', 'timestamp', 'version'], args):
setattr(self, key, val)
@classmethod
def from_payload(cls, payload):
# type: (typing.Dict[str, typing.Any]) -> UpdateShadowResponse
new = cls()
val = payload.get('clientToken')
if val is not None:
new.client_token = val
val = payload.get('metadata')
if val is not None:
new.metadata = ShadowMetadata.from_payload(val)
val = payload.get('state')
if val is not None:
new.state = ShadowState.from_payload(val)
val = payload.get('timestamp')
if val is not None:
new.timestamp = datetime.datetime.fromtimestamp(val)
val = payload.get('version')
if val is not None:
new.version = val
return new
class UpdateShadowSubscriptionRequest(awsiot.ModeledClass):
"""
All attributes are None by default, and may be set by keyword in the constructor.
Keyword Args:
thing_name (str)
Attributes:
thing_name (str)
"""
__slots__ = ['thing_name']
def __init__(self, *args, **kwargs):
self.thing_name = kwargs.get('thing_name')
# for backwards compatibility, read any arguments that used to be accepted by position
for key, val in zip(['thing_name'], args):
setattr(self, key, val)
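# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the generated client).
# It assumes the shadow client defined earlier in this module has been
# constructed over an established MQTT connection and is available as
# `shadow_client`; the thing name, QoS value and callback are placeholders.
#
#   request = UpdateShadowSubscriptionRequest(thing_name='my-thing')
#
#   def on_update_accepted(response):
#       # response is an UpdateShadowResponse built via from_payload()
#       print("accepted shadow version:", response.version)
#
#   future, topic = shadow_client.subscribe_to_update_shadow_accepted(
#       request=request, qos=1, callback=on_update_accepted)
#   future.result()  # wait for the broker to acknowledge the subscription
# ---------------------------------------------------------------------------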
|
torch_kalman/kalman_filter/__init__.py | strongio/torch_kalman | 105 | 12624836 | from .base import KalmanFilter
|
test/run.py | miaopei/deep_landmark | 327 | 12624852 | #!/usr/bin/env python2.7
# coding: utf-8
"""
This file use Caffe model to predict data from http://mmlab.ie.cuhk.edu.hk/archive/CNN_FacePoint.htm
"""
import os, sys
from functools import partial
import cv2
from common import getDataFromTxt, createDir, logger, drawLandmark
from common import level1, level2, level3
TXT = 'dataset/test/lfpw_test_249_bbox.txt'
if __name__ == '__main__':
assert(len(sys.argv) == 2)
level = int(sys.argv[1])
if level == 0:
P = partial(level1, FOnly=True)
elif level == 1:
P = level1
elif level == 2:
P = level2
else:
P = level3
OUTPUT = 'dataset/test/out_{0}'.format(level)
createDir(OUTPUT)
data = getDataFromTxt(TXT, with_landmark=False)
for imgPath, bbox in data:
img = cv2.imread(imgPath)
assert(img is not None)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
logger("process %s" % imgPath)
landmark = P(imgGray, bbox)
landmark = bbox.reprojectLandmark(landmark)
drawLandmark(img, bbox, landmark)
cv2.imwrite(os.path.join(OUTPUT, os.path.basename(imgPath)), img)
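# Illustrative invocation (assumes the Caffe models and the LFPW test data
# referenced above are already in place):
#   ./run.py 0   # level-1 network only (FOnly)
#   ./run.py 3   # full three-level cascade, output written to dataset/test/out_3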
|
a02_TextCNN/data_util.py | sunshinenum/text_classification | 7,723 | 12624872 | <reponame>sunshinenum/text_classification
# -*- coding: utf-8 -*-
import codecs
import random
import numpy as np
from tflearn.data_utils import pad_sequences
from collections import Counter
import os
import pickle
import h5py  # used by load_data() below to read the preprocessed cache file
PAD_ID = 0
UNK_ID=1
_PAD="_PAD"
_UNK="UNK"
def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):
"""
convert data as indexes using word2index dicts.
:param traning_data_path:
:param vocab_word2index:
:param vocab_label2index:
:return:
"""
file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')
lines = file_object.readlines()
random.shuffle(lines)
label_size=len(vocab_label2index)
X = []
Y = []
for i,line in enumerate(lines):
raw_list = line.strip().split("__label__")
input_list = raw_list[0].strip().split(" ")
input_list = [x.strip().replace(" ", "") for x in input_list if x != '']
x=[vocab_word2index.get(x,UNK_ID) for x in input_list]
label_list = raw_list[1:]
label_list=[l.strip().replace(" ", "") for l in label_list if l != '']
label_list=[vocab_label2index[label] for label in label_list]
y=transform_multilabel_as_multihot(label_list,label_size)
X.append(x)
Y.append(y)
if i<10:print(i,"line:",line)
X = pad_sequences(X, maxlen=sentence_len, value=0.) # padding to max length
number_examples = len(lines)
training_number=int(training_portion* number_examples)
train = (X[0:training_number], Y[0:training_number])
valid_number=min(1000,number_examples-training_number)
test = (X[training_number+ 1:training_number+valid_number+1], Y[training_number + 1:training_number+valid_number+1])
return train,test
def transform_multilabel_as_multihot(label_list,label_size):
"""
convert to multi-hot style
:param label_list: e.g.[0,1,4], here 4 means in the 4th position it is true value(as indicate by'1')
:param label_size: e.g.199
:return:e.g.[1,1,0,1,0,0,........]
"""
result=np.zeros(label_size)
#set those location as 1, all else place as 0.
result[label_list] = 1
return result
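# Illustrative check of the helper above (result worked out by hand):
#   transform_multilabel_as_multihot([0, 1, 4], 6) -> array([1., 1., 0., 0., 1., 0.])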
#use pretrained word embedding to get word vocabulary and labels, and its relationship with index
def create_vocabulary(training_data_path,vocab_size,name_scope='cnn'):
"""
create vocabulary
:param training_data_path:
:param vocab_size:
:param name_scope:
:return:
"""
cache_vocabulary_label_pik='cache'+"_"+name_scope # path to save cache
if not os.path.isdir(cache_vocabulary_label_pik): # create folder if not exists.
os.makedirs(cache_vocabulary_label_pik)
# if cache exists. load it; otherwise create it.
cache_path =cache_vocabulary_label_pik+"/"+'vocab_label.pik'
print("cache_path:",cache_path,"file_exists:",os.path.exists(cache_path))
if os.path.exists(cache_path):
with open(cache_path, 'rb') as data_f:
return pickle.load(data_f)
else:
vocabulary_word2index={}
vocabulary_index2word={}
vocabulary_word2index[_PAD]=PAD_ID
vocabulary_index2word[PAD_ID]=_PAD
vocabulary_word2index[_UNK]=UNK_ID
vocabulary_index2word[UNK_ID]=_UNK
vocabulary_label2index={}
vocabulary_index2label={}
#1.load raw data
file_object = codecs.open(training_data_path, mode='r', encoding='utf-8')
lines=file_object.readlines()
#2.loop each line,put to counter
c_inputs=Counter()
c_labels=Counter()
for line in lines:
raw_list=line.strip().split("__label__")
input_list = raw_list[0].strip().split(" ")
input_list = [x.strip().replace(" ", "") for x in input_list if x != '']
label_list=[l.strip().replace(" ","") for l in raw_list[1:] if l!='']
c_inputs.update(input_list)
c_labels.update(label_list)
#return most frequency words
vocab_list=c_inputs.most_common(vocab_size)
label_list=c_labels.most_common()
#put those words to dict
for i,tuplee in enumerate(vocab_list):
word,_=tuplee
vocabulary_word2index[word]=i+2
vocabulary_index2word[i+2]=word
for i,tuplee in enumerate(label_list):
label,_=tuplee;label=str(label)
vocabulary_label2index[label]=i
vocabulary_index2label[i]=label
#save to file system if vocabulary of words not exists.
if not os.path.exists(cache_path):
with open(cache_path, 'ab') as data_f:
pickle.dump((vocabulary_word2index,vocabulary_index2word,vocabulary_label2index,vocabulary_index2label), data_f)
return vocabulary_word2index,vocabulary_index2word,vocabulary_label2index,vocabulary_index2label
def load_data(cache_file_h5py,cache_file_pickle):
"""
load data from h5py and pickle cache files, which is generate by take step by step of pre-processing.ipynb
:param cache_file_h5py:
:param cache_file_pickle:
:return:
"""
if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):
raise RuntimeError("############################ERROR##############################\n. "
"please download cache file, it include training data and vocabulary & labels. "
"link can be found in README.md\n download zip file, unzip it, then put cache files as FLAGS."
"cache_file_h5py and FLAGS.cache_file_pickle suggested location.")
print("INFO. cache file exists. going to load cache file")
f_data = h5py.File(cache_file_h5py, 'r')
print("f_data.keys:",list(f_data.keys()))
train_X=f_data['train_X'] # np.array(
print("train_X.shape:",train_X.shape)
train_Y=f_data['train_Y'] # np.array(
print("train_Y.shape:",train_Y.shape,";")
vaild_X=f_data['vaild_X'] # np.array(
valid_Y=f_data['valid_Y'] # np.array(
test_X=f_data['test_X'] # np.array(
test_Y=f_data['test_Y'] # np.array(
#print(train_X)
#f_data.close()
word2index, label2index=None,None
with open(cache_file_pickle, 'rb') as data_f_pickle:
word2index, label2index=pickle.load(data_f_pickle)
print("INFO. cache file load successful...")
return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y
#training_data_path='../data/sample_multiple_label3.txt'
#vocab_size=100
#create_vocabulary(training_data_path,vocab_size)
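# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the data path, vocabulary size and
# sentence length below are hypothetical placeholders, not values shipped
# with this project):
#
#   word2index, index2word, label2index, index2label = create_vocabulary(
#       '../data/sample_multiple_label3.txt', 100, name_scope='cnn')
#   train, test = load_data_multilabel(
#       '../data/sample_multiple_label3.txt', word2index, label2index, sentence_len=200)
#   train_X, train_Y = train
# ---------------------------------------------------------------------------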
|
Hackathon/hackerearth_improved_table.py | elawang9/Scripting-and-Web-Scraping | 119 | 12624913 | <reponame>elawang9/Scripting-and-Web-Scraping<filename>Hackathon/hackerearth_improved_table.py
# Install PhantomJS to use this script
from selenium import webdriver
from bs4 import BeautifulSoup
from time import sleep
from terminaltables import DoubleTable
from colorclass import Color
print('--- Fetching hackathons--- \n')
driver = webdriver.PhantomJS()
driver.get('https://www.hackerearth.com/challenges/')
res = driver.page_source
soup = BeautifulSoup(res, 'lxml')
upcoming = soup.find('div',{'class':'upcoming challenge-list'})
all_hackathons = upcoming.find_all('div',{'class':'challenge-content'})
table_data = [['S.No', 'Name', 'Type', 'Timings']]
for s_no,hackathon in enumerate(all_hackathons,1):
row = []
challenge_type = hackathon.find('div',{'class':'challenge-type'}).text.replace("\n"," ").strip()
challenge_name = hackathon.find('div',{'class':'challenge-name'}).text.replace("\n"," ").strip()
date_time = hackathon.find('div',{'class':'challenge-list-meta challenge-card-wrapper'}).text.replace("\n"," ").strip()
    row.extend((Color('{autoyellow}' + str(s_no) + '.' + '{/autoyellow}'),
                Color('{autocyan}' + challenge_name + '{/autocyan}'),
                Color('{autogreen}' + challenge_type + '{/autogreen}'),
                Color('{autoyellow}' + date_time + '{/autoyellow}')))
table_data.append(row)
table_instance = DoubleTable(table_data)
table_instance.inner_row_border = True
print(table_instance.table)
print()
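# Note: PhantomJS (see the comment at the top of this script) is used so the
# JavaScript-rendered challenge list is fully loaded before parsing; the
# PhantomJS binary must be available on PATH for webdriver.PhantomJS() to work.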
|
pwnlib/commandline/unhex.py | IMULMUL/python3-pwntools | 325 | 12624914 | <filename>pwnlib/commandline/unhex.py
#!/usr/bin/env python3
import argparse
import re
import sys
from pwnlib.util.fiddling import unhex
parser = argparse.ArgumentParser(description='''
Decodes hex-encoded data provided on the command line or via stdin.
''')
parser.add_argument('hex', nargs='*',
help='Hex bytes to decode')
def main():
args = parser.parse_args()
try:
if not args.hex:
s = sys.stdin.read()
s = re.sub(r'\s', '', s)
sys.stdout.buffer.write(unhex(s))
else:
sys.stdout.buffer.write(unhex(''.join(sys.argv[1:])))
except TypeError as e:
sys.stderr.write(str(e) + '\n')
if __name__ == '__main__':
main()
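# Illustrative shell usage (assuming the script is installed on PATH as `unhex`):
#   $ unhex 48656c6c6f21
#   Hello!
#   $ echo 414243 | unhex
#   ABC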
|
tests/plugins/slow_init.py | Bladez1753/lightning | 2,288 | 12624933 | <gh_stars>1000+
#!/usr/bin/env python3
from pyln.client import Plugin
import os
import time
plugin = Plugin()
@plugin.init()
def init(options, configuration, plugin):
plugin.log("slow_init.py initializing {}".format(configuration))
time.sleep(int(os.getenv('SLOWINIT_TIME', "0")))
plugin.run()
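# Illustrative (assumes a lightningd built from this repository; the delay value
# is a placeholder): start the daemon with this plugin and stall init by 5 seconds:
#   SLOWINIT_TIME=5 lightningd --plugin=tests/plugins/slow_init.py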
|
etc/release/bootstrap.py | quepop/scancode-toolkit | 1,511 | 12624951 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import itertools
import click
import utils_thirdparty
from utils_thirdparty import Environment
from utils_thirdparty import PypiPackage
@click.command()
@click.option('-r', '--requirements-file',
type=click.Path(exists=True, readable=True, path_type=str, dir_okay=False),
metavar='FILE',
multiple=True,
default=['requirements.txt'],
show_default=True,
help='Path to the requirements file(s) to use for thirdparty packages.',
)
@click.option('-d', '--thirdparty-dir',
type=click.Path(exists=True, readable=True, path_type=str, file_okay=False),
metavar='DIR',
default=utils_thirdparty.THIRDPARTY_DIR,
show_default=True,
help='Path to the thirdparty directory where wheels are built and '
'sources, ABOUT and LICENSE files fetched.',
)
@click.option('-p', '--python-version',
type=click.Choice(utils_thirdparty.PYTHON_VERSIONS),
metavar='PYVER',
default=utils_thirdparty.PYTHON_VERSIONS,
show_default=True,
multiple=True,
help='Python version(s) to use for this build.',
)
@click.option('-o', '--operating-system',
type=click.Choice(utils_thirdparty.PLATFORMS_BY_OS),
metavar='OS',
default=tuple(utils_thirdparty.PLATFORMS_BY_OS),
multiple=True,
show_default=True,
help='OS(ses) to use for this build: one of linux, mac or windows.',
)
@click.option('-l', '--latest-version',
is_flag=True,
help='Get the latest version of all packages, ignoring version specifiers.',
)
@click.option('--sync-dejacode',
is_flag=True,
help='Synchronize packages with DejaCode.',
)
@click.option('--with-deps',
is_flag=True,
help='Also include all dependent wheels.',
)
@click.help_option('-h', '--help')
def bootstrap(
requirements_file,
thirdparty_dir,
python_version,
operating_system,
with_deps,
latest_version,
sync_dejacode,
build_remotely=False,
):
"""
    Bootstrap a thirdparty Python packages directory from pip requirements.
    Fetch or build to THIRDPARTY_DIR all the wheels and source distributions for
    the pip ``--requirements-file`` requirements FILE(s). Build wheels compatible
    with all the provided ``--python-version`` PYVER(s) and ``--operating-system``
    OS(s), defaulting to all supported combinations. Create or fetch .ABOUT and
    .LICENSE files.
Optionally ignore version specifiers and use the ``--latest-version``
of everything.
Sources and wheels are fetched with attempts first from PyPI, then our remote repository.
    If wheels are missing, they are built as needed.
"""
# rename variables for clarity since these are lists
requirements_files = requirements_file
python_versions = python_version
operating_systems = operating_system
# create the environments we need
evts = itertools.product(python_versions, operating_systems)
environments = [Environment.from_pyver_and_os(pyv, os) for pyv, os in evts]
# collect all packages to process from requirements files
# this will fail with an exception if there are packages we cannot find
required_name_versions = set()
for req_file in requirements_files:
nvs = utils_thirdparty.load_requirements(
requirements_file=req_file, force_pinned=False)
required_name_versions.update(nvs)
if latest_version:
required_name_versions = set((name, None) for name, _ver in required_name_versions)
print(f'PROCESSING {len(required_name_versions)} REQUIREMENTS in {len(requirements_files)} FILES')
# fetch all available wheels, keep track of missing
# start with local, then remote, then PyPI
print('==> COLLECTING ALREADY LOCALLY AVAILABLE REQUIRED WHEELS')
# list of all the wheel filenames either pre-existing, fetched or built
# updated as we progress
available_wheel_filenames = []
local_packages_by_namever = {
(p.name, p.version): p
for p in utils_thirdparty.get_local_packages(directory=thirdparty_dir)
}
# list of (name, version, environment) not local and to fetch
name_version_envt_to_fetch = []
# start with a local check
for (name, version), envt in itertools.product(required_name_versions, environments):
local_pack = local_packages_by_namever.get((name, version,))
if local_pack:
supported_wheels = list(local_pack.get_supported_wheels(environment=envt))
if supported_wheels:
available_wheel_filenames.extend(w.filename for w in supported_wheels)
print(f'====> No fetch or build needed. '
f'Local wheel already available for {name}=={version} '
f'on os: {envt.operating_system} for Python: {envt.python_version}')
continue
name_version_envt_to_fetch.append((name, version, envt,))
print(f'==> TRYING TO FETCH #{len(name_version_envt_to_fetch)} REQUIRED WHEELS')
# list of (name, version, environment) not fetch and to build
name_version_envt_to_build = []
# then check if the wheel can be fetched without building from remote and Pypi
for name, version, envt in name_version_envt_to_fetch:
fetched_fwn = utils_thirdparty.fetch_package_wheel(
name=name,
version=version,
environment=envt,
dest_dir=thirdparty_dir,
)
if fetched_fwn:
available_wheel_filenames.append(fetched_fwn)
else:
name_version_envt_to_build.append((name, version, envt,))
# At this stage we have all the wheels we could obtain without building
for name, version, envt in name_version_envt_to_build:
print(f'====> Need to build wheels for {name}=={version} on os: '
f'{envt.operating_system} for Python: {envt.python_version}')
packages_and_envts_to_build = [
(PypiPackage(name, version), envt)
for name, version, envt in name_version_envt_to_build
]
print(f'==> BUILDING #{len(packages_and_envts_to_build)} MISSING WHEELS')
package_envts_not_built, wheel_filenames_built = utils_thirdparty.build_missing_wheels(
packages_and_envts=packages_and_envts_to_build,
build_remotely=build_remotely,
with_deps=with_deps,
dest_dir=thirdparty_dir,
)
    if wheel_filenames_built:
        available_wheel_filenames.extend(wheel_filenames_built)
for pack, envt in package_envts_not_built:
print(
f'====> FAILED to build any wheel for {pack.name}=={pack.version} '
f'on os: {envt.operating_system} for Python: {envt.python_version}'
)
print(f'==> FETCHING SOURCE DISTRIBUTIONS')
# fetch all sources, keep track of missing
# This is a list of (name, version)
utils_thirdparty.fetch_missing_sources(dest_dir=thirdparty_dir)
print(f'==> FETCHING ABOUT AND LICENSE FILES')
utils_thirdparty.add_fetch_or_update_about_and_license_files(dest_dir=thirdparty_dir)
############################################################################
if sync_dejacode:
print(f'==> SYNC WITH DEJACODE')
# try to fetch from DejaCode any missing ABOUT
# create all missing DejaCode packages
pass
utils_thirdparty.find_problems(dest_dir=thirdparty_dir)
if __name__ == '__main__':
bootstrap()
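# Illustrative invocation (paths and values are placeholders; --python-version
# and --operating-system must be among the choices defined in utils_thirdparty):
#   ./bootstrap.py -r requirements.txt -d thirdparty \
#       --operating-system linux --latest-version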
|
contrib/python/CUBRIDdb/FIELD_TYPE.py | eido5/cubrid | 253 | 12624954 | <gh_stars>100-1000
"""CUBRID FIELD_TYPE Constants
These constants represent the various column (field) types that are
supported by CUBRID.
"""
CHAR = 1
VARCHAR = 2
NCHAR = 3
VARNCHAR = 4
BIT = 5
VARBIT = 6
NUMERIC = 7
INT = 8
SMALLINT = 9
MONETARY = 10
BIGINT = 21
FLOAT = 11
DOUBLE = 12
DATE = 13
TIME = 14
TIMESTAMP = 15
OBJECT = 19
SET = 32
MULTISET = 64
SEQUENCE = 96
BLOB = 254
CLOB = 255
STRING = VARCHAR
|
nebula2/data/DataObject.py | xiaoronghuang/nebula-python | 110 | 12624987 | #!/usr/bin/env python
# --coding:utf-8--
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import pytz
from datetime import datetime, timezone, timedelta
from nebula2.Exception import (
InvalidValueTypeException,
InvalidKeyException,
OutOfRangeException
)
from nebula2.common.ttypes import Value, Vertex, Edge, NullType, DateTime, Time
def date_time_convert_with_timezone(date_time: DateTime, timezone_offset: int):
"""the function to convert utc date_time to local date_time
:param date_time: the utc date_time
:param timezone_offset: the timezone offset
:return: the date_time with timezone
"""
native_date_time = datetime(date_time.year,
date_time.month,
date_time.day,
date_time.hour,
date_time.minute,
date_time.sec,
date_time.microsec,
pytz.timezone("utc"))
local_date_time = native_date_time.astimezone(timezone(timedelta(seconds=timezone_offset)))
new_date_time = DateTime()
new_date_time.year = local_date_time.year
new_date_time.month = local_date_time.month
new_date_time.day = local_date_time.day
new_date_time.hour = local_date_time.hour
new_date_time.minute = local_date_time.minute
new_date_time.sec = local_date_time.second
new_date_time.microsec = local_date_time.microsecond
return new_date_time
def time_convert_with_timezone(n_time: Time, timezone_offset: int):
"""the function to convert utc date_time to local date_time
:param n_time: the utc time
:param timezone_offset: the timezone offset
:return: the time with the timezone
"""
native_date_time = datetime(1,
1,
1,
n_time.hour,
n_time.minute,
n_time.sec,
n_time.microsec,
pytz.timezone("utc"))
local_date_time = native_date_time.astimezone(timezone(timedelta(seconds=timezone_offset)))
local_time = Time()
local_time.hour = local_date_time.hour
local_time.minute = local_date_time.minute
local_time.sec = local_date_time.second
local_time.microsec = local_date_time.microsecond
return local_time
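# Illustrative example (worked out from the conversion above, assuming the
# thrift-generated Time type accepts keyword arguments): a UTC time of
# 12:00:00 shifted by a +8 hour offset (28800 seconds) becomes 20:00:00 local:
#   t = Time(hour=12, minute=0, sec=0, microsec=0)
#   local = time_convert_with_timezone(t, 28800)   # local.hour == 20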
class BaseObject(object):
def __init__(self):
self._decode_type = 'utf-8'
self._timezone_offset = 0
def set_decode_type(self, decode_type):
self._decode_type = decode_type
return self
def set_timezone_offset(self, timezone_offset):
self._timezone_offset = timezone_offset
return self
def get_decode_type(self):
return self._decode_type
def get_timezone_offset(self):
return self._timezone_offset
class Record(object):
def __init__(self, values, names, decode_type='utf-8', timezone_offset: int = 0):
assert len(names) == len(values),\
'len(names): {} != len(values): {}, names: {}, values: {}'.format(
len(names), len(values), str(names), str(values))
self._record = list()
self._names = names
for val in values:
self._record.append(ValueWrapper(val,
decode_type=decode_type,
timezone_offset=timezone_offset))
def __iter__(self):
return iter(self._record)
def size(self):
"""the size of record
:return: record size
"""
return len(self._names)
def get_value(self, index):
"""get value by specified index
:param index: the index of column
:return: ValueWrapper
"""
if index >= len(self._names):
raise OutOfRangeException()
return self._record[index]
def get_value_by_key(self, key):
"""get value by key
:return: Value
"""
try:
return self._record[self._names.index(key)]
except Exception:
raise InvalidKeyException(key)
def keys(self):
"""get column names of record
:return: the column names
"""
return self._names
def values(self):
"""get all values
:return: values
"""
return self._record
def __repr__(self):
return "{}".format('\n'.join([str(val_wrap) for val_wrap in self._record]))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class DataSetWrapper(object):
def __init__(self, data_set, decode_type='utf-8', timezone_offset: int = 0):
assert data_set is not None
self._decode_type = decode_type
self._timezone_offset = timezone_offset
self._data_set = data_set
self._column_names = []
self._key_indexes = {}
self._pos = -1
for index, name in enumerate(self._data_set.column_names):
d_name = name.decode(self._decode_type)
self._column_names.append(d_name)
self._key_indexes[d_name] = index
def get_row_size(self):
return len(self._data_set.rows)
def get_col_names(self):
return self._column_names
def get_rows(self):
return self._data_set.rows
def get_row_types(self):
"""Get row types
:param empty
:return: list<int>
ttypes.Value.__EMPTY__ = 0
ttypes.Value.NVAL = 1
ttypes.Value.BVAL = 2
ttypes.Value.IVAL = 3
ttypes.Value.FVAL = 4
ttypes.Value.SVAL = 5
ttypes.Value.DVAL = 6
ttypes.Value.TVAL = 7
ttypes.Value.DTVAL = 8
ttypes.Value.VVAL = 9
ttypes.Value.EVAL = 10
ttypes.Value.PVAL = 11
ttypes.Value.LVAL = 12
ttypes.Value.MVAL = 13
ttypes.Value.UVAL = 14
ttypes.Value.GVAL = 15
"""
if len(self._data_set.rows) == 0:
return []
return [(value.getType()) for value in self._data_set.rows[0].values]
def row_values(self, row_index):
"""get row values
:param row_index: the Record index
:return: list<ValueWrapper>
"""
if row_index >= len(self._data_set.rows):
raise OutOfRangeException()
return [(ValueWrapper(value=value,
decode_type=self._decode_type,
timezone_offset=self._timezone_offset))
for value in self._data_set.rows[row_index].values]
def column_values(self, key):
"""get column values
:param key: the col name
:return: list<ValueWrapper>
"""
if key not in self._column_names:
raise InvalidKeyException(key)
return [(ValueWrapper(value=row.values[self._key_indexes[key]],
decode_type=self._decode_type,
timezone_offset=self._timezone_offset))
for row in self._data_set.rows]
def __iter__(self):
self._pos = -1
return self
def __next__(self):
"""The record iterator
:return: record
"""
if len(self._data_set.rows) == 0 or self._pos >= len(self._data_set.rows) - 1:
raise StopIteration
self._pos = self._pos + 1
return Record(values=self._data_set.rows[self._pos].values,
names=self._column_names,
decode_type=self._decode_type,
timezone_offset=self._timezone_offset)
def __repr__(self):
data_str = []
for i in range(self.get_row_size()):
data_str.append(str(self.row_values(i)))
value_str = ','.join(data_str)
return 'keys: {}, values: {}'.format(self._column_names, value_str)
class Null(object):
__NULL__ = NullType.__NULL__
NaN = NullType.NaN
BAD_DATA = NullType.BAD_DATA
BAD_TYPE = NullType.BAD_TYPE
ERR_OVERFLOW = NullType.ERR_OVERFLOW
UNKNOWN_PROP = NullType.UNKNOWN_PROP
DIV_BY_ZERO = NullType.DIV_BY_ZERO
OUT_OF_RANGE = NullType.OUT_OF_RANGE
def __init__(self, type):
self._type = type
def __repr__(self):
return NullType._VALUES_TO_NAMES[self._type]
def __eq__(self, other):
return self._type == other._type
class ValueWrapper(object):
def __init__(self, value, decode_type='utf-8', timezone_offset: int = 0):
self._value = value
self._decode_type = decode_type
self._timezone_offset = timezone_offset
def get_value(self):
"""get raw data
:return: Value
"""
return self._value
def is_null(self):
"""judge the value if is Null type
:return: true or false
"""
return self._value.getType() == Value.NVAL
def is_empty(self):
"""judge the value if is Empty type
:return: true or false
"""
return self._value.getType() == Value.__EMPTY__
def is_bool(self):
"""judge the value if is Bool type
:return: true or false
"""
return self._value.getType() == Value.BVAL
def is_int(self):
"""judge the value if is Int type
:return: true or false
"""
return self._value.getType() == Value.IVAL
def is_double(self):
"""judge the value if is Double type
:return: true or false
"""
return self._value.getType() == Value.FVAL
def is_string(self):
"""judge the value if is String type
:return: true or false
"""
return self._value.getType() == Value.SVAL
def is_list(self):
"""judge the value if is List type
:return: true or false
"""
return self._value.getType() == Value.LVAL
def is_set(self):
"""judge the value if is Set type
:return: true or false
"""
return self._value.getType() == Value.UVAL
def is_map(self):
"""judge the value if is Map type
:return: true or false
"""
return self._value.getType() == Value.MVAL
def is_time(self):
"""judge the value if is Time type
:return: true or false
"""
return self._value.getType() == Value.TVAL
def is_date(self):
"""judge the value if is Date type
:return: true or false
"""
return self._value.getType() == Value.DVAL
def is_datetime(self):
"""judge the value if is Datetime type
:return: true or false
"""
return self._value.getType() == Value.DTVAL
def is_vertex(self):
"""judge the value if is Vertex type
:return: true or false
"""
return self._value.getType() == Value.VVAL
def is_edge(self):
"""judge the value if is Edge type
:return: true or false
"""
return self._value.getType() == Value.EVAL
def is_path(self):
"""judge the value if is Path type
:return: true or false
"""
return self._value.getType() == Value.PVAL
def as_null(self):
"""converts the original data type to Null type
:return: Null value
"""
if self._value.getType() == Value.NVAL:
return Null(self._value.get_nVal())
raise InvalidValueTypeException("expect NULL type, but is " + self._get_type_name())
def as_bool(self):
"""converts the original data type to Bool type
:return: Bool value
"""
if self._value.getType() == Value.BVAL:
return self._value.get_bVal()
raise InvalidValueTypeException("expect bool type, but is " + self._get_type_name())
def as_int(self):
"""converts the original data type to Int type
:return: Int value
"""
if self._value.getType() == Value.IVAL:
return self._value.get_iVal()
        raise InvalidValueTypeException("expect int type, but is " + self._get_type_name())
def as_double(self):
"""converts the original data type to Double type
:return: Double value
"""
if self._value.getType() == Value.FVAL:
return self._value.get_fVal()
        raise InvalidValueTypeException("expect double type, but is " + self._get_type_name())
def as_string(self):
"""converts the original data type to String type
:return: String value
"""
if self._value.getType() == Value.SVAL:
return self._value.get_sVal().decode(self._decode_type)
raise InvalidValueTypeException("expect string type, but is " + self._get_type_name())
def as_time(self):
"""converts the original data type to Time type
:return: Time value
"""
if self._value.getType() == Value.TVAL:
return TimeWrapper(self._value.get_tVal()).set_timezone_offset(self._timezone_offset)
raise InvalidValueTypeException("expect time type, but is " + self._get_type_name())
def as_date(self):
"""converts the original data type to Date type
:return: Date value
"""
if self._value.getType() == Value.DVAL:
return DateWrapper(self._value.get_dVal())
raise InvalidValueTypeException("expect date type, but is " + self._get_type_name())
def as_datetime(self):
"""converts the original data type to Datetime type
:return: Datetime value
"""
if self._value.getType() == Value.DTVAL:
return DateTimeWrapper(self._value.get_dtVal()).set_timezone_offset(self._timezone_offset)
raise InvalidValueTypeException("expect datetime type, but is " + self._get_type_name())
def as_list(self):
"""converts the original data type to list of ValueWrapper
:return: list<ValueWrapper>
"""
if self._value.getType() == Value.LVAL:
result = []
for val in self._value.get_lVal().values:
result.append(ValueWrapper(val,
decode_type=self._decode_type,
timezone_offset=self._timezone_offset))
return result
raise InvalidValueTypeException("expect list type, but is " + self._get_type_name())
def as_set(self):
"""converts the original data type to set of ValueWrapper
:return: set<ValueWrapper>
"""
if self._value.getType() == Value.UVAL:
result = set()
for val in self._value.get_uVal().values:
result.add(ValueWrapper(val,
decode_type=self._decode_type,
timezone_offset=self._timezone_offset))
return result
raise InvalidValueTypeException("expect set type, but is " + self._get_type_name())
def as_map(self):
"""converts the original data type to map type
:return: map<String, ValueWrapper>
"""
if self._value.getType() == Value.MVAL:
result = {}
kvs = self._value.get_mVal().kvs
for key in kvs.keys():
result[key.decode(self._decode_type)] = ValueWrapper(kvs[key],
decode_type=self._decode_type,
timezone_offset=self._timezone_offset)
return result
raise InvalidValueTypeException("expect map type, but is " + self._get_type_name())
def as_node(self):
"""converts the original data type to Node type
:return: Node type
"""
if self._value.getType() == Value.VVAL:
return Node(self._value.get_vVal())\
.set_decode_type(self._decode_type)\
.set_timezone_offset(self._timezone_offset)
raise InvalidValueTypeException("expect vertex type, but is " + self._get_type_name())
def as_relationship(self):
"""converts the original data type to Relationship type
:return: Relationship type
"""
if self._value.getType() == Value.EVAL:
return Relationship(self._value.get_eVal())\
.set_decode_type(self._decode_type)\
.set_timezone_offset(self._timezone_offset)
raise InvalidValueTypeException("expect edge type, but is " + self._get_type_name())
def as_path(self):
"""converts the original data type to PathWrapper type
:return: PathWrapper type
"""
if self._value.getType() == Value.PVAL:
return PathWrapper(self._value.get_pVal())\
.set_decode_type(self._decode_type)\
.set_timezone_offset(self._timezone_offset)
raise InvalidValueTypeException("expect path type, but is " + self._get_type_name())
def _get_type_name(self):
if self.is_empty():
return "empty"
if self.is_null():
return "null"
if self.is_bool():
return "bool"
if self.is_int():
return "int"
if self.is_double():
return "double"
if self.is_string():
return "string"
if self.is_list():
return "list"
if self.is_set():
return "set"
if self.is_map():
return "map"
if self.is_time():
return "time"
if self.is_date():
return "date"
if self.is_datetime():
return "datetime"
if self.is_vertex():
return "vertex"
if self.is_edge():
return "edge"
if self.is_path():
return "path"
return "unknown"
def __eq__(self, o: object) -> bool:
if not isinstance(o, self.__class__):
return False
if self.get_value().getType() != o.get_value().getType():
return False
if self.is_empty():
return o.is_empty()
elif self.is_null():
return self.as_null() == o.as_null()
elif self.is_bool():
return self.as_bool() == o.as_bool()
elif self.is_int():
return self.as_int() == o.as_int()
elif self.is_double():
return self.as_double() == o.as_double()
elif self.is_string():
return self.as_string() == o.as_string()
elif self.is_list():
return self.as_list() == o.as_list()
elif self.is_set():
return self.as_set() == o.as_set()
elif self.is_map():
return self.as_map() == o.as_map()
elif self.is_vertex():
return self.as_node() == o.as_node()
elif self.is_edge():
return self.as_relationship() == o.as_relationship()
elif self.is_path():
return self.as_path() == o.as_path()
        elif self.is_time():
            return self.as_time() == o.as_time()
        elif self.is_date():
            return self.as_date() == o.as_date()
        elif self.is_datetime():
            return self.as_datetime() == o.as_datetime()
else:
raise RuntimeError('Unsupported type:{} to compare'.format(self._get_type_name()))
return False
def __repr__(self):
if self.is_empty():
return '__EMPTY__'
elif self.is_null():
return str(self.as_null())
elif self.is_bool():
return 'True' if self.as_bool() else 'False'
elif self.is_int():
return str(self.as_int())
elif self.is_double():
return str(self.as_double())
elif self.is_string():
return '\"{}\"'.format(self.as_string())
elif self.is_list():
return str(self.as_list())
elif self.is_set():
return str(self.as_set())
elif self.is_map():
return str(self.as_map())
elif self.is_vertex():
return str(self.as_node())
elif self.is_edge():
return str(self.as_relationship())
elif self.is_path():
return str(self.as_path())
elif self.is_time():
return str(self.as_time())
elif self.is_date():
return str(self.as_date())
elif self.is_datetime():
return str(self.as_datetime())
else:
raise RuntimeError('Unsupported type:{} to compare'.format(self._get_type_name()))
return False
def __hash__(self):
return self._value.__hash__()
class TimeWrapper(BaseObject):
def __init__(self, time):
super(TimeWrapper, self).__init__()
self._time = time
def get_hour(self):
"""get utc hour
:return: hour
"""
return self._time.hour
def get_minute(self):
"""get utc minute
:return: minute
"""
return self._time.minute
def get_sec(self):
"""get utc second
:return: second
"""
return self._time.sec
def get_microsec(self):
"""get utc microseconds
:return: microseconds
"""
return self._time.microsec
def get_time(self):
"""get utc time
:return: Time value
"""
return self._time
def get_local_time(self):
"""get time with the timezone from graph service
:return: Time value with timezone offset
"""
return time_convert_with_timezone(self._time, self.get_timezone_offset())
def get_local_time_by_timezone_offset(self, timezone_offset):
"""get local time with the specified timezone by user
:return: Time value with timezone offset
"""
return time_convert_with_timezone(self._time, timezone_offset)
def get_local_time_str(self):
"""convert local time string format
:return: return local time string format
"""
local_time = time_convert_with_timezone(self._time, self.get_timezone_offset())
return "%02d:%02d:%02d.%06d" % (local_time.hour,
local_time.minute,
local_time.sec,
local_time.microsec)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._time.hour == other.get_hour() and \
self._time.minute == other.get_minute() and \
self._time.sec == other.get_sec() and \
               self._time.microsec == other.get_microsec()
def __repr__(self):
return "utc time: %02d:%02d:%02d.%06d, timezone_offset: %d" % (
self._time.hour,
self._time.minute,
self._time.sec,
self._time.microsec,
self.get_timezone_offset())
class DateWrapper(object):
def __init__(self, date):
self._date = date
def get_year(self):
"""get year
:return: year
"""
return self._date.year
def get_month(self):
"""get month
:return: month
"""
return self._date.month
def get_day(self):
"""get day
:return: day
"""
return self._date.day
def get_date(self):
"""get original date
:return: Date
"""
return self._date
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._date.year == other.get_year() and \
self._date.month == other.get_month() and \
self._date.day == other.get_day()
def __repr__(self):
return "%d-%02d-%02d" % (self._date.year, self._date.month, self._date.day)
class DateTimeWrapper(BaseObject):
def __init__(self, date_time):
super(DateTimeWrapper, self).__init__()
self._date_time = date_time
def get_year(self):
"""get utc year
:return: year
"""
return self._date_time.year
def get_month(self):
"""get utc month
:return: month
"""
return self._date_time.month
def get_day(self):
"""get utc day
:return: day
"""
return self._date_time.day
def get_hour(self):
"""get utc hour
:return: hour
"""
return self._date_time.hour
def get_minute(self):
"""get utc minute
:return: minute
"""
return self._date_time.minute
def get_sec(self):
"""get utc seconds
:return: seconds
"""
return self._date_time.sec
def get_microsec(self):
"""get utc microseconds
:return: microseconds
"""
return self._date_time.microsec
def get_datetime(self):
"""get utc datetime
:return: datetime
"""
return self._date_time
def get_local_datetime(self):
"""get datetime with the timezone from graph service
:return: Datetime value with timezone offset
"""
return date_time_convert_with_timezone(self._date_time, self.get_timezone_offset())
def get_local_datetime_by_timezone_offset(self, timezone_offset):
"""get local datetime with the specified timezone by user
:return: Time value with timezone offset
"""
return date_time_convert_with_timezone(self._date_time, timezone_offset)
def get_local_datetime_str(self):
"""convert local datetime string format
:return: return local datetime string format
"""
local_date_time = date_time_convert_with_timezone(self._date_time, self.get_timezone_offset())
return "%d-%02d-%02dT%02d:%02d:%02d.%06d" % (local_date_time.year,
local_date_time.month,
local_date_time.day,
local_date_time.hour,
local_date_time.minute,
local_date_time.sec,
local_date_time.microsec)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._date_time.year == other.get_year() and \
self._date_time.month == other.get_month() and \
self._date_time.day == other.get_day() and \
self._date_time.hour == other.get_hour() and \
self._date_time.minute == other.get_minute() and \
self._date_time.sec == other.get_sec() and \
self._date_time.microsec == other.get_microsec()
def __repr__(self):
return "utc datetime: %d-%02d-%02dT%02d:%02d:%02d.%06d, timezone_offset: %d" % (
self._date_time.year,
self._date_time.month,
self._date_time.day,
self._date_time.hour,
self._date_time.minute,
self._date_time.sec,
self._date_time.microsec,
self.get_timezone_offset())
class GenValue(object):
@classmethod
def gen_vertex(cls, vid, tags):
vertex = Vertex()
vertex.vid = vid
vertex.tags = tags
return vertex
@classmethod
def gen_edge(cls, src_id, dst_id, type, edge_name, ranking, props):
edge = Edge()
edge.src = src_id
edge.dst = dst_id
edge.type = type
edge.name = edge_name
edge.ranking = ranking
edge.props = props
return edge
@classmethod
def gen_segment(cls, start_node, end_node, relationship):
segment = Segment()
segment.start_node = start_node
segment.end_node = end_node
segment.relationship = relationship
return segment
class Node(BaseObject):
def __init__(self, vertex):
super(Node, self).__init__()
self._value = vertex
self._tag_indexes = dict()
for index, tag in enumerate(self._value.tags, start=0):
self._tag_indexes[tag.name.decode(self.get_decode_type())] = index
def get_id(self):
"""get the vid of Node
:return: ValueWrapper type vid
"""
return ValueWrapper(value=self._value.vid,
decode_type=self.get_decode_type(),
timezone_offset=self.get_timezone_offset())
def tags(self):
"""get tag names
:return: the list of tag name
"""
return list(self._tag_indexes.keys())
def has_tag(self, tag):
"""whether the specified tag is included
:param tag: the tag name
:return: true or false
"""
return True if tag in self._tag_indexes.keys() else False
def properties(self, tag):
"""get all properties of the specified tag
:param tag: the tag name
:return: the properties
"""
if tag not in self._tag_indexes.keys():
raise InvalidKeyException(tag)
props = self._value.tags[self._tag_indexes[tag]].props
result_props = {}
if props is None:
return result_props
for key in props.keys():
result_props[key.decode(self.get_decode_type())] = ValueWrapper(props[key],
decode_type=self.get_decode_type(),
timezone_offset=self._timezone_offset)
return result_props
def prop_names(self, tag):
"""get the property names of the specified tag
:param tag: the tag name
:return: property name list
"""
if tag not in self._tag_indexes.keys():
raise InvalidKeyException(tag)
index = self._tag_indexes[tag]
props = self._value.tags[index].props
if props is None:
return []
return [(key.decode(self.get_decode_type())) for key in self._value.tags[index].props.keys()]
def prop_values(self, tag):
"""get all property values of the specified tag
:param tag: the tag name
:return: property name list
"""
if tag not in self._tag_indexes.keys():
raise InvalidKeyException(tag)
index = self._tag_indexes[tag]
props = self._value.tags[index].props
if props is None:
return []
return [(ValueWrapper(value,
decode_type=self.get_decode_type(),
timezone_offset=self._timezone_offset))
for value in self._value.tags[index].props.values()]
def __repr__(self):
tag_str_list = list()
for tag in self._tag_indexes.keys():
prop_strs = ['%s: %s' % (key, str(val)) for key, val in self.properties(tag).items()]
tag_str_list.append(':%s{%s}' % (tag, ', '.join(prop_strs)))
return '({} {})'.format(str(self.get_id()), ' '.join(tag_str_list))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.get_id() == other.get_id()
def __ne__(self, other):
return not (self == other)
class Relationship(BaseObject):
def __init__(self, edge: Edge):
super(Relationship, self).__init__()
self._value = edge
def start_vertex_id(self):
"""get start vertex vid, if your space vid_type is int, you can use start_vertex_id().as_int(),
if your space vid_type is fixed_string, you can use start_vertex_id().as_string()
:return: ValueWrapper type vid
"""
if self._value.type > 0:
return ValueWrapper(self._value.src, self.get_decode_type())
else:
return ValueWrapper(self._value.dst, self.get_decode_type())
def end_vertex_id(self):
"""get end vertex vid, if your space vid_type is int, you can use end_vertex_id().as_int(),
if your space vid_type is fixed_string, you can use end_vertex_id().as_string()
:return: ValueWrapper type vid
"""
if self._value.type > 0:
return ValueWrapper(self._value.dst, self.get_decode_type())
else:
return ValueWrapper(self._value.src, self.get_decode_type())
def edge_name(self):
"""get the edge name
:return: edge name
"""
return self._value.name.decode(self.get_decode_type())
def ranking(self):
"""get the edge ranking
:return: ranking
"""
return self._value.ranking
def properties(self):
"""get all properties
:return: the properties
"""
props = {}
if self._value.props is None:
return props
for key in self._value.props.keys():
props[key.decode(self.get_decode_type())] = ValueWrapper(self._value.props[key],
decode_type=self.get_decode_type(),
timezone_offset=self.get_timezone_offset())
return props
def keys(self):
"""get all property names
:return: the property names
"""
if self._value.props is None:
return []
return [(key.decode(self._decode_type)) for key in self._value.props.keys()]
def values(self):
"""get all property values
:return: the property values
"""
if self._value.props is None:
return []
return [(ValueWrapper(value,
decode_type=self.get_decode_type(),
timezone_offset=self.get_timezone_offset()))
for value in self._value.props.values()]
def __repr__(self):
prop_strs = ['%s: %s' % (key, str(val)) for key, val in self.properties().items()]
return "(%s)-[:%s@%d{%s}]->(%s)" % (str(self.start_vertex_id()),
self.edge_name(),
self.ranking(),
', '.join(prop_strs),
str(self.end_vertex_id()))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.start_vertex_id() == other.start_vertex_id() \
and self.end_vertex_id() == other.end_vertex_id() \
and self.edge_name() == other.edge_name() \
            and self.ranking() == other.ranking()
def __ne__(self, other):
return not (self == other)
class Segment:
start_node = None
end_node = None
relationship = None
def __repr__(self):
return "{}-[:{}@{}{}]->{}".format(self.start_node,
self.relationship.edge_name(),
self.relationship.ranking(),
self.relationship.properties(),
self.end_node)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.start_node == other.start_node \
and self.end_node == other.end_node \
and self.relationship == other.relationship
class PathWrapper(BaseObject):
"""
    PathWrapper is a wrapper for handling the Path returned by the service
"""
def __init__(self, path):
super(PathWrapper, self).__init__()
self._nodes = list()
self._segments = list()
self._relationships = list()
self._path = path
self._nodes.append(Node(path.src)
.set_decode_type(self.get_decode_type())
.set_timezone_offset(self.get_timezone_offset()))
vids = []
vids.append(path.src.vid)
for step in self._path.steps:
type = step.type
if step.type > 0:
start_node = self._nodes[-1]
end_node = Node(step.dst)\
.set_decode_type(self.get_decode_type())\
.set_timezone_offset(self.get_timezone_offset())
src_id = vids[-1]
dst_id = step.dst.vid
else:
type = -type
end_node = self._nodes[-1]
start_node = Node(step.dst)\
.set_decode_type(self.get_decode_type())\
.set_timezone_offset(self.get_timezone_offset())
dst_id = vids[-1]
src_id = step.dst.vid
vids.append(step.dst.vid)
relationship = Relationship(GenValue.gen_edge(src_id,
dst_id,
type,
step.name,
step.ranking,
step.props))\
.set_decode_type(self.get_decode_type())\
.set_timezone_offset(self.get_timezone_offset())
self._relationships.append(relationship)
segment = GenValue.gen_segment(start_node, end_node, relationship)
if segment.start_node == self._nodes[-1]:
self._nodes.append(segment.end_node)
elif segment.end_node == self._nodes[-1]:
self._nodes.append(segment.start_node)
else:
raise Exception("Relationship [{}] does not connect to the last node".
format(relationship))
self._segments.append(segment)
def __iter__(self):
return iter(self._segments)
def start_node(self):
"""get start node of the Path
:return: start node
"""
if len(self._nodes) == 0:
return None
return self._nodes[0]
def length(self):
"""get the length of the path
:return: path length
"""
return len(self._segments)
def contain_node(self, node):
"""whether the node is in the path
:param node: the specified node
:return: true or false
"""
return True if node in self._nodes else False
def contain_relationship(self, relationship):
"""whether the relationship is in the path
:param relationship: the specified relationship
:return: true or false
"""
return True if relationship in self._relationships else False
def nodes(self):
"""get all nodes of the path
:return: nodes
"""
return self._nodes
def relationships(self):
"""get all relationships of the path
:return: relationships
"""
return self._relationships
def segments(self):
"""get all segments of the path
:return: segments
"""
return self._segments
def __repr__(self):
edge_strs = []
for step in self._path.steps:
relationship = Relationship(GenValue.gen_edge(step.dst.vid,
step.dst.vid,
                                                          step.type,
step.name,
step.ranking,
step.props))\
.set_decode_type(self.get_decode_type())\
.set_timezone_offset(self.get_timezone_offset())
edge_str = ''
prop_strs = ['%s: %s' % (key, str(val)) for key, val in relationship.properties().items()]
if step.type > 0:
edge_str = '-[:%s@%d{%s}]->%s' % (relationship.edge_name(),
relationship.ranking(),
', '.join(prop_strs),
Node(step.dst)
.set_decode_type(self.get_decode_type())
.set_timezone_offset(self.get_timezone_offset()))
else:
edge_str = "<-[:%s@%d{%s}]-%s" % (relationship.edge_name(),
relationship.ranking(),
', '.join(prop_strs),
Node(step.dst)
.set_decode_type(self.get_decode_type())
.set_timezone_offset(self.get_timezone_offset())
)
edge_strs.append(edge_str)
return '{}{}'.format(Node(self._path.src)
.set_decode_type(self.get_decode_type())
.set_timezone_offset(self.get_timezone_offset()), ''.join(edge_strs))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._segments == other.segments()
def __ne__(self, other):
return not (self == other)
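# Illustrative usage sketch (assumed example, not from the upstream module).
# The DateTime literal and the UTC+8 offset below are assumptions used only to
# show how the conversion helpers defined at the top of this module behave.
if __name__ == "__main__":
    utc_dt = DateTime()
    utc_dt.year, utc_dt.month, utc_dt.day = 2021, 1, 1
    utc_dt.hour = utc_dt.minute = utc_dt.sec = utc_dt.microsec = 0
    local_dt = date_time_convert_with_timezone(utc_dt, timezone_offset=8 * 3600)
    print(local_dt.hour)  # 8, i.e. midnight UTC shifted to UTC+8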
|
openbook_connections/serializers.py | TamaraAbells/okuna-api | 164 | 12624989 | from django.conf import settings
from rest_framework import serializers
from openbook_auth.models import User, UserProfile
from openbook_auth.validators import user_username_exists, username_characters_validator
from openbook_circles.models import Circle
from openbook_circles.validators import circle_id_exists
from openbook_common.models import Badge
from openbook_common.serializers_fields.user import IsConnectedField, ConnectedCirclesField, IsFollowingField, \
IsPendingConnectionConfirmation, IsFullyConnectedField, AreNewPostNotificationsEnabledForUserField
from openbook_connections.models import Connection
class ConnectWithUserSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=settings.USERNAME_MAX_LENGTH,
allow_blank=False,
validators=[
username_characters_validator,
user_username_exists
],
required=False
)
circles_ids = serializers.ListSerializer(
child=serializers.IntegerField(required=True, validators=[circle_id_exists])
)
class ConnectionUserProfileBadgeSerializer(serializers.ModelSerializer):
class Meta:
model = Badge
fields = (
'keyword',
'keyword_description'
)
class ConnectionUserProfileSerializer(serializers.ModelSerializer):
badges = ConnectionUserProfileBadgeSerializer(many=True)
class Meta:
model = UserProfile
fields = (
'name',
'avatar',
'badges'
)
class ConnectionUserCircleSerializer(serializers.ModelSerializer):
class Meta:
model = Circle
fields = (
'id',
'name',
'color',
'users_count'
)
class ConnectionUserSerializer(serializers.ModelSerializer):
profile = ConnectionUserProfileSerializer(many=False)
is_connected = IsConnectedField()
is_following = IsFollowingField()
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()
connected_circles = ConnectedCirclesField(circle_serializer=ConnectionUserCircleSerializer)
is_pending_connection_confirmation = IsPendingConnectionConfirmation()
is_fully_connected = IsFullyConnectedField()
class Meta:
model = User
fields = (
'id',
'username',
'profile',
'is_connected',
'is_fully_connected',
'is_following',
'are_new_post_notifications_enabled',
'connected_circles',
'is_pending_connection_confirmation'
)
class ConnectionSerializer(serializers.ModelSerializer):
target_user = ConnectionUserSerializer(many=False)
class Meta:
model = Connection
fields = (
'id',
'user',
'circles',
'target_user',
)
class DisconnectFromUserSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=settings.USERNAME_MAX_LENGTH,
allow_blank=False,
validators=[
username_characters_validator,
user_username_exists
],
required=False
)
class UpdateConnectionSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=settings.USERNAME_MAX_LENGTH,
allow_blank=False,
validators=[
username_characters_validator,
user_username_exists
],
required=False
)
circles_ids = serializers.ListSerializer(
child=serializers.IntegerField(required=True, validators=[circle_id_exists])
)
class ConfirmConnectionSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=settings.USERNAME_MAX_LENGTH,
allow_blank=False,
validators=[
username_characters_validator,
user_username_exists
],
required=False
)
circles_ids = serializers.ListSerializer(
required=False,
child=serializers.IntegerField(validators=[circle_id_exists])
)
|
python/tvm/autotvm/tuner/metric.py | jiangzoi/incubator-tvm | 286 | 12625001 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Metrics for evaluating tuning process"""
import numpy as np
from ..util import get_rank
def max_curve(trial_scores):
""" f(n) = max([s[i] fo i < n])
Parameters
----------
trial_scores: Array of float
the score of i th trial
Returns
-------
curve: Array of float
function values
"""
ret = np.empty(len(trial_scores))
keep = -1e9
for i, score in enumerate(trial_scores):
keep = max(keep, score)
ret[i] = keep
return ret
def mean_curve(trial_scores):
""" f(n) = mean([s[i] fo i < n])
Parameters
----------
trial_scores: Array of float
the score of i th trial
Returns
-------
curve: Array of float
function values
"""
ret = np.empty(len(trial_scores))
keep = 0
for i, score in enumerate(trial_scores):
keep += score
ret[i] = keep / (i+1)
return ret
def recall_curve(trial_ranks, top=None):
"""
if top is None, f(n) = sum([I(rank[i] < n) for i < n]) / n
if top is K, f(n) = sum([I(rank[i] < K) for i < n]) / K
Parameters
----------
trial_ranks: Array of int
the rank of i th trial in labels
top: int or None
top-n recall
Returns
-------
curve: Array of float
function values
"""
if not isinstance(trial_ranks, np.ndarray):
trial_ranks = np.array(trial_ranks)
ret = np.zeros(len(trial_ranks))
if top is None:
for i in range(len(trial_ranks)):
ret[i] = np.sum(trial_ranks[:i] <= i) / (i+1)
else:
for i in range(len(trial_ranks)):
ret[i] = 1.0 * np.sum(trial_ranks[:i] < top) / top
return ret
def cover_curve(trial_ranks):
"""
f(n) = max k s.t. {1,2,...,k} is a subset of {ranks[i] for i < n}
Parameters
----------
trial_ranks: Array of int
the rank of i th trial in labels
Returns
-------
curve: Array of float
function values
"""
ret = np.empty(len(trial_ranks))
keep = -1
cover = set()
for i, rank in enumerate(trial_ranks):
cover.add(rank)
while keep+1 in cover:
keep += 1
ret[i] = keep + 1
return ret / len(trial_ranks)
def average_recall(preds, labels, N):
"""evaluate average recall-n for predictions and labels"""
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks)
return np.sum(curve[:N]) / N
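# Illustrative usage sketch (assumed example, not from the upstream module).
# The synthetic preds/labels arrays below are assumptions chosen only to
# exercise recall_curve and average_recall end to end.
if __name__ == "__main__":
    preds = np.array([0.9, 0.1, 0.5, 0.7])      # predicted scores for 4 configs
    labels = np.array([10.0, 1.0, 5.0, 7.0])    # measured ground-truth scores
    trials = np.argsort(preds)[::-1]            # trial order implied by the predictions
    ranks = get_rank(labels[trials])            # rank of each trial in the true ordering
    print(recall_curve(ranks))                  # recall-style curve over the trial sequence
    print(average_recall(preds, labels, N=4))   # scalar summary of the first N trials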
|
examples/copy_boot_volume_backup_example.py | Manny27nyc/oci-python-sdk | 249 | 12625005 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# This example demonstrates how to copy boot volume backups to a different region and wait on the copy status.
#
# # USAGE:
# `python examples/copy_boot_volume_backup_example.py \
# --boot-volume-backup-id 'foo' \
# --destination-region '<destination_region>' \
# --display_name 'bar' \
# --kms-key-id 'baz'`
#
# Example (copying from us-phoenix-1 to us-ashburn-1):
# `python examples/copy_boot_volume_backup_example.py \
# --boot-volume-backup-id 'ocid1.bootvolumebackup.oc1.phx.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' \
# --destination-region 'us-ashburn-1'
# --display-name 'copied backup from phoenix' \
# --kms-key-id '<KEY>'`
#
# This script accepts up to four arguments:
# - boot-volume-backup-id: is the OCID of the boot volume backup to copy.
# - destination-region: is the name of the region to copy the boot volume backup to.
# - display_name (optional): is the new display name of the copied boot volume backup.
# If omitted, the copied boot volume backup will have the same display name as the source backup.
# - kms-key-id (optional): is the OCID of the kms key to use in the destination region to encrypt
# the copied backup with. If not specified, a platform ad-master key will be used.
import oci
import argparse
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--boot-volume-backup-id',
help='the OCID of the boot volume backup to copy',
required=True
)
parser.add_argument('--destination-region',
help='the name of the destination region to copy the backup to',
required=True
)
parser.add_argument('--display-name',
help='the display name of the copied boot volume backup. If not specified, '
'defaults to the same as the original backup\'s display name',
required=False
)
parser.add_argument('--kms-key-id',
help='the OCID of the kms key in the destination region to encrypt the copied boot volume backup',
required=False
)
args = parser.parse_args()
source_backup_id = args.boot_volume_backup_id
destination_region = args.destination_region
kms_key_id = args.kms_key_id
display_name = args.display_name
# load config and create clients one for BlockstorageClient and another for BlockstorageClientCompositeOperations.
source_config = oci.config.from_file()
print('Copying boot volume backup with ID {} from {} to {} using new display name: {} and kms key id: {} \n'.format(
source_backup_id, source_config["region"], destination_region, display_name, kms_key_id))
blockstorage_client = oci.core.BlockstorageClient(source_config)
blockstorage_composite_client = oci.core.BlockstorageClientCompositeOperations(blockstorage_client)
copied_backup = blockstorage_composite_client.copy_boot_volume_backup_and_wait_for_work_request(
boot_volume_backup_id=source_backup_id,
copy_boot_volume_backup_details=oci.core.models.CopyBootVolumeBackupDetails(
destination_region=destination_region,
display_name=display_name,
kms_key_id=kms_key_id
)).data
print('Backup successfully copied: {}'.format(copied_backup))
print('Example script done')
|
cacreader/swig-4.0.2/Examples/test-suite/python/cpp11_null_pointer_constant_runme.py | kyletanyag/LL-Smartcard | 1,031 | 12625022 |
import cpp11_null_pointer_constant
a = cpp11_null_pointer_constant.A()
if a._myA != None:
raise RuntimeError, (
"cpp11_null_pointer_constant: _myA should be None, but is ", a._myA)
b = cpp11_null_pointer_constant.A()
if a._myA != b._myA:
raise RuntimeError, (
"cpp11_null_pointer_constant: a._myA should be the same as b._myA, but ", a._myA, "!=", b._myA)
a._myA = cpp11_null_pointer_constant.A()
if a._myA == None:
raise RuntimeError, (
"cpp11_null_pointer_constant: _myA should be object, but is None")
|
src/gausskernel/dbmind/tools/ai_server/common/logger.py | Yanci0/openGauss-server | 360 | 12625053 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (c): 2021, Huawei Tech. Co., Ltd.
# FileName : logger.py
# Version :
# Date : 2021-4-7
# Description : Logger for project
#############################################################################
try:
import os
import sys
from configparser import ConfigParser
sys.path.insert(0, os.path.dirname(__file__))
from common.utils import Common, CONFIG_PATH
except ImportError as err:
sys.exit("logger.py: Failed to import module: %s." % str(err))
class CreateLogger:
def __init__(self, level, log_name):
self.level = level
self.log_name = log_name
def create_log(self):
config = ConfigParser()
config.read(CONFIG_PATH)
log_path = os.path.realpath(config.get("log", "log_path"))
if not os.path.isdir(log_path):
os.makedirs(log_path)
logger = Common.create_logger(level=self.level,
log_name=self.log_name,
log_path=os.path.join(log_path, self.log_name))
return logger
|
bibliopixel/util/threads/task_thread.py | rec/leds | 253 | 12625097 | import collections, threading
from . import runnable
class Task(object):
def __init__(self, task=None, event=None):
self.task = task or (lambda: None)
self.event = event or threading.Event()
def run(self, next_task):
"""Wait for the event, run the task, trigger the next task."""
self.event.wait()
self.task()
self.event.clear()
next_task.event.set()
class TaskThread(runnable.LoopThread):
def __init__(self, producer_task, consumer_task, daemon=True, **kwds):
super().__init__(daemon=daemon, **kwds)
self.producer_task = producer_task
self.consumer_task = consumer_task
def produce(self):
self.producer_task.run(self.consumer_task)
def run_once(self):
self.consumer_task.run(self.producer_task)
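# Illustrative usage sketch (assumed example, not from the upstream module).
# The lambdas below are assumptions showing the intended event ping-pong between
# the caller (produce) and the loop thread (run_once); kept as comments because
# this module uses relative imports and is only importable as part of the package.
# producer = Task(lambda: print('produce frame'))
# consumer = Task(lambda: print('consume frame'))
# thread = TaskThread(producer, consumer)
# thread.start()
# producer.event.set()   # allow the first produce() call to proceed
# thread.produce()       # runs producer.task, then wakes the consumer thread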
|
tools/cr/cr/targets/chrome.py | iplo/Chain | 231 | 12625107 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for the chrome targets."""
import cr
class ChromeTarget(cr.NamedTarget):
NAME = 'chrome'
DEFAULT = cr.Config.From(
# CR_URL is the page to open when the target is run.
CR_URL='https://www.google.com/',
)
CONFIG = cr.Config.From(
CR_RUN_ARGUMENTS=cr.Config.Optional('-d "{CR_URL!e}"'),
CR_TARGET_NAME='Chrome',
)
class ChromeTestTarget(cr.NamedTarget):
NAME = 'chrome_test'
CONFIG = cr.Config.From(
CR_TARGET_NAME='ChromeTest',
)
|
chainer/links/activation/simplified_dropconnect.py | zaltoprofen/chainer | 3,705 | 12625118 |
import numpy
from chainer.functions.noise import simplified_dropconnect
from chainer import initializers
from chainer import link
from chainer import variable
class SimplifiedDropconnect(link.Link):
"""Fully-connected layer with simplified dropconnect regularization.
Notice:
This implementation cannot be used for reproduction of the paper.
There is a difference between the current implementation and the
original one.
The original version uses sampling with gaussian distribution before
passing activation function, whereas the current implementation averages
before activation.
Args:
in_size (int): Dimension of input vectors. If ``None``, parameter
initialization will be deferred until the first forward data pass
at which time the size will be determined.
out_size (int): Dimension of output vectors.
nobias (bool): If ``True``, then this link does not use the bias term.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 3.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 2.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
.. seealso:: :func:`~chainer.functions.simplified_dropconnect`
.. seealso::
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2013).
        Regularization of Neural Networks using DropConnect.
International Conference on Machine Learning.
`URL <https://cs.nyu.edu/~wanli/dropc/>`_
"""
def __init__(self, in_size, out_size, ratio=.5, nobias=False,
initialW=None, initial_bias=None):
super(SimplifiedDropconnect, self).__init__()
self.out_size = out_size
self.ratio = ratio
if initialW is None:
initialW = initializers.HeNormal(1. / numpy.sqrt(2))
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if in_size is not None:
self._initialize_params(in_size)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = initializers.Constant(0)
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer, out_size)
def _initialize_params(self, in_size):
self.W.initialize((self.out_size, in_size))
def forward(self, x, train=True, mask=None, use_batchwise_mask=True):
"""Applies the simplified dropconnect layer.
Args:
x (chainer.Variable or :ref:`ndarray`):
Batch of input vectors. Its first dimension ``n`` is assumed
to be the *minibatch dimension*.
train (bool):
If ``True``, executes simplified dropconnect.
Otherwise, simplified dropconnect link works as a linear unit.
mask (None or chainer.Variable or :ref:`ndarray`):
If ``None``, randomized simplified dropconnect mask is
generated. Otherwise, The mask must be ``(n, M, N)`` or
``(M, N)`` shaped array, and `use_batchwise_mask` is ignored.
Main purpose of this option is debugging.
`mask` array will be used as a dropconnect mask.
use_batchwise_mask (bool):
If ``True``, dropped connections depend on each sample in
mini-batch.
Returns:
~chainer.Variable: Output of the simplified dropconnect layer.
"""
if self.W.array is None:
self._initialize_params(x.size // len(x))
if mask is not None and 'mask' not in self.__dict__:
self.add_persistent('mask', mask)
return simplified_dropconnect.simplified_dropconnect(
x, self.W, self.b, self.ratio, train, mask, use_batchwise_mask)
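# Illustrative usage sketch (assumed example, not from the upstream module).
# The layer sizes and batch below are assumptions; the call assumes a Chainer
# version where Link.__call__ dispatches to the forward() defined above.
if __name__ == "__main__":
    layer = SimplifiedDropconnect(in_size=784, out_size=10, ratio=0.5)
    x = numpy.random.rand(32, 784).astype(numpy.float32)
    y = layer(x)                     # training-time call: connections dropped with prob. ratio
    y_eval = layer(x, train=False)   # evaluation call: behaves like a plain linear layer
    print(y.shape, y_eval.shape)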
|
glucosetracker/blogs/views.py | arhanair/glucose-tracker-monitor | 134 | 12625120 |
from django.views.generic.base import ContextMixin
from django.views.generic import DetailView, ListView
from taggit.models import TaggedItem
from .models import Blog, BlogAd
class BlogBaseView(ContextMixin):
def get_context_data(self, **kwargs):
context = super(BlogBaseView, self).get_context_data(**kwargs)
context['blog_tags'] = TaggedItem.tags_for(Blog).order_by('name')
context['recent_blog_list'] = Blog.objects.recent_posts()
return context
class BlogDetailView(DetailView, BlogBaseView):
model = Blog
def get_queryset(self):
"""
Only return the object if it's public, unless the user is a superuser.
"""
if self.request.user.is_authenticated() and \
self.request.user.is_superuser:
return Blog.objects.all()
else:
return Blog.objects.publicly_viewable()
def get_context_data(self, **kwargs):
context = super(BlogDetailView, self).get_context_data(**kwargs)
context['ad_top'] = BlogAd.objects.filter(position=BlogAd.TOP)\
.order_by('?').first()
context['ad_middle'] = BlogAd.objects.filter(position=BlogAd.MIDDLE)\
.order_by('?').first()
context['ad_bottom'] = BlogAd.objects.filter(position=BlogAd.BOTTOM)\
.order_by('?').first()
return context
class BlogListView(ListView, BlogBaseView):
model = Blog
paginate_by = 15
def get_queryset(self):
"""
Only return public blog posts.
"""
return Blog.objects.publicly_viewable()
class BlogTagListView(BlogListView):
"""
Display a Blog List page filtered by tag.
"""
def get_queryset(self):
result = super(BlogTagListView, self).get_queryset()
return result.filter(tags__name=self.kwargs.get('tag'))
|
vumi/errors.py | seidu626/vumi | 199 | 12625125 | class VumiError(Exception):
pass
class InvalidMessage(VumiError):
pass
class InvalidMessageType(VumiError):
pass
class MissingMessageField(InvalidMessage):
pass
class InvalidMessageField(InvalidMessage):
pass
class DuplicateConnectorError(VumiError):
pass
class InvalidEndpoint(VumiError):
"""Raised when attempting to send a message to an invalid endpoint."""
class DispatcherError(VumiError):
"""Raised when an error is encounter while dispatching a message."""
# Re-export this for compatibility.
from confmodel.errors import ConfigError
ConfigError
|
tests/test_serialization_format.py | hsadeghidw/dimod | 101 | 12625129 |
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import dimod
import numpy as np
from dimod.serialization.format import Formatter
class TestUnknownType(unittest.TestCase):
def test_int(self):
with self.assertRaises(TypeError):
Formatter().format(5)
class TestSampleSet(unittest.TestCase):
def test_empty(self):
empty = dimod.SampleSet.from_samples([], dimod.BINARY, energy=[])
s = Formatter(width=80).format(empty)
target = '\n'.join(['Empty SampleSet',
"Record Fields: ['sample', 'energy', 'num_occurrences']",
"Variables: []",
"['BINARY', 0 rows, 0 samples, 0 variables]"])
self.assertEqual(s, target)
def test_empty_width_50(self):
empty = dimod.SampleSet.from_samples([], dimod.BINARY, energy=[])
s = Formatter(width=50).format(empty)
target = '\n'.join(['Empty SampleSet',
"Record Fields: ['sample', 'energy', ...]",
"Variables: []",
"['BINARY', 0 rows, 0 samples, 0 variables]"])
self.assertEqual(s, target)
def test_empty_with_variables(self):
samples = dimod.SampleSet.from_samples(([], 'abcdefghijklmnopqrstuvwxyz'),
dimod.SPIN, energy=[])
s = Formatter(width=49).format(samples)
target = '\n'.join(['Empty SampleSet',
"Record Fields: ['sample', 'energy', ...]",
"Variables: ['a', 'b', 'c', 'd', 'e', 'f', 'g', ...]",
"['SPIN', 0 rows, 0 samples, 26 variables]"])
self.assertEqual(s, target)
def test_triu_binary(self):
arr = np.triu(np.ones((5, 5)))
variables = [0, 1, 'a', 'b', 'c']
samples = dimod.SampleSet.from_samples((arr, variables),
dimod.BINARY, energy=[4., 3, 2, 1, 0])
s = Formatter(width=79, depth=None).format(samples)
target = '\n'.join([" 0 1 a b c energy num_oc.",
"4 0 0 0 0 1 0.0 1",
"3 0 0 0 1 1 1.0 1",
"2 0 0 1 1 1 2.0 1",
"1 0 1 1 1 1 3.0 1",
"0 1 1 1 1 1 4.0 1",
"['BINARY', 5 rows, 5 samples, 5 variables]"])
self.assertEqual(s, target)
def test_triu_spin(self):
arr = np.triu(np.ones((5, 5)))
variables = [0, 1, 'a', 'b', 'c']
samples = dimod.SampleSet.from_samples((2*arr-1, variables),
dimod.SPIN, energy=[4., 3, 2, 1, 0])
s = Formatter(width=79, depth=None).format(samples)
target = '\n'.join([" 0 1 a b c energy num_oc.",
"4 -1 -1 -1 -1 +1 0.0 1",
"3 -1 -1 -1 +1 +1 1.0 1",
"2 -1 -1 +1 +1 +1 2.0 1",
"1 -1 +1 +1 +1 +1 3.0 1",
"0 +1 +1 +1 +1 +1 4.0 1",
"['SPIN', 5 rows, 5 samples, 5 variables]"])
self.assertEqual(s, target)
def test_triu_row_summation(self):
arr = np.triu(np.ones((5, 5)))
variables = [0, 1, 'a', 'b', 'c']
samples = dimod.SampleSet.from_samples((arr, variables),
dimod.BINARY, energy=[4., 3, 2, 1, 0])
s = Formatter(width=79, depth=4).format(samples)
target = '\n'.join([" 0 1 a b c energy num_oc.",
"4 0 0 0 0 1 0.0 1",
"3 0 0 0 1 1 1.0 1",
"...",
"0 1 1 1 1 1 4.0 1",
"['BINARY', 5 rows, 5 samples, 5 variables]"])
self.assertEqual(s, target)
def test_triu_col_summation(self):
arr = np.triu(np.ones((5, 5)))
variables = [0, 1, 'a', 'b', 'c']
samples = dimod.SampleSet.from_samples((arr, variables),
dimod.BINARY, energy=[4., 3, 2, 1, 0])
s = Formatter(width=30, depth=None).format(samples)
# without summation length would be 31
target = '\n'.join([" 0 1 ... c energy num_oc.",
"4 0 0 ... 1 0.0 1",
"3 0 0 ... 1 1.0 1",
"2 0 0 ... 1 2.0 1",
"1 0 1 ... 1 3.0 1",
"0 1 1 ... 1 4.0 1",
"['BINARY',",
" 5 rows,",
" 5 samples,",
" 5 variables]"])
self.assertEqual(s, target)
def test_additional_fields_summation(self):
arr = np.ones((2, 5))
variables = list(range(5))
samples = dimod.SampleSet.from_samples((arr, variables),
dimod.BINARY, energy=1,
other=[5, 6],
anotherother=[234029348023948234, 3])
s = Formatter(width=30, depth=None).format(samples)
target = '\n'.join([" 0 ... 4 energy num_oc. ...",
"0 1 ... 1 1 1 ...",
"1 1 ... 1 1 1 ...",
"['BINARY',",
" 2 rows,",
" 2 samples,",
" 5 variables]"])
self.assertEqual(target, s)
def test_additional_fields(self):
arr = np.ones((2, 5))
variables = list(range(5))
samples = dimod.SampleSet.from_samples((arr, variables),
dimod.BINARY, energy=1,
other=[5, 6],
anotherother=[234029348023948234, object()])
s = Formatter(width=79, depth=None).format(samples)
target = '\n'.join([" 0 1 2 3 4 energy num_oc. anothe. other",
"0 1 1 1 1 1 1 1 2340... 5",
"1 1 1 1 1 1 1 1 <obj... 6",
"['BINARY', 2 rows, 2 samples, 5 variables]"])
self.assertEqual(target, s)
def test_discrete(self):
ss = dimod.SampleSet.from_samples(([[0, 17, 236], [3, 321, 1]], 'abc'),
'INTEGER', energy=[1, 2])
s = Formatter(width=79, depth=None).format(ss)
target = '\n'.join([" a b c energy num_oc.",
"0 0 17 236 1 1",
"1 3 321 1 2 1",
"['INTEGER', 2 rows, 2 samples, 3 variables]"])
self.assertEqual(target, s)
def test_depth(self):
ss = dimod.SampleSet.from_samples(([[0, 17, 236],
[3, 321, 1],
[4444444444, 312, 1],
[4, 3, 3]], 'abc'),
'INTEGER', energy=[1, 2, 3, 4])
s = Formatter(width=79, depth=2).format(ss)
target = '\n'.join([" a b c energy num_oc.",
"0 0 17 236 1 1",
"...",
"3 4 3 3 4 1",
"['INTEGER', 4 rows, 4 samples, 3 variables]"])
self.assertEqual(target, s)
def test_misalignment(self):
ss = dimod.SampleSet.from_samples([[0, 1], [0, 1], [1, 0]], 'BINARY',
energy=[-1, 1.55, 2])
s = Formatter().format(ss)
target = (" 0 1 energy num_oc.\n"
"0 0 1 -1.0 1\n"
"1 0 1 1.55 1\n"
"2 1 0 2.0 1\n"
"['BINARY', 3 rows, 3 samples, 2 variables]")
self.assertEqual(target, s)
|
deepspeed/runtime/sparse_tensor.py | ganik/DeepSpeed | 6,728 | 12625132 |
"""
Copyright 2020 The Microsoft DeepSpeed Team
Implementation of a compressed sparse tensor. Similar in
functionality to TensorFlow's IndexedSlices implementation.
"""
import torch
class SparseTensor(object):
""" Compressed Sparse Tensor """
def __init__(self, dense_tensor=None):
self.orig_dense_tensor = dense_tensor
self.is_sparse = dense_tensor.is_sparse
if dense_tensor is not None:
if dense_tensor.is_sparse:
dense_tensor = dense_tensor.coalesce()
self.indices = dense_tensor.indices().flatten()
self.values = dense_tensor.values()
else:
result = torch.sum(dense_tensor, dim=1)
self.indices = result.nonzero().flatten()
self.values = dense_tensor[self.indices]
self.dense_size = list(dense_tensor.size())
else:
self.indices = None
self.values = None
self.dense_size = None
def to_coo_tensor(self):
return torch.sparse_coo_tensor(self.indices.unsqueeze(0),
self.values,
self.dense_size)
@staticmethod
def type():
return "deepspeed.SparseTensor"
def to_dense(self):
it = self.indices.unsqueeze(1)
full_indices = torch.cat([it for _ in range(self.dense_size[1])], dim=1)
return self.values.new_zeros(self.dense_size).scatter_add_(
0,
full_indices,
self.values)
def sparse_size(self):
index_size = list(self.indices.size())
index_size = index_size[0]
value_size = list(self.values.size())
value_size = value_size[0] * value_size[1]
dense_size = self.dense_size[0] * self.dense_size[1]
return index_size + value_size, dense_size
def add(self, b):
assert self.dense_size == b.dense_size
self.indices = torch.cat([self.indices, b.indices])
self.values = torch.cat([self.values, b.values])
def __str__(self):
sparse_size, dense_size = self.sparse_size()
return "DeepSpeed.SparseTensor(indices_size={}, values_size={}, " \
"dense_size={}, device={}, reduction_factor={})".format(
self.indices.size(), self.values.size(), self.dense_size,
self.indices.get_device(), dense_size / sparse_size
)
def __repr__(self):
return self.__str__()
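# Illustrative round-trip sketch (assumed example, not from the upstream module).
# The toy dense tensor below is an assumption: only one row carries values, so the
# compressed form stores a single row index plus that row's values.
if __name__ == "__main__":
    dense = torch.zeros(4, 3)
    dense[1] = torch.tensor([1.0, 2.0, 3.0])   # only row 1 is non-zero
    sparse = SparseTensor(dense)               # indices == tensor([1]), values == dense[[1]]
    print("round-trip ok:", torch.equal(sparse.to_dense(), dense))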
|
src/network/advanceddispatcher.py | BeholdersEye/PyBitmessage | 1,583 | 12625147 | """
Improved version of asyncore dispatcher
"""
import socket
import threading
import time
import network.asyncore_pollchoose as asyncore
import state
from threads import BusyError, nonBlocking
class ProcessingError(Exception):
"""General class for protocol parser exception,
use as a base for others."""
pass
class UnknownStateError(ProcessingError):
"""Parser points to an unknown (unimplemented) state."""
pass
class AdvancedDispatcher(asyncore.dispatcher):
"""Improved version of asyncore dispatcher,
with buffers and protocol state."""
# pylint: disable=too-many-instance-attributes
_buf_len = 131072 # 128kB
def __init__(self, sock=None):
if not hasattr(self, '_map'):
asyncore.dispatcher.__init__(self, sock)
self.connectedAt = 0
self.close_reason = None
self.read_buf = bytearray()
self.write_buf = bytearray()
self.state = "init"
self.lastTx = time.time()
self.sentBytes = 0
self.receivedBytes = 0
self.expectBytes = 0
self.readLock = threading.RLock()
self.writeLock = threading.RLock()
self.processingLock = threading.RLock()
self.uploadChunk = self.downloadChunk = 0
def append_write_buf(self, data):
"""Append binary data to the end of stream write buffer."""
if data:
if isinstance(data, list):
with self.writeLock:
for chunk in data:
self.write_buf.extend(chunk)
else:
with self.writeLock:
self.write_buf.extend(data)
def slice_write_buf(self, length=0):
"""Cut the beginning of the stream write buffer."""
if length > 0:
with self.writeLock:
if length >= len(self.write_buf):
del self.write_buf[:]
else:
del self.write_buf[0:length]
def slice_read_buf(self, length=0):
"""Cut the beginning of the stream read buffer."""
if length > 0:
with self.readLock:
if length >= len(self.read_buf):
del self.read_buf[:]
else:
del self.read_buf[0:length]
def process(self):
"""Process (parse) data that's in the buffer,
as long as there is enough data and the connection is open."""
while self.connected and not state.shutdown:
try:
with nonBlocking(self.processingLock):
if not self.connected or state.shutdown:
break
if len(self.read_buf) < self.expectBytes:
return False
try:
cmd = getattr(self, "state_" + str(self.state))
except AttributeError:
self.logger.error(
'Unknown state %s', self.state, exc_info=True)
raise UnknownStateError(self.state)
if not cmd():
break
except BusyError:
return False
return False
def set_state(self, state_str, length=0, expectBytes=0):
"""Set the next processing state."""
self.expectBytes = expectBytes
self.slice_read_buf(length)
self.state = state_str
def writable(self):
"""Is data from the write buffer ready to be sent to the network?"""
self.uploadChunk = AdvancedDispatcher._buf_len
if asyncore.maxUploadRate > 0:
self.uploadChunk = int(asyncore.uploadBucket)
self.uploadChunk = min(self.uploadChunk, len(self.write_buf))
return asyncore.dispatcher.writable(self) and (
self.connecting or (
self.connected and self.uploadChunk > 0))
def readable(self):
"""Is the read buffer ready to accept data from the network?"""
self.downloadChunk = AdvancedDispatcher._buf_len
if asyncore.maxDownloadRate > 0:
self.downloadChunk = int(asyncore.downloadBucket)
try:
if self.expectBytes > 0 and not self.fullyEstablished:
self.downloadChunk = min(
self.downloadChunk, self.expectBytes - len(self.read_buf))
if self.downloadChunk < 0:
self.downloadChunk = 0
except AttributeError:
pass
return asyncore.dispatcher.readable(self) and (
self.connecting or self.accepting or (
self.connected and self.downloadChunk > 0))
def handle_read(self):
"""Append incoming data to the read buffer."""
self.lastTx = time.time()
newData = self.recv(self.downloadChunk)
self.receivedBytes += len(newData)
asyncore.update_received(len(newData))
with self.readLock:
self.read_buf.extend(newData)
def handle_write(self):
"""Send outgoing data from write buffer."""
self.lastTx = time.time()
written = self.send(self.write_buf[0:self.uploadChunk])
asyncore.update_sent(written)
self.sentBytes += written
self.slice_write_buf(written)
def handle_connect_event(self):
"""Callback for connection established event."""
try:
asyncore.dispatcher.handle_connect_event(self)
except socket.error as e:
# pylint: disable=protected-access
if e.args[0] not in asyncore._DISCONNECTED:
raise
def handle_connect(self):
"""Method for handling connection established implementations."""
self.lastTx = time.time()
def state_close(self): # pylint: disable=no-self-use
"""Signal to the processing loop to end."""
return False
def handle_close(self):
"""Callback for connection being closed,
but can also be called directly when you want connection to close."""
with self.readLock:
self.read_buf = bytearray()
with self.writeLock:
self.write_buf = bytearray()
self.set_state("close")
self.close()
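# --- Illustrative sketch, not part of the original module -------------------
# process() above drives a small state machine: it looks up "state_<name>"
# for the current self.state and keeps calling it while it returns True.
# A minimal subclass might look like the following; the state name and the
# 4-byte frame size are made-up examples, not protocol constants.
class _ExampleEchoDispatcher(AdvancedDispatcher):
    """Echo fixed 4-byte frames back to the peer (example only)."""
    def state_init(self):
        if len(self.read_buf) < 4:
            self.expectBytes = 4
            return False
        self.append_write_buf(self.read_buf[0:4])
        self.set_state("init", length=4, expectBytes=4)
        return True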
|
ttp/__init__.py | showipintbri/ttp | 254 | 12625153 | name = "ttp"
__all__ = ["ttp"]
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.0.2"
from sys import version_info
# get python version:
python_major_version = version_info.major
if python_major_version == 3:
from ttp.ttp import ttp
from ttp.utils.quick_parse import quick_parse
elif python_major_version == 2:
from ttp import ttp
from utils.quick_parse import quick_parse
|
src/genie/libs/parser/iosxe/tests/ShowInventory/cli/equal/golden_output_5_expected.py | balmasea/genieparser | 204 | 12625166 | <gh_stars>100-1000
expected_output = {
"main": {
"chassis": {
"C9407R": {
"name": "Chassis",
"descr": "Cisco Catalyst 9400 Series 7 Slot Chassis",
"pid": "C9407R",
"vid": "V01",
"sn": "******",
}
},
"TenGigabitEthernet3/0/1": {
"SFP-10G-SR": {
"name": "TenGigabitEthernet3/0/1",
"descr": "SFP 10GBASE-SR",
"pid": "SFP-10G-SR",
"vid": "01",
"sn": "******",
}
},
"TenGigabitEthernet3/0/2": {
"SFP-10G-SR": {
"name": "TenGigabitEthernet3/0/2",
"descr": "SFP 10GBASE-SR",
"pid": "SFP-10G-SR",
"vid": "01",
"sn": "******",
}
},
"TenGigabitEthernet3/0/3": {
"SFP-10G-SR": {
"name": "TenGigabitEthernet3/0/3",
"descr": "SFP 10GBASE-SR",
"pid": "SFP-10G-SR",
"vid": "01",
"sn": "******",
}
},
"TenGigabitEthernet3/0/4": {
"SFP-10G-SR": {
"name": "TenGigabitEthernet3/0/4",
"descr": "SFP 10GBASE-SR",
"pid": "SFP-10G-SR",
"vid": "01",
"sn": "******",
}
},
"TenGigabitEthernet3/0/8": {
"QFBR-5798L": {
"name": "TenGigabitEthernet3/0/8",
"descr": "GE SX",
"pid": "QFBR-5798L",
"vid": "",
"sn": "******",
}
},
},
"slot": {
"Slot_1_Linecard": {
"lc": {
"C9400-LC-48P": {
"name": "Slot 1 Linecard",
"descr": "Cisco Catalyst 9400 Series 48-Port POE 10/100/1000 (RJ-45)",
"pid": "C9400-LC-48P",
"vid": "V01",
"sn": "******",
}
}
},
"Slot_2_Linecard": {
"lc": {
"C9400-LC-48P": {
"name": "Slot 2 Linecard",
"descr": "Cisco Catalyst 9400 Series 48-Port POE 10/100/1000 (RJ-45)",
"pid": "C9400-LC-48P",
"vid": "V01",
"sn": "******",
}
}
},
"Slot_5_Linecard": {
"lc": {
"C9400-LC-48P": {
"name": "Slot 5 Linecard",
"descr": "Cisco Catalyst 9400 Series 48-Port POE 10/100/1000 (RJ-45)",
"pid": "C9400-LC-48P",
"vid": "V01",
"sn": "******",
}
}
},
"Slot_6_Linecard": {
"lc": {
"C9400-LC-48P": {
"name": "Slot 6 Linecard",
"descr": "Cisco Catalyst 9400 Series 48-Port POE 10/100/1000 (RJ-45)",
"pid": "C9400-LC-48P",
"vid": "V01",
"sn": "******",
}
}
},
"Slot_3_Supervisor": {
"other": {
"C9400-SUP-1": {
"name": "Slot 3 Supervisor",
"descr": "Cisco Catalyst 9400 Series Supervisor 1 Module",
"pid": "C9400-SUP-1",
"vid": "V02",
"sn": "******",
}
}
},
"P1": {
"other": {
"C9400-PWR-3200AC": {
"name": "Power Supply Module 1",
"descr": "Cisco Catalyst 9400 Series 3200W AC Power Supply",
"pid": "C9400-PWR-3200AC",
"vid": "V01",
"sn": "******",
}
}
},
"P2": {
"other": {
"C9400-PWR-3200AC": {
"name": "Power Supply Module 2",
"descr": "Cisco Catalyst 9400 Series 3200W AC Power Supply",
"pid": "C9400-PWR-3200AC",
"vid": "V01",
"sn": "DTM224703G0",
}
}
},
"Fan_Tray": {
"other": {
"C9407-FAN": {
"name": "Fan Tray",
"descr": "Cisco Catalyst 9400 Series 7 Slot Chassis Fan Tray",
"pid": "C9407-FAN",
"vid": "V01",
"sn": "******",
}
}
},
},
} |
cairocffi/test_pixbuf.py | Afoucaul/cairocffi | 116 | 12625170 | """
cairocffi.test_pixbuf
~~~~~~~~~~~~~~~~~~~~~
Test suite for cairocffi.pixbuf.
:copyright: Copyright 2013-2019 by <NAME>
:license: BSD, see LICENSE for details.
"""
import base64
import sys
import zlib
import pytest
from . import constants, pixbuf
PNG_BYTES = base64.b64decode(
b'iVBORw0KGgoAAAANSUhEUgAAAAMAAAACCAYAAACddGYaAAAAE0lEQV'
b'QI12NkaPjfwAAFTAxIAAAuNwIDqJbDRgAAAABJRU5ErkJggg==')
JPEG_BYTES = zlib.decompress(base64.b64decode(
b'eJylzb0JgFAMBOA704hYvIC9oygIou7nPFq4g3+Nm0RT+iy9VPkIF9vsQhjavgVJdM/ATjS'
b'+/YqX/O2gzdAUCUSoSJSitAUFiHdS1xArXBlr5qrf2wO58HkiigrlWK+T7TezChqU'))
def test_api():
with pytest.raises(pixbuf.ImageLoadingError):
pixbuf.decode_to_image_surface(b'')
with pytest.raises(pixbuf.ImageLoadingError):
pixbuf.decode_to_image_surface(b'Not a valid image.')
with pytest.raises(pixbuf.ImageLoadingError):
pixbuf.decode_to_image_surface(PNG_BYTES[:10])
surface, format_name = pixbuf.decode_to_image_surface(PNG_BYTES)
assert format_name == 'png'
assert_decoded(surface)
def test_gdk():
if pixbuf.gdk is None:
pytest.xfail()
pixbuf_obj, format_name = pixbuf.decode_to_pixbuf(PNG_BYTES)
assert format_name == 'png'
assert_decoded(pixbuf.pixbuf_to_cairo_gdk(pixbuf_obj))
def test_slices():
pixbuf_obj, format_name = pixbuf.decode_to_pixbuf(PNG_BYTES)
assert format_name == 'png'
assert_decoded(pixbuf.pixbuf_to_cairo_png(pixbuf_obj))
def test_size():
pixbuf_obj, format_name = pixbuf.decode_to_pixbuf(PNG_BYTES, 10, 10)
assert format_name == 'png'
surface = pixbuf.pixbuf_to_cairo_png(pixbuf_obj)
assert surface.get_width() == 10
assert surface.get_height() == 10
assert surface.get_format() == constants.FORMAT_ARGB32
def test_png():
pixbuf_obj, format_name = pixbuf.decode_to_pixbuf(JPEG_BYTES)
assert format_name == 'jpeg'
assert_decoded(pixbuf.pixbuf_to_cairo_slices(pixbuf_obj),
constants.FORMAT_RGB24, b'\xff\x00\x80\xff')
def assert_decoded(surface, format_=constants.FORMAT_ARGB32,
rgba=b'\x80\x00\x40\x80'):
assert surface.get_width() == 3
assert surface.get_height() == 2
assert surface.get_format() == format_
if sys.byteorder == 'little': # pragma: no cover
rgba = rgba[::-1]
assert surface.get_data()[:] == rgba * 6
|
glucosetracker/core/forms.py | arhanair/glucose-tracker-monitor | 134 | 12625211 | from django import forms
from crispy_forms.helper import FormHelper, Layout
from crispy_forms.layout import Submit, Fieldset, HTML, Field
from crispy_forms.bootstrap import FormActions
class ContactForm(forms.Form):
email = forms.EmailField(label='Your Email Address')
subject = forms.CharField(required=False)
message = forms.CharField(widget=forms.Textarea(
attrs={'cols': 50, 'rows': 6}))
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'col-xs-12 col-md-6'
        self.helper.layout = Layout(
HTML('''
{% if messages %}
{% for message in messages %}
<p {% if message.tags %} class="alert alert-{{ message.tags }}"\
            {% endif %}>{{ message }}</p>{% endfor %}{% endif %}
'''),
Fieldset(
'Contact Us',
Field('email'),
Field('subject'),
Field('message'),
),
FormActions(Submit('submit', 'Send', css_class='pull-right'))
) |
tests/utils/lp_utils_test.py | sash-a/Mava | 337 | 12625221 | <filename>tests/utils/lp_utils_test.py<gh_stars>100-1000
# python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from absl.testing import parameterized
from launchpad.nodes.python.local_multi_processing import PythonProcess
from mava.utils import lp_utils
test_data = [
dict(
testcase_name="cpu_only",
program_nodes=["replay", "counter", "trainer", "evaluator", "executor"],
nodes_on_gpu=[],
expected_resourse_list={
"replay": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"counter": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"trainer": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"evaluator": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"executor": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
},
),
dict(
testcase_name="trainer_only_on_gpu",
program_nodes=["replay", "counter", "trainer", "evaluator", "executor"],
nodes_on_gpu=["trainer"],
expected_resourse_list={
"replay": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"counter": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"trainer": [],
"evaluator": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
"executor": PythonProcess(env={"CUDA_VISIBLE_DEVICES": str(-1)}),
},
),
dict(
testcase_name="gpu_only",
program_nodes=["replay", "counter", "trainer", "evaluator", "executor"],
nodes_on_gpu=["replay", "counter", "trainer", "evaluator", "executor"],
expected_resourse_list={
"replay": [],
"counter": [],
"trainer": [],
"evaluator": [],
"executor": [],
},
),
]
class TestLPResourceUtils(parameterized.TestCase):
@parameterized.named_parameters(*test_data)
def test_resource_specification(
self, program_nodes: List, nodes_on_gpu: List, expected_resourse_list: Dict
) -> None:
"""Test resource allocation works for lp.
Args:
program_nodes (List): lp program nodes.
nodes_on_gpu (List): which nodes to have on gpu.
            expected_resourse_list (Dict): expected resource list.
"""
resource_list = lp_utils.to_device(
program_nodes=program_nodes, nodes_on_gpu=nodes_on_gpu
)
assert resource_list == expected_resourse_list
|
tests/components/litterrobot/test_button.py | MrDelik/core | 22,481 | 12625264 | <filename>tests/components/litterrobot/test_button.py
"""Test the Litter-Robot button entity."""
from unittest.mock import MagicMock
from freezegun import freeze_time
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.const import ATTR_ENTITY_ID, ATTR_ICON, STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity import EntityCategory
from .conftest import setup_integration
BUTTON_ENTITY = "button.test_reset_waste_drawer"
@freeze_time("2021-11-15 17:37:00", tz_offset=-7)
async def test_button(hass: HomeAssistant, mock_account: MagicMock) -> None:
"""Test the creation and values of the Litter-Robot button."""
await setup_integration(hass, mock_account, BUTTON_DOMAIN)
entity_registry = er.async_get(hass)
state = hass.states.get(BUTTON_ENTITY)
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:delete-variant"
assert state.state == STATE_UNKNOWN
entry = entity_registry.async_get(BUTTON_ENTITY)
assert entry
assert entry.entity_category is EntityCategory.CONFIG
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{ATTR_ENTITY_ID: BUTTON_ENTITY},
blocking=True,
)
await hass.async_block_till_done()
assert mock_account.robots[0].reset_waste_drawer.call_count == 1
mock_account.robots[0].reset_waste_drawer.assert_called_with()
state = hass.states.get(BUTTON_ENTITY)
assert state
assert state.state == "2021-11-15T10:37:00+00:00"
|
skompiler/fromskast/sqlalchemy.py | odinsemvosem/SKompiler | 112 | 12625266 | <reponame>odinsemvosem/SKompiler
"""
SKompiler: Generate SQLAlchemy expressions from SKAST.
"""
from functools import reduce
from collections import namedtuple
import numpy as np
import sqlalchemy as sa
from sqlalchemy.sql.selectable import Join, FromGrouping
from ..ast import ArgMax, VecMax, Softmax, IsElemwise, VecSum, Max, IsAtom
from ._common import ASTProcessor, StandardOps, StandardArithmetics, is_, tolist,\
not_implemented, prepare_assign_to, id_generator, denumpyfy
#pylint: disable=trailing-whitespace
def translate(node, dialect=None, assign_to='y', component=None,
multistage=True, key_column='id', from_obj='data'):
"""Translates SKAST to an SQLAlchemy expression (or a list of those, if the output should be a vector).
If dialect is not None, further compiles the expression(s) to a given dialect via to_sql.
Kwargs:
assign_to (None/string/list of str): See to_sql
component (int): If the result is a vector and you only need one component of it, specify its index (0-based) here.
multistage (bool): When multistage=False, the returned value is a single expression which can be selected directly from the
source data table. This, however, may make the resulting query rather long, as some functions (e.g. argmax)
require repeated computaion of the same parts over and over.
The problem is solved by splitting the computation in a sequence of CTE subqueries - the "multistage" mode.
The resulting query may then look like
with _tmp1 as (select [probability computations] from data),
_tmp2 as (select [argmax computation] from _tmp1),
...
select [final values] from _tmpX
Default - True
from_obj: A string or a SQLAlchemy selectable object - the source table for the data.
In non-multistage mode this may be None - in this case the returned value is
simply 'SELECT cols'.
key_column: A string or a sa.column object, naming the key column in the source table.
Compulsory for multistage mode.
>>> from skompiler.toskast.string import translate as skast
>>> expr = skast('[2*x[0], 1] if x[1] <= 3 else [12.0, 45.5]')
>>> print(translate(expr, 'sqlite', multistage=False, from_obj=None))
SELECT CASE WHEN (x2 <= 3) THEN 2 * x1 ELSE 12.0 END AS y1, CASE WHEN (x2 <= 3) THEN 1 ELSE 45.5 END AS y2
>>> expr = skast('x=1; y=2; x+y')
>>> print(translate(expr, 'sqlite', multistage=True))
WITH _tmp1 AS
(SELECT data.id AS __id__, 1 AS f1
FROM data),
_tmp2 AS
(SELECT data.id AS __id__, 2 AS f1
FROM data)
SELECT _tmp1.f1 + _tmp2.f1 AS y
FROM _tmp1 JOIN _tmp2 ON _tmp1.__id__ = _tmp2.__id__
>>> expr = skast('x+y')
>>> stbl = sa.select([sa.column('id'), sa.column('x'), sa.column('y')], from_obj=sa.table('test')).cte('_data')
>>> print(translate(expr, 'sqlite', multistage=False, from_obj=stbl))
WITH _data AS
(SELECT id, x, y
FROM test)
SELECT x + y AS y
FROM _data
"""
if multistage and from_obj is None:
raise ValueError("from_obj must be specified in multistage mode")
result = SQLAlchemyWriter(from_obj=from_obj, key_column=key_column, multistage=multistage)(node)
if component is not None:
result = result._replace(cols=[result.cols[component]])
assign_to = prepare_assign_to(assign_to, len(result.cols))
if assign_to is not None:
result = result._replace(cols=[col.label(lbl) for col, lbl in zip(result.cols, assign_to)])
result = sa.select(result.cols, from_obj=result.from_obj)
if dialect is not None:
result = to_sql(result, dialect)
return result
def _max(xs):
if len(xs) == 1:
return xs[0]
return reduce(greatest, xs)
def _sum(iterable):
"The built-in 'sum' does not work for us as we need."
return reduce(lambda x, y: x+y, iterable)
def _iif(cond, iftrue, iffalse):
# Optimize if (...) then X else X for literal X
# A lot of these occur when compiling trees
if isinstance(iftrue, sa.sql.elements.BindParameter) and \
isinstance(iffalse, sa.sql.elements.BindParameter) and \
iftrue.value == iffalse.value:
return iftrue
return sa.case([(cond, iftrue)], else_=iffalse)
def _matvecproduct(M, x):
return [_sum(m_i[j] * x[j] for j in range(len(x))) for m_i in M]
def _dotproduct(xs, ys):
return [_sum(x * y for x, y in zip(xs, ys))]
def _step(x):
return _iif(x > 0, 1, 0)
def extract_tables(from_obj):
if isinstance(from_obj, FromGrouping):
return extract_tables(from_obj.element)
elif isinstance(from_obj, Join):
return extract_tables(from_obj.left) + extract_tables(from_obj.right)
else:
return [from_obj]
def _merge(tbl1, tbl2):
if tbl1 is None:
return tbl2
elif tbl2 is None:
return tbl1
if tbl1 is tbl2:
return tbl1
# Either of the arguments may be a join clause and these
# may include repeated elements. If so, we have to extract them and recombine.
all_tables = list(sorted(set(extract_tables(tbl1) + extract_tables(tbl2)), key=lambda x: x.name))
tbl1 = all_tables[0]
joined = tbl1
for tbl_next in all_tables[1:]:
joined = joined.join(tbl_next, onclause=tbl1.key_ == tbl_next.key_)
joined.key_ = tbl1.key_
return joined
Result = namedtuple('Result', 'cols from_obj')
class SQLAlchemyWriter(ASTProcessor, StandardOps, StandardArithmetics):
"""A SK AST processor, producing a SQLAlchemy "multistage" expression.
The interpretation of each node is a tuple, containing a list of column expressions and a from_obj,
where these columns must be queried from."""
def __init__(self, from_obj='data', key_column='id',
positive_infinity=float(np.finfo('float64').max),
negative_infinity=float(np.finfo('float64').min),
multistage=True):
self.positive_infinity = positive_infinity
self.negative_infinity = negative_infinity
if multistage:
if isinstance(from_obj, str):
from_obj = sa.table(from_obj, sa.column(key_column))
# This is a bit hackish, but quite convenient.
# This way we do not have to carry around an extra "key" field in our results all the time
from_obj.key_ = from_obj.columns[key_column]
else:
if key_column not in from_obj.columns:
raise ValueError("The provided selectable does not contain the key column {0}".format(key_column))
from_obj.key_ = from_obj.columns[key_column]
elif isinstance(from_obj, str):
from_obj = sa.table(from_obj)
self.from_obj = from_obj
self.temp_ids = id_generator()
self.references = [{}]
self.multistage = multistage
def Identifier(self, id):
return Result([sa.column(id.id)], self.from_obj)
def _indexed_identifier(self, id, idx):
return sa.column("{0}{1}".format(id, idx+1))
def IndexedIdentifier(self, sub):
return Result([self._indexed_identifier(sub.id, sub.index)], self.from_obj)
def _number_constant(self, value):
# Infinities have to be handled separately
if np.isinf(value):
value = self.positive_infinity if value > 0 else self.negative_infinity
else:
value = denumpyfy(value)
return sa.literal(value)
def NumberConstant(self, num):
return Result([self._number_constant(num.value)], self.from_obj)
def VectorIdentifier(self, id):
return Result([self._indexed_identifier(id.id, i) for i in range(id.size)], self.from_obj)
def VectorConstant(self, vec):
return Result([self._number_constant(v) for v in tolist(vec.value)], self.from_obj)
def MatrixConstant(self, mtx):
return Result([[self._number_constant(v) for v in tolist(row)] for row in mtx.value], self.from_obj)
def UnaryFunc(self, node, **kw):
arg = self(node.arg)
if isinstance(node.op, ArgMax):
return self._argmax(arg)
elif isinstance(node.op, VecMax):
return self._vecmax(arg)
elif isinstance(node.op, VecSum):
return self._vecsum(arg)
elif isinstance(node.op, Softmax):
return self._softmax(arg)
else:
op = self(node.op)
return Result([op(el) for el in arg.cols], arg.from_obj)
ArgMax = VecSumNormalize = VecSum = VecMax = Softmax = not_implemented
def BinOp(self, node, **kw):
left, right, op = self(node.left), self(node.right), self(node.op)
if not isinstance(node.op, IsElemwise):
# MatVecProduct requires atomizing the argument, otherwise it will be repeated multiple times in the output
if not isinstance(node.right, IsAtom):
right = self._make_cte(right)
return Result(op(left.cols, right.cols), _merge(left.from_obj, right.from_obj))
elif len(left.cols) != len(right.cols):
raise ValueError("Mismatching operand dimensions in {0}".format(repr(node.op)))
elif isinstance(node.op, Max):
# Max is implemented as (if x > y then x else y), hence to avoid double-computation,
# we save x and y in separate CTE's
if not isinstance(node.left, IsAtom):
left = self._make_cte(left)
if not isinstance(node.right, IsAtom):
right = self._make_cte(right)
return Result([op(lc, rc) for lc, rc in zip(left.cols, right.cols)], _merge(left.from_obj, right.from_obj))
else:
return Result([op(lc, rc) for lc, rc in zip(left.cols, right.cols)], _merge(left.from_obj, right.from_obj))
def MakeVector(self, vec):
result = []
tbls = set()
for el in vec.elems:
el = self(el)
tbls.add(el.from_obj)
if len(el.cols) != 1:
raise ValueError("MakeVector expects a list of scalars")
result.append(el.cols[0])
tbls = list(tbls)
target_table = tbls[0]
for tbl in tbls[1:]:
new_joined = target_table.join(tbl, onclause=target_table.key_ == tbl.key_)
new_joined.key_ = target_table.key_
target_table = new_joined
return Result(result, target_table)
def IfThenElse(self, node):
test, iftrue, iffalse = self(node.test), self(node.iftrue), self(node.iffalse)
return Result([_iif(test.cols[0], ift, iff) for ift, iff in zip(iftrue.cols, iffalse.cols)],
reduce(_merge, [test.from_obj, iftrue.from_obj, iffalse.from_obj]))
MatVecProduct = is_(_matvecproduct)
DotProduct = is_(_dotproduct)
Exp = is_(sa.func.exp)
Log = is_(sa.func.log)
Sqrt = is_(sa.func.sqrt)
Abs = is_(sa.func.abs)
Step = is_(_step)
Max = is_(lambda x, y: _max([x, y]))
# ------ The actual "multi-stage" logic -----
def Let(self, node, **kw):
if not self.multistage:
return StandardOps.Let(self, node, **kw)
self.references.append({})
for defn in node.defs:
self.references[-1][defn.name] = self._make_cte(self(defn.body))
result = self(node.body)
self.references.pop()
return result
def Reference(self, node):
if not self.multistage:
raise ValueError("References are not supported in non-multistage mode")
if node.name not in self.references[-1]:
raise ValueError("Undefined reference: {0}".format(node.name))
return self.references[-1][node.name]
def _make_cte(self, result, col_names=None, key_label='__id__'):
if not self.multistage:
return result
if col_names is None:
col_names = ['f{0}'.format(i+1) for i in range(len(result.cols))]
labeled_cols = [c.label(n) for c, n in zip(result.cols, col_names)]
new_tbl = sa.select([result.from_obj.key_.label(key_label)] + labeled_cols, from_obj=result.from_obj).cte(next(self.temp_ids))
new_tbl.key_ = new_tbl.columns[key_label]
new_cols = [new_tbl.columns[n] for n in col_names]
return Result(new_cols, new_tbl)
def _argmax(self, result):
if len(result.cols) == 1:
return Result([sa.literal(0)], self.from_obj)
features = self._make_cte(result)
max_val = Result([_max(features.cols)], features.from_obj)
max_val = self._make_cte(max_val, ['_max'])
argmax = sa.case([(col == max_val.cols[0], i)
for i, col in enumerate(features.cols[:-1])],
else_=len(features.cols)-1)
return Result([argmax], _merge(features.from_obj, max_val.from_obj))
def _vecmax(self, result):
return Result([_max(result.cols)], result.from_obj)
def _softmax(self, result):
return self._vecsumnormalize(Result([sa.func.exp(col) for col in result.cols], result.from_obj))
def _vecsumnormalize(self, result):
features = self._make_cte(result)
sum_val = Result([_sum(features.cols)], features.from_obj)
sum_val = self._make_cte(sum_val, ['_sum'])
return Result([col/sum_val.cols[0] for col in features.cols],
_merge(features.from_obj, sum_val.from_obj))
def _vecsum(self, result):
return Result([_sum(result.cols)], result.from_obj)
# ------- SQLAlchemy "greatest" function
# See https://docs.sqlalchemy.org/en/latest/core/compiler.html
#pylint: disable=wrong-import-position,wrong-import-order
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
res = compiler.visit_function(element, **kw)
return res
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return compiler.process(sa.case([(arg1 > arg2, arg1)], else_=arg2), **kw)
# Utilities ----------------------------------
import sqlalchemy.dialects
#pylint: disable=wildcard-import,unused-wildcard-import
from sqlalchemy.dialects import * # Must do it in order to getattr(sqlalchemy.dialects, ...)
def to_sql(sa_expr, dialect_name='sqlite'):
"""
Helper function. Given a SQLAlchemy expression, returns the corresponding
SQL string in a given dialect.
"""
dialect_module = getattr(sqlalchemy.dialects, dialect_name)
return str(sa_expr.compile(dialect=dialect_module.dialect(),
compile_kwargs={'literal_binds': True}))
|
Packs/HealthCheck/Scripts/HealthCheckDiskUsageLine/HealthCheckDiskUsageLine.py | diCagri/content | 799 | 12625270 | <gh_stars>100-1000
from CommonServerPython import * # noqa: F401
def main():
res = execute_command("demisto-api-get", {"uri": "/system/config"})
config_json = res['response']
partition = config_json.get('sysConf', {}).get('disk.partitions.to.monitor') or '/'
res = execute_command(
"demisto-api-post",
{
"uri": "/statistics/widgets/query",
"body": {
"size": 1440,
"dataType": "system",
"params": {
"timeFrame": "minutes",
},
"query": f"disk.usedPercent.{partition}",
"dateRange": {
"period": {
"byFrom": "hours",
"fromValue": 24,
},
},
"widgetType": "line",
},
})
stats = res["response"]
output = []
higher = 0
build_number = get_demisto_version()['buildNumber']
# in local development instances, the build number will be "REPLACE_THIS_WITH_CI_BUILD_NUM"
build_number = f'{build_number}' if build_number != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"
if int(build_number) >= 618657:
# Line graph:
for counter, entry in enumerate(stats):
higher = max(entry["data"][0], higher)
if counter % 2 == 0:
output.append({"name": counter, "data": [higher]})
higher = 0
data = {
"Type": 17,
"ContentsFormat": "line",
"Contents": {
"stats": output,
"params": {
"timeFrame": "minutes",
"format": "HH:mm",
"layout": "vertical"
}
}
}
else:
# Bar graph:
now = datetime.utcnow()
then = now - timedelta(days=1)
for counter, entry in enumerate(stats):
higher = max(entry["data"][0], higher)
if counter % 60 == 0:
then = then + timedelta(hours=1)
name = then.strftime("%H:%M")
output.append({"name": name, "data": [higher]})
higher = 0
data = {
"Type": 17,
"ContentsFormat": "bar",
"Contents": {
"stats": output,
"params": {
"layout": "horizontal"
}
}
}
return data
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
return_results(main())
|
inference/Megatron-LM-v2.3/tools/openwebtext/filter_ngrams.py | ganik/DeepSpeedExamples | 309 | 12625285 | <gh_stars>100-1000
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deduplicate downstream tasks from training dataset. 13-grams have been used.
All split documents with less than 200 characters got filtered. Any document
with more than 10 splits got filtered as well.
"""
from functools import partial
import json
import multiprocessing
import nltk
import re
import string
import sys
import time
def get_words(text):
# get all the lowercase words from text
words, positions = [], []
for match in re.finditer(r'\w+', text.lower()):
words.append(match.group(0))
positions.append(match.start())
return words, positions
def free_ngram(line, ngrams, ngram_size, filter_text_len,
splits_count, split_window_each_size):
# remove all the ngrams
try:
myjson = json.loads(line)
text_buf = [myjson['text']]
except Exception as e:
print("Error: {}".format(e), flush=True)
text_buf = []
text_buf_ngram_free = []
while len(text_buf) > 0:
# get the first one from the buffer
text = text_buf.pop(0)
words, positions = get_words(text)
not_ngram_free = True
punctuations = ".!?"
# find n-grams
for i in range(len(words) - ngram_size + 1):
seq = " ".join(words[i:i+ngram_size])
if seq in ngrams:
# splits the text
# first part of the text
pos = positions[i] - split_window_each_size
text_first = ""
while pos > 0 and not text[pos] in punctuations:
pos -= 1
if pos > 0:
text_first = text[0:pos+1]
pos = positions[i] + split_window_each_size
# last part of the text
text_second = ""
while pos < len(text) and not text[pos] in punctuations:
pos += 1
if pos + 1 < len(text):
text_second = text[pos+1:len(text)]
# first part of ngrams free
if len(text_first) > filter_text_len:
text_buf_ngram_free.append(text_first)
# add second part for further processing
if len(text_second) > filter_text_len:
text_buf.append(text_second)
not_ngram_free = False
break
# text are ngram free
if not_ngram_free:
text_buf_ngram_free.append(text)
return text_buf_ngram_free
if __name__ == '__main__':
print('finding possible duplicate content ...')
main_file = sys.argv[1] # lambada file
dedup_file = sys.argv[2] # Book corpus
output_file = sys.argv[3] #Filtered book corpus
ngrams = {}
id_prefix = "lambada"
# we use 13-grams, any text less than 200 characters got removed
# any text splitted more than 10 got removed as well
ngram_size = 13
filter_text_len = 200
splits_count = 10
split_window_each_size = 200
print('Reading file {} and computing ngrams'.format(main_file))
with open(main_file, 'r') as f:
for line in f:
try:
myjson = json.loads(line)
words, positions = get_words(myjson['text'])
for i in range(len(words) - ngram_size+1):
seq = " ".join(words[i:i+ngram_size])
if seq not in ngrams:
ngrams[seq] = positions[i]
except Exception as e:
print('Error:', e)
print("ngrams size {}".format(len(ngrams)))
print('Reading file {} and deduping n-grams'.format(dedup_file))
counter = 0
start_time = time.time()
out_f = open(output_file, 'wb')
splitted, ignored, split_mt_thld = 0, 0, 0
# Setup multi-processing.
num_workers = 40
fin = open(dedup_file, 'r', encoding='utf-8')
pool = multiprocessing.Pool(num_workers)
free_ngram_x=partial(free_ngram, ngrams=ngrams, ngram_size=ngram_size,
filter_text_len=filter_text_len, splits_count=splits_count,
split_window_each_size=split_window_each_size)
free_ngrams = pool.imap(free_ngram_x, fin, 25)
for text_buf_ngram_free in free_ngrams:
counter += 1
try:
if len(text_buf_ngram_free) > 1:
splitted += (len(text_buf_ngram_free) - 1)
if len(text_buf_ngram_free) == 0:
ignored += 1
# more than 10 splits ignored
if len(text_buf_ngram_free) > splits_count:
text_buf_ngram_free = []
split_mt_thld += 1
for i in range(len(text_buf_ngram_free)):
split_id_string = id_prefix + '-{:010d}'.format(int(counter)) \
+ '-{:010d}'.format(int(i))
outjson = json.dumps({"text":text_buf_ngram_free[i],
id_prefix+"_split_id":split_id_string},
ensure_ascii=False)
out_f.write(outjson.encode('utf-8'))
out_f.write('\n'.encode('utf-8'))
if counter % 1000 == 0:
print(' [search]> processed {} documents in {:.2f} seconds ...'.
format(counter, time.time() - start_time), flush=True)
except Exception as e:
print('Error:', e)
print("Deduped file written to: {}".format(output_file), flush=True)
print("Total docs {} splitted {} ignored {} docs with many splits {}".\
format(counter, splitted, ignored, split_mt_thld), flush=True)
print('done :-)')
|
tests/integrational/native_sync/test_revoke_v3.py | natekspencer/pubnub-python | 146 | 12625288 | from pubnub.pubnub import PubNub
from pubnub.models.consumer.v3.channel import Channel
from tests.integrational.vcr_helper import pn_vcr
from tests.helper import pnconf_pam_stub_copy
from pubnub.models.consumer.v3.access_manager import PNGrantTokenResult, PNRevokeTokenResult
pubnub = PubNub(pnconf_pam_stub_copy())
pubnub.config.uuid = "test_revoke"
@pn_vcr.use_cassette(
'tests/integrational/fixtures/native_sync/pam/revoke_token.yaml',
filter_query_parameters=['uuid', 'seqn', 'pnsdk', 'timestamp', 'signature']
)
def test_grant_and_revoke_token():
grant_envelope = pubnub.grant_token()\
.channels([Channel.id("test_channel").read().write().manage().update().join().delete()])\
.authorized_uuid("test")\
.ttl(60)\
.sync()
assert isinstance(grant_envelope.result, PNGrantTokenResult)
token = grant_envelope.result.get_token()
assert token
revoke_envelope = pubnub.revoke_token(token).sync()
assert isinstance(revoke_envelope.result, PNRevokeTokenResult)
assert revoke_envelope.result.status == 200
|
nipy/utils/__init__.py | bpinsard/nipy | 236 | 12625316 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
General utilities for code support.
These are modules that we (broadly-speaking) wrote; packages that other people
wrote, that we ship, go in the nipy.externals tree.
"""
from __future__ import absolute_import
from nibabel.data import make_datasource, DataError, datasource_or_bomber
# Module level datasource instances for convenience
from ..info import DATA_PKGS
templates = datasource_or_bomber(DATA_PKGS['nipy-templates'])
example_data = datasource_or_bomber(DATA_PKGS['nipy-data'])
try:
example_data.get_filename()
except DataError:
HAVE_EXAMPLE_DATA = False
else:
HAVE_EXAMPLE_DATA = True
try:
templates.get_filename()
except DataError:
HAVE_TEMPLATES = False
else:
HAVE_TEMPLATES = True
from .utilities import is_iterable, is_numlike, seq_prod
from nipy.testing import Tester
test = Tester().test
bench = Tester().bench
class VisibleDeprecationWarning(UserWarning):
"""Visible deprecation warning.
Python does not show any DeprecationWarning by default. Sometimes we do
want to show a deprecation warning, when the deprecation is urgent, or the
usage is probably a bug.
"""
class _NoValue:
"""Special keyword value.
This class may be used as the default value assigned to a deprecated
keyword in order to check if it has been given a user defined value.
"""
|
MITx-6.00.2x/Midterm Quiz/Problem_3-song_playlist.py | Sam-Gao-Xin/Courses- | 622 | 12625330 | # -*- coding: utf-8 -*-
"""
@author: salimt
"""
#Problem 3
#20.0/20.0 points (graded)
#You are creating a song playlist for your next party. You have a collection of songs that can be represented as a list of tuples. Each tuple has the following elements:
#name: the first element, representing the song name (non-empty string)
#song_length: the second, element representing the song duration (float >= 0)
#song_size: the third, element representing the size on disk (float >= 0)
#You want to try to optimize your playlist to play songs for as long as possible while making sure that the songs you pick do not take up more than a given amount of space on disk (the sizes should be less than or equal to the max_disk_size).
#You decide the best way to achieve your goal is to start with the first song in the given song list. If the first song doesn't fit on disk, return an empty list. If there is enough space for this song, add it to the playlist.
#For subsequent songs, you choose the next song such that its size on disk is smallest and that the song hasn't already been chosen. You do this until you cannot fit any more songs on the disk.
#Write a function implementing this algorithm, that returns a list of the song names in the order in which they were chosen, with the first element in the list being the song chosen first. Assume song names are unique and all the songs have different sizes on disk and different durations.
#You may not mutate any of the arguments.
#For example,
#If songs = [('Roar',4.4, 4.0),('Sail',3.5, 7.7),('Timber', 5.1, 6.9),('Wannabe',2.7, 1.2)] and max_size = 12.2, the function will return ['Roar','Wannabe','Timber']
#If songs = [('Roar',4.4, 4.0),('Sail',3.5, 7.7),('Timber', 5.1, 6.9),('Wannabe',2.7, 1.2)] and max_size = 11, the function will return ['Roar','Wannabe']
# Paste your entire function (including the definition) in the box. Do not import anything. Do not leave any debugging print statements.
# Paste your code here
def song_playlist(songs, max_size):
"""
songs: list of tuples, ('song_name', song_len, song_size)
max_size: float, maximum size of total songs that you can fit
Start with the song first in the 'songs' list, then pick the next
song to be the one with the lowest file size not already picked, repeat
Returns: a list of a subset of songs fitting in 'max_size' in the order
in which they were chosen.
"""
temp = []
temp.append(songs[0])
max_size -= songs[0][2]
songs_sorted = (sorted(songs, reverse=True, key=lambda x: x[2]))
if max_size < 0:
return []
for i, song in enumerate(songs_sorted):
weightB = songs_sorted[-(i+1)][2]
if weightB <= max_size and songs_sorted[-(i+1)] not in temp:
max_size -= weightB
temp.append(songs_sorted[-(i+1)])
names = []
for name in range(len(temp)):
names.append(temp[name][0])
return names
songs = [('Roar',4.4, 4.0),('Sail',3.5, 7.7),('Timber', 5.1, 6.9),('Wannabe',2.7, 1.2)]
max_size = 12.2
print(song_playlist(songs, max_size)) #['Roar','Wannabe','Timber']
print(song_playlist([('a', 4.0, 4.4), ('b', 7.7, 3.5), ('c', 6.9, 5.1), ('d', 1.2, 2.7)], 12.3)) #['a', 'd', 'b']
print(song_playlist([('a', 4.4, 4.0), ('b', 3.5, 7.7), ('c', 5.1, 6.9), ('d', 2.7, 1.2)], 20)) #['a', 'd', 'c', 'b'] |
setup.py | MojixCoder/python-jalali | 235 | 12625366 | import setuptools
from distutils.core import setup
setup(
name='jdatetime',
version='3.7.0',
packages=['jdatetime', ],
license='Python Software Foundation License',
keywords='Jalali implementation of Python datetime',
platforms='any',
author='<NAME>',
author_email='<EMAIL>',
description=("Jalali datetime binding for python"),
url="https://github.com/slashmili/python-jalali",
long_description=open('README').read(),
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development",
],
)
|
src/tools/libraryInfo/BUILD.py | t3kt/raytk | 108 | 12625371 | <reponame>t3kt/raytk
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from raytkBuild import BuildTaskContext
from .libraryInfoExt import LibraryInfoBuilder
ext.libraryInfo = LibraryInfoBuilder(COMP())
context = args[0] # type: BuildTaskContext
context.log('Updating library info')
context.safeDestroyOps(ops('write_*'))
ext.libraryInfo.Forcebuild()
context.finishTask()
|
GUI/AboutDialog.py | yasoob/youtube-dl-GUI | 203 | 12625378 | from PyQt5 import QtCore, QtWidgets
from UI.AboutDialog import Ui_Dialog
class AboutDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(AboutDialog, self).__init__(parent, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint) # type: ignore
self.ui = Ui_Dialog()
self.ui.setupUi(self)
|
Pytorch/train.py | jiajunhua/qjadud1994-Text_Detector | 221 | 12625387 | <filename>Pytorch/train.py
from __future__ import print_function
import time
import os
import argparse
import numpy as np
import cv2
from subprocess import Popen, PIPE
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
from augmentations import Augmentation_traininig
from loss import FocalLoss, OHEM_loss
from retinanet import RetinaNet
from datagen import ListDataset
from encoder import DataEncoder
from torch.autograd import Variable
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
def adjust_learning_rate(cur_lr, optimizer, gamma, step):
lr = cur_lr * (gamma ** (step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
parser = argparse.ArgumentParser(description='PyTorch RetinaNet Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--input_size', default=768, type=int, help='Input size for training')
parser.add_argument('--batch_size', default=8, type=int, help='Batch size for training')
parser.add_argument('--num_workers', default=8, type=int, help='Number of workers used in dataloading')
parser.add_argument('--resume', default=None, type=str, help='resume from checkpoint')
parser.add_argument('--dataset', type=str, help='select training dataset')
parser.add_argument('--multi_scale', default=False, type=str2bool, help='Use multi-scale training')
parser.add_argument('--focal_loss', default=True, type=str2bool, help='Use Focal loss or OHEM loss')
parser.add_argument('--logdir', default='logs/', type=str, help='Tensorboard log dir')
parser.add_argument('--max_iter', default=1200000, type=int, help='Number of training iterations')
parser.add_argument('--gamma', default=0.5, type=float, help='Gamma update for SGD')
parser.add_argument('--save_interval', default=500, type=int, help='Location to save checkpoint models')
parser.add_argument('--save_folder', default='eval/', help='Location to save checkpoint models')
parser.add_argument('--evaluation', default=False, type=str2bool, help='Evaulation during training')
parser.add_argument('--eval_step', default=1000, type=int, help='Evauation step')
parser.add_argument('--eval_device', default=6, type=int, help='GPU device for evaluation')
args = parser.parse_args()
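# Illustrative example invocation, not from the original repo (dataset name,
# paths and flag values below are placeholders):
#   python train.py --dataset=ICDAR2015 --batch_size=8 --input_size=768 \
#       --multi_scale=True --focal_loss=True --logdir=logs/ --save_folder=eval/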
assert torch.cuda.is_available(), 'Error: CUDA not found!'
assert args.focal_loss, "OHEM + ce_loss is not working... :("
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
# Data
print('==> Preparing data..')
trainset = ListDataset(root='/root/DB/', dataset=args.dataset, train=True,
transform=Augmentation_traininig, input_size=args.input_size, multi_scale=args.multi_scale)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers, collate_fn=trainset.collate_fn)
# set model (focal_loss vs OHEM_CE loss)
if args.focal_loss:
imagenet_pretrain = 'weights/retinanet_se50.pth'
criterion = FocalLoss()
num_classes = 1
else:
imagenet_pretrain = 'weights/retinanet_se50_OHEM.pth'
criterion = OHEM_loss()
num_classes = 2
# Training detail options
stepvalues = (10000, 20000, 30000, 40000, 50000) if args.dataset in ["SynthText"] else (2000, 4000, 6000, 8000, 10000)
best_loss = float('inf') # best test loss
start_epoch = 0 # start from epoch 0 or last epoch
iteration = 0
cur_lr = args.lr
mean=(0.485,0.456,0.406)
var=(0.229,0.224,0.225)
step_index = 0
pEval = None
# Model
net = RetinaNet(num_classes)
net.load_state_dict(torch.load(imagenet_pretrain))
if args.resume:
print('==> Resuming from checkpoint..', args.resume)
checkpoint = torch.load(args.resume)
net.load_state_dict(checkpoint['net'])
#start_epoch = checkpoint['epoch']
#iteration = checkpoint['iteration']
#cur_lr = checkpoint['lr']
#step_index = checkpoint['step_index']
#optimizer.load_state_dict(state["optimizer"])
print("multi_scale : ", args.multi_scale)
print("input_size : ", args.input_size)
print("stepvalues : ", stepvalues)
print("start_epoch : ", start_epoch)
print("iteration : ", iteration)
print("cur_lr : ", cur_lr)
print("step_index : ", step_index)
print("num_gpus : ", torch.cuda.device_count())
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.cuda()
# Training
net.train()
net.module.freeze_bn() # you must freeze batchnorm
optimizer = optim.SGD(net.parameters(), lr=cur_lr, momentum=0.9, weight_decay=1e-4)
#optimizer = optim.Adam(net.parameters(), lr=cur_lr)
encoder = DataEncoder()
# tensorboard visualize
writer = SummaryWriter(log_dir=args.logdir)
t0 = time.time()
for epoch in range(start_epoch, 10000):
if iteration > args.max_iter:
break
for inputs, loc_targets, cls_targets in trainloader:
inputs = Variable(inputs.cuda())
loc_targets = Variable(loc_targets.cuda())
cls_targets = Variable(cls_targets.cuda())
optimizer.zero_grad()
loc_preds, cls_preds = net(inputs)
loc_loss, cls_loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
loss = loc_loss + cls_loss
loss.backward()
optimizer.step()
if iteration % 20 == 0:
t1 = time.time()
print('iter ' + repr(iteration) + ' (epoch ' + repr(epoch) + ') || loss: %.4f || l loc_loss: %.4f || l cls_loss: %.4f (Time : %.1f)'\
% (loss.sum().item(), loc_loss.sum().item(), cls_loss.sum().item(), (t1 - t0)))
t0 = time.time()
writer.add_scalar('loc_loss', loc_loss.sum().item(), iteration)
writer.add_scalar('cls_loss', cls_loss.sum().item(), iteration)
writer.add_scalar('loss', loss.sum().item(), iteration)
# show inference image in tensorboard
infer_img = np.transpose(inputs[0].cpu().numpy(), (1,2,0))
infer_img *= var
infer_img += mean
infer_img *= 255.
infer_img = np.clip(infer_img, 0, 255)
infer_img = infer_img.astype(np.uint8)
h, w, _ = infer_img.shape
boxes, labels, scores = encoder.decode(loc_preds[0], cls_preds[0], (w,h))
boxes = boxes.reshape(-1, 4, 2).astype(np.int32)
            if boxes.shape[0] != 0:
infer_img = cv2.polylines(infer_img, boxes, True, (0,255,0), 4)
writer.add_image('image', infer_img, iteration)
writer.add_scalar('input_size', h, iteration)
writer.add_scalar('learning_rate', cur_lr, iteration)
t0 = time.time()
if iteration % args.save_interval == 0 and iteration > 0:
print('Saving state, iter : ', iteration)
state = {
'net': net.module.state_dict(),
"optimizer": optimizer.state_dict(),
'iteration' : iteration,
'epoch': epoch,
'lr' : cur_lr,
'step_index' : step_index
}
model_file = args.save_folder + 'ckpt_' + repr(iteration) + '.pth'
torch.save(state, model_file)
if iteration in stepvalues:
step_index += 1
cur_lr = adjust_learning_rate(cur_lr, optimizer, args.gamma, step_index)
if iteration > args.max_iter:
break
if args.evaluation and iteration % args.eval_step == 0:
try:
if pEval is None:
print("Evaluation started at iteration {} on IC15...".format(iteration))
eval_cmd = "CUDA_VISIBLE_DEVICES=" + str(args.eval_device) + \
" python eval.py" + \
" --tune_from=" + args.save_folder + 'ckpt_' + repr(iteration) + '.pth' + \
" --input_size=1024" + \
" --output_zip=result_temp1"
pEval = Popen(eval_cmd, shell=True, stdout=PIPE, stderr=PIPE)
elif pEval.poll() is not None:
(scorestring, stderrdata) = pEval.communicate()
hmean = float(str(scorestring).strip().split(":")[3].split(",")[0].split("}")[0].strip())
writer.add_scalar('test_hmean', hmean, iteration)
print("test_hmean for {}-th iter : {:.4f}".format(iteration, hmean))
if pEval is not None:
pEval.kill()
pEval = None
except Exception as e:
print("exception happened in evaluation ", e)
if pEval is not None:
pEval.kill()
pEval = None
iteration += 1
|
third_party/blink/renderer/build/scripts/make_document_policy_features_tests.py | zealoussnow/chromium | 14,668 | 12625393 | <reponame>zealoussnow/chromium
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Note(crbug.com/1167597): Do NOT change the file name to end with
# '_test.py' or '_unittest.py' as these files will be recognized by
# 'run_blinkpy_tests.py' task, where jinja2 module is not available.
import unittest
import os
from make_document_policy_features import DocumentPolicyFeatureWriter
from writer_test_util import path_to_test_file, WriterTest
class MakeDocumentPolicyFeaturesTest(WriterTest):
def test_default_value_control(self):
self._test_writer(
DocumentPolicyFeatureWriter, [
path_to_test_file('document_policy_default_value_control',
'input', 'document_policy_features.json5')
],
path_to_test_file('document_policy_default_value_control',
'output'))
if __name__ == "__main__":
unittest.main()
|
Stephanie/TextProcessor/audio_text_manager.py | JeremyARussell/stephanie-va | 866 | 12625394 | import re
import speech_recognition as sr
from Stephanie.AudioManager.audio_manager import AudioManager
from Stephanie.TextManager.text_manager import TextManager
from Stephanie.TextProcessor.text_sorter import TextSorter
from Stephanie.TextProcessor.text_learner import TextLearner
class AudioTextManager(AudioManager, TextManager):
def __init__(self, events):
self.modules = ()
self.events = events
self.r = self.get_recognizer()
self.sr = self.get_speech_recognition()
AudioManager.__init__(self, self.r, self.sr.UnknownValueError, self.sr.RequestError)
TextManager.__init__(self)
self.sorter = TextSorter()
self.learner = TextLearner()
self.audio = None
def listen(self):
self.audio = self.process_listen()
return self
def hear(self, source):
audio = self.get_audio(source)
return audio
def decipher(self):
text = self.get_text_from_speech(self.audio)
return text
def say(self, speech):
speech_result_filename = self.get_speech_from_text(speech).save_speech_result()
self.speak_result(speech_result_filename)
def understand(self, modules, raw_text, explicit=False):
module_info = self.set_modules(modules).learn(raw_text, explicit)
return self.get_method_name(module_info['module_info'])
def set_modules(self, modules):
self.modules = modules
return self
def sort(self, raw_text, explicit):
if raw_text:
subwords, keywords = self.sorter.sort(raw_text, explicit)
return subwords, keywords
return False, False
def learn(self, raw_text, explicit=False):
subwords, keywords = self.sort(raw_text, explicit)
if keywords:
module_info = self.learner.set_modules(self.modules).learn(keywords)
print(module_info)
return {
'subwords': subwords,
'keywords': keywords,
'module_info': module_info
}
else:
return False
@staticmethod
def get_recognizer():
return sr.Recognizer()
@staticmethod
def get_speech_recognition():
return sr
def process_listen(self):
with self.sr.Microphone() as source:
return self.hear(source)
def get_method_name(self, module_info):
raw_func_name = module_info[0].split("@")[1]
func_name = self.convert_to_snake_case(raw_func_name)
return func_name
@staticmethod
def convert_to_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
examples/basic/picture2mesh.py | evanphilip/vedo | 836 | 12625396 | # Transform a picture into a mesh
from vedo import Picture, dataurl, show
import numpy as np
pic = Picture(dataurl+"images/dog.jpg").smooth(5)
msh = pic.tomesh() # make a quad-mesh out of it
# build a scalar array with intensities
rgb = msh.pointdata["RGBA"]
intensity = np.sum(rgb, axis=1)
intensityz = np.zeros_like(rgb)
intensityz[:,2] = intensity / 10
# set the new vertex points
pts = msh.points() + intensityz
msh.points(pts)
# more cosmetics
msh.triangulate().smooth()
msh.lighting("default").lineWidth(0.1)
msh.cmap("bone", "RGBA").addScalarBar()
msht = pic.clone().threshold(100).lineWidth(0)
show([[pic, "A normal jpg image.."],
[msh, "..becomes a polygonal Mesh"],
[msht, "Thresholding also generates a Mesh"]
], N=3, axes=1, zoom=5, elevation=-20, bg='black').close()
|
__scraping__/automationpractice.com - selenium/main.py | whitmans-max/python-examples | 140 | 12625414 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException
import time
try:
#driver = webdriver.Chrome()
driver = webdriver.Firefox()
url = driver.get("http://automationpractice.com/index.php")
#driver.maximize_window()
search_text_box = driver.find_element_by_id("search_query_top")
search_text_box.send_keys("Printed")
time.sleep(1) # page display (and update) autocompletion when you make little longer delay
# --- select using arrow key ---
# move selection on list and accept it
#search_text_box.send_keys(Keys.ARROW_DOWN)
#search_text_box.send_keys(Keys.ARROW_DOWN)
#search_text_box.send_keys(Keys.ARROW_DOWN)
#search_text_box.send_keys(Keys.ENTER)
# OR
# --- select using tag `<li>` and `text()` in autocompletion ---
# click on first matching item on list
#one_option = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "//li[contains(text(),'Dress')]")))
one_option = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "//div[@class='ac_results']//li[contains(text(),'Dress')]")))
print(' tag:', one_option.tag_name)
print('text:', one_option.text)
print('bold:', one_option.find_element_by_xpath('.//strong').text)
one_option.click()
# OR
# --- get all elements in autocompletion using `<li>` tag ---
# get many matching items and use [index] to click on some item on list
#one_option = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "//li[contains(text(),'Dress')]")))
#all_options = driver.find_elements_by_xpath("//li[contains(text(),'Dress')]")
#for option in all_options:
# print(option.tag_name, ':', option.text)
#all_options[1].click()
print(' current:', driver.current_url)
print('expected:', "http://automationpractice.com/index.php?id_product=3&controller=product")
print('the same:', driver.current_url == "http://automationpractice.com/index.php?id_product=3&controller=product")
assert "http://automationpractice.com/index.php?id_product=3&controller=product" == driver.current_url, "This Test case is fallied"
#assertEqual("http://automationpractice.com/index.php?id_product=3&controller=product", self.driver.current_url, "This Test case is fallied")
except NoSuchElementException as e:
print('NoSuchElementException:', e)
except TimeoutException as e:
print('TimeoutException:', e)
except AssertionError as e:
print('AssertionError:', e)
|
InnerEye-DataQuality/create_environment.py | faz1993/InnerEye-DeepLearning | 402 | 12625423 | <filename>InnerEye-DataQuality/create_environment.py<gh_stars>100-1000
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import subprocess
from pathlib import Path
INNEREYE_DQ_ENVIRONMENT_FILE = Path(__file__).parent.absolute() / 'environment.yml'
def create_environment(environment_name: str = "InnerEyeDataQuality") -> None:
print(f"Creating environment {environment_name} with the settings in "
f"{INNEREYE_DQ_ENVIRONMENT_FILE}")
subprocess.Popen(
f"conda env create --file {INNEREYE_DQ_ENVIRONMENT_FILE} --name {environment_name}",
shell=True).communicate()
if __name__ == '__main__':
create_environment()
|
scripts/change_incar_parameters.py | faradaymahe/VASPpy | 225 | 12625435 | '''
Modify recursively parameters in all INCAR file.
'''
import argparse
import commands
import logging
from vaspy import PY2
from vaspy.incar import InCar
SHELL_COMMAND = "find ./ -name 'INCAR'"
_logger = logging.getLogger("vaspy.script")
if "__main__" == __name__:
# Check command validity.
status, output = commands.getstatusoutput(SHELL_COMMAND)
if status:
raise SystemExit("Invalid shell commands - '{}'".format(SHELL_COMMAND))
# Get InCar objects.
incar_paths = (incar_path.strip() for incar_path in output.split('\n'))
incars = [InCar(incar_path) for incar_path in incar_paths]
# Get all possible arguments.
set_list = [set(incar.pnames) for incar in incars]
possible_args = set.intersection(*set_list)
# Set arguments for this script.
parser = argparse.ArgumentParser()
for arg in possible_args:
arg_str = "--{}".format(arg)
parser.add_argument(arg_str, help="set {} INCAR parameter".format(arg))
args_space = parser.parse_args()
# Change parameters for all incars.
if PY2:
pname_value_pairs = args_space.__dict__.iteritems()
else:
pname_value_pairs = args_space.__dict__.items()
    for pname, value in pname_value_pairs:
if value is None:
continue
for incar in incars:
_logger.info("{} --> {} in {}.".format(pname, value, incar.filename))
incar.set(pname, value)
incar.tofile()
_logger.info("{} INCAR files ... ok.".format(len(incars)))
|
cement/core/mail.py | tomekr/cement | 826 | 12625447 | """Cement core mail module."""
from abc import abstractmethod
from ..core.interface import Interface
from ..core.handler import Handler
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class MailInterface(Interface):
"""
This class defines the Mail Interface. Handlers that implement this
interface must provide the methods and attributes defined below. In
general, most implementations should sub-class from the provided
:class:`MailHandler` base class as a starting point.
"""
class Meta:
"""Handler meta-data."""
interface = 'mail'
"""The label identifier of the interface."""
@abstractmethod
def send(self, body, **kwargs):
"""
Send a mail message. Keyword arguments override configuration
defaults (cc, bcc, etc).
Args:
body (str): The message body to send
Keyword Args:
to (list): List of recipients (generally email addresses)
from_addr (str): Address (generally email) of the sender
cc (list): List of CC Recipients
bcc (list): List of BCC Recipients
subject (str): Message subject line
Returns:
bool: ``True`` if message was sent successfully, ``False``
otherwise
Example:
.. code-block:: python
# Using all configuration defaults
app.mail.send('This is my message body')
# Overriding configuration defaults
app.mail.send('My message body'
to=['<EMAIL>'],
from_addr='<EMAIL>',
cc=['<EMAIL>', '<EMAIL>'],
subject='This is my subject',
)
"""
pass # pragma: nocover
class MailHandler(MailInterface, Handler):
"""
Mail handler implementation.
**Configuration**
This handler supports the following configuration settings:
* **to** - Default ``to`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **from_addr** - Default ``from_addr`` address
* **cc** - Default ``cc`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **bcc** - Default ``bcc`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **subject** - Default ``subject``
* **subject_prefix** - Additional string to prepend to the ``subject``
"""
class Meta:
"""
Handler meta-data (can be passed as keyword arguments to the parent
class).
"""
#: Configuration default values
config_defaults = {
'to': [],
'from_addr': '<EMAIL>',
'cc': [],
'bcc': [],
'subject': 'Default Subject Line',
'subject_prefix': '',
}
def _setup(self, app_obj):
super()._setup(app_obj)
self._validate_config()
def _validate_config(self):
# convert comma separated strings to lists (ConfigParser)
for item in ['to', 'cc', 'bcc']:
if item in self.app.config.keys(self._meta.config_section):
value = self.app.config.get(self._meta.config_section, item)
# convert a comma-separated string to a list
if type(value) is str:
value_list = value.split(',')
                    # clean up extra space if they had it in between commas
value_list = [x.strip() for x in value_list]
# set the new extensions value in the config
self.app.config.set(self._meta.config_section, item,
value_list)
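# --- Illustrative sketch (not part of the original module) -------------------
# The base class above only validates configuration; a concrete backend must
# still implement send(). The hypothetical handler below (the class name and
# the 'dummy' label are assumptions, not a Cement API) simply records each
# message so it can be inspected, e.g. in tests.
class DummyMailHandler(MailHandler):
    class Meta:
        label = 'dummy'
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.sent_messages = []
    def send(self, body, **kw):
        # start from the configured defaults, then apply per-call overrides
        section = self._meta.config_section
        params = {key: self.app.config.get(section, key)
                  for key in self.app.config.keys(section)}
        params.update(kw)
        self.sent_messages.append((body, params))
        return True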
|
romp/lib/models/resnet_50.py | iory/ROMP | 385 | 12625451 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torchvision.models.resnet as resnet
import torchvision.transforms.functional as F
import sys, os
root_dir = os.path.join(os.path.dirname(__file__),'..')
if root_dir not in sys.path:
sys.path.insert(0, root_dir)
from utils import BHWC_to_BCHW, copy_state_dict
from models.CoordConv import get_coord_maps
import config
from config import args
from models.basic_modules import BasicBlock,Bottleneck,HighResolutionModule
BN_MOMENTUM = 0.1
class ResNet_50(nn.Module):
def __init__(self, **kwargs):
self.inplanes = 64
super(ResNet_50, self).__init__()
self.make_resnet()
self.backbone_channels = 64
#self.init_weights()
#self.load_pretrain_params()
def load_pretrain_params(self):
if os.path.exists(args().resnet_pretrain):
success_layer = copy_state_dict(self.state_dict(), torch.load(args().resnet_pretrain), prefix = '', fix_loaded=True)
def image_preprocess(self, x):
if args().pretrain == 'imagenet' or args().pretrain == 'spin':
x = BHWC_to_BCHW(x)/255.
#x = F.normalize(x, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225],inplace=True).contiguous() # for pytorch version>1.8.0
x = torch.stack(list(map(lambda x:F.normalize(x, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225],inplace=False),x)))
else:
x = ((BHWC_to_BCHW(x)/ 255.) * 2.0 - 1.0).contiguous()
return x
def make_resnet(self):
block, layers = Bottleneck, [3, 4, 6, 3]
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_resnet_layer(block, 64, layers[0])
self.layer2 = self._make_resnet_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_resnet_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_resnet_layer(block, 512, layers[3], stride=2)
self.deconv_layers = self._make_deconv_layer(3,(256,128,64),(4,4,4))
def forward(self,x):
x = self.image_preprocess(x)
x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
return x
def _make_resnet_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),)#,affine=False),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
if i==0:
self.inplanes=2048
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=False))
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))#,affine=False))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
for name, _ in m.named_parameters():
if name in ['bias']:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
for name, _ in m.named_parameters():
if name in ['bias']:
nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
args().pretrain = 'spin'
model = ResNet_50().cuda()
a=model(torch.rand(2,512,512,3).cuda())
for i in a:
print(i.shape) |
synapse/logging/_terse_json.py | mlakkadshaw/synapse | 9,945 | 12625488 | <filename>synapse/logging/_terse_json.py<gh_stars>1000+
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Log formatters that output terse JSON.
"""
import json
import logging
_encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":"))
# The properties of a standard LogRecord that should be ignored when generating
# JSON logs.
_IGNORED_LOG_RECORD_ATTRIBUTES = {
"args",
"asctime",
"created",
"exc_info",
# exc_text isn't a public attribute, but is used to cache the result of formatException.
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"lineno",
"message",
"module",
"msecs",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack_info",
"thread",
"threadName",
}
class JsonFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
event = {
"log": record.getMessage(),
"namespace": record.name,
"level": record.levelname,
}
return self._format(record, event)
def _format(self, record: logging.LogRecord, event: dict) -> str:
# Add attributes specified via the extra keyword to the logged event.
for key, value in record.__dict__.items():
if key not in _IGNORED_LOG_RECORD_ATTRIBUTES:
event[key] = value
if record.exc_info:
exc_type, exc_value, _ = record.exc_info
if exc_type:
event["exc_type"] = f"{exc_type.__name__}"
event["exc_value"] = f"{exc_value}"
return _encoder.encode(event)
class TerseJsonFormatter(JsonFormatter):
def format(self, record: logging.LogRecord) -> str:
event = {
"log": record.getMessage(),
"namespace": record.name,
"level": record.levelname,
"time": round(record.created, 2),
}
return self._format(record, event)
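# --- Illustrative usage sketch (not part of the original module) -------------
# Wiring one of the formatters above onto a standard library handler. The
# logger name and output stream are arbitrary choices for the example.
if __name__ == "__main__":
    import sys
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(TerseJsonFormatter())
    logger = logging.getLogger("demo")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    # keyword arguments passed via `extra` become top-level keys in the JSON line
    logger.info("hello", extra={"request_id": "abc123"})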
|
alipay/aop/api/response/AlipayPcreditLoanBeikeaccountInterestfreeModifyResponse.py | antopen/alipay-sdk-python-all | 213 | 12625490 | <reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.BeikeAccountResponse import BeikeAccountResponse
class AlipayPcreditLoanBeikeaccountInterestfreeModifyResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditLoanBeikeaccountInterestfreeModifyResponse, self).__init__()
self._beike_account_response = None
@property
def beike_account_response(self):
return self._beike_account_response
@beike_account_response.setter
def beike_account_response(self, value):
if isinstance(value, BeikeAccountResponse):
self._beike_account_response = value
else:
self._beike_account_response = BeikeAccountResponse.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayPcreditLoanBeikeaccountInterestfreeModifyResponse, self).parse_response_content(response_content)
if 'beike_account_response' in response:
self.beike_account_response = response['beike_account_response']
|
websauna/tests/sitemapsamples.py | stevepiercy/websauna | 286 | 12625502 | <filename>websauna/tests/sitemapsamples.py
"""Permission test views."""
# Standard Library
import typing as t
# Pyramid
from pyramid.interfaces import ILocation
from pyramid.response import Response
from pyramid.view import view_config
from zope.interface import implementer
# Websauna
from websauna.system.core.interfaces import IContainer
from websauna.system.core.root import Root
from websauna.system.core.route import simple_route
from websauna.system.core.sitemap import include_in_sitemap
from websauna.system.core.traversal import Resource
from websauna.system.http import Request
class SampleResource(Resource):
"""Leaf resource in tree."""
def __init__(self, request: Request, name: str):
super(SampleResource, self).__init__(request)
self.name = name
@implementer(IContainer)
class SampleContainer(SampleResource):
"""Node resource in tree."""
def __init__(self, request, name):
super(SampleContainer, self).__init__(request, name)
def items(self) -> t.Iterable[t.Tuple[str, ILocation]]:
        # Every container instance needs access to the current request
request = self.request
def construct_child(child_id, resource: Resource):
# Set __parent__ pointer
resource = Resource.make_lineage(self, resource, child_id)
return child_id, resource
# Assume this gets populated dynamically from the database
# when items() is first time called.
yield construct_child("foo", SampleResource(request, "Foo"))
yield construct_child("bar", SampleResource(request, "Bar"))
# First level container has second level nested container
if self.name == "Container folder":
yield construct_child("nested", SampleContainer(request, "Nested"))
@view_config(context=SampleResource, name="", route_name="sitemap_test")
def default_sample_view(sample_resource: SampleResource, request: Request):
return Response()
@view_config(context=SampleResource, name="additional", route_name="sitemap_test")
def additional_sample_view(sample_resource: SampleResource, request: Request):
return Response()
def traverse_condition(context, request):
return True
@view_config(context=SampleResource, name="conditional", route_name="sitemap_test")
@include_in_sitemap(condition=traverse_condition)
def conditional_sample_view(sample_resource: SampleResource, request: Request):
return Response()
def skipped_condition(context, request):
return False
@view_config(context=SampleResource, name="skipped_conditional", route_name="sitemap_test")
@include_in_sitemap(condition=skipped_condition)
def skipped_conditional(sample_resource: SampleResource, request: Request):
return Response()
@view_config(context=SampleContainer, name="", route_name="sitemap_test")
def default_container_view(sample_resource: SampleResource, request: Request):
return Response()
@view_config(context=SampleContainer, name="additional", route_name="sitemap_test")
def additional_container_view(sample_resource: SampleResource, request: Request):
return Response()
@view_config(context=SampleContainer, name="permissioned", route_name="sitemap_test", permission="edit")
def permissioned_container_view(sample_resource: SampleResource, request: Request):
return Response()
@simple_route("/parameter_free_route", route_name="parameter_free_route")
def parameter_free_route(request: Request):
return Response()
@simple_route("/parameterized_route/{param}", route_name="parameterized_route")
def parameterized_route(request: Request):
return Response()
@simple_route("/permissioned_route", route_name="permissioned_route", permission="edit")
def permissioned_route(request: Request):
return Response()
@simple_route("/post_only_route", route_name="post_only_route", request_method="POST")
def post_only_route(request: Request):
return Response()
@simple_route("/included_route", route_name="included_route")
@include_in_sitemap(True)
def included_route(request: Request):
return Response()
@simple_route("/skipped_route", route_name="skipped_route")
@include_in_sitemap(False)
def skipped_route(request: Request):
return Response()
def condition(context, request):
return True
@simple_route("/conditional_route", route_name="conditional_route")
@include_in_sitemap(condition=condition)
def another_skipped_route(request: Request):
return Response()
def container_factory(request):
"""Set up __parent__ and __name__ pointers required for traversal for container root."""
container = SampleContainer(request, "Container folder")
root = Root.root_factory(request)
return Resource.make_lineage(root, container, "container")
|
salt/tests/unit/formulas/fixtures/templates.py | SaintLoong/metalk8s | 255 | 12625521 | """Expose a `template_path` parameterized fixture to list all "testable" templates.
This will list all files with one of the extensions in `TEMPLATE_EXTS`, filtering out
directories listed in `EXCLUDE_DIRS`.
"""
from pathlib import Path
from typing import List, Optional
import pytest
from tests.unit.formulas import paths
TEMPLATE_EXTS = ["sls", "sls.in", "j2", "j2.in", "jinja"]
EXCLUDE_DIRS = [ # relative to paths.SALT_DIR
"tests",
"_auth",
"_beacons",
"_modules",
"_pillar",
"_renderers",
"_roster",
"_runners",
"_states",
"_utils",
]
def _filter_path(path: Path) -> Optional[Path]:
if not path.is_file():
return None
path = path.relative_to(paths.SALT_DIR)
for exclude in EXCLUDE_DIRS:
try:
path.relative_to(exclude)
except ValueError:
continue
else:
return None
return path
def list_templates() -> List[Path]:
"""List all template files to validate in rendering tests."""
templates: List[Path] = []
for ext in TEMPLATE_EXTS:
templates.extend(
path
for path in map(_filter_path, paths.SALT_DIR.glob(f"**/*.{ext}"))
if path is not None
)
return templates
@pytest.fixture(name="template_path", params=list_templates(), ids=str)
def fixture_template_path(request: pytest.FixtureRequest) -> Path:
"""Yields template paths from the return of `list_templates`."""
param: Optional[Path] = getattr(request, "param", None)
assert (
param is not None
), "The `template_path` fixture must be indirectly parametrized"
return param
|
tools/gr_to_mtx_symmetric.py | jdwapman/gunrock | 790 | 12625531 | <filename>tools/gr_to_mtx_symmetric.py
#!/usr/local/bin/python
"""
Simple python script to convert .gr format graph to .mtx format
"""
import os
import sys
import string
### check command line args
if (len(sys.argv)) != 2:
print ' Usage: python gr_to_mtx_symmetric.py graph.gr'
sys.exit()
### gr graph input
file_gr = sys.argv[1]
### matrix-market format output file
file_mm = sys.argv[1].split('.')[0] + ".symmetric.mtx"
line_num = 0;
with open(file_gr, 'r') as gr, open(file_mm, 'w') as mm:
mm.write('%%MatrixMarket matrix coordinate Integer symmetric\n')
for line in gr:
### skip blank lines and comments
if line.strip() == '' or 'c' in line:
pass
else:
item = line.split(' ')
if item[0] == 'p':
### write first line -> nodes nodes edges
n = item[2]
e = item[3].split()
e = e[0]
write = str(n) + ' ' + str(n)+ ' ' + str(e) + '\n'
mm.write(write)
if item[0] == 'a':
### write rest of mtx contents -> dst src wight
v = item[1]
u = item[2]
w = item[3].split()
w = w[0]
write = str(u) + ' ' + str(v) + ' ' + str(w) + '\n'
mm.write(write)
gr.close()
mm.close()
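# Illustrative example (not part of the original script): for an input graph.gr
# containing the lines
#     p sp 4 4 5
#     a 1 2 10
# the loop above writes the MatrixMarket header, the size line "4 4 5", and the
# entry "2 1 10" (destination, source, weight) into graph.symmetric.mtx.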
|
cli/tests/pcluster/models/test_s3_bucket.py | maclema/aws-parallelcluster | 415 | 12625537 | <filename>cli/tests/pcluster/models/test_s3_bucket.py
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from pcluster.aws.common import AWSClientError
from tests.pcluster.aws.dummy_aws_api import mock_aws_api
from tests.pcluster.models.dummy_s3_bucket import dummy_cluster_bucket, mock_bucket
@pytest.mark.parametrize(
"region,create_error",
[
("eu-west-1", None),
("us-east-1", None),
("eu-west-1", AWSClientError("create_bucket", "An error occurred")),
],
)
def test_create_s3_bucket(region, create_error, mocker):
bucket_name = "test"
expected_params = {"Bucket": bucket_name}
os.environ["AWS_DEFAULT_REGION"] = region
if region != "us-east-1":
# LocationConstraint specifies the region where the bucket will be created.
# When the region is us-east-1 we are not specifying this parameter because it's the default region.
expected_params["CreateBucketConfiguration"] = {"LocationConstraint": region}
mock_aws_api(mocker)
mocker.patch("pcluster.aws.s3.S3Client.create_bucket", side_effect=create_error)
mock_bucket(mocker)
bucket = dummy_cluster_bucket(bucket_name=bucket_name)
if create_error:
with pytest.raises(AWSClientError, match="An error occurred"):
bucket.create_bucket()
@pytest.mark.parametrize(
"put_bucket_versioning_error, put_bucket_encryption_error, put_bucket_policy_error",
[
(None, None, None),
(AWSClientError("put_bucket_versioning", "An error occurred"), None, None),
(None, AWSClientError("put_bucket_encryption", "An error occurred"), None),
(None, None, AWSClientError("put_bucket_policy", "An error occurred")),
],
)
def test_configure_s3_bucket(mocker, put_bucket_versioning_error, put_bucket_encryption_error, put_bucket_policy_error):
mock_aws_api(mocker)
mock_bucket(mocker)
bucket = dummy_cluster_bucket()
mocker.patch("pcluster.aws.s3.S3Client.put_bucket_versioning", side_effect=put_bucket_versioning_error)
mocker.patch("pcluster.aws.s3.S3Client.put_bucket_encryption", side_effect=put_bucket_encryption_error)
mocker.patch("pcluster.aws.s3.S3Client.put_bucket_policy", side_effect=put_bucket_policy_error)
if put_bucket_versioning_error or put_bucket_encryption_error or put_bucket_policy_error:
with pytest.raises(AWSClientError, match="An error occurred"):
bucket.configure_s3_bucket()
|
horovod/common/exceptions.py | Null233/horovod | 5,089 | 12625628 | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright Microsoft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class HorovodInternalError(RuntimeError):
"""Internal error raised when a Horovod collective operation (e.g., allreduce) fails.
This is handled in elastic mode as a recoverable error, and will result in a reset event.
"""
pass
class HostsUpdatedInterrupt(RuntimeError):
"""Internal interrupt event indicating that the set of hosts in the job has changed.
In elastic mode, this will result in a reset event without a restore to committed state.
"""
def __init__(self, skip_sync):
self.skip_sync = skip_sync
def get_version_mismatch_message(name, version, installed_version):
return f'Framework {name} installed with version {installed_version} but found version {version}.\n\
This can result in unexpected behavior including runtime errors.\n\
Reinstall Horovod using `pip install --no-cache-dir` to build with the new version.'
class HorovodVersionMismatchError(ImportError):
"""Internal error raised when the runtime version of a framework mismatches its version at
Horovod installation time.
"""
def __init__(self, name, version, installed_version):
super().__init__(get_version_mismatch_message(name, version, installed_version))
self.name = name
self.version = version
self.installed_version = installed_version
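# --- Illustrative sketch (not part of the original module) -------------------
# Roughly how an elastic training loop reacts to these exceptions, following
# the docstrings above. The `state` object and `train_one_batch` callable are
# placeholders for this example, not real Horovod APIs.
def _elastic_step(state, train_one_batch):
    try:
        train_one_batch(state)
    except HostsUpdatedInterrupt as interrupt:
        # host set changed: reset without restoring committed state
        if not interrupt.skip_sync:
            state.sync()
    except HorovodInternalError:
        # a collective operation failed: roll back to the last committed state
        state.restore()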
|
examples/attach.py | DrKeineLust/pwntools | 8,966 | 12625699 | <filename>examples/attach.py
"""
Example showing `pwnlib.gdb.attach()`
"""
from pwn import *
bash = process('/bin/bash')
gdb.attach(bash, gdbscript = '''
p "hello from pwnlib"
c
''')
bash.interactive()
|
depthai_helpers/supervisor.py | Davidsastresas/depthai | 476 | 12625772 | <filename>depthai_helpers/supervisor.py<gh_stars>100-1000
import importlib.util
import os
import subprocess
import sys
import time
from pathlib import Path
def createNewArgs(args):
def removeArg(name, withValue=True):
if name in sys.argv:
idx = sys.argv.index(name)
if withValue:
del sys.argv[idx + 1]
del sys.argv[idx]
removeArg("-gt")
removeArg("--guiType")
removeArg("--noSupervisor")
return sys.argv[1:] + ["--noSupervisor", "--guiType", args.guiType]
class Supervisor:
def runDemo(self, args):
repo_root = Path(__file__).parent.parent
args.noSupervisor = True
new_args = createNewArgs(args)
env = os.environ.copy()
if args.guiType == "qt":
new_env = env.copy()
new_env["QT_QUICK_BACKEND"] = "software"
new_env["LD_LIBRARY_PATH"] = str(Path(importlib.util.find_spec("PyQt5").origin).parent / "Qt5/lib")
new_env["DEPTHAI_INSTALL_SIGNAL_HANDLER"] = "0"
try:
subprocess.check_call(' '.join([f'"{sys.executable}"', "depthai_demo.py"] + new_args), env=new_env, shell=True, cwd=repo_root)
except subprocess.CalledProcessError as ex:
print("Error while running demo script... {}".format(ex))
print("Waiting 5s for the device to be discoverable again...")
time.sleep(5)
args.guiType = "cv"
if args.guiType == "cv":
new_env = env.copy()
new_env["DEPTHAI_INSTALL_SIGNAL_HANDLER"] = "0"
new_args = createNewArgs(args)
subprocess.check_call(' '.join([f'"{sys.executable}"', "depthai_demo.py"] + new_args), env=new_env, shell=True, cwd=repo_root)
def checkQtAvailability(self):
return importlib.util.find_spec("PyQt5") is not None
|
env/Lib/site-packages/OpenGL/GL/NV/conservative_raster_dilate.py | 5gconnectedbike/Navio2 | 210 | 12625785 | <reponame>5gconnectedbike/Navio2
'''OpenGL extension NV.conservative_raster_dilate
This module customises the behaviour of the
OpenGL.raw.GL.NV.conservative_raster_dilate to provide a more
Python-friendly API
Overview (from the spec)
This extension extends the conservative rasterization functionality
provided by NV_conservative_raster. It provides a new control to generate
an "over-conservative" rasterization by dilating primitives prior to
rasterization.
When using conservative raster to bin geometry, this extension provides a
programmable overlap region between adjacent primitives. Regular
rasterization bins triangles with a shared edge uniquely into pixels.
Conservative raster has a one-pixel overlap along the shared edge. Using
a half-pixel raster dilation, this overlap region increases to two pixels.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/conservative_raster_dilate.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.conservative_raster_dilate import *
from OpenGL.raw.GL.NV.conservative_raster_dilate import _EXTENSION_NAME
def glInitConservativeRasterDilateNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
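# Illustrative usage sketch (not part of the generated module): the init check
# above is normally called once a GL context is current, e.g.
#
#     if glInitConservativeRasterDilateNV():
#         glConservativeRasterParameterfNV(GL_CONSERVATIVE_RASTER_DILATE_NV, 0.5)
#
# where the entry point and enum come from the raw binding star-imported above.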
### END AUTOGENERATED SECTION |
fedml_api/data_preprocessing/Landmarks/data_loader.py | xuwanwei/FedML | 1,120 | 12625798 | import os
import sys
import time
import logging
import collections
import csv
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from .datasets import Landmarks
def _read_csv(path: str):
"""Reads a csv file, and returns the content inside a list of dictionaries.
Args:
path: The path to the csv file.
Returns:
A list of dictionaries. Each row in the csv file will be a list entry. The
dictionary is keyed by the column names.
"""
with open(path, 'r') as f:
return list(csv.DictReader(f))
# class Cutout(object):
# def __init__(self, length):
# self.length = length
# def __call__(self, img):
# h, w = img.size(1), img.size(2)
# mask = np.ones((h, w), np.float32)
# y = np.random.randint(h)
# x = np.random.randint(w)
# y1 = np.clip(y - self.length // 2, 0, h)
# y2 = np.clip(y + self.length // 2, 0, h)
# x1 = np.clip(x - self.length // 2, 0, w)
# x2 = np.clip(x + self.length // 2, 0, w)
# mask[y1: y2, x1: x2] = 0.
# mask = torch.from_numpy(mask)
# mask = mask.expand_as(img)
# img *= mask
# return img
# def _data_transforms_landmarks():
# landmarks_MEAN = [0.5071, 0.4865, 0.4409]
# landmarks_STD = [0.2673, 0.2564, 0.2762]
# train_transform = transforms.Compose([
# transforms.ToPILImage(),
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize(landmarks_MEAN, landmarks_STD),
# ])
# train_transform.transforms.append(Cutout(16))
# valid_transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize(landmarks_MEAN, landmarks_STD),
# ])
# return train_transform, valid_transform
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_landmarks():
# IMAGENET_MEAN = [0.5071, 0.4865, 0.4409]
# IMAGENET_STD = [0.2673, 0.2564, 0.2762]
IMAGENET_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STD = [0.5, 0.5, 0.5]
image_size = 224
train_transform = transforms.Compose([
# transforms.ToPILImage(),
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
])
train_transform.transforms.append(Cutout(16))
valid_transform = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
])
return train_transform, valid_transform
def get_mapping_per_user(fn):
"""
mapping_per_user is {'user_id': [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... {}],
'user_id': [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... {}],
} or
[{'user_id': xxx, 'image_id': xxx, 'class': xxx} ...
{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... ]
}
"""
mapping_table = _read_csv(fn)
expected_cols = ['user_id', 'image_id', 'class']
if not all(col in mapping_table[0].keys() for col in expected_cols):
        logging.error('%s has wrong format.', fn)
raise ValueError(
'The mapping file must contain user_id, image_id and class columns. '
'The existing columns are %s' % ','.join(mapping_table[0].keys()))
data_local_num_dict = dict()
mapping_per_user = collections.defaultdict(list)
data_files = []
net_dataidx_map = {}
sum_temp = 0
for row in mapping_table:
user_id = row['user_id']
mapping_per_user[user_id].append(row)
for user_id, data in mapping_per_user.items():
num_local = len(mapping_per_user[user_id])
# net_dataidx_map[user_id]= (sum_temp, sum_temp+num_local)
# data_local_num_dict[user_id] = num_local
net_dataidx_map[int(user_id)]= (sum_temp, sum_temp+num_local)
data_local_num_dict[int(user_id)] = num_local
sum_temp += num_local
data_files += mapping_per_user[user_id]
assert sum_temp == len(data_files)
return data_files, data_local_num_dict, net_dataidx_map
# for centralized training
def get_dataloader(dataset, datadir, train_files, test_files, train_bs, test_bs, dataidxs=None):
return get_dataloader_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs)
# for local devices
def get_dataloader_test(dataset, datadir, train_files, test_files, train_bs, test_bs, dataidxs_train, dataidxs_test):
return get_dataloader_test_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs_train, dataidxs_test)
def get_dataloader_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs=None):
dl_obj = Landmarks
transform_train, transform_test = _data_transforms_landmarks()
train_ds = dl_obj(datadir, train_files, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, test_files, dataidxs=None, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)
return train_dl, test_dl
def get_dataloader_test_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None):
dl_obj = Landmarks
transform_train, transform_test = _data_transforms_landmarks()
train_ds = dl_obj(datadir, train_files, dataidxs=dataidxs_train, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, test_files, dataidxs=dataidxs_test, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)
return train_dl, test_dl
def load_partition_data_landmarks(dataset, data_dir, fed_train_map_file, fed_test_map_file,
partition_method=None, partition_alpha=None, client_number=233, batch_size=10):
train_files, data_local_num_dict, net_dataidx_map = get_mapping_per_user(fed_train_map_file)
test_files = _read_csv(fed_test_map_file)
class_num = len(np.unique([item['class'] for item in train_files]))
# logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = len(train_files)
train_data_global, test_data_global = get_dataloader(dataset, data_dir, train_files, test_files, batch_size, batch_size)
# logging.info("train_dl_global number = " + str(len(train_data_global)))
# logging.info("test_dl_global number = " + str(len(test_data_global)))
test_data_num = len(test_files)
# get local dataset
data_local_num_dict = data_local_num_dict
train_data_local_dict = dict()
test_data_local_dict = dict()
for client_idx in range(client_number):
dataidxs = net_dataidx_map[client_idx]
# local_data_num = len(dataidxs)
local_data_num = dataidxs[1] - dataidxs[0]
# data_local_num_dict[client_idx] = local_data_num
# logging.info("client_idx = %d, local_sample_number = %d" % (client_idx, local_data_num))
# training batch size = 64; algorithms batch size = 32
train_data_local, test_data_local = get_dataloader(dataset, data_dir, train_files, test_files, batch_size, batch_size,
dataidxs)
# logging.info("client_idx = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
# client_idx, len(train_data_local), len(test_data_local)))
train_data_local_dict[client_idx] = train_data_local
test_data_local_dict[client_idx] = test_data_local
# logging("data_local_num_dict: %s" % data_local_num_dict)
return train_data_num, test_data_num, train_data_global, test_data_global, \
data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num
if __name__ == '__main__':
data_dir = './cache/images'
fed_g23k_train_map_file = '../../../data/gld/data_user_dict/gld23k_user_dict_train.csv'
fed_g23k_test_map_file = '../../../data/gld/data_user_dict/gld23k_user_dict_test.csv'
fed_g160k_train_map_file = '../../../data/gld/data_user_dict/gld160k_user_dict_train.csv'
fed_g160k_map_file = '../../../data/gld/data_user_dict/gld160k_user_dict_test.csv'
dataset_name = 'g160k'
if dataset_name == 'g23k':
client_number = 233
fed_train_map_file = fed_g23k_train_map_file
fed_test_map_file = fed_g23k_test_map_file
elif dataset_name == 'g160k':
client_number = 1262
fed_train_map_file = fed_g160k_train_map_file
fed_test_map_file = fed_g160k_map_file
train_data_num, test_data_num, train_data_global, test_data_global, \
data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num = \
load_partition_data_landmarks(None, data_dir, fed_train_map_file, fed_test_map_file,
partition_method=None, partition_alpha=None, client_number=client_number, batch_size=10)
print(train_data_num, test_data_num, class_num)
print(data_local_num_dict)
i = 0
for data, label in train_data_global:
print(data)
print(label)
i += 1
if i > 5:
break
print("=============================\n")
for client_idx in range(client_number):
i = 0
for data, label in train_data_local_dict[client_idx]:
print(data)
print(label)
i += 1
if i > 5:
break
|
flask_superadmin/tests/test_sqlamodel.py | romeojulietthotel/Flask-NotSuperAdmin | 414 | 12625801 | <filename>flask_superadmin/tests/test_sqlamodel.py
from nose.tools import eq_, ok_, raises
import wtforms
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import InvalidRequestError
from flask_superadmin import Admin
from flask_superadmin.model.backends.sqlalchemy.view import ModelAdmin
class CustomModelView(ModelAdmin):
def __init__(self, model, session, name=None, category=None,
endpoint=None, url=None, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
super(CustomModelView, self).__init__(model, session, name, category,
endpoint, url)
def create_models(db):
class Model1(db.Model):
id = db.Column(db.Integer, primary_key=True)
test1 = db.Column(db.String(20))
test2 = db.Column(db.Unicode(20))
test3 = db.Column(db.Text)
test4 = db.Column(db.UnicodeText)
def __init__(self, test1=None, test2=None, test3=None, test4=None):
self.test1 = test1
self.test2 = test2
self.test3 = test3
self.test4 = test4
def __unicode__(self):
return self.test1
class Model2(db.Model):
id = db.Column(db.Integer, primary_key=True)
int_field = db.Column(db.Integer)
bool_field = db.Column(db.Boolean)
db.create_all()
return Model1, Model2
def setup():
app = Flask(__name__)
app.config['SECRET_KEY'] = '1'
app.config['WTF_CSRF_ENABLED'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
db = SQLAlchemy(app)
admin = Admin(app)
return app, db, admin
def test_model():
app, db, admin = setup()
Model1, Model2 = create_models(db)
db.create_all()
view = CustomModelView(Model1, db.session)
admin.add_view(view)
eq_(view.model, Model1)
eq_(view.name, 'Model1')
eq_(view.endpoint, 'model1')
eq_(view._primary_key, 'id')
# Verify form
with app.test_request_context():
Form = view.get_form()
ok_(isinstance(Form()._fields['test1'], wtforms.TextField))
ok_(isinstance(Form()._fields['test2'], wtforms.TextField))
ok_(isinstance(Form()._fields['test3'], wtforms.TextAreaField))
ok_(isinstance(Form()._fields['test4'], wtforms.TextAreaField))
# Make some test clients
client = app.test_client()
resp = client.get('/admin/model1/')
eq_(resp.status_code, 200)
resp = client.get('/admin/model1/add/')
eq_(resp.status_code, 200)
resp = client.post('/admin/model1/add/',
data=dict(test1='test1large', test2='test2'))
eq_(resp.status_code, 302)
model = db.session.query(Model1).first()
eq_(model.test1, 'test1large')
eq_(model.test2, 'test2')
eq_(model.test3, '')
eq_(model.test4, '')
resp = client.get('/admin/model1/')
eq_(resp.status_code, 200)
ok_('test1large' in resp.data)
resp = client.get('/admin/model1/%s/' % model.id)
eq_(resp.status_code, 200)
resp = client.post('/admin/model1/%s/' % model.id, data=dict(test1='test1small', test2='test2large'))
eq_(resp.status_code, 302)
model = db.session.query(Model1).first()
eq_(model.test1, 'test1small')
eq_(model.test2, 'test2large')
eq_(model.test3, '')
eq_(model.test4, '')
resp = client.post('/admin/model1/%s/delete/' % model.id)
eq_(resp.status_code, 200)
eq_(db.session.query(Model1).count(), 1)
resp = client.post('/admin/model1/%s/delete/' % model.id, data={'confirm_delete': True})
eq_(resp.status_code, 302)
eq_(db.session.query(Model1).count(), 0)
@raises(InvalidRequestError)
def test_no_pk():
app, db, admin = setup()
class Model(db.Model):
test = db.Column(db.Integer)
view = CustomModelView(Model, db.session)
admin.add_view(view)
def test_list_display():
return
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
list_columns=['test1', 'test3'],
rename_columns=dict(test1='Column1'))
admin.add_view(view)
eq_(len(view._list_columns), 2)
eq_(view._list_columns, [('test1', 'Column1'), ('test3', 'Test3')])
client = app.test_client()
resp = client.get('/admin/model1view/')
ok_('Column1' in resp.data)
ok_('Test2' not in resp.data)
def test_exclude():
return
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
excluded_list_columns=['test2', 'test4'])
admin.add_view(view)
eq_(view._list_columns, [('test1', 'Test1'), ('test3', 'Test3')])
client = app.test_client()
resp = client.get('/admin/model1view/')
ok_('Test1' in resp.data)
ok_('Test2' not in resp.data)
def test_search_fields():
return
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
searchable_columns=['test1', 'test2'])
admin.add_view(view)
eq_(view._search_supported, True)
eq_(len(view._search_fields), 2)
ok_(isinstance(view._search_fields[0], db.Column))
ok_(isinstance(view._search_fields[1], db.Column))
eq_(view._search_fields[0].name, 'test1')
eq_(view._search_fields[1].name, 'test2')
db.session.add(Model1('model1'))
db.session.add(Model1('model2'))
db.session.commit()
client = app.test_client()
resp = client.get('/admin/model1view/?search=model1')
ok_('model1' in resp.data)
ok_('model2' not in resp.data)
def test_url_args():
return
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, db.session,
page_size=2,
searchable_columns=['test1'],
column_filters=['test1'])
admin.add_view(view)
db.session.add(Model1('data1'))
db.session.add(Model1('data2'))
db.session.add(Model1('data3'))
db.session.add(Model1('data4'))
db.session.commit()
client = app.test_client()
resp = client.get('/admin/model1view/')
ok_('data1' in resp.data)
ok_('data3' not in resp.data)
# page
resp = client.get('/admin/model1view/?page=1')
ok_('data1' not in resp.data)
ok_('data3' in resp.data)
# sort
resp = client.get('/admin/model1view/?sort=0&desc=1')
ok_('data1' not in resp.data)
ok_('data3' in resp.data)
ok_('data4' in resp.data)
# search
resp = client.get('/admin/model1view/?search=data1')
ok_('data1' in resp.data)
ok_('data2' not in resp.data)
resp = client.get('/admin/model1view/?search=^data1')
ok_('data2' not in resp.data)
# like
resp = client.get('/admin/model1view/?flt0=0&flt0v=data1')
ok_('data1' in resp.data)
# not like
resp = client.get('/admin/model1view/?flt0=1&flt0v=data1')
ok_('data2' in resp.data)
def test_non_int_pk():
return
app, db, admin = setup()
class Model(db.Model):
id = db.Column(db.String, primary_key=True)
test = db.Column(db.String)
db.create_all()
view = CustomModelView(Model, db.session, form_columns=['id', 'test'])
admin.add_view(view)
client = app.test_client()
resp = client.get('/admin/modelview/')
eq_(resp.status_code, 200)
resp = client.post('/admin/modelview/new/',
data=dict(id='test1', test='test2'))
eq_(resp.status_code, 302)
resp = client.get('/admin/modelview/')
eq_(resp.status_code, 200)
ok_('test1' in resp.data)
resp = client.get('/admin/modelview/edit/?id=test1')
eq_(resp.status_code, 200)
ok_('test2' in resp.data)
def test_reference_linking():
app, db, admin = setup()
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
pet = db.relationship("Dog", uselist=False, backref="person")
def __init__(self, name=None):
self.name = name
class Dog(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
def __init__(self, name=None, person_id=None):
self.name = name
self.person_id = person_id
def __unicode__(self):
return self.name
db.create_all()
class DogAdmin(ModelAdmin):
session = db.session
class PersonAdmin(ModelAdmin):
list_display = ('name', 'pet')
fields = ('name', 'pet')
readonly_fields = ('pet',)
session = db.session
db.session.add(Person(name='Stan'))
db.session.commit()
person = db.session.query(Person).first()
db.session.add(Dog(name='Sparky', person_id=person.id))
db.session.commit()
person = db.session.query(Person).first()
dog = db.session.query(Dog).first()
admin.register(Dog, DogAdmin, name='Dogs')
admin.register(Person, PersonAdmin, name='People')
client = app.test_client()
# test linking on a list page
resp = client.get('/admin/person/')
dog_link = '<a href="/admin/dog/%s/">Sparky</a>' % dog.id
ok_(dog_link in resp.data)
# test linking on an edit page
resp = client.get('/admin/person/%s/' % person.id)
ok_('<input class="" id="name" name="name" type="text" value="Stan">' in resp.data)
ok_(dog_link in resp.data)
|
limits/storage/base.py | mymedia2/limits | 140 | 12625804 | import threading
from abc import ABCMeta, abstractmethod
import six
from limits.storage.registry import StorageRegistry
@six.add_metaclass(StorageRegistry)
@six.add_metaclass(ABCMeta)
class Storage(object):
"""
Base class to extend when implementing a storage backend.
"""
def __init__(self, uri=None, **options):
self.lock = threading.RLock()
@abstractmethod
def incr(self, key, expiry, elastic_expiry=False):
"""
increments the counter for a given rate limit key
:param str key: the key to increment
:param int expiry: amount in seconds for the key to expire in
:param bool elastic_expiry: whether to keep extending the rate limit
window every hit.
"""
raise NotImplementedError
@abstractmethod
def get(self, key):
"""
:param str key: the key to get the counter value for
"""
raise NotImplementedError
@abstractmethod
def get_expiry(self, key):
"""
:param str key: the key to get the expiry for
"""
raise NotImplementedError
@abstractmethod
def check(self):
"""
check if storage is healthy
"""
raise NotImplementedError
@abstractmethod
def reset(self):
"""
reset storage to clear limits
"""
raise NotImplementedError
@abstractmethod
def clear(self, key):
"""
resets the rate limit key
:param str key: the key to clear rate limits for
"""
raise NotImplementedError
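# --- Illustrative sketch (not part of the original module) -------------------
# A minimal in-memory backend implementing the abstract interface above, for
# example purposes only (the library's real MemoryStorage is more involved and
# registers itself under its own storage scheme).
import time
class DictStorage(Storage):
    STORAGE_SCHEME = "local-dict"
    def __init__(self, uri=None, **options):
        super(DictStorage, self).__init__(uri, **options)
        self.counters = {}
        self.expirations = {}
    def incr(self, key, expiry, elastic_expiry=False):
        with self.lock:
            now = time.time()
            if self.expirations.get(key, 0) <= now:
                # window expired (or key unseen): start a fresh counter
                self.counters[key] = 0
                self.expirations[key] = now + expiry
            self.counters[key] += 1
            if elastic_expiry:
                # keep extending the window on every hit
                self.expirations[key] = now + expiry
            return self.counters[key]
    def get(self, key):
        return self.counters.get(key, 0)
    def get_expiry(self, key):
        return self.expirations.get(key, 0)
    def check(self):
        return True
    def reset(self):
        self.counters.clear()
        self.expirations.clear()
    def clear(self, key):
        self.counters.pop(key, None)
        self.expirations.pop(key, None)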
|
src/condor_tests/test_late_materialization.py | datadvance/htcondor | 217 | 12625823 | <filename>src/condor_tests/test_late_materialization.py
#!/usr/bin/env pytest
# this test replicates the first part of job_late_materialize_py
import logging
import htcondor
from ornithology import (
config,
standup,
action,
Condor,
write_file,
parse_submit_result,
JobID,
SetAttribute,
SetJobStatus,
JobStatus,
in_order,
SCRIPTS,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@standup
def condor(test_dir):
with Condor(
local_dir=test_dir / "condor",
config={
"NUM_CPUS": "10",
"NUM_SLOTS": "10", # must be larger than the max number of jobs we hope to materialize
"SCHEDD_MATERIALIZE_LOG": "$(LOG)/MaterializeLog",
"SCHEDD_DEBUG": "D_MATERIALIZE:2 D_CAT $(SCHEDD_DEBUG)",
},
) as condor:
yield condor
MAX_IDLE = {"idle=2": 2, "idle=3": 3, "idle=5": 5}
MAX_MATERIALIZE = {"materialize=2": 2, "materialize=3": 3, "materialize=5": 5}
@action(params=MAX_IDLE)
def max_idle(request):
return request.param
@action(params=MAX_MATERIALIZE)
def max_materialize(request):
return request.param
@action
def jobids_for_sleep_jobs(test_dir, path_to_sleep, condor, max_idle, max_materialize):
sub_description = """
executable = {exe}
arguments = 1
request_memory = 1MB
request_disk = 1MB
max_materialize = {max_materialize}
max_idle = {max_idle}
queue {q}
""".format(
exe=path_to_sleep,
max_materialize=max_materialize,
max_idle=max_idle,
q=max_materialize + max_idle + 1,
)
submit_file = write_file(test_dir / "queue.sub", sub_description)
submit_cmd = condor.run_command(["condor_submit", submit_file])
clusterid, num_procs = parse_submit_result(submit_cmd)
jobids = [JobID(clusterid, n) for n in range(num_procs)]
condor.job_queue.wait_for_events(
{jobid: [SetJobStatus(JobStatus.COMPLETED)] for jobid in jobids}
)
return jobids
@action
def num_materialized_jobs_history(condor, jobids_for_sleep_jobs):
num_materialized = 0
history = []
for jobid, event in condor.job_queue.filter(
lambda j, e: j in jobids_for_sleep_jobs
):
if event == SetJobStatus(JobStatus.IDLE):
num_materialized += 1
if event == SetJobStatus(JobStatus.COMPLETED):
num_materialized -= 1
history.append(num_materialized)
return history
@action
def num_idle_jobs_history(condor, jobids_for_sleep_jobs):
num_idle = 0
history = []
for jobid, event in condor.job_queue.filter(
lambda j, e: j in jobids_for_sleep_jobs
):
if event == SetJobStatus(JobStatus.IDLE):
num_idle += 1
if event == SetJobStatus(JobStatus.RUNNING):
num_idle -= 1
history.append(num_idle)
return history
class TestLateMaterializationLimits:
def test_all_jobs_ran(self, condor, jobids_for_sleep_jobs):
for jobid in jobids_for_sleep_jobs:
assert in_order(
condor.job_queue.by_jobid[jobid],
[
SetJobStatus(JobStatus.IDLE),
SetJobStatus(JobStatus.RUNNING),
SetJobStatus(JobStatus.COMPLETED),
],
)
def test_never_more_materialized_than_max(
self, num_materialized_jobs_history, max_materialize
):
assert max(num_materialized_jobs_history) <= max_materialize
def test_hit_max_materialize_limit(
self, num_materialized_jobs_history, max_materialize
):
assert max_materialize in num_materialized_jobs_history
def test_never_more_idle_than_max(
self, num_idle_jobs_history, max_idle, max_materialize
):
assert max(num_idle_jobs_history) <= min(max_idle, max_materialize)
def test_hit_max_idle_limit(self, num_idle_jobs_history, max_idle, max_materialize):
assert min(max_idle, max_materialize) in num_idle_jobs_history
@action
def clusterid_for_itemdata(test_dir, path_to_sleep, condor):
# enable late materialization, but with a high enough limit that they all
# show up immediately (on hold, because we don't need to actually run
# the jobs to do the tests)
sub_description = """
executable = {exe}
arguments = 0
request_memory = 1MB
request_disk = 1MB
max_materialize = 5
hold = true
My.Foo = "$(Item)"
queue in (A, B, C, D, E)
""".format(
exe=path_to_sleep,
)
submit_file = write_file(test_dir / "queue_in.sub", sub_description)
submit_cmd = condor.run_command(["condor_submit", submit_file])
clusterid, num_procs = parse_submit_result(submit_cmd)
jobids = [JobID(clusterid, n) for n in range(num_procs)]
condor.job_queue.wait_for_events(
{jobid: [SetAttribute("Foo", None)] for jobid in jobids}
)
yield clusterid
condor.run_command(["condor_rm", clusterid])
class TestLateMaterializationItemdata:
def test_itemdata_turns_into_job_attributes(self, condor, clusterid_for_itemdata):
actual = {}
for jobid, event in condor.job_queue.filter(
lambda j, e: j.cluster == clusterid_for_itemdata
):
# the My. doesn't end up being part of the key in the jobad
if event.matches(SetAttribute("Foo", None)):
actual[jobid] = event.value
expected = {
# first item gets put on the clusterad!
JobID(clusterid_for_itemdata, -1): '"A"',
JobID(clusterid_for_itemdata, 1): '"B"',
JobID(clusterid_for_itemdata, 2): '"C"',
JobID(clusterid_for_itemdata, 3): '"D"',
JobID(clusterid_for_itemdata, 4): '"E"',
}
assert actual == expected
def test_query_produces_expected_results(self, condor, clusterid_for_itemdata):
with condor.use_config():
schedd = htcondor.Schedd()
ads = schedd.query(
constraint="clusterid == {}".format(clusterid_for_itemdata),
# the My. doesn't end up being part of the key in the jobad
projection=["clusterid", "procid", "foo"],
)
actual = [ad["foo"] for ad in sorted(ads, key=lambda ad: int(ad["procid"]))]
expected = list("ABCDE")
assert actual == expected
|
desktop/core/ext-py/nose-1.3.7/examples/plugin/setup.py | kokosing/hue | 5,079 | 12625839 | <reponame>kokosing/hue
"""
An example of how to create a simple nose plugin.
"""
try:
import ez_setup
ez_setup.use_setuptools()
except ImportError:
pass
from setuptools import setup
setup(
name='Example plugin',
version='0.1',
author='<NAME>',
author_email = '<EMAIL>',
description = 'Example nose plugin',
license = 'GNU LGPL',
py_modules = ['plug'],
entry_points = {
'nose.plugins.0.10': [
'example = plug:ExamplePlugin'
]
}
)
|
mmtbx/command_line/nqh_minimize.py | dperl-sol/cctbx_project | 155 | 12625852 |
from __future__ import absolute_import, division, print_function
import sys
from mmtbx.validation.molprobity import nqh_minimize
if __name__ == "__main__":
nqh_minimize.run(sys.argv[1:])
|
hubspot/hubspot.py | fakepop/hubspot-api-python | 117 | 12625855 | <reponame>fakepop/hubspot-api-python<gh_stars>100-1000
from .client import Client
class HubSpot(Client):
pass
|
cuesubmit/cuesubmit/ui/Frame.py | mb0rt/OpenCue | 334 | 12625864 | <reponame>mb0rt/OpenCue
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Widget for entering a frame spec."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from cuesubmit.ui import Widgets
class FrameSpecWidget(Widgets.CueHelpWidget):
"""Widget for entering a frame spec."""
helpText = 'Enter a FrameSpec value.\n' \
'A frame spec consists of a start time, an optional end time, a step, ' \
'and an interleave.\n' \
'Multiple ranges can be added together by separating with commas.\n' \
' Ex:\n' \
' 1-10x3\n' \
' 1-10y3 // inverted step\n' \
' 10-1x-1\n' \
' 1 // same as "1-1x1"\n' \
' 1-10:5 // interleave of 5\n' \
' 1-5x2, 6-10 // 1 through 5 with a step of 2 and 6 through 10\n'
def __init__(self, parent=None):
super(FrameSpecWidget, self).__init__(parent)
self.frameSpecInput = Widgets.CueLabelLineEdit('Frame Spec:')
self.contentLayout.addWidget(self.frameSpecInput)
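# --- Illustrative helper (not part of the original widget) -------------------
# A simplified sketch of how the frame spec syntax described in helpText can be
# expanded. It only handles ascending "start-endxstep" ranges and comma
# separated lists; inverted steps ("y"), negative steps and interleave (":")
# are ignored here.
def _expandSimpleSpec(spec):
    frames = []
    for chunk in spec.replace(' ', '').split(','):
        rangePart, _, step = chunk.partition('x')
        start, _, end = rangePart.partition('-')
        start, end = int(start), int(end or start)
        frames.extend(range(start, end + 1, int(step or 1)))
    return frames
# _expandSimpleSpec('1-5x2, 6-10') -> [1, 3, 5, 6, 7, 8, 9, 10]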
|
scripts/pendulum_irl.py | SaminYeasar/inverse_rl | 220 | 12625906 | import tensorflow as tf
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from inverse_rl.algos.irl_trpo import IRLTRPO
from inverse_rl.models.imitation_learning import AIRLStateAction
from inverse_rl.utils.log_utils import rllab_logdir, load_latest_experts
def main():
env = TfEnv(GymEnv('Pendulum-v0', record_video=False, record_log=False))
experts = load_latest_experts('data/pendulum', n=5)
irl_model = AIRLStateAction(env_spec=env.spec, expert_trajs=experts)
policy = GaussianMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
algo = IRLTRPO(
env=env,
policy=policy,
irl_model=irl_model,
n_itr=200,
batch_size=1000,
max_path_length=100,
discount=0.99,
store_paths=True,
discrim_train_itrs=50,
irl_model_wt=1.0,
entropy_weight=0.1, # this should be 1.0 but 0.1 seems to work better
zero_environment_reward=True,
baseline=LinearFeatureBaseline(env_spec=env.spec)
)
with rllab_logdir(algo=algo, dirname='data/pendulum_gcl'):
with tf.Session():
algo.train()
if __name__ == "__main__":
main()
|
sdk/communication/azure-communication-identity/azure/communication/identity/_generated/models/__init__.py | vincenttran-msft/azure-sdk-for-python | 2,728 | 12625911 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import CommunicationError
from ._models_py3 import CommunicationErrorResponse
from ._models_py3 import CommunicationIdentity
from ._models_py3 import CommunicationIdentityAccessToken
from ._models_py3 import CommunicationIdentityAccessTokenRequest
from ._models_py3 import CommunicationIdentityAccessTokenResult
from ._models_py3 import CommunicationIdentityCreateRequest
from ._models_py3 import TeamsUserAccessTokenRequest
except (SyntaxError, ImportError):
from ._models import CommunicationError # type: ignore
from ._models import CommunicationErrorResponse # type: ignore
from ._models import CommunicationIdentity # type: ignore
from ._models import CommunicationIdentityAccessToken # type: ignore
from ._models import CommunicationIdentityAccessTokenRequest # type: ignore
from ._models import CommunicationIdentityAccessTokenResult # type: ignore
from ._models import CommunicationIdentityCreateRequest # type: ignore
from ._models import TeamsUserAccessTokenRequest # type: ignore
from ._communication_identity_client_enums import (
CommunicationTokenScope,
)
__all__ = [
'CommunicationError',
'CommunicationErrorResponse',
'CommunicationIdentity',
'CommunicationIdentityAccessToken',
'CommunicationIdentityAccessTokenRequest',
'CommunicationIdentityAccessTokenResult',
'CommunicationIdentityCreateRequest',
'TeamsUserAccessTokenRequest',
'CommunicationTokenScope',
]
|
sknetwork/utils/tests/test_projection_simplex.py | altana-tech/scikit-network | 457 | 12625948 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""tests for simplex.py"""
import unittest
import numpy as np
from scipy import sparse
from sknetwork.utils.check import is_proba_array
from sknetwork.utils.simplex import projection_simplex
class TestProjSimplex(unittest.TestCase):
def test_array(self):
x = np.random.rand(5)
proj = projection_simplex(x)
self.assertTrue(is_proba_array(proj))
x = np.random.rand(4, 3)
proj = projection_simplex(x)
self.assertTrue(is_proba_array(proj))
def test_csr(self):
x = sparse.csr_matrix(np.ones((3, 3)))
proj1 = projection_simplex(x)
proj2 = projection_simplex(x.astype(bool))
self.assertEqual(0, (proj1-proj2).nnz)
def test_other(self):
with self.assertRaises(TypeError):
projection_simplex('toto')
|
api/chalicelib/core/mobile.py | nogamenofun98/openreplay | 3,614 | 12625978 | <reponame>nogamenofun98/openreplay
from chalicelib.core import projects
from chalicelib.utils import s3
from chalicelib.utils.helper import environ
def sign_keys(project_id, session_id, keys):
result = []
project_key = projects.get_project_key(project_id)
for k in keys:
result.append(s3.get_presigned_url_for_sharing(bucket=environ["iosBucket"],
key=f"{project_key}/{session_id}/{k}",
expires_in=60 * 60))
return result
|
Lib/test/test_importlib/builtin/test_loader.py | deadsnakes/python3.3 | 652 | 12625983 | <reponame>deadsnakes/python3.3
import importlib
from importlib import machinery
from .. import abc
from .. import util
from . import util as builtin_util
import sys
import types
import unittest
class LoaderTests(abc.LoaderTests):
"""Test load_module() for built-in modules."""
verification = {'__name__': 'errno', '__package__': '',
'__loader__': machinery.BuiltinImporter}
def verify(self, module):
"""Verify that the module matches against what it should have."""
self.assertIsInstance(module, types.ModuleType)
for attr, value in self.verification.items():
self.assertEqual(getattr(module, attr), value)
self.assertIn(module.__name__, sys.modules)
load_module = staticmethod(lambda name:
machinery.BuiltinImporter.load_module(name))
def test_module(self):
# Common case.
with util.uncache(builtin_util.NAME):
module = self.load_module(builtin_util.NAME)
self.verify(module)
# Built-in modules cannot be a package.
test_package = test_lacking_parent = None
# No way to force an import failure.
test_state_after_failure = None
def test_module_reuse(self):
# Test that the same module is used in a reload.
with util.uncache(builtin_util.NAME):
module1 = self.load_module(builtin_util.NAME)
module2 = self.load_module(builtin_util.NAME)
self.assertIs(module1, module2)
def test_unloadable(self):
name = 'dssdsdfff'
assert name not in sys.builtin_module_names
with self.assertRaises(ImportError) as cm:
self.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_already_imported(self):
# Using the name of a module already imported but not a built-in should
# still fail.
assert hasattr(importlib, '__file__') # Not a built-in.
with self.assertRaises(ImportError) as cm:
self.load_module('importlib')
self.assertEqual(cm.exception.name, 'importlib')
class InspectLoaderTests(unittest.TestCase):
"""Tests for InspectLoader methods for BuiltinImporter."""
def test_get_code(self):
# There is no code object.
result = machinery.BuiltinImporter.get_code(builtin_util.NAME)
self.assertIsNone(result)
def test_get_source(self):
# There is no source.
result = machinery.BuiltinImporter.get_source(builtin_util.NAME)
self.assertIsNone(result)
def test_is_package(self):
# Cannot be a package.
result = machinery.BuiltinImporter.is_package(builtin_util.NAME)
self.assertTrue(not result)
def test_not_builtin(self):
# Modules not built-in should raise ImportError.
for meth_name in ('get_code', 'get_source', 'is_package'):
method = getattr(machinery.BuiltinImporter, meth_name)
with self.assertRaises(ImportError) as cm:
method(builtin_util.BAD_NAME)
            self.assertEqual(cm.exception.name, builtin_util.BAD_NAME)
def test_main():
from test.support import run_unittest
run_unittest(LoaderTests, InspectLoaderTests)
if __name__ == '__main__':
test_main()
|
raiden/transfer/identifiers.py | tirkarthi/raiden | 2,101 | 12626008 | from dataclasses import dataclass
from raiden.constants import EMPTY_ADDRESS, UINT256_MAX
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
ChainID,
ChannelID,
T_Address,
T_ChainID,
T_ChannelID,
TokenNetworkAddress,
typecheck,
)
@dataclass(frozen=True, order=True)
class CanonicalIdentifier:
chain_identifier: ChainID
token_network_address: TokenNetworkAddress
channel_identifier: ChannelID
def validate(self) -> None:
typecheck(self.chain_identifier, T_ChainID)
typecheck(self.token_network_address, T_Address)
typecheck(self.channel_identifier, T_ChannelID)
if self.channel_identifier < 0 or self.channel_identifier > UINT256_MAX:
raise ValueError("channel id is invalid")
def __str__(self) -> str:
return (
"CanonicalIdentifier("
f"chain_identifier={self.chain_identifier}, "
f"token_network_address={to_checksum_address(self.token_network_address)}, "
f"channel_identifier={self.channel_identifier}"
")"
)
@dataclass(frozen=True)
class QueueIdentifier:
recipient: Address
canonical_identifier: CanonicalIdentifier
def __str__(self) -> str:
return (
"QueueIdentifier("
f"recipient={to_checksum_address(self.recipient)}, "
f"canonical_identifier={self.canonical_identifier}"
")"
)
CANONICAL_IDENTIFIER_UNORDERED_QUEUE = CanonicalIdentifier(
ChainID(0), TokenNetworkAddress(EMPTY_ADDRESS), ChannelID(0)
)
|
environ.py | XuhanLiu/DrugEx | 109 | 12626033 | #!/usr/bin/env python
import numpy as np
import pandas as pd
import torch
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler as Scaler
from sklearn.cross_decomposition import PLSRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC, SVR
from sklearn.model_selection import StratifiedKFold, KFold
from torch.utils.data import DataLoader, TensorDataset
import models
import os
import utils
import joblib
from copy import deepcopy
from rdkit import Chem
def SVM(X, y, X_ind, y_ind, reg=False):
""" Cross validation and Independent test for SVM classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
        reg (bool): if True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if reg:
folds = KFold(5).split(X)
alg = SVR()
else:
folds = StratifiedKFold(5).split(X, y)
alg = SVC(probability=True)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
gs = GridSearchCV(deepcopy(alg), {'C': 2.0 ** np.array([-15, 15]), 'gamma': 2.0 ** np.array([-15, 15])}, n_jobs=10)
gs.fit(X, y)
params = gs.best_params_
print(params)
for i, (trained, valided) in enumerate(folds):
model = deepcopy(alg)
model.C = params['C']
model.gamma = params['gamma']
if not reg:
model.probability=True
model.fit(X[trained], y[trained], sample_weight=[1 if v >= 4 else 0.1 for v in y[trained]])
if reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
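# --- Hypothetical usage sketch (added for illustration; not part of the
# original module). It shows how the cross-validation helpers in this file
# (SVM above, and RF/KNN/NB/PLS/DNN below) are meant to be called: X/y is the
# modelling set, X_ind/y_ind a held-out set, and each helper returns
# per-sample cross-validated scores plus averaged independent-set predictions.
# The array shapes and the 6.5 activity cut-off are illustrative assumptions.
def _demo_cross_validation_usage():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 64)                      # e.g. 100 compounds x 64 descriptors
    activity = rng.uniform(4.0, 9.0, 100)      # fake pChEMBL-like values
    y = (activity > 6.5).astype(float)         # binarised labels for classification
    X_ind = rng.rand(20, 64)
    y_ind = (rng.uniform(4.0, 9.0, 20) > 6.5).astype(float)
    cvs, inds = SVM(X, y, X_ind, y_ind, reg=False)
    return cvs.shape, inds.shape               # (100,) and (20,)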
def RF(X, y, X_ind, y_ind, reg=False):
""" Cross validation and Independent test for RF classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
        reg (bool): if True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if reg:
folds = KFold(5).split(X)
alg = RandomForestRegressor
else:
folds = StratifiedKFold(5).split(X, y)
alg = RandomForestClassifier
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = alg(n_estimators=1000, n_jobs=10)
model.fit(X[trained], y[trained], sample_weight=[1 if v >= 4 else 0.1 for v in y[trained]])
if reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def KNN(X, y, X_ind, y_ind, reg=False):
""" Cross validation and Independent test for KNN classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
        reg (bool): if True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if reg:
folds = KFold(5).split(X)
alg = KNeighborsRegressor
else:
folds = StratifiedKFold(5).split(X, y)
alg = KNeighborsClassifier
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = alg(n_jobs=10)
model.fit(X[trained], y[trained])
if reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def NB(X, y, X_ind, y_ind):
""" Cross validation and Independent test for Naive Bayes classifion model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
folds = KFold(5).split(X)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = GaussianNB()
model.fit(X[trained], y[trained], sample_weight=[1 if v >= 4 else 0.1 for v in y[trained]])
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def PLS(X, y, X_ind, y_ind):
""" Cross validation and Independent test for PLS regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
        reg (bool): if True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
folds = KFold(5).split(X)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = PLSRegression()
model.fit(X[trained], y[trained])
cvs[valided] = model.predict(X[valided])[:, 0]
inds += model.predict(X_ind)[:, 0]
return cvs, inds / 5
def DNN(X, y, X_ind, y_ind, out, reg=False):
""" Cross validation and Independent test for DNN classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m x l label matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label arrays for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
        reg (bool): if True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if y.shape[1] > 1 or reg:
folds = KFold(5).split(X)
else:
folds = StratifiedKFold(5).split(X, y[:, 0])
NET = models.STFullyConnected if y.shape[1] == 1 else models.MTFullyConnected
indep_set = TensorDataset(torch.Tensor(X_ind), torch.Tensor(y_ind))
indep_loader = DataLoader(indep_set, batch_size=BATCH_SIZE)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
train_set = TensorDataset(torch.Tensor(X[trained]), torch.Tensor(y[trained]))
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE)
valid_set = TensorDataset(torch.Tensor(X[valided]), torch.Tensor(y[valided]))
valid_loader = DataLoader(valid_set, batch_size=BATCH_SIZE)
net = NET(X.shape[1], y.shape[1], is_reg=reg)
net.fit(train_loader, valid_loader, out='%s_%d' % (out, i), epochs=N_EPOCH, lr=LR)
cvs[valided] = net.predict(valid_loader)
inds += net.predict(indep_loader)
return cvs, inds / 5
def Train_RF(X, y, out, reg=False):
if reg:
model = RandomForestRegressor(n_estimators=1000, n_jobs=10)
else:
model = RandomForestClassifier(n_estimators=1000, n_jobs=10)
model.fit(X, y, sample_weight=[1 if v >= 4 else 0.1 for v in y])
joblib.dump(model, out, compress=3)
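# --- Hypothetical usage sketch (added for illustration; not part of the
# original module). Train_RF() persists the fitted forest with joblib, so a
# saved classifier can be reloaded later for scoring. The default path below
# mirrors the naming used in single_task(); the 2048-column fingerprint width
# and the use of predict_proba (classification only) are assumptions for the
# example, not values taken from this project.
def _demo_load_trained_rf(path='output/single/RF_CLS_CHEMBL226.pkg'):
    model = joblib.load(path)                    # RandomForestClassifier saved by Train_RF
    fps = np.random.rand(10, 2048)               # stand-in fingerprint matrix; the width must
                                                 # match the fingerprints used for training
    return model.predict_proba(fps)[:, 1]        # probability of the active class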
def mt_task(fname, out, reg=False, is_extra=True, time_split=False):
df = pd.read_table(fname)[pair].dropna(subset=pair[1:2])
df = df[df.Target_ChEMBL_ID.isin(trgs)]
year = df.groupby(pair[1])[pair[-1:]].min().dropna()
year = year[year.Document_Year > 2015].index
df = df[pair].set_index(pair[0:2])
numery = df[pair[2]].groupby(pair[0:2]).mean().dropna()
comments = df[(df.Comment.str.contains('Not Active') == True)]
inhibits = df[(df.Standard_Type == 'Inhibition') & df.Standard_Relation.isin(['<', '<='])]
relations = df[df.Standard_Type.isin(['EC50', 'IC50', 'Kd', 'Ki']) & df.Standard_Relation.isin(['>', '>='])]
binary = pd.concat([comments, inhibits, relations], axis=0)
binary = binary[~binary.index.isin(numery.index)]
binary[pair[2]] = 3.99
binary = binary[pair[2]].groupby(pair[0:2]).first()
df = numery.append(binary) if is_extra else numery
if not reg:
df[pair[2]] = (df[pair[2]] > th).astype(float)
df = df.unstack(pair[0])
test_ix = set(df.index).intersection(year)
df_test = df.loc[test_ix] if time_split else df.sample(len(test_ix))
df_data = df.drop(df_test.index)
df_data = df_data.sample(len(df_data))
for alg in ['RF', 'MT_DNN', 'SVM', 'PLS', 'KNN', 'DNN']:
if alg == 'MT_DNN':
test_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in df_test.index])
data_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in df_data.index])
scaler = Scaler(); scaler.fit(data_x)
test_x = scaler.transform(test_x)
data_x = scaler.transform(data_x)
data = df_data.stack().to_frame(name='Label')
test = df_test.stack().to_frame(name='Label')
data_p, test_p = DNN(data_x, df_data.values, test_x, df_test.values, out=out, reg=reg)
data['Score'] = pd.DataFrame(data_p, index=df_data.index, columns=df_data.columns).stack()
test['Score'] = pd.DataFrame(test_p, index=df_test.index, columns=df_test.columns).stack()
data.to_csv(out + alg + '_LIGAND.cv.tsv', sep='\t')
test.to_csv(out + alg + '_LIGAND.ind.tsv', sep='\t')
else:
for trg in trgs:
test_y = df_test[trg].dropna()
data_y = df_data[trg].dropna()
test_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in test_y.index])
data_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in data_y.index])
if alg != 'RF':
scaler = Scaler(); scaler.fit(data_x)
test_x = scaler.transform(test_x)
data_x = scaler.transform(data_x)
else:
X = np.concatenate([data_x, test_x], axis=0)
y = np.concatenate([data_y.values, test_y.values], axis=0)
Train_RF(X, y, out=out + '%s_%s.pkg' % (alg, trg), reg=reg)
data, test = data_y.to_frame(name='Label'), test_y.to_frame(name='Label')
a, b = cross_validation(data_x, data.values, test_x, test.values,
alg, out + '%s_%s' % (alg, trg), reg=reg)
data['Score'], test['Score'] = a, b
data.to_csv(out + '%s_%s.cv.tsv' % (alg, trg), sep='\t')
test.to_csv(out + '%s_%s.ind.tsv' % (alg, trg), sep='\t')
def single_task(feat, alg='RF', reg=False, is_extra=True):
df = pd.read_table('data/LIGAND_RAW.tsv').dropna(subset=pair[1:2])
df = df[df[pair[0]] == feat]
df = df[pair].set_index(pair[1])
year = df[pair[-1:]].groupby(pair[1]).min().dropna()
test = year[year[pair[-1]] > 2015].index
numery = df[pair[2]].groupby(pair[1]).mean().dropna()
comments = df[(df.Comment.str.contains('Not Active') == True)]
inhibits = df[(df.Standard_Type == 'Inhibition') & df.Standard_Relation.isin(['<', '<='])]
relations = df[df.Standard_Type.isin(['EC50', 'IC50', 'Kd', 'Ki']) & df.Standard_Relation.isin(['>', '>='])]
binary = pd.concat([comments, inhibits, relations], axis=0)
binary = binary[~binary.index.isin(numery.index)]
binary[pair[2]] = 3.99
binary = binary[pair[2]].groupby(binary.index).first()
df = numery.append(binary) if is_extra else numery
if not reg:
df = (df > th).astype(float)
df = df.sample(len(df))
print(feat, len(numery[numery >= th]), len(numery[numery < th]), len(binary))
test_ix = set(df.index).intersection(test)
test = df.loc[test_ix].dropna()
data = df.drop(test.index)
test_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in test.index])
data_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in data.index])
out = 'output/single/%s_%s_%s' % (alg, 'REG' if reg else 'CLS', feat)
if alg != 'RF':
scaler = Scaler(); scaler.fit(data_x)
test_x = scaler.transform(test_x)
data_x = scaler.transform(data_x)
else:
X = np.concatenate([data_x, test_x], axis=0)
y = np.concatenate([data.values, test.values], axis=0)
Train_RF(X, y[:, 0], out=out + '.pkg', reg=reg)
data, test = data.to_frame(name='Label'), test.to_frame(name='Label')
data['Score'], test['Score'] = cross_validation(data_x, data.values, test_x, test.values, alg, out, reg=reg)
data.to_csv(out + '.cv.tsv', sep='\t')
test.to_csv(out + '.ind.tsv', sep='\t')
def cross_validation(X, y, X_ind, y_ind, alg='DNN', out=None, reg=False):
if alg == 'RF':
cv, ind = RF(X, y[:, 0], X_ind, y_ind[:, 0], reg=reg)
elif alg == 'SVM':
cv, ind = SVM(X, y[:, 0], X_ind, y_ind[:, 0], reg=reg)
elif alg == 'KNN':
cv, ind = KNN(X, y[:, 0], X_ind, y_ind[:, 0], reg=reg)
elif alg == 'NB':
cv, ind = NB(X, y[:, 0], X_ind, y_ind[:, 0])
elif alg == 'PLS':
cv, ind = PLS(X, y[:, 0], X_ind, y_ind[:, 0])
elif alg == 'DNN':
cv, ind = DNN(X, y, X_ind, y_ind, out=out, reg=reg)
return cv, ind
if __name__ == '__main__':
pair = ['Target_ChEMBL_ID', 'Smiles', 'pChEMBL_Value', 'Comment',
'Standard_Type', 'Standard_Relation', 'Document_Year']
BATCH_SIZE = int(2 ** 11)
N_EPOCH = 1000
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
th= 6.5
trgs = ['CHEMBL226', 'CHEMBL251', 'CHEMBL240']
for reg in [False, True]:
LR = 1e-4 if reg else 1e-5
for chembl in trgs:
single_task(chembl, 'DNN', reg=reg)
single_task(chembl, 'RF', reg=reg)
single_task(chembl, 'SVM', reg=reg)
if reg:
single_task(chembl, 'PLS', reg=reg)
else:
single_task(chembl, 'NB', reg=reg)
single_task(chembl, 'KNN', reg=reg)
mt_task('data/LIGAND_RAW.tsv', 'output/random_split/', reg=reg, time_split=False)
mt_task('data/LIGAND_RAW.tsv', 'output/time_split/', reg=reg, time_split=True)
|
kolibri/core/__init__.py | MBKayro/kolibri | 545 | 12626036 | <reponame>MBKayro/kolibri
"""TODO: Write something about this module (everything in the docstring
enters the docs)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
default_app_config = "kolibri.core.apps.KolibriCoreConfig"
|
koalixcrm/crm/factories/factory_payment_reminder.py | Cataldir/koalixcrm | 290 | 12626041 | # -*- coding: utf-8 -*-
from koalixcrm.crm.models import PaymentReminder
from koalixcrm.crm.factories.factory_sales_document import StandardSalesDocumentFactory
class StandardPaymentReminderFactory(StandardSalesDocumentFactory):
class Meta:
model = PaymentReminder
payable_until = "2018-05-20"
payment_bank_reference = "This is a bank account reference"
iteration_number = "1"
status = "C"
|
src/exabgp/bgp/message/nop.py | pierky/exabgp | 1,560 | 12626047 | <gh_stars>1000+
# encoding: utf-8
"""
nop.py
Created by <NAME> on 2009-11-06.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.bgp.message.message import Message
# ========================================================================= NOP
#
class NOP(Message):
ID = Message.CODE.NOP
TYPE = bytes([Message.CODE.NOP])
def message(self, negotiated=None):
raise RuntimeError('NOP messages can not be sent on the wire')
def __str__(self):
return "NOP"
@classmethod
def unpack_message(cls, data, direction, negotiated): # pylint: disable=W0613
return NOP()
_NOP = NOP()
|
pywsd/__init__.py | mihal277/pywsd | 581 | 12626061 | <reponame>mihal277/pywsd<gh_stars>100-1000
#!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD)
#
# Copyright (C) 2014-2020 alvations
# URL:
# For license information, see LICENSE.md
from __future__ import absolute_import, print_function
import sys
import time
from wn import WordNet
from wn.constants import wordnet_30_dir
__builtins__['wn'] = WordNet(wordnet_30_dir)
__version__ = '1.2.4'
# Warm up the library.
print('Warming up PyWSD (takes ~10 secs)...', end=' ', file=sys.stderr, flush=True)
start = time.time()
from pywsd.lesk import *
from pywsd.baseline import *
from pywsd.similarity import *
#import semcor
#import semeval
from pywsd.allwords_wsd import disambiguate
simple_lesk('This is a foo bar sentence', 'bar')
print('took {} secs.'.format(time.time()-start), file=sys.stderr)
|
fairseq/tokenizer.py | fairseq-FT/fairseq | 16,259 | 12626104 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
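# --- Hypothetical usage sketch (added for illustration; not part of fairseq).
# tokenize_line() collapses any run of whitespace into single spaces before
# splitting, so tabs, newlines and repeated spaces all act as one delimiter.
if __name__ == "__main__":
    assert tokenize_line("hello   world\tfoo\n") == ["hello", "world", "foo"]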
|
scripts/stl/udp_inc_len_9k.py | ajitkhaparde/trex-core | 956 | 12626132 | <filename>scripts/stl/udp_inc_len_9k.py
from trex_stl_lib.api import *
import argparse
class STLS1(object):
def __init__ (self):
self.max_pkt_size_l3 =9*1024;
def create_stream (self):
# pkt
p_l2 = Ether();
p_l3 = IP(src="192.168.127.12",dst="172.16.58.3")
p_l4 = UDP(dport=12,sport=1025)
pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4));
base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size))
l3_len_fix =-(len(p_l2));
l4_len_fix =-(len(p_l2/p_l3));
# vm
vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64, max_value=len(base_pkt), size=2, op="inc"),
STLVmTrimPktSize("fv_rand"), # total packet size
STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=l3_len_fix), # fix ip len
STLVmFixIpv4(offset = "IP"), # fix checksum
STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "UDP.len", add_val=l4_len_fix) # fix udp len
]
)
pkt = STLPktBuilder(pkt = base_pkt,
vm = vm)
return STLStream(packet = pkt,
mode = STLTXCont())
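    # --- Explanatory note (added; not part of the original script). For every
    # packet sent, the field-engine program above does roughly the following:
    #   1. 'fv_rand' steps from 64 bytes up to the full ~9K template length,
    #   2. STLVmTrimPktSize truncates the packet to that value,
    #   3. the IP and UDP length fields are rewritten from 'fv_rand', offset
    #      by the L2 and L2+L3 header sizes (l3_len_fix / l4_len_fix), and
    #   4. STLVmFixIpv4 recomputes the IPv4 header checksum.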
def get_streams (self, tunables, **kwargs):
parser = argparse.ArgumentParser(description='Argparser for {}'.format(os.path.basename(__file__)),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args(tunables)
# create 1 stream
return [ self.create_stream() ]
# dynamic load - used for trex console or simulator
def register():
return STLS1()
|
tests/elf/test_equality.py | junghee/LIEF | 2,999 | 12626150 | <reponame>junghee/LIEF
#!/usr/bin/env python
import itertools
import logging
import os
import random
import stat
import subprocess
import sys
import tempfile
import unittest
from unittest import TestCase
import lief
from utils import get_sample
lief.logging.set_level(lief.logging.LOGGING_LEVEL.INFO)
class TestEquality64(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
self.input = lief.parse(get_sample("ELF/ELF64_x86-64_binary_all.bin"))
_, output = tempfile.mkstemp(prefix="all_bis")
self.input.write(output)
self.output = lief.parse(output)
def test_header(self):
self.assertEqual(self.input.header, self.output.header)
def test_sections(self):
for l, r in zip(self.input.sections, self.output.sections):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_segments(self):
for l, r in zip(self.input.segments, self.output.segments):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_relocations(self):
for l, r in zip(self.input.relocations, self.output.relocations):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_symbols(self):
for l, r in zip(self.input.symbols, self.output.symbols):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_dynamic_entries(self):
for l, r in zip(self.input.dynamic_entries, self.output.dynamic_entries):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
class TestEquality32(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
self.input = lief.parse(get_sample("ELF/ELF32_x86_binary_all.bin"))
_, output = tempfile.mkstemp(prefix="all_bis")
self.input.write(output)
self.output = lief.parse(output)
def test_header(self):
self.assertEqual(self.input.header, self.output.header)
def test_sections(self):
for l, r in zip(self.input.sections, self.output.sections):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_segments(self):
for l, r in zip(self.input.segments, self.output.segments):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_relocations(self):
for l, r in zip(self.input.relocations, self.output.relocations):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_symbols(self):
for l, r in zip(self.input.symbols, self.output.symbols):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
def test_dynamic_entries(self):
for l, r in zip(self.input.dynamic_entries, self.output.dynamic_entries):
self.assertEqual(l, r, "\n{!s}\n{!s}".format(l, r))
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
|
src/simian/mac/munki/handlers/pkgsinfo.py | tristansgray/simian | 326 | 12626152 | #!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PackagesInfo handlers."""
import hashlib
import httplib
import logging
import urllib
from simian.mac.common import datastore_locks
from simian.auth import gaeserver
from simian.mac import models
from simian.mac.common import auth
from simian.mac.munki import handlers
from simian.mac.munki import plist
from simian.mac.munki.handlers import pkgs
class PackageDoesNotExistError(plist.PlistError):
"""The package referenced in the pkginfo plist does not exist."""
class MunkiPackageInfoPlistStrict(plist.MunkiPackageInfoPlist):
"""Class for Munki plist with added strict validation."""
def __init__(self, *args, **kwargs):
super(MunkiPackageInfoPlistStrict, self).__init__(*args, **kwargs)
self.AddValidationHook(self.ValidatePackageExists)
def ValidatePackageExists(self):
"""Verifies if a particular package exists or not."""
if not pkgs.PackageExists(self._plist['installer_item_location']):
raise PackageDoesNotExistError(
'Package %s does not exist' % self._plist['installer_item_location'])
class PackagesInfo(handlers.AuthenticationHandler):
"""Handler for /pkgsinfo/"""
def get(self, filename=None):
"""GET
Args:
filename: string like Firefox-1.0.dmg
"""
auth_return = auth.DoAnyAuth()
if hasattr(auth_return, 'email'):
email = auth_return.email()
if not any((auth.IsAdminUser(email),
auth.IsSupportUser(email),
)):
raise auth.IsAdminMismatch
if filename:
filename = urllib.unquote(filename)
hash_str = self.request.get('hash')
if hash_str:
lock = models.GetLockForPackage(filename)
try:
lock.Acquire(timeout=30, max_acquire_attempts=5)
except datastore_locks.AcquireLockError:
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Could not lock pkgsinfo')
return
pkginfo = models.PackageInfo.get_by_key_name(filename)
if pkginfo:
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
if hash_str:
self.response.headers['X-Pkgsinfo-Hash'] = self._Hash(pkginfo.plist)
self.response.out.write(pkginfo.plist)
else:
if hash_str:
lock.Release()
self.response.set_status(httplib.NOT_FOUND)
return
if hash_str:
lock.Release()
else:
query = models.PackageInfo.all()
filename = self.request.get('filename')
if filename:
query.filter('filename', filename)
install_types = self.request.get_all('install_types')
for install_type in install_types:
query.filter('install_types =', install_type)
catalogs = self.request.get_all('catalogs')
for catalog in catalogs:
query.filter('catalogs =', catalog)
pkgs = []
for p in query:
pkg = {}
for k in p.properties():
if k != '_plist':
pkg[k] = getattr(p, k)
pkgs.append(pkg)
self.response.out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
self.response.out.write(plist.GetXmlStr(pkgs))
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
def _Hash(self, s):
"""Return a sha256 hash for a string.
Args:
s: str
Returns:
str, sha256 digest
"""
h = hashlib.sha256(str(s))
return h.hexdigest()
def put(self, filename):
"""PUT
Args:
filename: string like Firefox-1.0.dmg
"""
session = gaeserver.DoMunkiAuth(require_level=gaeserver.LEVEL_UPLOADPKG)
filename = urllib.unquote(filename)
hash_str = self.request.get('hash')
catalogs = self.request.get('catalogs', None)
manifests = self.request.get('manifests', None)
install_types = self.request.get('install_types')
if catalogs == '':
catalogs = []
elif catalogs:
catalogs = catalogs.split(',')
if manifests == '':
manifests = []
elif manifests:
manifests = manifests.split(',')
if install_types:
install_types = install_types.split(',')
mpl = MunkiPackageInfoPlistStrict(self.request.body)
try:
mpl.Parse()
except plist.PlistError, e:
logging.exception('Invalid pkginfo plist PUT: \n%s\n', self.request.body)
self.response.set_status(httplib.BAD_REQUEST)
self.response.out.write(str(e))
return
lock_name = 'pkgsinfo_%s' % filename
lock = datastore_locks.DatastoreLock(lock_name)
try:
lock.Acquire(timeout=30, max_acquire_attempts=5)
except datastore_locks.AcquireLockError:
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Could not lock pkgsinfo')
return
# To avoid pkginfo uploads without corresponding packages, only allow
# updates to existing PackageInfo entities, not creations of new ones.
pkginfo = models.PackageInfo.get_by_key_name(filename)
if pkginfo is None:
logging.warning(
'pkginfo "%s" does not exist; PUT only allows updates.', filename)
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Only updates supported')
lock.Release()
return
# If the pkginfo is not modifiable, ensure only manifests have changed.
if not pkginfo.IsSafeToModify():
if not mpl.EqualIgnoringManifestsAndCatalogs(pkginfo.plist):
logging.warning(
'pkginfo "%s" is in stable or testing; change prohibited.',
filename)
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Changes to pkginfo not allowed')
lock.Release()
return
# If the update parameter asked for a careful update, by supplying
# a hash of the last known pkgsinfo, then compare the hash to help
# the client make a non destructive update.
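    # (Added explanatory note, not in the original file.) A hypothetical client
    # flow for this optimistic-concurrency check:
    #   1. GET /pkgsinfo/<filename>?hash=1 returns the current plist together
    #      with an X-Pkgsinfo-Hash header,
    #   2. the client edits the plist locally,
    #   3. PUT /pkgsinfo/<filename>?hash=<value from step 1>; if another admin
    #      changed the pkginfo in the meantime the hashes differ and the
    #      server answers 409 Conflict instead of overwriting.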
if hash_str:
if self._Hash(pkginfo.plist) != hash_str:
self.response.set_status(httplib.CONFLICT)
self.response.out.write('Update hash does not match')
lock.Release()
return
# All verification has passed, so let's create the PackageInfo entity.
pkginfo.plist = mpl
pkginfo.name = mpl.GetPackageName()
if catalogs is not None:
pkginfo.catalogs = catalogs
if manifests is not None:
pkginfo.manifests = manifests
if install_types:
pkginfo.install_types = install_types
pkginfo.put()
lock.Release()
for track in pkginfo.catalogs:
models.Catalog.Generate(track, delay=1)
# Log admin pkginfo put to Datastore.
user = session.uuid
admin_log = models.AdminPackageLog(
user=user, action='pkginfo', filename=filename,
catalogs=pkginfo.catalogs, manifests=pkginfo.manifests,
install_types=pkginfo.install_types, plist=pkginfo.plist.GetXml())
admin_log.put()
|
utils/swift_build_support/tests/test_xcrun.py | lwhsu/swift | 427 | 12626153 | <gh_stars>100-1000
# test_xcrun.py - Unit tests for swift_build_support.xcrun -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import platform
import unittest
from swift_build_support import xcrun
@unittest.skipUnless(platform.system() == 'Darwin',
'xcrun is available in Darwin platform only')
class XCRunTestCase(unittest.TestCase):
def test_find(self):
# Unknown tool
self.assertIsNone(xcrun.find('a-tool-that-isnt-on-osx',
sdk='macosx',
toolchain='default'))
# Available tool
self.assertTrue(xcrun.find('clang',
sdk='macosx',
toolchain='default').endswith('/clang'))
def test_sdk_path(self):
# Unknown SDK
self.assertIsNone(xcrun.sdk_path('not-a-sdk'))
# Available SDK
self.assertIsNotNone(xcrun.sdk_path('macosx'))
if __name__ == '__main__':
unittest.main()
|
usaspending_api/awards/tests/unit/test_location_filter_geocode.py | g4brielvs/usaspending-api | 217 | 12626175 | <filename>usaspending_api/awards/tests/unit/test_location_filter_geocode.py<gh_stars>100-1000
import pytest
from model_mommy import mommy
from usaspending_api.search.models import ContractAwardSearchMatview
from usaspending_api.awards.v2.filters.location_filter_geocode import (
create_nested_object,
geocode_filter_locations,
get_fields_list,
location_error_handling,
validate_location_keys,
)
from usaspending_api.common.exceptions import InvalidParameterException
@pytest.fixture
def award_data_fixture(db):
mommy.make("awards.TransactionNormalized", id=1, action_date="2010-10-01", award_id=1, is_fpds=True, type="A")
mommy.make(
"awards.TransactionFPDS",
transaction_id=1,
legal_entity_city_name="BURBANK",
legal_entity_country_code="USA",
legal_entity_state_code="CA",
piid="piiiiid",
place_of_perform_city_name="AUSTIN",
place_of_performance_state="TX",
place_of_perform_country_c="USA",
)
mommy.make("awards.Award", id=1, is_fpds=True, latest_transaction_id=1, piid="piiiiid", type="A")
mommy.make("awards.TransactionNormalized", id=2, action_date="2010-10-01", award_id=2, is_fpds=True, type="A")
mommy.make(
"awards.TransactionFPDS",
transaction_id=2,
legal_entity_city_name="BRISTOL",
legal_entity_country_code="GBR",
piid="piiiiid",
place_of_perform_city_name="MCCOOL JUNCTION",
place_of_performance_state="TX",
place_of_perform_country_c="USA",
)
mommy.make("awards.Award", id=2, is_fpds=True, latest_transaction_id=2, piid="0001", type="A")
mommy.make("awards.TransactionNormalized", id=3, action_date="2010-10-01", award_id=3, is_fpds=True, type="A")
mommy.make(
"awards.TransactionFPDS",
transaction_id=3,
legal_entity_city_name="BRISBANE",
piid="0002",
place_of_perform_city_name="BRISBANE",
place_of_performance_state="NE",
place_of_perform_country_c="USA",
)
mommy.make("awards.Award", id=3, is_fpds=True, latest_transaction_id=3, piid="0002", type="A")
mommy.make("awards.TransactionNormalized", id=4, action_date="2010-10-01", award_id=4, is_fpds=True, type="A")
mommy.make(
"awards.TransactionFPDS",
transaction_id=4,
legal_entity_city_name="NEW YORK",
legal_entity_country_code="USA",
piid="0003",
place_of_perform_city_name="NEW YORK",
place_of_performance_state="NE",
place_of_perform_country_c="USA",
)
mommy.make("awards.Award", id=4, is_fpds=True, latest_transaction_id=4, piid="0003", type="A")
mommy.make("awards.TransactionNormalized", id=5, action_date="2010-10-01", award_id=5, is_fpds=True, type="A")
mommy.make(
"awards.TransactionFPDS",
transaction_id=5,
legal_entity_city_name="NEW AMSTERDAM",
legal_entity_country_code="USA",
piid="0004",
place_of_perform_city_name="NEW AMSTERDAM",
place_of_performance_state="NE",
place_of_perform_country_c="USA",
)
mommy.make("awards.Award", id=5, is_fpds=True, latest_transaction_id=5, piid="0004", type="A")
mommy.make("references.RefCountryCode", country_code="USA", country_name="UNITED STATES")
def test_geocode_filter_locations(award_data_fixture):
to = ContractAwardSearchMatview.objects
values = [
{"city": "McCool Junction", "state": "TX", "country": "USA"},
{"city": "Burbank", "state": "CA", "country": "USA"},
]
assert to.filter(geocode_filter_locations("nothing", [])).count() == 5
assert to.filter(geocode_filter_locations("pop", values)).count() == 1
assert to.filter(geocode_filter_locations("recipient_location", values)).count() == 1
values = [
{"city": "Houston", "state": "TX", "country": "USA"},
{"city": "McCool Junction", "state": "TX", "country": "USA"},
]
assert to.filter(geocode_filter_locations("pop", values)).count() == 1
assert to.filter(geocode_filter_locations("recipient_location", values)).count() == 0
def test_validate_location_keys():
assert validate_location_keys([]) is None
with pytest.raises(InvalidParameterException):
assert validate_location_keys([{}]) is None
with pytest.raises(InvalidParameterException):
assert validate_location_keys([{"district": ""}]) is None
with pytest.raises(InvalidParameterException):
assert validate_location_keys([{"county": ""}]) is None
assert validate_location_keys([{"country": "", "state": ""}]) is None
assert validate_location_keys([{"country": "", "state": "", "feet": ""}]) is None
assert (
validate_location_keys(
[
{
"country": "USA",
"zip": "12345",
"city": "Chicago",
"state": "IL",
"county": "Yes",
"district": "Also Yes",
},
{"country": "USA", "zip": "12345", "city": "Chicago"},
]
)
is None
)
def test_create_nested_object():
with pytest.raises(InvalidParameterException):
location_error_handling([])
with pytest.raises(InvalidParameterException):
location_error_handling([{"country": "", "state": ""}])
with pytest.raises(InvalidParameterException):
location_error_handling([{"country": "", "state": "", "feet": ""}])
assert create_nested_object(
[
{
"country": "USA",
"zip": "12345",
"city": "Chicago",
"state": "IL",
"county": "Yes",
"district": "Also Yes",
},
{"country": "USA", "zip": "12345", "city": "Chicago"},
]
) == {
"USA": {
"city": ["CHICAGO"],
"zip": ["12345", "12345"],
"IL": {"county": ["YES"], "district": ["ALSO YES"], "city": ["CHICAGO"]},
}
}
def test_location_error_handling():
with pytest.raises(InvalidParameterException):
location_error_handling({})
with pytest.raises(InvalidParameterException):
location_error_handling({"country": "", "county": ""})
with pytest.raises(InvalidParameterException):
location_error_handling({"country": "", "district": ""})
assert location_error_handling({"country": "", "state": "", "county": ""}) is None
assert location_error_handling({"country": "", "state": "", "district": ""}) is None
assert location_error_handling({"country": "", "state": "", "county": "", "district": ""}) is None
assert location_error_handling({"country": "", "state": "", "county": "", "district": "", "feet": ""}) is None
def test_get_fields_list():
assert get_fields_list("congressional_code", "01") == ["1", "01", "1.0"]
assert get_fields_list("county_code", "01") == ["1", "01", "1.0"]
assert get_fields_list("feet", "01") == ["01"]
assert get_fields_list("congressional_code", "abc") == ["abc"]
|
gerapy_auto_extractor/__init__.py | zanachka/GerapyAutoExtractor | 214 | 12626181 | from gerapy_auto_extractor.settings import APP_DEBUG
from gerapy_auto_extractor.extractors import extract_detail, extract_list, extract_datetime, extract_content, \
extract_title
from gerapy_auto_extractor.classifiers.list import is_list, probability_of_list
from gerapy_auto_extractor.classifiers.detail import is_detail, probability_of_detail
from loguru import logger
try:
logger.level('inspect', no=100000 if APP_DEBUG else 0, color='<yellow>')
except (ValueError, TypeError):
pass
|
api/views.py | zhy0216/random-read | 239 | 12626215 | <reponame>zhy0216/random-read<gh_stars>100-1000
import random
from django.shortcuts import redirect
from django.http import Http404
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from article.models import (Article, UserPostArticle,
UserRemoveArticle, UserStarArticle, UserReadArticle, UserArchiveArticle)
from utils import q, to_json, redis_conn, required_login
from exceptions import APIException, ParseError
@to_json
@required_login
@require_http_methods(["POST"])
def add_article(request):
url = request.POST.get("url") or None
if(url is None):
raise ParseError("require url parameter")
article, _ = Article.objects.get_or_create(original_url=url)
upa, created = UserPostArticle.objects.get_or_create(article=article, user=request.user)
# post process in rq worker
if created:
q.enqueue(upa.defer_process)
return {}
def random_article(request):
''' put all primary article id to the redis
redis[all] = set(1,2,3)
put all user read in redis
redis[user] = set(1,2, 3)
consider use sdiff: r.sdiff(keys=('local', 'mythtv'))
'''
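    # (Added explanatory note, not in the original file.) Example of the set
    # arithmetic sketched in the docstring above, with made-up article ids:
    #   redis[ALL_PRIMARY_IDS_KEY] = {1, 2, 3, 4}
    #   redis[user.id]             = {2, 4}              # articles already read
    #   sdiff((ALL_PRIMARY_IDS_KEY, user.id)) -> {1, 3}  # still unread candidates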
# use request.user
# if we need to collect anonymous user data
if request.user.is_authenticated():
article_id_sets = redis_conn.sdiff((Article.ALL_PRIMARY_IDS_KEY, request.user.id)) or 0
if article_id_sets == 0:
raise Http404
article_id = random.sample(article_id_sets, 1)[0]
redis_conn.sadd(request.user.id, article_id)
UserReadArticle.objects.get_or_create(user=request.user, article_id=article_id)
else:
article_id = redis_conn.srandmember(Article.ALL_PRIMARY_IDS_KEY) or 0
return redirect("/api/article/%s/"%article_id)
@to_json
def get_article_by_id(request, articleid):
article = Article.objects.filter(id=articleid).first()
if article is None:
raise Http404
return article.to_dict()
def _get_article_list_by_rs(request, rs, page):
user = request.user
relationships = rs.get_rs_by_user(user=user, filters={"article__finished": True})
relationships = relationships[page*15-15:]
articles = relationships.values("article__id",
"article__title",
"article__original_url")
article_list = []
for article_dic in articles:
_dict = {}
_dict["id"] = article_dic["article__id"]
_dict["title"] = article_dic["article__title"]
_dict["original_url"] = article_dic["article__original_url"]
_dict["is_star"] = UserStarArticle.has_rs_between_user_article(user, article_dic["article__id"])
article_list.append(_dict)
return article_list
@to_json
@required_login
def get_inbox_article(request):
page = request.GET.get("page") or 1
article_list = _get_article_list_by_rs(request, UserPostArticle, page)
return {
"articles": article_list
}
@to_json
@required_login
def get_archive_article(request):
page = request.GET.get("page") or 1
article_list = _get_article_list_by_rs(request, UserArchiveArticle, page)
return {
"articles": article_list
}
@to_json
@required_login
def get_star_article(request):
page = request.GET.get("page") or 1
article_list = _get_article_list_by_rs(request, UserStarArticle, page)
return {
"articles": article_list
}
|
dataviva/api/secex/models.py | joelvisroman/dataviva-site | 126 | 12626254 | from dataviva import db
from dataviva.api.secex.abstract_models import BaseSecex, Eci, Rca, Rca_wld, Rcd, Distance, OppGain
from dataviva.api.secex.abstract_models import BraId, WldId, HsId
from dataviva.api.secex.abstract_models import BraDiversity, HsDiversity, WldDiversity
class Ymb(BaseSecex, BraId, HsDiversity, WldDiversity, Eci):
__tablename__ = "secex_ymb"
class Ymp(BaseSecex, HsId, BraDiversity, WldDiversity, Rca_wld):
__tablename__ = "secex_ymp"
pci = db.Column(db.Float())
class Ymw(BaseSecex, WldId, Eci, BraDiversity, HsDiversity):
__tablename__ = "secex_ymw"
class Ymbp(BaseSecex, BraId, HsId, Rca, Rca_wld, Rcd, Distance, OppGain):
__tablename__ = "secex_ymbp"
class Ymbpw(BaseSecex, BraId, HsId, WldId):
__tablename__ = "secex_ymbpw"
class Ymbw(BaseSecex, BraId, WldId):
__tablename__ = "secex_ymbw"
class Ympw(BaseSecex, HsId, WldId):
__tablename__ = "secex_ympw"
|
mutant/contrib/numeric/models.py | pombredanne/django-mutant | 152 | 12626255 | <filename>mutant/contrib/numeric/models.py
from __future__ import unicode_literals
from django.db.models import fields
from django.utils.translation import ugettext_lazy as _
from ...models.field import FieldDefinition, FieldDefinitionManager
class _NumericMeta:
defined_field_category = _('Numeric')
class SmallIntegerFieldDefinition(FieldDefinition):
class Meta(_NumericMeta):
app_label = 'numeric'
proxy = True
defined_field_class = fields.SmallIntegerField
class PositiveSmallIntegerFieldDefinition(FieldDefinition):
class Meta(_NumericMeta):
app_label = 'numeric'
proxy = True
defined_field_class = fields.PositiveSmallIntegerField
class IntegerFieldDefinition(FieldDefinition):
class Meta(_NumericMeta):
app_label = 'numeric'
proxy = True
defined_field_class = fields.IntegerField
class PositiveIntegerFieldDefinition(FieldDefinition):
class Meta(_NumericMeta):
app_label = 'numeric'
proxy = True
defined_field_class = fields.PositiveIntegerField
class BigIntegerFieldDefinition(FieldDefinition):
class Meta(_NumericMeta):
app_label = 'numeric'
proxy = True
defined_field_class = fields.BigIntegerField
class FloatFieldDefinition(FieldDefinition):
class Meta(_NumericMeta):
app_label = 'numeric'
proxy = True
defined_field_class = fields.FloatField
max_digits_help_text = _('The maximum number of digits allowed in the number. '
'Note that this number must be greater than or equal '
'to ``decimal_places``, if it exists.')
decimal_places_help_text = _('The number of decimal places to store '
'with the number.')
class DecimalFieldDefinition(FieldDefinition):
max_digits = fields.PositiveSmallIntegerField(_('max digits'),
help_text=max_digits_help_text)
decimal_places = fields.PositiveSmallIntegerField(_('decimal_places'),
help_text=decimal_places_help_text)
objects = FieldDefinitionManager()
class Meta(_NumericMeta):
app_label = 'numeric'
defined_field_class = fields.DecimalField
defined_field_options = ('max_digits', 'decimal_places',)
|
djs_playground/apps.py | akarca/django-summernote | 869 | 12626280 | from django.apps import AppConfig
class DjsConfig(AppConfig):
name = 'djs_playground'
|
ML/gan/discriminator_module.py | saneravi/ML_Stuff | 209 | 12626281 | <reponame>saneravi/ML_Stuff
#!/usr/bin/env python
from keras.layers import Input
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Dense, Dropout, Flatten
from keras.models import Model
def create_model(shp=(28, 28, 1), dropout_rate=0.25):
d_input = Input(shape=shp)
H = Convolution2D(256, (5, 5), strides=(2, 2), padding='same',
activation='relu')(d_input)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Convolution2D(512, (5, 5), strides=(2, 2), padding='same',
activation='relu')(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Flatten()(H)
H = Dense(256)(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
d_V = Dense(2, activation='softmax')(H)
discriminator = Model(d_input, d_V)
return discriminator
if __name__ == '__main__':
discriminator = create_model()
discriminator.summary()
|
qmsolve/time_dependent_solver/split_step.py | quantum-visualizations/qmsolve | 356 | 12626296 | <filename>qmsolve/time_dependent_solver/split_step.py
import numpy as np
from .method import Method
import time
from ..util.constants import hbar, Å, femtoseconds
from ..particle_system import SingleParticle, TwoParticles
import progressbar
"""
Split-operator method for the Schrödinger equation.
Prototype and original implementation:
https://github.com/marl0ny/split-operator-simulations
References:
https://www.algorithm-archive.org/contents/
split-operator_method/split-operator_method.html
https://en.wikipedia.org/wiki/Split-step_method
"""
class SplitStep(Method):
def __init__(self, simulation):
self.simulation = simulation
self.H = simulation.H
self.simulation.Vmin = np.amin(self.H.Vgrid)
self.simulation.Vmax = np.amax(self.H.Vgrid)
self.H.particle_system.compute_momentum_space(self.H)
self.p2 = self.H.particle_system.p2
def run(self, initial_wavefunction, total_time, dt, store_steps = 1):
self.simulation.store_steps = store_steps
dt_store = total_time/store_steps
self.simulation.total_time = total_time
Nt = int(np.round(total_time / dt))
Nt_per_store_step = int(np.round(dt_store / dt))
self.simulation.Nt_per_store_step = Nt_per_store_step
#time/dt and dt_store/dt must be integers. Otherwise dt is rounded to match that the Nt_per_store_stepdivisions are integers
self.simulation.dt = dt_store/Nt_per_store_step
if isinstance(self.simulation.H.particle_system ,SingleParticle):
Ψ = np.zeros((store_steps + 1, *([self.H.N] *self.H.ndim )), dtype = np.complex128)
elif isinstance(self.simulation.H.particle_system,TwoParticles):
Ψ = np.zeros((store_steps + 1, *([self.H.N] * 2)), dtype = np.complex128)
Ψ[0] = np.array(initial_wavefunction(self.H.particle_system))
m = self.H.particle_system.m
Ur = np.exp(-0.5j*(self.simulation.dt/hbar)*np.array(self.H.Vgrid))
Uk = np.exp(-0.5j*(self.simulation.dt/(m*hbar))*self.p2)
t0 = time.time()
bar = progressbar.ProgressBar()
for i in bar(range(store_steps)):
tmp = np.copy(Ψ[i])
for j in range(Nt_per_store_step):
c = np.fft.fftshift(np.fft.fftn(Ur*tmp))
tmp = Ur*np.fft.ifftn( np.fft.ifftshift(Uk*c))
Ψ[i+1] = tmp
print("Took", time.time() - t0)
self.simulation.Ψ = Ψ
self.simulation.Ψmax = np.amax(np.abs(Ψ))
class SplitStepCupy(Method):
def __init__(self, simulation):
self.simulation = simulation
self.H = simulation.H
self.simulation.Vmin = np.amin(self.H.Vgrid)
self.simulation.Vmax = np.amax(self.H.Vgrid)
self.H.particle_system.compute_momentum_space(self.H)
self.p2 = self.H.particle_system.p2
def run(self, initial_wavefunction, total_time, dt, store_steps = 1):
import cupy as cp
self.p2 = cp.array(self.p2)
self.simulation.store_steps = store_steps
dt_store = total_time/store_steps
self.simulation.total_time = total_time
Nt = int(np.round(total_time / dt))
Nt_per_store_step = int(np.round(dt_store / dt))
self.simulation.Nt_per_store_step = Nt_per_store_step
#time/dt and dt_store/dt must be integers. Otherwise dt is rounded to match that the Nt_per_store_stepdivisions are integers
self.simulation.dt = dt_store/Nt_per_store_step
Ψ = cp.zeros((store_steps + 1, *([self.H.N] *self.H.ndim )), dtype = cp.complex128)
Ψ[0] = cp.array(initial_wavefunction(self.H.particle_system))
m = self.H.particle_system.m
Ur = cp.exp(-0.5j*(self.simulation.dt/hbar)*cp.array(self.H.Vgrid))
Uk = cp.exp(-0.5j*(self.simulation.dt/(m*hbar))*self.p2)
t0 = time.time()
bar = progressbar.ProgressBar()
for i in bar(range(store_steps)):
tmp = cp.copy(Ψ[i])
for j in range(Nt_per_store_step):
c = cp.fft.fftshift(cp.fft.fftn(Ur*tmp))
tmp = Ur*cp.fft.ifftn( cp.fft.ifftshift(Uk*c))
Ψ[i+1] = tmp
print("Took", time.time() - t0)
self.simulation.Ψ = Ψ.get()
self.simulation.Ψmax = np.amax(np.abs(self.simulation.Ψ ))
|
tests/test_pyquery.py | roehling/jello | 265 | 12626325 | <reponame>roehling/jello
#!/usr/bin/env python3
import unittest
import jello.cli
from jello.cli import opts
class MyTests(unittest.TestCase):
def setUp(self):
# initialize options
opts.initialize = None
opts.version_info = None
opts.helpme = None
opts.compact = None
opts.nulls = None
opts.raw = None
opts.lines = None
opts.mono = None
opts.schema = None
opts.types = None
opts.keyname_color = None
opts.keyword_color = None
opts.number_color = None
opts.string_color = None
# create samples
self.dict_sample = {
'string': 'string\nwith newline\ncharacters in it',
'true': True,
'false': False,
'null': None,
'int': 42,
'float': 3.14,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
}
self.list_sample = [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
self.list_of_dicts_sample = [
{
'string': 'string\nwith newline\ncharacters in it',
'true': True,
'false': False,
'null': None,
'int': 42,
'float': 3.14,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
},
{
'string': 'another string\nwith newline\ncharacters in it',
'true': True,
'false': False,
'null': None,
'int': 10001,
'float': -400.45,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
-6000034,
999999.854321
]
}
]
self.list_of_lists_sample = [
[
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
],
[
'another string\nwith newline\ncharacters in it',
True,
False,
None,
42001,
-3.14
]
]
# ------------ Tests ------------
def test_KeyError(self):
"""
Test _.foo.nonexistent_key (KeyError)
"""
self.data_in = {"foo": "bar"}
self.query = '_.nonexistent_key'
self.assertRaises(KeyError, jello.cli.pyquery, self.data_in, self.query)
def test_IndexError(self):
"""
Test _.foo[99] (IndexError)
"""
self.data_in = [1, 2, 3]
self.query = '_[9]'
self.assertRaises(IndexError, jello.cli.pyquery, self.data_in, self.query)
def test_SyntaxError(self):
"""
Test % (SyntaxError)
"""
self.data_in = [1, 2, 3]
self.query = '%'
self.assertRaises(SyntaxError, jello.cli.pyquery, self.data_in, self.query)
def test_TypeError(self):
"""
Test _[5] on None (TypeError)
"""
self.data_in = None
self.query = '_[5]'
self.assertRaises(TypeError, jello.cli.pyquery, self.data_in, self.query)
def test_AttributeError(self):
"""
Test _.items() on list (AttributeError)
"""
self.data_in = [1, 2, 3]
self.query = '_.items()'
self.assertRaises(AttributeError, jello.cli.pyquery, self.data_in, self.query)
def test_NameError(self):
"""
Test variable (NameError)
"""
self.data_in = {"foo": "bar"}
self.query = 'variable'
self.assertRaises(NameError, jello.cli.pyquery, self.data_in, self.query)
def test_ValueError(self):
"""
Test _.get (ValueError)
"""
self.data_in = {"foo": "bar"}
self.query = '_.get'
self.assertRaises(ValueError, jello.cli.pyquery, self.data_in, self.query)
if __name__ == '__main__':
unittest.main()
|