max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
extension_management/01_ManageExtensions.py | IBM/api-samples | 172 | 12788993 | <reponame>IBM/api-samples<filename>extension_management/01_ManageExtensions.py<gh_stars>100-1000
#!/usr/bin/env python3
# In this sample you will see how to manage extensions using the REST API.
# The sample covers uploading an extension, installing it, checking the
# installation task status and deleting the extension.
import json
import os
import sys
import time
import importlib
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
def upload_extension():
# Create our client
client = client_module.RestApiClient(version='6.0')
# Add Content-Type to request header
request_header = {}
request_header['Content-Type'] = 'application/zip'
# setup file for posting
cwd = os.path.dirname(os.path.realpath(__file__))
app_zip_file_path = os.path.join(cwd, 'ExtensionPackageTest.zip')
app_zip_file = open(app_zip_file_path, 'rb')
data = app_zip_file.read()
response = client.call_api('config/extension_management/extensions',
'POST', headers=request_header, data=data)
# If the response code is 201, the extension package has been uploaded
# successfully and the extension id is returned. Otherwise -1 is returned
# and the full response body, including the error message, is printed.
if (response.code != 201):
print('Failed to upload the extension package.')
SampleUtilities.pretty_print_response(response)
return -1
else:
# Extract the extension id from the response body.
response_body = json.loads(response.read().decode('utf-8'))
extension_id = response_body['id']
print('The extension has been uploaded with id = ' +
str(extension_id))
return extension_id
def install_extension(extension_id):
# Create our client
client = client_module.RestApiClient(version='6.0')
# query parameters
# action_type: The desired action to take on
# the Extension (INSTALL or PREVIEW)
# overwrite: If true, any existing items on the importing system will be
# overwritten if the extension contains the same items.
# If false, existing items will be preserved,
# and the corresponding items in the extension will be skipped.
params = {'action_type': 'INSTALL',
'overwrite': 'true'}
# construct api url with path parameter.
url = 'config/extension_management/extensions/' + str(extension_id)
response = client.call_api(url, 'POST', params=params)
# The extension installation process is asynchronous. If 202 is returned,
# the installation task has started and the returned status id can be used
# to track the asynchronous task status.
if (response.code != 202):
print("Failed to start installing task.")
SampleUtilities.pretty_print_response(response)
return -1
else:
response_body = json.loads(response.read().decode('utf-8'))
status_id = response_body['status_id']
print('The extension installing task has been started.')
return status_id
def check_install_status(status_id):
# Create our client
client = client_module.RestApiClient(version='6.0')
# construct api url with path parameter.
url = 'config/extension_management/extensions_task_status/'+str(status_id)
response = client.call_api(url, 'GET')
# If there is no error, the status of the installation task is returned.
if (response.code != 200):
print("Failed to check installing task status.")
SampleUtilities.pretty_print_response(response)
status = 'FAILED'
else:
response_body = json.loads(response.read().decode('utf-8'))
status = response_body['status']
return status
def delete_installed_extension(extension_id):
# Create our client
client = client_module.RestApiClient(version='6.0')
# construct api url with path parameter.
url = 'config/extension_management/extensions/' + str(extension_id)
response = client.call_api(url, 'DELETE')
if (response.code == 202):
print('The extension has been deleted.')
else:
print('Failed to delete the extension.')
def main():
# upload the extension package
extension_id = upload_extension()
if (extension_id != -1):
# if the extension package uploaded successfully, start the
# extension installation task
status_id = install_extension(extension_id)
if (status_id != -1):
# if the installation task started with no error, keep checking
# the status every 5s until it is completed, has errors or
# times out.
status = 'PROCESSING'
count = 60
while ((status == 'PROCESSING' or status == "QUEUED") and
count > 0):
status = check_install_status(status_id)
print('Installing status: ' + status)
count = count - 1
if ((status == 'PROCESSING' or status == "QUEUED") and
count == 0):
print('Installing process timed out.')
sys.exit(1)
time.sleep(5)
if (status == 'COMPLETED'):
# delete the extension once it is completely installed. If you want
# to keep the extension, comment out the line below.
delete_installed_extension(extension_id)
else:
sys.exit(1)
else:
sys.exit(1)
if __name__ == "__main__":
main()
|
tests/agent_tests.py | tomakehurst/saboteur | 258 | 12789023 | from saboteur.agent import SaboteurWebApp
import json
import unittest
from test_utils import MockShell
from saboteur.apicommands import FAULT_TYPES, alphabetical_keys
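# Helpers below build the plain-dict requests (path, method, body) that SaboteurWebApp.handle expects in these tests.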
def post_request(params):
return request('POST', params)
def delete_request():
return {'path': '/',
'method': 'DELETE'}
def request(method, params):
return {'path': '/',
'method': method,
'body': json.dumps(params)}
def http_request(method, params_json):
return {'path': '/',
'method': method,
'body': params_json}
class TestAgent(unittest.TestCase):
def setUp(self):
self.shell = MockShell()
self.app = SaboteurWebApp(self.shell)
def test_successful_iptables_based_fault_returns_200_and_executes_correct_command(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'NETWORK_FAILURE',
'direction': 'IN',
'to_port': 80,
'protocol': 'TCP'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(response['status'], 200)
self.assertEqual(self.shell.last_command, 'sudo /sbin/iptables -A INPUT -p TCP -j DROP --dport 80')
def test_invalid_json_returns_400(self):
params = '{ "name": }'
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps('Not valid JSON'), response['body'])
def test_invalid_fault_type(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'WORMS'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"type": "must be present and one of " + str(alphabetical_keys(FAULT_TYPES))
}
}),
response['body'])
def test_fault_with_single_invalid_field_returns_400(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'NETWORK_FAILURE',
'to_port': 7871
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"direction": "required key not provided"
}
}),
response['body'])
def test_fault_with_multiple_invalid_fields_returns_400(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'DELAY',
'direction': 'IN',
'to_port': 7871,
'delay': 'bad',
'probability': 'worse'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"delay": "expected int",
"probability": "expected float"
}
}),
response['body'])
def test_reset(self):
self.shell.next_result = 'eth1'
response = self.app.handle(delete_request())
self.assertEqual(response['status'], 200)
self.assertEqual(self.shell.commands, [
'sudo /sbin/iptables -F',
"netstat -i | tail -n+3 | cut -f1 -d ' '",
'sudo /sbin/tc qdisc del dev eth1 root'])
def test_returns_500_when_shell_command_exits_with_non_zero(self):
params = json.dumps({
'name': 'whatever',
'type': 'NETWORK_FAILURE',
'direction': 'IN',
'to_port': 80,
'protocol': 'TCP'
})
self.shell.next_exit_code = 1
response = self.app.handle(http_request('POST', params))
self.assertEqual(500, response['status'])
if __name__ == '__main__':
unittest.main()
|
OpenMatch/modules/encoders/positional_encoder.py | vishalbelsare/OpenMatch | 403 | 12789038 | import torch
import torch.nn as nn
class PositionalEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
max_len: int = 512
) -> None:
super(PositionalEncoder, self).__init__()
self._embed_dim = embed_dim
self._max_len = max_len
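# Build a fixed sinusoidal positional-encoding table: entry (pos, i) is
# pos / 10000^(2*(i//2)/embed_dim); sin is applied to even dimensions and cos to odd dimensions below.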
self._embed_matrix = torch.tensor(
[[pos / pow(1.0e4, 2.0 * (i // 2) / self._embed_dim) for i in range(self._embed_dim)] for pos in range(self._max_len)]
)
self._embed_matrix[:, 0::2] = torch.sin(self._embed_matrix[:, 0::2])
self._embed_matrix[:, 1::2] = torch.cos(self._embed_matrix[:, 1::2])
self._embedder = nn.Embedding(self._max_len, self._embed_dim)
self._embedder.weight = nn.Parameter(self._embed_matrix, requires_grad=False)
def forward(self, embed: torch.Tensor) -> torch.Tensor:
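# Look up the fixed (non-trainable) positional embeddings for the first token_len positions and add them to the input.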
token_len = embed.size()[1]
if embed.is_cuda:
ids = torch.cuda.LongTensor([l for l in range(token_len)])
else:
ids = torch.LongTensor([l for l in range(token_len)])
embed += self._embedder(ids)
return embed
|
lldb/test/API/tools/lldb-server/TestGdbRemoteProcessInfo.py | mkinsner/llvm | 2,338 | 12789058 | import gdbremote_testcase
import lldbgdbserverutils
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def test_qProcessInfo_returns_running_process(self):
self.build()
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id looks reasonable.
pid_text = process_info.get("pid")
self.assertIsNotNone(pid_text)
pid = int(pid_text, base=16)
self.assertNotEqual(0, pid)
# If possible, verify that the process is running.
self.assertTrue(lldbgdbserverutils.process_is_running(pid, True))
def test_attach_commandline_qProcessInfo_reports_correct_pid(self):
self.build()
self.set_inferior_startup_attach()
procs = self.prep_debug_monitor_and_inferior()
self.assertIsNotNone(procs)
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id matches what we expected.
pid_text = process_info.get('pid', None)
self.assertIsNotNone(pid_text)
reported_pid = int(pid_text, base=16)
self.assertEqual(reported_pid, procs["inferior"].pid)
def test_qProcessInfo_reports_valid_endian(self):
self.build()
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the endian value is reported and valid.
endian = process_info.get("endian")
self.assertIsNotNone(endian)
self.assertIn(endian, ["little", "big", "pdp"])
def qProcessInfo_contains_keys(self, expected_key_set):
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the expected keys are present and non-None within the process
# info.
missing_key_set = set()
for expected_key in expected_key_set:
if expected_key not in process_info:
missing_key_set.add(expected_key)
self.assertEqual(
missing_key_set,
set(),
"the listed keys are missing in the qProcessInfo result")
def qProcessInfo_does_not_contain_keys(self, absent_key_set):
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the unexpected keys are not present
unexpected_key_set = set()
for unexpected_key in absent_key_set:
if unexpected_key in process_info:
unexpected_key_set.add(unexpected_key)
self.assertEqual(
unexpected_key_set,
set(),
"the listed keys were present but unexpected in qProcessInfo result")
@add_test_categories(["debugserver"])
def test_qProcessInfo_contains_cputype_cpusubtype(self):
self.build()
self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype']))
@add_test_categories(["llgs"])
def test_qProcessInfo_contains_triple_ppid(self):
self.build()
self.qProcessInfo_contains_keys(set(['triple', 'parent-pid']))
@add_test_categories(["debugserver"])
def test_qProcessInfo_does_not_contain_triple(self):
self.build()
# We don't expect to see triple on darwin. If we do, we'll prefer triple
# to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup
# for the remote Host and Process.
self.qProcessInfo_does_not_contain_keys(set(['triple']))
@add_test_categories(["llgs"])
def test_qProcessInfo_does_not_contain_cputype_cpusubtype(self):
self.build()
self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype']))
|
chembl_webresource_client/scripts/chembl_m2t.py | RowAnalytics/chembl_webresource_client | 248 | 12789074 | #!/usr/bin/env python
from __future__ import print_function
__author__ = 'mnowotka'
# ----------------------------------------------------------------------------------------------------------------------
import sys
import argparse
from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles
from chembl_webresource_client.scripts.utils import resolve, mols_to_targets
AVAILABLE_SOURCE_FORMATS = ('chembl_id', 'sdf', 'smi')
# ----------------------------------------------------------------------------------------------------------------------
def get_options():
description = 'Find related targets for a set of compounds'
parser = argparse.ArgumentParser(description=description, prog='chembl_m2t')
parser.add_argument('-i', '--input', action='store', dest='input',
help='input file, standard input by default')
parser.add_argument('-o', '--output', action='store', dest='output',
help='output file, standard output by default')
parser.add_argument('-s', '--source-format', action='store', dest='source_format', default='csv',
help='input file format. Can be one of 3: chembl_id (a comma separated list of chembl IDs), '
'sdf: (MDL molfile), smi (file containing smiles)')
parser.add_argument('-d', '--destination-format', action='store', dest='dest_format', default='uniprot',
help='output file format. can be chosen from 3 options: '
'[uniprot, gene_name, chembl_id]')
parser.add_argument('-H', '--Human', action='store_true', dest='human',
help='human readable output: prints header and first column with original names')
parser.add_argument('-O', '--organism', action='store', dest='organism',
help='Filter results by organism')
parser.add_argument('-p', '--parent', action='store_true', dest='parent',
help='when fetching targets include also targets from parents of given molecules')
parser.add_argument('-c', '--chunk-size', action='store', dest='chunk', default='1000',
help='Size of chunk of data retrieved from API')
return parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------------
def main():
options = get_options()
source_format = options.source_format.lower()
if source_format not in AVAILABLE_SOURCE_FORMATS:
sys.stderr.write('Unsupported source format: {}\n'.format(options.source_format))
return
inp = sys.stdin
if source_format == 'sdf':
with open(options.input) if options.input else sys.stdin as in_f:
options.input = None
inp = convert_to_smiles(in_f)
with open(options.input) if options.input else inp as in_f, \
open(options.output, 'w') if options.output else sys.stdout as out_f:
serializer_cls = get_serializer(options.dest_format)
if not serializer_cls:
sys.stderr.write('Unsupported format: {}\n'.format(options.dest_format))
return
if options.human:
serializer_cls.write_header(out_f)
for line in in_f:
if not line or line.lower().startswith('smiles'):
continue
chunk = line.strip().split()[0]
identifiers = chunk.strip().split(',')
valid_identifiers = list()
for identifier in identifiers:
if chembl_id_regex.match(identifier):
valid_identifiers.append(identifier)
elif smiles_regex.match(identifier):
valid_identifiers.extend([x['molecule_chembl_id'] for x in resolve(identifier)])
targets = mols_to_targets(valid_identifiers,
organism=options.organism,
only_ids=(options.dest_format == 'chembl_id'),
include_parents=options.parent,
chunk_size=int(options.chunk))
out_f.write(serializer_cls.serialize_line(targets, human=options.human, name=','.join(valid_identifiers)))
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------------------------------------------------------
|
tools/bin/pythonSrc/PSI-0.3b2_gp/psi/_version.py | YangHao666666/hawq | 450 | 12789081 | # The MIT License
#
# Copyright (C) 2007 <NAME>
#
# Copyright (C) 2008-2009 <NAME>
#
# Copyright (C) 2008-2009 Abilisoft Ltd.
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""psi._version
This is used so that this information can stay easily in sync in both
psi and setup.py.
"""
version = '0.3b2'
author = '<NAME>, <NAME>, <NAME>'
copyright = """\
Copyright (C) 2007-2009 <NAME>
Copyright (C) 2008, 2009 <NAME>
Copyright (C) 2008, 2009 Abilisoft Ltd.
Copyright (C) 2009 <NAME>"""
license = 'MIT'
|
general/chainerrl/baselines/train_dqfd.py | marioyc/baselines | 127 | 12789107 | """original source: https://github.com/chainer/chainerrl/pull/480
MIT License
Copyright (c) Preferred Networks, Inc.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *
from future import standard_library
standard_library.install_aliases()
import argparse
from inspect import getsourcefile
import os
import sys
import numpy as np
import chainer
import minerl # noqa: register MineRL envs as Gym envs.
import gym
import chainerrl
from chainerrl import experiments, explorers
from chainerrl.experiments.evaluator import Evaluator
from dqfd import DQfD, PrioritizedDemoReplayBuffer
from q_functions import CNNBranchingQFunction
from env_wrappers import (
BranchedRandomizedAction, BranchedActionWrapper,
MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper,
PoVWithCompassAngleWrapper, FullObservationSpaceWrapper)
from expert_converter import choose_top_experts, fill_buffer
class ScaleGradHook(object):
name = 'ScaleGrad'
call_for_each_param = True
timing = 'pre'
def __init__(self, scale):
self.scale = scale
def __call__(self, rule, param):
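# Rescale gradients only for parameters explicitly flagged with a scale_param attribute.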
if getattr(param, 'scale_param', False):
param.grad *= self.scale
def main():
"""Parses arguments and runs the example
"""
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='MineRLTreechop-v0',
choices=[
'MineRLTreechop-v0',
'MineRLNavigate-v0', 'MineRLNavigateDense-v0', 'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0',
'MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0',
'MineRLNavigateDenseFixed-v0' # for debug use
],
help='MineRL environment identifier')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 31)')
parser.add_argument('--gpu', type=int, default=-1,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--final-exploration-frames',
type=int, default=10**6,
help='Timesteps after which we stop ' +
'annealing exploration rate')
parser.add_argument('--final-epsilon', type=float, default=0.01,
help='Final value of epsilon during training.')
parser.add_argument('--eval-epsilon', type=float, default=0.001,
help='Exploration epsilon used during eval episodes.')
parser.add_argument('--replay-start-size', type=int, default=1000,
help='Minimum replay buffer size before ' +
'performing gradient updates.')
parser.add_argument('--target-update-interval', type=int, default=10**4,
help='Frequency (in timesteps) at which ' +
'the target network is updated.')
parser.add_argument('--update-interval', type=int, default=4,
help='Frequency (in timesteps) of network updates.')
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--no-clip-delta',
dest='clip_delta', action='store_false')
parser.add_argument('--error-max', type=float, default=1.0)
parser.add_argument('--num-step-return', type=int, default=10)
parser.set_defaults(clip_delta=True)
parser.add_argument('--logging-level', type=int, default=20,
help='Logging level. 10:DEBUG, 20:INFO etc.')
parser.add_argument('--logging-filename', type=str, default=None)
parser.add_argument('--monitor', action='store_true', default=False,
help='Monitor env. Videos and additional information are saved as output files when evaluation')
# parser.add_argument('--render', action='store_true', default=False,
# help='Render env states in a GUI window.')
parser.add_argument('--optimizer', type=str, default='rmsprop',
choices=['rmsprop', 'adam'])
parser.add_argument('--lr', type=float, default=2.5e-4, help='Learning rate')
parser.add_argument("--replay-buffer-size", type=int, default=10**6,
help="Size of replay buffer (Excluding demonstrations)")
parser.add_argument("--minibatch-size", type=int, default=32)
parser.add_argument('--batch-accumulator', type=str, default="sum")
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default=None)
parser.add_argument("--save-demo-trajectories", action="store_true",
default=False)
# DQfD specific parameters for loading and pretraining.
parser.add_argument('--n-experts', type=int, default=10)
parser.add_argument('--expert-demo-path', type=str, default=None)
parser.add_argument('--n-pretrain-steps', type=int, default=750000)
parser.add_argument('--demo-supervised-margin', type=float, default=0.8)
parser.add_argument('--loss-coeff-l2', type=float, default=1e-5)
parser.add_argument('--loss-coeff-nstep', type=float, default=1.0)
parser.add_argument('--loss-coeff-supervised', type=float, default=1.0)
parser.add_argument('--bonus-priority-agent', type=float, default=0.001)
parser.add_argument('--bonus-priority-demo', type=float, default=1.0)
# Action branching architecture
parser.add_argument('--gradient-clipping', action='store_true', default=False)
parser.add_argument('--gradient-rescaling', action='store_true', default=False)
# NoisyNet parameters
parser.add_argument('--use-noisy-net', type=str, default=None,
choices=['before-pretraining', 'after-pretraining'])
parser.add_argument('--noisy-net-sigma', type=float, default=0.5)
# Parameters for state/action handling
parser.add_argument('--frame-stack', type=int, default=None, help='Number of frames stacked (None for disable).')
parser.add_argument('--frame-skip', type=int, default=None, help='Number of frames skipped (None for disable).')
parser.add_argument('--camera-atomic-actions', type=int, default=10)
parser.add_argument('--max-range-of-camera', type=float, default=10.)
parser.add_argument('--use-full-observation', action='store_true', default=False)
args = parser.parse_args()
assert args.expert_demo_path is not None,"DQfD needs collected \
expert demonstrations"
import logging
if args.logging_filename is not None:
logging.basicConfig(filename=args.logging_filename, filemode='w',
level=args.logging_level)
else:
logging.basicConfig(level=args.logging_level)
logger = logging.getLogger(__name__)
train_seed = args.seed
test_seed = 2 ** 31 - 1 - args.seed
chainerrl.misc.set_random_seed(args.seed, gpus=(args.gpu,))
args.outdir = experiments.prepare_output_dir(args, args.outdir)
logger.info('Output files are saved in {}'.format(args.outdir))
if args.env == 'MineRLTreechop-v0':
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions]
elif args.env in ['MineRLNavigate-v0', 'MineRLNavigateDense-v0',
'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 2]
elif args.env in ['MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 32]
else:
raise Exception("Unknown environment")
def make_env(env, test):
# wrap env: observation...
# NOTE: wrapping order matters!
if args.use_full_observation:
env = FullObservationSpaceWrapper(env)
elif args.env.startswith('MineRLNavigate'):
env = PoVWithCompassAngleWrapper(env)
else:
env = ObtainPoVWrapper(env)
if test and args.monitor:
env = gym.wrappers.Monitor(
env, os.path.join(args.outdir, 'monitor'),
mode='evaluation' if test else 'training', video_callable=lambda episode_id: True)
if args.frame_skip is not None:
env = FrameSkip(env, skip=args.frame_skip)
# convert hwc -> chw as Chainer requires
env = MoveAxisWrapper(env, source=-1, destination=0,
use_tuple=args.use_full_observation)
#env = ScaledFloatFrame(env)
if args.frame_stack is not None:
env = FrameStack(env, args.frame_stack, channel_order='chw',
use_tuple=args.use_full_observation)
# wrap env: action...
env = BranchedActionWrapper(env, branch_sizes, args.camera_atomic_actions, args.max_range_of_camera)
if test:
env = BranchedRandomizedAction(env, branch_sizes, args.eval_epsilon)
env_seed = test_seed if test else train_seed
env.seed(int(env_seed))
return env
core_env = gym.make(args.env)
env = make_env(core_env, test=False)
eval_env = make_env(core_env, test=True)
# Q function
if args.env.startswith('MineRLNavigate'):
if args.use_full_observation:
base_channels = 3 # RGB
else:
base_channels = 4 # RGB + compass
elif args.env.startswith('MineRLObtain'):
base_channels = 3 # RGB
else:
base_channels = 3 # RGB
if args.frame_stack is None:
n_input_channels = base_channels
else:
n_input_channels = base_channels * args.frame_stack
q_func = CNNBranchingQFunction(branch_sizes,
n_input_channels=n_input_channels,
gradient_rescaling=args.gradient_rescaling,
use_tuple=args.use_full_observation)
def phi(x):
# observation -> NN input
if args.use_full_observation:
pov = np.asarray(x[0], dtype=np.float32)
others = np.asarray(x[1], dtype=np.float32)
return (pov / 255, others)
else:
return np.asarray(x, dtype=np.float32) / 255
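# Epsilon-greedy exploration with epsilon annealed linearly from 1.0 to final_epsilon;
# a random action samples each action branch independently.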
explorer = explorers.LinearDecayEpsilonGreedy(
1.0, args.final_epsilon,
args.final_exploration_frames,
lambda: np.array([np.random.randint(n) for n in branch_sizes]))
# Draw the computational graph and save it in the output directory.
if args.use_full_observation:
sample_obs = tuple([x[None] for x in env.observation_space.sample()])
else:
sample_obs = env.observation_space.sample()[None]
chainerrl.misc.draw_computational_graph(
[q_func(phi(sample_obs))], os.path.join(args.outdir, 'model'))
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
if args.use_noisy_net is None:
opt.setup(q_func)
if args.gradient_rescaling:
opt.add_hook(ScaleGradHook(1 / (1 + len(q_func.branch_sizes))))
if args.gradient_clipping:
opt.add_hook(chainer.optimizer_hooks.GradientClipping(10.0))
# calculate corresponding `steps` and `eval_interval` according to frameskip
maximum_frames = 8640000 # = 1440 episodes if we count an episode as 6000 frames.
if args.frame_skip is None:
steps = maximum_frames
eval_interval = 6000 * 100 # (approx.) every 100 episode (counts "1 episode = 6000 steps")
else:
steps = maximum_frames // args.frame_skip
eval_interval = 6000 * 100 // args.frame_skip # (approx.) every 100 episode (counts "1 episode = 6000 steps")
# Anneal beta from beta0 to 1 throughout training
betasteps = steps / args.update_interval
replay_buffer = PrioritizedDemoReplayBuffer(
args.replay_buffer_size, alpha=0.4,
beta0=0.6, betasteps=betasteps,
error_max=args.error_max,
num_steps=args.num_step_return)
# Fill the demo buffer with expert transitions
if not args.demo:
chosen_dirs = choose_top_experts(args.expert_demo_path, args.n_experts,
logger=logger)
fill_buffer(args.env, chosen_dirs, replay_buffer, args.frame_skip,
args.frame_stack, args.camera_atomic_actions,
args.max_range_of_camera, args.use_full_observation,
logger=logger)
logger.info("Demo buffer loaded with {} transitions".format(
len(replay_buffer)))
def reward_transform(x):
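# Compress reward magnitudes with a signed log transform, preserving the sign of the original reward.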
return np.sign(x) * np.log(1 + np.abs(x))
if args.use_noisy_net is not None and args.use_noisy_net == 'before-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
opt.setup(q_func)
agent = DQfD(q_func, opt, replay_buffer,
gamma=0.99,
explorer=explorer,
n_pretrain_steps=args.n_pretrain_steps,
demo_supervised_margin=args.demo_supervised_margin,
bonus_priority_agent=args.bonus_priority_agent,
bonus_priority_demo=args.bonus_priority_demo,
loss_coeff_nstep=args.loss_coeff_nstep,
loss_coeff_supervised=args.loss_coeff_supervised,
loss_coeff_l2=args.loss_coeff_l2,
gpu=args.gpu,
replay_start_size=args.replay_start_size,
target_update_interval=args.target_update_interval,
clip_delta=args.clip_delta,
update_interval=args.update_interval,
batch_accumulator=args.batch_accumulator,
phi=phi, reward_transform=reward_transform,
minibatch_size=args.minibatch_size)
if args.use_noisy_net is not None and args.use_noisy_net == 'after-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
opt.setup(q_func)
opt.add_hook(
chainer.optimizer_hooks.WeightDecay(args.loss_coeff_l2))
agent.optimizer = opt
agent.target_model = None
agent.sync_target_network()
if args.load:
agent.load(args.load)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs)
logger.info('n_runs: {} mean: {} median: {} stdev: {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'], eval_stats['stdev']))
else:
agent.pretrain()
evaluator = Evaluator(agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
eval_interval=eval_interval,
outdir=args.outdir,
max_episode_len=None,
env=eval_env,
step_offset=0,
save_best_so_far_agent=True,
logger=logger)
# Evaluate the agent BEFORE training begins
evaluator.evaluate_and_update_max_score(t=0, episodes=0)
experiments.train_agent(agent=agent,
env=env,
steps=steps,
outdir=args.outdir,
max_episode_len=None,
step_offset=0,
evaluator=evaluator,
successful_score=None,
step_hooks=[])
env.close()
if __name__ == "__main__":
main()
|
pymclevel/id_definitions_2.py | bennettdc/MCEdit-Unified | 237 | 12789111 | <gh_stars>100-1000
import os
import json
from logging import getLogger
import collections
#from pymclevel import MCEDIT_DEFS, MCEDIT_IDS
#import pymclevel
import re
#import id_definitions
log = getLogger(__name__)
def get_deps(base_version, file_name):
deps = [base_version]
print "Base: {}".format(base_version)
fp = open(os.path.join('mcver', base_version, file_name))
data = json.loads(fp.read())
fp.close()
if "load" in data:
deps.extend(get_deps(data["load"], file_name))
return deps
def update(orig_dict, new_dict):
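# Recursively merge new_dict into orig_dict: nested dicts are merged, lists are concatenated, other values are overwritten.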
for key, val in new_dict.iteritems():
if isinstance(val, collections.Mapping):
tmp = update(orig_dict.get(key, { }), val)
orig_dict[key] = tmp
elif isinstance(val, list):
orig_dict[key] = (orig_dict.get(key, []) + val)
else:
orig_dict[key] = new_dict[key]
return orig_dict
def aggregate(base_version, file_name):
deps = get_deps(base_version, file_name)
deps.reverse()
print deps
aggregate_data = {}
for dep in deps:
fp = open(os.path.join('mcver', dep, file_name))
data = json.loads(fp.read())
fp.close()
update(aggregate_data, data)
print aggregate_data
with open("out.json", 'wb') as out:
json.dump(aggregate_data, out)
#print get_deps("1.12", "entities.json")
aggregate("1.12", "entities.json")
|
Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalIsoTrkProducerFilter_cff.py | malbouis/cmssw | 852 | 12789133 | <reponame>malbouis/cmssw
import FWCore.ParameterSet.Config as cms
#------------------------------------------------
#AlCaReco filtering for HCAL isotrk:
#------------------------------------------------
from Calibration.HcalAlCaRecoProducers.alcaHcalIsotrkProducer_cfi import *
from Calibration.HcalAlCaRecoProducers.alcaHcalIsotrkFilter_cfi import *
seqALCARECOHcalCalIsoTrkProducerFilter = cms.Sequence(alcaHcalIsotrkProducer * alcaHcalIsotrkFilter)
|
{{cookiecutter.project_slug}}/backend/app/app/core/config.py | abnerjacobsen/full-stack | 516 | 12789141 | <filename>{{cookiecutter.project_slug}}/backend/app/app/core/config.py
import os
def getenv_boolean(var_name, default_value=False):
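# Treat "TRUE"/"1" (case-insensitive) as True; return default_value when the variable is unset.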
result = default_value
env_value = os.getenv(var_name)
if env_value is not None:
result = env_value.upper() in ("TRUE", "1")
return result
API_V1_STR = "/api/v1"
SECRET_KEY = os.getenvb(b"SECRET_KEY")
if not SECRET_KEY:
SECRET_KEY = os.urandom(32)
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 8 # 60 minutes * 24 hours * 8 days
SERVER_NAME = os.getenv("SERVER_NAME")
BACKEND_CORS_ORIGINS = os.getenv("BACKEND_CORS_ORIGINS")
PROJECT_NAME = os.getenv("PROJECT_NAME")
SENTRY_DSN = os.getenv("SENTRY_DSN")
POSTGRES_SERVER = os.getenv("POSTGRES_SERVER")
POSTGRES_USER = os.getenv("POSTGRES_USER")
POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD")
POSTGRES_DB = os.getenv("POSTGRES_DB")
SQLALCHEMY_DATABASE_URI = (
f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_SERVER}/{POSTGRES_DB}"
)
FIRST_SUPERUSER = os.getenv("FIRST_SUPERUSER")
FIRST_SUPERUSER_PASSWORD = os.getenv("FIRST_SUPERUSER_PASSWORD")
USERS_OPEN_REGISTRATION = getenv_boolean("USERS_OPEN_REGISTRATION")
|
terrascript/provider/hashicorp/googleworkspace.py | mjuenema/python-terrascript | 507 | 12789147 | # terrascript/provider/hashicorp/googleworkspace.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:17:22 UTC)
import terrascript
class googleworkspace(terrascript.Provider):
"""terraform-provider-googleworkspace"""
__description__ = "terraform-provider-googleworkspace"
__namespace__ = "hashicorp"
__name__ = "googleworkspace"
__source__ = "https://github.com/hashicorp/terraform-provider-googleworkspace"
__version__ = "0.4.1"
__published__ = "2021-08-16T19:18:13Z"
__tier__ = "official"
__all__ = ["googleworkspace"]
|
prompt_tuning/data/preprocessors_test.py | dumpmemory/prompt-tuning | 108 | 12789192 | <reponame>dumpmemory/prompt-tuning
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocessors."""
import os
import textwrap
import unittest.mock as mock
from absl.testing import parameterized
import numpy as np
from prompt_tuning.data import preprocessors
import seqio
from seqio import test_utils
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
TEST_DATA = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "test_data")
INPUTS_SIZE = 10
TARGETS_SIZE = 5
TEXT_SIZE = 10
TEST_T5_FEATURES = {
"inputs": seqio.Feature(
vocabulary=seqio.SentencePieceVocabulary(
os.path.join(TEST_DATA, "t5_vocab"), 100),
add_eos=True,
required=False),
"targets": seqio.Feature(
vocabulary=seqio.SentencePieceVocabulary(
os.path.join(TEST_DATA, "t5_vocab"), 100),
add_eos=True)
}
def create_fake_text_dataset(examples: int = 10, text_size: int = TEXT_SIZE):
text = np.reshape(
# Start at 2 so we skip EOS=1 which could be a problem on any tests that
# actually decode the fake inputs.
np.arange(2, examples * text_size + 2),
(-1, text_size)).astype(np.int32)
return tf.data.Dataset.from_tensor_slices({"targets": text})
class PreprocessorsTest(tf.test.TestCase):
def test_remove_first_text_token(self):
input_strings = ["This is my first example", "The second"]
gold_strings = [" ".join(s.split()[1:]) for s in input_strings]
ds = tf.data.Dataset.from_tensor_slices({"inputs": input_strings})
processed_ds = preprocessors.remove_first_text_token(ds)
for res, gold in zip(processed_ds, gold_strings):
self.assertEqual(res["inputs"].numpy().decode("utf-8"), gold)
def test_add_sentinel_to_beginning(self):
vocab_size = 100
offset = 0
field = "targets"
ds = tf.data.Dataset.from_tensor_slices({
field: tf.zeros([3, 4], dtype=tf.int32),
})
output_features = {
field: mock.MagicMock(vocabulary=mock.MagicMock(vocab_size=vocab_size))
}
processed_ds = preprocessors.add_sentinel_to_beginning(
ds, output_features, field, offset)
for ex in processed_ds:
self.assertEqual(ex[field][0].numpy().item(), vocab_size - (offset + 1))
def test_tsv_to_qa(self):
fake_data = textwrap.dedent("""
id\tcontext\tquestion\tanswer\tanswers
0\tThe capital of France is Paris\tWhat is the capital of France?\tParis\tParis|||paris
1\tAn ant can carry many times it's body weight making it v strong.\tAre ants strong?\tYes\tYes
""".strip("\n"))
ds = tf.data.Dataset.from_tensor_slices(fake_data.split("\n")[1:-1])
ds = preprocessors.preprocess_tsv_to_qa(ds)
gold_data = [{
"id": "0",
"question": "What is the capital of France ? ",
"answer": "Paris",
"answers": ["Paris", "paris"],
"context": "The capital of France is Paris",
"inputs":
"question: What is the capital of France ? context: The capital of"
" France is Paris",
"targets": "Paris"
}, {
"id":
"1",
"question":
"Are ants strong ? ",
"answer":
"Yes",
"answers": ["Yes"],
"context":
"An ant can carry many times it ' s body weight making it v strong . ",
"inputs":
"question: Are ants strong ? context: An ant can carry many times "
"it ' s body weight making it v strong . ",
"targets":
"Yes"
}]
for ex, gold in zip(ds, gold_data):
self.assertEqual(ex["id"].numpy().decode("utf-8"), gold["id"])
self.assertEqual(ex["question"].numpy().decode("utf-8"), gold["question"])
self.assertEqual(ex["answer"].numpy().decode("utf-8"), gold["answer"])
self.assertEqual(ex["context"].numpy().decode("utf-8"), gold["context"])
self.assertEqual(ex["targets"].numpy().decode("utf-8"), gold["targets"])
for answer, gold_answer in zip(ex["answers"].numpy(), gold["answers"]):
self.assertEqual(answer.decode("utf-8"), gold_answer)
def test_preprocess_text_generation(self):
example = tf.data.Dataset.from_tensor_slices({
"source_aligned": {
"en": ["english input"],
"es": ["spanish input"]
},
"target_aligned": {
"en": ["english target"],
"es": ["spanish target"]
}
})
processed_example = preprocessors.preprocess_text_generation(
example,
source_key="source_aligned",
target_key="target_aligned",
task_name=None,
prefix="summarize:",
source_nested_key="en",
target_nested_key="es",
)
test_utils.assert_dataset(processed_example, {
"inputs": "summarize: english input",
"targets": "spanish target"
})
class BARTTaskTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name="text_infilling",
preprocessor=preprocessors.text_infilling),
dict(testcase_name="token_deletion",
preprocessor=preprocessors.token_deletion))
def test_inputs_shorter_than_targets(self, preprocessor):
ds = create_fake_text_dataset()
ds = preprocessor(ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
for ex in tfds.as_numpy(ds):
self.assertLess(ex["inputs"].shape[0], ex["targets"].shape[0])
@parameterized.named_parameters(
dict(testcase_name="text_infilling",
preprocessor=preprocessors.text_infilling),
dict(testcase_name="token_deletion",
preprocessor=preprocessors.token_deletion))
def test_extra_id_not_in_targets(self, preprocessor):
ds = create_fake_text_dataset()
ds = preprocessor(ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
vocab = TEST_T5_FEATURES["targets"].vocabulary
for ex in tfds.as_numpy(ds):
targets_text = vocab.decode(ex["targets"].tolist())
self.assertNotIn("extra_id", targets_text)
@parameterized.named_parameters(
dict(testcase_name="text_infilling",
preprocessor=preprocessors.text_infilling),
dict(testcase_name="token_deletion",
preprocessor=preprocessors.token_deletion))
def test_target_tokens_match_original_tokens(self, preprocessor):
ds = create_fake_text_dataset()
processed_ds = preprocessor(
ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
for processed_ex, ex in zip(tfds.as_numpy(processed_ds), tfds.as_numpy(ds)):
np.testing.assert_array_equal(processed_ex["targets"], ex["targets"])
def test_extra_id_not_in_token_deletion_inputs(self):
ds = create_fake_text_dataset()
ds = preprocessors.token_deletion(
ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
vocab = TEST_T5_FEATURES["inputs"].vocabulary
for ex in tfds.as_numpy(ds):
inputs_text = vocab.decode(ex["inputs"].tolist())
self.assertNotIn("extra_id", inputs_text)
def test_extra_id_in_text_infilling_inputs(self):
ds = create_fake_text_dataset()
ds = preprocessors.text_infilling(
ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
vocab = TEST_T5_FEATURES["inputs"].vocabulary
for ex in tfds.as_numpy(ds):
inputs_text = vocab.decode(ex["inputs"].tolist())
self.assertIn("extra_id", inputs_text)
if __name__ == "__main__":
tf.test.main()
|
neural_compressor/ux/components/model/shape.py | intel/neural-compressor | 172 | 12789198 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data shape class."""
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.json_serializer import JsonSerializer
class Shape(JsonSerializer):
"""Data shape definition."""
def __init__(self, shape: Optional[str] = "", trusted: bool = False) -> None:
"""Object construction."""
super().__init__()
self.shape = shape
self.trusted = trusted
def serialize(
self,
serialization_type: str = "default",
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
"""Serialize Shape class to dict."""
result = {}
for key, value in self.__dict__.items():
if key in self._skip:
continue
result.update({key: value})
return result
|
tools/build/v2/test/testing_primitives.py | mike-code/boost_1_38_0 | 130 | 12789231 | #!/usr/bin/python
# Copyright 2002 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import re
def match_re(actual, expected):
return re.match(expected, actual, re.DOTALL) is not None
t = BoostBuild.Tester(match=match_re)
t.set_tree('testing-primitives')
# We expect t5 and t7's output to be dumped to stdout.
t.run_build_system(stdout=r'''.*failing t5.*failing t7''')
t.expect_addition('t2.txt')
t.expect_addition('t3.txt')
t.expect_addition('t5.out')
t.expect_addition('t6.out')
t.expect_addition('t6.txt')
t.expect_addition('t7.out')
t.expect_addition('t7.txt')
t.expect_addition('t8.out')
t.expect_nothing_more()
t.cleanup()
|
tests/losses/test_neuralndcg.py | almajo/allRank | 473 | 12789249 | <filename>tests/losses/test_neuralndcg.py<gh_stars>100-1000
import math
from functools import partial
from pytest import approx
from allrank.data.dataset_loading import PADDED_Y_VALUE
from tests.losses.utils import neuralNDCG_wrap, ndcg_wrap
test_cases = [{"stochastic": False, "transposed": False},
{"stochastic": True, "transposed": False},
{"stochastic": False, "transposed": True},
{"stochastic": True, "transposed": True}]
def test_neuralNDCG_simple():
for tc in test_cases:
neuralNDCG_simple(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_simple(fun):
y_pred = [0.5, 0.2]
y_true = [1.0, 0.0]
result = fun(y_pred, y_true)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_longer():
for tc in test_cases:
neuralNDCG_longer(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_longer(fun):
y_pred = [0.5, 0.2, 0.1, 0.4, 1.0, -1.0, 0.63]
y_true = [1.0, 2.0, 2.0, 4.0, 1.0, 4.0, 3.0]
result = fun(y_pred, y_true)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_stable_for_very_small_prediction():
for tc in test_cases:
neuralNDCG_stable_for_very_small_prediction(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_stable_for_very_small_prediction(fun):
y_pred = [0.5, -1e30]
y_true = [1.0, 0.0]
result = fun(y_pred, y_true)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_ignores_padded_value():
for tc in test_cases:
neuralNDCG_ignores_padded_value(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_ignores_padded_value(fun):
y_pred = [0.5, 0.2, 0.1, 0.4, 1.0, -1.0, 0.63, 1., 0.5, 0.3]
y_true = [1.0, 2.0, 2.0, 4.0, 1.0, 4.0, 3.0, PADDED_Y_VALUE, PADDED_Y_VALUE, PADDED_Y_VALUE]
result = fun(y_pred, y_true, temperature=0.001)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_at_3():
for tc in test_cases:
neuralNDCG_at_3(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_at_3(fun):
y_pred = [0.5, 0.2, 0.1, 0.4, 1.0, -1.0, 0.63]
y_true = [1.0, 2.0, 2.0, 4.0, 1.0, 4.0, 3.0]
ats = 3
result = fun(y_pred, y_true, k=ats)
expected = ndcg_wrap(y_pred, y_true, ats=[ats])
assert math.isfinite(result)
assert (-1 * result == approx(expected))
|
PyObjCTest/test_nsbitmapimagerep.py | Khan/pyobjc-framework-Cocoa | 132 | 12789323 | <reponame>Khan/pyobjc-framework-Cocoa
from PyObjCTools.TestSupport import *
import objc
import array
import sys
from objc import YES, NO
from AppKit import *
try:
unicode
except NameError:
unicode = str
try:
long
except NameError:
long = int
class TestNSBitmapImageRep(TestCase):
def testInstantiation(self):
# widthxheight RGB 24bpp image
width = 256
height = 256
dataPlanes = (None, None, None, None, None)
dataPlanes = None
i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
self.assertTrue(i1)
i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
self.assertTrue(i2)
def testPixelFormat(self):
width = 16
height = 16
i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0)
self.assertIsInstance(i1, NSBitmapImageRep)
singlePlane = objc.allocateBuffer(width*height*4)
for i in range(0, width*height):
si = i * 4
singlePlane[si] = 1
singlePlane[si+1] = 2
singlePlane[si+2] = 3
singlePlane[si+3] = 4
dataPlanes = (singlePlane, None, None, None, None)
# test non-planar, premade buffer
i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0)
self.assertIsInstance(i2, NSBitmapImageRep)
bitmapData = i2.bitmapData()
self.assertEqual(len(bitmapData), width * height * 4)
def testImageData(self):
width = 256
height = 256
rPlane = array.array('B')
rPlane.fromlist( [y%256 for y in range(0,height) for x in range(0,width)] )
if sys.version_info[0] == 3:
buffer = memoryview
else:
from __builtin__ import buffer
rPlane = buffer(rPlane)
gPlane = array.array('B')
gPlane.fromlist( [y%256 for y in range(0,height) for x in range(width,0,-1)] )
gPlane = buffer(gPlane)
bPlane = array.array('B')
bPlane.fromlist( [x%256 for y in range(0,height) for x in range(0,width)] )
bPlane = buffer(bPlane)
dataPlanes = (rPlane, gPlane, bPlane, None, None)
# test planar, pre-made buffer
i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
self.assertTrue(i1)
singlePlane = objc.allocateBuffer(width*height*3)
for i in range(0, width*height):
si = i * 3
if sys.version_info[0] == 2:
singlePlane[si] = rPlane[i]
singlePlane[si+1] = gPlane[i]
singlePlane[si+2] = bPlane[i]
else:
def as_byte(v):
if isinstance(v, int):
return v
else:
return ord(v)
singlePlane[si] = as_byte(rPlane[i])
singlePlane[si+1] = as_byte(gPlane[i])
singlePlane[si+2] = as_byte(bPlane[i])
dataPlanes = (singlePlane, None, None, None, None)
# test non-planar, premade buffer
i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
# test grey scale
greyPlane = array.array('B')
greyPlane.fromlist( [x%256 for x in range(0,height) for x in range(0,width)] )
greyPlanes = (greyPlane, None, None, None, None)
greyImage = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(greyPlanes, width, height, 8, 1, NO, YES, NSCalibratedWhiteColorSpace, width, 8)
# test planar, NSBIR allocated buffer
i3 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
r,g,b,a,o = i3.getBitmapDataPlanes_()
self.assertTrue(r)
self.assertTrue(g)
self.assertTrue(b)
self.assertTrue(not a)
self.assertTrue(not o)
self.assertEqual(len(r), len(rPlane))
self.assertEqual(len(g), len(gPlane))
self.assertEqual(len(b), len(bPlane))
r[0:len(r)] = rPlane[0:len(rPlane)]
g[0:len(g)] = gPlane[0:len(gPlane)]
b[0:len(b)] = bPlane[0:len(bPlane)]
bitmapData = i2.bitmapData()
self.assertEqual(len(bitmapData), len(singlePlane))
try:
memoryview
except NameError:
self.assertEqual(bitmapData, singlePlane)
else:
self.assertEqual(bitmapData.tobytes(),
singlePlane)
a = array.array('L', [255]*4)
self.assertArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
d = i2.getPixel_atX_y_(a, 1, 1)
self.assertIs(a, d)
class TestBadCreation(TestCase):
# Redirect stderr to /dev/null for the duration of this test,
# NSBitmapImageRep will write an error message to stderr.
def setUp(self):
import os
self.duppedStderr = os.dup(2)
fp = os.open('/dev/null', os.O_RDWR)
os.dup2(fp, 2)
os.close(fp)
def tearDown(self):
import os
os.dup2(self.duppedStderr, 2)
def test_AllocInit(self):
y = NSBitmapImageRep.alloc()
try:
self.assertRaises(ValueError, y.init)
finally:
width = 256
height = 256
dataPlanes = (None, None, None, None, None)
y = y.initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
def testConstants(self):
self.assertEqual(NSTIFFCompressionNone, 1)
self.assertEqual(NSTIFFCompressionCCITTFAX3, 3)
self.assertEqual(NSTIFFCompressionCCITTFAX4, 4)
self.assertEqual(NSTIFFCompressionLZW, 5)
self.assertEqual(NSTIFFCompressionJPEG, 6)
self.assertEqual(NSTIFFCompressionNEXT, 32766)
self.assertEqual(NSTIFFCompressionPackBits, 32773)
self.assertEqual(NSTIFFCompressionOldJPEG, 32865)
self.assertEqual(NSTIFFFileType, 0)
self.assertEqual(NSBMPFileType, 1)
self.assertEqual(NSGIFFileType, 2)
self.assertEqual(NSJPEGFileType, 3)
self.assertEqual(NSPNGFileType, 4)
self.assertEqual(NSJPEG2000FileType, 5)
self.assertEqual(NSImageRepLoadStatusUnknownType, -1)
self.assertEqual(NSImageRepLoadStatusReadingHeader, -2)
self.assertEqual(NSImageRepLoadStatusWillNeedAllData, -3)
self.assertEqual(NSImageRepLoadStatusInvalidData, -4)
self.assertEqual(NSImageRepLoadStatusUnexpectedEOF, -5)
self.assertEqual(NSImageRepLoadStatusCompleted, -6)
self.assertEqual(NSAlphaFirstBitmapFormat, 1 << 0)
self.assertEqual(NSAlphaNonpremultipliedBitmapFormat, 1 << 1)
self.assertEqual(NSFloatingPointSamplesBitmapFormat, 1 << 2)
self.assertIsInstance(NSImageCompressionMethod, unicode)
self.assertIsInstance(NSImageCompressionFactor, unicode)
self.assertIsInstance(NSImageDitherTransparency, unicode)
self.assertIsInstance(NSImageRGBColorTable, unicode)
self.assertIsInstance(NSImageInterlaced, unicode)
self.assertIsInstance(NSImageColorSyncProfileData, unicode)
self.assertIsInstance(NSImageFrameCount, unicode)
self.assertIsInstance(NSImageCurrentFrame, unicode)
self.assertIsInstance(NSImageCurrentFrameDuration, unicode)
self.assertIsInstance(NSImageLoopCount, unicode)
self.assertIsInstance(NSImageGamma, unicode)
self.assertIsInstance(NSImageProgressive, unicode)
self.assertIsInstance(NSImageEXIFData, unicode)
self.assertIsInstance(NSImageFallbackBackgroundColor, unicode)
def testTiffCompression(self):
lst, nr = NSBitmapImageRep.getTIFFCompressionTypes_count_(None, None)
self.assertIsInstance(lst, tuple)
self.assertIsInstance(nr, (int, long))
self.assertEqual(len(lst), nr)
self.assertNotEqual(len(lst), 0)
self.assertIsInstance(lst[0], (int, long))
def testMethods(self):
self.assertResultIsBOOL(NSBitmapImageRep.isPlanar)
self.assertResultIsBOOL(NSBitmapImageRep.canBeCompressedUsing_)
self.assertArgIsBOOL(NSBitmapImageRep.incrementalLoadFromData_complete_, 1)
self.assertArgIsOut(NSBitmapImageRep.getCompression_factor_, 0)
self.assertArgIsOut(NSBitmapImageRep.getCompression_factor_, 1)
if __name__ == '__main__':
main( )
|
apps/async_task/utils.py | goztrk/django-htk | 206 | 12789356 | <reponame>goztrk/django-htk
# Python Standard Library Imports
import base64
import json
def build_async_task_result(content, content_type, filename):
"""Builds an Async Task result from JSON
This is necessary if we want to return multiple values, as the result by default is just a plain string.
"""
payload = {
'content' : base64.b64encode(content),
'content_type' : content_type,
'filename' : filename,
}
result = json.dumps(payload)
return result
def extract_async_task_result_json_values(result_data):
"""Companion function to perform the inverse of `build_async_task_result()`
"""
payload = json.loads(result_data)
content = base64.b64decode(payload['content'])
content_type = payload['content_type']
filename = payload['filename']
return (content, content_type, filename,)
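# Hypothetical usage sketch: round-trip a small CSV attachment through the two
# helpers above. The sample payload values are invented for illustration.
if __name__ == '__main__':
    packed = build_async_task_result(b'a,b\n1,2\n', 'text/csv', 'report.csv')
    content, content_type, filename = extract_async_task_result_json_values(packed)
    assert (content, content_type, filename) == (b'a,b\n1,2\n', 'text/csv', 'report.csv')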
|
foliant/preprocessors/base.py | foliant-docs/foliant | 105 | 12789371 | import re
from logging import Logger
from typing import Dict
import yaml
OptionValue = int or float or bool or str
class BasePreprocessor():
'''Base preprocessor. All preprocessors must inherit from this one.'''
# pylint: disable=too-many-instance-attributes
defaults = {}
tags = ()
@staticmethod
def get_options(options_string: str) -> Dict[str, OptionValue]:
'''Get a dictionary of typed options from a string with XML attributes.
:param options_string: String of XML attributes
:returns: Dictionary with options
'''
if not options_string:
return {}
option_pattern = re.compile(
r'(?P<key>[A-Za-z_:][0-9A-Za-z_:\-\.]*)=(\'|")(?P<value>.+?)\2',
flags=re.DOTALL
)
return {
option.group('key'): yaml.load(option.group('value'), yaml.Loader)
for option in option_pattern.finditer(options_string)
}
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False, options={}):
# pylint: disable=dangerous-default-value
# pylint: disable=too-many-arguments
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.options = {**self.defaults, **options}
self.working_dir = self.project_path / self.config['tmp_dir']
if self.tags:
self.pattern = re.compile(
rf'(?<!\<)\<(?P<tag>{"|".join(self.tags)})' +
r'(\s(?P<options>[^\<\>]*))?\>' +
r'(?P<body>.*?)\<\/(?P=tag)\>',
flags=re.DOTALL
)
def apply(self):
'''Run the preprocessor against the project directory. Must be implemented
by every preprocessor.
'''
raise NotImplementedError
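# Hypothetical usage sketch: get_options is a plain static helper, so it can be
# exercised without constructing a preprocessor. The attribute string below is
# invented for illustration.
if __name__ == '__main__':
    opts = BasePreprocessor.get_options('caption="Example figure" width=\'100\' draft="true"')
    print(opts)  # expected: {'caption': 'Example figure', 'width': 100, 'draft': True}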
|
tests/common/test_op/ascend/fake_quant_with_min_max_args.py | tianjiashuo/akg | 286 | 12789408 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: fake_quant_with_min_max_args"""
import akg
from akg import tvm, topi
from akg.utils.format_transform import get_shape
import akg.utils as utils
from akg.ops.math.ascend import Floor
def nudge_min_max(min, max, num_bits, narrow_range):
"""
Calculate the maximum and minimum values of the quantization
Args:
min: scalar, input min
max: input max
num_bits: scalar
Defaults to 8. num_bits is the bitwidth of the quantization, range [2,16]
narrow_range: bool
Returns:
nudged_min, nudged_max, scale
"""
quant_max = (2**num_bits) - 1
if narrow_range is False:
quant_min = 0.00
else:
quant_min = 1.00
scale = (max - min) / (float(quant_max) - quant_min)
zero_point_from_min = quant_min - min / scale
# Calculate the maximum and minimum values of the quantization
if zero_point_from_min < quant_min:
nudged_zero_point = quant_min
elif zero_point_from_min > quant_max:
nudged_zero_point = quant_max
else:
nudged_zero_point = (zero_point_from_min + 0.5) // 1
nudged_min = (quant_min - nudged_zero_point) * scale
nudged_max = (quant_max - nudged_zero_point) * scale
return nudged_min, nudged_max, scale
@utils.check_input_type(tvm.tensor.Tensor,
(float, int, type(None)),
(float, int, type(None)),
(int, type(None)),
(bool, type(None)))
def fake_quant_with_min_max_args(input_data, min=-6, max=6, num_bits=8,
narrow_range=False):
"""
Computes Fake-quantize the 'input_data' tensor,
type float32 to 'output_data' tensor of same type
output_data = (floor(clamped_shifted * inv_nudged_scale + 0.5f))) * scale
+ nudged_min
scale = (max-min) / (quant_max-quant_min)
Args:
        input_data (tvm.tensor.Tensor): Tensor of dtype "float32"
min ([float, int]): scalar, defaults to -6
max ([float, int]): scalar, defaults to 6. [min; max] define the
clamping range for the input_data data
num_bits ([float, int]): Defaults to 8. num_bits is the bitwidth
of the quantization,between 2 and 16
narrow_range ([bool]):
True, quantized into the quantization range [1; 2^num_bits - 1]
False,quantized into the quantization range [0; 2^num_bits - 1]
Returns:
tvm.tensor.Tensor
"""
shape = get_shape(input_data)
utils.check_shape(shape)
dtype = input_data.dtype
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.FLOAT32)
nudged_min, nudged_max, scale = nudge_min_max(min, max, num_bits,
narrow_range)
zero_tensor = tvm.compute(input_data.shape,
lambda *i: tvm.const(0, dtype="float32"),
name="zero_tensor")
nudged_max_tensor = topi.add(zero_tensor, nudged_max)
nudged_min_tensor = topi.add(zero_tensor, nudged_min)
inv_nudged_scale = 1.00 / scale
# Transform the input between nudged_max and nudged_min
clamped_vmin = topi.minimum(input_data, nudged_max_tensor)
clamped = topi.maximum(clamped_vmin, nudged_min_tensor)
# Calculate the quantized and dequantized results
clamped_shifted = topi.subtract(clamped, nudged_min_tensor)
vmul_shifted = topi.multiply(clamped_shifted, inv_nudged_scale)
vadds_shifted = topi.add(vmul_shifted, 0.5)
floor_vadds_shifted = Floor(vadds_shifted)
floor_cast = akg.lang.ascend.cast_to(floor_vadds_shifted, dtype)
res_scale = topi.multiply(floor_cast, scale)
res = topi.add(res_scale, nudged_min_tensor)
return res
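# Worked example (illustrative only; it only needs plain Python arithmetic, but
# importing this file still requires the akg/tvm modules above to resolve): with
# the defaults min=-6, max=6, num_bits=8, narrow_range=False the helper gives
# scale = 12 / 255 ~= 0.0471 and zero_point_from_min = 127.5, which is nudged to
# 128, so nudged_min ~= -6.024 and nudged_max ~= 5.976.
if __name__ == '__main__':
    print(nudge_min_max(-6, 6, 8, False))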
|
analytics/object/language.py | dpatel257/Smart-City-Sample | 126 | 12789412 | #!/usr/bin/python3
text={
"object-detection": "object-detection",
"svcq-counting": "svcq-counting",
}
|
rollbar/contrib/django/tests.py | arthurio/pyrollbar | 177 | 12789424 | """
Unit tests
"""
from django.test import TestCase
from django.conf import settings
class BasicTests(TestCase):
def test_configuration(self):
"""
Test that the configuration is sane.
"""
self.assertTrue('ROLLBAR' in dir(settings),
msg='The ROLLBAR setting is not present.')
self.assertTrue(settings.ROLLBAR.get('access_token'),
msg='The ROLLBAR["access_token"] setting is blank.')
|
integration_test/test_heartbeat_checker.py | lynix94/nbase-arc | 176 | 12789486 | #
# Copyright 2015 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import testbase
import util
import time
import gateway_mgmt
import redis_mgmt
import smr_mgmt
import default_cluster
import config
import load_generator
import telnet
import json
import constant as c
class TestHeartbeatChecker( unittest.TestCase ):
cluster = config.clusters[0]
leader_cm = config.clusters[0]['servers'][0]
max_load_generator = 1
load_gen_thrd_list = {}
key_base = 'key_thbc'
@classmethod
def setUpClass( cls ):
return 0
@classmethod
def tearDownClass( cls ):
return 0
def setUp( self ):
util.set_process_logfile_prefix( 'TestHeartbeatChecker_%s' % self._testMethodName )
self.conf_checker = default_cluster.initialize_starting_up_smr_before_redis( self.cluster )
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown( self ):
testbase.defaultTearDown(self)
def getseq_log(self, s):
smr = smr_mgmt.SMR( s['id'] )
try:
ret = smr.connect( s['ip'], s['smr_mgmt_port'] )
if ret != 0:
return
smr.write( 'getseq log\r\n' )
response = smr.read_until( '\r\n', 1 )
util.log('getseq log (pgs%d) = %s' % (s['id'], response[:-2]))
smr.disconnect()
except IOError:
pass
def get_expected_smr_state( self, server, expected, max_try=60 ):
for i in range( 0, max_try ):
state = util.get_smr_state( server, self.leader_cm )
if state == expected:
break;
time.sleep( 1 )
return state
def state_transition( self ):
server = util.get_server_by_role( self.cluster['servers'], 'slave' )
self.assertNotEquals( server, None, 'failed to get_server_by_role-slave' )
# get gateway info
ip, port = util.get_rand_gateway( self.cluster )
gw = gateway_mgmt.Gateway( self.cluster['servers'][0]['id'] )
# check initial state
state = self.get_expected_smr_state( server, 'N' )
role = util.get_role_of_server( server )
self.assertEquals( 'N', state,
'server%d - state:%s, role:%s, expected:N' % (server['id'], state, role) )
# shutdown
ret = testbase.request_to_shutdown_smr( server )
self.assertEquals( ret, 0, 'failed to shutdown smr' )
ret = testbase.request_to_shutdown_redis( server )
self.assertEquals( ret, 0, 'failed to shutdown redis' )
time.sleep( 3 )
# check state F
expected = 'F'
state = self.get_expected_smr_state( server, expected )
self.assertEquals( expected , state,
'server%d - state:%s, but expected:%s' % (server['id'], state, expected) )
# set value
ret = gw.connect( ip, port )
self.assertEquals( ret, 0, 'failed to connect to gateway, %s:%d' % (ip, port) )
timestamp = 0.0
for i in range( 0, 100 ):
timestamp = time.time()
key = 'new_key_haha'
cmd = 'set %s %f\r\n' % (key, timestamp)
gw.write( cmd )
res = gw.read_until( '\r\n' )
self.assertEquals( res, '+OK\r\n' )
gw.disconnect()
# recovery
ret = testbase.request_to_start_smr( server )
self.assertEquals( ret, 0, 'failed to start smr' )
ret = testbase.request_to_start_redis( server )
self.assertEquals( ret, 0, 'failed to start redis' )
ret = testbase.wait_until_finished_to_set_up_role( server, 10 )
self.assertEquals( ret, 0, 'failed to role change. smr_id:%d' % (server['id']) )
time.sleep( 5 )
redis = redis_mgmt.Redis( server['id'] )
ret = redis.connect( server['ip'], server['redis_port'] )
self.assertEquals( ret, 0, 'failed to connect to redis' )
# check state N
expected = 'N'
max_try = 20
for i in range( 0, max_try ):
state = self.get_expected_smr_state( server, expected )
if state == expected:
break
time.sleep( 1 )
role = util.get_role_of_server( server )
self.assertEquals( expected , state,
'server%d - state:%s, role:%s, but expected:%s' % (server['id'], state, role, expected) )
def test_1_state_transition( self ):
util.print_frame()
self.state_transition()
def get_mss( self ):
# get master, slave1, and slave2
master = util.get_server_by_role( self.cluster['servers'], 'master' )
self.assertNotEquals( master, None, 'failed to get master' )
slave1 = util.get_server_by_role( self.cluster['servers'], 'slave' )
self.assertNotEquals( slave1, None, 'failed to get slave1' )
slave2 = None
for server in self.cluster['servers']:
id = server['id']
if id != master['id'] and id != slave1['id']:
slave2 = server
break
self.assertNotEquals( slave2, None, 'failed to get slave2' )
return master, slave1, slave2
def test_2_consistent_after_failover( self ):
util.print_frame()
for i in range(3):
util.log('loop %d' % i)
self.consistent_after_failover()
def consistent_after_failover( self ):
max = 10000
wait_count = 15
key = 'caf'
# get master, slave1, and slave2
master, slave1, slave2 = self.get_mss()
# set value
ip, port = util.get_rand_gateway( self.cluster )
gw = gateway_mgmt.Gateway( ip )
gw.connect( ip, port )
for i in range( 0, max ):
cmd = 'set %s%d %d\r\n' % (key, i, i)
gw.write( cmd )
res = gw.read_until( '\r\n' )
self.assertEquals( res, '+OK\r\n' )
time.sleep( 5 )
# shutdown
servers = [master, slave1, slave2]
for server in servers:
util.log('before shutdown pgs%d' % server['id'])
for s in servers:
self.getseq_log(s)
ret = testbase.request_to_shutdown_smr( server )
self.assertEqual( ret, 0, 'failed to shutdown smr, server:%d' % server['id'] )
ret = testbase.request_to_shutdown_redis( server )
self.assertEquals( ret, 0, 'failed to shutdown redis' )
time.sleep( 5 )
# check state F
for server in servers:
state = self.get_expected_smr_state( server, 'F' )
self.assertEquals( 'F', state,
'server%d - state:%s' % (server['id'], state) )
# recovery
for server in servers:
ret = testbase.request_to_start_smr( server )
self.assertEqual( ret, 0, 'failed to start smr, server:%d' % server['id'] )
ret = testbase.request_to_start_redis( server, False )
self.assertEqual( ret, 0, 'failed to start redis, server:%d' % server['id'] )
util.log('after restart pgs%d' % server['id'])
for s in servers:
self.getseq_log(s)
time.sleep( 5 )
# wait for master election
for i in xrange(10):
ret = util.check_cluster( self.cluster['cluster_name'], self.leader_cm['ip'], self.leader_cm['cm_port'] )
if ret:
break
time.sleep(1)
# check state
for server in servers:
ret = testbase.wait_until_finished_to_set_up_role( server, wait_count )
self.assertEquals( ret, 0, 'failed to role change. server:%d' % (server['id']) )
state = self.get_expected_smr_state( server, 'N' )
role = util.get_role_of_server( server )
self.assertEquals( 'N', state,
'server%d - state:%s, role:%s' % (server['id'], state, role) )
the_number_of_master = 0
the_number_of_slave = 0
for server in servers:
role = util.get_role_of_server( server )
if role == c.ROLE_MASTER:
the_number_of_master = the_number_of_master + 1
elif role == c.ROLE_SLAVE:
the_number_of_slave = the_number_of_slave + 1
self.assertTrue( 1 == the_number_of_master and 2 == the_number_of_slave,
'failed to set roles, the number of master:%d, the number of slave:%d' %
(the_number_of_master, the_number_of_slave) )
# get master, slave1, and slave2
master, slave1, slave2 = self.get_mss()
# connect to a master`s redis and set data
redis = redis_mgmt.Redis( master['id'] )
ret = redis.connect( master['ip'], master['redis_port'] )
self.assertEquals( ret, 0, 'failed to connect to redis, server:%d' % master['id'] )
for i in range( max, max*2 ):
cmd = 'set %s%d %d\r\n' % (key, i, i)
redis.write( cmd )
res = redis.read_until( '\r\n' )
self.assertEquals( res, '+OK\r\n',
'failed to get response, server:%d' % master['id'] )
redis.disconnect()
# check slaves`s data
slaves = [slave1, slave2]
for slave in slaves:
slave_redis = redis_mgmt.Redis( slave['id'] )
ret = slave_redis .connect( slave['ip'], slave['redis_port'] )
self.assertEquals( ret, 0, 'failed to connect to redis, server:%d' % slave['id'] )
for i in range( 0, max*2 ):
cmd = 'get %s%d\r\n' % (key, i)
slave_redis.write( cmd )
trash = slave_redis.read_until( '\r\n' )
res = slave_redis.read_until( '\r\n' )
self.assertEquals( res, '%d\r\n' % i,
'inconsistent, server:%d, expected %d but %s' % (slave['id'], i, res) )
slave_redis.disconnect()
def test_3_heartbeat_target_connection_count( self ):
util.print_frame()
util.log( 'wait until all connections are established' )
for i in range(1, 8):
time.sleep(1)
util.log( '%d sec' % i )
# check pgs
for server in self.cluster['servers']:
before_cnt_redis = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
before_cnt_smr = util.get_clients_count_of_smr(server['smr_mgmt_port'])
cmd = 'pgs_leave %s %d forced' % (self.cluster['cluster_name'], server['id'])
ret = util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
jobj = json.loads(ret)
self.assertEqual( jobj['state'], 'success', 'failed : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
# check redis
success = False
for i in range(5):
after_cnt = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
if after_cnt <= 2:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
            util.log( 'succeeded : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
# check smr
success = False
expected = 1
for i in range(5):
after_cnt = util.get_clients_count_of_smr(server['smr_mgmt_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
            util.log( 'succeeded : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
# Go back to initial configuration
self.assertTrue(util.pgs_join(self.leader_cm['ip'], self.leader_cm['cm_port'], server['cluster_name'], server['id']),
'failed to join pgs %d' % server['id'])
# check gateway
for server in self.cluster['servers']:
before_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
cmd = 'gw_del %s %d' % (self.cluster['cluster_name'], server['id'])
ret = util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
jobj = json.loads(ret)
self.assertEqual( jobj['state'], 'success', 'failed : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
success = False
expected = 1
for i in range(5):
after_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
            util.log( 'succeeded : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
# Go back to initial configuration
self.assertTrue(util.gw_add(server['cluster_name'], server['id'], server['pm_name'], server['ip'], server['gateway_port'], self.leader_cm['ip'], self.leader_cm['cm_port']),
'failed to add gw %d' % server['id'])
def test_4_elect_master_randomly( self ):
util.print_frame()
for i in range(1):
self.elect_master_randomly()
def elect_master_randomly( self ):
# set data
ip, port = util.get_rand_gateway(self.cluster)
gw = gateway_mgmt.Gateway( '0' )
gw.connect( ip, port )
for i in range( 0, 1000 ):
cmd = 'set %s%d %d\r\n' % (self.key_base, i, i)
gw.write( cmd )
res = gw.read_until( '\r\n' )
self.assertEqual( res, '+OK\r\n', 'failed to set values to gw(%s:%d). cmd:%s, res:%s' % (ip, port, cmd[:-2], res[:-2]) )
server_ids = []
for server in self.cluster['servers']:
server_ids.append( server['id'] )
for try_cnt in range( 30 ):
# get master, slave1, slave2
m, s1, s2 = util.get_mss( self.cluster )
self.assertNotEqual( m, None, 'master is None.' )
self.assertNotEqual( s1, None, 'slave1 is None.' )
self.assertNotEqual( s2, None, 'slave2 is None.' )
util.log( 'master id : %d' % m['id'] )
if try_cnt != 0:
if m['id'] in server_ids:
server_ids.remove( m['id'] )
smr = smr_mgmt.SMR( m['id'] )
ret = smr.connect( m['ip'], m['smr_mgmt_port'] )
self.assertEqual( ret, 0, 'failed to connect to master. %s:%d' % (m['ip'], m['smr_mgmt_port']) )
cmd = 'role lconn\r\n'
smr.write( cmd )
reply = smr.read_until( '\r\n' )
self.assertEqual( reply, '+OK\r\n', 'failed : cmd="%s", reply="%s"' % (cmd[:-2], reply[:-2]) )
util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd[:-2], reply[:-2]) )
# wait until role-change is finished
for role_change_try_cnt in range( 5 ):
count_master = 0
count_slave = 0
for server in self.cluster['servers']:
real_role = util.get_role_of_server( server )
real_role = util.roleNumberToChar( real_role )
if real_role == 'M':
count_master = count_master + 1
elif real_role == 'S':
count_slave = count_slave + 1
if count_master == 1 and count_slave == 2:
break;
time.sleep( 1 )
# check the number of master and slave
self.assertEqual( count_master, 1, 'failed : the number of master is not 1, count_master=%d, count_slave=%d' % (count_master, count_slave) )
self.assertEqual( count_slave, 2, 'failed : the number of slave is not 2, count_master=%d, count_slave=%d' % (count_master, count_slave) )
util.log( 'succeeded : the number of master is 1 and the number of slave is 2' )
# check states of all pgs in pg
for try_cnt in range( 3 ):
ok = True
for s in self.cluster['servers']:
real_role = util.get_role_of_server( s )
real_role = util.roleNumberToChar( real_role )
smr_info = util.get_smr_info( s, self.leader_cm )
cc_role = smr_info['smr_Role']
cc_hb = smr_info['hb']
if cc_hb != 'Y':
ok = False
if real_role != cc_role:
ok = False
if ok:
util.log( 'succeeded : a role of real pgs is the same with a role in cc, id=%d, real=%s, cc=%s, hb=%s' % (s['id'], real_role, cc_role, cc_hb) )
else:
util.log( '\n\n**********************************************************\n\nretry: a role of real pgs is not the same with a role in cc, id=%d, real=%s, cc=%s, hb=%s' % (s['id'], real_role, cc_role, cc_hb) )
if ok == False:
time.sleep( 0.5 )
else:
break
self.assertTrue( ok, 'failed : role check' )
if len( server_ids ) == 0:
util.log( 'succeeded : all smrs have been as a master' )
return 0
self.assertEqual( 0, len( server_ids ) , 'failed : remains server ids=[%s]' % (','.join('%d' % id for id in server_ids)) )
return 0
def test_5_from_n_to_1_heartbeat_checkers( self ):
util.print_frame()
for i in range( 0, len( self.cluster['servers'] ) - 1 ):
util.log( 'loop %d' % i )
server = self.cluster['servers'][i]
self.assertEquals( 0, testbase.request_to_shutdown_cm( server ),
'failed to request_to_shutdown_cm, server:%d' % server['id'] )
time.sleep( 20 )
self.leader_cm = self.cluster['servers'][i+1]
self.match_cluster_info(self.leader_cm['ip'], self.leader_cm['cm_port'], self.cluster)
self.state_transition()
# Go back to initial configuration
self.assertTrue(util.recover_confmaster(self.cluster, [0,1], 0),
'failed to recover confmaster.')
def test_6_from_3_to_6_heartbeat_checkers( self ):
util.print_frame()
hbc_svr_list = []
i = 5000 + len( self.cluster['servers'] )
for server in self.cluster['servers']:
i = i + 1
hbc_svr = {}
hbc_svr['id'] = i
hbc_svr['ip'] = server['ip']
hbc_svr['zk_port'] = server['zk_port']
hbc_svr_list.append(hbc_svr)
ret = testbase.setup_cm( i )
self.assertEquals( 0, ret, 'failed to copy heartbeat checker, server:%d' % hbc_svr['id'] )
ret = testbase.request_to_start_cm( i, i )
self.assertEquals( 0, ret,
'failed to request_to_start_cm, server:%d' % hbc_svr['id'] )
self.state_transition()
# Go back to initial configuration
for hbc_svr in hbc_svr_list:
self.assertEqual(0, testbase.request_to_shutdown_cm(hbc_svr),
'failed to shutdown confmaster')
def test_7_remaining_hbc_connection( self ):
util.print_frame()
# check pgs
for server in self.cluster['servers']:
before_cnt_redis = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
before_cnt_smr = util.get_clients_count_of_smr(server['smr_mgmt_port'])
cmd = 'pgs_leave %s %d forced\r\npgs_del %s %d' % (self.cluster['cluster_name'], server['id'], self.cluster['cluster_name'], server['id'])
util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
for server in self.cluster['servers']:
# check redis
success = False
for i in range(5):
after_cnt = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
if after_cnt <= 2:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
            util.log( 'succeeded : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
# check smr
success = False
expected = 0
for i in range(5):
after_cnt = util.get_clients_count_of_smr(server['smr_mgmt_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
            util.log( 'succeeded : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
# check gateway
for server in self.cluster['servers']:
before_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
cmd = 'gw_del %s %d' % (self.cluster['cluster_name'], server['id'])
util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
for server in self.cluster['servers']:
success = False
expected = 1
for i in range(5):
after_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
            util.log( 'succeeded : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
# Go back to initial configuration
# Cleanup PG
self.assertTrue(util.cm_success(util.cm_command(
self.leader_cm['ip'], self.leader_cm['cm_port'],
'pg_del %s %d' % (self.cluster['cluster_name'], self.cluster['servers'][0]['pg_id'])))[0])
# Cleanup processes of PGS and GW
for s in self.cluster['servers']:
self.assertEqual(0, util.shutdown_redis(s['id'], s['redis_port']),
'failed to kill redis %d process' % s['id'])
self.assertEqual(0, util.shutdown_smr(s['id'], s['ip'], s['smr_base_port']),
'failed to kill smr %d process' % s['id'])
self.assertEqual(0, util.shutdown_gateway(s['id'], s['gateway_port']),
'failed to kill gw %d process' % s['id'])
# Recover PG
self.assertTrue(
util.install_pg(self.cluster, self.cluster['servers'], self.cluster['servers'][0], start_gw=True),
'failed to recover PGS and GW in a PM')
def match_cluster_info(self, cm_ip, cm_port, cluster):
# Cluster
cluster_info = util.cluster_info(cm_ip, cm_port, cluster['cluster_name'])['cluster_info']
self.assertEquals(cluster_info['PN_PG_Map'], '0 8192')
self.assertEquals(cluster_info['Key_Space_Size'], 8192)
# PG
for pg_id in cluster['pg_id_list']:
pg = util.pg_info(cm_ip, cm_port, cluster['cluster_name'], pg_id)
self.assertIsNotNone(pg)
for s in self.cluster['servers']:
# GW
gw_info = util.get_gw_info(cm_ip, cm_port, cluster['cluster_name'], s['id'])
self.assertEquals(gw_info['port'], s['gateway_port'])
self.assertEquals(gw_info['state'], 'N')
self.assertEquals(gw_info['hb'], 'Y')
self.assertEquals(gw_info['pm_Name'], s['pm_name'])
self.assertEquals(gw_info['pm_IP'], s['ip'])
# PGS
pgs_info = util.get_pgs_info(cm_ip, cm_port, cluster['cluster_name'], s['id'])
self.assertEquals(pgs_info['pg_ID'], s['pg_id'])
self.assertEquals(pgs_info['pm_Name'], s['pm_name'])
self.assertEquals(pgs_info['pm_IP'], s['ip'])
self.assertEquals(pgs_info['backend_Port_Of_Redis'], s['redis_port'])
self.assertEquals(pgs_info['replicator_Port_Of_SMR'], s['smr_base_port'])
self.assertEquals(pgs_info['management_Port_Of_SMR'], s['smr_mgmt_port'])
self.assertEquals(pgs_info['state'], 'N')
self.assertEquals(pgs_info['hb'], 'Y')
self.assertEquals(pgs_info['color'], 'GREEN')
self.assertTrue(pgs_info['smr_Role'] == 'M' or pgs_info['smr_Role'] == 'S')
self.assertEquals(pgs_info['old_master_version'], '201')
|
pyxtal/miscellaneous/bugs/bug.py | ubikpt/PyXtal | 127 | 12789507 | from pyxtal import pyxtal
from ase.io import read
from ase.spacegroup.symmetrize import prep_symmetry
from spglib import get_symmetry_dataset
#ans1 = get_symmetry_dataset(s, symprec=1e-2)
#print(ans1)
s = pyxtal()
s.from_seed('bug.vasp', tol=1e-2)
print(s)
#s1=s.subgroup(eps=0.1, group_type='t+k', max_cell=4)
#for a in s1:
# print(a)
#s1=s.subgroup(eps=0.1, group_type='k', max_cell=4)
#for a in s1:
# print(a)
#permutation = {"C":"Si", "Si":"C"}
#for i in range(100):
# struc = s.subgroup_once(0.01, None, permutation, max_cell=1)
# print(struc.group.number, struc.formula)
for i in range(100):
struc = s.subgroup_once(0.2, None, None, 't+k', max_cell=2)
print(struc.group.number, struc.formula)
#for i in range(1000):
# struc = s.subgroup_with_substitution(permutation, once=True, max_cell=4)
# print(struc)
|
server/zmq_server_pirate.py | merlinran/acorn-precision-farming-rover | 143 | 12789533 | """
*********************************************************************
This file is part of:
The Acorn Project
https://wwww.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 <NAME>, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
# Modified from example file
# Paranoid Pirate Worker by <NAME> <dln(at)eintr(dot)org>
from random import randint
import time
import zmq
import redis
import zmq_server
# keep the two imported to keep pickle working
# TODO: avoid this by moving the class defs to a separate module.
from master_process import Robot, RobotCommand
REDIS_PORT = 6379
HEARTBEAT_LIVENESS = 3
HEARTBEAT_INTERVAL = 1
INTERVAL_INIT = 1
INTERVAL_MAX = 32
# Paranoid Pirate Protocol constants
PPP_READY = b"\x01" # Signals worker is ready
PPP_HEARTBEAT = b"\x02" # Signals worker heartbeat
def worker_socket(context, poller):
"""Helper function that returns a new configured socket
connected to the Paranoid Pirate queue"""
worker = context.socket(zmq.DEALER) # DEALER
identity = b"%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
worker.setsockopt(zmq.IDENTITY, identity)
poller.register(worker, zmq.POLLIN)
worker.connect("tcp://localhost:5569")
worker.send(PPP_READY)
return worker
def main():
r = redis.Redis(host='localhost', port=REDIS_PORT)
context = zmq.Context(1)
poller = zmq.Poller()
liveness = HEARTBEAT_LIVENESS
interval = INTERVAL_INIT
heartbeat_at = time.time() + HEARTBEAT_INTERVAL
worker = worker_socket(context, poller)
cycles = 0
while True:
socks = dict(poller.poll(HEARTBEAT_INTERVAL * 1000))
# Handle worker activity on backend
if socks.get(worker) == zmq.POLLIN:
# Get message
# - 3-part envelope + content -> request
# - 1-part HEARTBEAT -> heartbeat
frames = worker.recv_multipart()
if not frames:
break # Interrupted
if len(frames) >= 5:
cycles += 1
print("I: Normal reply")
# print(len(frames))
# print(frames)
ident, zero_frame, idx, command, key, msg = frames
return_command, reply = zmq_server.handle_command(r, command, key, msg)
worker.send_multipart([ident, zero_frame, idx, return_command, reply])
# worker.send_multipart(frames)
liveness = HEARTBEAT_LIVENESS
elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
print("I: Queue heartbeat")
liveness = HEARTBEAT_LIVENESS
else:
print("E: Invalid message: %s" % frames)
interval = INTERVAL_INIT
else:
liveness -= 1
if liveness == 0:
print("W: Heartbeat failure, can't reach queue")
print("W: Reconnecting in %0.2fs..." % interval)
time.sleep(interval)
if interval < INTERVAL_MAX:
interval *= 2
poller.unregister(worker)
worker.setsockopt(zmq.LINGER, 0)
worker.close()
worker = worker_socket(context, poller)
liveness = HEARTBEAT_LIVENESS
if time.time() > heartbeat_at:
heartbeat_at = time.time() + HEARTBEAT_INTERVAL
print("I: Worker heartbeat")
worker.send(PPP_HEARTBEAT)
if __name__ == "__main__":
main()
|
fastmri_recon/evaluate/scripts/dealiasing_eval.py | samiulshuvo/fastmri-reproducible-benchmark | 105 | 12789560 | import os
from tqdm import tqdm
from fastmri_recon.config import *
from fastmri_recon.data.datasets.fastmri_pyfunc import train_masked_kspace_dataset_from_indexable as singlecoil_dataset
from fastmri_recon.evaluate.metrics.np_metrics import METRIC_FUNCS, Metrics
from fastmri_recon.models.subclassed_models.denoisers.proposed_params import build_model_from_specs
from fastmri_recon.models.subclassed_models.multiscale_complex import MultiscaleComplex
def evaluate_xpdnet_dealiasing(
model_fun,
model_kwargs,
run_id,
n_scales=0,
n_epochs=200,
contrast='CORPD_FBK',
af=4,
n_samples=None,
cuda_visible_devices='0123',
):
val_path = f'{FASTMRI_DATA_DIR}singlecoil_val/'
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(cuda_visible_devices)
val_set = singlecoil_dataset(
val_path,
AF=af,
contrast=contrast,
inner_slices=None,
rand=False,
scale_factor=1e6,
)
if n_samples is not None:
val_set = val_set.take(n_samples)
else:
val_set = val_set.take(199)
model = MultiscaleComplex(
model_fun=model_fun,
model_kwargs=model_kwargs,
res=False,
n_scales=n_scales,
fastmri_format=True,
)
model(next(iter(val_set))[0])
model.load_weights(f'{CHECKPOINTS_DIR}checkpoints/{run_id}-{n_epochs:02d}.hdf5')
m = Metrics(METRIC_FUNCS)
for x, y_true in tqdm(val_set.as_numpy_iterator(), total=199 if n_samples is None else n_samples):
y_pred = model.predict(x, batch_size=1)
m.push(y_true[..., 0], y_pred[..., 0])
return ['PSNR', 'SSIM'], list(m.means().values())
|
recipes/opencolorio/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12789567 | from conans import ConanFile, CMake, tools
import os
required_conan_version = ">=1.33.0"
class OpenColorIOConan(ConanFile):
name = "opencolorio"
description = "A color management framework for visual effects and animation."
license = "BSD-3-Clause"
homepage = "https://opencolorio.org/"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"use_sse": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"use_sse": True
}
generators = "cmake", "cmake_find_package"
exports_sources = ["CMakeLists.txt", "patches/*"]
topics = ("colors", "visual", "effects", "animation")
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.arch not in ["x86", "x86_64"]:
del self.options.use_sse
def configure(self):
if self.options.shared:
del self.options.fPIC
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def requirements(self):
# TODO: add GLUT (needed for ociodisplay tool)
self.requires("lcms/2.12")
self.requires("yaml-cpp/0.7.0")
if tools.Version(self.version) < "2.1.0":
self.requires("tinyxml/2.6.2")
if tools.Version(self.version) >= "2.1.0":
self.requires("pystring/1.1.3")
self.requires("expat/2.4.1")
self.requires("openexr/2.5.7")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if tools.Version(self.version) >= "2.1.0":
self._cmake.definitions["OCIO_BUILD_PYTHON"] = False
else:
self._cmake.definitions["OCIO_BUILD_SHARED"] = self.options.shared
self._cmake.definitions["OCIO_BUILD_STATIC"] = not self.options.shared
self._cmake.definitions["OCIO_BUILD_PYGLUE"] = False
self._cmake.definitions["USE_EXTERNAL_YAML"] = True
self._cmake.definitions["USE_EXTERNAL_TINYXML"] = True
self._cmake.definitions["USE_EXTERNAL_LCMS"] = True
self._cmake.definitions["OCIO_USE_SSE"] = self.options.get_safe("use_sse", False)
# openexr 2.x provides Half library
self._cmake.definitions["OCIO_USE_OPENEXR_HALF"] = True
self._cmake.definitions["OCIO_BUILD_APPS"] = True
self._cmake.definitions["OCIO_BUILD_DOCS"] = False
self._cmake.definitions["OCIO_BUILD_TESTS"] = False
self._cmake.definitions["OCIO_BUILD_GPU_TESTS"] = False
self._cmake.definitions["OCIO_USE_BOOST_PTR"] = False
# avoid downloading dependencies
self._cmake.definitions["OCIO_INSTALL_EXT_PACKAGE"] = "NONE"
if self.settings.compiler == "Visual Studio" and not self.options.shared:
# define any value because ifndef is used
self._cmake.definitions["OpenColorIO_SKIP_IMPORTS"] = True
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
for module in ("expat", "lcms2", "pystring", "yaml-cpp", "Imath"):
tools.remove_files_by_mask(os.path.join(self._source_subfolder, "share", "cmake", "modules"), "Find"+module+".cmake")
def build(self):
self._patch_sources()
cm = self._configure_cmake()
cm.build()
def package(self):
cm = self._configure_cmake()
cm.install()
if not self.options.shared:
self.copy("*", src=os.path.join(self.package_folder,
"lib", "static"), dst="lib")
tools.rmdir(os.path.join(self.package_folder, "lib", "static"))
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "share"))
# nop for 2.x
tools.remove_files_by_mask(self.package_folder, "OpenColorIOConfig*.cmake")
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.pdb")
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "OpenColorIO"
self.cpp_info.names["cmake_find_package_multi"] = "OpenColorIO"
self.cpp_info.names["pkg_config"] = "OpenColorIO"
self.cpp_info.libs = tools.collect_libs(self)
if tools.Version(self.version) < "2.1.0":
if not self.options.shared:
self.cpp_info.defines.append("OpenColorIO_STATIC")
if self.settings.os == "Macos":
self.cpp_info.frameworks.extend(["Foundation", "IOKit", "ColorSync", "CoreGraphics"])
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines.append("OpenColorIO_SKIP_IMPORTS")
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH env var with: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
|
python/pythonstartup.py | andrewpsp/dotfiles | 249 | 12789575 | def _init():
import atexit
import os
import sys
try:
import readline
except Exception:
readline = None
import types
import time
import uuid
import json
import pprint
import hashlib
import subprocess
import datetime
try:
import __builtin__
except ImportError:
import builtins as __builtin__
PY2 = sys.version_info[0] == 2
__import__('rlcompleter')
histdir = os.path.expanduser('~/.pyhist')
try:
os.makedirs(histdir)
except OSError:
pass
if PY2:
text_type = unicode
else:
text_type = str
def _b(x):
if not isinstance(x, bytes):
x = x.encode('utf-8')
return x
histfile = os.path.join(histdir, hashlib.sha1(
os.path.normpath(_b(os.path.abspath(sys.prefix)))).hexdigest())
if readline is not None:
try:
readline.read_history_file(histfile)
except IOError:
pass
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind '\t' rl_complete")
else:
readline.parse_and_bind("tab: complete")
atexit.register(readline.write_history_file, histfile)
def _magic_uuid(val=None):
if val is None:
return uuid.uuid4()
elif isinstance(val, uuid.UUID):
return val
elif len(val) == 16:
return uuid.UUID(bytes=val)
return uuid.UUID(val)
def _dump_json(x, as_string=False, indent=2, cp=False):
s = '\n'.join(x.rstrip() for x in json.dumps(x, indent=indent).rstrip().splitlines())
if cp:
_copy(s)
if as_string:
return s
print(s)
def _cat(path):
with open(path, 'rb') as f:
return f.read()
def _tcat(path):
return _cat(path).decode('utf-8')
def _paste():
return subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE).communicate()[0]
def _tpaste():
return _paste().decode('utf-8')
def _jpaste():
return json.loads(_paste())
def _copy(val):
if isinstance(val, text_type):
val = val.encode('utf-8')
return subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE).communicate(val)
def _jcopy(val, indent=None):
_copy(_dump_json(val, indent=indent, as_string=True))
helpers = types.ModuleType('helpers')
helpers.histfile = histfile
helpers.pp = pprint.pprint
helpers.uuid = _magic_uuid
helpers.UUID = uuid.UUID
helpers.uuid3 = uuid.uuid3
helpers.uuid4 = uuid.uuid4
helpers.uuid5 = uuid.uuid5
helpers.dt = datetime.datetime
helpers.datetime = datetime.datetime
helpers.td = datetime.timedelta
helpers.timedelta = datetime.timedelta
helpers.time = time.time
helpers.j = _dump_json
helpers.cat = _cat
helpers.tcat = _tcat
helpers.cp = _copy
helpers.jcp = _jcopy
helpers.copy = _copy
helpers.jcopy = _jcopy
helpers.paste = _paste
helpers.tpaste = _tpaste
helpers.jpaste = _jpaste
__builtin__.h = helpers
__builtin__.true = True
__builtin__.false = False
__builtin__.null = None
_init()
del _init
|
mixly_arduino/sample/mixpy/海龟画图/py/海龟画图04盛开的向日葵_01太阳公公.py | wecake/Mixly_Arduino | 118 | 12789617 | import turtle
tina= turtle.Turtle()
tina.pencolor("#ffcc33")
tina.fillcolor("#ffcc33")
tina.pensize(5)
tina.begin_fill()
tina.circle (80,360)
tina.end_fill()
tina.penup()
tina.goto(-40,100)
tina.pendown()
tina.pencolor("#000000")
tina.setheading(30)
tina.circle ((-30),60)
tina.penup()
tina.goto(20,100)
tina.pendown()
tina.setheading(30)
tina.circle ((-30),60)
tina.penup()
tina.goto(-20,60)
tina.pendown()
tina.setheading(-30)
tina.circle (50,60)
tina.penup()
tina.goto(-30,-30)
tina.pendown()
tina.pencolor("#ffcc33")
tina.setheading(60)
for i in range(0, 12, 1):
tina.circle ((-35),120)
tina.left(150)
tina.hideturtle()
|
checkov/arm/registry.py | pmalkki/checkov | 4,013 | 12789647 | from checkov.arm.base_registry import Registry
arm_resource_registry = Registry()
arm_parameter_registry = Registry()
|
Python/other/sudoku_backtracking.py | zhcet19/NeoAlgo-1 | 897 | 12789672 | def solve(board, i=0, j=0):
i,j = nextCell(board, i, j)
if i == -1:
return True
for e in range(1,10):
if isValid(board,i,j,e):
board[i][j] = e
if solve(board, i, j):
return True
board[i][j] = 0
return False
def print_board(board):
for i in range(len(board)):
if i % 3 ==0 and i != 0:
print("------------------------")
for j in range(len(board[0])):
if j % 3 ==0 and j !=0:
print("|",end="")
if j == 8:
print(board[i][j])
else:
print(str(board[i][j]) + " " , end="")
def nextCell(board, i, j):
for x in range(i,9):
for y in range(j,9):
if board[x][y] == 0:
return x,y
for x in range(0,9):
for y in range(0,9):
if board[x][y] == 0:
return x,y
return -1,-1
def isValid(board,x,y,n):
for i in range(9):
if board[x][i] == n or board[i][y] == n:
return False
new_x = x//3 * 3
new_y = y//3 * 3
for i in range(3):
for j in range(3):
if board[new_x + i][new_y + j] == n:
return False
return True
if __name__ == "__main__":
print("Enter the numbers row by row, and put 0 for empty space:")
board = [[int(input()) for x in range (9)] for y in range(9)]
solve(board)
print_board(board)
"""
Let's say we have this board: Empty space is replaced with 0.
[[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9]]
# When the program asks for input, we enter the numbers like this:
5, 3, 0, 0, 7, 0, 0, 0, 0,6, 0, 0, 1, 9, 5, 0, 0, 0,0, 9, 8, 0, 0, 0, 0, 6, 0, 8, 0, 0, 0, 6, 0, 0, 0, 3,4,
0, 0, 8, 0, 3, 0, 0, 1, 7, 0, 0, 0, 2, 0, 0, 0, 6, 0, 6, 0, 0, 0, 0, 2, 8, 0,0, 0, 0, 4, 1, 9, 0, 0, 5, 0, 0, 0, 0, 8, 0, 0, 7, 9
# The output will look like this:
5 3 4 |6 7 8 |9 1 2
6 7 2 |1 9 5 |3 4 8
1 9 8 |3 4 2 |5 6 7
------------------------
8 5 9 |7 6 1 |4 2 3
4 2 6 |8 5 3 |7 9 1
7 1 3 |9 2 4 |8 5 6
------------------------
9 6 1 |5 3 7 |2 8 4
2 8 7 |4 1 9 |6 3 5
3 4 5 |2 8 6 |1 7 9
"""
|
tools/SDKTool/src/ui/tree/ai_tree/action_dqn_data.py | Passer-D/GameAISDK | 1,210 | 12789707 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import logging
from collections import OrderedDict
from ....common.define import DQN_ACTION_TYPES, CONTACTS, AI_ACTION_TYPES
from ....config_manager.ai.ai_manager import AIManager, AIAlgorithmType
from .action_data import ActionData
from ...utils import get_value
logger = logging.getLogger("sdktool")
class DqnActionData(ActionData):
@staticmethod
def get_game_action_inner():
return AIManager().get_game_action(AIAlgorithmType.DQN)
@staticmethod
def get_ai_action_inner():
return AIManager().get_ai_action(AIAlgorithmType.DQN)
def game_action_extend_param(self):
param = OrderedDict()
out_params = OrderedDict()
out_params['path'] = ''
out_params['region'] = OrderedDict()
out_params['region']['x'] = 0
out_params['region']['y'] = 0
out_params['region']['w'] = 0
out_params['region']['h'] = 0
param['actionRegion'] = out_params
param['durationMS'] = 0
return param
def get_type_param(self):
out_params = OrderedDict()
out_params['path'] = ''
out_params['region'] = OrderedDict()
out_params['region']['x'] = 0
out_params['region']['y'] = 0
out_params['region']['w'] = 0
out_params['region']['h'] = 0
return out_params
def get_game_action_type_param(self):
return DQN_ACTION_TYPES
def init_swipe_params(self, params=None):
if params is None:
params = OrderedDict()
swipe_param = OrderedDict()
swipe_param['startX'] = get_value(params, 'startX', 0)
swipe_param['startY'] = get_value(params, 'startY', 0)
swipe_param['endX'] = get_value(params, 'endX', 0)
swipe_param['endY'] = get_value(params, 'endY', 0)
return swipe_param
def new_game_action(self, action_name, game_action):
action_value = OrderedDict()
action_value['id'] = game_action.alloc_id()
action_value['name'] = action_name
action_value['contact'] = CONTACTS[0]
action_value['sceneTask'] = -1
action_value['type'] = AI_ACTION_TYPES[0]
return action_value
|
problems/bubble-sort/bubble-sort.py | vidyadeepa/the-coding-interview | 1,571 | 12789711 | def bubblesort(l):
"""
Runtime: O(n^2)
"""
last = len(l)-1
for i in range(last):
for j in range(i+1, last):
if l[i] > l[j]:
l[i], l[j] = l[j], l[i]
return l
print bubblesort([8,2,4,7,9,0,1,4,5,7,8,9])
print bubblesort([])
print bubblesort([1])
print bubblesort([1,3])
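# Illustrative alternative (hypothetical helper, not in the original task): a
# textbook bubble sort that swaps adjacent out-of-order elements and stops
# early once a full pass makes no swaps.
def bubblesort_adjacent(l):
    for end in range(len(l) - 1, 0, -1):
        swapped = False
        for j in range(end):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
                swapped = True
        if not swapped:
            break
    return l
print(bubblesort_adjacent([8, 2, 4, 7, 9, 0, 1, 4, 5, 7, 8, 9]))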
|
become_yukarin/dataset/utility.py | nameless-writer/become-yukarin | 562 | 12789731 | import math
import fastdtw
import numpy
_logdb_const = 10.0 / numpy.log(10.0) * numpy.sqrt(2.0)
# should work on torch and numpy arrays
def _sqrt(x):
isnumpy = isinstance(x, numpy.ndarray)
isscalar = numpy.isscalar(x)
return numpy.sqrt(x) if isnumpy else math.sqrt(x) if isscalar else x.sqrt()
def _exp(x):
isnumpy = isinstance(x, numpy.ndarray)
isscalar = numpy.isscalar(x)
return numpy.exp(x) if isnumpy else math.exp(x) if isscalar else x.exp()
def _sum(x):
if isinstance(x, list) or isinstance(x, numpy.ndarray):
return numpy.sum(x)
return float(x.sum())
def melcd(X, Y, lengths=None):
"""Mel-cepstrum distortion (MCD).
The function computes MCD for time-aligned mel-cepstrum sequences.
Args:
X (ndarray): Input mel-cepstrum, shape can be either of
(``D``,), (``T x D``) or (``B x T x D``). Both Numpy and torch arrays
are supported.
Y (ndarray): Target mel-cepstrum, shape can be either of
(``D``,), (``T x D``) or (``B x T x D``). Both Numpy and torch arrays
are supported.
lengths (list): Lengths of padded inputs. This should only be specified
if you give mini-batch inputs.
Returns:
float: Mean mel-cepstrum distortion in dB.
.. note::
The function doesn't check if inputs are actually mel-cepstrum.
"""
# summing against feature axis, and then take mean against time axis
# Eq. (1a)
# https://www.cs.cmu.edu/~awb/papers/sltu2008/kominek_black.sltu_2008.pdf
if lengths is None:
z = X - Y
r = _sqrt((z * z).sum(-1))
if not numpy.isscalar(r):
r = r.mean()
return _logdb_const * r
# Case for 1-dim features.
if len(X.shape) == 2:
# Add feature axis
X, Y = X[:, :, None], Y[:, :, None]
s = 0.0
T = _sum(lengths)
for x, y, length in zip(X, Y, lengths):
x, y = x[:length], y[:length]
z = x - y
s += _sqrt((z * z).sum(-1)).sum()
return _logdb_const * s / T
class DTWAligner(object):
"""
from https://github.com/r9y9/nnmnkwii/blob/4cade86b5c35b4e35615a2a8162ddc638018af0e/nnmnkwii/preprocessing/alignment.py#L14
"""
def __init__(self, x, y, dist=lambda x, y: numpy.linalg.norm(x - y), radius=1) -> None:
assert x.ndim == 2 and y.ndim == 2
_, path = fastdtw.fastdtw(x, y, radius=radius, dist=dist)
path = numpy.array(path)
self.normed_path_x = path[:, 0] / len(x)
self.normed_path_y = path[:, 1] / len(y)
def align_x(self, x):
path = self._interp_path(self.normed_path_x, len(x))
return x[path]
def align_y(self, y):
path = self._interp_path(self.normed_path_y, len(y))
return y[path]
def align(self, x, y):
return self.align_x(x), self.align_y(y)
@staticmethod
def align_and_transform(x, y, *args, **kwargs):
aligner = DTWAligner(*args, x=x, y=y, **kwargs)
return aligner.align(x, y)
@staticmethod
def _interp_path(normed_path: numpy.ndarray, target_length: int):
        path = numpy.floor(normed_path * target_length).astype(int)  # numpy.int was removed in newer NumPy releases
return path
class MelCepstrumAligner(DTWAligner):
def __init__(self, x, y, *args, **kwargs) -> None:
x = self._calc_aligner_feature(x)
y = self._calc_aligner_feature(y)
kwargs.update(dist=melcd)
super().__init__(x, y, *args, **kwargs)
@classmethod
def _calc_delta(cls, x):
        # keep the delta in a separate buffer; overwriting ``x`` with zeros
        # first would make the computed difference identically zero
        d = numpy.zeros_like(x, x.dtype)
        d[:-1] = x[1:] - x[:-1]
        d[-1] = 0
        return d
@classmethod
def _calc_aligner_feature(cls, x):
d = cls._calc_delta(x)
feature = numpy.concatenate((x, d), axis=1)[:, 1:]
return feature
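# Hypothetical usage sketch: align two random "mel-cepstrum-like" sequences of
# different lengths with DTW and report the distortion of the aligned pair.
# The array shapes are invented for illustration.
if __name__ == '__main__':
    x = numpy.random.rand(100, 25)
    y = numpy.random.rand(120, 25)
    x_aligned, y_aligned = DTWAligner.align_and_transform(x, y)
    print('MCD after DTW alignment:', melcd(x_aligned, y_aligned))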
|
snapx/setup.py | ruth-ann/snap-python | 242 | 12789772 | from setuptools import setup, find_packages
if __name__ == "__main__":
setup(
name="snapx",
author="<EMAIL>",
version="0.0.1",
packages=find_packages(),
description="""SnapX: An experimental SNAP API with NetworkX-like interface"""
)
|
23_yolov3-nano/01_float32/03_weight_quantization.py | khanfarhan10/PINTO_model_zoo | 1,529 | 12789793 | ### tf-nightly-2.2.0.dev20200418
import tensorflow as tf
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,tf.lite.OpsSet.SELECT_TF_OPS]
tflite_quant_model = converter.convert()
with open('yolov3_nano_voc_416_weight_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Weight Quantization complete! - yolov3_nano_voc_416_weight_quant.tflite")
|
tests/simple/test_simple.py | iklasky/timemachines | 253 | 12789798 | from timemachines.skaters.simple.movingaverage import precision_ema_ensemble, aggressive_ema_ensemble
SIMPLE_TO_TEST = [ precision_ema_ensemble, aggressive_ema_ensemble ]
from timemachines.inclusion.sklearninclusion import using_sklearn
if using_sklearn:
from timemachines.skatertools.evaluation.evaluators import hospital_mean_square_error_with_sporadic_fit, \
hospital_exog_mean_square_error_with_sporadic_fit
def test_ensemble_errors():
for f in SIMPLE_TO_TEST:
err = hospital_mean_square_error_with_sporadic_fit(f=f, k=5, n=150, fit_frequency=1)
if __name__=='__main__':
assert using_sklearn
    test_ensemble_errors()
|
devops/__init__.py | crazypenguin/devops | 300 | 12789900 | from __future__ import absolute_import, unicode_literals
from .celery import app as celery_app
# from .job import scheduler  # after the first process that acquires the file lock has run the task, a new process started after a mid-run shutdown will still execute it, so the task may run multiple times
__all__ = ['celery_app']
# __all__ = ['celery_app', 'scheduler']
# import pymysql
# pymysql.install_as_MySQLdb()
|
webservices/common/models/costs.py | 18F/openFEC | 246 | 12790045 | from sqlalchemy.dialects.postgresql import TSVECTOR
from .base import db
class CommunicationCost(db.Model):
__tablename__ = 'ofec_communication_cost_mv'
sub_id = db.Column(db.Integer, primary_key=True)
original_sub_id = db.Column('orig_sub_id', db.Integer, index=True)
candidate_id = db.Column('cand_id', db.String, index=True)
committee_id = db.Column('cmte_id', db.String, index=True)
committee_name = db.Column(db.String)
pdf_url = db.Column(db.String)
candidate_name = db.Column('s_o_cand_nm', db.String)
candidate_last_name = db.Column('s_o_cand_l_nm', db.String)
candidate_middle_name = db.Column('s_o_cand_m_nm', db.String)
candidate_first_name = db.Column('s_o_cand_f_nm', db.String)
candidate_office_state = db.Column('s_o_cand_office_st', db.String, index=True)
state_full = db.Column('s_o_cand_office_st_desc', db.String)
candidate_office_district = db.Column('s_o_cand_office_district', db.String, index=True)
candidate_office = db.Column('s_o_cand_office', db.String, index=True)
candidate_office_full =db.Column('s_o_cand_office_desc', db.String)
transaction_date = db.Column('communication_dt', db.Date, index=True)
transaction_amount = db.Column('communication_cost', db.Numeric(30, 2), index=True)
transaction_type = db.Column('transaction_tp', db.String)
communication_type = db.Column('communication_tp', db.String, index=True)
communication_type_full = db.Column('communication_tp_desc', db.String)
communication_class = db.Column('communication_class', db.String, index=True)
purpose = db.Column('communication_class_desc', db.String, index=True)
support_oppose_indicator = db.Column('s_o_ind', db.String, index=True)
#new columns added from ware house transition
action_code = db.Column('action_cd', db.String)
action_code_full = db.Column('action_cd_desc', db.String)
primary_general_indicator = db.Column('s_o_rpt_pgi', db.String)
primary_general_indicator_description = db.Column('s_o_rpt_pgi_desc', db.String)
report_type = db.Column('rpt_tp', db.String)
report_year = db.Column('rpt_yr', db.Integer)
cycle = db.Column('election_cycle', db.Integer, index=True)
form_type_code = db.Column('filing_form', db.String, index=True)
schedule_type = db.Column(db.String, index=True)
schedule_type_full = db.Column('schedule_type_desc', db.String)
tran_id = db.Column(db.String)
file_number = db.Column('file_num', db.Integer)
image_number = db.Column('image_num', db.String, index=True)
class Electioneering(db.Model):
    __tablename__ = 'ofec_electioneering_mv'
    idx = db.Column(db.Integer, primary_key=True)
    committee_id = db.Column('cmte_id', db.String, index=True)
    committee_name = db.Column('cmte_nm', db.String)
    candidate_id = db.Column('cand_id', db.String, index=True)
    candidate_name = db.Column('cand_name', db.String)
    candidate_office = db.Column('cand_office', db.String, index=True)
    candidate_district = db.Column('cand_office_district', db.String, index=True)
    candidate_state = db.Column('cand_office_st', db.String, index=True)
    beginning_image_number = db.Column('f9_begin_image_num', db.String, index=True)
    sb_image_num = db.Column(db.String, index=True)
    sub_id = db.Column(db.Integer, doc="The identifier for each electioneering record")
    link_id = db.Column(db.Integer)
    sb_link_id = db.Column(db.String)
    number_of_candidates = db.Column(db.Numeric)
    calculated_candidate_share = db.Column('calculated_cand_share', db.Numeric(30, 2), doc="If an electioneering cost targets several candidates, the total cost is divided by the number of candidates. If it only mentions one candidate the full cost of the communication is listed.")
    communication_date = db.Column('comm_dt', db.Date, doc='It is the airing, broadcast, cablecast or other dissemination of the communication')
    public_distribution_date = db.Column('pub_distrib_dt', db.Date, doc='The public distribution date is the date that triggers disclosure of the electioneering communication (date reported on page 1 of Form 9)')
    disbursement_date = db.Column('disb_dt', db.Date, index=True, doc='Disbursement date includes actual disbursements and execution of contracts creating an obligation to make disbursements (SB date of disbursement)')
    disbursement_amount = db.Column('reported_disb_amt', db.Numeric(30, 2), index=True)
    purpose_description = db.Column('disb_desc', db.String)
    report_year = db.Column('rpt_yr', db.Integer, index=True)
    file_number = db.Column('file_num', db.Integer)
    amendment_indicator = db.Column('amndt_ind', db.String)
    receipt_date = db.Column('receipt_dt', db.Date)
    election_type_raw = db.Column('election_tp', db.String)
    pdf_url = db.Column(db.String)
    purpose_description_text = db.Column(TSVECTOR)
    @property
    def election_type(self):
        return self.election_type_raw[:1]
|
atlas/foundations_rest_api/src/foundations_rest_api/filters/null_filter.py | DeepLearnI/atlas | 296 | 12790072 | <reponame>DeepLearnI/atlas
from foundations_rest_api.filters.api_filter_mixin import APIFilterMixin
class NullFilter(APIFilterMixin):
def __call__(self, result, params):
if result and isinstance(result, list):
new_params = {key: value for key, value in params.items() if key.endswith('_isnull')}
if new_params:
self._filter(result, new_params)
return result
def _filter(self, result, params):
for key, param_value in params.items():
column_name = key.split('_isnull', 1)[0]
value = self._parse_value(param_value)
if value is not None:
self._filter_column(result, column_name, value)
def _parse_value(self, param_value):
from foundations_rest_api.filters.parsers import BoolParser
parser = BoolParser()
return parser.parse(param_value)
def _filter_column(self, result, column_name, value):
# Explicit is better than implicit [Zen of Python, 1]
# This is because "value" can also be None and in that case filtering is discarded
if value is True:
self._filter_by_null_values(result, column_name)
elif value is False:
self._filter_by_not_null_values(result, column_name)
def _is_none(self, value):
return value is None or self._is_nan(value)
def _is_nan(self, value):
import math
return isinstance(value, float) and math.isnan(value)
def _filter_by_null_values(self, result, column_name):
def column_value_is_null(item):
value, item_parser = self._get_item_property_value_and_parser(item, column_name, parse=False)
return item_parser is not None and self._is_none(value)
return self._in_place_filter(column_value_is_null, result)
def _filter_by_not_null_values(self, result, column_name):
def column_value_is_not_null(item):
value, item_parser = self._get_item_property_value_and_parser(item, column_name, parse=False)
return item_parser is not None and not self._is_none(value)
return self._in_place_filter(column_value_is_not_null, result)
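# Hedged usage sketch (illustrative only -- the item shape and value parsing
# come from APIFilterMixin, so the objects and helper below are assumptions,
# not part of this module):
# jobs = load_job_list()                            # hypothetical helper returning result items
# NullFilter()(jobs, {'duration_isnull': 'true'})   # keep items whose duration is None/NaN
# NullFilter()(jobs, {'duration_isnull': 'false'})  # keep items with a concrete duration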
|
devserver/modules/profile.py | leture/django-devserver | 467 | 12790098 | from devserver.modules import DevServerModule
from devserver.utils.time import ms_from_timedelta
from devserver.settings import DEVSERVER_AUTO_PROFILE
from datetime import datetime
import functools
import gc
class ProfileSummaryModule(DevServerModule):
"""
    Outputs the total time taken to render the response once it is ready.
"""
logger_name = 'profile'
def process_init(self, request):
self.start = datetime.now()
def process_complete(self, request):
duration = datetime.now() - self.start
self.logger.info('Total time to render was %.2fs', ms_from_timedelta(duration) / 1000)
class LeftOversModule(DevServerModule):
"""
Outputs a summary of events the garbage collector couldn't handle.
"""
    # TODO: Not even sure this is correct, but it's the general idea
logger_name = 'profile'
def process_init(self, request):
gc.enable()
gc.set_debug(gc.DEBUG_SAVEALL)
def process_complete(self, request):
gc.collect()
self.logger.info('%s objects left in garbage', len(gc.garbage))
from django.template.defaultfilters import filesizeformat
try:
from guppy import hpy
except ImportError:
import warnings
class MemoryUseModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('MemoryUseModule requires guppy to be installed.')
return super(MemoryUseModule, cls).__new__(cls)
else:
class MemoryUseModule(DevServerModule):
"""
Outputs a summary of memory usage of the course of a request.
"""
logger_name = 'profile'
def __init__(self, request):
super(MemoryUseModule, self).__init__(request)
self.hpy = hpy()
self.oldh = self.hpy.heap()
self.logger.info('heap size is %s', filesizeformat(self.oldh.size))
def process_complete(self, request):
newh = self.hpy.heap()
alloch = newh - self.oldh
dealloch = self.oldh - newh
self.oldh = newh
self.logger.info('%s allocated, %s deallocated, heap size is %s', *map(filesizeformat, [alloch.size, dealloch.size, newh.size]))
try:
from line_profiler import LineProfiler
except ImportError:
import warnings
class LineProfilerModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('LineProfilerModule requires line_profiler to be installed.')
return super(LineProfilerModule, cls).__new__(cls)
class devserver_profile(object):
def __init__(self, follow=[]):
pass
def __call__(self, func):
return func
else:
class LineProfilerModule(DevServerModule):
"""
Outputs a Line by Line profile of any @devserver_profile'd functions that were run
"""
logger_name = 'profile'
def process_view(self, request, view_func, view_args, view_kwargs):
request.devserver_profiler = LineProfiler()
request.devserver_profiler_run = False
if (DEVSERVER_AUTO_PROFILE):
_unwrap_closure_and_profile(request.devserver_profiler, view_func)
request.devserver_profiler.enable_by_count()
def process_complete(self, request):
if hasattr(request, 'devserver_profiler_run') and (DEVSERVER_AUTO_PROFILE or request.devserver_profiler_run):
from cStringIO import StringIO
out = StringIO()
if (DEVSERVER_AUTO_PROFILE):
request.devserver_profiler.disable_by_count()
request.devserver_profiler.print_stats(stream=out)
self.logger.info(out.getvalue())
def _unwrap_closure_and_profile(profiler, func):
if not hasattr(func, 'func_code'):
return
profiler.add_function(func)
if func.func_closure:
for cell in func.func_closure:
if hasattr(cell.cell_contents, 'func_code'):
_unwrap_closure_and_profile(profiler, cell.cell_contents)
class devserver_profile(object):
def __init__(self, follow=[]):
self.follow = follow
def __call__(self, func):
def profiled_func(*args, **kwargs):
request = args[0]
if hasattr(request, 'request'):
# We're decorating a Django class-based-view and the first argument is actually self:
request = args[1]
try:
request.devserver_profiler.add_function(func)
request.devserver_profiler_run = True
for f in self.follow:
request.devserver_profiler.add_function(f)
request.devserver_profiler.enable_by_count()
return func(*args, **kwargs)
finally:
request.devserver_profiler.disable_by_count()
return functools.wraps(func)(profiled_func)
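# Hedged usage sketch (the view and helper below are illustrative, not part of
# this project): decorating a Django view makes its lines -- and any functions
# listed in `follow` -- appear in the line-by-line profile logged above.
# @devserver_profile(follow=[expensive_helper])
# def my_view(request):
#     expensive_helper()
#     return HttpResponse('ok')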
|
code/config.py | SimonSuster/rc-cnn-dailymail | 325 | 12790106 |
import theano
import argparse
_floatX = theano.config.floatX
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def get_args():
parser = argparse.ArgumentParser()
parser.register('type', 'bool', str2bool)
# Basics
parser.add_argument('--debug',
type='bool',
default=False,
help='whether it is debug mode')
parser.add_argument('--test_only',
type='bool',
default=False,
help='test_only: no need to run training process')
parser.add_argument('--random_seed',
type=int,
default=1013,
help='Random seed')
# Data file
parser.add_argument('--train_file',
type=str,
default=None,
help='Training file')
parser.add_argument('--dev_file',
type=str,
default=None,
help='Development file')
parser.add_argument('--pre_trained',
type=str,
default=None,
help='Pre-trained model.')
parser.add_argument('--model_file',
type=str,
default='model.pkl.gz',
help='Model file to save')
parser.add_argument('--log_file',
type=str,
default=None,
help='Log file')
parser.add_argument('--embedding_file',
type=str,
default=None,
help='Word embedding file')
parser.add_argument('--max_dev',
type=int,
default=None,
help='Maximum number of dev examples to evaluate on')
parser.add_argument('--relabeling',
type='bool',
default=True,
help='Whether to relabel the entities when loading the data')
# Model details
parser.add_argument('--embedding_size',
type=int,
default=None,
help='Default embedding size if embedding_file is not given')
parser.add_argument('--hidden_size',
type=int,
default=128,
help='Hidden size of RNN units')
parser.add_argument('--bidir',
type='bool',
default=True,
help='bidir: whether to use a bidirectional RNN')
parser.add_argument('--num_layers',
type=int,
default=1,
help='Number of RNN layers')
parser.add_argument('--rnn_type',
type=str,
default='gru',
help='RNN type: lstm or gru (default)')
parser.add_argument('--att_func',
type=str,
default='bilinear',
help='Attention function: bilinear (default) or mlp or avg or last or dot')
# Optimization details
parser.add_argument('--batch_size',
type=int,
default=32,
help='Batch size')
parser.add_argument('--num_epoches',
type=int,
default=100,
                        help='Number of epochs')
parser.add_argument('--eval_iter',
type=int,
default=100,
help='Evaluation on dev set after K updates')
parser.add_argument('--dropout_rate',
type=float,
default=0.2,
help='Dropout rate')
parser.add_argument('--optimizer',
type=str,
default='sgd',
help='Optimizer: sgd (default) or adam or rmsprop')
parser.add_argument('--learning_rate', '-lr',
type=float,
default=0.1,
help='Learning rate for SGD')
parser.add_argument('--grad_clipping',
type=float,
default=10.0,
help='Gradient clipping')
return parser.parse_args()
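# Hedged usage sketch (file paths are illustrative): a training script would
# typically just consume the parsed namespace, e.g.
#   args = get_args()   # invoked as: python main.py --train_file data/train.txt --dev_file data/dev.txt
#   assert args.rnn_type in ('lstm', 'gru') and args.hidden_size > 0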
|
html_parsing/get_price_game/from_gama-gama.py | DazEB2/SimplePyScripts | 117 | 12790122 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Based on http://stackoverflow.com/a/37755811/5909792
def get_html(url):
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage
class ExtractorHtml:
def __init__(self, url):
_app = QApplication([])
self._page = QWebEnginePage()
self._page.loadFinished.connect(self._load_finished_handler)
self.html = None
            # A small workaround to get the page content of the http://gama-gama.ru site
            # The page loads twice: first a piece of tricky javascript code, then the page
            # of the site with its content
self._counter_finished = 0
self._page.load(QUrl(url))
            # Wait for the page to load and for its content to be received
            # This loop turns the asynchronous code into synchronous code
while self.html is None:
_app.processEvents()
_app.quit()
            # To avoid script crashes
self._page = None
def _callable(self, data):
self.html = data
def _load_finished_handler(self, _):
self._counter_finished += 1
if self._counter_finished == 2:
self._page.toHtml(self._callable)
return ExtractorHtml(url).html
text = 'mad'
url = 'http://gama-gama.ru/search/?searchField=' + text
html = get_html(url)
from bs4 import BeautifulSoup
root = BeautifulSoup(html, 'lxml')
for game in root.select('.catalog-content > a'):
name = game['title'].strip()
name = name.replace('Купить ', '')
price = None
price_holder = game.select_one('.catalog_price_holder')
price_1 = price_holder.select_one('.price_1')
if price_1:
price = price_1.text.strip()
else:
        # Contains the description of the discounted price. Extract the discounted price
price_2 = price_holder.select_one('.price_2')
if price_2:
price = price_2.select_one('.price_group > .promo_price').text
            # Replace runs of whitespace characters with a single space
import re
price = re.sub(r'\s+', ' ', price)
price = price.strip()
print(name, price)
|
shap/plots/_utils.py | willianfco/shap | 16,097 | 12790129 | from .. import Explanation
from ..utils import OpChain
from . import colors
import numpy as np
def convert_color(color):
try:
        import matplotlib.pyplot as pl  # lazy import; pl is not imported at module level
        color = pl.get_cmap(color)
except:
pass
if color == "shap_red":
color = colors.red_rgb
elif color == "shap_blue":
color = colors.blue_rgb
return color
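# Hedged sketch: any name accepted by matplotlib's get_cmap passes through as a
# colormap object, while the two special strings map to shap's RGB arrays:
# convert_color('viridis')   # -> matplotlib colormap (assuming matplotlib is installed)
# convert_color('shap_red')  # -> colors.red_rgb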
def convert_ordering(ordering, shap_values):
if issubclass(type(ordering), OpChain):
ordering = ordering.apply(Explanation(shap_values))
if issubclass(type(ordering), Explanation):
if "argsort" in [op["name"] for op in ordering.op_history]:
ordering = ordering.values
else:
ordering = ordering.argsort.flip.values
return ordering
def get_sort_order(dist, clust_order, cluster_threshold, feature_order):
""" Returns a sorted order of the values where we respect the clustering order when dist[i,j] < cluster_threshold
"""
#feature_imp = np.abs(values)
# if partition_tree is not None:
# new_tree = fill_internal_max_values(partition_tree, shap_values)
# clust_order = sort_inds(new_tree, np.abs(shap_values))
clust_inds = np.argsort(clust_order)
feature_order = feature_order.copy()#order.apply(Explanation(shap_values))
# print("feature_order", feature_order)
for i in range(len(feature_order)-1):
ind1 = feature_order[i]
next_ind = feature_order[i+1]
next_ind_pos = i + 1
for j in range(i+1,len(feature_order)):
ind2 = feature_order[j]
#if feature_imp[ind] >
# if ind1 == 2:
# print(ind1, ind2, dist[ind1,ind2])
if dist[ind1,ind2] <= cluster_threshold:
# if ind1 == 2:
# print(clust_inds)
# print(ind1, ind2, next_ind, dist[ind1,ind2], clust_inds[ind2], clust_inds[next_ind])
if dist[ind1,next_ind] > cluster_threshold or clust_inds[ind2] < clust_inds[next_ind]:
next_ind = ind2
next_ind_pos = j
# print("next_ind", next_ind)
# print("next_ind_pos", next_ind_pos)
# insert the next_ind next
for j in range(next_ind_pos, i+1, -1):
#print("j", j)
feature_order[j] = feature_order[j-1]
feature_order[i+1] = next_ind
#print(feature_order)
return feature_order
def merge_nodes(values, partition_tree):
""" This merges the two clustered leaf nodes with the smallest total value.
"""
M = partition_tree.shape[0] + 1
ptind = 0
min_val = np.inf
for i in range(partition_tree.shape[0]):
ind1 = int(partition_tree[i,0])
ind2 = int(partition_tree[i,1])
if ind1 < M and ind2 < M:
val = np.abs(values[ind1]) + np.abs(values[ind2])
if val < min_val:
min_val = val
ptind = i
#print("ptind", ptind, min_val)
ind1 = int(partition_tree[ptind,0])
ind2 = int(partition_tree[ptind,1])
if ind1 > ind2:
tmp = ind1
ind1 = ind2
ind2 = tmp
partition_tree_new = partition_tree.copy()
for i in range(partition_tree_new.shape[0]):
i0 = int(partition_tree_new[i,0])
i1 = int(partition_tree_new[i,1])
if i0 == ind2:
partition_tree_new[i,0] = ind1
elif i0 > ind2:
partition_tree_new[i,0] -= 1
if i0 == ptind + M:
partition_tree_new[i,0] = ind1
elif i0 > ptind + M:
partition_tree_new[i,0] -= 1
if i1 == ind2:
partition_tree_new[i,1] = ind1
elif i1 > ind2:
partition_tree_new[i,1] -= 1
if i1 == ptind + M:
partition_tree_new[i,1] = ind1
elif i1 > ptind + M:
partition_tree_new[i,1] -= 1
partition_tree_new = np.delete(partition_tree_new, ptind, axis=0)
# update the counts to be correct
fill_counts(partition_tree_new)
return partition_tree_new, ind1, ind2
def dendrogram_coords(leaf_positions, partition_tree):
""" Returns the x and y coords of the lines of a dendrogram where the leaf order is given.
Note that scipy can compute these coords as well, but it does not allow you to easily specify
a specific leaf order, hence this reimplementation.
"""
xout = []
yout = []
_dendrogram_coords_rec(partition_tree.shape[0]-1, leaf_positions, partition_tree, xout, yout)
return np.array(xout), np.array(yout)
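# Hedged usage sketch (scipy is assumed to be available, as elsewhere in shap):
# build a small linkage matrix and compute the dendrogram line segments for a
# fixed leaf order.
# from scipy.cluster.hierarchy import linkage
# Z = linkage(np.random.rand(4, 3), method='average')   # 4 leaves -> 3 merge rows
# xs, ys = dendrogram_coords(np.arange(4), Z)           # xs.shape == ys.shape == (3, 4)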
def _dendrogram_coords_rec(pos, leaf_positions, partition_tree, xout, yout):
M = partition_tree.shape[0] + 1
if pos < 0:
return leaf_positions[pos + M], 0
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
x_left, y_left = _dendrogram_coords_rec(left, leaf_positions, partition_tree, xout, yout)
x_right, y_right = _dendrogram_coords_rec(right, leaf_positions, partition_tree, xout, yout)
y_curr = partition_tree[pos, 2]
xout.append([x_left, x_left, x_right, x_right])
yout.append([y_left, y_curr, y_curr, y_right])
return (x_left + x_right) / 2, y_curr
def fill_internal_max_values(partition_tree, leaf_values):
""" This fills the forth column of the partition tree matrix with the max leaf value in that cluster.
"""
M = partition_tree.shape[0] + 1
new_tree = partition_tree.copy()
for i in range(new_tree.shape[0]):
val = 0
if new_tree[i,0] < M:
ind = int(new_tree[i,0])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,0])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
if new_tree[i,1] < M:
ind = int(new_tree[i,1])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,1])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
new_tree[i,3] = val
return new_tree
def fill_counts(partition_tree):
""" This updates the
"""
M = partition_tree.shape[0] + 1
for i in range(partition_tree.shape[0]):
val = 0
if partition_tree[i,0] < M:
ind = int(partition_tree[i,0])
val += 1
else:
ind = int(partition_tree[i,0])-M
val += partition_tree[ind,3]
if partition_tree[i,1] < M:
ind = int(partition_tree[i,1])
val += 1
else:
ind = int(partition_tree[i,1])-M
val += partition_tree[ind,3]
partition_tree[i,3] = val
def sort_inds(partition_tree, leaf_values, pos=None, inds=None):
if inds is None:
inds = []
if pos is None:
partition_tree = fill_internal_max_values(partition_tree, leaf_values)
pos = partition_tree.shape[0]-1
M = partition_tree.shape[0] + 1
if pos < 0:
inds.append(pos + M)
return
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
left_val = partition_tree[left,3] if left >= 0 else leaf_values[left + M]
right_val = partition_tree[right,3] if right >= 0 else leaf_values[right + M]
if left_val < right_val:
tmp = right
right = left
left = tmp
sort_inds(partition_tree, leaf_values, left, inds)
sort_inds(partition_tree, leaf_values, right, inds)
return inds |
src/third_party/swiftshader/third_party/subzero/pydir/run-pnacl-sz.py | rhencke/engine | 2,151 | 12790141 | #!/usr/bin/env python2
import argparse
import itertools
import os
import re
import subprocess
import sys
import tempfile
from utils import FindBaseNaCl, GetObjdumpCmd, shellcmd
def TargetAssemblerFlags(target, sandboxed):
  # TODO(reed kotler). Need to find out exactly what we need to
# add here for Mips32.
flags = { 'x8632': ['-triple=%s' % ('i686-nacl' if sandboxed else 'i686')],
'x8664': ['-triple=%s' % (
'x86_64-nacl' if sandboxed else 'x86_64')],
'arm32': ['-triple=%s' % (
'armv7a-nacl' if sandboxed else 'armv7a'),
'-mcpu=cortex-a9', '-mattr=+neon'],
'mips32': ['-triple=%s' % (
'mipsel-nacl' if sandboxed else 'mipsel'),
'-mcpu=mips32'] }
return flags[target]
def TargetDisassemblerFlags(target):
flags = { 'x8632': ['-Mintel'],
'x8664': ['-Mintel'],
'arm32': [],
'mips32':[] }
return flags[target]
def main():
"""Run the pnacl-sz compiler on an llvm file.
Takes an llvm input file, freezes it into a pexe file, converts
it to a Subzero program, and finally compiles it.
"""
argparser = argparse.ArgumentParser(
description=' ' + main.__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('--input', '-i', required=True,
help='LLVM source file to compile')
argparser.add_argument('--output', '-o', required=False,
help='Output file to write')
argparser.add_argument('--insts', required=False,
action='store_true',
help='Stop after translating to ' +
'Subzero instructions')
argparser.add_argument('--no-local-syms', required=False,
action='store_true',
help="Don't keep local symbols in the pexe file")
argparser.add_argument('--llvm', required=False,
action='store_true',
help='Parse pexe into llvm IR first, then ' +
'convert to Subzero')
argparser.add_argument('--llvm-source', required=False,
action='store_true',
help='Parse source directly into llvm IR ' +
'(without generating a pexe), then ' +
'convert to Subzero')
argparser.add_argument(
'--pnacl-sz', required=False, default='./pnacl-sz', metavar='PNACL-SZ',
help="Subzero translator 'pnacl-sz'")
argparser.add_argument('--pnacl-bin-path', required=False,
default=(
'{root}/toolchain/linux_x86/pnacl_newlib_raw/bin'
).format(root=FindBaseNaCl()),
metavar='PNACL_BIN_PATH',
help='Path to LLVM & Binutils executables ' +
'(e.g. for building PEXE files)')
argparser.add_argument('--assemble', required=False,
action='store_true',
help='Assemble the output')
argparser.add_argument('--disassemble', required=False,
action='store_true',
help='Disassemble the assembled output')
argparser.add_argument('--dis-flags', required=False,
action='append', default=[],
help='Add a disassembler flag')
argparser.add_argument('--filetype', default='iasm', dest='filetype',
choices=['obj', 'asm', 'iasm'],
help='Output file type. Default %(default)s')
argparser.add_argument('--forceasm', required=False, action='store_true',
help='Force --filetype=asm')
argparser.add_argument('--target', default='x8632', dest='target',
choices=['x8632','x8664','arm32','mips32'],
help='Target architecture. Default %(default)s')
argparser.add_argument('--echo-cmd', required=False,
action='store_true',
help='Trace command that generates ICE instructions')
argparser.add_argument('--tbc', required=False, action='store_true',
help='Input is textual bitcode (not .ll)')
argparser.add_argument('--expect-fail', required=False, action='store_true',
help='Negate success of run by using LLVM not')
argparser.add_argument('--allow-pnacl-reader-error-recovery',
action='store_true',
help='Continue parsing after first error')
argparser.add_argument('--args', '-a', nargs=argparse.REMAINDER,
default=[],
help='Remaining arguments are passed to pnacl-sz')
argparser.add_argument('--sandbox', required=False, action='store_true',
help='Sandboxes the generated code')
args = argparser.parse_args()
pnacl_bin_path = args.pnacl_bin_path
llfile = args.input
if args.llvm and args.llvm_source:
raise RuntimeError("Can't specify both '--llvm' and '--llvm-source'")
if args.llvm_source and args.no_local_syms:
raise RuntimeError("Can't specify both '--llvm-source' and " +
"'--no-local-syms'")
if args.llvm_source and args.tbc:
raise RuntimeError("Can't specify both '--tbc' and '--llvm-source'")
if args.llvm and args.tbc:
raise RuntimeError("Can't specify both '--tbc' and '--llvm'")
if args.forceasm:
if args.expect_fail:
args.forceasm = False
elif args.filetype == 'asm':
pass
elif args.filetype == 'iasm':
# TODO(sehr) implement forceasm for iasm.
pass
elif args.filetype == 'obj':
args.filetype = 'asm'
args.assemble = True
cmd = []
if args.tbc:
cmd = [os.path.join(pnacl_bin_path, 'pnacl-bcfuzz'), llfile,
'-bitcode-as-text', '-output', '-', '|']
elif not args.llvm_source:
cmd = [os.path.join(pnacl_bin_path, 'llvm-as'), llfile, '-o', '-', '|',
os.path.join(pnacl_bin_path, 'pnacl-freeze')]
if not args.no_local_syms:
cmd += ['--allow-local-symbol-tables']
cmd += ['|']
if args.expect_fail:
cmd += [os.path.join(pnacl_bin_path, 'not')]
cmd += [args.pnacl_sz]
cmd += ['--target', args.target]
if args.sandbox:
cmd += ['-sandbox']
if args.insts:
# If the tests are based on '-verbose inst' output, force
# single-threaded translation because dump output does not get
# reassembled into order.
cmd += ['-verbose', 'inst,global_init', '-notranslate', '-threads=0']
elif args.allow_pnacl_reader_error_recovery:
cmd += ['-allow-pnacl-reader-error-recovery', '-threads=0']
if not args.llvm_source:
cmd += ['--bitcode-format=pnacl']
if not args.no_local_syms:
cmd += ['--allow-local-symbol-tables']
if args.llvm or args.llvm_source:
cmd += ['--build-on-read=0']
else:
cmd += ['--build-on-read=1']
cmd += ['--filetype=' + args.filetype]
cmd += ['--emit-revision=0']
script_name = os.path.basename(sys.argv[0])
for _, arg in enumerate(args.args):
# Redirecting the output file needs to be done through the script
# because forceasm may introduce a new temporary file between pnacl-sz
# and llvm-mc. Similar issues could occur when setting filetype, target,
# or sandbox through --args. Filter and report an error.
if re.search('^-?-(o|output|filetype|target|sandbox)(=.+)?$', arg):
preferred_option = '--output' if re.search('^-?-o(=.+)?$', arg) else arg
print 'Option should be set using:'
print ' %s ... %s ... --args' % (script_name, preferred_option)
print 'rather than:'
print ' %s ... --args %s ...' % (script_name, arg)
exit(1)
asm_temp = None
output_file_name = None
keep_output_file = False
if args.output:
output_file_name = args.output
keep_output_file = True
cmd += args.args
if args.llvm_source:
cmd += [llfile]
if args.assemble or args.disassemble:
if not output_file_name:
# On windows we may need to close the file first before it can be
# re-opened by the other tools, so don't do delete-on-close,
# and instead manually delete.
asm_temp = tempfile.NamedTemporaryFile(delete=False)
asm_temp.close()
output_file_name = asm_temp.name
if args.assemble and args.filetype != 'obj':
cmd += (['|', os.path.join(pnacl_bin_path, 'llvm-mc')] +
TargetAssemblerFlags(args.target, args.sandbox) +
['-filetype=obj', '-o', output_file_name])
elif output_file_name:
cmd += ['-o', output_file_name]
if args.disassemble:
    # Show wide instruction encodings, disassemble, show relocs and
    # disassemble zeros.
cmd += (['&&', os.path.join(pnacl_bin_path, GetObjdumpCmd(args.target))] +
args.dis_flags +
['-w', '-d', '-r', '-z'] + TargetDisassemblerFlags(args.target) +
[output_file_name])
stdout_result = shellcmd(cmd, echo=args.echo_cmd)
if not args.echo_cmd:
sys.stdout.write(stdout_result)
if asm_temp and not keep_output_file:
os.remove(output_file_name)
if __name__ == '__main__':
main()
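# Hedged example invocation (paths are illustrative; every flag shown exists in
# the argument parser above):
#   ./run-pnacl-sz.py -i foo.ll --target x8632 --filetype asm --assemble --disassemble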
|
examples/dataflow-python-examples/streaming-examples/slowlychanging-sideinput/sideinput_refresh/dofns.py | ruchirjain86/professional-services | 2,116 | 12790156 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
import apache_beam as beam
from apache_beam.io.filesystems import FileSystems
from sideinput_refresh import util
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(beam.pvalue.TaggedOutput)
class SplitToMultiple(beam.DoFn):
"""Generates a base path for each side input type combining root path received via file notification subscription
and side input type. PCollection recieved will contain only single element representing base path and will
be fired once every x hours matching the side input refresh frequency
Attributes:
sideinput_types: List of Side input types
file_prefix: file_prefix matching required files. Default is * indicating all files
"""
def __init__(self, sideinput_types: List[str], file_prefix: str = "*"):
self.sideinput_types = sideinput_types
self.file_prefix = file_prefix
def process(self,
element,
timestamp=beam.DoFn.TimestampParam,
window=beam.DoFn.WindowParam,
pane_info=beam.DoFn.PaneInfoParam):
        # Logging to audit triggering of the side input refresh process. The statement is logged only when the pubsub notification
        # triggers the side input refresh process (i.e. normally once every x hours)
if isinstance(window, beam.transforms.window.GlobalWindow):
logging.info(
f"(Re)loading side input data from basepath {element.decode()} for global window: {timestamp} - {window}"
)
else:
logging.info(
f"(Re)loading side input data from basepath {element.decode()} for window: {util.get_formatted_time(window.start)} - {util.get_formatted_time(window.end)}"
)
for sideinput_type in self.sideinput_types:
yield beam.pvalue.TaggedOutput(
sideinput_type,
FileSystems.join(element.decode(), sideinput_type,
self.file_prefix))
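# Hedged usage sketch (pipeline wiring and the side input type names below are
# assumptions, not part of this module): split the single base-path
# notification into one tagged output per side input type.
# tagged = (base_paths
#           | beam.ParDo(SplitToMultiple(['prices', 'regions'], '*'))
#             .with_outputs('prices', 'regions'))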
|
backend/apps/mails/views.py | KuanWeiLee/froggy-service | 174 | 12790157 | <gh_stars>100-1000
from django.shortcuts import redirect
from django.urls import reverse
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.permissions import IsAdminUser
from .models import SendGridMail
from .serializers import SendGridMailSerializer
class MailViewSet(ModelViewSet):
queryset = SendGridMail.objects.all()
serializer_class = SendGridMailSerializer
permission_classes = [IsAdminUser]
http_method_names = ['get', 'post', 'retrieve']
@action(methods=['GET'], detail=True)
def resend(self, request, pk=None):
mail = SendGridMail.objects.get(id=pk)
mail.send()
return redirect(reverse("admin:cases_case_change", args=(mail.case.id,)))
|
contrib/cookiecutter/ckan_extension/{{cookiecutter.project}}/ckanext/{{cookiecutter.project_shortname}}/logic/schema.py | gg2/ckan | 2,805 | 12790222 | <reponame>gg2/ckan
import ckan.plugins.toolkit as tk
def {{cookiecutter.project_shortname}}_get_sum():
not_empty = tk.get_validator("not_empty")
convert_int = tk.get_validator("convert_int")
return {
"left": [not_empty, convert_int],
"right": [not_empty, convert_int]
}
|
lpot/utils/logger.py | intelkevinputnam/lpot-docs | 172 | 12790244 | <reponame>intelkevinputnam/lpot-docs
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
class Logger(object):
__instance = None
def __new__(cls):
if Logger.__instance is None:
Logger.__instance = object.__new__(cls)
Logger.__instance._log()
return Logger.__instance
def _log(self):
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
self._logger = logging.getLogger()
self._logger.handlers.clear()
self._logger.setLevel(LOGLEVEL)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s',
"%Y-%m-%d %H:%M:%S")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self._logger.addHandler(streamHandler)
self._logger.propagate = False
def get_logger(self):
return self._logger
def _pretty_dict(value, indent=0):
prefix = '\n' + ' ' * (indent + 4)
if isinstance(value, dict):
items = [
prefix + repr(key) + ': ' + _pretty_dict(value[key], indent + 4)
for key in value
]
return '{%s}' % (','.join(items) + '\n' + ' ' * indent)
elif isinstance(value, list):
items = [
prefix + _pretty_dict(item, indent + 4)
for item in value
]
return '[%s]' % (','.join(items) + '\n' + ' ' * indent)
elif isinstance(value, tuple):
items = [
prefix + _pretty_dict(item, indent + 4)
for item in value
]
return '(%s)' % (','.join(items) + '\n' + ' ' * indent)
else:
return repr(value)
level = Logger().get_logger().level
DEBUG = logging.DEBUG
def log(level, msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().log(level, line, *args, **kwargs)
else:
Logger().get_logger().log(level, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().debug(line, *args, **kwargs)
else:
Logger().get_logger().debug(msg, *args, **kwargs)
def error(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().error(line, *args, **kwargs)
else:
Logger().get_logger().error(msg, *args, **kwargs)
def fatal(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().fatal(line, *args, **kwargs)
else:
Logger().get_logger().fatal(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().info(line, *args, **kwargs)
else:
Logger().get_logger().info(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().warning(line, *args, **kwargs)
else:
Logger().get_logger().warning(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().warning(line, *args, **kwargs)
else:
Logger().get_logger().warning(msg, *args, **kwargs)
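# Hedged usage sketch: plain strings are logged as-is, while dicts, lists and
# tuples are pretty-printed one line at a time by the helpers above.
# info('tuning started')
# info({'device': 'cpu', 'batch_size': 32})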
|
gimp-plugins/PD-Denoising-pytorch/utils.py | sunlin7/GIMP-ML | 1,077 | 12790254 | <reponame>sunlin7/GIMP-ML
import math
import torch
import torch.nn as nn
import numpy as np
# from skimage.measure.simple_metrics import compare_psnr
from torch.autograd import Variable
import cv2
import scipy.ndimage
import scipy.io as sio
# import matplotlib as mpl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
# nn.init.uniform(m.weight.data, 1.0, 0.02)
m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025)
nn.init.constant(m.bias.data, 0.0)
# def batch_PSNR(img, imclean, data_range):
# Img = img.data.cpu().numpy().astype(np.float32)
# Iclean = imclean.data.cpu().numpy().astype(np.float32)
# PSNR = 0
# for i in range(Img.shape[0]):
# PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range)
# return (PSNR/Img.shape[0])
def data_augmentation(image, mode):
out = np.transpose(image, (1,2,0))
if mode == 0:
# original
out = out
elif mode == 1:
# flip up and down
out = np.flipud(out)
elif mode == 2:
# rotate counterwise 90 degree
out = np.rot90(out)
elif mode == 3:
# rotate 90 degree and flip up and down
out = np.rot90(out)
out = np.flipud(out)
elif mode == 4:
# rotate 180 degree
out = np.rot90(out, k=2)
elif mode == 5:
# rotate 180 degree and flip
out = np.rot90(out, k=2)
out = np.flipud(out)
elif mode == 6:
# rotate 270 degree
out = np.rot90(out, k=3)
elif mode == 7:
# rotate 270 degree and flip
out = np.rot90(out, k=3)
out = np.flipud(out)
return np.transpose(out, (2,0,1))
def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0, refill_ind=[0, 0]):
if mode == 0 or mode == 1 or mode==3:
out_numpy = Out.data.squeeze(0).cpu().numpy()
elif mode == 2:
out_numpy = Out.data.squeeze(1).cpu().numpy()
if out_numpy.shape[0] == 1:
out_numpy = np.tile(out_numpy, (3, 1, 1))
if mode == 0 or mode == 1:
out_numpy = (np.transpose(out_numpy, (1, 2, 0))) * 255.0 * scal
else:
out_numpy = (np.transpose(out_numpy, (1, 2, 0)))
if ps == 1:
out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind)
if rescale == 1:
out_numpy = cv2.resize(out_numpy, (h, w))
#print(out_numpy.shape)
return out_numpy
def temp_ps_4comb(Out, In):
pass
def np2ts(x, mode=0): #now assume the input only has one channel which is ignored
    w, h, c = x.shape
x_ts = x.transpose(2, 0, 1)
x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
if mode == 0 or mode == 1:
x_ts = x_ts.unsqueeze(0)
elif mode == 2:
x_ts = x_ts.unsqueeze(1)
return x_ts
def np2ts_4d(x):
x_ts = x.transpose(0, 3, 1, 2)
x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
return x_ts
def get_salient_noise_in_maps(lm, thre = 0., chn=3):
'''
Description: To find out the most frequent estimated noise level in the images
----------
[Input]
a multi-channel tensor of noise map
[Output]
A list of noise level value
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
nl_list = np.zeros((lm_numpy.shape[0], chn,1))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
selected_lm = selected_lm[selected_lm>thre]
if selected_lm.shape[0] == 0:
nl_list[n, c] = 0
else:
hist = np.histogram(selected_lm, density=True)
nl_ind = np.argmax(hist[0])
#print(nl_ind)
#print(hist[0])
#print(hist[1])
nl = ( hist[1][nl_ind] + hist[1][nl_ind+1] ) / 2.
nl_list[n, c] = nl
return nl_list
def get_cdf_noise_in_maps(lm, thre=0.8, chn=3):
'''
Description: To find out the most frequent estimated noise level in the images
----------
[Input]
a multi-channel tensor of noise map
[Output]
A list of noise level value
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
nl_list = np.zeros((lm_numpy.shape[0], chn,1))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
H, x = np.histogram(selected_lm, normed=True)
dx = x[1]-x[0]
F = np.cumsum(H)*dx
F_ind = np.where(F>0.9)[0][0]
nl_list[n, c] = x[F_ind]
print(nl_list[n,c])
return nl_list
def get_pdf_in_maps(lm, mark, chn=1):
'''
    Description: get the noise estimation pdf of each channel
----------
[Input]
a multi-channel tensor of noise map and channel dimension
chn: the channel number for gaussian
[Output]
    PDF (binned histogram) of each sample and each channel
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
pdf_list = np.zeros((lm_numpy.shape[0], chn, 10))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
H, x = np.histogram(selected_lm, range=(0.,1.), bins=10, normed=True)
dx = x[1]-x[0]
F = H * dx
pdf_list[n, c, :] = F
#sio.savemat(mark + str(c) + '.mat',{'F':F})
# plt.bar(range(10), F)
#plt.savefig(mark + str(c) + '.png')
# plt.close()
return pdf_list
def get_pdf_matching_score(F1, F2):
'''
    Description: Given two sets of PDF, get the overall matching score between them
-----------
[Input] F1, F2
[Output] score for each channel
'''
return np.mean((F1-F2)**2)
def decide_scale_factor(noisy_image, estimation_model, color=1, thre = 0, plot_flag = 1, stopping = 4, mark=''):
'''
    Description: Given a noisy image and the noise estimation model, keep down-sampling the image
    using pixel-shuffle methods, and estimate the pdf of the AWGN channel at each scale.
    Compare the changes of the density function and decide the optimal scaling factor
------------
[Input] noisy_image, estimation_model, plot_flag, stopping
[Output] plot the middle vector
score_seq: the matching score sequence between the two subsequent pdf
opt_scale: the optimal scaling factor
'''
if color == 1:
c = 3
elif color == 0:
c = 1
score_seq = []
Pre_CDF = None
flag = 0
for pss in range(1, stopping+1): #scaling factor from 1 to the limit
noisy_image = pixelshuffle(noisy_image, pss)
INoisy = np2ts(noisy_image, color)
INoisy = Variable(INoisy.cuda(), volatile=True)
EMap = torch.clamp(estimation_model(INoisy), 0., 1.)
EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0]
if flag != 0:
score = get_pdf_matching_score(EPDF, Pre_PDF) #TODO: How to match these two
print(score)
score_seq.append(score)
if score <= thre:
print('optimal scale is %d:' % (pss-1))
return (pss-1, score_seq)
Pre_PDF = EPDF
flag = 1
return (stopping, score_seq)
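# Hedged usage sketch (the estimation network and noisy image are assumed to be
# prepared elsewhere, and a CUDA device is required by the code above):
# opt_scale, scores = decide_scale_factor(noisy_img, est_net, color=1, thre=0.1, plot_flag=0, stopping=4, mark='demo')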
def get_max_noise_in_maps(lm, chn=3):
'''
Description: To find out the maximum level of noise level in the images
----------
[Input]
a multi-channel tensor of noise map
[Output]
A list of noise level value
'''
lm_numpy = lm.data.cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
for n in range(lm_numpy.shape[0]):
for c in range(chn):
nl = np.amax(lm_numpy[n, :, :, c])
nl_list[n, c] = nl
return nl_list
def get_smooth_maps(lm, dilk = 50, gsd = 10):
'''
Description: To return the refined maps after dilation and gaussian blur
[Input] a multi-channel tensor of noise map
[Output] a multi-channel tensor of refined noise map
'''
kernel = np.ones((dilk, dilk))
lm_numpy = lm.data.squeeze(0).cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
ref_lm_numpy = lm_numpy.copy() #a refined map
for c in range(lm_numpy.shape[2]):
nmap = lm_numpy[:, :, c]
nmap_dilation = cv2.dilate(nmap, kernel, iterations=1)
ref_lm_numpy[:, :, c] = nmap_dilation
#ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd)
RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor
def zeroing_out_maps(lm, keep=0):
'''
Only Keep one channel and zero out other channels
[Input] a multi-channel tensor of noise map
[Output] a multi-channel tensor of noise map after zeroing out items
'''
lm_numpy = lm.data.squeeze(0).cpu().numpy()
lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
ref_lm_numpy = lm_numpy.copy() #a refined map
for c in range(lm_numpy.shape[2]):
if np.isin(c,keep)==0:
ref_lm_numpy[:, :, c] = 0.
print(ref_lm_numpy)
RF_tensor = np2ts(ref_lm_numpy)
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
return RF_tensor
def level_refine(NM_tensor, ref_mode, chn=3,cFlag=False):
'''
Description: To refine the estimated noise level maps
[Input] the noise map tensor, and a refinement mode
Mode:
[0] Get the most salient (the most frequent estimated noise level)
[1] Get the maximum value of noise level
[2] Gaussian smooth the noise level map to make the regional estimation more smooth
[3] Get the average maximum value of the noise level
[5] Get the CDF thresholded value
[Output] a refined map tensor with four channels
'''
#RF_tensor = NM_tensor.clone() #get a clone version of NM tensor without changing the original one
if ref_mode == 0 or ref_mode == 1 or ref_mode == 4 or ref_mode==5: #if we use a single value for the map
if ref_mode == 0 or ref_mode == 4:
nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn)
if ref_mode == 4: #half the estimation
nl_list = nl_list - nl_list
print(nl_list)
elif ref_mode == 1:
nl_list = get_max_noise_in_maps(NM_tensor, chn)
elif ref_mode == 5:
nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn)
noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before concatenating
for n in range(NM_tensor.shape[0]):
noise_map[n,:,:,:] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]),
(chn, NM_tensor.size()[2], NM_tensor.size()[3]))
RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
if torch.cuda.is_available() and not cFlag:
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
else:
RF_tensor = Variable(RF_tensor,volatile=True)
elif ref_mode == 2:
RF_tensor = get_smooth_maps(NM_tensor, 10, 5)
elif ref_mode == 3:
lb = get_salient_noise_in_maps(NM_tensor)
up = get_max_noise_in_maps(NM_tensor)
nl_list = ( lb + up ) * 0.5
noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before concatenating
noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]),
(chn, NM_tensor.size()[2], NM_tensor.size()[3]))
RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
RF_tensor = Variable(RF_tensor.cuda(),volatile=True)
return (RF_tensor, nl_list)
def normalize(a, len_v, min_v, max_v):
'''
normalize the sequence of factors
'''
norm_a = np.reshape(a, (len_v,1))
norm_a = (norm_a - float(min_v)) / float(max_v - min_v)
return norm_a
def generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0):
noise_level_list = np.zeros((c, 1))
if s_or_m == 0: #single noise type
if val == 0:
for chn in range(c):
noise_level_list[chn] = np.random.uniform(limit_set[0][0], limit_set[0][1])
elif val == 1:
for chn in range(c):
noise_level_list[chn] = 35
noisy_img = generate_noisy(current_image, 0, noise_level_list /255.)
return (noisy_img, noise_level_list)
def generate_ground_truth_noise_map(noise_map, n, noise_level_list, limit_set, c, pn, pw, ph):
for chn in range(c):
noise_level_list[chn] = normalize(noise_level_list[chn], 1, limit_set[0][0], limit_set[0][1]) #normalize the level value
noise_map[n, :, :, :] = np.reshape(np.tile(noise_level_list, pw * ph), (c, pw, ph)) #total number of channels
return noise_map
#Add noise to the original images
def generate_noisy(image, noise_type, noise_level_list=0, sigma_s=20, sigma_c=40):
'''
Description: To generate noisy images of different types
----------
[Input]
image : ndarray of float type: [0,1] just one image, current support gray or color image input (w,h,c)
noise_type: 0,1,2,3
noise_level_list: pre-defined noise level for each channel, without normalization: only information of 3 channels
[0]'AWGN' Multi-channel Gaussian-distributed additive noise
    [1]'RVIN' Replaces random pixels with random values in [0,1]. noise_level: ratio of the occupation of the changed pixels
[2]'Gaussian-Poisson' GP noise approximator, the combinatin of signal-dependent and signal independent noise
[Output]
A noisy image
'''
w, h, c = image.shape
#Some unused noise type: Poisson and Uniform
#if noise_type == *:
#vals = len(np.unique(image))
#vals = 2 ** np.ceil(np.log2(vals))
#noisy = np.random.poisson(image * vals) / float(vals)
#if noise_type == *:
#uni = np.random.uniform(-factor,factor,(w, h, c))
#uni = uni.reshape(w, h, c)
#noisy = image + uni
noisy = image.copy()
if noise_type == 0: #MC-AWGN model
gauss = np.zeros((w, h, c))
for chn in range(c):
gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h))
noisy = image + gauss
elif noise_type == 1: #MC-RVIN model
for chn in range(c): #process each channel separately
prob_map = np.random.uniform(0.0, 1.0, (w, h))
noise_map = np.random.uniform(0.0, 1.0, (w, h))
noisy_chn = noisy[: , :, chn]
noisy_chn[ prob_map < noise_level_list[chn] ] = noise_map[ prob_map < noise_level_list[chn] ]
elif noise_type == 2:
#sigma_s = np.random.uniform(0.0, 0.16, (3,))
#sigma_c = np.random.uniform(0.0, 0.06, (3,))
sigma_c = [sigma_c]*3
sigma_s = [sigma_s]*3
sigma_s = np.reshape(sigma_s, (1, 1, c)) #reshape the sigma factor to [1,1,c] to multiply with the image
noise_s_map = np.multiply(sigma_s, image) #according to x or temp_x?? (according to clean image or irradience)
#print(noise_s_map) # different from the official code, here we use the original clean image x to compute the variance
noise_s = np.random.randn(w, h, c) * noise_s_map #use the new variance to shift the normal distribution
noisy = image + noise_s
#add signal_independent noise to L
noise_c = np.zeros((w, h, c))
for chn in range(3):
noise_c [:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h))
noisy = noisy + noise_c
return noisy
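# Hedged usage sketch: add channel-wise AWGN with sigma 15/255 to a float image
# in [0, 1] (loading of `img` is assumed):
# noisy = generate_noisy(img, 0, np.array([15., 15., 15.]) / 255.)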
#generate AWGN-RVIN noise together
def generate_comp_noisy(image, noise_level_list):
'''
Description: To generate mixed AWGN and RVIN noise together
----------
[Input]
image: a float image between [0,1]
noise_level_list: AWGN and RVIN noise level
[Output]
A noisy image
'''
w, h, c = image.shape
noisy = image.copy()
for chn in range(c):
mix_thre = noise_level_list[c+chn] #get the mix ratio of AWGN and RVIN
gau_std = noise_level_list[chn] #get the gaussian std
prob_map = np.random.uniform( 0, 1, (w, h) ) #the prob map
noise_map = np.random.uniform( 0, 1, (w, h) ) #the noisy map
noisy_chn = noisy[: ,: ,chn]
noisy_chn[prob_map < mix_thre ] = noise_map[prob_map < mix_thre ]
gauss = np.random.normal(0, gau_std, (w, h))
noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ] + gauss[prob_map >= mix_thre]
return noisy
def generate_denoise(image, model, noise_level_list):
'''
Description: Generate Denoised Blur Images
----------
[Input]
image:
model:
noise_level_list:
[Output]
A blur image patch
'''
#input images
ISource = np2ts(image)
ISource = torch.clamp(ISource, 0., 1.)
ISource = Variable(ISource.cuda(),volatile=True)
#input denoise conditions
noise_map = np.zeros((1, 6, image.shape[0], image.shape[1])) #initialize the noise map before concatenating
noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6, image.shape[0], image.shape[1]))
NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
NM_tensor = Variable(NM_tensor.cuda(),volatile=True)
#generate blur images
Res = model(ISource, NM_tensor)
Out = torch.clamp(ISource-Res, 0., 1.)
out_numpy = Out.data.squeeze(0).cpu().numpy()
out_numpy = np.transpose(out_numpy, (1, 2, 0))
return out_numpy
#TODO: two pixel shuffle functions to process the images
def pixelshuffle(image, scale):
'''
    Description: Given an image, return a reversible sub-sampling
    [Input]: Image ndarray float
    [Return]: A mosaic image of shuffled pixels
'''
if scale == 1:
return image
w, h ,c = image.shape
mosaic = np.array([])
for ws in range(scale):
band = np.array([])
for hs in range(scale):
temp = image[ws::scale, hs::scale, :] #get the sub-sampled image
band = np.concatenate((band, temp), axis = 1) if band.size else temp
mosaic = np.concatenate((mosaic, band), axis = 0) if mosaic.size else band
return mosaic
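# Hedged sketch: for dimensions divisible by the scale, reverse_pixelshuffle
# (defined below) undoes pixelshuffle exactly:
# img = np.random.rand(4, 4, 3)
# assert np.allclose(reverse_pixelshuffle(pixelshuffle(img, 2), 2), img)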
def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0,0]):
'''
    Description: Given a mosaic image of subsampling, recombine it into a full image
[Input]: Image
[Return]: Recombine it using different portions of pixels
'''
w, h, c = image.shape
real = np.zeros((w, h, c)) #real image
wf = 0
hf = 0
for ws in range(scale):
hf = 0
for hs in range(scale):
temp = real[ws::scale, hs::scale, :]
            wc, hc, cc = temp.shape #get the shape of the current image
if fill==1 and ws==ind[0] and hs==ind[1]:
real[ws::scale, hs::scale, :] = fill_image[wf:wf+wc, hf:hf+hc, :]
else:
real[ws::scale, hs::scale, :] = image[wf:wf+wc, hf:hf+hc, :]
hf = hf + hc
wf = wf + wc
return real
def scal2map(level, h, w, min_v=0., max_v=255.):
'''
Change a single normalized noise level value to a map
[Input]: level: a scaler noise level(0-1), h, w
[Return]: a pytorch tensor of the cacatenated noise level map
'''
#get a tensor from the input level
level_tensor = torch.from_numpy(np.reshape(level, (1,1))).type(torch.FloatTensor)
#make the noise level to a map
    level_tensor = level_tensor.view(level_tensor.size(0), level_tensor.size(1), 1, 1)
level_tensor = level_tensor.repeat(1, 1, h, w)
return level_tensor
def scal2map_spatial(level1, level2, h, w):
stdN_t1 = scal2map(level1, int(h/2), w)
stdN_t2 = scal2map(level2, h-int(h/2), w)
stdN_tensor = torch.cat([stdN_t1, stdN_t2], dim=2)
return stdN_tensor
|
seq2seq/corpus.py | shinoyuki222/torch-light | 310 | 12790271 | import torch
import argparse
import logging
from utils import corpora2idx, normalizeString
from const import *
class Dictionary(object):
def __init__(self):
self.word2idx = {
WORD[BOS]: BOS,
WORD[EOS]: EOS,
WORD[PAD]: PAD,
WORD[UNK]: UNK
}
self.idx = 4
def add(self, word):
if self.word2idx.get(word) is None:
self.word2idx[word] = self.idx
self.idx += 1
def __call__(self, sents, min_count):
words = [word for sent in sents for word in sent]
word_count = {w: 0 for w in set(words)}
for w in words: word_count[w]+=1
ignored_word_count = 0
for word, count in word_count.items():
if count <= min_count:
ignored_word_count += 1
continue
self.add(word)
return ignored_word_count
def __len__(self):
return self.idx
def __str__(self):
return "%s(size = %d)".format(self.__class__.__name__, len(self.idx))
class Corpus(object):
def __init__(self, save_data, max_len=20, min_word_count=1):
self._save_data = save_data
self._max_len = max_len
self._min_word_count = min_word_count
self.src_sents = None
self.tgt_sents = None
self.src_valid_sents = None
self.tgt_valid_sents = None
self.src_dict = Dictionary()
self.tgt_dict = Dictionary()
def parse(self):
def gather_file(file_, max_len):
en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0
for sentences in open(file_):
en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\t')]
en_ws = [word for word in en_.strip().split()]
fra_ws = [word for word in fra_.strip().split()]
if len(en_ws) > max_len:
en_cut_count += 1
en_ws = en_ws[:max_len]
en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]])
if len(fra_ws) > max_len:
fra_cut_count += 1
fra_ws = fra_ws[:max_len]
fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]])
return fra_sents, en_sents, fra_cut_count, en_cut_count
max_len = self._max_len - 2
src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len)
src_valid, tgt_valid, _, _ = gather_file('data/test', max_len)
print("English data`s length out of range numbers - [{}]".format(en_cut_count))
print("French data`s length out of range numbers - [{}]".format(fra_cut_count))
src_ignore = self.src_dict(src_train, self._min_word_count)
tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count)
if src_ignore != 0:
print("Ignored src word counts - [{}]".format(src_ignore))
if tgt_ignore != 0:
print("Ignored tgt word counts - [{}]".format(tgt_ignore))
self.src_train = src_train
self.tgt_train = tgt_train
self.src_valid = src_valid
self.tgt_valid = tgt_valid
def save(self):
data = {
'max_word_len': self._max_len,
'dict': {
'src': self.src_dict.word2idx,
'src_size': len(self.src_dict),
'tgt': self.tgt_dict.word2idx,
'tgt_size': len(self.tgt_dict)
},
'train': {
'src': corpora2idx(self.src_train, self.src_dict.word2idx),
'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx)
},
'valid': {
'src': corpora2idx(self.src_valid, self.src_dict.word2idx),
'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx)
}
}
torch.save(data, self._save_data)
print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict)))
def process(self):
self.parse()
self.save()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='seq2seq corpora')
parser.add_argument('--save-data', type=str, default='data/seq2seq.pt',
help='path to save processed data')
    parser.add_argument('--max-length', type=int, default=20,
help='max length of sentence')
parser.add_argument('--min-word-count', type=int, default=1,
help='min corpora count to discard')
args = parser.parse_args()
    corpus = Corpus(args.save_data, args.max_length, args.min_word_count)
corpus.process()
|
third_party/upb/docs/render.py | echo80313/grpc | 515 | 12790279 | <reponame>echo80313/grpc<gh_stars>100-1000
#!/usr/bin/env python3
import subprocess
import sys
import shutil
import os
if len(sys.argv) < 2:
print("Must pass a filename argument")
sys.exit(1)
in_filename = sys.argv[1]
out_filename = in_filename.replace(".in.md", ".md")
out_dir = in_filename.replace(".in.md", "")
if in_filename == out_filename:
print("File must end in .in.md")
sys.exit(1)
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
os.mkdir(out_dir)
file_num = 1
with open(out_filename, "wb") as out_file, open(in_filename, "rb") as in_file:
for line in in_file:
if line.startswith(b"```dot"):
dot_lines = []
while True:
dot_line = next(in_file)
if dot_line == b"```\n":
break
dot_lines.append(dot_line)
dot_input = b"".join(dot_lines)
svg_filename = out_dir + "/" + str(file_num) + ".svg"
svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input)
out_file.write(b"<div align=center>\n")
out_file.write(b"<img src='%s'/>\n" % (svg_filename.encode('utf-8')))
out_file.write(b"</div>\n")
file_num += 1
else:
out_file.write(line)
|
tools/real_world_impact/nsfw_urls.py | zealoussnow/chromium | 14,668 | 12790301 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NSFW urls in the Alexa top 2000 sites."""
nsfw_urls = set([
"http://xhamster.com/",
"http://xvideos.com/",
"http://livejasmin.com/",
"http://pornhub.com/",
"http://redtube.com/",
"http://youporn.com/",
"http://xnxx.com/",
"http://tube8.com/",
"http://youjizz.com/",
"http://adultfriendfinder.com/",
"http://hardsextube.com/",
"http://yourlust.com/",
"http://drtuber.com/",
"http://beeg.com/",
"http://largeporntube.com/",
"http://nuvid.com/",
"http://bravotube.net/",
"http://spankwire.com/",
"http://discreethearts.com/",
"http://keezmovies.com/",
"http://xtube.com/",
"http://alphaporno.com/",
"http://4tube.com/",
"http://nudevista.com/",
"http://porntube.com/",
"http://xhamstercams.com/",
"http://porn.com/",
"http://video-one.com/",
"http://perfectgirls.net/",
"http://slutload.com/",
"http://sunporno.com/",
"http://tnaflix.com/",
"http://pornerbros.com/",
"http://h2porn.com/",
"http://adult-empire.com/",
"http://pornhublive.com/",
"http://sexitnow.com/",
"http://pornsharia.com/",
"http://freeones.com/",
"http://tubegalore.com/",
"http://xvideos.jp/",
"http://brazzers.com/",
"http://fapdu.com/",
"http://pornoxo.com/",
"http://extremetube.com/",
"http://hot-sex-tube.com/",
"http://xhamsterhq.com/",
"http://18andabused.com/",
"http://tubepleasure.com/",
"http://18schoolgirlz.com/",
"http://chaturbate.com/",
"http://motherless.com/",
"http://yobt.com/",
"http://empflix.com/",
"http://hellporno.com/",
"http://ashemaletube.com/",
"http://watchmygf.com/",
"http://redtubelive.com/",
"http://met-art.com/",
"http://gonzoxxxmovies.com/",
"http://shufuni.com/",
"http://vid2c.com/",
"http://dojki.com/",
"http://cerdas.com/",
"http://overthumbs.com/",
"http://xvideoslive.com/",
"http://playboy.com/",
"http://caribbeancom.com/",
"http://tubewolf.com/",
"http://xmatch.com/",
"http://ixxx.com/",
"http://nymphdate.com/",
]) |
python/keepsake/version.py | jsemric/keepsake | 810 | 12790337 | # This file is auto-generated by the root Makefile. Do not edit manually.
version = "0.4.2"
|
beartype_test/a00_unit/a00_util/cache/test_utilcachecall.py | posita/beartype | 1,056 | 12790348 | <gh_stars>1000+
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype callable caching utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.cache.utilcachecall` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning
from beartype_test.util.mark.pytmark import ignore_warnings
from pytest import raises
# ....................{ TESTS }....................
# Prevent pytest from capturing and displaying all expected non-fatal
# beartype-specific warnings emitted by the @callable_cached decorator.
@ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning)
def test_callable_cached_pass() -> None:
'''
Test successful usage of the
:func:`beartype._util.cache.utilcachecall.callable_cached` decorator.
'''
# Defer heavyweight imports.
from beartype._util.cache.utilcachecall import callable_cached
# Callable memoized by this decorator.
@callable_cached
def still_i_rise(bitter, twisted, lies):
# If an arbitrary condition, raise an exception whose value depends on
# these parameters to exercise this decorator's conditional caching of
# exceptions.
if len(lies) == 6:
raise ValueError(lies)
# Else, return a value depending on these parameters to exercise this
# decorator's conditional caching of return values.
return bitter + twisted + lies
# Objects to be passed as parameters below.
bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',)
twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',)
lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',)
dust = ('But', 'still,', 'like', 'dust,', "I'll", 'rise',)
# Assert that memoizing two calls passed the same positional arguments
# caches and returns the same value.
assert (
still_i_rise(bitter, twisted, lies) is
still_i_rise(bitter, twisted, lies))
# Assert that memoizing two calls passed the same positional and keyword
# arguments in the same order caches and returns the same value.
assert (
still_i_rise(bitter, twisted=twisted, lies=lies) is
still_i_rise(bitter, twisted=twisted, lies=lies))
# Assert that memoizing two calls passed the same keyword arguments in the
# same order cache and return the same value.
assert (
still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is
still_i_rise(bitter=bitter, twisted=twisted, lies=lies))
# Assert that memoizing a call expected to raise an exception does so.
with raises(ValueError) as exception_first_info:
still_i_rise(bitter, twisted, dust)
# Assert that repeating that call reraises the same exception.
with raises(ValueError) as exception_next_info:
still_i_rise(bitter, twisted, dust)
    assert exception_first_info.value is exception_next_info.value
# Assert that memoizing two calls passed the same keyword arguments in a
# differing order cache and return differing values.
assert (
still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not
still_i_rise(twisted=twisted, lies=lies, bitter=bitter))
# Assert that passing one or more unhashable parameters to this callable
# succeeds with the expected return value.
assert still_i_rise(
('Just', 'like', 'moons',),
('and', 'like', 'suns',),
('With the certainty of tides',),
) == (
'Just', 'like', 'moons',
'and', 'like', 'suns',
'With the certainty of tides',
)
def test_callable_cached_fail() -> None:
'''
Test unsuccessful usage of the
:func:`beartype._util.cache.utilcachecall.callable_cached` decorator.
'''
# Defer heavyweight imports.
from beartype._util.cache.utilcachecall import callable_cached
from beartype.roar._roarexc import _BeartypeUtilCallableCachedException
# Assert that attempting to memoize a callable accepting one or more
# variadic positional parameters fails with the expected exception.
with raises(_BeartypeUtilCallableCachedException):
@callable_cached
def see_me_broken(*args):
return args
# Assert that attempting to memoize a callable accepting one or more
# variadic keyword parameters fails with the expected exception.
with raises(_BeartypeUtilCallableCachedException):
@callable_cached
def my_soulful_cries(**kwargs):
return kwargs
|
tests/integration/test_networks.py | unparalleled-js/ape | 210 | 12790410 | <filename>tests/integration/test_networks.py
import pytest
from eth_typing import HexStr
@pytest.mark.parametrize("block_id", ("latest", 0, "0", "0x0", HexStr("0x0")))
def test_get_block(eth_tester_provider, block_id):
latest_block = eth_tester_provider.get_block(block_id)
# Each parameter is the same as requesting the first block.
assert latest_block.number == 0
assert latest_block.gas_data.base_fee == 1000000000
assert latest_block.gas_data.gas_used == 0
|
alipay/aop/api/domain/AlipayUserApplepayProvisioningbundleCreateModel.py | antopen/alipay-sdk-python-all | 213 | 12790420 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserApplepayProvisioningbundleCreateModel(object):
def __init__(self):
self._alipay_user_identifier = None
@property
def alipay_user_identifier(self):
return self._alipay_user_identifier
@alipay_user_identifier.setter
def alipay_user_identifier(self, value):
self._alipay_user_identifier = value
def to_alipay_dict(self):
params = dict()
if self.alipay_user_identifier:
if hasattr(self.alipay_user_identifier, 'to_alipay_dict'):
params['alipay_user_identifier'] = self.alipay_user_identifier.to_alipay_dict()
else:
params['alipay_user_identifier'] = self.alipay_user_identifier
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserApplepayProvisioningbundleCreateModel()
if 'alipay_user_identifier' in d:
o.alipay_user_identifier = d['alipay_user_identifier']
return o
|
examples/house-credit-default/get_input.py | wqruan/tf-encrypted | 825 | 12790428 | """CLI for data preparation and processing."""
import argparse
from utils import data_prep
from utils import read_one_row
from utils import save_input
parser = argparse.ArgumentParser()
parser.add_argument(
"--save_row",
type=int,
default="0",
help="Saves a single row to a file defaults to row 0",
)
parser.add_argument(
"--input_file",
type=str,
default="final_data_with_feature_engineered.csv",
help=(
"File to read the row from defaults to "
"final_data_with_feature_engineered.csv"
),
)
parser.add_argument(
"--output_file",
type=str,
default="input.npy",
help=("Output file with the input row defaults to " "input.npy"),
)
config = parser.parse_args()
input_file = config.input_file
output_file = config.output_file
save_row = config.save_row
train_x_df, _ = data_prep(input_file)
out = read_one_row(save_row, train_x_df)
save_input(output_file, out)
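# Illustrative invocation (added note, not part of the original file):
#
#   python get_input.py --save_row 5 \
#       --input_file final_data_with_feature_engineered.csv \
#       --output_file input.npy
#
# runs data_prep() on the CSV, extracts row 5 with read_one_row(), and writes it
# to input.npy via save_input().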
|
evaluation/datasets/test_datasets.py | hsiehkl/pdffigures2 | 296 | 12790469 | import unittest
import math
import datasets
from pdffigures_utils import get_num_pages_in_pdf
class TestDataset(unittest.TestCase):
def test_pages_annotated_consistency(self):
for dataset in datasets.DATASETS.values():
dataset = dataset()
pages_annotated = dataset.get_annotated_pages_map()
if pages_annotated is None:
continue
pdf_file_map = dataset.get_pdf_file_map()
annotations = dataset.get_annotations("all")
docs = dataset.get_doc_ids("all")
self.assertEqual(set(docs), pages_annotated.keys())
for doc, pages in pages_annotated.items():
filename = pdf_file_map[doc]
self.assertTrue(len(pages) <= dataset.MAX_PAGES_TO_ANNOTATE)
num_pages = get_num_pages_in_pdf(filename)
self.assertTrue(num_pages >= max(pages) - 1)
expected_pages = math.ceil(num_pages*dataset.PAGE_SAMPLE_PERCENT)
expected_pages = min(expected_pages, dataset.MAX_PAGES_TO_ANNOTATE)
self.assertTrue(len(pages) == expected_pages)
if doc in annotations:
ann = annotations[doc]
self.assertEqual(set(ann["annotated_pages"]), set(pages))
for fig in ann["figures"]:
self.assertTrue(fig.page in pages)
def test_consistency(self):
for dataset in datasets.DATASETS.values():
dataset = dataset()
all_docs = set(dataset.get_doc_ids(datasets.DatasetPartition("all")))
doc_map = dataset.get_pdf_file_map()
self.assertEqual(len(all_docs - doc_map.keys()), 0)
doc_map = dataset.get_color_image_file_map()
if doc_map is not None:
self.assertEqual(len(all_docs - doc_map.keys()), 0)
doc_map = dataset.get_gray_image_file_map()
if doc_map is not None:
self.assertEqual(len(all_docs - doc_map.keys()), 0)
documents = dataset.load_doc_ids(all_docs)
self.assertEqual(all_docs, set([x.doc_id for x in documents]))
for doc in documents:
if doc.color_images is not None and doc.gray_images is not None:
self.assertEqual(doc.gray_images.keys(), doc.color_images.keys())
pages_annotated = doc.pages_annotated
for fig in doc.figures:
self.assertTrue(fig.page in pages_annotated)
self.assertEqual(doc.pdffile.split("/")[-1][:-4], doc.doc_id)
if __name__ == '__main__':
unittest.main()
|
mmdet/core/anchor/__init__.py | MinliangLin/TSD | 454 | 12790473 | from .anchor_generator import AnchorGenerator
from .anchor_target import anchor_inside_flags, anchor_target, images_to_levels, unmap
from .guided_anchor_target import ga_loc_target, ga_shape_target
from .point_generator import PointGenerator
from .point_target import point_target
__all__ = [
"AnchorGenerator",
"anchor_target",
"anchor_inside_flags",
"ga_loc_target",
"ga_shape_target",
"PointGenerator",
"point_target",
"images_to_levels",
"unmap",
]
|
scrubadub/filth/date_of_birth.py | datascopeanalytics/scrubadub | 190 | 12790533 | import random
import datetime
import dateparser
from faker import Faker
from .base import Filth
class DateOfBirthFilth(Filth):
type = 'date_of_birth'
min_age_years = 18
max_age_years = 100
@staticmethod
def generate(faker: Faker) -> str:
"""Generates an example of this ``Filth`` type, usually using the faker python library.
:param faker: The ``Faker`` class from the ``faker`` library
:type faker: Faker
:return: An example of this ``Filth``
:rtype: str
"""
formats = [
'%c', # Tue Aug 16 21:30:00 1988 (en_US); locale dependant
'%x', # 08/16/1988 (en_US); locale dependant
'%a %d %b %Y', # Sun 19 Jan 1999
'%A %d %B %Y', # Sunday 19 January 1999
'%d-%m-%Y', # 15-01-1999
'%A %dth, %B, %Y', # Monday 08th, January, 1973
]
return faker.date_of_birth().strftime(random.choice(formats))
def is_valid(self) -> bool:
"""Check to see if the found filth is valid."""
found_date = dateparser.parse(self.text)
if found_date is None:
return False
years_since_identified_date = datetime.date.today().year - found_date.year
return DateOfBirthFilth.min_age_years <= years_since_identified_date <= DateOfBirthFilth.max_age_years
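# Illustrative sketch (added note, not part of the original module); the Filth
# constructor arguments shown are assumptions for the example only:
#
#   fake = Faker()
#   example = DateOfBirthFilth.generate(fake)            # e.g. 'Sun 19 Jan 1975'
#   filth = DateOfBirthFilth(beg=0, end=len(example), text=example)
#   filth.is_valid()  # True only if the parsed age falls within 18-100 years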
|
scripts/kopf/example.py | victoriouscoder/oreilly-kubernetes | 323 | 12790545 | <gh_stars>100-1000
import kopf
@kopf.on.create('oreilly.com', 'v1alpha1', 'book')
def create_fn(spec, **kwargs):
print(f"And here we are! Creating: {spec}")
return {'message': 'hello world'} # will be the new status
#@kopf.on.update('oreilly.com', 'v1alpha1', 'book')
#def update_fn(old, new, diff, **kwargs):
# print('UPDATED')
# print(f"The following object got updated: {spec}")
# return {'message': 'updated'}
#@kopf.on.delete('oreilly.com', 'v1alpha1', 'book')
#def delete_fn(metadata, **kwargs):
|
Chapter10/clean_sample.py | fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition | 244 | 12790549 | <reponame>fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition<gh_stars>100-1000
import sys
sys.stdout.write('ID_1 ID_2 missing\n0 0 0 \n')
for line in sys.stdin:
ind = line.rstrip()
sys.stdout.write('%s %s 0\n' % (ind, ind))
|
xv_leak_tools/scriptlets/command_line_for_pid.py | UAEKondaya1/expressvpn_leak_testing | 219 | 12790558 | #!/usr/bin/env python3
import argparse
import sys
import psutil
from wrap_scriptlet import wrap_scriptlet
def run():
parser = argparse.ArgumentParser()
parser.add_argument('pid')
args = parser.parse_args(sys.argv[1:])
process = psutil.Process(int(args.pid))
return process.cmdline()
sys.exit(wrap_scriptlet(run))
|
addons/Sprytile-6b68d00/rx/linq/observable/dowhile.py | trisadmeslek/V-Sekai-Blender-tools | 733 | 12790564 | from rx.core import Observable
from rx.internal import extensionmethod
@extensionmethod(Observable)
def do_while(self, condition):
"""Repeats source as long as condition holds emulating a do while loop.
Keyword arguments:
condition -- {Function} The condition which determines if the source
will be repeated.
Returns an observable {Observable} sequence which is repeated as long
as the condition holds.
"""
return Observable.concat([self, Observable.while_do(condition, self)])
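# Illustrative sketch (added note, not part of the original RxPY source),
# assuming an RxPY 1.x style API; the names below are for the example only:
#
#   runs = {'count': 0}
#
#   def keep_going(_):
#       runs['count'] += 1
#       return runs['count'] < 3
#
#   Observable.from_([1, 2]).do_while(keep_going).subscribe(print)
#   # re-subscribes to the source after each completion until keep_going
#   # returns False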
|
tests/missing_data/test_missing_data_air_passengers_None_None.py | shaido987/pyaf | 377 | 12790570 | <filename>tests/missing_data/test_missing_data_air_passengers_None_None.py
import tests.missing_data.test_missing_data_air_passengers_generic as gen
gen.test_air_passengers_missing_data(None, None)
|
vivit.py | rishikksh20/ViViT-pytorch | 204 | 12790612 | <filename>vivit.py
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from module import Attention, PreNorm, FeedForward
import numpy as np
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
self.norm = nn.LayerNorm(dim)
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
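# Note (added for clarity, not in the original): this Transformer expects inputs
# of shape (batch, num_tokens, dim) and returns the same shape; ViViT below runs
# it twice, first over spatial patch tokens within each frame and then over the
# per-frame cls tokens.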
class ViViT(nn.Module):
def __init__(self, image_size, patch_size, num_classes, num_frames, dim = 192, depth = 4, heads = 3, pool = 'cls', in_channels = 3, dim_head = 64, dropout = 0.,
emb_dropout = 0., scale_dim = 4, ):
super().__init__()
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
patch_dim = in_channels * patch_size ** 2
self.to_patch_embedding = nn.Sequential(
Rearrange('b t c (h p1) (w p2) -> b t (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_frames, num_patches + 1, dim))
self.space_token = nn.Parameter(torch.randn(1, 1, dim))
self.space_transformer = Transformer(dim, depth, heads, dim_head, dim*scale_dim, dropout)
self.temporal_token = nn.Parameter(torch.randn(1, 1, dim))
self.temporal_transformer = Transformer(dim, depth, heads, dim_head, dim*scale_dim, dropout)
self.dropout = nn.Dropout(emb_dropout)
self.pool = pool
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, x):
x = self.to_patch_embedding(x)
b, t, n, _ = x.shape
cls_space_tokens = repeat(self.space_token, '() n d -> b t n d', b = b, t=t)
x = torch.cat((cls_space_tokens, x), dim=2)
x += self.pos_embedding[:, :, :(n + 1)]
x = self.dropout(x)
x = rearrange(x, 'b t n d -> (b t) n d')
x = self.space_transformer(x)
x = rearrange(x[:, 0], '(b t) ... -> b t ...', b=b)
cls_temporal_tokens = repeat(self.temporal_token, '() n d -> b n d', b=b)
x = torch.cat((cls_temporal_tokens, x), dim=1)
x = self.temporal_transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
return self.mlp_head(x)
if __name__ == "__main__":
img = torch.ones([1, 16, 3, 224, 224]).cuda()
model = ViViT(224, 16, 100, 16).cuda()
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
print('Trainable Parameters: %.3fM' % parameters)
out = model(img)
print("Shape of out :", out.shape) # [B, num_classes]
|
tests/test_engine.py | adithyavis/pywarm | 194 | 12790680 | <reponame>adithyavis/pywarm<filename>tests/test_engine.py
# 08-31-2019;
"""
Test cases for warm.engine.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent))
from warm import engine
def test_set_get_default_parent():
a = nn.Identity()
b = nn.Identity()
engine.set_default_parent(a)
    assert engine.get_default_parent() is a, 'get_default_parent result mismatches set_default_parent.'
    engine.set_default_parent(b)
    assert engine.get_default_parent() is b, 'get_default_parent result mismatches set_default_parent.'
def test_auto_name():
a = nn.Identity()
for i in range(10):
assert engine._auto_name('test', a) == f'test_{i+1}', 'new calls to _auto_name failed to increment name count.'
a(None) # test if forward pre hook is triggered to reset names
assert engine._auto_name('test', a) == 'test_1', 'forward_pre_hook did not work.'
def test_initialize():
a = nn.Parameter(torch.zeros(3, 4))
b = nn.Parameter(torch.zeros(3, 4))
c = nn.Parameter(torch.zeros(3, 4))
torch.manual_seed(1)
engine.initialize_(a, 'normal_')
torch.manual_seed(1)
nn.init.normal_(b)
assert torch.equal(a, b), 'initialize_ with str spec did not work correctly.'
assert not torch.equal(a, c), 'initialize_ with str spec did not work.'
torch.manual_seed(1)
engine.initialize_(c, nn.init.normal_)
assert torch.equal(a, c), 'initialize_ with function spec did not work correctly.'
def test_activate():
a = torch.randn(3, 4)
b = copy.deepcopy(a)
a = engine.activate(a, 'hardshrink')
b = F.hardshrink(b)
assert torch.equal(a, b), 'activate with str spec did not work correctly.'
a = engine.activate(a, 'relu')
b = F.relu(b)
assert torch.equal(a, b), 'activate with str spec did not work correctly.'
def test_permute():
x = torch.randn(1, 2, 3)
y = engine.permute(x, 'BCD', 'DCB')
assert list(y.shape) == [3, 2, 1], 'permute 3d tensor with str in_shape and str out_shape did not work correctly.'
y = engine.permute(x, 'BCD', None)
    assert list(y.shape) == [1, 2, 3], 'permute tensor with None out_shape did not work correctly.'
    y = engine.permute(x, 'BCD', [1, 0, 2])
    assert list(y.shape) == [2, 1, 3], 'permute tensor with list out_shape did not work correctly.'
x = torch.randn(1, 2, 3, 4)
y = engine.permute(x, 'BCD', 'DCB')
assert list(y.shape) == [3, 4, 2, 1], 'permute 4d tensor with str in_shape and str out_shape did not work correctly.'
y = engine.permute(x, 'DBC', 'CDB')
assert list(y.shape) == [4, 1, 2, 3], 'permute 4d tensor with str in_shape and str out_shape did not work correctly.'
x = torch.randn(1, 2, 3, 4, 5)
y = engine.permute(x, 'BDC', 'BCD')
assert list(y.shape) == [1, 5, 2, 3, 4], 'permute 5d tensor with str in_shape and str out_shape did not work correctly.'
x = torch.randn(1, 2)
y = engine.permute(x, 'BDC', 'BCD')
assert list(y.shape) == [1, 2], 'permute 2d tensor with str in_shape and str out_shape did not work correctly.'
y = engine.permute(x, 'CBD', 'DBC')
assert list(y.shape) == [2, 1], 'permute 2d tensor with str in_shape and str out_shape did not work correctly.'
def test_unused_kwargs():
kw = {'unused1':0, 'unused2':0, 'base_class':0}
unused = engine.unused_kwargs(kw)
assert 'base_class' not in unused, 'unused_kwargs leaks used.'
assert set(unused.keys()) == {'unused1', 'unused2'}, 'unused_kwargs did not filter kw correctly.'
def test_prepare_model_is_ready():
class TestModel(nn.Module):
def forward(self, x):
x = engine.forward(x, nn.Linear, 'linear',
base_arg=(x.shape[-1], 4, False), # in_features, out_features, bias
in_shape=None, out_shape=None, base_shape=None,
initialization={'weight':'ones_'}, activation=(F.dropout, {'p':1.0}), )
return x
x = torch.randn(1, 2, 3)
m = TestModel()
assert not engine.is_ready(m), 'is_ready did not work correctly.'
engine.prepare_model_(m, x)
assert engine.is_ready(m), 'prepare_model_ did not work correctly.'
assert m.linear_1.bias is None, 'linear_1 should not have bias.'
assert torch.allclose(m.linear_1.weight, torch.Tensor([1.0])), 'linear_1.weight should be initialized to all 1s.'
y = m(x)
assert torch.allclose(y, torch.Tensor([0.0])), 'y should be all 0s because we dropout everything.'
assert list(y.shape) == [1, 2, 4], 'y should have shape [1, 2, 4] after linear projection.'
def test_forward():
x = torch.randn(1, 2, 3)
m = nn.Module()
engine.set_default_parent(m)
class TripleOut(nn.Module): # to test tuple_out
def forward(self, x, b=1, c='2'):
return x+b, x, c
y = engine.forward(x, base_class=TripleOut, base_name='tri', tuple_out=False)
assert isinstance(y, torch.Tensor), 'tuple_out did not work correctly.'
y = engine.forward(x, base_class=TripleOut, base_name='tri', tuple_out=True)
assert isinstance(y, tuple) and len(y) == 3 and y[-1] == '2', 'tuple_out did not work correctly.'
y = engine.forward(x, base_class=TripleOut, base_name='tri', forward_kw={'c':3}, tuple_out=True)
assert y[-1] == 3, 'forward_kw did not work correctly.'
y = engine.forward(x, base_class=TripleOut, base_name='tri', forward_arg=(2.0,))
assert torch.allclose(y-x, torch.Tensor([2.0])), 'forward_arg did not work correctly.'
y = engine.forward(x, base_class=TripleOut, activation=(F.dropout, {'p':1.0}))
assert torch.allclose(y, torch.Tensor([0.0])), 'activation did not work correctly.'
y = engine.forward(
x, base_class=nn.Linear, base_kw={'out_features':4}, infer_kw={'in_features':'C'}, base_shape='BDC')
assert y.shape[1] == 4, 'base_kw, infer_kw did not work correctly.'
def test_namespace():
m = nn.Module()
engine.set_default_parent(m)
@engine.namespace
def f1(name=''):
return ';'.join([f2(name=name) for i in range(2)])
@engine.namespace
def f2(name=''):
return name
s0, s1, s2 = [f1() for i in range(3)]
assert s0 == 'f1_1-f2_1;f1_1-f2_2'
assert s1 == 'f1_2-f2_1;f1_2-f2_2'
assert s2 == 'f1_3-f2_1;f1_3-f2_2'
|
vega/trainer/callbacks/hccl.py | This-50m/vega | 724 | 12790682 | <filename>vega/trainer/callbacks/hccl.py<gh_stars>100-1000
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Data parallel callback."""
import logging
import vega
from .callback import Callback
from vega.common import ClassFactory, ClassType
from vega.common.general import General
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class Hccl(Callback):
"""Callback that saves the evaluated Performance."""
def __init__(self):
"""Initialize ModelCheckpoint callback."""
super(Hccl, self).__init__()
self.priority = 260
def init_trainer(self, logs=None):
"""Set trainer object for current callback."""
if not self.trainer.hccl:
return
if vega.is_torch_backend():
self._init_pytorch_trainer()
if vega.is_ms_backend():
self._init_ms_trainer()
def _init_pytorch_trainer(self):
import torch
import torch.distributed as dist
logger.info("init HCCL")
model = self.trainer.model
dist.init_process_group(
backend='hccl',
init_method=f"tcp://{General.cluster.hccl_server_ip}:{General.cluster.hccl_port}",
world_size=self.trainer.num_workers,
rank=self.trainer.rank_id)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.trainer.device_id],
broadcast_buffers=General.cluster.enable_broadcast_buffers)
self.trainer.model = model
def _init_ms_trainer(self):
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.communication.management import init
logger.info("init HCCL")
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
init()
def before_epoch(self, epoch, logs=None):
"""Be called before each epoach."""
if not vega.is_torch_backend() or not self.trainer.hccl:
return
if self.trainer.sampler is not None:
self.trainer.sampler.set_epoch(epoch)
def after_train(self, logs=None):
"""Stop session."""
if self.trainer.hccl and vega.is_tf_backend():
self.trainer.sess.run(self.trainer.npu_shutdown)
self.trainer.sess.close()
|
sbin/db_mgmt_cwe.py | AlexFaraino/cve-search | 377 | 12790718 | <gh_stars>100-1000
#!/usr/bin/env python3
#
# Import script of NIST CWE Common Weakness Enumeration.
#
# Until now, the import is only import Weakness description.
#
# The format is the following:
#
# { "_id" : ObjectId("52b70521b261026f36818515"), "weaknessabs" : "Variant",
# "name" : "ASP.NET Misconfiguration: Missing Custom Error Page",
# "description_summary" : "An ASP .NET application must enable custom error
# pages in order to prevent attackers from mining information from the
# framework's built-in responses.An ASP .NET application must enable custom
# error pages in order to prevent attackers from mining information from the
# framework's built-in responses.", "status" : "Draft", "id" : "12" }
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2013-2014 <NAME> - <EMAIL>
# Copyright (c) 2015-2016 <NAME> - <EMAIL>
# Imports
import os
import sys
runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runPath, ".."))
from dateutil.parser import parse as parse_datetime
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import argparse
import zipfile
import tempfile
from lib.ProgressBar import progressbar
from lib.Config import Configuration
import lib.DatabaseLayer as db
argparser = argparse.ArgumentParser(description='populate/update NIST CWE Common Weakness Enumeration database')
argparser.add_argument('-v', action='store_true', help='verbose output')
args = argparser.parse_args()
class CWEHandler(ContentHandler):
def __init__(self):
self.cwe = []
self.description_summary_tag = False
self.weakness_tag = False
def startElement(self, name, attrs):
if name == 'Weakness':
self.weakness_tag = True
self.statement = ""
self.weaknessabs = attrs.get('Weakness_Abstraction')
self.name = attrs.get('Name')
self.idname = attrs.get('ID')
self.status = attrs.get('Status')
self.cwe.append({'name': self.name, 'id': self.idname, 'status': self.status, 'weaknessabs': self.weaknessabs})
elif name == 'Description_Summary' and self.weakness_tag:
self.description_summary_tag = True
self.description_summary = ""
def characters(self, ch):
if self.description_summary_tag:
self.description_summary += ch.replace(" ", "")
def endElement(self, name):
if name == 'Description_Summary' and self.weakness_tag:
self.description_summary_tag = False
self.description_summary = self.description_summary + self.description_summary
self.cwe[-1]['description_summary'] = self.description_summary.replace("\n", "")
elif name == 'Weakness':
self.weakness_tag = False
# make parser
parser = make_parser()
ch = CWEHandler()
parser.setContentHandler(ch)
# check modification date
try:
(f, r) = Configuration.getFeedData('cwe')
except Exception as e:
print(e)
sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("cwe")))
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified('cwe')
if i is not None:
if lastmodified == i:
print("Not modified")
sys.exit(0)
# parse xml and store in database
parser.parse(f)
cweList=[]
for cwe in progressbar(ch.cwe):
cwe['description_summary']=cwe['description_summary'].replace("\t\t\t\t\t", " ")
if args.v:
print (cwe)
cweList.append(cwe)
db.bulkUpdate('cwe', cweList)
#update database info after successful program-run
db.setColUpdate('cwe', lastmodified)
|
pseudo/middlewares/standard_middleware.py | mifieldxu/pseudo-lang | 661 | 12790741 | from pseudo.middlewares.middleware import Middleware
from pseudo.pseudo_tree import Node
class StandardMiddleware(Middleware):
'''
changes standard_iterable_call in return to a special type
used by go
'''
@classmethod
def process(cls, tree):
return cls().transform(tree)
def transform_r(self, node, in_block=False, assignment=None):
if node.value.type == 'standard_iterable_call':
node.value.type = 'standard_iterable_call_return'
return node.value
else:
return node
transform_explicit_return = transform_implicit_return = transform_r
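# Note (added for clarity, not in the original): transform_r is bound to both
# explicit and implicit returns; a return whose value is a
# `standard_iterable_call` node is unwrapped and retagged as
# `standard_iterable_call_return` so the Go backend can treat it specially,
# while any other return node passes through unchanged.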
|
conf.py | evhub/coconut | 3,624 | 12790748 | <reponame>evhub/coconut<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: <NAME>
License: Apache 2.0
Description: Sphinx configuration file for the Coconut Programming Language.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from coconut.root import * # NOQA
from coconut.constants import (
version_str_tag,
without_toc,
with_toc,
)
from coconut.util import univ_open
import pydata_sphinx_theme # NOQA
import myst_parser # NOQA
# -----------------------------------------------------------------------------------------------------------------------
# README:
# -----------------------------------------------------------------------------------------------------------------------
with univ_open("README.rst", "r") as readme_file:
readme = readme_file.read()
with univ_open("index.rst", "w") as index_file:
index_file.write(readme.replace(without_toc, with_toc))
# -----------------------------------------------------------------------------------------------------------------------
# DEFINITIONS:
# -----------------------------------------------------------------------------------------------------------------------
from coconut.constants import ( # NOQA
project,
copyright,
author,
highlight_language,
)
version = VERSION
release = version_str_tag
html_theme = "pydata_sphinx_theme"
html_theme_options = {
}
master_doc = "index"
exclude_patterns = ["README.*"]
source_suffix = [".rst", ".md"]
default_role = "code"
extensions = ["myst_parser"]
myst_enable_extensions = [
"smartquotes",
]
myst_heading_anchors = 4
html_sidebars = {
"**": [
"localtoc.html",
],
}
|
cscs-checks/libraries/magma/magma_checks.py | CLIP-HPC/reframe | 167 | 12790785 | <gh_stars>100-1000
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class MagmaCheck(rfm.RegressionTest):
subtest = parameter(['cblas_z', 'zgemm', 'zsymmetrize', 'ztranspose',
'zunmbr'])
valid_systems = ['daint:gpu', 'dom:gpu']
valid_prog_environs = ['builtin']
num_gpus_per_node = 1
prebuild_cmds = ['patch < patch.txt']
modules = ['magma']
maintainers = ['AJ', 'SK']
tags = {'scs', 'production', 'maintenance'}
@run_before('compile')
def set_build_system_opts(self):
self.build_system = 'Make'
self.build_system.makefile = f'Makefile_{self.subtest}'
self.build_system.cxxflags = ['-std=c++11']
self.build_system.ldflags = ['-lcusparse', '-lcublas', '-lmagma',
'-lmagma_sparse']
self.executable = f'./testing_{self.subtest}'
# FIXME: Compile cblas_z with -O0 since with a higher level a
# segmentation fault is thrown
if self.subtest == 'cblas_z':
self.build_system.cxxflags += ['-O0']
@run_before('run')
def set_exec_opts(self):
if self.subtest == 'zgemm':
self.executable_opts = ['--range 1088:3136:1024']
@sanity_function
def assert_success(self):
return sn.assert_found(r'Result = PASS', self.stdout)
@run_before('performance')
def set_performance_patterns(self):
if self.subtest == 'cblas_z':
self.perf_patterns = {
'duration': sn.extractsingle(r'Duration: (\S+)',
self.stdout, 1, float)
}
self.reference = {
'daint:gpu': {
'duration': (0.10, None, 1.05, 's'),
},
'dom:gpu': {
'duration': (0.10, None, 1.05, 's'),
},
}
elif self.subtest == 'zgemm':
self.perf_patterns = {
'magma': sn.extractsingle(
r'MAGMA GFlops: (?P<magma_gflops>\S+)',
self.stdout, 'magma_gflops', float, 2
),
'cublas': sn.extractsingle(
r'cuBLAS GFlops: (?P<cublas_gflops>\S+)', self.stdout,
'cublas_gflops', float, 2)
}
self.reference = {
'daint:gpu': {
'magma': (3692.65, -0.05, None, 'Gflop/s'),
'cublas': (4269.31, -0.09, None, 'Gflop/s'),
},
'dom:gpu': {
'magma': (3692.65, -0.05, None, 'Gflop/s'),
'cublas': (4269.31, -0.09, None, 'Gflop/s'),
}
}
elif self.subtest == 'zsymmetrize':
self.perf_patterns = {
'gpu_perf': sn.extractsingle(r'GPU performance: (\S+)',
self.stdout, 1, float),
}
self.reference = {
'daint:gpu': {
'gpu_perf': (158.3, -0.05, None, 'GB/s'),
},
'dom:gpu': {
'gpu_perf': (158.3, -0.05, None, 'GB/s'),
}
}
elif self.subtest == 'ztranspose':
self.perf_patterns = {
'gpu_perf':
sn.extractsingle(
r'GPU performance: (?P<gpu_performance>\S+)',
self.stdout, 'gpu_performance', float
)
}
self.reference = {
'daint:gpu': {
'gpu_perf': (498.2, -0.05, None, 'GB/s'),
},
'dom:gpu': {
'gpu_perf': (498.2, -0.05, None, 'GB/s'),
}
}
elif self.subtest == 'zunmbr':
self.perf_patterns = {
'gpu_perf':
sn.extractsingle(
r'GPU performance: (?P<gpu_performance>\S+)',
self.stdout, 'gpu_performance', float
)
}
self.reference = {
'daint:gpu': {
'gpu_perf': (254.7, -0.05, None, 'Gflop/s'),
},
'dom:gpu': {
'gpu_perf': (254.7, -0.05, None, 'Gflop/s'),
}
}
|
tests/core/validation/test_transaction_validation.py | dbfreem/py-evm | 1,641 | 12790789 | <reponame>dbfreem/py-evm
import pytest
from eth.vm.forks.london.transactions import UnsignedDynamicFeeTransaction
from eth.vm.forks.berlin.transactions import UnsignedAccessListTransaction
from eth_utils import ValidationError
@pytest.mark.parametrize(
"unsigned_access_list_transaction,is_valid",
(
# While ethereum tests do not yet have Berlin or London transaction tests,
# this adds a few tests to test some obvious cases, especially positive test cases.
(UnsignedAccessListTransaction(
chain_id=123456789,
nonce=0,
gas_price=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, (1, 2)),),
), True),
(UnsignedAccessListTransaction(
chain_id=0,
nonce=0,
gas_price=0,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), True),
(UnsignedAccessListTransaction(
chain_id=123456789,
nonce=0,
gas_price=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, ()),),
), True),
(UnsignedAccessListTransaction(
chain_id=123456789,
nonce=0,
gas_price=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 19, (1,)),), # access_list address fails validation
), False),
(UnsignedAccessListTransaction(
chain_id='1', # chain_id fails validation
nonce=0,
gas_price=0,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), False),
)
)
def test_validate_unsigned_access_list_transaction(unsigned_access_list_transaction, is_valid):
if is_valid:
unsigned_access_list_transaction.validate()
else:
with pytest.raises(ValidationError):
unsigned_access_list_transaction.validate()
@pytest.mark.parametrize(
"unsigned_dynamic_fee_transaction,is_valid",
(
# While ethereum tests do not yet have Berlin or London transaction tests,
# this adds a few tests to test some obvious cases, especially positive test cases.
(UnsignedDynamicFeeTransaction(
chain_id=123456789,
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, (1, 2)),),
), True),
(UnsignedDynamicFeeTransaction(
chain_id=0,
nonce=0,
max_fee_per_gas=0,
max_priority_fee_per_gas=0,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), True),
(UnsignedDynamicFeeTransaction(
chain_id=123456789,
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, ()),),
), True),
(UnsignedDynamicFeeTransaction(
chain_id=123456789,
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 19, (1,)),), # access_list address fails validation
), False),
(UnsignedDynamicFeeTransaction(
chain_id='1', # chain_id fails validation
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), False),
)
)
def test_validate_unsigned_dynamic_fee_transaction(unsigned_dynamic_fee_transaction, is_valid):
if is_valid:
unsigned_dynamic_fee_transaction.validate()
else:
with pytest.raises(ValidationError):
unsigned_dynamic_fee_transaction.validate()
|
the-cloudwatch-dashboard/python/app.py | mttfarmer/serverless | 1,627 | 12790794 | <reponame>mttfarmer/serverless
#!/usr/bin/env python3
from aws_cdk import core
from the_cloudwatch_dashboard.the_cloudwatch_dashboard_stack import TheCloudwatchDashboardStack
app = core.App()
TheCloudwatchDashboardStack(app, "the-cloudwatch-dashboard")
app.synth()
|
inst/python/python_predict.py | cynthiayang525/PatientLevelPrediction | 141 | 12790810 | <reponame>cynthiayang525/PatientLevelPrediction<gh_stars>100-1000
# apply random forest model on new data
#===============================================================
# INPUT:
# 1) location of new data
# 2) location of model
#
# OUTPUT:
# it returns a file with indexes merged with prediction for test index - named new_pred
#================================================================
import numpy as np
from collections import OrderedDict
import os
import sys
import timeit
import math
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.naive_bayes import GaussianNB
from scipy.sparse import coo_matrix,csr_matrix,vstack,hstack
#from sklearn.feature_selection import SelectFromModel
#from sklearn.cross_validation import PredefinedSplit
from sklearn.externals.joblib import Memory
#from sklearn.datasets import load_svmlight_file
from sklearn.externals import joblib
if "python_dir" in globals():
sys.path.insert(0, python_dir)
import TorchUtils as tu
#================================================================
print("Applying Python Model")
###########################################################################
def get_temproal_data(covariates, population):
p_ids_in_cov = set(covariates[:, 0])
timeid_len = len(set(covariates[:, -2]))
full_covariates = np.array([]).reshape(0,4)
default_covid = covariates[0, 1]
for p_id in population[:, 0]:
if p_id not in p_ids_in_cov:
tmp_x = np.array([p_id, default_covid, 1, 0]).reshape(1,4) #default cov id, timeid=1
full_covariates = np.concatenate((full_covariates, tmp_x), axis=0)
else:
tmp_x = covariates[covariates[:, 0] == p_id, :]
#print tmp_x.shape, X.shape
full_covariates = np.concatenate((full_covariates, tmp_x), axis=0)
X, patient_keys = tu.convert_to_temporal_format(full_covariates, timeid_len = timeid_len, predict = True)
return X
print("Loading Data...")
# load data + train,test indexes + validation index
y=population[:,1]
#print covariates.shape
if modeltype == 'temporal':
X = plpData.to_dense().numpy()
X = X[np.int64(population[:, 0]), :]
#X = get_temproal_data(covariates, population)
dense = 0
else:
#print included
X = plpData[population[:,0],:]
X = X[:,included.flatten()]
# load index file
print("population loaded- %s rows and %s columns" %(np.shape(population)[0], np.shape(population)[1]))
print("Dataset has %s rows and %s columns" %(X.shape[0], X.shape[1]))
print("Data ready for model has %s features" %(np.shape(X)[1]))
###########################################################################
# uf dense convert
if dense==1:
print("converting to dense data...")
X=X.toarray()
###########################################################################
# load model
print("Loading model...")
modelTrained = joblib.load(os.path.join(model_loc,"model.pkl"))
print(X.shape)
print("Calculating predictions on population...")
if autoencoder:
autoencoder_model = joblib.load(os.path.join(model_loc, 'autoencoder_model.pkl'))
X = autoencoder_model.get_encode_features(X)
if modeltype == 'temporal':
test_batch = tu.batch(X, batch_size = 32)
test_pred = []
for test in test_batch:
pred_test1 = modelTrained.predict_proba(test)[:, 1]
test_pred = np.concatenate((test_pred , pred_test1), axis = 0)
else:
test_pred = modelTrained.predict_proba(X)[:, 1]
if test_pred.ndim != 1:
test_pred = test_pred[:,1]
print("Prediction complete: %s rows" %(np.shape(test_pred)[0]))
print("Mean: %s prediction value" %(np.mean(test_pred)))
# merge pred with population
test_pred.shape = (population.shape[0], 1)
prediction = np.append(population,test_pred, axis=1)
|
postgres-debezium-ksql-elasticsearch/python_kafka_notify.py | alonsoir/examples-2 | 1,150 | 12790834 | <reponame>alonsoir/examples-2<gh_stars>1000+
# rmoff / 13 Jun 2018
from slackclient import SlackClient
from confluent_kafka import Consumer, KafkaError
import json
import time
import os,sys
token = os.environ.get('SLACK_API_TOKEN')
if token is None:
print('\n\n*******\nYou need to set your Slack API token in the SLACK_API_TOKEN environment variable\n\nExiting.\n\n*******\n')
sys.exit(1)
sc = SlackClient(token)
# Set 'auto.offset.reset': 'smallest' if you want to consume all messages
# from the beginning of the topic
settings = {
'bootstrap.servers': 'localhost:9092',
'group.id': 'python_kafka_notify.py',
'default.topic.config': {'auto.offset.reset': 'largest'}
}
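# Note (added for clarity, not in the original): per the comment above, replaying
# the topic from the beginning would use the same settings dict with
# 'default.topic.config': {'auto.offset.reset': 'smallest'} instead of 'largest'.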
c = Consumer(settings)
c.subscribe(['UNHAPPY_PLATINUM_CUSTOMERS'])
try:
while True:
msg = c.poll(0.1)
time.sleep(5)
if msg is None:
continue
elif not msg.error():
print('Received message: {0}'.format(msg.value()))
if msg.value() is None:
continue
try:
app_msg = json.loads(msg.value().decode())
except:
app_msg = json.loads(msg.value())
try:
email=app_msg['EMAIL']
message=app_msg['MESSAGE']
channel='unhappy-customers'
text=('`%s` just left a bad review :disappointed:\n> %s\n\n_Please contact them immediately and see if we can fix the issue *right here, right now*_' % (email, message))
print('\nSending message "%s" to channel %s' % (text,channel))
except:
print('Failed to get channel/text from message')
channel='general'
text=msg.value()
try:
sc_response = sc.api_call('chat.postMessage', channel=channel,
text=text, username='KSQL Notifications',
icon_emoji=':rocket:')
if not sc_response['ok']:
print('\t** FAILED: %s' % sc_response['error'])
except Exception as e:
print(type(e))
print(dir(e))
elif msg.error().code() == KafkaError._PARTITION_EOF:
print('End of partition reached {0}/{1}'
.format(msg.topic(), msg.partition()))
else:
print('Error occured: {0}'.format(msg.error().str()))
except Exception as e:
print(type(e))
print(dir(e))
finally:
c.close()
|
alipay/aop/api/response/AlipayEcoDoctemplateSettingurlQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12790842 | <filename>alipay/aop/api/response/AlipayEcoDoctemplateSettingurlQueryResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcoDoctemplateSettingurlQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEcoDoctemplateSettingurlQueryResponse, self).__init__()
self._setting_url = None
@property
def setting_url(self):
return self._setting_url
@setting_url.setter
def setting_url(self, value):
self._setting_url = value
def parse_response_content(self, response_content):
response = super(AlipayEcoDoctemplateSettingurlQueryResponse, self).parse_response_content(response_content)
if 'setting_url' in response:
self.setting_url = response['setting_url']
|
test/manual/annotations/test_list_annotations.py | membranepotential/mendeley-python-sdk | 103 | 12790848 | <reponame>membranepotential/mendeley-python-sdk<filename>test/manual/annotations/test_list_annotations.py
from test import get_user_session, cassette, sleep
from test.resources.documents import create_document, delete_all_documents
def test_should_list_annotations():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/list_annotations.yaml'):
doc = create_document(session)
doc.add_note("A nice annotation")
page = session.annotations.list()
assert len(page.items) == 1
assert page.count == 1
annotation = page.items[0]
assert annotation.text == "A nice annotation"
assert annotation.privacy_level == 'private'
assert annotation.type == 'note'
assert annotation.last_modified
assert annotation.profile.id
assert annotation.profile.display_name
assert annotation.document().id == doc.id
assert annotation.document().title == doc.title
def test_should_page_through_annotations():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/page_through_annotations.yaml'):
doc = create_document(session)
file = doc.attach_file('fixtures/resources/files/basket.txt')
file.add_sticky_note("annotation 1", 100, 200, 1)
file.add_sticky_note("annotation 2", 100, 200, 1)
file.add_sticky_note("annotation 3", 100, 200, 1)
first_page = session.annotations.list(page_size=2)
assert len(first_page.items) == 2
assert first_page.count == 3
assert first_page.items[0].text == 'annotation 2'
assert first_page.items[1].text == 'annotation 1'
second_page = first_page.next_page
assert len(second_page.items) == 1
assert second_page.count == 3
assert second_page.items[0].text == 'annotation 3'
def test_should_list_annotations_modified_since():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/modified_since.yaml'):
doc = create_document(session, 'title 1')
file = doc.attach_file('fixtures/resources/files/basket.txt')
annotation = file.add_sticky_note("annotation 1", 100, 200, 1)
sleep(2)
file.add_sticky_note("annotation 2", 100, 200, 1)
file.add_sticky_note("annotation 3", 100, 200, 1)
page = session.annotations.list(modified_since=annotation.created.replace(seconds=+1))
assert len(page.items) == 2
assert page.count == 2
assert page.items[0].text == 'annotation 2'
assert page.items[1].text == 'annotation 3'
def test_should_list_annotations_deleted_since():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/deleted_since.yaml'):
doc = create_document(session, 'title 1')
file = doc.attach_file('fixtures/resources/files/basket.txt')
annotation1 = file.add_sticky_note("annotation 1", 100, 200, 1)
annotation2 = file.add_sticky_note("annotation 2", 100, 200, 1)
annotation3 = file.add_sticky_note("annotation 3", 100, 200, 1)
annotation1.delete()
sleep(2)
annotation2.delete()
annotation3.delete()
page = session.annotations.list(deleted_since=annotation3.created.replace(seconds=+1))
assert len(page.items) == 2
assert page.count == 2 |
applications/tensorflow2/image_classification/custom_exceptions.py | payoto/graphcore_examples | 260 | 12790850 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
class UnsupportedFormat(TypeError):
pass
class DimensionError(ValueError):
pass
class MissingArgumentException(ValueError):
pass
class InvalidPrecisionException(NameError):
pass
class UnallowedConfigurationError(ValueError):
pass
|
examples/s3-2017/rtu2a.py | pgaulon/minicps | 119 | 12790899 | <reponame>pgaulon/minicps
"""
rtu2a.py
"""
from minicps.devices import RTU
from utils import STATE, RTU2A_PROTOCOL
from utils import RTU_PERIOD_SEC
from utils import IP
# rtu2a tags
from utils import CO_0_2a, CO_1_2a, CO_2_2a, CO_3_2a
from utils import HR_0_2a, HR_1_2a, HR_2_2a
from utils import wadi1, wadi1_bin
import time
RTU2A_ADDR = IP['rtu2a'] + ':502'
RTU2B_ADDR = IP['rtu2b'] + ':502'
SCADA_ADDR = IP['scada'] + ':502'
class RTU2a(RTU):
def pre_loop(self, sleep=0.6):
"""rtu2a pre loop.
- sleep
"""
time.sleep(sleep)
def main_loop(self):
"""rtu2a main loop.
- challenge 1
"""
# print('DEBUG: wadi1: {}'.format(wadi1))
# print('DEBUG: wadi1_bin: {}'.format(wadi1_bin))
assert (len(wadi1_bin) / 8) == len(wadi1)
# print('DEBUG: len(wadi1): {}'.format(len(wadi1)))
# print('DEBUG: len(wadi1_bin): {}'.format(len(wadi1_bin)))
# print('DEBUG: len(wadi1_bin)/8: {}'.format(len(wadi1_bin) / 8))
count = 0
while(True):
if count >= len(wadi1_bin):
count = 0
if wadi1_bin[count] == '1':
#self.send(CO_0_2a, True, RTU2A_ADDR)
self.send(CO_0_2a, True, SCADA_ADDR)
# print("DEBUG: rtu2a send {} count {}".format(True, count))
else:
#self.send(CO_0_2a, False, RTU2A_ADDR)
self.send(CO_0_2a, False, SCADA_ADDR)
# print("DEBUG: rtu2a send {} count {}".format(False, count))
count += 1
# NOTE: read sensors
# co_0_2a = True if self.get(CO_0_2a) == '1' else False
# print("DEBUG: rtu2a co_0_2a: {}".format(co_0_2a))
# print("DEBUG: self.receive co_0_2a: \
# {}".format(self.receive(CO_0_2a, RTU2A_ADDR)))
# print("DEBUG: rtu2a main loop")
time.sleep(RTU_PERIOD_SEC)
if __name__ == "__main__":
rtu2a = RTU2a(
name='rtu2a',
state=STATE,
protocol=RTU2A_PROTOCOL)
|
backend/www/photo_store.py | sleepingAnt/viewfinder | 645 | 12790902 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""HTTP request handler for serving viewfinder photo image file
assets.
In case of a local file store, permissions for the current user and
the requested photo are verified and the requester is redirected to
the FileObjectStoreHandler.
For an s3 file store, permissions for the current user and the
requested photo are verified and the requester is redirected to a
pre-authorized, expiring S3 URL.
PhotoStoreHandler: Request handler for authorizing photo requests
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import base64
import httplib
import logging
from tornado import gen, options, web
from viewfinder.backend.base import handler
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.post import Post
from viewfinder.backend.db.user_post import UserPost
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.www import base
options.define('validate_cert', default=True,
help='set to False to allow insecure file obj store for testing')
def GeneratePhotoUrl(obj_store, photo_id, suffix):
"""Generate S3 signed URL for the given photo. The S3 response will contain a Cache-Control
header specifying private caching and a 1 year max age.
"""
return obj_store.GenerateUrl(photo_id + suffix, cache_control='private,max-age=31536000')
class PhotoStoreHandler(base.BaseHandler):
"""Handles PUT requests by storing image assets in the object
store. GET request retrieve image assets. Each method type
verifies user authentication credentials.
"""
@handler.asynchronous(datastore=True, obj_store=True)
@gen.engine
def get(self, episode_id, photo_id, suffix):
"""Verifies user credentials and then redirects to the URL where
the actual image bits are stored.
"""
url = yield PhotoStoreHandler.GetPhotoUrl(self._client,
self._obj_store,
episode_id,
photo_id,
suffix)
self.redirect(url)
@handler.asynchronous(datastore=True, obj_store=True)
@gen.engine
def put(self, episode_id, photo_id, suffix):
"""Verifies user credentials. If the user has write access to the
photo, and if an 'If-None-Match' is present, sends a HEAD request
to the object store to determine asset Etag. If the Etag matches,
returns a 304. Otherwise, generates an upload URL and redirects.
"""
def _GetUploadUrl(photo, verified_md5):
content_type = photo.content_type or 'image/jpeg'
return self._obj_store.GenerateUploadUrl(photo_id + suffix, content_type=content_type,
content_md5=verified_md5)
# Always expect well-formed Content-MD5 header. This ensures that the image data always matches
# what is in the metadata, and also enables the detection of any bit corruption on the wire.
if 'Content-MD5' not in self.request.headers:
raise web.HTTPError(400, 'Missing Content-MD5 header.')
try:
request_md5 = self.request.headers['Content-MD5']
actual_md5 = base64.b64decode(request_md5).encode('hex')
except:
raise web.HTTPError(400, 'Content-MD5 header "%s" is not a valid base-64 value.' % request_md5)
# Match against the MD5 value stored in the photo metadata.
if suffix not in ['.t', '.m', '.f', '.o']:
raise web.HTTPError(404, 'Photo not found; "%s" suffix is invalid.' % suffix)
# Ensure that user has permission to PUT the photo.
yield PhotoStoreHandler._AuthorizeUser(self._client, episode_id, photo_id, write_access=True)
# Get photo metadata, which will be used to create the upload URL.
photo = yield gen.Task(Photo.Query, self._client, photo_id, None)
# Get name of MD5 attribute in the photo metadata.
if suffix == '.o':
attr_name = 'orig_md5'
elif suffix == '.f':
attr_name = 'full_md5'
elif suffix == '.m':
attr_name = 'med_md5'
elif suffix == '.t':
attr_name = 'tn_md5'
else:
raise web.HTTPError(404, 'Photo not found; "%s" suffix is invalid.' % suffix)
# Check for the existence of the photo's image data in S3.
etag = yield gen.Task(Photo.IsImageUploaded, self._obj_store, photo.photo_id, suffix)
expected_md5 = getattr(photo, attr_name)
if expected_md5 != actual_md5:
if etag is None:
# Since there is not yet any photo image data, update the photo metadata to be equal to the
# actual MD5 value.
setattr(photo, attr_name, actual_md5)
yield gen.Task(photo.Update, self._client)
# Redirect to the S3 location.
self.redirect(_GetUploadUrl(photo, request_md5))
else:
        # The client often sends mismatched MD5 values due to non-deterministic JPG creation in the iOS code.
# Only log the mismatch if it's an original photo to avoid spamming logs.
if suffix == '.o':
logging.error('Content-MD5 header "%s" does not match expected MD5 "%s"' %
(actual_md5, expected_md5))
self.set_status(400)
self.finish()
else:
# Check for If-None-Match header, which is used by client to check whether photo image data
# already exists (and therefore no PUT of the image data is needed).
match_etag = self.request.headers.get('If-None-Match', None)
if match_etag is not None and etag is not None and (match_etag == '*' or match_etag == etag):
# Photo image data exists and is not modified, so no need for client to PUT it again.
self.set_status(httplib.NOT_MODIFIED)
self.finish()
else:
# Redirect to the S3 upload location.
self.redirect(_GetUploadUrl(photo, request_md5))
@classmethod
@gen.coroutine
def GetPhotoUrl(cls, client, obj_store, episode_id, photo_id, suffix):
"""Checks that the current user (in Viewfinder context) is authorized to get the specified
photo, and returns a signed S3 URL for the photo if so.
"""
yield gen.Task(PhotoStoreHandler._AuthorizeUser, client, episode_id, photo_id, write_access=False)
raise gen.Return(GeneratePhotoUrl(obj_store, photo_id, suffix))
@classmethod
@gen.coroutine
def _AuthorizeUser(cls, client, episode_id, photo_id, write_access):
"""Checks that the current user (in Viewfinder context) user is authorized to access the given photo:
1. The photo must exist, and be in the given episode
2. The photo must not be unshared
3. If uploading the photo, the user must be the episode owner
4. A prospective user has access only to photos in the viewpoint specified in the cookie
"""
context = base.ViewfinderContext.current()
if context is None or context.user is None:
raise web.HTTPError(401, 'You are not logged in. Only users that have logged in can access this URL.')
user_id = context.user.user_id
post_id = Post.ConstructPostId(episode_id, photo_id)
episode, post = yield [gen.Task(Episode.QueryIfVisible, client, user_id, episode_id, must_exist=False),
gen.Task(Post.Query, client, episode_id, photo_id, None, must_exist=False)]
if episode is None or post is None:
raise web.HTTPError(404, 'Photo was not found or you do not have permission to view it.')
if write_access and episode.user_id != user_id:
raise web.HTTPError(403, 'You do not have permission to upload this photo; it is not owned by you.')
if post.IsUnshared():
raise web.HTTPError(403, 'This photo can no longer be viewed; it was unshared.')
# BUGBUG(Andy): The 1.5 client has a bug where it always passes in the library episode id
# when trying to fetch a photo, even if the photo is part of a conversation. This results
# in 403 errors when a user tries to sync to their library. For now, I'm disabling this
# check. Once 2.0 has established itself, I'll re-enable the check.
#if post.IsRemoved():
# raise web.HTTPError(403, 'This photo can no longer be viewed; it was removed.')
if not context.CanViewViewpoint(episode.viewpoint_id):
# Always allow system viewpoints to be accessed by a prospective user.
viewpoint = yield gen.Task(Viewpoint.Query, client, episode.viewpoint_id, None)
if not viewpoint.IsSystem():
raise web.HTTPError(403, 'You do not have permission to view this photo. '
'To see it, you must register an account.')
def _IsInteractiveRequest(self):
"""Always returns false, as this API is accessed programatically."""
return False
|
docs/examples/fig6p24.py | uluturki/Mathematics-of-Epidemics-on-Networks | 136 | 12790938 | import networkx as nx
import EoN
from collections import defaultdict
import matplotlib.pyplot as plt
import scipy
import random
colors = ['#5AB3E6','#FF2000','#009A80','#E69A00', '#CD9AB3', '#0073B3','#F0E442']
rho = 0.01
Nbig=500000
Nsmall = 5000
tau =0.4
gamma = 1.
def poisson():
return scipy.random.poisson(5)
def PsiPoisson(x):
return scipy.exp(-5*(1-x))
def DPsiPoisson(x):
return 5*scipy.exp(-5*(1-x))
bimodalPk = {8:0.5, 2:0.5}
def PsiBimodal(x):
return (x**8 +x**2)/2.
def DPsiBimodal(x):
return(8*x**7 + 2*x)/2.
def homogeneous():
return 5
def PsiHomogeneous(x):
return x**5
def DPsiHomogeneous(x):
return 5*x**4
PlPk = {}
exponent = 1.418184432
kave = 0
for k in range(1,81):
PlPk[k]=k**(-exponent)*scipy.exp(-k*1./40)
kave += k*PlPk[k]
normfact= sum(PlPk.values())
for k in PlPk:
PlPk[k] /= normfact
#def trunc_pow_law():
# r = random.random()
# for k in PlPk:
# r -= PlPk[k]
# if r<0:
# return k
def PsiPowLaw(x):
#print PlPk
rval = 0
for k in PlPk:
rval += PlPk[k]*x**k
return rval
def DPsiPowLaw(x):
rval = 0
for k in PlPk:
rval += k*PlPk[k]*x**(k-1)
return rval
def get_G(N, Pk):
while True:
ks = []
for ctr in range(N):
r = random.random()
for k in Pk:
if r<Pk[k]:
break
else:
r-= Pk[k]
ks.append(k)
if sum(ks)%2==0:
break
G = nx.configuration_model(ks)
return G
report_times = scipy.linspace(0,20,41)
def process_degree_distribution(Gbig, Gsmall, color, Psi, DPsi, symbol):
t, S, I, R = EoN.fast_SIR(Gsmall, tau, gamma, rho=rho)
plt.plot(t, I*1./Gsmall.order(), ':', color = color)
t, S, I, R = EoN.fast_SIR(Gbig, tau, gamma, rho=rho)
plt.plot(t, I*1./Gbig.order(), color = color)
    N = Gbig.order()  # N is arbitrary, but included because our implementation of EBCM assumes N is given.
t, S, I, R = EoN.EBCM(N, lambda x: (1-rho)*Psi(x), lambda x: (1-rho)*DPsi(x), tau, gamma, 1-rho)
I = EoN.subsample(report_times, t, I)
plt.plot(report_times, I/N, symbol, color = color, markeredgecolor='k')
#<NAME>
Gsmall = nx.fast_gnp_random_graph(Nsmall, 5./(Nsmall-1))
Gbig = nx.fast_gnp_random_graph(Nbig, 5./(Nbig-1))
process_degree_distribution(Gbig, Gsmall, colors[0], PsiPoisson, DPsiPoisson, '^')
#Bimodal
Gsmall = get_G(Nsmall, bimodalPk)
Gbig = get_G(Nbig, bimodalPk)
process_degree_distribution(Gbig, Gsmall, colors[1], PsiBimodal, DPsiBimodal, 'o')
#Homogeneous
Gsmall = get_G(Nsmall, {5:1.})
Gbig = get_G(Nbig, {5:1.})
process_degree_distribution(Gbig, Gsmall, colors[2], PsiHomogeneous, DPsiHomogeneous, 's')
#Powerlaw
Gsmall = get_G(Nsmall, PlPk)
Gbig = get_G(Nbig, PlPk)
process_degree_distribution(Gbig, Gsmall, colors[3], PsiPowLaw, DPsiPowLaw, 'd')
plt.axis(xmin=0, ymin=0, xmax = 20, ymax = 0.2)
plt.xlabel('$t$')
plt.ylabel('Proportion Infected')
plt.savefig('fig6p24.png') |
crank/net/module/mlfb.py | abeersaqib/crank | 162 | 12790955 | <reponame>abeersaqib/crank<filename>crank/net/module/mlfb.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
import librosa
import scipy.signal
import torch
import torch.nn as nn
class MLFBLayer(torch.nn.Module):
def __init__(
self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10
):
super().__init__()
fmin = 0 if fmin is None else fmin
fmax = fs / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(
sr=fs,
n_fft=fft_size,
n_mels=n_mels,
fmin=fmin,
fmax=fmax,
)
self.eps = eps
self.register_buffer("mel_basis", torch.from_numpy(mel_basis.T).float())
def forward(
self,
x,
):
mlfb = torch.matmul(x, self.mel_basis)
mlfb = torch.clamp(mlfb, min=self.eps).log10()
return mlfb
class STFTLayer(torch.nn.Module):
def __init__(
self,
fs=22050,
hop_size=256,
fft_size=1024,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
return_complex=False,
):
super().__init__()
self.hop_size = hop_size
self.fft_size = fft_size
self.win_length = fft_size if win_length is None else win_length
self.center = center
self.pad_mode = pad_mode
self.return_complex = return_complex
"""
prepare window parameter type of window
- "hann": hanning window
- "param": parameter-based window
- "conv": convolution-based window
"""
self.window_type = window
if window == "param":
win = scipy.signal.get_window("hann", self.win_length).astype(float)
self.register_parameter(
"window", nn.Parameter(torch.from_numpy(win), requires_grad=True)
)
elif window == "conv":
kernel_size = 65
self.window_conv = nn.Sequential(
nn.Conv1d(
in_channels=1,
out_channels=24,
kernel_size=kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
),
nn.Sigmoid(),
)
else:
self.window = window
def forward(self, x):
if self.window_type == "param":
window = self.window
elif self.window_type == "conv":
x = x.unsqueeze(-1).transpose(1, 2)
x = torch.mean(self.window_conv(x).transpose(1, 2), -1)
window = None
else:
f = getattr(torch, f"{self.window}_window")
window = f(self.win_length, dtype=x.dtype, device=x.device)
stft = torch.stft(
x,
n_fft=self.fft_size,
win_length=self.win_length,
hop_length=self.hop_size,
window=window,
center=self.center,
pad_mode=self.pad_mode,
return_complex=self.return_complex,
)
return stft.transpose(1, 2).float()
class MLFBScalerLayer(nn.Module):
def __init__(self, scaler):
super().__init__()
self.register_parameter(
"mean",
nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False),
)
self.register_parameter(
"std",
nn.Parameter(
torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False
),
)
def forward(self, x):
return (x - self.mean) / self.std
class LogMelFilterBankLayer(nn.Module):
def __init__(
self,
fs=22050,
hop_size=256,
fft_size=1024,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
n_mels=80,
fmin=None,
fmax=None,
scaler=None,
):
super().__init__()
self.stft_layer = STFTLayer(
fs,
hop_size,
fft_size,
win_length,
window,
center=center,
pad_mode=pad_mode,
)
self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax)
if scaler is not None:
self.scaler_layer = MLFBScalerLayer(scaler)
else:
self.scaler_layer = None
def forward(self, x):
stft = self.stft_layer(x)
amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2)
mlfb = self.mlfb_layer(amplitude)
if self.scaler_layer is not None:
mlfb = self.scaler_layer(mlfb)
return mlfb
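# --- Illustrative usage sketch (not part of the original module) ---
# Extracts log mel-filterbank features from a batch of raw waveforms using the
# default Hanning window. The shapes and random input below are assumptions
# for demonstration only.
if __name__ == "__main__":
    extractor = LogMelFilterBankLayer(fs=22050, hop_size=256, fft_size=1024, n_mels=80)
    dummy_wav = torch.randn(2, 22050)   # (batch, samples), roughly 1 second of audio
    feats = extractor(dummy_wav)        # (batch, frames, n_mels)
    print(feats.shape)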
|
library/keystone_service_provider.py | pgraziano/ursula | 193 | 12790992 | <reponame>pgraziano/ursula<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016, IBM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
author: <NAME>
module: keystone_service_provider
short_description: register sp on keystone idp
description:
- This module registers a keystone service provider on the keystone
identity provider.
options:
service_provider_id:
description:
- A globally unique id to identify the service provider
example -sp.id
required: true
service_provider_url:
description:
- URL that is found in the service provider's metadata
(Which is usually found
in https://keystone.sp/Shibboleth.sso/metadata)
example -https://keystone.sp/Shibboleth.sso/SAML2/ECP
required: true
service_provider_auth_url:
description:
- URL that is used to authenticate with the identity provider
This URL should be available once the idp registered on the sp
example -'http://keystone.sp/v3/OS-FEDERATION/'
'identity_providers/keystone-idp/protocols/saml2/auth'
required: true
enabled:
description:
- A value of True enables the service provider and False disables it.
default: True
description:
description:
The description of the service provider.
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
'''
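# Illustrative usage sketch, not taken from the original module: the host
# names and URLs below are placeholders based on the DOCUMENTATION examples.
EXAMPLES = '''
- name: Register a keystone service provider on the identity provider
  keystone_service_provider:
    service_provider_id: sp.id
    service_provider_url: https://keystone.sp/Shibboleth.sso/SAML2/ECP
    service_provider_auth_url: http://keystone.sp/v3/OS-FEDERATION/identity_providers/keystone-idp/protocols/saml2/auth
    enabled: true
    description: Example service provider
    state: present
'''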
def _needs_update(module, service_provider):
"""Check for differences in the updatable values.
Note: Names cannot be updated.
"""
params_dict = dict(sp_url='service_provider_url',
auth_url='service_provider_auth_url',
enabled='enabled', description='description')
for sp_attr, module_attr in params_dict.items():
module_val = module.params.get(module_attr, None)
if module_val != getattr(service_provider, sp_attr, None):
return True
return False
def _system_state_change(module, service_provider):
state = module.params['state']
if state == 'present':
if not service_provider:
return True
return _needs_update(module, service_provider)
if state == 'absent' and service_provider:
return True
return False
def _get_cloud(**kwargs):
cloud_shade = shade.openstack_cloud(**kwargs)
cloud_shade.cloud_config.config['identity_api_version'] = '3'
cloud = ShadePlaceholder(cloud_shade.keystone_client)
return cloud
class ShadePlaceholder(object):
def __init__(self, keystone_client):
self.client = keystone_client
def get_service_provider(self, sp_id):
for sp in self.client.federation.service_providers.list():
if getattr(sp, 'id') == sp_id:
return sp
return None
def create_service_provider(
self, sp_id, sp_url, sp_auth_url, enabled, description):
service_provider = self.client.federation.service_providers.create(
id=sp_id, sp_url=sp_url, auth_url=sp_auth_url,
enabled=enabled, description=description)
return service_provider
def update_service_provider(
self, sp_id, sp_url, sp_auth_url, enabled, description):
service_provider = self.client.federation.service_providers.update(
service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url,
enabled=enabled, description=description)
return service_provider
def delete_service_provider(self, sp_id):
self.client.federation.service_providers.delete(service_provider=sp_id)
def main():
argument_spec = openstack_full_argument_spec(
service_provider_id=dict(required=True),
service_provider_url=dict(required=True),
service_provider_auth_url=dict(required=True),
enabled=dict(required=False, type='bool', default=True),
description=dict(required=False, default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
sp_id = module.params['service_provider_id']
sp_url = module.params['service_provider_url']
sp_auth_url = module.params['service_provider_auth_url']
enabled = module.params['enabled']
description = module.params['description']
state = module.params['state']
try:
cloud = _get_cloud(**module.params)
service_provider = cloud.get_service_provider(sp_id)
if module.check_mode:
changed = _system_state_change(module, service_provider)
module.exit_json(changed=changed)
changed = False
if state == 'present':
if not service_provider:
service_provider = cloud.create_service_provider(
sp_id, sp_url, sp_auth_url, enabled, description)
changed = True
else:
if _needs_update(module, service_provider):
service_provider = cloud.update_service_provider(
sp_id, sp_url, sp_auth_url, enabled, description)
changed = True
module.exit_json(
changed=changed,
service_provider=[service_provider.id, service_provider.sp_url,
service_provider.auth_url, enabled, description])
if state == 'absent':
if service_provider:
cloud.delete_service_provider(sp_id)
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg="service provider failed: %s" % str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
tests/__init__.py | lpnueg4/-logzero | 1,091 | 12790998 | <filename>tests/__init__.py
# -*- coding: utf-8 -*-
"""Unit test package for logzero."""
|
solver/spoof.py | juandesant/astrometry.net | 460 | 12791022 | <reponame>juandesant/astrometry.net
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
import math
from math import exp
from matplotlib.pylab import imread
from numpy.oldnumeric.functions import zeros, ravel
I=imread('3.png')
I=I[:,:,:3]
(h,w,planes) = I.shape
XY = pyfits.open('16b.fits')[1].data
X = XY.field('X')
Y = XY.field('Y')
psfw = 1.0
stars = zeros((h,w)).astype(float)
for (x,y) in zip(X,Y):
ix = int(round(x))
iy = int(round(y))
for dy in range(-5, 6):
yy = iy + dy
if yy < 0 or yy >= h:
continue
for dx in range(-5, 6):
xx = ix + dx
if xx < 0 or xx >= w:
continue
dd = (xx - x)**2 + (yy - y)**2
stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi
#origfrac = 0.5
#maxorig = I.max()
#starfrac = (1.0 - origfrac) + (1.0 - maxorig)
#for p in range(planes):
# I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac
for p in range(planes):
I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8
f=open('out.ppm', 'wb')
f.write('P6 %i %i %i\n' % (w, h, 255))
#for j in range(h):
# for i in range(w):
# for p in range(planes):
# f.write(chr(int(round(I[j,i,p] * 255.0))))
flatI = (I.ravel() * 255.0).round().astype(int)
f.write("".join([chr(min(i,255)) for i in flatI]))
f.close()
|
products/ui/llbuildui/model.py | uraimo/swift-llbuild | 1,034 | 12791034 | import struct
from sqlalchemy import *
from sqlalchemy.orm import relation, relationship
from sqlalchemy.ext.declarative import declarative_base
# DB Declaration
Base = declarative_base()
class KeyName(Base):
__tablename__ = "key_names"
id = Column(Integer, nullable=False, primary_key=True)
name = Column('key', String, nullable=False)
def __repr__(self):
return "%s%r" % (
self.__class__.__name__, (self.id, self.name))
class RuleResult(Base):
__tablename__ = "rule_results"
id = Column(Integer, nullable=False, primary_key=True)
key_id = Column(Integer, ForeignKey(KeyName.id),
nullable=False)
value_bytes = Column("value", Binary, nullable=False)
built_at = Column(Integer, nullable=False)
computed_at = Column(Integer, nullable=False)
key = relation(KeyName)
dependencies_bytes = Column("dependencies", Binary, nullable=True)
def __repr__(self):
return "%s%r" % (
self.__class__.__name__, (self.id, self.key, self.value,
self.built_at, self.computed_at))
@property
def value(self):
return BuildValue(self.value_bytes)
@property
def dependencies(self):
if self.dependencies_bytes is None:
return []
else :
num_dependencies = len(self.dependencies_bytes) / 8
return struct.unpack("<" + str(num_dependencies) + "Q",
self.dependencies_bytes)
###
class BuildValue(object):
# FIXME: This is a manually Python translation of the C++
# llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't
# available via an API we can access directly yet.
kinds = [
"Invalid",
"VirtualInput", "ExistingInput", "MissingInput",
"DirectoryContents", "DirectoryTreeSignature",
"StaleFileRemoval", "MissingOutput", "FailedInput",
"SuccessfulCommand", "FailedCommand",
"PropagatedFailureCommand", "CancelledCommand", "SkippedCommand",
"Target",
]
def __init__(self, data):
bytes = str(data)
# The first byte is the kind.
if bytes:
self.kind = self.__class__.kinds[struct.unpack("<B", bytes[0])[0]]
bytes = bytes[1:]
else:
self.kind = "Invalid"
# The next item is the signature, if used.
if self.hasCommandSignature:
self.signature = struct.unpack("<Q", bytes[:8])[0]
bytes = bytes[8:]
else:
self.signature = None
# The outputs follow, if used.
if self.hasOutputInfo:
numOutputs = struct.unpack("<I", bytes[:4])[0]
bytes = bytes[4:]
self.outputs = []
for i in range(numOutputs):
# Read the file information.
self.outputs.append(FileInfo(bytes[:48]))
bytes = bytes[48:]
else:
self.outputs = None
# The strings follow, if used.
if self.hasStringList:
stringsLength = struct.unpack("<Q", bytes[:8])[0]
bytes = bytes[8:]
if stringsLength == 0:
self.strings = []
else:
stringData = bytes[:stringsLength]
bytes = bytes[stringsLength:]
assert len(stringData) == stringsLength
assert stringData[-1] == '\0'
self.strings = stringData[:-1].split("\0")
else:
self.strings = None
assert len(bytes) == 0
@property
def hasCommandSignature(self):
return self.kind in ("SuccessfulCommand", "DirectoryTreeSignature")
@property
def hasStringList(self):
return self.kind in ("DirectoryContents", "StaleFileRemoval")
@property
def hasOutputInfo(self):
return self.kind in ("ExistingInput", "SuccessfulCommand",
"DirectoryContents")
def __repr__(self):
output = "BuildValue(kind=%r" % self.kind
if self.signature is not None:
output += ", signature=%0x" % self.signature
if self.outputs is not None:
output += ", outputs=%r" % self.outputs
if self.strings is not None:
output += ", strings=%r" % self.strings
output += ")"
return output
class FileInfo(object):
def __init__(self, bytes):
(self.device, self.inode, self.mode, self.size,
modTimeSec, modTimeNano) = struct.unpack("<QQQQQQ", bytes)
self.modTime = (modTimeSec, modTimeNano)
def __repr__(self):
return "FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))" % (
self.device, self.inode, self.mode, self.size,
self.modTime[0], self.modTime[1])
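# --- Illustrative usage sketch (assumption, not part of the original module) ---
# Opens an llbuild database with SQLAlchemy and prints the key and decoded
# BuildValue of a few rule results. The database path is a placeholder.
if __name__ == "__main__":
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite:///build.db")  # hypothetical path to an llbuild db
    session = sessionmaker(bind=engine)()
    for result in session.query(RuleResult).limit(10):
        print("%s -> %r" % (result.key.name, result.value))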
|
ldap2pg/defaults.py | ng-pe/ldap2pg | 151 | 12791043 | from itertools import chain
from textwrap import dedent
from .utils import string_types
shared_queries = dict(
datacl=dedent("""\
WITH grants AS (
SELECT
(aclexplode(datacl)).grantee AS grantee,
(aclexplode(datacl)).privilege_type AS priv
FROM pg_catalog.pg_database
WHERE datname = current_database()
UNION
SELECT q.*
FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q
CROSS JOIN pg_catalog.pg_database
WHERE datacl IS NULL AND datname = current_database()
)
SELECT
grants.priv AS key,
NULL as namespace,
COALESCE(rolname, 'public')
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE grantee = 0 OR rolname IS NOT NULL
"""),
defacl=dedent("""\
WITH
grants AS (
SELECT
defaclnamespace,
defaclrole,
(aclexplode(defaclacl)).grantee AS grantee,
(aclexplode(defaclacl)).privilege_type AS priv,
defaclobjtype AS objtype
FROM pg_catalog.pg_default_acl
)
SELECT
priv || '_on_' || objtype AS key,
nspname,
COALESCE(rolname, 'public') AS rolname,
TRUE AS full,
pg_catalog.pg_get_userbyid(defaclrole) AS owner
FROM grants
JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE (grantee = 0 OR rolname IS NOT NULL)
AND nspname NOT LIKE 'pg\\_%temp\\_%'
AND nspname <> 'pg_toast'
-- ORDER BY 1, 2, 3, 5
"""),
globaldefacl=dedent("""\
WITH
grants AS (
SELECT
defaclrole AS owner,
(aclexplode(defaclacl)).grantee,
(aclexplode(defaclacl)).privilege_type AS priv
FROM pg_default_acl AS def
WHERE defaclnamespace = 0
UNION
SELECT
rol.oid AS owner,
0 AS grantee,
'EXECUTE' AS priv
FROM pg_roles AS rol
LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl
ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0
WHERE defaclacl IS NULL
)
SELECT
priv AS key,
NULL AS "schema",
COALESCE(rolname, 'public') as rolname,
TRUE AS "full",
pg_catalog.pg_get_userbyid(owner) AS owner
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE rolname IS NOT NULL OR grantee = 0
"""),
nspacl=dedent("""\
WITH grants AS (
SELECT
nspname,
(aclexplode(nspacl)).grantee AS grantee,
(aclexplode(nspacl)).privilege_type AS priv
FROM pg_catalog.pg_namespace
)
SELECT
grants.priv AS key,
nspname,
COALESCE(rolname, 'public') AS rolname
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE (grantee = 0 OR rolname IS NOT NULL)
AND nspname NOT LIKE 'pg\\_%temp\\_%'
AND nspname <> 'pg_toast'
ORDER BY 1, 2
""")
)
_datacl_tpl = dict(
type='datacl',
inspect=dict(shared_query='datacl', keys=['%(privilege)s']),
grant="GRANT %(privilege)s ON DATABASE {database} TO {role};",
revoke="REVOKE %(privilege)s ON DATABASE {database} FROM {role};",
)
_global_defacl_tpl = dict(
type='globaldefacl',
inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']),
grant=(
"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"
" GRANT %(privilege)s ON %(TYPE)s TO {role};"),
revoke=(
"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"
" REVOKE %(privilege)s ON %(TYPE)s FROM {role};"),
)
_defacl_tpl = dict(
type="defacl",
inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']),
grant=dedent("""\
ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}
GRANT %(privilege)s ON %(TYPE)s TO {role};
"""),
revoke=dedent("""\
ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}
REVOKE %(privilege)s ON %(TYPE)s FROM {role};
"""),
)
_nspacl_tpl = dict(
type="nspacl",
inspect=dict(shared_query='nspacl', keys=['%(privilege)s']),
grant="GRANT %(privilege)s ON SCHEMA {schema} TO {role};",
revoke="REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};",
)
# ALL TABLES is tricky because we have to manage partial grants. The trickiest
# case is when there are no tables in a namespace: is the privilege granted or
# revoked? We have to tell ldap2pg that this grant is irrelevant on this
# schema.
#
# Here is a truth table:
#
# FOR GRANT | no grant | partial grant | fully granted
# -----------+----------+---------------+---------------
# no tables | NOOP | N/D | N/D
# -----------+----------+---------------+---------------
# 1+ tables | GRANT | GRANT | NOOP
# -----------+----------+---------------+---------------
#
# FOR REVOKE | no grant | partial grant | fully granted
# -----------+----------+---------------+---------------
# no tables | NOOP | N/D | N/D
# -----------+----------+---------------+---------------
# 1+ tables | NOOP | REVOKE | REVOKE
# -----------+----------+---------------+---------------
#
# When a namespace has NO tables, we always return a row with full as NULL,
# meaning the privilege is irrelevant: it is both granted and revoked.
#
# When a namespace has tables, we compare grants to available tables to
# determine whether the privilege is fully granted. If the privilege is not
# granted at all, we drop the row in the WHERE clause so that the privilege is
# considered revoked.
#
_allrelacl_tpl = dict(
type='nspacl',
inspect=dedent("""\
WITH
namespace_rels AS (
SELECT
nsp.oid,
nsp.nspname,
array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels
FROM pg_catalog.pg_namespace nsp
LEFT OUTER JOIN pg_catalog.pg_class AS rel
ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s
WHERE nspname NOT LIKE 'pg\\_%%temp\\_%%'
AND nspname <> 'pg_toast'
GROUP BY 1, 2
),
all_grants AS (
SELECT
relnamespace,
(aclexplode(relacl)).privilege_type,
(aclexplode(relacl)).grantee,
array_agg(relname ORDER BY relname) AS rels
FROM pg_catalog.pg_class
WHERE relkind IN %(t_array)s
GROUP BY 1, 2, 3
),
all_roles AS (
SELECT 0 AS oid, 'public' AS rolname
UNION
SELECT oid, rolname from pg_roles
)
SELECT
nspname,
rolname,
CASE
WHEN nsp.rels = ARRAY[]::name[] THEN NULL
ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[])
END AS "full"
FROM namespace_rels AS nsp
CROSS JOIN all_roles AS rol
LEFT OUTER JOIN all_grants AS grants
ON relnamespace = nsp.oid
AND grantee = rol.oid
AND privilege_type = '%(privilege)s'
WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL)
-- ORDER BY 1, 2
"""),
grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}",
revoke=(
"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}"),
)
_allprocacl_tpl = dict(
type='nspacl',
inspect=dedent("""\
WITH
grants AS (SELECT
pronamespace, grantee, priv,
array_agg(DISTINCT proname ORDER BY proname) AS procs
FROM (
SELECT
pronamespace,
proname,
(aclexplode(proacl)).grantee,
(aclexplode(proacl)).privilege_type AS priv
FROM pg_catalog.pg_proc
UNION
SELECT
pronamespace, proname,
0 AS grantee,
'EXECUTE' AS priv
FROM pg_catalog.pg_proc
WHERE proacl IS NULL
) AS grants
GROUP BY 1, 2, 3
),
namespaces AS (
SELECT
nsp.oid, nsp.nspname,
array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs
FROM pg_catalog.pg_namespace nsp
LEFT OUTER JOIN pg_catalog.pg_proc AS pro
ON pro.pronamespace = nsp.oid
GROUP BY 1, 2
),
roles AS (
SELECT oid, rolname
FROM pg_catalog.pg_roles
UNION
SELECT 0, 'public'
)
SELECT
nspname, rolname,
CASE
WHEN nsp.procs = ARRAY[]::name[] THEN NULL
ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[])
END AS "full"
FROM namespaces AS nsp
CROSS JOIN roles
LEFT OUTER JOIN grants
ON pronamespace = nsp.oid AND grants.grantee = roles.oid
WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL)
AND (priv IS NULL OR priv = '%(privilege)s')
AND nspname NOT LIKE 'pg\\_%%temp\\_%%'
-- ORDER BY 1, 2
"""), # noqa
grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}",
revoke=(
"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}"),
)
_types = {
'FUNCTIONS': ('f',),
'TABLES': ('r', 'v', 'f'),
'TYPES': ('T',),
'SEQUENCES': ('S',),
}
def format_keys(fmt, fmt_kwargs):
if '%(t)' in fmt:
for t in fmt_kwargs['t']:
yield fmt % dict(fmt_kwargs, t=t)
else:
yield fmt % fmt_kwargs
def make_privilege(tpl, name, TYPE, privilege):
t = _types.get(TYPE)
fmt_args = dict(
t=t,
# Loose SQL formatting
t_array='(%s)' % (', '.join(['%r' % i for i in t or []])),
TYPE=TYPE,
privilege=privilege.upper(),
)
privilege = dict()
for k, v in tpl.items():
if isinstance(v, string_types):
v = v % fmt_args
else:
if v['shared_query'] not in shared_queries:
raise Exception("Unknown query %s." % v['shared_query'])
v = v.copy()
v['keys'] = list(chain(*[
format_keys(key, fmt_args)
for key in v['keys']
]))
privilege[k] = v
return name, privilege
def make_proc_privileges(
privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'):
fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())
all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw
default = '__default_%(privilege)s_on_%(type)s__' % fmtkw
global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw
name = namefmt % fmtkw
return dict([
make_privilege(_allprocacl_tpl, all_, TYPE, privilege),
make_privilege(_defacl_tpl, default, TYPE, privilege),
make_privilege(_global_defacl_tpl, global_def, TYPE, privilege),
(name, [all_, default, global_def]),
])
def make_rel_privileges(
privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'):
fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())
all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw
default = '__default_%(privilege)s_on_%(type)s__' % fmtkw
name = namefmt % fmtkw
return dict([
make_privilege(_allrelacl_tpl, all_, TYPE, privilege),
make_privilege(_defacl_tpl, default, TYPE, privilege),
(name, [all_, default]),
])
def make_well_known_privileges():
privileges = dict([
make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'),
make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'),
make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'),
make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'),
make_privilege(
_defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'),
])
# This is a compatibility alias.
privileges['__usage_on_types__'] = ['__default_usage_on_types__']
privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS'))
privileges['__execute__'] = ['__execute_on_functions__']
for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE':
privileges.update(
make_rel_privileges(privilege, 'TABLES'))
alias = '__%s__' % (privilege.lower(),)
privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)]
for privilege in 'SELECT', 'UPDATE':
privileges.update(make_rel_privileges(privilege, 'TABLES'))
privileges.update(make_rel_privileges(privilege, 'SEQUENCES'))
privileges.update(make_rel_privileges('USAGE', 'SEQUENCES'))
privileges['__all_on_schemas__'] = [
'__create_on_schemas__',
'__usage_on_schemas__',
]
privileges['__all_on_sequences__'] = [
'__select_on_sequences__',
'__update_on_sequences__',
'__usage_on_sequences__',
]
privileges['__all_on_tables__'] = [
'__delete__',
'__insert__',
'__references__',
'__select_on_tables__',
'__trigger__',
'__truncate__',
'__update_on_tables__',
]
return privileges
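# --- Illustrative sketch (not part of the original module) ---
# make_well_known_privileges() returns a dict mapping privilege names either
# to inspect/grant/revoke definitions or to lists of aliased privilege names.
if __name__ == '__main__':
    privileges = make_well_known_privileges()
    print(sorted(privileges)[:5])           # a few privilege names
    print(privileges['__all_on_tables__'])  # an alias expanding to other names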
|
python/veles/tests/data/test_repack.py | pombredanne/veles | 918 | 12791058 | <reponame>pombredanne/veles<gh_stars>100-1000
# Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from veles.data.bindata import BinData
from veles.data.repack import Endian, Repacker
class TestRepacker(unittest.TestCase):
def test_endian(self):
self.assertNotEqual(Endian.LITTLE, Endian.BIG)
def test_simple_copy(self):
r = Repacker(endian=Endian.LITTLE, from_width=8, to_width=8)
self.assertEqual(r.repack_unit, 8)
self.assertEqual(r.repack_size(num_elements=2), 2)
self.assertEqual(r.repackable_size(from_size=2), 2)
a = BinData(8, [1, 2, 3, 4])
b = r.repack(a, start=1, num_elements=2)
self.assertEqual(b, BinData(8, [2, 3]))
self.assertEqual(r.repack(a), a)
def test_gather_8to16_little(self):
r = Repacker(endian=Endian.LITTLE, from_width=8, to_width=16)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(2), 4)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 1)
self.assertEqual(r.repackable_size(4), 2)
a = BinData(8, [1, 2, 3, 4, 5, 6])
b = r.repack(a, start=1, num_elements=2)
self.assertEqual(b, BinData.from_spaced_hex(16, '0302 0504'))
c = r.repack(a, start=1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(16, '0201 0403 0605'))
def test_gather_8to16_big(self):
r = Repacker(endian=Endian.BIG, from_width=8, to_width=16)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(2), 4)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 1)
self.assertEqual(r.repackable_size(4), 2)
a = BinData(8, [1, 2, 3, 4, 5, 6])
b = r.repack(a, start=1, num_elements=2)
self.assertEqual(b, BinData.from_spaced_hex(16, '0203 0405'))
c = r.repack(a, start=1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(16, '0102 0304 0506'))
def test_mash_8to12_little(self):
r = Repacker(Endian.LITTLE, 8, 12)
self.assertEqual(r.repack_unit, 24)
self.assertEqual(r.repack_size(1), 2)
self.assertEqual(r.repack_size(2), 3)
self.assertEqual(r.repackable_size(1), 0)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 2)
self.assertEqual(r.repackable_size(4), 2)
a = BinData.from_spaced_hex(8, '12 34 56 78 9a')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(12, '634 785'))
c = r.repack(a, 1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(12, '412 563 a78'))
def test_mash_8to12_big(self):
r = Repacker(Endian.BIG, 8, 12)
self.assertEqual(r.repack_unit, 24)
self.assertEqual(r.repack_size(1), 2)
self.assertEqual(r.repack_size(2), 3)
self.assertEqual(r.repackable_size(1), 0)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 2)
self.assertEqual(r.repackable_size(4), 2)
a = BinData.from_spaced_hex(8, '12 34 56 78 9a')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(12, '345 678'))
c = r.repack(a, 1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(12, '123 456 789'))
def test_split_8to1_little(self):
r = Repacker(Endian.LITTLE, 8, 1)
self.assertEqual(r.repack_unit, 8)
self.assertEqual(r.repack_size(12), 2)
self.assertEqual(r.repack_size(8), 1)
self.assertEqual(r.repack_size(9), 2)
self.assertEqual(r.repack_size(17), 3)
self.assertEqual(r.repackable_size(1), 8)
a = BinData.from_spaced_hex(8, '12 34 56')
b = r.repack(a, 1, 12)
c = BinData.from_spaced_hex(1, ' '.join(format(0x634, '012b')[::-1]))
self.assertEqual(b, c)
def test_split_8to1_big(self):
r = Repacker(Endian.BIG, 8, 1)
self.assertEqual(r.repack_unit, 8)
self.assertEqual(r.repack_size(12), 2)
self.assertEqual(r.repack_size(8), 1)
self.assertEqual(r.repack_size(9), 2)
self.assertEqual(r.repack_size(17), 3)
self.assertEqual(r.repackable_size(1), 8)
a = BinData.from_spaced_hex(8, '12 34 56')
b = r.repack(a, 1, 12)
c = BinData.from_spaced_hex(1, ' '.join(format(0x345, '012b')))
self.assertEqual(b, c)
def test_split_60to20_little(self):
r = Repacker(Endian.LITTLE, 60, 20)
self.assertEqual(r.repack_unit, 60)
self.assertEqual(r.repack_size(1), 1)
self.assertEqual(r.repack_size(2), 1)
self.assertEqual(r.repack_size(3), 1)
self.assertEqual(r.repack_size(4), 2)
self.assertEqual(r.repackable_size(1), 3)
a = BinData(60, [0xfedcba987654321])
b = r.repack(a)
self.assertEqual(b, BinData.from_spaced_hex(20, '54321 a9876 fedcb'))
def test_split_60to20_big(self):
r = Repacker(Endian.BIG, 60, 20)
self.assertEqual(r.repack_unit, 60)
self.assertEqual(r.repack_size(1), 1)
self.assertEqual(r.repack_size(2), 1)
self.assertEqual(r.repack_size(3), 1)
self.assertEqual(r.repack_size(4), 2)
self.assertEqual(r.repackable_size(1), 3)
a = BinData(60, [0xfedcba987654321])
b = r.repack(a)
self.assertEqual(b, BinData.from_spaced_hex(20, 'fedcb a9876 54321'))
def test_split_16to8_little(self):
r = Repacker(Endian.LITTLE, 16, 8)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(3), 2)
self.assertEqual(r.repackable_size(3), 6)
a = BinData(16, [0x1234, 0x5678, 0x9abc])
b = r.repack(a, 1, 3)
self.assertEqual(b, BinData.from_spaced_hex(8, '78 56 bc'))
def test_split_16to8_big(self):
r = Repacker(Endian.BIG, 16, 8)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(3), 2)
self.assertEqual(r.repackable_size(3), 6)
a = BinData(16, [0x1234, 0x5678, 0x9abc])
b = r.repack(a, 1, 3)
self.assertEqual(b, BinData.from_spaced_hex(8, '56 78 9a'))
def test_padded_8to23_left_little(self):
r = Repacker(Endian.LITTLE, 8, 23, high_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '443322 087766'))
def test_padded_8to23_right_little(self):
r = Repacker(Endian.LITTLE, 8, 23, low_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '2aa219 4cc43b'))
def test_padded_8to23_mixed_little(self):
r = Repacker(Endian.LITTLE, 8, 23, low_pad=8, high_pad=1)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '554433 198877'))
def test_padded_8to23_left_big(self):
r = Repacker(Endian.BIG, 8, 23, high_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '334455 778899'))
def test_padded_8to23_right_big(self):
r = Repacker(Endian.BIG, 8, 23, low_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '1119a2 333bc4'))
def test_padded_8to23_mixed_big(self):
r = Repacker(Endian.BIG, 8, 23, low_pad=8, high_pad=1)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '223344 667788'))
|
service-configs/zabbix/partitioning/zabbix-partitioning.py | digideskio/adminscripts | 110 | 12791064 | <gh_stars>100-1000
#!/usr/bin/python
import psycopg2
from optparse import OptionParser
tables = {
'history':'daily',
'history_sync':'daily',
'history_uint':'daily',
'history_uint_sync':'daily',
'history_str':'daily',
'history_str_sync':'daily',
'history_log':'daily',
'history_text':'daily',
'trends':'monthly',
'trends_uint':'monthly',
'acknowledges':'monthly',
'alerts':'monthly',
'auditlog':'monthly',
'events':'monthly',
'service_alarms':'monthly',
}
#change these settings
db_user = 'zabbix'
db_pw = '<PASSWORD>'
db = 'zabbix'
db_host = 'localhost'
#####
parser = OptionParser()
parser.add_option("-i", "--init", dest="init",help="partitioning init",action="store_true", default=False)
(options, args) = parser.parse_args()
if options.init:
init = 1
else:
init = 0
db_connection = psycopg2.connect(database=db, user=db_user, password=<PASSWORD>,host=db_host)
db_cursor = db_connection.cursor()
for table_key, table_value in tables.iteritems():
db_cursor.execute('''select create_zbx_partitions(%s,%s,%s)''',[table_key,table_value,init])
db_connection.commit()
db_cursor.close()
db_connection.close()
|
tests/test_timer_dec.py | pnpnpn/timy | 300 | 12791103 | <gh_stars>100-1000
from unittest import mock
from timy import timer
from timy.settings import timy_config
@mock.patch('timy.output')
def test_timer_no_tracking(p_output):
timy_config.tracking = False
@timer()
def func():
pass
func()
p_output.assert_not_called()
@mock.patch('timy.output')
@mock.patch('time.perf_counter')
def test_timer_include_sleeptime(p_perf_counter, p_output):
timy_config.tracking = True
@timer()
def func():
pass
p_perf_counter.return_value = 1
func()
p_output.assert_has_calls([
mock.call(
timy_config.DEFAULT_IDENT,
'executed (func) for 1 time in 0.000000'),
mock.call(
timy_config.DEFAULT_IDENT,
'best time was 0.000000'),
])
@mock.patch('timy.output')
@mock.patch('time.process_time')
def test_timer_include_sleeptime_no(p_process_time, p_output):
timy_config.tracking = True
@timer(include_sleeptime=False)
def func():
pass
p_process_time.return_value = 1
func()
p_output.assert_has_calls([
mock.call(
timy_config.DEFAULT_IDENT,
'executed (func) for 1 time in 0.000000'),
mock.call(
timy_config.DEFAULT_IDENT,
'best time was 0.000000'),
])
@mock.patch('timy.output')
@mock.patch('time.perf_counter')
def test_timer_with_loops(p_perf_counter, p_output):
timy_config.tracking = True
LOOPS = 4
@timer(loops=LOOPS)
def func():
pass
p_perf_counter.return_value = 1
func()
p_output.assert_has_calls([
mock.call(
timy_config.DEFAULT_IDENT,
'executed (func) for {} times in 0.000000'.format(LOOPS)),
mock.call(
timy_config.DEFAULT_IDENT,
'best time was 0.000000'),
])
|
src/word_fix.py | thoppe/orthographic-pedant | 155 | 12791148 | <reponame>thoppe/orthographic-pedant<gh_stars>100-1000
import os, json, logging, glob, codecs, time, subprocess
from contextlib import contextmanager
import requests
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
FLAG_fork = True
FLAG_delete = True
fork_sleep_time = 10
clone_error_sleep_time = 60
# Verify that there is a token set as an env variable and load it
shell_token = "GITHUB_ORTHOGRAPHIC_TOKEN"
GITHUB_TOKEN = os.environ[shell_token]
login_params = {"access_token":GITHUB_TOKEN,}
API_URL = "https://api.github.com/repos"
fork_url = API_URL + "/{user_name}/{repo_name}/forks"
pulls_url = API_URL + "/{user_name}/{repo_name}/pulls"
delete_url = API_URL + "/{user_name}/{repo_name}"
push_url = "https://{bot_name}:{bot_password}@github.com/{bot_name}/{repo_name} {branch_name}:{branch_name}"
clone_url = "https://github.com/orthographic-pedant/{repo_name}"
# Load the PR text
with open("messages/pull_request.txt") as FIN:
pull_request_msg = ' '.join(FIN.read().split())
with open("messages/commit_header.txt") as FIN:
commit_header_msg = FIN.read().strip()
with open("messages/commit_text.txt") as FIN:
commit_text_msg = FIN.read().strip()
def is_branch_different_from_default(repo):
# Checks if any substantial commits have been made
cmd = "git diff {master_branch} --".format(**repo)
p = subprocess.check_output(cmd,shell=True).strip()
# If any edits have been made this will return True
return p
def pull_request_repo(repo):
if not is_branch_different_from_default(repo):
logging.info("No edits have been made, skipping!".format(**repo))
return False
logging.info("Creating pull request for {full_name}".format(**repo))
data = {
"head" :"{bot_name}:{branch_name}".format(**repo),
"base" : repo["master_branch"],
"title" : repo["commit_header"],
"body" : pull_request_msg.format(**repo),
}
url = pulls_url.format(**repo)
r = requests.post(url,params=login_params,json=data)
if "errors" in r.json():
        from pprint import pprint
        pprint(r.json()["errors"])
logging.info("Pull request status {}".format(r))
return True
def fork_repo(repo):
f_url = fork_url.format(**repo)
r = requests.post(f_url,params=login_params)
status = r.status_code
logging.info("Creating fork, status {}".format(status))
assert(status == 202)
logging.info("Sleeping for {} seconds.".format(fork_sleep_time))
time.sleep(fork_sleep_time)
def push_commits(repo):
logging.info("Push new branch {bot_name}:{branch_name}".format(**repo))
cmd = "git push -u " + push_url.format(**repo)
os.system(cmd)
def clone_repo(repo):
git_endpoint = clone_url.format(**repo)
cmd = "git clone -q --single-branch --depth 1 " + git_endpoint
if not os.path.exists(repo["repo_name"]):
try:
msg = u"Cloning repo {full_name}".format(**repo)
logging.info(msg)
subprocess.check_output(cmd,shell=True)
except:
msg = u"Cloning repo {full_name} again after sleep".format(**repo)
logging.info(msg)
time.sleep(clone_error_sleep_time)
subprocess.check_output(cmd,shell=True)
os.system(cmd)
def does_git_branch_exist(repo):
# Checks if a branch already exists of a given name
cmd = "git rev-parse -q --verify {branch_name}".format(**repo)
try:
p = subprocess.check_output(cmd,shell=True).strip()
except subprocess.CalledProcessError:
return False
# Valid SHA1 hash will be forty characters long
return len(p.strip()) == 40
def create_branch(repo):
# Attempts to create the branch in repo["branch_name"]
if not does_git_branch_exist(repo):
logging.info("Creating new branch {branch_name}".format(**repo))
cmd = "git checkout -b {branch_name}".format(**repo)
os.system(cmd)
def delete_bot_repo(repo):
url = API_URL + "/{bot_name}/{repo_name}".format(**repo)
r = requests.delete(url,params=login_params)
msg = "Deleted bot repo {repo_name}, status {}"
logging.info(msg.format(r.status_code,**repo))
def fix_word(line,w1,w2):
line = line.replace(w1.title(),w2.title())
line = line.replace(w1,w2)
line = line.replace(w1.lower(),w2.lower())
line = line.replace(w1.upper(),w2.upper())
return line
def fix_file(f, w1, w2):
corrections = 0
newlines = []
with codecs.open(f,'r','utf-8') as FIN:
for line in FIN:
if w1.lower() in line.lower():
logging.info("Fixing {}->{} in {}".format(w1,w2,f))
line = fix_word(line,w1,w2)
corrections += 1
newlines.append(line)
with codecs.open(f,'w','utf-8') as FOUT:
FOUT.write(''.join(newlines))
return corrections
@contextmanager
def enter_repo(repo):
# Remember our original directory
org_dir = os.getcwd()
repo["bot_name"] = "orthographic-pedant"
repo["bot_password"] = <PASSWORD>
# Used so github can track the submissions...
repo["bot_email"] = "travis.hoppe"+"+orthographicpendant"+"@"+"<EMAIL>"
# Record the full name of the repo
repo["full_name"] = "{user_name}:{repo_name}".format(**repo)
logging.info("Entered {}".format(repo["full_name"]))
if FLAG_fork:
fork_repo(repo)
# Create the directories
os.system("mkdir -p forks")
os.chdir("forks")
clone_repo(repo)
# Enter the repo directory
os.chdir(repo["repo_name"])
# Get the current branch name
p = subprocess.check_output("git show-branch",shell=True)
repo["master_branch"] = p.split(']')[0].split('[')[1]
# Set the username
cmd = 'git config user.name "{bot_name}"'.format(**repo)
os.system(cmd)
cmd = 'git config user.email "{bot_email}"'.format(**repo)
os.system(cmd)
yield
logging.info("Exiting {}".format(repo["full_name"]))
if FLAG_delete:
delete_bot_repo(repo)
os.chdir(org_dir)
os.system("rm -rf forks")
def fix_repo(full_name, good_word, bad_word):
full_name = full_name.strip()
user_name, repo_name = full_name.split('/')
repo = {
"access_token" : GITHUB_TOKEN,
"user_name" : user_name,
"repo_name" : repo_name,
"good_word" : good_word,
"bad_word" : bad_word,
}
# Check if the user_name is a "bad_word", this is a false positive!
if bad_word.lower() in user_name.lower():
return False
# Check if repo_name is a "bad_word", this is also a false positive!
if bad_word.lower() in repo_name.lower():
return False
with enter_repo(repo):
# Find READMES
F_README = [x for x in glob.glob("*.*")
if 'readme.' in x.lower()]
repo["branch_name"] = "spell_check/{}".format(good_word)
create_branch(repo)
# Fix READMES
total_corrections = 0
for fr in F_README:
try:
correction_count = fix_file(fr, bad_word, good_word)
except UnicodeDecodeError:
# Skip the repo if the file is too funky for utf-8
msg = "UnicodeDecode Error"
logging.error(msg)
return False
total_corrections += correction_count
logging.info("Fixed {} spelling mistakes".format(total_corrections))
# Commit changes
repo["commit_header"] = commit_header_msg.format(**repo)
repo["commit_text"] = commit_text_msg.format(**repo)
cmd = 'git commit -a -m "{commit_header}" -m "{commit_text}"'.format(**repo)
os.system(cmd)
# Push the changes to bot directory
push_commits(repo)
# Create pull request
pull_status = pull_request_repo(repo)
return pull_status
###############################################################
if __name__ == "__main__":
# Target word
bad_word = "Celcius"
good_word = "Celsius"
full_name = "thoppe/I-am-error"
fix_repo(full_name, good_word, bad_word)
|
udatetime/_pure.py | kashnick/udatetime | 244 | 12791149 | <reponame>kashnick/udatetime
from datetime import tzinfo, timedelta, datetime as dt_datetime
from time import time, gmtime
from math import floor, ceil
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
class TZFixedOffset(tzinfo):
def __init__(self, offset):
self.offset = offset
def utcoffset(self, dt=None):
return timedelta(seconds=self.offset * 60)
def dst(self, dt=None):
return timedelta(0)
def tzname(self, dt=None):
sign = '+'
if self.offset < 0:
sign = '-'
return "%s%d:%d" % (sign, self.offset / 60, self.offset % 60)
def __repr__(self):
return self.tzname()
def _timestamp_to_date_time(timestamp, tzinfo):
t_full = timestamp + (tzinfo.offset * 60)
timestamp = int(floor(t_full))
frac = (t_full - timestamp) * 1e6
us = int(floor(frac + 0.5) if frac >= 0.0 else ceil(frac - 0.5))
if us == 1e6:
timestamp += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = gmtime(timestamp)
ss = min(ss, 59) # if sec > 59, set 59 (platform leap support)
return dt_datetime(y, m, d, hh, mm, ss, us, tzinfo)
def _format_date_time(date_time):
tm = date_time.timetuple()
offset = 0
sign = '+'
if date_time.tzinfo is not None:
if date_time.tzinfo.__class__ is not TZFixedOffset:
# TODO: Support all tzinfo subclasses by calling utcoffset()
raise ValueError('Only TZFixedOffset supported.')
offset = date_time.tzinfo.offset
if offset < 0:
offset = offset * -1
sign = '-'
return '%04d-%02d-%02dT%02d:%02d:%02d.%06d%c%02d:%02d' % (
tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
date_time.microsecond, sign, offset / 60, offset % 60
)
def _get_local_utc_offset():
ts = time()
return (
dt_datetime.fromtimestamp(ts) - dt_datetime.utcfromtimestamp(ts)
).total_seconds() / 60
local_utc_offset = _get_local_utc_offset()
local_timezone = TZFixedOffset(local_utc_offset)
utc_timezone = TZFixedOffset(0)
def utcnow():
'''datetime aware object in UTC with current date and time.'''
return _timestamp_to_date_time(time(), utc_timezone)
def now():
'''datetime aware object in local timezone with current date and time.'''
return _timestamp_to_date_time(time(), local_timezone)
def from_rfc3339_string(rfc3339_string):
'''Parse RFC3339 compliant date-time string.'''
rfc3339_string = rfc3339_string.replace(' ', '').lower()
if 't' not in rfc3339_string:
raise ValueError(
'Invalid RFC3339 string. Missing \'T\' date/time separator.'
)
(date, _, _time) = rfc3339_string.partition('t')
if not date or not _time:
raise ValueError('Invalid RFC3339 string.')
try:
(year, month, day) = date.split('-')
year = int(year)
month = int(month)
day = int(day)
except ValueError:
raise ValueError('Invalid RFC3339 string. Invalid date.')
try:
(hour, minute, second) = _time[:8].split(':')
hour = int(hour)
minute = int(minute)
second = int(second)
except ValueError:
raise ValueError('Invalid RFC3339 string. Invalid time.')
usec = 0
offset = None
if len(_time) > 8:
if _time[8] == '.':
usec_buf = ''
for c in _time[9:]:
if c in '0123456789':
usec_buf += c
else:
break
if len(usec_buf) > 6:
raise ValueError('Invalid RFC3339 string. Invalid fractions.')
usec = int(usec_buf)
if len(usec_buf) > 0 and len(usec_buf) < 6:
# ugly as shit, but good damn multiplication precision makes
# it a mess
usec = usec * int('1' + '0' * (6 - len(usec_buf)))
_time = _time[9 + len(usec_buf):]
elif _time[8] == 'z':
offset = 0
if len(_time[9:]):
raise ValueError(
'Invalid RFC3339 string. Remaining data after time zone.'
)
else:
_time = _time[8:]
else:
offset = 0
if offset is None and (len(_time) == 0 or _time[0] == 'z'):
offset = 0
if len(_time[1:]):
raise ValueError(
'Invalid RFC3339 string. Remaining data after time zone.'
)
elif offset is None:
if _time[0] not in '+-':
raise ValueError('Invalid RFC3339 string. Expected timezone.')
negative = True if _time[0] == '-' else False
try:
(off_hour, off_minute) = _time[1:].split(':')
off_hour = int(off_hour)
off_minute = int(off_minute)
except ValueError:
raise ValueError('Invalid RFC3339 string. Invalid timezone.')
offset = (off_hour * 60) + off_minute
if negative:
offset = offset * -1
return dt_datetime(
year, month, day, hour, minute, second, usec, TZFixedOffset(offset)
)
def to_rfc3339_string(date_time):
'''Serialize date_time to RFC3339 compliant date-time string.'''
if date_time and date_time.__class__ is not dt_datetime:
raise ValueError("Expected a datetime object.")
return _format_date_time(date_time)
def from_timestamp(timestamp, tz=None):
'''timestamp[, tz] -> tz's local time from POSIX timestamp.'''
if tz is None:
tz = local_timezone
elif tz.__class__ is not TZFixedOffset:
# TODO: Support all tzinfo subclasses by calling utcoffset()
raise ValueError('Only TZFixedOffset supported.')
return _timestamp_to_date_time(timestamp, tz)
def from_utctimestamp(timestamp):
'''timestamp -> UTC datetime from a POSIX timestamp (like time.time()).'''
return _timestamp_to_date_time(timestamp, utc_timezone)
def utcnow_to_string():
'''Current UTC date and time RFC3339 compliant date-time string.'''
return _format_date_time(utcnow())
def now_to_string():
'''Local date and time RFC3339 compliant date-time string.'''
return _format_date_time(now())
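# --- Illustrative sketch (not part of the original module) ---
# Round-trips the current UTC time through the RFC3339 serializer and parser.
if __name__ == '__main__':
    stamp = utcnow_to_string()
    parsed = from_rfc3339_string(stamp)
    assert to_rfc3339_string(parsed) == stamp
    print(stamp)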
|
datasets/check_utils.py | AkshatShetty101/dmm-8803 | 247 | 12791171 | import math
import time
import pickle
import sys
import os
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from datasets.data_utils import project_image_to_rect, compute_box_3d
def adjust_coord_for_view(points):
return points[:, [2, 0, 1]] * np.array([1, -1, -1])
def draw_box3d(corners, ax):
    '''
    Draw a 3D box given its 8 corners, shape (8, 3).
    '''
order = np.array([
0, 1,
1, 2,
2, 3,
3, 0,
4, 5,
5, 6,
6, 7,
7, 4,
3, 7,
0, 4,
2, 6,
1, 5]).reshape(-1, 2)
for i in range(len(order)):
ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2])
def draw_points(pts, ax):
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2])
def check_box_frustum(box, P, center, dimension, angle):
x1, y1, x2, y2 = box
box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3
z1 = np.arange(0, 70, 0.1)
xyz1 = np.zeros((len(z1), 3))
xyz1[:, 0] = x1
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz1_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz2_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x1
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz3_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz4_rect = project_image_to_rect(xyz1, P)
fig = plt.figure()
ax = fig.gca(projection='3d')
draw_box3d(box_corner, ax)
draw_points(xyz1_rect, ax)
draw_points(xyz2_rect, ax)
draw_points(xyz3_rect, ax)
draw_points(xyz4_rect, ax)
plt.show()
def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners):
fig = plt.figure()
ax = fig.gca(projection='3d')
points = adjust_coord_for_view(points)
ref_points = adjust_coord_for_view(ref_points)
gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners)
pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners)
# ax.set_aspect('equal')
# ax.axis('equal')
ax.set_axis_on()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
draw_points(points, ax)
draw_points(ref_points, ax)
draw_box3d(gt_box3d_corners, ax)
draw_box3d(pred_box3d_corners, ax)
plt.show()
|
source/grammar/openqasm_reference_parser/exceptions.py | shiyunon/openqasm | 603 | 12791229 | <gh_stars>100-1000
__all__ = ["Qasm3ParserError"]
class Qasm3ParserError(Exception):
pass
|
examples/internationalisation/testLangs.py | tgolsson/appJar | 666 | 12791245 | <filename>examples/internationalisation/testLangs.py
import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
app.changeLanguage(btn)
app=gui()
app.showSplash()
app.addLabel("l1", "default text")
app.addButtons(["English", "Korean", "French"], press)
app.addLabel("l2", "default text")
app.addLabel("l3", "default text")
app.addLabelEntry("Genome")
app.addLabelScale("s1")
app.addMessage("m1", "Default message text")
app.addListBox("fruits", ["apples", "oranges", "tomatoes"])
app.addOptionBox("fruits", ["apples", "oranges", "tomatoes"])
app.addSpinBox("fruits", ["apples", "oranges", "tomatoes"])
app.addCheckBox("b1")
app.addCheckBox("b2")
app.addCheckBox("b3")
app.startLabelFrame("Names")
app.addRadioButton("name", "b1")
app.addRadioButton("name", "b2")
app.addRadioButton("name", "b3")
app.addRadioButton("name", "b4")
app.stopLabelFrame()
app.addRadioButton("age", "b1")
app.addRadioButton("age", "b2")
app.addRadioButton("age", "b3")
app.addLink("l1", None)
app.addWebLink("l2", "http://www.appJar.info")
app.addMeter("m1")
app.addEntry("e1")
app.addEntry("e2")
app.setEntryDefault("e1", "<DEFAULT>")
app.go(language="ENGLISH")
|
Chapter 2/computational_graph.py | shantam21/Deep-Learning-with-TensorFlow-2-and-Keras | 267 | 12791256 | <reponame>shantam21/Deep-Learning-with-TensorFlow-2-and-Keras<gh_stars>100-1000
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
in_a = tf.placeholder(dtype=tf.float32, shape=(2))
def model(x):
with tf.variable_scope("matmul"):
W = tf.get_variable("W", initializer=tf.ones(shape=(2,2)))
b = tf.get_variable("b", initializer=tf.zeros(shape=(2)))
return x * W + b
out_a = model(in_a)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
outs = sess.run([out_a],
feed_dict={in_a: [1, 0]})
writer = tf.summary.FileWriter("./logs/example", sess.graph) |
docqa/triviaqa/build_complete_vocab.py | Willyoung2017/doc-qa | 422 | 12791269 | import argparse
from os.path import exists
from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset
from docqa.triviaqa.evidence_corpus import get_evidence_voc
"""
Build vocab of all words in the triviaqa dataset, including
all documents and all train questions.
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("output")
parser.add_argument("-m", "--min_count", type=int, default=1)
parser.add_argument("-n", "--n_processes", type=int, default=1)
args = parser.parse_args()
if exists(args.output):
raise ValueError()
data = TriviaQaOpenDataset()
corpus_voc = get_evidence_voc(data.evidence, args.n_processes)
print("Adding question voc...")
train = data.get_train()
for q in train:
corpus_voc.update(q.question)
print("Saving...")
with open(args.output, "w") as f:
for word, c in corpus_voc.items():
if c >= args.min_count:
f.write(word)
f.write("\n")
if __name__ == "__main__":
main() |
cellphonedb/utils/dataframe_functions.py | chapuzzo/cellphonedb | 278 | 12791290 | <gh_stars>100-1000
import pandas as pd
from cellphonedb.utils import dataframe_format
def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame,
round_decimals: bool = False) -> pd.DataFrame:
dataframe1 = dataframe1.copy(deep=True)
dataframe2 = dataframe2.copy(deep=True)
columns_names_1 = list(dataframe1.columns.values)
columns_names_1.sort()
dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1)
columns_names_2 = list(dataframe2.columns.values)
columns_names_2.sort()
dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2)
if not dataframe1.empty:
dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True)
if round_decimals:
dataframe1 = dataframe1.round(5)
if not dataframe2.empty:
dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True)
if round_decimals:
dataframe2 = dataframe2.round(5)
if dataframe1.empty and dataframe2.empty:
return pd.Series(dataframe1.columns.values).equals(pd.Series(dataframe2.columns.values))
return dataframe1.equals(dataframe2)
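# --- Illustrative sketch (not part of the original module) ---
# Two frames holding the same rows in a different row and column order are
# expected to compare as equal, since both are sorted and their columns
# aligned before comparison.
if __name__ == '__main__':
    left = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    right = pd.DataFrame({'b': [4.0, 3.0], 'a': [2, 1]})
    print(dataframes_has_same_data(left, right))  # expected: True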
|
scanpy/datasets/__init__.py | mkmkryu/scanpy2 | 1,171 | 12791330 | """Builtin Datasets.
"""
from ._datasets import (
blobs,
burczynski06,
krumsiek11,
moignard15,
paul15,
toggleswitch,
pbmc68k_reduced,
pbmc3k,
pbmc3k_processed,
visium_sge,
)
from ._ebi_expression_atlas import ebi_expression_atlas
|