max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
prediction/tools/model_warmup/model_warmup.py | gogasca/ai-platform-samples-1 | 418 | 12787035 | <gh_stars>100-1000
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Warmup requests."""
import tensorflow as tf
import requests
from tensorflow.python.framework import tensor_util
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
IMAGE_URL = 'https://tensorflow.org/images/blogs/serving/cat.jpg'
NUM_RECORDS = 100
def get_image_bytes():
image_content = requests.get(IMAGE_URL, stream=True)
image_content.raise_for_status()
return image_content.content
def main():
"""Generate TFRecords for warming up."""
with tf.io.TFRecordWriter("tf_serving_warmup_requests") as writer:
image_bytes = get_image_bytes()
predict_request = predict_pb2.PredictRequest()
predict_request.model_spec.name = 'resnet'
predict_request.model_spec.signature_name = 'serving_default'
predict_request.inputs['image_bytes'].CopyFrom(
tensor_util.make_tensor_proto([image_bytes], tf.string))
log = prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=predict_request))
for r in range(NUM_RECORDS):
writer.write(log.SerializeToString())
if __name__ == "__main__":
main()
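# Hedged editorial note: TensorFlow Serving looks for warmup records at
# "assets.extra/tf_serving_warmup_requests" inside the exported SavedModel
# version directory, so the file written above is typically copied there.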
|
tests/test_visitors/test_tokenize/test_primitives/test_string_tokens/test_unnecessary_raw_strings.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12787039 | <filename>tests/test_visitors/test_tokenize/test_primitives/test_string_tokens/test_unnecessary_raw_strings.py
import pytest
from wemake_python_styleguide.violations.consistency import (
RawStringNotNeededViolation,
)
from wemake_python_styleguide.visitors.tokenize.primitives import (
WrongStringTokenVisitor,
)
@pytest.mark.parametrize('raw_strings', [
r"r'some text\\'",
r"r'some text\''",
r"r'some text\"'",
r'r"some text\'"',
r"r'some text\t'",
r"r'some text\a'",
r"r'some text\n'",
r"r'some text\u041b'",
r"r'some text\043'",
r"r'some text\x23'",
])
def test_necessary_raw_string(
parse_tokens,
assert_errors,
default_options,
raw_strings,
):
"""Ensures that correct usage of raw string works."""
file_tokens = parse_tokens(raw_strings)
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('raw_strings', [
"r'No escaped character'",
'r"Here neither"',
"r'''Not here as well'''",
'r"""Not here as well"""',
])
def test_unnecessary_raw_string(
parse_tokens,
assert_errors,
default_options,
raw_strings,
):
"""Ensures that usage of raw string is forbidden if no backslash."""
file_tokens = parse_tokens(raw_strings)
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [RawStringNotNeededViolation])
|
leetcode/188.best-time-to-buy-and-sell-stock-iv.py | geemaple/algorithm | 177 | 12787040 | <filename>leetcode/188.best-time-to-buy-and-sell-stock-iv.py
class Solution(object):
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
if prices is None or len(prices) == 0 or k == 0:
return 0
res = 0
m = len(prices)
if k > m // 2: # same as stock 2
for i in range(1, m):
if prices[i] - prices[i - 1] > 0:
res += prices[i] - prices[i - 1]
else: # same as stock 3
state = 2 * k + 1
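            # Hedged explanatory comment (added): with at most k transactions the
            # DP uses 2k + 1 states. Odd states (1, 3, 5, ...) mean "not holding
            # a share after completing 0, 1, 2, ... transactions"; even states
            # (2, 4, ...) mean "holding a share during the 1st, 2nd, ...
            # transaction". table[i][j] is the best profit after day i in state j.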
table = [[0 for _ in range(state + 1)] for _ in range(m + 1)]
# init
for j in range(1, state + 1):
table[0][j] = 0 if j == 1 else float('-inf')
for i in range(1, m + 1):
for j in range(1, state + 1):
value = 0
if j % 2 == 1: # 1, 3, 5
# f[i][j] = max(f[i - 1][j], f[i - 1][j - 1] + prices[i - 1] - prices[i - 2])
value = table[i - 1][j]
if i - 2 >= 0 and j > 1 and table[i - 1][j - 1] != float('-inf'):
value = max(value, table[i - 1][j - 1] + prices[i - 1] - prices[i - 2])
else: # 2, 4, 6
# f[i][j] = max(f[i - 1][j - 1], f[i - 1][j] + prices[i - 1] - prices[i - 2], f[i - 1][j - 2] + prices[i - 1] - prices[i - 2])
value = table[i - 1][j - 1]
if i - 2 >= 0 and table[i - 1][j] != float('-inf'):
value = max(value, table[i - 1][j] + prices[i - 1] - prices[i - 2])
if i - 2 >= 0 and j > 2 and table[i - 1][j - 2] != float('-inf'):
value = max(value, table[i - 1][j - 2] + prices[i - 1] - prices[i - 2])
table[i][j] = value
for j in range(1, state + 1, 2):
res = max(res, table[m][j])
        return res
|
pyclustering/cluster/rock.py | JosephChataignon/pyclustering | 1,013 | 12787041 | """!
@brief Cluster analysis algorithm: ROCK
@details Implementation based on paper @cite inproceedings::rock::1.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.cluster.encoder import type_encoding
from pyclustering.utils import euclidean_distance
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.rock_wrapper as wrapper
class rock:
"""!
@brief The class represents clustering algorithm ROCK.
Example:
@code
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.rock import rock
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.utils import read_sample
# Read sample for clustering from file.
sample = read_sample(FCPS_SAMPLES.SAMPLE_HEPTA)
# Create instance of ROCK algorithm for cluster analysis. Seven clusters should be allocated.
rock_instance = rock(sample, 1.0, 7)
# Run cluster analysis.
rock_instance.process()
# Obtain results of clustering.
clusters = rock_instance.get_clusters()
# Visualize clustering results.
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.show()
@endcode
"""
def __init__(self, data, eps, number_clusters, threshold=0.5, ccore=True):
"""!
@brief Constructor of clustering algorithm ROCK.
@param[in] data (list): Input data - list of points where each point is represented by list of coordinates.
@param[in] eps (double): Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius.
@param[in] number_clusters (uint): Defines number of clusters that should be allocated from the input data set.
        @param[in] threshold (double): Value that defines the degree of normalization which influences the choice of clusters for merging during processing.
        @param[in] ccore (bool): Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python code.
"""
self.__pointer_data = data
self.__eps = eps
self.__number_clusters = number_clusters
self.__threshold = threshold
self.__clusters = None
self.__ccore = ccore
if self.__ccore:
self.__ccore = ccore_library.workable()
self.__verify_arguments()
self.__degree_normalization = 1.0 + 2.0 * ((1.0 - threshold) / (1.0 + threshold))
self.__adjacency_matrix = None
self.__create_adjacency_matrix()
def process(self):
"""!
@brief Performs cluster analysis in line with rules of ROCK algorithm.
@return (rock) Returns itself (ROCK instance).
@see get_clusters()
"""
# TODO: (Not related to specification, just idea) First iteration should be investigated. Euclidean distance should be used for clustering between two
# points and rock algorithm between clusters because we consider non-categorical samples. But it is required more investigations.
if self.__ccore is True:
self.__clusters = wrapper.rock(self.__pointer_data, self.__eps, self.__number_clusters, self.__threshold)
else:
self.__clusters = [[index] for index in range(len(self.__pointer_data))]
while len(self.__clusters) > self.__number_clusters:
indexes = self.__find_pair_clusters(self.__clusters)
if indexes != [-1, -1]:
self.__clusters[indexes[0]] += self.__clusters[indexes[1]]
self.__clusters.pop(indexes[1]) # remove merged cluster.
else:
break # totally separated clusters have been allocated
return self
def get_clusters(self):
"""!
@brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
"""
return self.__clusters
def get_cluster_encoding(self):
"""!
        @brief Returns the clustering result representation type that indicates how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __find_pair_clusters(self, clusters):
"""!
@brief Returns pair of clusters that are best candidates for merging in line with goodness measure.
The pair of clusters for which the above goodness measure is maximum is the best pair of clusters to be merged.
@param[in] clusters (list): List of clusters that have been allocated during processing, each cluster is represented by list of indexes of points from the input data set.
@return (list) List that contains two indexes of clusters (from list 'clusters') that should be merged on this step.
        It equals [-1, -1] when there are no links between the clusters.
"""
maximum_goodness = 0.0
cluster_indexes = [-1, -1]
for i in range(0, len(clusters)):
for j in range(i + 1, len(clusters)):
goodness = self.__calculate_goodness(clusters[i], clusters[j])
if goodness > maximum_goodness:
maximum_goodness = goodness
cluster_indexes = [i, j]
return cluster_indexes
def __calculate_links(self, cluster1, cluster2):
"""!
@brief Returns number of link between two clusters.
        @details A link between two objects (points) exists only if the distance between them does not exceed the connectivity radius.
@param[in] cluster1 (list): The first cluster.
@param[in] cluster2 (list): The second cluster.
@return (uint) Number of links between two clusters.
"""
number_links = 0
for index1 in cluster1:
for index2 in cluster2:
number_links += self.__adjacency_matrix[index1][index2]
return number_links
def __create_adjacency_matrix(self):
"""!
        @brief Creates a 2D adjacency matrix (list of lists) where each element describes the existence of a link between two points (i.e., whether the points are neighbors).
"""
size_data = len(self.__pointer_data)
self.__adjacency_matrix = [[0 for i in range(size_data)] for j in range(size_data)]
for i in range(0, size_data):
for j in range(i + 1, size_data):
distance = euclidean_distance(self.__pointer_data[i], self.__pointer_data[j])
if (distance <= self.__eps):
self.__adjacency_matrix[i][j] = 1
self.__adjacency_matrix[j][i] = 1
def __calculate_goodness(self, cluster1, cluster2):
"""!
@brief Calculates coefficient 'goodness measurement' between two clusters. The coefficient defines level of suitability of clusters for merging.
@param[in] cluster1 (list): The first cluster.
@param[in] cluster2 (list): The second cluster.
@return Goodness measure between two clusters.
"""
number_links = self.__calculate_links(cluster1, cluster2)
        divider = (len(cluster1) + len(cluster2)) ** self.__degree_normalization - len(cluster1) ** self.__degree_normalization - len(cluster2) ** self.__degree_normalization
        return number_links / divider
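        # Hedged worked example (added): with the default threshold of 0.5 the
        # normalization degree is 1 + 2 * (0.5 / 1.5) = 5/3, so two singleton
        # clusters connected by a single link score
        # 1 / (2 ** (5/3) - 1 - 1), which is roughly 0.85.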
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__eps < 0:
raise ValueError("Connectivity radius (current value: '%d') should be greater or equal to 0." % self.__eps)
if self.__threshold < 0 or self.__threshold > 1:
raise ValueError("Threshold (current value: '%d') should be in range (0, 1)." % self.__threshold)
if (self.__number_clusters is not None) and (self.__number_clusters <= 0):
raise ValueError("Amount of clusters (current value: '%d') should be greater than 0." %
self.__number_clusters)
|
examples/how_to/start_parameterized_build.py | pingod/python-jenkins_api | 556 | 12787053 | """
Start a Parameterized Build
"""
from __future__ import print_function
from jenkinsapi.jenkins import Jenkins
jenkins = Jenkins('http://localhost:8080')
params = {'VERSION': '1.2.3', 'PYTHON_VER': '2.7'}
# This will start the job in non-blocking manner
jenkins.build_job('foo', params)
# This will start the job and will return a QueueItem object which
# can be used to get build results
job = jenkins['foo']
qi = job.invoke(build_params=params)
# Block this script until build is finished
if qi.is_queued() or qi.is_running():
qi.block_until_complete()
build = qi.get_build()
print(build)
|
recipes/Python/546543_simple_way_create_change_your_registry/recipe-546543.py | tdiprima/code | 2,023 | 12787063 | #IN THE NAME OF ALLAH
#Nike Name: Pcrlth0n
#(C) 2008
#a simple way to create and change your registry on windows
import win32api
def new_key():
reg1 = open('C:\\reg1.reg', 'w')
    reg1.write("""REGEDIT4\n[HKEY_CURRENT_USER\\Example]""")
reg1.close()
win32api.WinExec('reg import C:\\reg1.reg', 0)
def new_string_key():
reg2 = open('C:\\reg2.reg', 'w')
reg2.write("""REGEDIT4\n[HKEY_CURRENT_USER\\Example]\n"String Key"="C:\\\\\"""")
reg2.close()
win32api.WinExec('reg import C:\\reg2.reg', 0)
def new_dword_key():
reg3 = open('C:\\reg3.reg', 'w')
reg3.write("""REGEDIT4\n[HKEY_CURRENT_USER\\Example]\n"Dword key"=dword:00000000 """)
reg3.close()
win32api.WinExec('reg import C:\\reg3.reg', 0)
#new_key()
#new_string_key()
#new_dword_key()
|
main.py | pzmarzly/ancs4linux | 120 | 12787072 | <filename>main.py
#!/usr/bin/env python3
# https://developer.apple.com/library/archive/documentation/CoreBluetooth/Reference/AppleNotificationCenterServiceSpecification/Introduction/Introduction.html
import sys
import signal
import argparse
import time
from Hci import Hci
from Handler import DefaultHandler
parser = argparse.ArgumentParser()
parser.add_argument("--hci", metavar="INT", type=int, default=0,
help="use Bluetooth hciX (default 0, see `hcitool dev')")
parser.add_argument("--resolution", metavar="INT", type=int, default=20,
help="polling rate (default 20 per second)")
args = parser.parse_args()
hciID = args.hci
resolution = args.resolution
def signal_handler(sig, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
handler = DefaultHandler()
hci = Hci(hciID)
while True:
device = hci.search_for_device()
while device is None:
time.sleep(1)
device = hci.search_for_device()
handler.device_connected()
try:
device.main_loop(handler, resolution)
except Exception as e:
handler.error(exception=e)
handler.device_disconnected()
|
algorithms/larrys-array.py | gajubadge11/HackerRank-1 | 340 | 12787103 | #!/bin/python3
import sys
def rotate(A, pos):
A[pos], A[pos+1], A[pos+2] = A[pos+1], A[pos+2], A[pos]
def larrysArray(A):
for _ in range(len(A)):
for ind in range(1, len(A) - 1):
a, b, c = A[ind-1], A[ind], A[ind+1]
#print("ind = {} A = {} B = {} C = {}".format(ind, a, b, c))
if a > b or c < a:
#print("rotating 1")
rotate(A, ind-1)
if A == sorted(A):
return 'YES'
else:
return 'NO'
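# Hedged editorial addition: an equivalent O(n^2) check uses inversion parity.
# Each allowed move is a rotation of three consecutive elements, i.e. a 3-cycle
# and therefore an even permutation, so it can never change the parity of the
# inversion count; the array is sortable exactly when that count is even. The
# helper name below is ours, not part of the original solution.
def larrys_array_by_parity(A):
    inversions = sum(
        1
        for i in range(len(A))
        for j in range(i + 1, len(A))
        if A[i] > A[j]
    )
    return 'YES' if inversions % 2 == 0 else 'NO'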
if __name__ == "__main__":
t = int(input().strip())
for a0 in range(t):
n = int(input().strip())
A = list(map(int, input().strip().split(' ')))
result = larrysArray(A)
print(result)
|
manila/scheduler/weighers/host_affinity.py | kpawar89/manila | 159 | 12787109 | # Copyright 2019 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila import context
from manila.db import api as db_api
from manila.scheduler.weighers import base_host
from manila.share import utils as share_utils
class HostAffinityWeigher(base_host.BaseHostWeigher):
def _weigh_object(self, obj, weight_properties):
"""Weigh hosts based on their proximity to the source's share pool.
If no snapshot_id was provided will return 0, otherwise, if source and
destination hosts are located on:
1. same back ends and pools: host is a perfect choice (100)
2. same back ends and different pools: host is a very good choice (75)
3. different back ends with the same AZ: host is a good choice (50)
        4. different back ends and AZs: host isn't such a good choice (25)
"""
ctx = context.get_admin_context()
request_spec = weight_properties.get('request_spec')
snapshot_id = request_spec.get('snapshot_id')
snapshot_host = request_spec.get('snapshot_host')
if None in [snapshot_id, snapshot_host]:
# NOTE(silvacarlose): if the request does not contain a snapshot_id
# or a snapshot_host, the user is not creating a share from a
# snapshot and we don't need to weigh the host.
return 0
snapshot_ref = db_api.share_snapshot_get(ctx, snapshot_id)
# Source host info: pool, backend and availability zone
src_pool = share_utils.extract_host(snapshot_host, 'pool')
src_backend = share_utils.extract_host(
request_spec.get('snapshot_host'), 'backend')
src_az = snapshot_ref['share']['availability_zone']
# Destination host info: pool, backend and availability zone
dst_pool = share_utils.extract_host(obj.host, 'pool')
dst_backend = share_utils.extract_host(obj.host, 'backend')
# NOTE(dviroel): All hosts were already filtered by the availability
# zone parameter.
dst_az = None
if weight_properties['availability_zone_id']:
dst_az = db_api.availability_zone_get(
ctx, weight_properties['availability_zone_id']).name
if src_backend == dst_backend:
return 100 if (src_pool and src_pool == dst_pool) else 75
else:
return 50 if (src_az and src_az == dst_az) else 25
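# Hedged editorial sketch (not part of manila): the proximity scoring ladder
# above, restated on plain strings so the 100/75/50/25 rule can be read in
# isolation. The function name and signature are ours.
def _host_affinity_score_sketch(src_backend, dst_backend,
                                src_pool, dst_pool, src_az, dst_az):
    if src_backend == dst_backend:
        return 100 if (src_pool and src_pool == dst_pool) else 75
    return 50 if (src_az and src_az == dst_az) else 25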
|
optimum/onnxruntime/preprocessors/passes/fully_connected.py | techthiyanes/optimum | 414 | 12787112 | <reponame>techthiyanes/optimum
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Set, Tuple
from onnx import ModelProto
from onnxruntime.transformers.onnx_model import OnnxModel
from optimum.onnxruntime.preprocessors import PreprocessorPass
class IncludeFullyConnectedNodes(PreprocessorPass):
def __init__(self):
super().__init__()
def __call__(self, graph: ModelProto, model: OnnxModel) -> Tuple[Set[str], Set[str]]:
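        # Editorial comment (added): a fully connected layer appears in the ONNX
        # graph as a MatMul node followed by an Add node (the bias), so every
        # matching MatMul/Add pair is collected and its node names returned.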
fc_subgraphs = []
for add_node in model.get_nodes_by_op_type("Add"):
fc_components = model.match_parent_path(add_node, ["MatMul"], [1])
if fc_components is not None:
fc_components.append(add_node)
fc_subgraphs.append(fc_components)
fc_components = {node.name for fc in fc_subgraphs for node in fc}
return fc_components, set()
|
spockbot/plugins/core/auth.py | SpockBotMC/SpockBot | 171 | 12787113 | """
Provides authorization functions for Mojang's login and session servers
"""
import hashlib
import json
# This is for python2 compatibility
try:
import urllib.request as request
from urllib.error import URLError
except ImportError:
import urllib2 as request
from urllib2 import URLError
import logging
import os
from spockbot.mcp.yggdrasil import YggdrasilCore
from spockbot.plugins.base import PluginBase, pl_announce
logger = logging.getLogger('spockbot')
# This function courtesy of barneygale
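# Hedged editorial note: Mojang's session server expects the server hash as
# Java's signed BigInteger hex string, so a SHA-1 digest whose top bit is set
# has to be rendered as a negative two's-complement value, which is what the
# helper below reproduces.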
def java_hex_digest(digest):
d = int(digest.hexdigest(), 16)
if d >> 39 * 4 & 0x8:
d = "-%x" % ((-d) & (2 ** (40 * 4) - 1))
else:
d = "%x" % d
return d
class AuthCore(object):
def __init__(self, event, online_mode, auth_timeout):
self.online_mode = online_mode
self.auth_timeout = auth_timeout
self.__event = event
self.ygg = YggdrasilCore()
self._shared_secret = None
self._username = None
def get_username(self):
return self._username
def set_username(self, username):
self.ygg.username = username
username = property(get_username, set_username)
def set_password(self, password):
if password and not self.online_mode:
logger.warning("PASSWORD PROVIDED WITH ONLINE_MODE == FALSE")
logger.warning("YOU PROBABLY DIDN'T WANT TO DO THAT")
self.ygg.password = password
password = property(lambda x: bool(x.ygg.password), set_password)
def set_client_token(self, client_token):
if not self.online_mode:
logger.warning("CLIENT TOKEN PROVIDED WITH ONLINE_MODE == FALSE")
logger.warning("YOU PROBABLY DIDN'T WANT TO DO THAT")
self.ygg.client_token = client_token
client_token = property(
lambda x: bool(x.ygg.client_token), set_client_token
)
def set_auth_token(self, auth_token):
if not self.online_mode:
logger.warning("AUTH TOKEN PROVIDED WITH ONLINE_MODE == FALSE")
logger.warning("YOU PROBABLY DIDN'T WANT TO DO THAT")
self.ygg.auth_token = auth_token
auth_token = property(
lambda x: bool(x.ygg.auth_token), set_auth_token
)
def get_shared_secret(self):
self._shared_secret = self._shared_secret or os.urandom(16)
return self._shared_secret
shared_secret = property(get_shared_secret)
def start_session(self):
if not self.online_mode:
self._username = self.ygg.username
return True
if self.ygg.login():
self._username = self.ygg.selected_profile['name']
return True
self.__event.emit('auth_session_error')
return False
def send_session_auth(self, pubkey_raw, server_id_raw):
server_id = java_hex_digest(hashlib.sha1(
server_id_raw.encode('ascii') + self.shared_secret + pubkey_raw
))
logger.info('Attempting to authenticate with Mojang session server')
url = "https://sessionserver.mojang.com/session/minecraft/join"
data = json.dumps({
'accessToken': self.ygg.access_token,
'selectedProfile': self.ygg.selected_profile,
'serverId': server_id,
}).encode('utf-8')
headers = {'Content-Type': 'application/json'}
req = request.Request(url, data, headers)
try:
rep = request.urlopen(
req, timeout=self.auth_timeout
).read().decode('ascii')
except URLError:
rep = "Couldn't connect to sessionserver.mojang.com"
if rep:
logger.warning('Mojang session auth response: %s', rep)
logger.info('Session authentication successful')
@pl_announce('Auth')
class AuthPlugin(PluginBase):
requires = 'Event'
defaults = {
'online_mode': True,
'auth_timeout': 3, # No idea how long this should be, 3s seems good
'auth_quit': True,
'sess_quit': True,
}
events = {
'auth_login_error': 'handle_auth_error',
'auth_session_error': 'handle_session_error',
}
def __init__(self, ploader, settings):
super(AuthPlugin, self).__init__(ploader, settings)
self.sess_quit = self.settings['sess_quit']
self.auth_quit = self.settings['auth_quit']
ploader.provides('Auth', AuthCore(
self.event,
self.settings['online_mode'],
self.settings['auth_timeout']
))
def handle_auth_error(self, name, data):
if self.auth_quit:
logger.error('AUTH: Session authentication error, calling kill')
self.event.kill()
def handle_session_error(self, name, data):
if self.sess_quit:
logger.error('AUTH: Session start error, calling kill')
self.event.kill()
|
tests/unit/metrics_tests/test_object_keypoint_similarity.py | Joeper214/blueoil | 248 | 12787130 | <filename>tests/unit/metrics_tests/test_object_keypoint_similarity.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import pytest
import numpy as np
from blueoil.metrics.object_keypoint_similarity import compute_object_keypoint_similarity, _compute_oks
# Apply set_test_environment() in conftest.py to all tests in this file.
pytestmark = pytest.mark.usefixtures("set_test_environment")
def test_compute_oks():
# case1
joints_gt = np.zeros((17, 3))
joints_pred = np.zeros((17, 3))
image_size = (160, 160)
joints_gt[0, 0] = 80
joints_gt[0, 1] = 80
joints_gt[0, 2] = 1
joints_pred[0, 0] = 70
joints_pred[0, 1] = 70
joints_pred[0, 2] = 1
joints_pred[2, 0] = 1000
joints_pred[2, 1] = 1000
joints_pred[2, 2] = 1
expected = 0.2358359
result = _compute_oks(joints_gt, joints_pred, image_size)
assert np.allclose(result, expected)
# case2
joints_gt = np.zeros((17, 3))
joints_pred = np.zeros((17, 3))
image_size = (160, 160)
joints_gt[0, 0] = 80
joints_gt[0, 1] = 80
joints_gt[0, 2] = 0
joints_pred[0, 0] = 70
joints_pred[0, 1] = 70
joints_pred[0, 2] = 1
joints_pred[2, 0] = 1000
joints_pred[2, 1] = 1000
joints_pred[2, 2] = 1
expected = -1
result = _compute_oks(joints_gt, joints_pred, image_size)
assert np.allclose(result, expected)
# case3
joints_gt = np.zeros((17, 3))
joints_pred1 = np.zeros((17, 3))
joints_pred2 = np.zeros((17, 3))
image_size = (160, 160)
joints_gt[0, 0] = 80
joints_gt[0, 1] = 80
joints_gt[0, 2] = 1
joints_pred1[0, 0] = 70
joints_pred1[0, 1] = 70
joints_pred1[0, 2] = 1
joints_pred2[0, 0] = 78
joints_pred2[0, 1] = 78
joints_pred2[0, 2] = 1
result1 = _compute_oks(joints_gt, joints_pred1, image_size)
result2 = _compute_oks(joints_gt, joints_pred2, image_size)
assert result2 > result1
def test_compute_object_keypoint_similarity():
# case1
joints_gt = np.zeros((1, 17, 3))
joints_pred = np.zeros((1, 17, 3))
image_size = (160, 160)
joints_gt[0, 0, 0] = 80
joints_gt[0, 0, 1] = 80
joints_gt[0, 0, 2] = 1
joints_pred[0, 0, 0] = 70
joints_pred[0, 0, 1] = 70
joints_pred[0, 0, 2] = 1
expected = 0.2358359
result = compute_object_keypoint_similarity(joints_gt, joints_pred, image_size)
assert np.allclose(result, expected)
# case2
joints_gt = np.zeros((2, 17, 3))
joints_pred = np.zeros((2, 17, 3))
image_size = (160, 160)
joints_gt[0, 0, 0] = 80
joints_gt[0, 0, 1] = 80
joints_gt[0, 0, 2] = 1
joints_pred[0, 0, 0] = 70
joints_pred[0, 0, 1] = 70
joints_pred[0, 0, 2] = 1
joints_gt[1, 0, 0] = 50
joints_gt[1, 0, 1] = 50
joints_gt[1, 0, 2] = 1
joints_pred[1, 0, 0] = 50
joints_pred[1, 0, 1] = 50
joints_pred[1, 0, 2] = 1
expected = 0.61791795
result = compute_object_keypoint_similarity(joints_gt, joints_pred, image_size)
assert np.allclose(result, expected)
# case3
joints_gt = np.zeros((2, 17, 3))
joints_pred = np.zeros((2, 17, 3))
image_size = (160, 160)
joints_gt[0, 0, 0] = 80
joints_gt[0, 0, 1] = 80
joints_pred[0, 0, 0] = 70
joints_pred[0, 0, 1] = 70
joints_pred[0, 0, 2] = 1
joints_gt[1, 0, 0] = 50
joints_gt[1, 0, 1] = 50
joints_pred[1, 0, 0] = 50
joints_pred[1, 0, 1] = 50
joints_pred[1, 0, 2] = 1
try:
compute_object_keypoint_similarity(joints_gt, joints_pred, image_size)
except ValueError:
pass
if __name__ == '__main__':
test_compute_oks()
test_compute_object_keypoint_similarity()
|
Chapter06/src/features.py | jvstinian/Python-Reinforcement-Learning-Projects | 114 | 12787136 | import numpy as np
from config import GOPARAMETERS
def stone_features(board_state):
# 16 planes, where every other plane represents the stones of a particular color
# which means we track the stones of the last 8 moves.
features = np.zeros([16, GOPARAMETERS.N, GOPARAMETERS.N], dtype=np.uint8)
num_deltas_avail = board_state.board_deltas.shape[0]
cumulative_deltas = np.cumsum(board_state.board_deltas, axis=0)
last_eight = np.tile(board_state.board, [8, 1, 1])
last_eight[1:num_deltas_avail + 1] -= cumulative_deltas
last_eight[num_deltas_avail +1:] = last_eight[num_deltas_avail].reshape(1, GOPARAMETERS.N, GOPARAMETERS.N)
features[::2] = last_eight == board_state.to_play
features[1::2] = last_eight == -board_state.to_play
return np.rollaxis(features, 0, 3)
def color_to_play_feature(board_state):
# 1 plane representing which color is to play
# The plane is filled with 1's if the color to play is black; 0's otherwise
if board_state.to_play == GOPARAMETERS.BLACK:
return np.ones([GOPARAMETERS.N, GOPARAMETERS.N, 1], dtype=np.uint8)
else:
return np.zeros([GOPARAMETERS.N, GOPARAMETERS.N, 1], dtype=np.uint8)
def extract_features(board_state):
stone_feat = stone_features(board_state=board_state)
turn_feat = color_to_play_feature(board_state=board_state)
all_features = np.concatenate([stone_feat, turn_feat], axis=2)
return all_features
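# Hedged editorial note: np.rollaxis puts the stone planes channel-last, so
# extract_features returns an array of shape (N, N, 17): 16 alternating
# planes of the player to move and the opponent over the last eight board
# states, plus one colour-to-play plane.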
|
educative/binarysearch/findhighestnumber.py | monishshah18/python-cp-cheatsheet | 140 | 12787151 | <filename>educative/binarysearch/findhighestnumber.py<gh_stars>100-1000
def find_highest_number(A):
if len(A) < 3:
return None
    def condition(value) -> bool:
        # Out-of-range neighbors are treated as negative infinity so the
        # leftmost and rightmost positions are handled explicitly instead of
        # relying on negative-index wrap-around.
        left_neighbor = A[value - 1] if value > 0 else float('-inf')
        right_neighbor = A[value + 1] if value < len(A) - 1 else float('-inf')
        if left_neighbor < A[value] < right_neighbor:
            return False
        elif left_neighbor > A[value] > right_neighbor:
            return True
        elif left_neighbor < A[value] and A[value] > right_neighbor:
            return True
        return False
left, right = 0, len(A) -1
while left < right:
mid = left + (right - left) // 2
if condition(mid):
right = mid
else:
left = mid + 1
return A[left]
# Peak element is "5".
A = [1, 2, 3, 4, 5, 4, 3, 2, 1]
print(find_highest_number(A))
A = [1, 6, 5, 4, 3, 2, 1]
print(find_highest_number(A))
A = [1, 2, 3, 4, 5]
print(find_highest_number(A))
A = [5, 4, 3, 2, 1]
print(find_highest_number(A))
|
cocrawler/webserver.py | joye1503/cocrawler | 166 | 12787202 | <filename>cocrawler/webserver.py
import logging
import asyncio
from aiohttp import web
from . import config
LOGGER = logging.getLogger(__name__)
def make_app():
loop = asyncio.get_event_loop()
# TODO switch this to socket.getaddrinfo() -- see https://docs.python.org/3/library/socket.html
serverip = config.read('REST', 'ServerIP')
if serverip is None:
return None
serverport = config.read('REST', 'ServerPort')
increment = False
if isinstance(serverport, str) and serverport.endswith('+'):
increment = True
serverport = serverport[:-1]
app = web.Application()
app.router.add_get('/', frontpage)
app.router.add_get('/api/{name}', api)
# aiohttp 3.0 has AppRunner(). maybe I should switch to it?
# also web.run_app(app, access_log=None) to turn off logging
handler = app.make_handler()
while True:
try:
f = loop.create_server(handler, serverip, serverport)
break
except OSError as e: # address already in use
if increment:
LOGGER.info('OSError starting webserver: %s', repr(e))
serverport += 1
LOGGER.info('incrementing port to %d', serverport)
else:
raise
srv = loop.run_until_complete(f)
LOGGER.info('REST serving on %s', srv.sockets[0].getsockname())
app['cocrawler'] = handler, srv
return app
def close(app):
if app is None:
return
handler, srv = app['cocrawler']
loop = asyncio.get_event_loop()
srv.close()
loop.run_until_complete(srv.wait_closed())
loop.run_until_complete(app.shutdown())
loop.run_until_complete(app.cleanup())
async def frontpage(request):
return web.Response(text='Hello, world!')
async def api(request):
name = request.match_info['name']
data = {'name': name}
return web.json_response(data)
|
office365/sharepoint/sites/usage_info.py | theodoriss/Office365-REST-Python-Client | 544 | 12787206 | <reponame>theodoriss/Office365-REST-Python-Client
from office365.runtime.client_value import ClientValue
class UsageInfo(ClientValue):
"""
Provides fields used to access information regarding site collection usage.
"""
def __init__(self, bandwidth=None, discussion_storage=None, visits=None):
"""
:param long bandwidth: Contains the cumulative bandwidth used by the site collection on the previous day or
on the last day that log files were processed, which is tracked by usage analysis code.
:param long discussion_storage: Contains the amount of storage, identified in bytes,
used by Web discussion data in the site collection.
:param long visits: Contains the cumulative number of visits to the site collection,
which is tracked by the usage analysis code.
"""
super(UsageInfo, self).__init__()
self.Bandwidth = bandwidth
self.DiscussionStorage = discussion_storage
self.Visits = visits
|
python/paddle_serving_client/metric/auc.py | loveululu/Serving | 789 | 12787208 | <filename>python/paddle_serving_client/metric/auc.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns
def tied_rank(x):
"""
Computes the tied rank of elements in x.
This function computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
        The tied rank of each element in x
"""
sorted_x = sorted(zip(x, range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
last_rank = i
if i == len(sorted_x) - 1:
for j in range(last_rank, i + 1):
r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
return r
def auc(actual, posterior):
"""
Computes the area under the receiver-operater characteristic (AUC)
This function computes the AUC error metric for binary classification.
Parameters
----------
actual : list of binary numbers, numpy array
The ground truth value
posterior : same type as actual
Defines a ranking on the binary numbers, from most likely to
be positive to least likely to be positive.
Returns
-------
score : double
        The area under the ROC curve (AUC) computed from the given ranking
"""
r = tied_rank(posterior)
num_positive = len([0 for x in actual if x == 1])
num_negative = len(actual) - num_positive
sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1])
auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
(num_negative * num_positive))
return auc
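# Hedged usage note (added): for example, auc([1, 0], [0.9, 0.1]) == 1.0 for a
# perfectly ranked pair and auc([0, 1], [0.9, 0.1]) == 0.0 for a fully
# inverted one.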
|
pinball/workflow/job.py | DotModus/pinball | 1,143 | 12787280 | <reponame>DotModus/pinball
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of job metadata included in job tokens.
Job object describes job inputs, outputs, and all information required to
execute a job (e.g., a command line of a shell job or class name of a data
job)."""
import abc
from pinball.config.utils import get_log
from pinball.persistence.token_data import TokenData
from pinball.workflow.name import Name
__author__ = '<NAME>'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = get_log('pinball.workflow.worker')
class Job(TokenData):
"""Parent class for specialized job types."""
__metaclass__ = abc.ABCMeta
IS_CONDITION = False
def __init__(self, name=None, inputs=None, outputs=None, emails=None,
max_attempts=1, retry_delay_sec=0, warn_timeout_sec=None,
abort_timeout_sec=None):
self.name = name
self.inputs = inputs if inputs is not None else []
self.outputs = outputs if outputs is not None else []
self.emails = emails if emails is not None else []
self.max_attempts = max_attempts
self.retry_delay_sec = retry_delay_sec
self.warn_timeout_sec = warn_timeout_sec
self.abort_timeout_sec = abort_timeout_sec
assert self.max_attempts > 0
self.disabled = False
self.history = []
self.events = []
@property
def _COMPATIBILITY_ATTRIBUTES(self):
return {
'emails': [],
'disabled': False,
'max_attempts': 1,
'events': [],
'warn_timeout_sec': None,
'abort_timeout_sec': None,
'retry_delay_sec': 0,
}
@abc.abstractmethod
def info(self):
return
def retry(self):
"""Decide if the job should be retried.
Returns:
True if the job should be retried, otherwise False.
"""
if not self.history:
return False
last_record = self.history[-1]
current_instance = last_record.instance
assert last_record.exit_code != 0
failed_runs = 0
for record in reversed(self.history):
if record.instance != current_instance:
break
if record.exit_code != 0:
# There may have been successful runs in the past if we are
# re-doing an execution.
failed_runs += 1
if failed_runs >= self.max_attempts:
return False
return True
def truncate_history(self):
if self.IS_CONDITION and len(self.history) > self.max_attempts:
self.history = self.history[-self.max_attempts:]
def reload(self, new_job):
"""Reload job config from a new config.
Configuration elements defining the workflow topology (inputs and
outputs), execution history, or run-time values (events) are not
modified.
Args:
new_job: The new job configuration to update from.
"""
assert self.__class__ == new_job.__class__
self.emails = new_job.emails
self.max_attempts = new_job.max_attempts
class ShellJob(Job):
"""Shell job runs a command when executed."""
def __init__(self, name=None, inputs=None, outputs=None, emails=None,
max_attempts=1, retry_delay_sec=0, warn_timeout_sec=None,
abort_timeout_sec=None, command=None, cleanup_template=None):
super(ShellJob, self).__init__(name, inputs, outputs, emails,
max_attempts, retry_delay_sec,
warn_timeout_sec, abort_timeout_sec)
self.command = command
self.cleanup_template = cleanup_template
@property
def _COMPATIBILITY_ATTRIBUTES(self):
result = super(ShellJob, self)._COMPATIBILITY_ATTRIBUTES
result['cleanup_template'] = None
return result
def __str__(self):
return ('ShellJob(name=%s, inputs=%s, outputs=%s, emails=%s, '
'max_attempts=%d, retry_delay_sec=%d, warn_timeout_sec=%s, '
'abort_timeout_sec=%s, disabled=%s, command=%s, '
'cleanup_template=%s, events=%s, history=%s)' % (
self.name,
self.inputs,
self.outputs,
self.emails,
self.max_attempts,
self.retry_delay_sec,
self.warn_timeout_sec,
self.abort_timeout_sec,
self.disabled,
self.command,
self.cleanup_template,
self.events,
self.history))
def __repr__(self):
return self.__str__()
def info(self):
return 'command=%s' % self.command
def reload(self, new_job):
super(ShellJob, self).reload(new_job)
self.command = new_job.command
self.cleanup_template = new_job.cleanup_template
@staticmethod
def _get_command_attributes(template):
"""Extract attributes from a command string template.
E.g., for template 'ls %(dir1)s %(dir2)s' the result is
['dir1', 'dir2'].
Args:
template: The template to extract attributes from.
Returns:
The list of named attributes extracted from the template.
"""
class Extractor:
"""Helper class extracting attributes from a string template.
"""
def __init__(self):
self.attributes = set()
def __getitem__(self, attribute):
self.attributes.add(attribute)
return 0
extractor = Extractor()
try:
template % extractor
except ValueError:
LOG.exception('failed to customize template %s', template)
return list(extractor.attributes)
def _consolidate_event_attributes(self):
"""Consolidate attributes in triggering events.
Iterate over events in the most recent execution record and combine
them into one dictionary mapping attribute names to their values. If
multiple events contain the same attribute, the return value will be a
comma separated string of values from all those events.
Returns:
Dictionary of consolidated event attribute key-values.
"""
assert self.history
last_execution_record = self.history[-1]
result = {}
for event in last_execution_record.events:
for key, value in event.attributes.items():
new_value = result.get(key)
if new_value:
new_value += ',%s' % value
else:
new_value = value
result[key] = new_value
return result
def customize_command(self):
"""Specialize the command with attribute values extracted from events.
Returns:
Job command with parameter values replaced by attributes extracted
from the triggering events. If a parameter is not present in the
event attribute set, it is replaced with an empty string.
"""
attributes = {}
command_attributes = ShellJob._get_command_attributes(self.command)
for attribute in command_attributes:
attributes[attribute] = ''
event_attributes = self._consolidate_event_attributes()
attributes.update(event_attributes)
try:
return self.command % attributes
except ValueError:
LOG.exception('failed to customize command %s', self.command)
return self.command
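    # Hedged illustrative note (added): given command "ls %(dir1)s" and a
    # triggering event carrying attributes {"dir1": "/tmp"}, customize_command()
    # returns "ls /tmp"; parameters that appear in no event are replaced with
    # empty strings.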
class ShellConditionJob(ShellJob):
IS_CONDITION = True
def __init__(self, name=None, outputs=None, emails=None, max_attempts=10,
retry_delay_sec=5 * 60, warn_timeout_sec=None,
abort_timeout_sec=None, command=None, cleanup_template=None):
super(ShellConditionJob, self).__init__(
name=name,
inputs=[Name.WORKFLOW_START_INPUT],
outputs=outputs,
emails=emails,
max_attempts=max_attempts,
retry_delay_sec=retry_delay_sec,
warn_timeout_sec=warn_timeout_sec,
abort_timeout_sec=abort_timeout_sec,
command=command,
cleanup_template=cleanup_template)
|
oletools/thirdparty/xglob/__init__.py | maniVix/oletools | 2,059 | 12787289 | <gh_stars>1000+
from .xglob import *
|
code/src/plan2scene/crop_select/util.py | madhawav/plan2scene | 305 | 12787325 | <gh_stars>100-1000
from plan2scene.common.image_description import ImageDescription, ImageSource
from plan2scene.common.residence import Room, House
from plan2scene.config_manager import ConfigManager
from plan2scene.texture_gen.predictor import TextureGenPredictor
from plan2scene.texture_gen.utils.io import load_conf_eval
import logging
import os.path as osp
from plan2scene.utils.io import load_image
def fill_texture_embeddings(conf: ConfigManager, house: House, predictor: TextureGenPredictor) -> None:
"""
Compute surface texture embeddings of a house
:param conf: Config Manager
:param house: House processed
:param predictor: Predictor with loaded checkpoint
"""
for room_index, room in house.rooms.items():
assert isinstance(room, Room)
for photo in room.photos:
for surface in conf.surfaces:
surface_instances = [i for i in range(conf.texture_gen.masks_per_surface[surface])]
for surface_instance in surface_instances:
for crop_instance in range(conf.texture_gen.crops_per_mask):
candidate_key = <KEY> (photo, surface_instance, crop_instance)
if osp.exists(osp.join(conf.data_paths.rectified_crops_path, surface, candidate_key + ".png")):
image = load_image(
osp.join(conf.data_paths.rectified_crops_path, surface, candidate_key + ".png"))
emb, loss = predictor.predict_embs([image])
room.surface_textures[surface][candidate_key] = ImageDescription(image, ImageSource.NEURAL_SYNTH)
room.surface_embeddings[surface][candidate_key] = emb
room.surface_losses[surface][candidate_key] = loss
def fill_house_textures(conf: ConfigManager, house: House, image_source: ImageSource, skip_existing_textures: bool, key="prop",
predictor: TextureGenPredictor = None) -> None:
"""
Synthesize textures for a house using the assigned texture embeddings.
:param conf: Config Manager
:param house: House to populate textures
:param key: Key of candidate texture embeddings.
:param image_source: Generator of the images
:param predictor: Predictor used to synthesize textures
    :param skip_existing_textures: Do not synthesize if a texture already exists
"""
if predictor is None:
predictor = TextureGenPredictor(
conf=load_conf_eval(config_path=conf.texture_gen.texture_synth_conf),
rgb_median_emb=conf.texture_gen.rgb_median_emb)
predictor.load_checkpoint(checkpoint_path=conf.texture_gen.checkpoint_path)
for room_index, room in house.rooms.items():
assert isinstance(room, Room)
for surface in room.surface_embeddings:
if key in room.surface_embeddings[surface]:
if skip_existing_textures and key in room.surface_textures[surface]:
continue
generated_crops, substance_names, extra = predictor.predict_textures(
combined_embs=[room.surface_embeddings[surface][key]],
multiplier=conf.texture_gen.output_multiplier)
room.surface_textures[surface][key] = ImageDescription(generated_crops[0], image_source)
def fill_textures(conf: ConfigManager, houses: dict, image_source: ImageSource, skip_existing_textures: bool, key: str = "prop", log: bool = True,
predictor: TextureGenPredictor = None) -> None:
"""
Synthesize textures for houses using the assigned texture embeddings.
:param conf: Config manager
:param houses: Dictionary of houses.
:param image_source: Image source specified to the synthesized textures
:param skip_existing_textures: Specify true to keep existing textures. Specify false to replace existing textures with new textures.
:param key: Key of embeddings used to synthesize textures.
:param log: Set true to enable logging.
:param predictor: Predictor used to synthesize textures.
"""
if predictor is None:
predictor = TextureGenPredictor(
conf=load_conf_eval(config_path=conf.texture_gen.texture_synth_conf),
rgb_median_emb=conf.texture_gen.rgb_median_emb)
predictor.load_checkpoint(checkpoint_path=conf.texture_gen.checkpoint_path)
for i, (house_key, house) in enumerate(houses.items()):
if log:
logging.info("[%d/%d] Generating Textures %s" % (i, len(houses), house_key))
fill_house_textures(conf, house, skip_existing_textures=skip_existing_textures, key=key, predictor=predictor, image_source=image_source)
def get_least_key(kv):
"""
Given a dictionary, returns the key with minimum value.
:param kv: Dictionary considered.
:return: Key with the minimum value.
"""
min_k = None
min_v = None
for k, v in kv.items():
if min_v is None or v.item() < min_v:
min_k = k
min_v = v.item()
return min_k
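# Hedged editorial note: the dictionary values are expected to expose .item()
# (e.g. torch tensors or numpy scalars), which is how the per-crop VGG losses
# above are stored.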
def vgg_crop_select(conf: ConfigManager, house: House, predictor: TextureGenPredictor) -> None:
"""
Assigns the least VGG loss crop for each surface of the house.
:param conf: ConfigManager
:param house: House to update
:param predictor: Predictor used to synthesize textures
"""
for room_index, room in house.rooms.items():
assert isinstance(room, Room)
# Calculate the least VGG loss embeddings
for surface in room.surface_embeddings:
least_key = get_least_key(room.surface_losses[surface])
if least_key is not None:
room.surface_embeddings[surface] = {"prop": room.surface_embeddings[surface][least_key]}
room.surface_losses[surface] = {"prop": room.surface_losses[surface][least_key]}
else:
room.surface_embeddings[surface] = {}
room.surface_losses[surface] = {}
fill_textures(conf, {house.house_key: house}, predictor=predictor, log=False, image_source=ImageSource.VGG_CROP_SELECT, skip_existing_textures=False)
|
nnet/separate.py | on1262/conv-tasnet | 149 | 12787371 | <gh_stars>100-1000
#!/usr/bin/env python
# wujian@2018
import os
import argparse
import torch as th
import numpy as np
from conv_tas_net import ConvTasNet
from libs.utils import load_json, get_logger
from libs.audio import WaveReader, write_wav
logger = get_logger(__name__)
class NnetComputer(object):
def __init__(self, cpt_dir, gpuid):
self.device = th.device(
"cuda:{}".format(gpuid)) if gpuid >= 0 else th.device("cpu")
nnet = self._load_nnet(cpt_dir)
self.nnet = nnet.to(self.device) if gpuid >= 0 else nnet
# set eval model
self.nnet.eval()
def _load_nnet(self, cpt_dir):
nnet_conf = load_json(cpt_dir, "mdl.json")
nnet = ConvTasNet(**nnet_conf)
cpt_fname = os.path.join(cpt_dir, "best.pt.tar")
cpt = th.load(cpt_fname, map_location="cpu")
nnet.load_state_dict(cpt["model_state_dict"])
logger.info("Load checkpoint from {}, epoch {:d}".format(
cpt_fname, cpt["epoch"]))
return nnet
def compute(self, samps):
with th.no_grad():
raw = th.tensor(samps, dtype=th.float32, device=self.device)
sps = self.nnet(raw)
sp_samps = [np.squeeze(s.detach().cpu().numpy()) for s in sps]
return sp_samps
def run(args):
mix_input = WaveReader(args.input, sample_rate=args.fs)
computer = NnetComputer(args.checkpoint, args.gpu)
for key, mix_samps in mix_input:
logger.info("Compute on utterance {}...".format(key))
spks = computer.compute(mix_samps)
norm = np.linalg.norm(mix_samps, np.inf)
for idx, samps in enumerate(spks):
samps = samps[:mix_samps.size]
# norm
samps = samps * norm / np.max(np.abs(samps))
write_wav(
os.path.join(args.dump_dir, "spk{}/{}.wav".format(
idx + 1, key)),
samps,
fs=args.fs)
logger.info("Compute over {:d} utterances".format(len(mix_input)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"Command to do speech separation in time domain using ConvTasNet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("checkpoint", type=str, help="Directory of checkpoint")
parser.add_argument(
"--input", type=str, required=True, help="Script for input waveform")
parser.add_argument(
"--gpu",
type=int,
default=-1,
help="GPU device to offload model to, -1 means running on CPU")
parser.add_argument(
"--fs", type=int, default=8000, help="Sample rate for mixture input")
parser.add_argument(
"--dump-dir",
type=str,
default="sps_tas",
help="Directory to dump separated results out")
args = parser.parse_args()
    run(args)
|
pycaw/api/mmdeviceapi/depend/structures.py | Jan-Zeiseweis/pycaw | 234 | 12787386 | <gh_stars>100-1000
from ctypes import Structure, Union
from ctypes.wintypes import (
DWORD, LONG, LPWSTR, ULARGE_INTEGER, VARIANT_BOOL, WORD)
from comtypes import GUID
from comtypes.automation import VARTYPE, VT_BOOL, VT_CLSID, VT_LPWSTR, VT_UI4
from future.utils import python_2_unicode_compatible
class PROPVARIANT_UNION(Union):
_fields_ = [
('lVal', LONG),
('uhVal', ULARGE_INTEGER),
('boolVal', VARIANT_BOOL),
('pwszVal', LPWSTR),
('puuid', GUID),
]
class PROPVARIANT(Structure):
_fields_ = [
('vt', VARTYPE),
('reserved1', WORD),
('reserved2', WORD),
('reserved3', WORD),
('union', PROPVARIANT_UNION),
]
def GetValue(self):
vt = self.vt
if vt == VT_BOOL:
return self.union.boolVal != 0
elif vt == VT_LPWSTR:
# return Marshal.PtrToStringUni(union.pwszVal)
return self.union.pwszVal
elif vt == VT_UI4:
return self.union.lVal
elif vt == VT_CLSID:
# TODO
# return (Guid)Marshal.PtrToStructure(union.puuid, typeof(Guid))
return
else:
return "%s:?" % (vt)
@python_2_unicode_compatible
class PROPERTYKEY(Structure):
_fields_ = [
('fmtid', GUID),
('pid', DWORD),
]
def __str__(self):
return "%s %s" % (self.fmtid, self.pid)
|
pymagnitude/third_party/allennlp/modules/token_embedders/token_characters_encoder.py | tpeng/magnitude | 1,520 | 12787403 | <filename>pymagnitude/third_party/allennlp/modules/token_embedders/token_characters_encoder.py
from __future__ import absolute_import
import torch
from allennlp.common import Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
class TokenCharactersEncoder(TokenEmbedder):
u"""
A ``TokenCharactersEncoder`` takes the output of a
:class:`~allennlp.data.token_indexers.TokenCharactersIndexer`, which is a tensor of shape
(batch_size, num_tokens, num_characters), embeds the characters, runs a token-level encoder, and
returns the result, which is a tensor of shape (batch_size, num_tokens, encoding_dim). We also
optionally apply dropout after the token-level encoder.
We take the embedding and encoding modules as input, so this class is itself quite simple.
"""
def __init__(self, embedding , encoder , dropout = 0.0) :
super(TokenCharactersEncoder, self).__init__()
self._embedding = TimeDistributed(embedding)
self._encoder = TimeDistributed(encoder)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
def get_output_dim(self) :
return self._encoder._module.get_output_dim() # pylint: disable=protected-access
def forward(self, token_characters ) : # pylint: disable=arguments-differ
mask = (token_characters != 0).long()
return self._dropout(self._encoder(self._embedding(token_characters), mask))
# The setdefault requires a custom from_params
@classmethod
def from_params(cls, vocab , params ) : # type: ignore
# pylint: disable=arguments-differ
embedding_params = params.pop(u"embedding")
# Embedding.from_params() uses "tokens" as the default namespace, but we need to change
# that to be "token_characters" by default.
embedding_params.setdefault(u"vocab_namespace", u"token_characters")
embedding = Embedding.from_params(vocab, embedding_params)
encoder_params = params.pop(u"encoder")
encoder = Seq2VecEncoder.from_params(encoder_params)
dropout = params.pop_float(u"dropout", 0.0)
params.assert_empty(cls.__name__)
return cls(embedding, encoder, dropout)
TokenCharactersEncoder = TokenEmbedder.register(u"character_encoding")(TokenCharactersEncoder)
|
tests/Unit/PointwiseFunctions/GeneralRelativity/IndexManipulation.py | nilsvu/spectre | 117 | 12787409 | <reponame>nilsvu/spectre
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def raise_or_lower_first_index(tensor, metric):
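    # Contracts the metric with the first index of a rank-3 tensor:
    # result[j, k, l] = sum_i metric[i, j] * tensor[i, k, l].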
return np.einsum("ij,ikl", metric, tensor)
def trace_last_indices(tensor, metric):
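    # Traces the last two indices of a rank-3 tensor with the metric:
    # result[k] = sum_{i, j} metric[i, j] * tensor[k, i, j].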
return np.einsum("ij,kij", metric, tensor)
|
utils_nlp/eval/question_answering.py | Anita1017/nlp-recipes | 4,407 | 12787421 | """ Official evaluation script for SQuAD version 2.0.
Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
"""
import collections
import json
import re
import string
def get_raw_scores(qa_ids, actuals, preds):
"""
Computes exact match and F1 scores without applying any unanswerable probability threshold.
Args:
qa_ids (list): Unique ids corresponding to the answers in `actuals`.
actuals (list): List of ground truth answers.
preds (dict): Dictionary with qa_id as keys and predicted answers as values.
Returns:
tuple: (exact_match, f1)
"""
# Helper functions
def _normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _get_tokens(s):
"""Normalizes text and returns white-space tokenized tokens. """
if not s:
return []
return _normalize_answer(s).split()
def _compute_exact(a_gold, a_pred):
"""Compute the exact match between two sentences after normalization.
Returns:
int: 1 if two sentences match exactly after normalization,
0 otherwise.
"""
return int(_normalize_answer(a_gold) == _normalize_answer(a_pred))
def _compute_f1(a_gold, a_pred):
"""
Compute F1 score based on token overlapping between two
sentences.
"""
gold_toks = _get_tokens(a_gold)
pred_toks = _get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
# Helper functions end
exact_scores = {}
f1_scores = {}
for qid, gold_answers in zip(qa_ids, actuals):
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if qid not in preds:
print("Missing prediction for %s" % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
if isinstance(gold_answers, str):
gold_answers = [gold_answers]
exact_scores[qid] = max(_compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(_compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
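# Hedged illustrative note (added): get_raw_scores(["q1"], [["the cat"]],
# {"q1": "The cat!"}) returns ({"q1": 1}, {"q1": 1.0}), since normalization
# lowercases, strips punctuation and articles, and collapses whitespace.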
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans, unanswerable_exists=False):
"""
Find the best threshold to determine a question is impossible to answer.
Args:
preds (dict): Dictionary with qa_id as keys and predicted answers as values.
scores (dict): Dictionary with qa_id as keys and raw evaluation scores (exact_match or
f1) as values.
na_probs (dict): Dictionary with qa_id as keys and unanswerable probabilities as values.
        qid_to_has_ans (dict): Dictionary with qa_id as keys and boolean values indicating
            whether the question has an answer.
        unanswerable_exists (bool, optional): Whether there are unanswerable questions in the data.
Defaults to False.
Returns:
tuple: score after applying best threshold, best threshold, (score for answerable
questions after applying best threshold, if unanswerable_exists=True)
"""
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
# If na_prob > threshold, the question is considered as unanswerable by the prediction.
    # Initially, the threshold is 0, so all questions are considered unanswerable by the
    # predictions. cur_score therefore starts as the number of actually unanswerable questions
    # in the data (i.e. those correctly predicted as unanswerable).
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
# Sorted in ascending order
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
        # When the current na_prob is used as the threshold, every prediction with a larger
        # na_prob is considered unanswerable, while the current question is considered answerable.
if qid not in scores:
continue
if qid_to_has_ans[qid]:
# Current question has ground truth answer, the prediction is correct. The raw score
# is added to cur_score
diff = scores[qid]
else:
# Current question doesn't have ground truth answer.
if preds[qid]:
# Prediction is not empty, incorrect. cur_score -= 1
diff = -1
else:
# Prediction is empty, correct, the original score 1 from num_no_ans is preserved.
diff = 0
cur_score += diff
if cur_score > best_score:
# When cur_score > best_score, the threshold can increase so that more questions are
# considered as answerable and fewer questions are considered as unanswerable.
# Imagine a PDF with two humps with some overlapping, the x axis is the na_prob. The
# hump on the left is answerable questions and the hump on the right is unanswerable
# questions.
# At some point, the number of actual answerable questions decreases, and we got more
# penalty from considering unanswerable questions as answerable than the score added
# from actual answerable questions, we will not change the threshold anymore and the
# optimal threshold is found.
best_score = cur_score
best_thresh = na_probs[qid]
if not unanswerable_exists:
return 100.0 * best_score / len(scores), best_thresh
else:
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]:
continue
has_ans_cnt += 1
if qid not in scores:
continue
has_ans_score += scores[qid]
return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
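# Toy sketch of the threshold search above (hypothetical probabilities): q_ans is
# answerable and answered correctly, q_noans is unanswerable and left blank, so the
# best threshold found (0.1 here) classifies both correctly and the score is 100.
if __name__ == "__main__":
    _p = {"q_ans": "42", "q_noans": ""}
    _s = {"q_ans": 1, "q_noans": 1}
    _na = {"q_ans": 0.1, "q_noans": 0.9}
    _has = {"q_ans": True, "q_noans": False}
    print(find_best_thresh(_p, _s, _na, _has))  # (100.0, 0.1)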
def find_all_best_thresh(
main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans, unanswerable_exists=False
):
"""
Update raw evaluation scores by finding the best threshold to determine a question is
impossible to answer.
Args:
main_eval (dict): Dictionary with raw evaluation scores without apply any threshold.
preds (dict): Dictionary with qa_id as keys and predicted answers as values.
exact_raw (dict): Dictionary with qa_id as keys and raw exact_match scores as values.
f1_raw (dict): Dictionary with qa_id as keys and raw f1 scores as values.
na_probs (dict): Dictionary with qa_id as keys and unanswerable probabilities as values.
        qid_to_has_ans (dict): Dictionary with qa_id as keys and boolean values indicating
            whether the question has an answer.
        unanswerable_exists (bool, optional): Whether there are unanswerable questions in the data.
Defaults to False.
Returns:
dict: Updated `main_eval` with scores after applying best threshold and best threshold
for each score.
"""
all_exact = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans, unanswerable_exists)
all_f1 = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans, unanswerable_exists)
main_eval["best_exact"] = all_exact[0]
main_eval["best_exact_thresh"] = all_exact[1]
main_eval["best_f1"] = all_f1[0]
main_eval["best_f1_thresh"] = all_f1[1]
if unanswerable_exists:
main_eval["has_ans_exact"] = all_exact[2]
main_eval["has_ans_f1"] = all_f1[2]
def evaluate_qa(
actual_dataset, preds, na_probs=None, na_prob_thresh=0, unanswerable_exists=False, out_file=None
):
"""
Evaluate question answering prediction results against ground truth answers.
    Args:
actual_dataset (:class:`utils_nlp.dataset.pytorch.QADataset`): Input question answering
dataset with ground truth answers.
preds (dict): The key of the dictionary is the qa_id in the original
:class:`utils_nlp.dataset.pytorch.QADataset`. The values of the dictionary are
the predicted answer texts in string type.
na_probs (dict, optional): Dictionary of qa_id and unanswerable probability pairs.
If None, unanswerable probabilities are all set to zero. Defaults to None.
na_prob_thresh (float, optional): Probability threshold to predict a question to be
unanswerable. For an unanswerable question, if `na_probs` > `na_prob_thresh`,
the prediction is considered as correct. Otherwise, the prediction is considered as
incorrect. Defaults to 0.
        unanswerable_exists (bool, optional): Whether unanswerable questions exist in the data.
            Defaults to False.
        out_file (str, optional): Path of the file to save the evaluation results to.
            Defaults to None.
Returns:
dict: A dictionary with exact_match and f1 values.
"""
# Helper functions
def _apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
"""Update the input scores by applying unanswerable probability threshold."""
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def _make_eval_dict(exact_scores, f1_scores, qid_list=None):
"""Create a dictionary of evaluation results."""
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(f1_scores.values()) / total),
("total", total),
]
)
else:
total = len(qid_list)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
def _merge_eval(main_eval, new_eval, prefix):
"""Merge multiple evaluation result dictionaries."""
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
# Helper functions end
if na_probs is None:
na_probs_available = False
na_probs = {k: 0.0 for k in preds}
else:
na_probs_available = True
qa_ids = [item.qa_id for item in actual_dataset]
actuals = [item.answer_text for item in actual_dataset]
qid_to_has_ans = {qa_id: bool(ans) for (qa_id, ans) in zip(qa_ids, actuals)}
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(qa_ids, actuals, preds)
exact_thresh = _apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, na_prob_thresh)
f1_thresh = _apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, na_prob_thresh)
out_eval = _make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = _make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
_merge_eval(out_eval, has_ans_eval, "HasAns")
if no_ans_qids:
no_ans_eval = _make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
_merge_eval(out_eval, no_ans_eval, "NoAns")
if na_probs_available:
find_all_best_thresh(
out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans, unanswerable_exists
)
if out_file:
with open(out_file, "w") as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
return out_eval
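# End-to-end sketch on a tiny in-memory dataset: a namedtuple stands in for the
# QADataset items, since only the qa_id and answer_text attributes are read here.
if __name__ == "__main__":
    _Item = collections.namedtuple("_Item", ["qa_id", "answer_text"])
    _dataset = [_Item("q1", "Paris"), _Item("q2", "")]
    _predictions = {"q1": "Paris", "q2": ""}
    evaluate_qa(_dataset, _predictions)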
|
scripts/issues/issue15.py | Jhsmit/awesome-panel | 179 | 12787427 |
import panel as pn
text = r"""
```math
f(x) = \int_{-\infty}^\infty
\hat f(\xi)\,e^{2 \pi i \xi x}
\,d\xi
```
"""
app = pn.Column(pn.pane.Markdown(text))
app.servable()
|
Configuration/DataProcessing/python/Impl/__init__.py | Purva-Chaudhari/cmssw | 852 | 12787473 |
#!/usr/bin/env python3
"""
_Impl_
Scenario Implementations
"""
__all__ = []
|
backend/ecs_tasks/delete_files/s3.py | guvenbz/amazon-s3-find-and-forget | 165 | 12787475 | import logging
from functools import lru_cache
from urllib.parse import urlencode, quote_plus
from boto_utils import fetch_job_manifest, paginate
from botocore.exceptions import ClientError
from utils import remove_none, retry_wrapper
logger = logging.getLogger(__name__)
def save(s3, client, buf, bucket, key, metadata, source_version=None):
"""
Save a buffer to S3, preserving any existing properties on the object
"""
# Get Object Settings
request_payer_args, _ = get_requester_payment(client, bucket)
object_info_args, _ = get_object_info(client, bucket, key, source_version)
tagging_args, _ = get_object_tags(client, bucket, key, source_version)
acl_args, acl_resp = get_object_acl(client, bucket, key, source_version)
extra_args = {
**request_payer_args,
**object_info_args,
**tagging_args,
**acl_args,
**{"Metadata": metadata},
}
logger.info("Object settings: %s", extra_args)
# Write Object Back to S3
logger.info("Saving updated object to s3://%s/%s", bucket, key)
contents = buf.read()
with s3.open("s3://{}/{}".format(bucket, key), "wb", **extra_args) as f:
f.write(contents)
s3.invalidate_cache() # TODO: remove once https://github.com/dask/s3fs/issues/294 is resolved
new_version_id = f.version_id
logger.info("Object uploaded to S3")
# GrantWrite cannot be set whilst uploading therefore ACLs need to be restored separately
write_grantees = ",".join(get_grantees(acl_resp, "WRITE"))
if write_grantees:
logger.info("WRITE grant found. Restoring additional grantees for object")
client.put_object_acl(
Bucket=bucket,
Key=key,
VersionId=new_version_id,
**{**request_payer_args, **acl_args, "GrantWrite": write_grantees,}
)
logger.info("Processing of file s3://%s/%s complete", bucket, key)
return new_version_id
@lru_cache()
def get_requester_payment(client, bucket):
"""
Generates a dict containing the request payer args supported when calling S3.
GetBucketRequestPayment call will be cached
:returns tuple containing the info formatted for ExtraArgs and the raw response
"""
request_payer = client.get_bucket_request_payment(Bucket=bucket)
return (
remove_none(
{
"RequestPayer": "requester"
if request_payer["Payer"] == "Requester"
else None,
}
),
request_payer,
)
@lru_cache()
def get_object_info(client, bucket, key, version_id=None):
"""
Generates a dict containing the non-ACL/Tagging args supported when uploading to S3.
HeadObject call will be cached
:returns tuple containing the info formatted for ExtraArgs and the raw response
"""
kwargs = {"Bucket": bucket, "Key": key, **get_requester_payment(client, bucket)[0]}
if version_id:
kwargs["VersionId"] = version_id
object_info = client.head_object(**kwargs)
return (
remove_none(
{
"CacheControl": object_info.get("CacheControl"),
"ContentDisposition": object_info.get("ContentDisposition"),
"ContentEncoding": object_info.get("ContentEncoding"),
"ContentLanguage": object_info.get("ContentLanguage"),
"ContentType": object_info.get("ContentType"),
"Expires": object_info.get("Expires"),
"Metadata": object_info.get("Metadata"),
"ServerSideEncryption": object_info.get("ServerSideEncryption"),
"StorageClass": object_info.get("StorageClass"),
"SSECustomerAlgorithm": object_info.get("SSECustomerAlgorithm"),
"SSEKMSKeyId": object_info.get("SSEKMSKeyId"),
"WebsiteRedirectLocation": object_info.get("WebsiteRedirectLocation"),
}
),
object_info,
)
@lru_cache()
def get_object_tags(client, bucket, key, version_id=None):
"""
Generates a dict containing the Tagging args supported when uploading to S3
GetObjectTagging call will be cached
:returns tuple containing tagging formatted for ExtraArgs and the raw response
"""
kwargs = {"Bucket": bucket, "Key": key}
if version_id:
kwargs["VersionId"] = version_id
tagging = client.get_object_tagging(**kwargs)
return (
remove_none(
{
"Tagging": urlencode(
{tag["Key"]: tag["Value"] for tag in tagging["TagSet"]},
quote_via=quote_plus,
)
}
),
tagging,
)
@lru_cache()
def get_object_acl(client, bucket, key, version_id=None):
"""
Generates a dict containing the ACL args supported when uploading to S3
GetObjectAcl call will be cached
:returns tuple containing ACL formatted for ExtraArgs and the raw response
"""
kwargs = {"Bucket": bucket, "Key": key, **get_requester_payment(client, bucket)[0]}
if version_id:
kwargs["VersionId"] = version_id
acl = client.get_object_acl(**kwargs)
existing_owner = {"id={}".format(acl["Owner"]["ID"])}
return (
remove_none(
{
"GrantFullControl": ",".join(
existing_owner | get_grantees(acl, "FULL_CONTROL")
),
"GrantRead": ",".join(get_grantees(acl, "READ")),
"GrantReadACP": ",".join(get_grantees(acl, "READ_ACP")),
"GrantWriteACP": ",".join(get_grantees(acl, "WRITE_ACP")),
}
),
acl,
)
def get_grantees(acl, grant_type):
prop_map = {
"CanonicalUser": ("ID", "id"),
"AmazonCustomerByEmail": ("EmailAddress", "emailAddress"),
"Group": ("URI", "uri"),
}
filtered = [
grantee["Grantee"]
for grantee in acl.get("Grants")
if grantee["Permission"] == grant_type
]
grantees = set()
for grantee in filtered:
identifier_type = grantee["Type"]
identifier_prop = prop_map[identifier_type]
grantees.add("{}={}".format(identifier_prop[1], grantee[identifier_prop[0]]))
return grantees
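# Small sketch of get_grantees on a hand-built ACL dict that mimics a GetObjectAcl
# response (the canonical-user ID is made up; no AWS call is made here).
if __name__ == "__main__":
    _acl = {
        "Owner": {"ID": "owner-id"},
        "Grants": [
            {"Grantee": {"Type": "CanonicalUser", "ID": "abc123"}, "Permission": "READ"},
            {"Grantee": {"Type": "Group", "URI": "http://acs.amazonaws.com/groups/global/AllUsers"}, "Permission": "READ"},
        ],
    }
    print(get_grantees(_acl, "READ"))  # {'id=abc123', 'uri=http://acs.amazonaws.com/groups/global/AllUsers'}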
@lru_cache()
def validate_bucket_versioning(client, bucket):
resp = client.get_bucket_versioning(Bucket=bucket)
versioning_enabled = resp.get("Status") == "Enabled"
mfa_delete_enabled = resp.get("MFADelete") == "Enabled"
if not versioning_enabled:
raise ValueError("Bucket {} does not have versioning enabled".format(bucket))
if mfa_delete_enabled:
raise ValueError("Bucket {} has MFA Delete enabled".format(bucket))
return True
@lru_cache()
def fetch_manifest(manifest_object):
return fetch_job_manifest(manifest_object)
def delete_old_versions(client, input_bucket, input_key, new_version):
try:
resp = list(
paginate(
client,
client.list_object_versions,
["Versions", "DeleteMarkers"],
Bucket=input_bucket,
Prefix=input_key,
VersionIdMarker=new_version,
KeyMarker=input_key,
)
)
versions = [el[0] for el in resp if el[0] is not None]
delete_markers = [el[1] for el in resp if el[1] is not None]
versions.extend(delete_markers)
sorted_versions = sorted(versions, key=lambda x: x["LastModified"])
version_ids = [v["VersionId"] for v in sorted_versions]
errors = []
max_deletions = 1000
for i in range(0, len(version_ids), max_deletions):
resp = client.delete_objects(
Bucket=input_bucket,
Delete={
"Objects": [
{"Key": input_key, "VersionId": version_id}
for version_id in version_ids[i : i + max_deletions]
],
"Quiet": True,
},
)
errors.extend(resp.get("Errors", []))
if len(errors) > 0:
raise DeleteOldVersionsError(
errors=[
"Delete object {} version {} failed: {}".format(
e["Key"], e["VersionId"], e["Message"]
)
for e in errors
]
)
except ClientError as e:
raise DeleteOldVersionsError(errors=[str(e)])
def verify_object_versions_integrity(
client, bucket, key, from_version_id, to_version_id
):
def raise_exception(msg):
raise IntegrityCheckFailedError(msg, client, bucket, key, to_version_id)
conflict_error_template = "A {} ({}) was detected for the given object between read and write operations ({} and {})."
not_found_error_template = "Previous version ({}) has been deleted."
object_versions = retry_wrapper(client.list_object_versions)(
Bucket=bucket,
Prefix=key,
VersionIdMarker=to_version_id,
KeyMarker=key,
MaxKeys=1,
)
versions = object_versions.get("Versions", [])
delete_markers = object_versions.get("DeleteMarkers", [])
all_versions = versions + delete_markers
if not len(all_versions):
return raise_exception(not_found_error_template.format(from_version_id))
prev_version = all_versions[0]
prev_version_id = prev_version["VersionId"]
if prev_version_id != from_version_id:
conflicting_version_type = (
"delete marker" if "ETag" not in prev_version else "version"
)
return raise_exception(
conflict_error_template.format(
conflicting_version_type,
prev_version_id,
from_version_id,
to_version_id,
)
)
return True
def rollback_object_version(client, bucket, key, version, on_error):
""" Delete newly created object version as soon as integrity conflict is detected """
try:
return client.delete_object(Bucket=bucket, Key=key, VersionId=version)
except ClientError as e:
err_message = "ClientError: {}. Version rollback caused by version integrity conflict failed".format(
str(e)
)
on_error(err_message)
except Exception as e:
err_message = "Unknown error: {}. Version rollback caused by version integrity conflict failed".format(
str(e)
)
on_error(err_message)
class DeleteOldVersionsError(Exception):
def __init__(self, errors):
super().__init__("\n".join(errors))
self.errors = errors
class IntegrityCheckFailedError(Exception):
def __init__(self, message, client, bucket, key, version_id):
self.message = message
self.client = client
self.bucket = bucket
self.key = key
self.version_id = version_id
|
REST/python/Environments/create-environments.py | gdesai1234/OctopusDeploy-Api | 199 | 12787508 |
import json
import requests
from urllib.parse import quote
octopus_server_uri = 'https://your.octopus.app/api'
octopus_api_key = 'API-YOURAPIKEY'
headers = {'X-Octopus-ApiKey': octopus_api_key}
def get_octopus_resource(uri):
response = requests.get(uri, headers=headers)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def post_octopus_resource(uri, body):
response = requests.post(uri, headers=headers, json=body)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def get_by_name(uri, name):
resources = get_octopus_resource(uri)
return next((x for x in resources['Items'] if x['Name'] == name), None)
space_name = 'Default'
environment_names = ['Development', 'Test', 'Staging', 'Production']
space = get_by_name('{0}/spaces?partialName={1}&skip=0&take=100'.format(octopus_server_uri, quote(space_name)), space_name)
for environment_name in environment_names:
existing_environment = get_by_name('{0}/{1}/environments?partialName={2}&skip=0&take=100'.format(octopus_server_uri, space['Id'], quote(environment_name)), environment_name)
if existing_environment is None:
print('Creating environment \'{0}\''.format(environment_name))
environment = {
'Name': environment_name
}
environment_resource = post_octopus_resource('{0}/{1}/environments'.format(octopus_server_uri, space['Id']), environment)
print('EnvironmentId: \'{0}\''.format(environment_resource['Id']))
else:
print('Environment \'{0}\' already exists. Nothing to create :)'.format(environment_name)) |
src/mlspace/scripts/__init__.py | abhishekkrthakur/mlspace | 283 | 12787553 | from .base import script as base_script
from .install_docker import script as install_docker
from .install_nvidia_docker import script as install_nvidia_docker
from .install_nvidia_drivers import script as install_nvidia_drivers
|
lib/pipefunc.py | kei-iketani/plex | 153 | 12787573 |
#*********************************************************************
# content = common functions
# version = 0.1.0
# date = 2019-12-01
#
# license = MIT <https://github.com/alexanderrichtertd>
# author = <NAME> <<EMAIL>>
#*********************************************************************
import os
import glob
import json
import time
import webbrowser
# NO logging since it will break the init
#*********************************************************************
# FUNCTIONS
def help(name=''):
from tank import Tank
if not name and os.getenv('SOFTWARE'):
name = os.getenv('SOFTWARE')
project_help = Tank().data_project['HELP']
if name in project_help:
webbrowser.open(project_help[name])
else:
webbrowser.open(project_help['default'])
# GET all (sub) keys in dict
def get_all_keys(key_list, dictonary=[]):
    for key, items in key_list.items():
dictonary.append(key)
if isinstance(items, dict):
get_all_keys(items, dictonary)
return dictonary
# decorator: return function duration time
def get_duration(func):
def timed(*args, **kw):
startTime = time.time()
resultTime = func(*args, **kw)
endTime = time.time()
printResult = '%r (%r, %r) %2.2f sec' % (func.__name__, args, kw, endTime-startTime)
print(printResult)
return resultTime
return timed
def find_inbetween(text, first, last):
try:
start = text.index(first) + len(first)
end = text.index(last, start)
except ValueError: return ""
return text[start:end]
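# Quick sketches for the two pure helpers above (no Maya dependency): the decorator
# times an arbitrary call, and find_inbetween slices between two marker strings.
if __name__ == "__main__":
    @get_duration
    def _busy_wait(seconds):
        time.sleep(seconds)
        return seconds
    _busy_wait(0.1)
    print(find_inbetween("v001_shot010_comp", "v001_", "_comp"))  # shot010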
#*********************************************************************
# FOLDER
# @BRIEF creates a folder, checks if it already exists,
# creates the folder above if the path is a file
def create_folder(path):
if len(path.split('.')) > 1: path = os.path.dirname(path)
if not os.path.exists(path):
try: os.makedirs(path)
except: print('CANT create folder: {}'.format(path))
# @BRIEF opens folder even if file is given
def open_folder(path):
path = os.path.normpath(path)
if os.path.exists(path):
if len(path.split('.')) > 1: path = os.path.dirname(path)
webbrowser.open(path)
else: print('UNVALID path: {}'.format(path))
return path
#*********************************************************************
# FILES
# @BRIEF get a file/folder list with specifics
#
# @PARAM path string.
# file_type string/string[]. '*.py'
# extension bool. True:[name.py] False:[name]
# exclude string /string[]. '__init__.py' | '__init__' | ['btnReport48', 'btnHelp48']
#
# @RETURN strint[].
def get_file_list(path, file_type='*', extension=False, exclude='*', add_path=False):
if(os.path.exists(path)):
getFile = []
try: os.chdir(path)
except: print('Invalid dir: {}'.format(path))
for file_name in glob.glob(file_type):
if exclude in file_name: continue
if add_path: file_name = os.path.normpath(('/').join([path,file_name]))
if extension: getFile.append(file_name)
else: getFile.append((file_name.split('.')[0]))
return getFile
##
# @BRIEF GET ALL subfolders in the path
def get_deep_folder_list(path, add_path=False):
    if add_path: getFile = [x[0] for x in os.walk(path)]
    else:        getFile = [os.path.basename(x[0]) for x in os.walk(path)]
    try: getFile.pop(0)
    except: print('CANT pop file. Path: {}'.format(path))
return getFile
#*********************************************************************
# REPOSITORY
def make_github_issue(title, body=None, assignee='', milestone=None, labels=None):
import requests
from tank import Tank
REPO_DATA = Tank().user.data_user_path
if not assignee: assignee = REPO_DATA['username']
# Our url to create issues via POST
url = 'https://api.github.com/repos/%s/%s/issues' % (REPO_DATA['owner'], REPO_DATA['repository'])
# Create an authenticated session to create the issue
session = requests.Session()
session.auth = (REPO_DATA['username'], REPO_DATA['password'])
issue = {'title': title,
'body': body,
'assignee': assignee,
'milestone': milestone,
'labels': labels}
# Add the issue to our repository
repo = session.post(url, json.dumps(issue))
if repo.status_code == 201:
        print('Successfully created Issue {}'.format(title))
    else:
        print('Could not create Issue {}.\nResponse:{}'.format(title, repo.content))
#*********************************************************************
# TEST
# make_github_issue(title='Login Test', body='Body text', milestone=None, labels=['bug'])
|
tests/broken_pickle.py | Kyle-Kyle/angr | 6,132 | 12787583 |
import pickle
import angr
import nose
def test_pickle_state():
b = angr.Project("/home/angr/angr/angr/tests/blob/x86_64/fauxware")
p = b.path_generator.entry_point()
p.state.inspect.make_breakpoint('mem_write')
nose.tools.assert_true('inspector' in p.state.plugins)
s_str = pickle.dumps(p.state)
s2 = pickle.loads(s_str)
nose.tools.assert_is(p.state, s2)
del p
del s2
import gc; gc.collect()
s2 = pickle.loads(s_str)
nose.tools.assert_true('inspector' not in s2.plugins)
if __name__ == '__main__':
test_pickle_state()
|
platypush/message/event/music/snapcast.py | RichardChiang/platypush | 228 | 12787609 | from platypush.message.event import Event
class SnapcastEvent(Event):
""" Base class for Snapcast events """
def __init__(self, host='localhost', *args, **kwargs):
super().__init__(host=host, *args, **kwargs)
class ClientConnectedEvent(SnapcastEvent):
"""
Event fired upon client connection
"""
def __init__(self, client, host='localhost', *args, **kwargs):
super().__init__(client=client, host=host, *args, **kwargs)
class ClientDisconnectedEvent(SnapcastEvent):
"""
Event fired upon client disconnection
"""
def __init__(self, client, host='localhost', *args, **kwargs):
super().__init__(client=client, host=host, *args, **kwargs)
class ClientVolumeChangeEvent(SnapcastEvent):
"""
Event fired upon volume change or mute status change on a client
"""
def __init__(self, client, volume, muted, host='localhost', *args, **kwargs):
super().__init__(client=client, host=host, volume=volume,
muted=muted, *args, **kwargs)
class ClientLatencyChangeEvent(SnapcastEvent):
"""
Event fired upon latency change on a client
"""
def __init__(self, client, latency, host='localhost', *args, **kwargs):
super().__init__(client=client, host=host, latency=latency,
*args, **kwargs)
class ClientNameChangeEvent(SnapcastEvent):
"""
Event fired upon name change of a client
"""
def __init__(self, client, name, host='localhost', *args, **kwargs):
super().__init__(client=client, host=host, name=name,
*args, **kwargs)
class GroupMuteChangeEvent(SnapcastEvent):
"""
Event fired upon mute status change
"""
def __init__(self, group, muted, host='localhost', *args, **kwargs):
super().__init__(group=group, host=host, muted=muted, *args, **kwargs)
class GroupStreamChangeEvent(SnapcastEvent):
"""
Event fired upon group stream change
"""
def __init__(self, group, stream, host='localhost', *args, **kwargs):
super().__init__(group=group, host=host, stream=stream, *args, **kwargs)
class StreamUpdateEvent(SnapcastEvent):
"""
Event fired upon stream update
"""
def __init__(self, stream_id, stream, host='localhost', *args, **kwargs):
super().__init__(stream_id=stream_id, stream=stream, host=host, *args, **kwargs)
class ServerUpdateEvent(SnapcastEvent):
"""
Event fired upon stream update
"""
def __init__(self, server, host='localhost', *args, **kwargs):
super().__init__(server=server, host=host, *args, **kwargs)
# vim:sw=4:ts=4:et:
|
serieswatcher/sqlobject/tests/test_transactions.py | lightcode/SeriesWatcher | 303 | 12787621 | from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
## Transaction test
########################################
class TestSOTrans(SQLObject):
#_cacheValues = False
class sqlmeta:
defaultOrder = 'name'
name = StringCol(length=10, alternateID=True, dbName='name_col')
def test_transaction():
if not supports('transactions'):
return
setupClass(TestSOTrans)
TestSOTrans(name='bob')
TestSOTrans(name='tim')
trans = TestSOTrans._connection.transaction()
try:
TestSOTrans._connection.autoCommit = 'exception'
TestSOTrans(name='joe', connection=trans)
trans.rollback()
trans.begin()
assert ([n.name for n in TestSOTrans.select(connection=trans)]
== ['bob', 'tim'])
b = TestSOTrans.byName('bob', connection=trans)
b.name = 'robert'
trans.commit()
assert b.name == 'robert'
b.name = 'bob'
trans.rollback()
trans.begin()
assert b.name == 'robert'
finally:
TestSOTrans._connection.autoCommit = True
def test_transaction_commit_sync():
if not supports('transactions'):
return
setupClass(TestSOTrans)
trans = TestSOTrans._connection.transaction()
try:
TestSOTrans(name='bob')
bOut = TestSOTrans.byName('bob')
bIn = TestSOTrans.byName('bob', connection=trans)
bIn.name = 'robert'
assert bOut.name == 'bob'
trans.commit()
assert bOut.name == 'robert'
finally:
TestSOTrans._connection.autoCommit = True
def test_transaction_delete(close=False):
if not supports('transactions'):
return
setupClass(TestSOTrans)
trans = TestSOTrans._connection.transaction()
try:
TestSOTrans(name='bob')
bIn = TestSOTrans.byName('bob', connection=trans)
bIn.destroySelf()
bOut = TestSOTrans.select(TestSOTrans.q.name=='bob')
assert bOut.count() == 1
bOutInst = bOut[0]
bOutID = bOutInst.id
trans.commit(close=close)
assert bOut.count() == 0
raises(SQLObjectNotFound, "TestSOTrans.get(bOutID)")
raises(SQLObjectNotFound, "bOutInst.name")
finally:
trans.rollback()
TestSOTrans._connection.autoCommit = True
def test_transaction_delete_with_close():
test_transaction_delete(close=True)
|
preprocess.py | akoksal/Turkish-Word2Vec | 175 | 12787650 |
from __future__ import print_function
import os.path
import sys
from gensim.corpora import WikiCorpus
import xml.etree.ElementTree as etree
import warnings
import logging
import string
from gensim import utils
def tokenize_tr(content,token_min_len=2,token_max_len=50,lower=True):
if lower:
lowerMap = {ord(u'A'): u'a',ord(u'A'): u'a',ord(u'B'): u'b',ord(u'C'): u'c',ord(u'Ç'): u'ç',ord(u'D'): u'd',ord(u'E'): u'e',ord(u'F'): u'f',ord(u'G'): u'g',ord(u'Ğ'): u'ğ',ord(u'H'): u'h',ord(u'I'): u'ı',ord(u'İ'): u'i',ord(u'J'): u'j',ord(u'K'): u'k',ord(u'L'): u'l',ord(u'M'): u'm',ord(u'N'): u'n',ord(u'O'): u'o',ord(u'Ö'): u'ö',ord(u'P'): u'p',ord(u'R'): u'r',ord(u'S'): u's',ord(u'Ş'): u'ş',ord(u'T'): u't',ord(u'U'): u'u',ord(u'Ü'): u'ü',ord(u'V'): u'v',ord(u'Y'): u'y',ord(u'Z'): u'z'}
content = content.translate(lowerMap)
return [
utils.to_unicode(token) for token in utils.tokenize(content, lower=False, errors='ignore')
if token_min_len <= len(token) <= token_max_len and not token.startswith('_')
]
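# Illustrative helper (not wired into the script): shows why the custom lowering
# table is used, e.g. Turkish 'İ' -> 'i' and 'I' -> 'ı', which plain str.lower()
# does not produce. The sample sentence below is made up.
def _demo_tokenize_tr():
    return tokenize_tr(u"İstanbul VE IŞIK çok güzel")  # expected: ['istanbul', 've', 'ışık', 'çok', 'güzel']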
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Please provide two arguments, first one is path to the wikipedia dump, second one is path to the output file")
print("Example command: python3 preprocess.py trwiki-20180101-pages-articles.xml.bz2 wiki.tr.txt")
sys.exit()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
inputFile = sys.argv[1]
outputFile = sys.argv[2]
wiki = WikiCorpus(inputFile, lemmatize=False,tokenizer_func = tokenize_tr)
logging.info("Wikipedia dump is opened.")
output = open(outputFile,"w",encoding="utf-8")
logging.info("Output file is created.")
i = 0
for text in wiki.get_texts():
output.write(" ".join(text)+"\n")
i+=1
if (i % 10000 == 0):
logging.info("Saved " +str(i) + " articles.")
output.close()
|
tests/unit/conftest.py | ckornacker/aws-gate | 369 | 12787672 |
import os
import boto3
import placebo
import pytest
@pytest.fixture(name="session")
def placebo_session(request):
session_kwargs = {"region_name": os.environ.get("AWS_DEFAULT_REGION", "eu-west-1")}
profile_name = os.environ.get("PLACEBO_PROFILE", None)
if profile_name:
session_kwargs["profile_name"] = profile_name
session = boto3.Session(**session_kwargs)
prefix = request.function.__name__
base_dir = os.environ.get("PLACEBO_DIR", os.path.join(os.getcwd(), "placebo"))
record_dir = os.path.join(base_dir, prefix)
if not os.path.exists(record_dir):
os.makedirs(record_dir)
pill = placebo.attach(session, data_path=record_dir)
if os.environ.get("PLACEBO_MODE") == "record":
pill.record()
else:
pill.playback()
return session
@pytest.fixture
def ec2(session):
return session.resource("ec2", region_name="eu-west-1")
@pytest.fixture
def ec2_ic(session):
return session.resource("ec2-instance-connect", region_name="eu-west-1")
@pytest.fixture
def ssm(session):
return session.client("ssm", region_name="eu-west-1")
@pytest.fixture
def ec2_mock(mocker):
return mocker.MagicMock()
@pytest.fixture
def ec2_ic_mock(mocker):
return mocker.MagicMock()
@pytest.fixture
def ssm_mock(mocker):
mock = mocker.MagicMock()
response = {
"SessionId": "session-020bf6cd31f912b53",
"TokenValue": "randomtokenvalue",
}
mock.configure_mock(
**{
"start_session.return_value": response,
"terminate_session.return_value": response,
}
)
type(mock.meta).endpoint_url = mocker.PropertyMock(return_value="ssm")
return mock
@pytest.fixture
def instance_id():
return "i-0c32153096cd68a6d"
@pytest.fixture
def ssh_key(mocker):
mock = mocker.MagicMock()
mock.configure_mock(
**{
"public_key.return_value": "ssh-rsa ranodombase64string",
"key_path.return_value": "/home/user/.aws-gate/key",
}
)
return mock
@pytest.fixture
def config(mocker):
mock = mocker.MagicMock()
mock.configure_mock(
**{
"get_host.return_value": {
"alias": "test",
"name": "SSM-test",
"profile": "default",
"region": "eu-west-1",
}
}
)
return mock
@pytest.fixture
def empty_config(mocker):
mock = mocker.MagicMock()
mock.configure_mock(**{"get_host.return_value": {}})
return mock
@pytest.fixture
def get_instance_details_response():
return {"availability_zone": "eu-west-1a"}
|
authlib/deprecate.py | YPCrumble/authlib | 3,172 | 12787685 | import warnings
class AuthlibDeprecationWarning(DeprecationWarning):
pass
warnings.simplefilter('always', AuthlibDeprecationWarning)
def deprecate(message, version=None, link_uid=None, link_file=None):
if version:
message += '\nIt will be compatible before version {}.'.format(version)
if link_uid and link_file:
message += '\nRead more <https://git.io/{}#file-{}-md>'.format(link_uid, link_file)
warnings.warn(AuthlibDeprecationWarning(message), stacklevel=2)
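# Usage sketch: the version and link fragments below are illustrative only; the
# call emits an AuthlibDeprecationWarning carrying the composed message.
if __name__ == "__main__":
    deprecate("Use the new client API instead.", version="1.0", link_uid="abc123", link_file="migration")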
|
changes/backends/jenkins/generic_builder.py | vault-the/changes | 443 | 12787701 |
from __future__ import absolute_import
from flask import current_app
from changes.config import db
from changes.models.snapshot import SnapshotImage
from changes.models.command import FutureCommand
from changes.utils.http import build_internal_uri
from changes.buildsteps.base import LXCConfig
from .builder import JenkinsBuilder
class JenkinsGenericBuilder(JenkinsBuilder):
def __init__(self, master_urls=None, setup_script='', teardown_script='',
artifacts=(), reset_script='', path='', workspace='',
snapshot_script=None, clean=True, cluster=None, *args, **kwargs):
"""Builder for JenkinsGenericBuildStep. See JenkinsGenericBuildStep
for information on most of these arguments.
"""
self.setup_script = setup_script
self.script = kwargs.pop('script')
self.teardown_script = teardown_script
self.snapshot_script = snapshot_script
self.reset_script = reset_script
self.path = path
self.workspace = workspace
self.artifacts = artifacts
self.clean = clean
# See configuration for more details; by default, the default build type is
# legacy which sets up no additional configuration.
self.build_type = kwargs.pop('build_type',
current_app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE'])
if self.build_type is None:
self.build_type = current_app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE']
# If a server url is not provided (default: None), set it to a blank string
self.artifact_server_base_url = current_app.config['ARTIFACTS_SERVER'] or ''
# we do this as early as possible in order to propagate the
# error faster. The build description is simply the configuration
# key'd by the build_type, documented in config.py
self.build_desc = self.load_build_desc(self.build_type)
super(JenkinsGenericBuilder, self).__init__(master_urls, cluster=cluster, *args, **kwargs)
def load_build_desc(self, build_type):
build_desc = current_app.config['CHANGES_CLIENT_BUILD_TYPES'][build_type]
self.validate_build_desc(build_type, build_desc)
return build_desc
# TODO validate configuration at start of application or use a linter to validate
# configuration before pushing/deploying
def validate_build_desc(self, build_type, build_desc):
if build_desc.get('uses_client', False):
if 'jenkins-command' not in build_desc:
raise ValueError('[CHANGES_CLIENT_BUILD_TYPES INVALID] build type %s missing required key: jenkins-command' % build_type)
if 'adapter' not in build_desc:
raise ValueError('[CHANGES_CLIENT_BUILD_TYPES INVALID] build type %s missing required key: adapter' % build_type)
# These three methods all describe which build specification,
# setup, and teardown should be used to create a snapshot
# build. In the generic builder, this is the same as a normal build,
# but sharded builds need to override these with the shard equivalents
# in order to create the correct snapshot.
def get_snapshot_build_desc(self):
return self.build_desc
def get_snapshot_setup_script(self):
return self.setup_script
def get_snapshot_teardown_script(self):
return self.teardown_script
def get_expected_image(self, job_id):
"""
Get the snapshot-image (filesystem tarball for this jobstep).
If this returns None, it is a normal build (the more common case),
otherwise it returns the id of the snapshot image, which indicates
to where the build agent should upload the snapshot onto s3.
"""
return db.session.query(
SnapshotImage.id,
).filter(
SnapshotImage.job_id == job_id,
).scalar()
def _get_build_desc(self, jobstep):
if self.get_expected_image(jobstep.job_id):
return self.get_snapshot_build_desc()
return self.build_desc
def get_lxc_config(self, jobstep):
"""
Get the LXC configuration, if the LXC adapter should be used.
Args:
jobstep (JobStep): The JobStep to get the LXC config for.
Returns:
LXCConfig: The config to use for this jobstep, or None.
"""
build_desc = self._get_build_desc(jobstep)
if build_desc.get('uses_client') and build_desc.get('adapter') == 'lxc':
app_cfg = current_app.config
snapshot_bucket = app_cfg.get('SNAPSHOT_S3_BUCKET', '')
default_pre = self.debug_config.get('prelaunch_script') or app_cfg.get('LXC_PRE_LAUNCH', '')
default_post = app_cfg.get('LXC_POST_LAUNCH', '')
default_release = app_cfg.get('LXC_RELEASE', 'trusty')
return LXCConfig(s3_bucket=snapshot_bucket,
compression='lz4',
prelaunch=build_desc.get('pre-launch', default_pre),
postlaunch=build_desc.get('post-launch', default_post),
release=build_desc.get('release', default_release),
template=None,
mirror=None,
security_mirror=None)
return None
def get_job_parameters(self, job, changes_bid, setup_script=None,
script=None, teardown_script=None, path=None):
"""
Gets a list containing dictionaries, each with two keys - name and value.
These key,value pairs correspond to the input variables in Jenkins.
changes_bid is actually the jobstep id, and job is the current job.
*_script and path override the corresponding fields of the current
builder.
"""
params = super(JenkinsGenericBuilder, self).get_job_parameters(
job, changes_bid=changes_bid)
if path is None:
path = self.path
if setup_script is None:
setup_script = self.setup_script
if script is None:
script = self.script
if teardown_script is None:
teardown_script = self.teardown_script
project = job.project
repository = project.repository
vcs = repository.get_vcs()
if vcs:
repo_url = vcs.remote_url
else:
repo_url = repository.url
snapshot_bucket = current_app.config.get('SNAPSHOT_S3_BUCKET', '')
default_pre = self.debug_config.get('prelaunch_script') or current_app.config.get('LXC_PRE_LAUNCH', '')
default_post = current_app.config.get('LXC_POST_LAUNCH', '')
default_release = current_app.config.get('LXC_RELEASE', 'trusty')
build_desc = self.build_desc
# This is the image we are expected to produce or None
# if this is not a snapshot build.
expected_image = self.get_expected_image(job.id)
# Setting script to be empty essentially forces nothing
# but setup/teardown to be run, making a clean snapshot
snapshot_id = ''
if expected_image:
snapshot_id = expected_image.hex
# this is a no-op command in sh, essentially equivalent
# to '' except it tells changes-client that we are
# deliberately doing absolutely nothing. However,
# if snapshot script is not None, then we just use
# that in place of script (so the normal script is
# never used).
script = self.snapshot_script or ':'
# sharded builds will have different setup/teardown/build_desc
# scripts between shards and collector so we need to
# use the shard ones
build_desc = self.get_snapshot_build_desc()
setup_script = self.get_snapshot_setup_script()
teardown_script = self.get_snapshot_teardown_script()
# CHANGES_BID, the jobstep id, is provided by superclass
params.update({
'CHANGES_PID': project.slug,
'PROJECT_CONFIG': project.get_config_path(),
'REPO_URL': repo_url,
'SETUP_SCRIPT': setup_script,
'SCRIPT': script,
'TEARDOWN_SCRIPT': teardown_script,
'RESET_SCRIPT': self.reset_script,
'REPO_VCS': repository.backend.name,
'WORK_PATH': path,
'C_WORKSPACE': self.workspace,
'ARTIFACTS_SERVER_BASE_URL': self.artifact_server_base_url})
if 'bind_mounts' in self.debug_config:
params['bind-mounts'] = self.debug_config['bind_mounts']
if build_desc.get('uses_client', False):
params.update({
'JENKINS_COMMAND': build_desc['jenkins-command'],
'CHANGES_CLIENT_ADAPTER': build_desc['adapter'],
'CHANGES_CLIENT_SERVER': build_internal_uri('/api/0'),
'CHANGES_CLIENT_SNAPSHOT_BUCKET': snapshot_bucket,
'CHANGES_CLIENT_SNAPSHOT_ID': snapshot_id,
'CHANGES_CLIENT_LXC_PRE_LAUNCH': build_desc.get('pre-launch',
default_pre),
'CHANGES_CLIENT_LXC_POST_LAUNCH': build_desc.get('post-launch',
default_post),
'CHANGES_CLIENT_LXC_RELEASE': build_desc.get('release',
default_release)
})
return params
def get_future_commands(self, env, commands, artifacts):
"""Create future commands which are later created as comands.
See models/command.py.
"""
return map(lambda command: FutureCommand(command['script'],
artifacts=artifacts,
env=env),
commands)
def create_commands(self, jobstep, env):
"""
This seems slightly redundant, but in fact is necessary for
changes-client to work. The issue is mainly that the client is
designed for the exact flow of information that mesos uses,
in which the commands are taken from changes through an api request.
We need to tell changes to run what would normally be ran through
the Jenkins configuration - so we move this from the Jenkins
configuration into the commands of the build type.
Arguments:
jobstep (JobStep): jobstep to create commands under
env (dict): Env variables to supply to all commands.
"""
commands = self.build_desc.get('commands', [])
artifacts = self.artifacts_for_jobstep(jobstep)
env = env.copy()
if not self.clean:
env['SKIP_GIT_CLEAN'] = "1"
index = 0
for future_command in self.get_future_commands(env, commands, artifacts):
db.session.add(future_command.as_command(jobstep, index))
index += 1
def can_snapshot(self):
"""
Whether or not this build can snapshot is purely a function of the
build type. Right now the only adapter supporting this is the lxc
adapter, but in the scenario that another adapter is added (e.g.
docker?) then we would need for multiple adapters to support snapshots,
so we just encode whether it can or not as a field, defaulting to
false as most types don't support this operation.
"""
return self.build_desc.get('can_snapshot', False)
def artifacts_for_jobstep(self, jobstep):
"""
The artifact names/patterns we want to collect for a given jobstep.
For example, we may want to collect different artifacts for a
collection phase jobstep.
Arguments:
jobstep (JobStep): jobstep in question
"""
return self.artifacts
|
arguments.py | mjlbach/Object-Goal-Navigation | 106 | 12787721 | import argparse
import torch
def get_args():
parser = argparse.ArgumentParser(
description='Goal-Oriented-Semantic-Exploration')
# General Arguments
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--auto_gpu_config', type=int, default=1)
parser.add_argument('--total_num_scenes', type=str, default="auto")
parser.add_argument('-n', '--num_processes', type=int, default=5,
help="""how many training processes to use (default:5)
Overridden when auto_gpu_config=1
and training on gpus""")
parser.add_argument('--num_processes_per_gpu', type=int, default=6)
parser.add_argument('--num_processes_on_first_gpu', type=int, default=1)
parser.add_argument('--eval', type=int, default=0,
help='0: Train, 1: Evaluate (default: 0)')
parser.add_argument('--num_training_frames', type=int, default=10000000,
help='total number of training frames')
parser.add_argument('--num_eval_episodes', type=int, default=200,
help="number of test episodes per scene")
parser.add_argument('--num_train_episodes', type=int, default=10000,
help="""number of train episodes per scene
before loading the next scene""")
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument("--sim_gpu_id", type=int, default=0,
help="gpu id on which scenes are loaded")
parser.add_argument("--sem_gpu_id", type=int, default=-1,
help="""gpu id for semantic model,
-1: same as sim gpu, -2: cpu""")
# Logging, loading models, visualization
parser.add_argument('--log_interval', type=int, default=10,
help="""log interval, one log per n updates
(default: 10) """)
parser.add_argument('--save_interval', type=int, default=1,
help="""save interval""")
parser.add_argument('-d', '--dump_location', type=str, default="./tmp/",
help='path to dump models and log (default: ./tmp/)')
parser.add_argument('--exp_name', type=str, default="exp1",
help='experiment name (default: exp1)')
parser.add_argument('--save_periodic', type=int, default=500000,
help='Model save frequency in number of updates')
parser.add_argument('--load', type=str, default="0",
help="""model path to load,
0 to not reload (default: 0)""")
parser.add_argument('-v', '--visualize', type=int, default=0,
help="""1: Render the observation and
the predicted semantic map,
2: Render the observation with semantic
predictions and the predicted semantic map
(default: 0)""")
parser.add_argument('--print_images', type=int, default=0,
help='1: save visualization as images')
# Environment, dataset and episode specifications
parser.add_argument('-efw', '--env_frame_width', type=int, default=640,
help='Frame width (default:640)')
parser.add_argument('-efh', '--env_frame_height', type=int, default=480,
help='Frame height (default:480)')
parser.add_argument('-fw', '--frame_width', type=int, default=160,
help='Frame width (default:160)')
parser.add_argument('-fh', '--frame_height', type=int, default=120,
help='Frame height (default:120)')
parser.add_argument('-el', '--max_episode_length', type=int, default=500,
help="""Maximum episode length""")
parser.add_argument("--task_config", type=str,
default="tasks/objectnav_gibson.yaml",
help="path to config yaml containing task information")
parser.add_argument("--split", type=str, default="train",
help="dataset split (train | val | val_mini) ")
parser.add_argument('--camera_height', type=float, default=0.88,
help="agent camera height in metres")
parser.add_argument('--hfov', type=float, default=79.0,
help="horizontal field of view in degrees")
parser.add_argument('--turn_angle', type=float, default=30,
help="Agent turn angle in degrees")
parser.add_argument('--min_depth', type=float, default=0.5,
help="Minimum depth for depth sensor in meters")
parser.add_argument('--max_depth', type=float, default=5.0,
help="Maximum depth for depth sensor in meters")
parser.add_argument('--success_dist', type=float, default=1.0,
help="success distance threshold in meters")
parser.add_argument('--floor_thr', type=int, default=50,
help="floor threshold in cm")
parser.add_argument('--min_d', type=float, default=1.5,
help="min distance to goal during training in meters")
parser.add_argument('--max_d', type=float, default=100.0,
help="max distance to goal during training in meters")
parser.add_argument('--version', type=str, default="v1.1",
help="dataset version")
# Model Hyperparameters
parser.add_argument('--agent', type=str, default="sem_exp")
parser.add_argument('--lr', type=float, default=2.5e-5,
help='learning rate (default: 2.5e-5)')
parser.add_argument('--global_hidden_size', type=int, default=256,
help='global_hidden_size')
parser.add_argument('--eps', type=float, default=1e-5,
help='RL Optimizer epsilon (default: 1e-5)')
parser.add_argument('--alpha', type=float, default=0.99,
help='RL Optimizer alpha (default: 0.99)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--use_gae', action='store_true', default=False,
help='use generalized advantage estimation')
parser.add_argument('--tau', type=float, default=0.95,
help='gae parameter (default: 0.95)')
parser.add_argument('--entropy_coef', type=float, default=0.001,
help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value_loss_coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--max_grad_norm', type=float, default=0.5,
help='max norm of gradients (default: 0.5)')
parser.add_argument('--num_global_steps', type=int, default=20,
help='number of forward steps in A2C (default: 5)')
parser.add_argument('--ppo_epoch', type=int, default=4,
help='number of ppo epochs (default: 4)')
parser.add_argument('--num_mini_batch', type=str, default="auto",
help='number of batches for ppo (default: 32)')
parser.add_argument('--clip_param', type=float, default=0.2,
help='ppo clip parameter (default: 0.2)')
parser.add_argument('--use_recurrent_global', type=int, default=0,
help='use a recurrent global policy')
parser.add_argument('--num_local_steps', type=int, default=25,
help="""Number of steps the local policy
between each global step""")
parser.add_argument('--reward_coeff', type=float, default=0.1,
help="Object goal reward coefficient")
parser.add_argument('--intrinsic_rew_coeff', type=float, default=0.02,
help="intrinsic exploration reward coefficient")
parser.add_argument('--num_sem_categories', type=float, default=16)
parser.add_argument('--sem_pred_prob_thr', type=float, default=0.9,
help="Semantic prediction confidence threshold")
# Mapping
parser.add_argument('--global_downscaling', type=int, default=2)
parser.add_argument('--vision_range', type=int, default=100)
parser.add_argument('--map_resolution', type=int, default=5)
parser.add_argument('--du_scale', type=int, default=1)
parser.add_argument('--map_size_cm', type=int, default=2400)
parser.add_argument('--cat_pred_threshold', type=float, default=5.0)
parser.add_argument('--map_pred_threshold', type=float, default=1.0)
parser.add_argument('--exp_pred_threshold', type=float, default=1.0)
parser.add_argument('--collision_threshold', type=float, default=0.20)
# parse arguments
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
if args.auto_gpu_config:
num_gpus = torch.cuda.device_count()
if args.total_num_scenes != "auto":
args.total_num_scenes = int(args.total_num_scenes)
elif "objectnav_gibson" in args.task_config and \
"train" in args.split:
args.total_num_scenes = 25
elif "objectnav_gibson" in args.task_config and \
"val" in args.split:
args.total_num_scenes = 5
else:
assert False, "Unknown task config, please specify" + \
" total_num_scenes"
# GPU Memory required for the SemExp model:
# 0.8 + 0.4 * args.total_num_scenes (GB)
# GPU Memory required per thread: 2.6 (GB)
min_memory_required = max(0.8 + 0.4 * args.total_num_scenes, 2.6)
# Automatically configure number of training threads based on
# number of GPUs available and GPU memory size
gpu_memory = 1000
for i in range(num_gpus):
gpu_memory = min(gpu_memory,
torch.cuda.get_device_properties(
i).total_memory
/ 1024 / 1024 / 1024)
assert gpu_memory > min_memory_required, \
"""Insufficient GPU memory for GPU {}, gpu memory ({}GB)
needs to be greater than {}GB""".format(
i, gpu_memory, min_memory_required)
num_processes_per_gpu = int(gpu_memory / 2.6)
num_processes_on_first_gpu = \
int((gpu_memory - min_memory_required) / 2.6)
if args.eval:
max_threads = num_processes_per_gpu * (num_gpus - 1) \
+ num_processes_on_first_gpu
assert max_threads >= args.total_num_scenes, \
"""Insufficient GPU memory for evaluation"""
if num_gpus == 1:
args.num_processes_on_first_gpu = num_processes_on_first_gpu
args.num_processes_per_gpu = 0
args.num_processes = num_processes_on_first_gpu
assert args.num_processes > 0, "Insufficient GPU memory"
else:
num_threads = num_processes_per_gpu * (num_gpus - 1) \
+ num_processes_on_first_gpu
num_threads = min(num_threads, args.total_num_scenes)
args.num_processes_per_gpu = num_processes_per_gpu
args.num_processes_on_first_gpu = max(
0,
num_threads - args.num_processes_per_gpu * (num_gpus - 1))
args.num_processes = num_threads
args.sim_gpu_id = 1
print("Auto GPU config:")
print("Number of processes: {}".format(args.num_processes))
print("Number of processes on GPU 0: {}".format(
args.num_processes_on_first_gpu))
print("Number of processes per GPU: {}".format(
args.num_processes_per_gpu))
else:
args.sem_gpu_id = -2
if args.num_mini_batch == "auto":
args.num_mini_batch = max(args.num_processes // 2, 1)
else:
args.num_mini_batch = int(args.num_mini_batch)
return args
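# Minimal invocation sketch (torch is already imported above; the flags below are
# illustrative and force the CPU path so no GPU auto-configuration is attempted).
if __name__ == "__main__":
    import sys
    sys.argv = [sys.argv[0], "--no_cuda", "--total_num_scenes", "1"]
    print(get_args())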
|
test/IECoreMaya/FnSceneShapeTest.py | bradleyhenke/cortex | 386 | 12787752 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import maya.cmds
import imath
import IECore
import IECoreScene
import IECoreMaya
class FnSceneShapeTest( IECoreMaya.TestCase ) :
__testFile = "test/test.scc"
def setUp( self ) :
scene = IECoreScene.SceneCache( FnSceneShapeTest.__testFile, IECore.IndexedIO.OpenMode.Write )
sc = scene.createChild( str(1) )
mesh = IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1)))
mesh["Cd"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ imath.V3f( 1, 0, 0 ) ] * 6 ) )
sc.writeObject( mesh, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 1, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
sc = sc.createChild( "child" )
mesh = IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1)))
mesh["Cd"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ imath.V3f( 0, 1, 0 ) ] * 6 ) )
sc.writeObject( mesh, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 2, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
sc = sc.createChild( str( 3 ) )
mesh = IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1)))
mesh["Cd"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ imath.V3f( 0, 0, 1 ) ] * 6 ) )
sc.writeObject( mesh, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 3, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
return scene
def __setupTableProp( self ):
boxSize = imath.Box3f( imath.V3f( -.5, -.5, -.5 ), imath.V3f( .5, .5, .5 ) )
table = IECoreScene.SceneCache( FnSceneShapeTest.__testFile, IECore.IndexedIO.Write )
table.writeAttribute( 'scene:visible', IECore.BoolData( True ), 0 )
table.writeAttribute( 'user:testBool', IECore.BoolData( True ), 0 )
table.writeAttribute( 'user:testShort', IECore.ShortData( 2 ), 0 )
table.writeAttribute( 'user:testInt', IECore.IntData( 3 ), 0 )
table.writeAttribute( 'user:testInt64', IECore.Int64Data( 4 ), 0 )
table.writeAttribute( 'user:testFloat', IECore.FloatData( 5 ), 0 )
table.writeAttribute( 'user:testDouble', IECore.DoubleData( 6 ), 0 )
table.writeAttribute( 'user:testString', IECore.StringData( 'seven' ), 0 )
mat = imath.M44d( ( 8, 9, 10, 11 ), ( 12, 13, 14, 15 ), ( 16, 17, 18, 19 ), ( 20, 21, 22, 23 ) )
table.writeAttribute( 'user:testMatrixd', IECore.M44dData(mat), 0 )
mat = imath.M44f( ( 24, 25, 26, 27 ), ( 28, 29, 30, 31 ), ( 32, 33, 34, 35 ), ( 36, 37, 38, 39 ) )
table.writeAttribute( 'user:testMatrixf', IECore.M44fData(mat), 0 )
pedestal_GEO = table.createChild( 'pedestal_GEO' )
pedestal_GEO.writeObject( IECoreScene.MeshPrimitive.createBox(boxSize), 0 )
s = imath.V3d(15, 1, 15)
r = imath.Eulerd()
t = imath.V3d(0, .5, 0)
mat = IECore.TransformationMatrixd( s, r, t )
pedestal_GEO.writeTransform( IECore.TransformationMatrixdData(mat), 0 )
column_GEO = pedestal_GEO.createChild( 'column_GEO' )
column_GEO.writeObject( IECoreScene.MeshPrimitive.createBox(boxSize), 0 )
s = imath.V3d(.25, 20, .25)
r = imath.Eulerd()
t = imath.V3d(0, 10.5, 0)
mat = IECore.TransformationMatrixd( s, r, t )
column_GEO.writeTransform( IECore.TransformationMatrixdData(mat), 0 )
tableTop_GEO = column_GEO.createChild( 'tableTop_GEO' )
tableTop_GEO.writeObject( IECoreScene.MeshPrimitive.createBox(boxSize), 0 )
s = imath.V3d(10, 0.05, 10)
r = imath.Eulerd()
t = imath.V3d(0, .525, 0)
mat = IECore.TransformationMatrixd( s, r, t )
tableTop_GEO.writeTransform( IECore.TransformationMatrixdData(mat), 0 )
def testSceneInterface( self ) :
maya.cmds.file( new=True, f=True )
node = maya.cmds.createNode( "ieSceneShape" )
maya.cmds.setAttr( node+'.file', FnSceneShapeTest.__testFile,type='string' )
fn = IECoreMaya.FnSceneShape( node )
# Check scene for a wrong path
maya.cmds.setAttr( node+'.root', 'blabla', type='string' )
scene = fn.sceneInterface()
self.assertEqual( scene, None )
maya.cmds.setAttr( node+'.root', '/', type='string' )
scene = fn.sceneInterface()
self.assertTrue( isinstance( scene, IECoreScene.SceneCache ) )
self.assertEqual( scene.childNames(), ['1'] )
self.assertFalse( scene.hasObject() )
maya.cmds.setAttr( node+'.root', '/1', type='string' )
scene = fn.sceneInterface()
self.assertTrue( isinstance( scene, IECoreScene.SceneCache ) )
self.assertEqual( scene.childNames(), ['child'] )
self.assertTrue( scene.hasObject() )
def testCreationName( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "bob" )
self.assertEqual( fn.fullPathName(), u"|bob|bobSceneShape" )
fn = IECoreMaya.FnSceneShape.create( "bob1")
self.assertEqual( fn.fullPathName(), u"|bob1|bobSceneShape1" )
fn = IECoreMaya.FnSceneShape.create( "bob" )
self.assertEqual( fn.fullPathName(), u"|bob2|bobSceneShape2" )
def testCreationSetup( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
self.assertTrue( maya.cmds.sets( fn.fullPathName(), isMember="initialShadingGroup" ) )
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly", l=True ) )
self.assertFalse( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertTrue( maya.cmds.isConnected( "time1.outTime", fn.fullPathName()+".time" ) )
def testExpandOnce( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
result = fn.expandOnce()
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".queryPaths[0]" ), "/1" )
self.assertTrue( len(result) == 1 )
childFn = result[0]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), "|test|sceneShape_1|sceneShape_SceneShape1" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".root" ), "/1" )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1.translate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1.rotate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1.scale" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTime", childFn.fullPathName()+".time" ) )
maya.cmds.setAttr( childFn.fullPathName()+".drawGeometry", 1 )
result = childFn.expandOnce()
self.assertTrue( maya.cmds.getAttr( childFn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".queryPaths[0]" ), "/child" )
self.assertTrue( len(result) == 1 )
self.assertTrue( isinstance( result[0], IECoreMaya.FnSceneShape ) )
self.assertEqual( result[0].fullPathName(), "|test|sceneShape_1|child|childSceneShape" )
self.assertEqual( maya.cmds.getAttr( result[0].fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( result[0].fullPathName()+".root" ), "/1/child" )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1|child.translate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1|child.rotate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1|child.scale" ) )
self.assertEqual( maya.cmds.getAttr( result[0].fullPathName()+".drawGeometry"), 1 )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTime", result[0].fullPathName()+".time" ) )
def testExpandOnceNamespace( self ) :
maya.cmds.file( new=True, f=True )
namespace = "INPUT"
if not maya.cmds.namespace( exists=namespace ):
maya.cmds.namespace( addNamespace=namespace )
def addnamespace( path ):
return path.replace( "|", "|" + namespace + ":" )
fn = IECoreMaya.FnSceneShape.create( namespace + ":" + "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile, type='string' )
result = fn.expandOnce( preserveNamespace=True )
self.assertTrue( len(result) == 1 )
childFn = result[ 0 ]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), addnamespace ( "|test|sceneShape_1|sceneShape_SceneShape1" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", addnamespace ( "|test|sceneShape_1.translate" ) ) )
def testExpandAll( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 1 )
result = fn.expandAll()
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".queryPaths[0]" ), "/1" )
self.assertTrue( len(result) == 3 )
childFn = result[0]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), "|test|sceneShape_1|sceneShape_SceneShape1" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".root" ), "/1" )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1.translate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1.rotate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1.scale" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTime", childFn.fullPathName()+".time" ) )
self.assertTrue( maya.cmds.getAttr( childFn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".queryPaths[0]" ), "/child" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".drawGeometry"), 1 )
self.assertTrue( isinstance( result[1], IECoreMaya.FnSceneShape ) )
self.assertEqual( result[1].fullPathName(), "|test|sceneShape_1|child|childSceneShape" )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".root" ), "/1/child" )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1|child.translate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1|child.rotate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1|child.scale" ) )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".drawGeometry"), 1 )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTime", result[1].fullPathName()+".time" ) )
def testExpandAllNamespace( self ) :
namespace = "INPUT"
if not maya.cmds.namespace( exists=namespace ):
maya.cmds.namespace( addNamespace=namespace )
def addnamespace( path ):
return path.replace( "|", "|" + namespace + ":" )
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( namespace + ":" + "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 1 )
result = fn.expandAll( preserveNamespace=True )
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".queryPaths[0]" ), "/1" )
self.assertTrue( len(result) == 3 )
childFn = result[0]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), addnamespace( "|test|sceneShape_1|sceneShape_SceneShape1" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".root" ), "/1" )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", addnamespace( "|test|sceneShape_1.translate" ) ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outRotate", addnamespace( "|test|sceneShape_1.rotate" ) ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outScale", addnamespace( "|test|sceneShape_1.scale" ) ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTime", childFn.fullPathName()+".time" ) )
self.assertTrue( maya.cmds.getAttr( childFn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".queryPaths[0]" ), "/child" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".drawGeometry"), 1 )
self.assertTrue( isinstance( result[1], IECoreMaya.FnSceneShape ) )
self.assertEqual( result[1].fullPathName(), addnamespace( "|test|sceneShape_1|child|childSceneShape" ) )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".root" ), "/1/child" )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outTranslate", addnamespace( "|test|sceneShape_1|child.translate" ) ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outRotate", addnamespace( "|test|sceneShape_1|child.rotate" ) ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outScale", addnamespace( "|test|sceneShape_1|child.scale" ) ) )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".drawGeometry"), 1 )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTime", result[1].fullPathName()+".time" ) )
def testCollapse( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
result = fn.expandOnce()
result[0].expandOnce()
children = set( ["|test|testSceneShape", "|test|sceneShape_1", "|test|sceneShape_1|sceneShape_SceneShape1", "|test|sceneShape_1|child", "|test|sceneShape_1|child|childSceneShape"] )
self.assertEqual( set(maya.cmds.listRelatives( "|test", ad=True, f=True )), children )
fn.collapse()
self.assertEqual( maya.cmds.listRelatives( "|test", ad=True, f=True ), ["|test|testSceneShape"] )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ), 0 )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".visibility" ), 1 )
def testConvertAllToGeometry( self ):
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
fn.convertAllToGeometry()
children = ["|test|testSceneShape", "|test|sceneShape_1"]
self.assertEqual( maya.cmds.listRelatives( "|test", f=True ), children )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".intermediateObject" ), 0 )
children = ["|test|sceneShape_1|sceneShape_SceneShape1", "|test|sceneShape_1|child", "|test|sceneShape_1|sceneShape_Shape1"]
self.assertEqual( maya.cmds.listRelatives( "|test|sceneShape_1", f=True ), children )
self.assertEqual( maya.cmds.getAttr( "|test|sceneShape_1|sceneShape_SceneShape1.intermediateObject" ), 1 )
self.assertEqual( maya.cmds.nodeType( "|test|sceneShape_1|sceneShape_Shape1" ), "mesh")
self.assertEqual( maya.cmds.getAttr( "|test|sceneShape_1|sceneShape_SceneShape1.queryPaths[1]" ), "/" )
self.assertTrue( maya.cmds.isConnected( "|test|sceneShape_1|sceneShape_SceneShape1.outObjects[1]", "|test|sceneShape_1|sceneShape_Shape1.inMesh" ) )
def testComponentNames( self ):
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 0 )
self.assertEqual( fn.componentNames(), [] )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 1 )
self.assertEqual( fn.componentNames(), ['/', '/1', '/1/child', '/1/child/3'] )
fn.selectComponentNames( ['/', '/1', '/1/child/3'] )
self.assertEqual( fn.selectedComponentNames(), set( ['/', '/1', '/1/child/3'] ) )
def testQuery( self ):
maya.cmds.file( new=True, f=True )
def createSceneFile():
scene = IECoreScene.SceneCache( FnSceneShapeTest.__testFile, IECore.IndexedIO.OpenMode.Write )
sc = scene.createChild( str(1) )
curves = IECoreScene.CurvesPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1))) # 6 curves.
sc.writeObject( curves, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 0, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
createSceneFile()
node = maya.cmds.createNode( "ieSceneShape" )
maya.cmds.setAttr( node+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( node+'.root', '/',type='string' )
fn = IECoreMaya.FnSceneShape( node )
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[0]", type=True), None )
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[1]", type=True), None )
maya.cmds.setAttr( fn.fullPathName()+".queryPaths[0]" , "/1", type="string")
maya.cmds.setAttr( fn.fullPathName()+".queryPaths[1]" , "/1", type="string")
maya.cmds.setAttr( fn.fullPathName()+".queryConvertParameters[0]", "-index 0", type="string" ) # Set it to output 0 th box curve.
maya.cmds.setAttr( fn.fullPathName()+".queryConvertParameters[1]", "-index 1", type="string" ) # Set it to output 1 th box curve.
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[0]", type=True), "nurbsCurve" )
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[1]", type=True), "nurbsCurve" )
curveShape0 = maya.cmds.createNode( "nurbsCurve" )
curveShape1 = maya.cmds.createNode( "nurbsCurve" )
maya.cmds.connectAttr( fn.fullPathName()+ ".outObjects[0]", curveShape0 + '.create' )
maya.cmds.connectAttr( fn.fullPathName()+ ".outObjects[1]", curveShape1 + '.create' )
self.assertNotEqual( maya.cmds.pointPosition(curveShape0 + '.cv[0]' ), maya.cmds.pointPosition(curveShape1 + '.cv[0]' ) )
maya.cmds.setAttr( fn.fullPathName()+".queryConvertParameters[1]", "-index 0", type="string" )
self.assertEqual( maya.cmds.pointPosition(curveShape0 + '.cv[0]' ), maya.cmds.pointPosition(curveShape1 + '.cv[0]' ) )
def testPromotableAttributeNames( self ):
maya.cmds.file( new=True, force=True )
self.__setupTableProp()
sceneShapeFn = IECoreMaya.FnSceneShape.create( 'table' )
sceneShapeFn.findPlug( 'file' ).setString( FnSceneShapeTest.__testFile )
expectedAttrs = [
'user:testBool', 'user:testShort', 'user:testInt', 'user:testInt64', 'user:testFloat',
'user:testDouble', 'user:testString', 'user:testMatrixd', 'user:testMatrixf', 'scene:visible'
]
self.assertEquals( set( sceneShapeFn.promotableAttributeNames() ), set( expectedAttrs ) )
def testPromoteAttribute( self ):
maya.cmds.file( new=True, force=True )
self.__setupTableProp()
sceneShapeFn = IECoreMaya.FnSceneShape.create( 'table' )
sceneShapeFn.findPlug( 'file' ).setString( FnSceneShapeTest.__testFile )
for pAttr in sceneShapeFn.promotableAttributeNames():
sceneShapeFn.promoteAttribute( pAttr )
sceneShape = sceneShapeFn.fullPathName()
table = maya.cmds.listRelatives( sceneShape, parent=True )[0]
testVisibility = maya.cmds.getAttr( table + '.' + str( IECoreMaya.LiveScene.visibilityOverrideName ) )
testBool = maya.cmds.getAttr( table + '.ieAttr_testBool' )
testShort = maya.cmds.getAttr( table + '.ieAttr_testShort' )
testInt = maya.cmds.getAttr( table + '.ieAttr_testInt' )
testInt64 = maya.cmds.getAttr( table + '.ieAttr_testInt64' )
testFloat = maya.cmds.getAttr( table + '.ieAttr_testFloat' )
testDouble = maya.cmds.getAttr( table + '.ieAttr_testDouble' )
testString = maya.cmds.getAttr( table + '.ieAttr_testString' )
testMatrixd = maya.cmds.getAttr( table + '.ieAttr_testMatrixd' )
testMatrixf = maya.cmds.getAttr( table + '.ieAttr_testMatrixf' )
self.assertTrue( testVisibility )
self.assertTrue( testBool )
self.assertEquals( testShort, 2 )
self.assertEquals( testInt, 3 )
self.assertEquals( testInt64, 4 )
self.assertEquals( testFloat, 5. )
self.assertEquals( testDouble, 6. )
self.assertEquals( testString, 'seven' )
self.assertEquals( testMatrixd, [ 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23. ] )
self.assertEquals( testMatrixf, [ 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39. ] )
def tearDown( self ) :
if os.path.exists( FnSceneShapeTest.__testFile ) :
os.remove( FnSceneShapeTest.__testFile )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
|
moya/testprojects/expose/site/py/exposed.py | moyaproject/moya | 129 | 12787759 | from __future__ import unicode_literals
from moya.expose import View
class TestView(View):
name = "hello"
def get(self, context):
return "Hello, World"
|
rnnt/args.py | lahiruts/Online-Speech-Recognition | 201 | 12787765 | <reponame>lahiruts/Online-Speech-Recognition<gh_stars>100-1000
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('name', 'rnn-t-v5', help='session name')
flags.DEFINE_enum('mode', 'train', ['train', 'resume', 'eval'], help='mode')
flags.DEFINE_integer('resume_step', None, help='model step')
# dataset
flags.DEFINE_string('LibriSpeech_train_100',
"../librispeech/LibriSpeech/train-clean-100",
help='LibriSpeech train')
flags.DEFINE_string('LibriSpeech_train_360',
"../librispeech/LibriSpeech/train-clean-360",
help='LibriSpeech train')
flags.DEFINE_string('LibriSpeech_train_500',
"../librispeech/LibriSpeech/train-other-500",
help='LibriSpeech train')
flags.DEFINE_string('LibriSpeech_test',
"../librispeech/LibriSpeech/test-clean",
help='LibriSpeech test')
flags.DEFINE_string('LibriSpeech_dev',
"../librispeech/LibriSpeech/dev-clean",
help='LibriSpeech dev')
flags.DEFINE_string('TEDLIUM_train',
"../speech_data/TEDLIUM/TEDLIUM_release1/train",
help='TEDLIUM 1 train')
flags.DEFINE_string('TEDLIUM_test',
"../speech_data/TEDLIUM/TEDLIUM_release1/test",
help='TEDLIUM 1 test')
flags.DEFINE_string('CommonVoice', "../speech_data/common_voice",
help='common voice')
flags.DEFINE_string('YT_bloomberg2', "../speech_data/common_voice",
help='common voice')
flags.DEFINE_string('YT_life', "../speech_data/common_voice",
help='common voice')
flags.DEFINE_integer('num_workers', 4, help='dataloader workers')
# learning
flags.DEFINE_bool('use_pretrained', default=False, help='Use pretrained encoder')
flags.DEFINE_enum('optim', "adam", ['adam', 'sgd', 'sm3'], help='optimizer')
flags.DEFINE_float('lr', 1e-4, help='initial lr')
flags.DEFINE_bool('sched', True, help='lr reduce rate on plateau')
flags.DEFINE_integer('sched_patience', 1, help='lr reduce rate on plateau')
flags.DEFINE_float('sched_factor', 0.5, help='lr reduce rate on plateau')
flags.DEFINE_float('sched_min_lr', 1e-6, help='lr reduce rate on plateau')
flags.DEFINE_integer('warmup_step', 10000, help='linearly warmup lr')
flags.DEFINE_integer('epochs', 30, help='epoch')
flags.DEFINE_integer('batch_size', 8, help='batch size')
flags.DEFINE_integer('sub_batch_size', 8, help='accumulate batch size')
flags.DEFINE_integer('eval_batch_size', 4, help='evaluation batch size')
flags.DEFINE_float('gradclip', None, help='clip norm value')
# encoder
flags.DEFINE_string('enc_type', 'LSTM', help='encoder rnn type')
flags.DEFINE_integer('enc_hidden_size', 600, help='encoder hidden dimension')
flags.DEFINE_integer('enc_layers', 4, help='encoder layers')
flags.DEFINE_integer('enc_proj_size', 600, help='encoder projection size')
flags.DEFINE_float('enc_dropout', 0, help='encoder dropout')
# decoder
flags.DEFINE_integer('dec_hidden_size', 150, help='decoder hidden dimension')
flags.DEFINE_integer('dec_layers', 2, help='decoder layers')
flags.DEFINE_integer('dec_proj_size', 150, help='decoder projection size')
flags.DEFINE_float('dec_dropout', 0., help='decoder dropout')
# joint
flags.DEFINE_integer('joint_size', 512, help='Joint hidden dimension')
# tokenizer
flags.DEFINE_enum('tokenizer', 'char', ['char', 'bpe'], help='tokenizer')
flags.DEFINE_integer('bpe_size', 256, help='BPE vocabulary size')
flags.DEFINE_integer('vocab_embed_size', 16, help='vocabulary embedding size')
# data preprocess
flags.DEFINE_float('audio_max_length', 14, help='max length in seconds')
flags.DEFINE_enum('feature', 'mfcc', ['mfcc', 'melspec', 'logfbank'],
help='audio feature')
flags.DEFINE_integer('feature_size', 80, help='mel_bins')
flags.DEFINE_integer('n_fft', 400, help='spectrogram')
flags.DEFINE_integer('win_length', 400, help='spectrogram')
flags.DEFINE_integer('hop_length', 200, help='spectrogram')
flags.DEFINE_bool('delta', False, help='concat delta and detal of dealt')
flags.DEFINE_bool('cmvn', False, help='normalize spectrogram')
flags.DEFINE_integer('downsample', 3, help='downsample audio feature')
flags.DEFINE_integer('T_mask', 50, help='time mask width')
flags.DEFINE_integer('T_num_mask', 2, help='number of time masks')
flags.DEFINE_integer('F_mask', 5, help='frequency mask width')
flags.DEFINE_integer('F_num_mask', 1, help='number of frequency masks')
# apex
flags.DEFINE_bool('apex', default=True, help='fp16 training')
flags.DEFINE_string('opt_level', 'O1', help='use mix precision')
# parallel
flags.DEFINE_bool('multi_gpu', False, help='DataParallel')
# log
flags.DEFINE_integer('loss_step', 5, help='frequency to show loss in pbar')
flags.DEFINE_integer('save_step', 10000, help='frequency to save model')
flags.DEFINE_integer('eval_step', 10000, help='frequency to save model')
flags.DEFINE_integer('sample_size', 20, help='size of visualized examples')
|
tools/todos.py | mrjrty/rpaframework | 518 | 12787809 | <gh_stars>100-1000
#!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
from collections import defaultdict
from contextlib import contextmanager
from io import StringIO
from pathlib import Path
from pylint.lint import Run
TODO_PATTERN = re.compile(r"(todo|fixme|xxx)[\:\.]?\s*(.+)", re.IGNORECASE)
@contextmanager
def redirect():
stdout = sys.stdout
sys.stdout = StringIO()
try:
yield sys.stdout
finally:
sys.stdout.close()
sys.stdout = stdout
def todo_msg(msg):
match = TODO_PATTERN.match(msg)
if match:
return match.group(2)
else:
return msg
def main():
parser = argparse.ArgumentParser(description="Write all todo items as rst")
parser.add_argument("input", help="Path to source files")
parser.add_argument("output", help="Path to output rst file")
args = parser.parse_args()
cmd = [
"pylint",
"--disable=all",
"--enable=fixme",
"--exit-zero",
"-f",
"json",
Path(args.input).name,
]
cwd = os.getcwd()
os.chdir(Path(args.input).parent)
try:
with redirect() as stdout:
Run(cmd, exit=False)
result = json.loads(stdout.getvalue())
finally:
os.chdir(cwd)
todos = defaultdict(list)
for item in result:
# Remove given search path from module path
name = ".".join(item["module"].split(".")[1:])
message = todo_msg(item["message"])
todos[name].append({"message": todo_msg(item["message"]), "line": item["line"]})
output = ["****", "TODO", "****", ""]
for module, items in sorted(todos.items()):
items.sort(key=lambda item: item["line"])
output.append(f"{module}:")
output.append("=" * (len(module) + 1))
output.append("")
output.append(".. csv-table::")
output.append(" :header: \"Line\", \"Message\"")
output.append(" :widths: 10, 40")
output.append("")
for item in items:
output.append(" \"{line}\", \"{message}\"".format(**item))
output.append("")
with open(args.output, "w") as outfile:
outfile.write("\n".join(output))
if __name__ == "__main__":
main()
|
configs/deepsvg/defaults_fonts.py | naoto0804/deepsvg | 573 | 12787852 | from .default_icons import *
class Config(Config):
def __init__(self, num_gpus=1):
super().__init__(num_gpus=num_gpus)
# Dataset
self.data_dir = "./dataset/fonts_tensor/"
self.meta_filepath = "./dataset/fonts_meta.csv"
|
dbcArchives/2021/000_6-sds-3-x-dl/055_DLbyABr_04-ConvolutionalNetworks.py | r-e-x-a-g-o-n/scalable-data-science | 138 | 12787866 | <filename>dbcArchives/2021/000_6-sds-3-x-dl/055_DLbyABr_04-ConvolutionalNetworks.py<gh_stars>100-1000
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [<NAME>](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [<NAME>](https://www.linkedin.com/in/christianvonkoch/) and [<NAME>](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC # Convolutional Neural Networks
# MAGIC ## aka CNN, ConvNet
# COMMAND ----------
# MAGIC %md
# MAGIC As a baseline, let's start a lab running with what we already know.
# MAGIC
# MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits.
# MAGIC
# MAGIC The main part of the code looks like the following (full code you can run is in the next cell):
# MAGIC
# MAGIC ```
# MAGIC # imports, setup, load data sets
# MAGIC
# MAGIC model = Sequential()
# MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# MAGIC
# MAGIC categorical_labels = to_categorical(y_train, num_classes=10)
# MAGIC
# MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100)
# MAGIC
# MAGIC # print metrics, plot errors
# MAGIC ```
# MAGIC
# MAGIC Note the changes, which are largely about building a classifier instead of a regression model:
# MAGIC * Output layer has one neuron per category, with softmax activation
# MAGIC * __Loss function is cross-entropy loss__
# MAGIC * Accuracy metric is categorical accuracy
# COMMAND ----------
# MAGIC %md
# MAGIC Let's hold pointers into wikipedia for these new concepts.
# COMMAND ----------
# MAGIC %scala
# MAGIC //This allows easy embedding of publicly available information into any other notebook
# MAGIC //Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression",500))
# COMMAND ----------
# MAGIC %scala
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Softmax_function",380))
# COMMAND ----------
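# MAGIC %md
# MAGIC *Added illustration (not part of the original notebook):* the next cell computes softmax and categorical cross-entropy by hand with numpy on a made-up three-class score vector, to make the two definitions linked above concrete. The numbers are arbitrary.

# COMMAND ----------

# A minimal sketch: softmax turns raw scores into probabilities, and
# categorical cross-entropy measures how far those probabilities are
# from the one-hot true label.
import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))  # subtract the max for numerical stability
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.1])   # raw scores for 3 classes (made up)
probs = softmax(logits)              # roughly [0.66, 0.24, 0.10]
y_true = np.array([1.0, 0.0, 0.0])   # one-hot label: class 0

cross_entropy = -np.sum(y_true * np.log(probs))   # about 0.42
print("probabilities:", probs)
print("cross-entropy loss:", cross_entropy)

# COMMAND ----------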
# MAGIC %md
# MAGIC The following is from: [https://www.quora.com/How-does-Keras-calculate-accuracy](https://www.quora.com/How-does-Keras-calculate-accuracy).
# MAGIC
# MAGIC **Categorical accuracy:**
# MAGIC
# MAGIC ```%python
# MAGIC def categorical_accuracy(y_true, y_pred):
# MAGIC return K.cast(K.equal(K.argmax(y_true, axis=-1),
# MAGIC K.argmax(y_pred, axis=-1)),
# MAGIC K.floatx())
# MAGIC ```
# MAGIC
# MAGIC > `K.argmax(y_true)` takes the highest value to be the prediction and matches against the comparative set.
# COMMAND ----------
# MAGIC %md
# MAGIC Watch (1:39)
# MAGIC * [](https://www.youtube.com/watch?v=tRsSi_sqXjI)
# MAGIC
# MAGIC Watch (1:54)
# MAGIC * [](https://www.youtube.com/watch?v=x449QQDhMDE)
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import sklearn.datasets
import datetime
import matplotlib.pyplot as plt
import numpy as np
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
model = Sequential()
model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(15, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
categorical_labels = to_categorical(y_train, num_classes=10)
start = datetime.datetime.today()
history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10))
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
print ("Start: " + str(start))
end = datetime.datetime.today()
print ("End: " + str(end))
print ("Elapse: " + str(end-start))
# COMMAND ----------
# MAGIC %md
# MAGIC after about a minute we have:
# MAGIC
# MAGIC ```
# MAGIC ...
# MAGIC
# MAGIC Epoch 40/40
# MAGIC 1s - loss: 0.0610 - categorical_accuracy: 0.9809 - val_loss: 0.1918 - val_categorical_accuracy: 0.9583
# MAGIC
# MAGIC ...
# MAGIC
# MAGIC loss: 0.216120
# MAGIC
# MAGIC categorical_accuracy: 0.955000
# MAGIC
# MAGIC Start: 2017-12-06 07:35:33.948102
# MAGIC
# MAGIC End: 2017-12-06 07:36:27.046130
# MAGIC
# MAGIC Elapse: 0:00:53.098028
# MAGIC ```
# COMMAND ----------
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC What are the big takeaways from this experiment?
# MAGIC
# MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20
# MAGIC 2. The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit.
# MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse!
# MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste.
# MAGIC 5. For what it's worth, we get 95% accuracy without much work.
# MAGIC
# MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better.
# MAGIC
# MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously.
# MAGIC
# MAGIC Try two more experiments (try them separately):
# MAGIC 1. Add a third, hidden layer.
# MAGIC 2. Increase the size of the hidden layers.
# MAGIC
# MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy.
# MAGIC
# MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
# MAGIC
# MAGIC ... We need a new approach!
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC ... let's think about this:
# MAGIC
# MAGIC ### What is layer 2 learning from layer 1? Combinations of pixels
# MAGIC
# MAGIC #### Combinations of pixels contain information but...
# MAGIC
# MAGIC There are a lot of them (combinations) and they are "fragile"
# MAGIC
# MAGIC In fact, in our last experiment, we basically built a model that memorizes a bunch of "magic" pixel combinations.
# MAGIC
# MAGIC What might be a better way to build features?
# MAGIC
# MAGIC * When humans perform this task, we look not at arbitrary pixel combinations, but certain geometric patterns -- lines, curves, loops.
# MAGIC * These features are made up of combinations of pixels, but they are far from arbitrary
# MAGIC * We identify these features regardless of translation, rotation, etc.
# MAGIC
# MAGIC Is there a way to get the network to do the same thing?
# MAGIC
# MAGIC I.e., in layer one, identify pixels. Then in layer 2+, identify abstractions over pixels that are translation-invariant 2-D shapes?
# MAGIC
# MAGIC We could look at where a "filter" that represents one of these features (e.g., and edge) matches the image.
# MAGIC
# MAGIC How would this work?
# MAGIC
# MAGIC ### Convolution
# MAGIC
# MAGIC Convolution in the general mathematical sense is define as follows:
# MAGIC
# MAGIC <img src="https://i.imgur.com/lurC2Cx.png" width=300>
# MAGIC
# MAGIC The convolution we deal with in deep learning is a simplified case. We want to compare two signals. Here are two visualizations, courtesy of Wikipedia, that help communicate how convolution emphasizes features:
# MAGIC
# MAGIC <img src="http://i.imgur.com/EDCaMl2.png" width=500>
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC #### Here's an animation (where we change \\({\tau}\\))
# MAGIC <img src="http://i.imgur.com/0BFcnaw.gif">
# MAGIC
# MAGIC __In one sense, the convolution captures and quantifies the pattern matching over space__
# MAGIC
# MAGIC If we perform this in two dimensions, we can achieve effects like highlighting edges:
# MAGIC
# MAGIC <img src="http://i.imgur.com/DKEXIII.png">
# MAGIC
# MAGIC The matrix here, also called a convolution kernel, is one of the functions we are convolving. Other convolution kernels can blur, "sharpen," etc.
# MAGIC
# MAGIC ### So we'll drop in a number of convolution kernels, and the network will learn where to use them? Nope. Better than that.
# MAGIC
# MAGIC ## We'll program in the *idea* of discrete convolution, and the network will learn what kernels extract meaningful features!
# MAGIC
# MAGIC The values in a (fixed-size) convolution kernel matrix will be variables in our deep learning model. Although intuitively it seems like it would be hard to learn useful params, in fact, since those variables are used repeatedly across the image data, it "focuses" the error on a smallish number of parameters with a lot of influence -- so it should be vastly *less* expensive to train than just a huge fully connected layer like we discussed above.
# MAGIC
# MAGIC This idea was developed in the late 1980s, and by 1989, <NAME> (at AT&T/Bell Labs) had built a practical high-accuracy system (used in the 1990s for processing handwritten checks and mail).
# MAGIC
# MAGIC __How do we hook this into our neural networks?__
# MAGIC
# MAGIC * First, we can preserve the geometric properties of our data by "shaping" the vectors as 2D instead of 1D.
# MAGIC
# MAGIC * Then we'll create a layer whose value is not just activation applied to weighted sum of inputs, but instead it's the result of a dot-product (element-wise multiply and sum) between the kernel and a patch of the input vector (image).
# MAGIC * This value will be our "pre-activation" and optionally feed into an activation function (or "detector")
# MAGIC
# MAGIC <img src="http://i.imgur.com/ECyi9lL.png">
# MAGIC
# MAGIC
# MAGIC If we perform this operation at lots of positions over the image, we'll get lots of outputs, as many as one for every input pixel.
# MAGIC
# MAGIC
# MAGIC <img src="http://i.imgur.com/WhOrJ0Y.jpg">
# MAGIC
# MAGIC * So we'll add another layer that "picks" the highest convolution pattern match from nearby pixels, which
# MAGIC * makes our pattern match a little bit translation invariant (a fuzzy location match)
# MAGIC * reduces the number of outputs significantly
# MAGIC * This layer is commonly called a pooling layer, and if we pick the "maximum match" then it's a "max pooling" layer.
# MAGIC
# MAGIC <img src="http://i.imgur.com/9iPpfpb.png">
# MAGIC
# MAGIC __The end result is that the kernel or filter together with max pooling creates a value in a subsequent layer which represents the appearance of a pattern in a local area in a prior layer.__
# MAGIC
# MAGIC __Again, the network will be given a number of "slots" for these filters and will learn (by minimizing error) what filter values produce meaningful features. This is the key insight into how modern image-recognition networks are able to generalize -- i.e., learn to tell 6s from 7s or cats from dogs.__
# MAGIC
# MAGIC <img src="http://i.imgur.com/F8eH3vj.png">
# MAGIC
# MAGIC ## Ok, let's build our first ConvNet:
# MAGIC
# MAGIC First, we want to explicitly shape our data into a 2-D configuration. We'll end up with a 4-D tensor where the first dimension is the training examples, then each example is 28x28 pixels, and we'll explicitly say it's 1-layer deep. (Why? with color images, we typically process over 3 or 4 channels in this last dimension)
# MAGIC
# MAGIC A step by step animation follows:
# MAGIC * http://cs231n.github.io/assets/conv-demo/index.html
# COMMAND ----------
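# MAGIC %md
# MAGIC *Added illustration (not part of the original notebook):* before wiring convolution into Keras, the next cell hand-codes a single "valid" 2-D convolution (really a cross-correlation, as is usual in deep learning) and a 2x2 max-pool in plain numpy. The 6x6 image, the vertical line in it, and the edge-detecting kernel are all made up for the demo; in the ConvNet below the kernel values are *learned*, not chosen by hand.

# COMMAND ----------

import numpy as np

def conv2d_valid(image, kernel):
    # Slide the kernel over every position where it fully fits ("valid" padding)
    kh, kw = kernel.shape
    oh, ow = image.shape[0] - kh + 1, image.shape[1] - kw + 1
    out = np.zeros((oh, ow))
    for i in range(oh):
        for j in range(ow):
            out[i, j] = np.sum(image[i:i+kh, j:j+kw] * kernel)
    return out

def max_pool_2x2(fmap):
    # Keep the strongest response in each non-overlapping 2x2 patch
    h, w = fmap.shape
    return fmap[:h//2*2, :w//2*2].reshape(h//2, 2, w//2, 2).max(axis=(1, 3))

toy_image = np.zeros((6, 6))
toy_image[:, 2] = 1.0                         # a vertical line
edge_kernel = np.array([[1.0, 0.0, -1.0],
                        [1.0, 0.0, -1.0],
                        [1.0, 0.0, -1.0]])    # hand-made vertical-edge filter

feature_map = conv2d_valid(toy_image, edge_kernel)  # strong +/- responses around the line
pooled = max_pool_2x2(feature_map)                  # coarser, slightly translation-tolerant
print(feature_map)
print(pooled)

# COMMAND ----------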
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
X_train = X_train.reshape( (X_train.shape[0], 28, 28, 1) )
X_train = X_train.astype('float32')
X_train /= 255
y_train = to_categorical(y_train, num_classes=10)
X_test = X_test.reshape( (X_test.shape[0], 28, 28, 1) )
X_test = X_test.astype('float32')
X_test /= 255
y_test = to_categorical(y_test, num_classes=10)
# COMMAND ----------
# MAGIC %md
# MAGIC Now the model:
# COMMAND ----------
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid', # no padding; output will be smaller than input
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu')) # alternative syntax for applying activation
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# COMMAND ----------
# MAGIC %md
# MAGIC ... and the training loop and output:
# COMMAND ----------
start = datetime.datetime.today()
history = model.fit(X_train, y_train, batch_size=128, epochs=8, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Our MNIST ConvNet
# MAGIC
# MAGIC In our first convolutional MNIST experiment, we get to almost 99% validation accuracy in just a few epochs (a minute or so on CPU)!
# MAGIC
# MAGIC The training accuracy is effectively 100%, though, so we've almost completely overfit (i.e., memorized the training data) by this point and need to do a little work if we want to keep learning.
# MAGIC
# MAGIC Let's add another convolutional layer:
# COMMAND ----------
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(8, (4, 4)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at a number of "famous" convolutional networks!
# MAGIC
# MAGIC ### LeNet (<NAME>, 1998)
# MAGIC
# MAGIC <img src="http://i.imgur.com/k5hMtMK.png">
# MAGIC
# MAGIC <img src="http://i.imgur.com/ERV9pHW.gif">
# COMMAND ----------
# MAGIC %md <img src="http://i.imgur.com/TCN9C4P.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### AlexNet (2012)
# MAGIC
# MAGIC <img src="http://i.imgur.com/CpokDKV.jpg">
# MAGIC
# MAGIC <img src="http://i.imgur.com/Ld2QhXr.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC ### Back to our labs: Still Overfitting
# MAGIC
# MAGIC We're making progress on our test error -- about 99% -- but just a bit for all the additional time, due to the network overfitting the data.
# MAGIC
# MAGIC There are a variety of techniques we can take to counter this -- forms of regularization.
# MAGIC
# MAGIC Let's try a relatively simple solution that works surprisingly well: add a pair of `Dropout` filters, a layer that randomly omits a fraction of neurons from each training batch (thus exposing each neuron to only part of the training data).
# MAGIC
# MAGIC We'll add more convolution kernels but shrink them to 3x3 as well.
# COMMAND ----------
model = Sequential()
model.add(Conv2D(32, # number of kernels
(3, 3), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(rate=1-0.25)) # <- regularize, new parameter rate added (rate=1-keep_prob)
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(rate=1-0.5)) # <-regularize, new parameter rate added (rate=1-keep_prob)
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=2)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at some more recent ConvNet architectures:
# MAGIC
# MAGIC ### VGG16 (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/gl4kZDf.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### GoogLeNet (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/hvmtDqN.png">
# MAGIC
# MAGIC *"Inception" layer: parallel convolutions at different resolutions*
# MAGIC
# MAGIC ### Residual Networks (2015-)
# MAGIC
# MAGIC Skip layers to improve training (error propagation). Residual layers learn from details at multiple previous layers.
# MAGIC
# MAGIC <img src="http://i.imgur.com/32g8Ykl.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ---
# MAGIC
# MAGIC > __ASIDE: Atrous / Dilated Convolutions__
# MAGIC
# MAGIC > An atrous or dilated convolution is a convolution filter with "holes" in it. Effectively, it is a way to enlarge the filter spatially while not adding as many parameters or attending to every element in the input.
# MAGIC
# MAGIC > Why? Covering a larger input volume allows recognizing coarser-grained patterns; restricting the number of parameters is a way of regularizing or constraining the capacity of the model, making training easier.
# MAGIC
# MAGIC ---
# COMMAND ----------
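# MAGIC %md
# MAGIC *Added illustration (not part of the original notebook):* a minimal sketch of a dilated convolution in Keras, assuming the same 28x28x1 MNIST input shape used above. A 3x3 kernel with `dilation_rate=(2, 2)` covers a 5x5 area of its input while still having only nine weights per input channel.

# COMMAND ----------

from keras.models import Sequential
from keras.layers import Conv2D, Activation

dilated = Sequential()
dilated.add(Conv2D(8, (3, 3),
                   dilation_rate=(2, 2),   # insert "holes" between kernel taps
                   padding='valid',
                   input_shape=(28, 28, 1)))
dilated.add(Activation('relu'))
dilated.summary()   # spatial output shrinks as if the kernel were 5x5 (28 -> 24)

# COMMAND ----------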
# MAGIC %md
# MAGIC ## *Lab Wrapup*
# MAGIC
# MAGIC From the last lab, you should have a test accuracy of over 99.1%
# MAGIC
# MAGIC For one more activity, try changing the optimizer to old-school "sgd" -- just to see how far we've come with these modern gradient descent techniques in the last few years.
# MAGIC
# MAGIC Accuracy will end up noticeably worse ... about 96-97% test accuracy. Two key takeaways:
# MAGIC
# MAGIC * Without a good optimizer, even a very powerful network design may not achieve results
# MAGIC * In fact, we could replace the word "optimizer" there with
# MAGIC * initialization
# MAGIC * activation
# MAGIC * regularization
# MAGIC * (etc.)
# MAGIC * All of these elements we've been working with operate together in a complex way to determine final performance
# COMMAND ----------
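# MAGIC %md
# MAGIC *Added illustration (not part of the original notebook):* a sketch of the suggested optimizer experiment. It rebuilds the same Dropout ConvNet from scratch (so the comparison does not start from already-trained weights) and compiles it with plain `'sgd'` instead of `'adam'`; everything else is unchanged.

# COMMAND ----------

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D

sgd_model = Sequential()
sgd_model.add(Conv2D(32, (3, 3), padding='valid', input_shape=(28, 28, 1)))
sgd_model.add(Activation('relu'))
sgd_model.add(Conv2D(32, (3, 3)))
sgd_model.add(Activation('relu'))
sgd_model.add(MaxPooling2D(pool_size=(2, 2)))
sgd_model.add(Dropout(rate=1-0.25))
sgd_model.add(Flatten())
sgd_model.add(Dense(128))
sgd_model.add(Activation('relu'))
sgd_model.add(Dropout(rate=1-0.5))
sgd_model.add(Dense(10))
sgd_model.add(Activation('softmax'))

# Only the optimizer string changes relative to the earlier cell.
sgd_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

history_sgd = sgd_model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2, validation_split=0.1)
scores_sgd = sgd_model.evaluate(X_test, y_test, verbose=2)
print(dict(zip(sgd_model.metrics_names, scores_sgd)))

# COMMAND ----------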
# MAGIC %md
# MAGIC Of course this world evolves fast - see the new kid in the CNN block -- **capsule networks**
# MAGIC
# MAGIC > Hinton: “The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster.”
# MAGIC
# MAGIC Well worth the 8 minute read:
# MAGIC * [https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b](https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b)
# MAGIC
# MAGIC To understand deeper:
# MAGIC * original paper: [https://arxiv.org/abs/1710.09829](https://arxiv.org/abs/1710.09829)
# MAGIC
# MAGIC [Keras capsule network example](https://keras.io/examples/cifar10_cnn_capsule/)
# COMMAND ----------
# MAGIC %md
# MAGIC # More resources
# MAGIC
# MAGIC - http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
# MAGIC - https://openai.com/
# COMMAND ----------
|
_integration/python-pymysql/test.py | jfrabaute/go-mysql-server | 114 | 12787892 | # Copyright 2020-2021 Dolthub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pymysql.cursors
class TestMySQL(unittest.TestCase):
def test_connect(self):
connection = pymysql.connect(host='127.0.0.1',
user='root',
password='',
db='',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
sql = "SELECT name, email FROM mytable ORDER BY name, email"
cursor.execute(sql)
rows = cursor.fetchall()
expected = [
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"}
]
self.assertEqual(expected, rows)
finally:
connection.close()
if __name__ == '__main__':
unittest.main()
|
tests/test___main__.py | oterrier/openapi-python-client | 172 | 12787900 | def test_main(mocker):
app = mocker.patch("openapi_python_client.cli.app")
# noinspection PyUnresolvedReferences
from openapi_python_client import __main__
app.assert_called_once()
|
platypush/plugins/tcp.py | RichardChiang/platypush | 228 | 12787910 | import base64
import json
import socket
from typing import Optional, Union
from platypush.plugins import Plugin, action
class TcpPlugin(Plugin):
"""
Plugin for raw TCP communications.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._sockets = {}
def _connect(self, host: str, port: int, timeout: Optional[float] = None) -> socket.socket:
sd = self._sockets.get((host, port))
if sd:
return sd
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout:
sd.settimeout(timeout)
sd.connect((host, port))
self._sockets[(host, port)] = sd
return sd
@action
def connect(self, host: str, port: int, timeout: Optional[float] = None):
"""
Open a TCP connection.
:param host: Host IP/name.
:param port: TCP port.
:param timeout: Connection timeout in seconds (default: None).
"""
self._connect(host, port, timeout)
@action
def close(self, host: str, port: int):
"""
Close an active TCP connection.
:param host: Host IP/name.
:param port: TCP port.
"""
sd = self._sockets.get((host, port))
if not sd:
self.logger.warning('Not connected to ({}, {})'.format(host, port))
return
sd.close()
@action
def send(self, data: Union[bytes, str], host: str, port: int, binary: bool = False,
timeout: Optional[float] = None, recv_response: bool = False, **recv_opts):
"""
Send data over a TCP connection. If the connection isn't active it will be created.
:param data: Data to be sent, as bytes or string.
:param host: Host IP/name.
:param port: TCP port.
:param binary: If set to True and ``data`` is a string then will be treated as base64-encoded binary input.
:param timeout: Connection timeout in seconds (default: None).
:param recv_response: If True then the action will wait for a response from the server before closing the
connection. Note that ``recv_opts`` must be specified in this case - at least ``length``.
"""
if isinstance(data, list) or isinstance(data, dict):
data = json.dumps(data)
if isinstance(data, str):
data = data.encode()
if binary:
data = base64.decodebytes(data)
sd = self._connect(host, port, timeout)
try:
sd.send(data)
if recv_response:
recv_opts.update({
'host': host,
'port': port,
'timeout': timeout,
'binary': binary,
})
return self.recv(**recv_opts)
finally:
self.close(host, port)
@action
def recv(self, length: int, host: str, port: int, binary: bool = False, timeout: Optional[float] = None) -> str:
"""
Receive data from a TCP connection. If the connection isn't active it will be created.
:param length: Maximum number of bytes to be received.
:param host: Host IP/name.
:param port: TCP port.
:param binary: If set to True then the output will be base64-encoded, otherwise decoded as string.
:param timeout: Connection timeout in seconds (default: None).
"""
sd = self._connect(host, port, timeout)
try:
data = sd.recv(length)
if binary:
data = base64.encodebytes(data).decode()
else:
data = data.decode()
return data
finally:
self.close(host, port)
# vim:sw=4:ts=4:et:
|
mbuild/lib/bulk_materials/__init__.py | daico007/mbuild | 101 | 12787915 | """mBuild bulk materials library."""
from mbuild.lib.bulk_materials.amorphous_silica_bulk import AmorphousSilicaBulk
|
recipes/Python/577236_ur1ca_commandline_client/recipe-577236.py | tdiprima/code | 2,023 | 12787927 | #!/usr/bin/env python
"""ur1.py -- command-line ur1.ca client.
ur1.ca is the URL shortening services provided by status.net. This script
makes it possible to access the service from the command line. This is done
by scraping the returned page and look for the shortened URL.
USAGE:
ur1.py LONGURL
RETURN STATUS:
If the URL is succesfully shortened by ur1.ca, it is written
to the standard output, and the program exits with status 0.
If ur1.ca fails to shorten the long URL, the error message
provided by ur1.ca is written to the standard error, and the
program exits with status 1.
If the input URL is malformed, no attempts of contacting the
server is made, and the program exits with status 2.
"""
import sys
import urllib
import urlparse
import re
UR1CA = "http://ur1.ca/"
ESUCCESS = 0
EFAIL = 1
EBADARG = 2
RE_GOOD = re.compile(r'<p class="success">Your ur1 is: <a href="(?P<shorturl>.+)">(?P=shorturl)</a></p>')
RE_BAD = re.compile(r'<p class="error">(?P<errormsg>.+)</p>')
def isgoodarg(url):
"""Check if the input URL makes "sense".
A URL does not make sense if the scheme is neither http or https,
or the host part is missing.
url: input URL
Returns boolean indicating whether the URL makes sense.
"""
parse_result = urlparse.urlparse(url)
#pylint: disable-msg=E1101
isgoodscheme = (parse_result.scheme == "http" or
parse_result.scheme == "https")
isgoodhost = parse_result.hostname
return isgoodscheme and isgoodhost
def parameterize(url):
"""Encode input URL as POST parameter.
url: a string which is the URL to be passed to ur1.ca service.
Returns the POST parameter constructed from the URL.
"""
return urllib.urlencode({"longurl": url})
def request(parameter):
"""Send POST request to ur1.ca using the parameter.
parameter: the parameter to the POST request, as returned by
parameterize().
Returns the file-like object as returned by urllib.urlopen.
"""
return urllib.urlopen(UR1CA, parameter)
def retrievedoc(response):
"""Retrieve the HTML text from the ur1.ca response.
response: the file-like HTTP response file returned by ur1.ca.
Returns the text as a string.
"""
#XXX: ensure all bytes are read
res_info = response.info()
clength = int(res_info["content-length"])
return response.read(clength)
def scrape(document):
"""Scrape the HTML document returned from ur1.ca for the answer.
document: HTML document returned from ur1.ca
Returns a 2-tuple (success, answer) where --
success: boolean value indicating whether the service returned
some meaningful result
answer: if success, this is the shortened URL, otherwise a string
indicating the possible problem
"""
goodguess = RE_GOOD.search(document)
if goodguess:
matchdict = goodguess.groupdict()
return (True, matchdict["shorturl"])
badguess = RE_BAD.search(document)
if badguess:
matchdict = badguess.groupdict()
return (False, matchdict["errormsg"])
else:
return (False, "Unknown local error.")
def __do_main():
"""Do everything."""
try:
arg = sys.argv[1]
except IndexError:
sys.exit(EBADARG)
if not isgoodarg(arg):
sys.exit(EBADARG)
post_param = parameterize(arg)
answerfile = request(post_param)
doc = retrievedoc(answerfile)
answerfile.close()
status, msg = scrape(doc)
if status:
print msg
sys.exit(ESUCCESS)
else:
print >> sys.stderr, msg
sys.exit(EFAIL)
if __name__ == "__main__":
__do_main()
|
petridish/app/directory.py | Bhaskers-Blu-Org2/petridishnn | 121 | 12787943 | <reponame>Bhaskers-Blu-Org2/petridishnn
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import glob
import re
from petridish.philly.container import is_philly
from petridish.app.multi_proc import has_stopped
"""
Dir structures
"""
def _updir(d, n=1):
for _ in range(n):
d = os.path.dirname(d)
return d
"""
Philly specific dir structures regarding multiple trials of the same experiment
"""
def previous_trial_log_root(log_root):
if not is_philly():
return None
# e.g., xx/application_xx-xx/logs/2/petridish_main
log_root = os.path.normpath(log_root)
triali = int(os.path.basename(_updir(log_root, 1)))
if triali == 1:
return None
return os.path.join(_updir(log_root, 2), str(triali - 1), os.path.basename(log_root))
def previous_trial_model_root(model_root):
if not is_philly():
return None
# e.g., xxx/application_xx-xx/models
return os.path.normpath(model_root)
#model_root = os.path.normpath(model_root)
#triali = int(os.path.basename(model_root))
#if triali == 1:
# return None
#return os.path.join(_updir(model_root, 1), str(triali - 1))
"""
Helper functions to create names for communication over file-system.
Direct connections are not available.
"""
def _auto_script_fn(i, prefix=None):
if prefix is not None:
return '{}_{}.sh'.format(prefix, i)
return '{}.sh'.format(i)
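# Example sketch (annotation added for clarity; the 'job' prefix is only a sample value):
# _auto_script_fn maps a script index and an optional prefix to a shell-script file name:
#   _auto_script_fn(3)               -> '3.sh'
#   _auto_script_fn(3, prefix='job') -> 'job_3.sh'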
def _auto_script_dir(log_dir, is_critic, is_log_dir_root=False):
n_updir = 1 + int(bool(is_critic)) - int(bool(is_log_dir_root)) #+ 2 * is_philly()
return os.path.join(_updir(log_dir, n_updir), 'auto_scripts')
def _all_mi(dir_root):
all_mi = []
for dn in os.listdir(dir_root):
try:
mi = int(os.path.basename(dn.strip()))
all_mi.append(mi)
except:
continue
return all_mi
def _dn_to_mi(dn):
try:
mi = int(os.path.basename(os.path.normpath(dn)))
return mi
except:
return None
def _mi_to_dn(dir_root, model_iter):
return os.path.join(dir_root, str(model_iter))
def _dn_to_ci(dn):
try:
ci = int(os.path.basename(os.path.normpath(dn)))
return ci
except:
return None
def _ci_to_dn(dir_root, critic_iter, queue_name):
if critic_iter is None:
return os.path.join(dir_root, queue_name)
return os.path.join(dir_root, queue_name, str(critic_iter))
def _all_critic_dn(dir_root, queue_name):
return glob.glob(os.path.join(dir_root, queue_name, '*'))
def _latest_ci(log_dir_root, model_dir_root, queue_name):
l_dns = _all_critic_dn(log_dir_root, queue_name)
max_ci = None
for dn in l_dns:
dn = os.path.normpath(dn.strip())
try:
# make sure the dirname is an int so it is actually a dir for critic
ci = int(os.path.basename(dn))
except:
continue
if not has_stopped(dn):
# make sure model is mark finished.
continue
if not os.path.exists(_ci_to_dn(model_dir_root, ci, queue_name)):
# make sure model exists
continue
if max_ci is None or max_ci < ci:
max_ci = ci
return max_ci
def _mi_info_save_fn(log_dir_root):
return os.path.join(log_dir_root, 'mi_info.npz') |
recipes/Python/576587_Sort_sections_keys_ini/recipe-576587.py | tdiprima/code | 2,023 | 12787986 | #!/usr/bin/python
# -*- coding: cp1250 -*-
__version__ = '$Id: sort_ini.py 543 2008-12-19 13:44:59Z mn $'
# author: <NAME>
import sys
USAGE = 'USAGE:\n\tsort_ini.py file.ini'
def sort_ini(fname):
"""sort .ini file: sorts sections and in each section sorts keys"""
f = file(fname)
lines = f.readlines()
f.close()
section = ''
sections = {}
for line in lines:
line = line.strip()
if line:
if line.startswith('['):
section = line
continue
if section:
try:
sections[section].append(line)
except KeyError:
sections[section] = [line, ]
if sections:
sk = sections.keys()
sk.sort()
for k in sk:
vals = sections[k]
vals.sort()
print k
print '\n'.join(vals)
print
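# Worked example (annotation added for illustration; the section and key names are made up):
# given an input file containing
#     [beta]
#     zz = 1
#     aa = 2
#     [alpha]
#     key = 3
# sort_ini() prints the sections, and the keys inside each section, in sorted order:
#     [alpha]
#     key = 3
#
#     [beta]
#     aa = 2
#     zz = 1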
if '--version' in sys.argv:
print __version__
elif len(sys.argv) < 2:
print USAGE
else:
sort_ini(sys.argv[1])
|
tests/gdb/complete.py | cohortfsllc/cohort-cocl2-sandbox | 2,151 | 12787993 | # -*- python -*-
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gdb_test
class CompleteTest(gdb_test.GdbTest):
def test_complete(self):
# Test that continue causes the debugged program to run to completion.
self.gdb.ResumeCommand('continue')
def tearDown(self):
# Test program should run to completion and return a special value.
# Intentionally bypass superclass's tearDown as it assumes gdb exits first.
self.AssertSelLdrExits(expected_returncode=123)
self.gdb.Quit()
self.gdb.Wait()
if __name__ == '__main__':
gdb_test.Main()
|
src/connectors/airwatch_devices.py | sfc-gh-kmaurya/SnowAlert | 144 | 12787996 | <gh_stars>100-1000
"""Airwatch
Collect Device information using API Key, Host, and CMSURL Authentication
"""
from runners.helpers import log
from runners.helpers import db
from runners.helpers.dbconfig import ROLE as SA_ROLE
from datetime import datetime
import requests
# raise_for_status() raises requests.exceptions.HTTPError, which is the class caught below
from requests.exceptions import HTTPError
from .utils import yaml_dump
PAGE_SIZE = 500
CONNECTION_OPTIONS = [
{
'name': 'api_key',
'title': "Airwatch API Key",
'prompt': "Your Airwatch API Key",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'host_airwatch',
'title': "Airwatch Host",
'prompt': "Your Airwatch Host",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'device_auth',
'title': "Device URL",
'prompt': "Your Airwatch CMS Auth for Device URL",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'custom_attributes_auth',
'title': "Custom Attributes URL",
'prompt': "Your Airwatch CMS Auth for Custom Attributes URL",
'type': 'str',
'secret': True,
'required': True,
},
]
LANDING_TABLE_COLUMNS_DEVICE = [
('INSERT_ID', 'NUMBER IDENTITY START 1 INCREMENT 1'),
('SNAPSHOT_AT', 'TIMESTAMP_LTZ(9)'),
('RAW', 'VARIANT'),
('EAS_IDS', 'VARIANT'),
('UDID', 'VARCHAR(256)'),
('SERIAL_NUMBER', 'VARCHAR(256)'),
('MAC_ADDRESS', 'VARCHAR(256)'),
('IMEI', 'VARCHAR(256)'),
('EAS_ID', 'VARCHAR(256)'),
('ASSET_NUMBER', 'VARCHAR(256)'),
('DEVICE_FRIENDLY_NAME', 'VARCHAR(256)'),
('LOCATION_GROUP_ID', 'VARIANT'),
('LOCATION_GROUP_NAME', 'VARCHAR(256)'),
('USER_ID', 'VARIANT'),
('USER_NAME', 'VARCHAR(256)'),
('DATA_PROTECTION_STATUS', 'NUMBER(38,0)'),
('USER_EMAIL_ADDRESS', 'VARCHAR(256)'),
('OWNERSHIP', 'VARCHAR(256)'),
('PLATFORM_ID', 'VARIANT'),
('PLATFORM', 'VARCHAR(256)'),
('MODEL_ID', 'VARIANT'),
('MODEL', 'VARCHAR(256)'),
('OPERATING_SYSTEM', 'VARCHAR(256)'),
('PHONE_NUMBER', 'VARCHAR(256)'),
('LAST_SEEN', 'TIMESTAMP_LTZ(9)'),
('ENROLLMENT_STATUS', 'VARCHAR(256)'),
('COMPLIANCE_STATUS', 'VARCHAR(256)'),
('COMPROMISED_STATUS', 'BOOLEAN'),
('LAST_ENROLLED_ON', 'TIMESTAMP_LTZ(9)'),
('LAST_COMPLIANCE_CHECK_ON', 'TIMESTAMP_LTZ(9)'),
('LAST_COMPROMISED_CHECK_ON', 'TIMESTAMP_LTZ(9)'),
('IS_SUPERVISED', 'BOOLEAN'),
('VIRTUAL_MEMORY', 'NUMBER(38,0)'),
('DEVICE_CAPACITY', 'FLOAT'),
('AVAILABLE_DEVICE_CAPACITY', 'FLOAT'),
('IS_DEVICE_DND_ENABLED', 'BOOLEAN'),
('IS_DEVICE_LOCATOR_ENABLED', 'BOOLEAN'),
('IS_CLOUD_BACKUP_ENABLED', 'BOOLEAN'),
('IS_ACTIVATION_LOCK_ENABLED', 'BOOLEAN'),
('IS_NETWORKTETHERED', 'BOOLEAN'),
('BATTERY_LEVEL', 'VARCHAR(256)'),
('IS_ROAMING', 'BOOLEAN'),
('SYSTEM_INTEGRITY_PROTECTION_ENABLED', 'BOOLEAN'),
('PROCESSOR_ARCHITECTURE', 'NUMBER(38,0)'),
('TOTAL_PHYSICAL_MEMORY', 'NUMBER(38,0)'),
('AVAILABLE_PHYSICAL_MEMORY', 'NUMBER(38,0)'),
('DEVICE_CELLULAR_NETWORK_INFO', 'VARIANT'),
('ENROLLMENT_USER_UUID', 'VARCHAR(256)'),
('ID', 'VARIANT'),
('UUID', 'VARCHAR(256)'),
]
LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES = [
('INSERT_ID', 'NUMBER IDENTITY START 1 INCREMENT 1'),
('SNAPSHOT_AT', 'TIMESTAMP_LTZ(9)'),
('RAW', 'VARIANT'),
('DEVICE_ID', 'INT'),
('UDID', 'VARCHAR(256)'),
('SERIAL_NUMBER', 'VARCHAR(256)'),
('ENROLLMENT_USER_NAME', 'VARCHAR(256)'),
('ASSET_NUMBER', 'VARCHAR(256)'),
('CUSTOM_ATTRIBUTES', 'VARIANT'),
]
def get_data(url: str, cms_auth: str, api_key: str, params: dict = {}) -> dict:
headers: dict = {
'Content-Type': 'application/json',
'aw-tenant-code': api_key,
'Accept': 'application/json',
'Authorization': cms_auth,
}
try:
log.debug(f"Preparing GET: url={url} with params={params}")
req = requests.get(url, params=params, headers=headers)
req.raise_for_status()
except HTTPError as http_err:
log.error(f"Error GET: url={url}")
log.error(f"HTTP error occurred: {http_err}")
raise
return req.json()
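# Usage sketch (annotation only; the values shown are the ones ingest() passes below):
#   get_data(f'https://{host_airwatch}/api/mdm/devices/search',
#            device_auth, api_key, {'PageSize': PAGE_SIZE, 'Page': 0})
# returns the parsed JSON body; 4xx/5xx responses are logged and re-raised via raise_for_status().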
def connect(connection_name, options):
landing_table_device = f'data.airwatch_devices_{connection_name}_device_connection'
landing_table_custom_attributes = (
f'data.airwatch_devices_{connection_name}_custom_attributes_connection'
)
comment = yaml_dump(module='airwatch_devices', **options)
db.create_table(
name=landing_table_device, cols=LANDING_TABLE_COLUMNS_DEVICE, comment=comment, rw_role=SA_ROLE
)
db.create_table(
name=landing_table_custom_attributes,
cols=LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES,
comment=comment,
rw_role=SA_ROLE
)
return {'newStage': 'finalized', 'newMessage': "Airwatch ingestion tables created!"}
def ingest(table_name, options):
host_airwatch = options['host_airwatch']
api_key = options['api_key']
device_auth = options['device_auth']
custom_attributes_auth = options['custom_attributes_auth']
ingest_type = (
'device' if table_name.endswith('_DEVICE_CONNECTION') else 'custom_attributes'
)
timestamp = datetime.utcnow()
landing_table = f'data.{table_name}'
if ingest_type == 'device':
device_params: dict = {'PageSize': PAGE_SIZE, 'Page': 0}
url = f'https://{host_airwatch}/api/mdm/devices/search'
        while True:
result: dict = get_data(url, device_auth, api_key, device_params)
devices = result['Devices']
db.insert(
landing_table,
values=[
(
timestamp,
device,
device.get('EasIds'),
device.get('Udid'),
device.get('SerialNumber'),
device.get('MacAddress'),
device.get('Imei'),
device.get('EasId'),
device.get('AssetNumber'),
device.get('DeviceFriendlyName'),
device.get('LocationGroupId'),
device.get('LocationGroupName'),
device.get('UserId'),
device.get('UserName'),
device.get('DataProtectionStatus'),
device.get('UserEmailAddress'),
device.get('Ownership'),
device.get('PlatformId'),
device.get('Platform'),
device.get('ModelId'),
device.get('Model'),
device.get('OperatingSystem'),
device.get('PhoneNumber'),
device.get('LastSeen'),
device.get('EnrollmentStatus'),
device.get('ComplianceStatus'),
device.get('CompromisedStatus'),
device.get('LastEnrolledOn'),
device.get('LastComplianceCheckOn'),
device.get('LastCompromisedCheckOn'),
device.get('IsSupervised'),
device.get('VirtualMemory'),
device.get('DeviceCapacity'),
device.get('AvailableDeviceCapacity'),
device.get('IsDeviceDNDEnabled'),
device.get('IsDeviceLocatorEnabled'),
device.get('IsCloudBackupEnabled'),
device.get('IsActivationLockEnabled'),
device.get('IsNetworkTethered'),
device.get('BatteryLevel'),
device.get('IsRoaming'),
device.get('SystemIntegrityProtectionEnabled'),
device.get('ProcessorArchitecture'),
device.get('TotalPhysicalMemory'),
device.get('AvailablePhysicalMemory'),
device.get('DeviceCellularNetworkInfo'),
device.get('EnrollmentUserUuid'),
device.get('Id'),
device.get('Uuid'),
)
for device in devices
],
select=db.derive_insert_select(LANDING_TABLE_COLUMNS_DEVICE),
columns=db.derive_insert_columns(LANDING_TABLE_COLUMNS_DEVICE),
)
log.info(f'Inserted {len(devices)} rows ({landing_table}).')
yield len(devices)
processed_total = (result['Page'] + 1) * result['PageSize']
if processed_total >= result['Total']:
break
device_params['Page'] += 1
else:
custom_device_params: dict = {'PageSize': PAGE_SIZE, 'Page': 0}
url = f'https://{host_airwatch}/api/mdm/devices/customattribute/search'
        while True:
result: dict = get_data(
url, custom_attributes_auth, api_key, custom_device_params
)
device_attributes = result['Devices']
db.insert(
landing_table,
values=[
(
timestamp,
device_attr,
device_attr.get('DeviceId'),
device_attr.get('Udid'),
device_attr.get('SerialNumber'),
device_attr.get('EnrollmentUserName'),
device_attr.get('AssetNumber'),
device_attr.get('CustomAttributes'),
)
for device_attr in device_attributes
],
select=db.derive_insert_select(LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES),
columns=db.derive_insert_columns(
LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES
),
)
log.info(f'Inserted {len(device_attributes)} rows ({landing_table}).')
yield len(device_attributes)
processed_total = (result['Page'] + 1) * result['PageSize']
if processed_total >= result['Total']:
break
custom_device_params['Page'] += 1
|
glazier/lib/logs_test.py | ItsMattL/glazier | 1,233 | 12788000 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.logs."""
import os
import zipfile
from absl.testing import absltest
from glazier.lib import constants
from glazier.lib import file_util
from glazier.lib import logs
import mock
from pyfakefs.fake_filesystem_unittest import Patcher
TEST_ID = '1A19SEL90000R90DZN7A-1234567'
class LoggingTest(absltest.TestCase):
def testCollect(self):
with Patcher() as patcher:
files = [
os.path.join(constants.SYS_LOGS_PATH, 'log1.log'),
os.path.join(constants.SYS_LOGS_PATH, 'log2.log'),
]
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
patcher.fs.create_file(files[0], contents='log1 content')
patcher.fs.create_file(files[1], contents='log2 content')
logs.Collect(r'C:\glazier.zip')
with zipfile.ZipFile(r'C:\glazier.zip', 'r') as out:
with out.open(files[1].lstrip('/')) as f2:
self.assertEqual(f2.read(), b'log2 content')
def testCollectIOErr(self):
with Patcher() as patcher:
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
with self.assertRaises(logs.LogError):
logs.Collect(constants.SYS_LOGS_PATH)
@mock.patch.object(zipfile.ZipFile, 'write', autospec=True)
def testCollectValueErr(self, wr):
wr.side_effect = ValueError('ZIP does not support timestamps before 1980')
with Patcher() as patcher:
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
patcher.fs.create_file(os.path.join(constants.SYS_LOGS_PATH, 'log1.log'))
with self.assertRaises(logs.LogError):
logs.Collect(r'C:\glazier.zip')
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
def testGetLogsPath(self, wpe):
# WinPE
wpe.return_value = True
self.assertEqual(logs.GetLogsPath(), logs.constants.WINPE_LOGS_PATH)
# Host
wpe.return_value = False
self.assertEqual(logs.GetLogsPath(), logs.constants.SYS_LOGS_PATH)
@mock.patch.object(file_util, 'CreateDirectories')
@mock.patch.object(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
@mock.patch.object(logs.logging, 'FileHandler')
def testSetup(self, fh, wpe, ii, create_dir):
ii.return_value = TEST_ID
wpe.return_value = False
logs.Setup()
create_dir.assert_called_with(r'%s\glazier.log' %
logs.constants.SYS_LOGS_PATH)
fh.assert_called_with(r'%s\glazier.log' % logs.constants.SYS_LOGS_PATH)
@mock.patch.object(file_util, 'CreateDirectories')
@mock.patch.object(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
@mock.patch.object(logs.logging, 'FileHandler')
def testSetupError(self, fh, wpe, ii, create_dir):
ii.return_value = TEST_ID
wpe.return_value = False
fh.side_effect = IOError
with self.assertRaises(logs.LogError):
logs.Setup()
self.assertTrue(create_dir.called)
if __name__ == '__main__':
absltest.main()
|
sysidentpy/polynomial_basis/tests/test_simulation.py | neylsoncrepalde/sysidentpy | 107 | 12788010 | from numpy.testing._private.utils import assert_allclose
from sysidentpy.polynomial_basis import PolynomialNarmax
from sysidentpy.utils.generate_data import get_miso_data, get_siso_data
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from numpy.testing import assert_raises
from sysidentpy.polynomial_basis import SimulatePolynomialNarmax
def test_get_index_from_regressor_code():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
regressor_space = np.array(
[
[0, 0],
[1001, 0],
[2001, 0],
[2002, 0],
[1001, 1001],
[2001, 1001],
[2002, 1001],
[2001, 2001],
[2002, 2001],
[2002, 2002],
]
)
index = s._get_index_from_regressor_code(
regressor_code=regressor_space, model_code=model
)
assert (index == np.array([1, 3, 5])).all()
def test_list_output_regressor():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
y_code = s._list_output_regressor_code(model)
assert (y_code == np.array([1001, 1001])).all()
def test_list_input_regressor():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
x_code = s._list_input_regressor_code(model)
assert (x_code == np.array([2001, 2002])).all()
def test_get_lag_from_regressor_code():
s = SimulatePolynomialNarmax()
list_regressor1 = np.array([2001, 2002])
list_regressor2 = np.array([1004, 1002])
max_lag1 = s._get_lag_from_regressor_code(list_regressor1)
max_lag2 = s._get_lag_from_regressor_code(list_regressor2)
assert max_lag1 == 2
assert max_lag2 == 4
def test_simulate():
x_train, x_valid, y_train, y_valid = get_siso_data(
n=1000, colored_noise=False, sigma=0.001, train_percentage=90
)
s = SimulatePolynomialNarmax()
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
# theta must be a numpy array of shape (n, 1) where n is the number of regressors
theta = np.array([[0.2, 0.9, 0.1]]).T
yhat, results = s.simulate(
X_test=x_valid, y_test=y_valid, model_code=model, theta=theta, plot=False
)
assert yhat.shape == (100, 1)
assert len(results) == 3
def test_simulate_theta():
x_train, x_valid, y_train, y_valid = get_siso_data(
n=1000, colored_noise=False, sigma=0.001, train_percentage=90
)
s = SimulatePolynomialNarmax(estimate_parameter=True)
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
yhat, results = s.simulate(
X_train=x_train,
y_train=y_train,
X_test=x_valid,
y_test=y_valid,
model_code=model,
plot=False,
)
theta = np.array([[0.2, 0.9, 0.1]]).T
assert_almost_equal(s.theta, theta, decimal=1)
def test_estimate_parameter():
assert_raises(TypeError, SimulatePolynomialNarmax, estimmate_parameter=1)
|
office365/planner/tasks/check_list_items.py | theodoriss/Office365-REST-Python-Client | 544 | 12788084 | from office365.planner.tasks.check_list_item import PlannerChecklistItem
from office365.runtime.client_value_collection import ClientValueCollection
class PlannerChecklistItems(ClientValueCollection):
"""The plannerChecklistItemCollection resource represents the collection of checklist items on a task.
It is an Open Type. It is part of the task details object.
The value in the property-value pair is the checklistItem object.
"""
def __init__(self, initial_values=None):
super(PlannerChecklistItems, self).__init__(PlannerChecklistItem, initial_values)
|
cellphonedb/src/tests/cellphone_flask_test_case.py | chapuzzo/cellphonedb | 278 | 12788099 | import os
import random
import string
import time
from flask_testing import TestCase
from cellphonedb.src.app.cellphonedb_app import cellphonedb_app
from cellphonedb.src.local_launchers.local_collector_launcher import LocalCollectorLauncher
from cellphonedb.utils import utils
class CellphoneFlaskTestCase(TestCase):
@staticmethod
def fixtures_dir():
current_dir = os.path.dirname(os.path.realpath(__file__))
fixtures_dir = '{}/fixtures'.format(current_dir)
return fixtures_dir
@staticmethod
def reset_db():
cellphonedb_app.cellphonedb.database_manager.database.drop_everything()
cellphonedb_app.cellphonedb.database_manager.database.create_all()
def populate_db(self):
LocalCollectorLauncher().all('collect_protein.csv', 'collect_gene.csv', 'collect_complex.csv',
'collect_interaction.csv', self.fixtures_dir())
@staticmethod
def remove_file(file):
os.remove(file)
@staticmethod
def rand_string(digits=5):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(digits))
@staticmethod
def get_test_filename(original_namefile, extension, prefix='TESTING'):
namefile = '{}_{}_{}_{}.{}'.format(prefix, original_namefile, int(time.time()),
CellphoneFlaskTestCase.rand_string(),
extension)
return namefile
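    # Example sketch (annotation added for clarity; the 'meta' base name is arbitrary):
    # get_test_filename('meta', 'csv') returns something like
    # 'TESTING_meta_1561024287_A1B2C.csv' -- prefix, base name, unix timestamp and
    # a 5-character random suffix joined by underscores.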
def assert_file_not_empty(self, file, message=''):
if not message:
message = 'File {} is empty'.format(file)
read_data = utils.read_data_table_from_file(file)
self.assertFalse(read_data.empty, message)
def assert_file_exist(self, path_file, message=''):
if not message:
            message = 'File {} did not exist'.format(path_file)
self.assertTrue(os.path.isfile(path_file), message)
|
454 4Sum II.py | krishna13052001/LeetCode | 872 | 12788157 | #!/usr/bin/python3
"""
Given four lists A, B, C, D of integer values, compute how many tuples (i, j,
k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where
0 ≤ N ≤ 500. All integers are in the range of -2^28 to 2^28 - 1 and the result
is guaranteed to be at most 2^31 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
"""
from collections import defaultdict
class Solution:
def fourSumCount(self, A, B, C, D):
"""
Brute force with map: O(N^3)
O(N^3) is pretty large, O(N^2) or O(N log N)?
O(N^2) to sum cartesian product (A, B) to construct index
similar to C, D.
        Then do an index lookup
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
N = len(A)
AB = defaultdict(int)
CD = defaultdict(int)
for i in range(N):
for j in range(N):
AB[A[i] + B[j]] += 1
CD[C[i] + D[j]] += 1
ret = 0
# O(N^2)
for gross, count in AB.items():
target = 0 - gross
ret += count * CD[target]
return ret
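# Worked trace (annotation added for illustration) for the sample input below:
#   AB = {-1: 1, 0: 2, 1: 1} from A = [1, 2], B = [-2, -1]
#   CD = {-1: 1, 1: 1, 2: 1, 4: 1} from C = [-1, 2], D = [0, 2]
#   gross = -1 needs target 1  -> 1 * CD[1]  = 1
#   gross =  0 needs target 0  -> 2 * CD[0]  = 0
#   gross =  1 needs target -1 -> 1 * CD[-1] = 1
#   total = 2, matching the expected answer asserted below.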
if __name__ == "__main__":
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
assert Solution().fourSumCount(A, B, C, D) == 2
|
21爬虫入门/day01/note.py | HaoZhang95/PythonAndMachineLearning | 937 | 12788244 | """
爬虫的用途:12306抢票,短信轰炸,数据获取
分类:通用爬虫:是搜索引擎抓取系统的重要部分,主要是把互联网上的页面下载到本地作为一个镜像备份
聚焦爬虫:对特定需求进行数据获取,会对页面的内容进行筛选,保证只抓取和需求相关的网页信息
Http:端口号80
Https: 端口号443
使用第三方的requests进行请求:支持python2和3,在urllib中2和3的语法有些不一样
"""
import requests
kw = {'wd': '长城'}
# headers伪装成一个浏览器进行的请求
# 不加这个的话,网页会识别出请求来自一个python而不是浏览器的正常请求
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = requests.get("https://www.baidu.com/s?", params=kw, headers=headers)
# 返回的是unicode格式解码的str的数据
print(response.text)
# 返回字节流的二进制数据,并根据unicode进行解码
print(response.content)
print(response.content.decode())
# 返回完整的url地址
print(response.url)
# 返回字符编码
print(response.encoding)
# 返回状态吗
print(response.status_code)
# 保存响应结果
with open('baidu.html', 'wb') as f:
f.write(response.content)
|
apps/modules/theme_setting/process/nav_setting.py | Bension/osroom | 579 | 12788260 | <gh_stars>100-1000
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2019/12/2 14:43
# @Author : <NAME>
from bson import ObjectId
from flask import request, g
from flask_babel import gettext
from apps.app import mdbs, cache
from apps.core.flask.reqparse import arg_verify
from apps.utils.format.obj_format import json_to_pyseq, objid_to_str, str_to_num
@cache.cached(timeout=86400, key_base64=False, db_type="redis")
def get_global_theme_navs(theme_name, lang):
langs = g.site_global["language"]["all_language"].keys()
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": lang,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
else:
for la in langs:
if la == lang:
continue
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": la,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
return []
def get_navs():
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
s, r = arg_verify(
[
(gettext("theme name"), theme_name),
(gettext("language"), lang)
],
required=True
)
if not s:
return r
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{"language": lang, "theme_name": theme_name}
).sort([("order", 1)])
navs = objid_to_str(navs)
data = {
"navs": navs
}
return data
def nav_setting():
"""
    Add a new theme navigation entry, or update an existing one when an id is given.
    :RETURN: a dict containing msg, msg_type and custom_status
"""
cid = request.argget.all("id")
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
display_name = request.argget.all("display_name")
order = str_to_num(request.argget.all("order", 99))
json_data = json_to_pyseq(request.argget.all("json_data"))
s, r = arg_verify(
[(gettext("Display name"), display_name),
(gettext("theme name"), theme_name),
(gettext("language"), lang),
(gettext("Json data"), json_data)
],
required=True
)
if not s:
return r
if not isinstance(json_data, dict):
data = {
"msg": gettext('Value must be of type json'),
"msg_type": "e",
"custom_status": 400
}
return data
if not cid:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].insert_one(updata)
if r.inserted_id:
data = {
"msg": gettext("Navigation added successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Failed to add navigation"),
"msg_type": "w",
"custom_status": 400
}
else:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].update_one(
{"_id": ObjectId(cid)},
{"$set": updata}
)
if r.modified_count:
data = {
"msg": gettext("Updated successfully"),
"msg_type": "s",
"custom_status": 200
}
elif r.matched_count:
data = {
"msg": gettext("Unmodified"),
"msg_type": "w",
"custom_status": 200
}
else:
data = {
"msg": gettext("Update failed"),
"msg_type": "w",
"custom_status": 400
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
def del_navs():
ids = json_to_pyseq(request.argget.all("ids"))
s, r = arg_verify(
[(gettext("ids"), ids)],
required=True
)
if not s:
return r
del_ids = []
for id in ids:
del_ids.append(ObjectId(id))
r = mdbs["sys"].dbs["theme_nav_setting"].delete_many({"_id": {"$in": del_ids}})
if r.deleted_count:
data = {
"msg": gettext("Deleted successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Delete failed"),
"msg_type": "s",
"custom_status": 200
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
|
explainaboard/tasks/re/eval_spec.py | Shadowlized/ExplainaBoard | 255 | 12788263 | # -*- coding: utf-8 -*-
import explainaboard.error_analysis as ea
import numpy
import os
def get_aspect_value(sample_list, dict_aspect_func):
dict_span2aspect_val = {}
dict_span2aspect_val_pred = {}
for aspect, fun in dict_aspect_func.items():
dict_span2aspect_val[aspect] = {}
dict_span2aspect_val_pred[aspect] = {}
    # maintain it for printing error cases
dict_sid2sent = {}
sample_id = 0
for info_list in sample_list:
#
#
#
# word_list = word_segment(sent).split(" ")
# Sentence Entities Paragraph True Relation Label Predicted Relation Label
# Sentence Length Paragraph Length Number of Entities in Ground Truth Relation Average Distance of Entities
sent, entities, paragraph, true_label, pred_label, sent_length, para_length, n_entity, avg_distance = info_list
dict_sid2sent[str(sample_id)] = ea.format4json2(entities + "|||" + sent)
sent_pos = ea.tuple2str((sample_id, true_label))
sent_pos_pred = ea.tuple2str((sample_id, pred_label))
# Sentence Length: sentALen
aspect = "sLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(sent_length)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(sent_length)
# Paragraph Length: pLen
aspect = "pLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(para_length)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(para_length)
# Number of Entity: nEnt
aspect = "nEnt"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(n_entity)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(n_entity)
# Average Distance: avgDist
aspect = "avgDist"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(avg_distance)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(avg_distance)
# Tag: tag
aspect = "tag" ############## MUST Be Gold Tag for text classification task
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = true_label
dict_span2aspect_val_pred[aspect][sent_pos_pred] = true_label
sample_id += 1
# print(dict_span2aspect_val["bleu"])
return dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent
def evaluate(task_type="ner", analysis_type="single", systems=[], dataset_name = 'dataset_name', model_name = 'model_name', output_filename="./output.json", is_print_ci=False,
is_print_case=False, is_print_ece=False):
path_text = systems[0] if analysis_type == "single" else ""
path_comb_output = "model_name" + "/" + path_text.split("/")[-1]
dict_aspect_func, dict_precomputed_path, obj_json = ea.load_task_conf(task_dir=os.path.dirname(__file__))
sample_list, sent_list, entity_list, true_list, pred_list = file_to_list(path_text)
error_case_list = []
if is_print_case:
error_case_list = get_error_case(sent_list, entity_list, true_list, pred_list)
print(" -*-*-*- the number of error casse:\t", len(error_case_list))
dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent = get_aspect_value(sample_list, dict_aspect_func)
holistic_performance = ea.accuracy(true_list, pred_list)
holistic_performance = format(holistic_performance, '.3g')
# Confidence Interval of Holistic Performance
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = ea.compute_confidence_interval_acc(true_list, pred_list, n_times=1000)
dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent = get_aspect_value(sample_list, dict_aspect_func)
print("------------------ Holistic Result----------------------")
print(holistic_performance)
# print(f1(list_true_tags_token, list_pred_tags_token)["f1"])
dict_bucket2span = {}
dict_bucket2span_pred = {}
dict_bucket2f1 = {}
aspect_names = []
for aspect, func in dict_aspect_func.items():
# print(aspect, dict_span2aspect_val[aspect])
dict_bucket2span[aspect] = ea.select_bucketing_func(func[0], func[1], dict_span2aspect_val[aspect])
# print(aspect, dict_bucket2span[aspect])
# exit()
dict_bucket2span_pred[aspect] = ea.bucket_attribute_specified_bucket_interval(dict_span2aspect_val_pred[aspect],
dict_bucket2span[aspect].keys())
# dict_bucket2span_pred[aspect] = __select_bucketing_func(func[0], func[1], dict_span2aspect_val_pred[aspect])
dict_bucket2f1[aspect] = get_bucket_acc_with_error_case(dict_bucket2span[aspect],
dict_bucket2span_pred[aspect], dict_sid2sent,
is_print_ci, is_print_case)
aspect_names.append(aspect)
print("aspect_names: ", aspect_names)
print("------------------ Breakdown Performance")
for aspect in dict_aspect_func.keys():
ea.print_dict(dict_bucket2f1[aspect], aspect)
print("")
# Calculate databias w.r.t numeric attributes
dict_aspect2bias = {}
for aspect, aspect2Val in dict_span2aspect_val.items():
if type(list(aspect2Val.values())[0]) != type("string"):
dict_aspect2bias[aspect] = numpy.average(list(aspect2Val.values()))
print("------------------ Dataset Bias")
for k, v in dict_aspect2bias.items():
print(k + ":\t" + str(v))
print("")
dict_fine_grained = {}
for aspect, metadata in dict_bucket2f1.items():
dict_fine_grained[aspect] = []
for bucket_name, v in metadata.items():
# print("---------debug--bucket name old---")
# print(bucket_name)
bucket_name = ea.beautify_interval(bucket_name)
# print("---------debug--bucket name new---")
# print(bucket_name)
# bucket_value = format(v[0]*100,'.4g')
bucket_value = format(v[0], '.4g')
n_sample = v[1]
confidence_low_bucket = format(v[2], '.4g')
confidence_up_bucket = format(v[3], '.4g')
bucket_error_case = v[4]
# instantiation
dict_fine_grained[aspect].append({"bucket_name": bucket_name, "bucket_value": bucket_value, "num": n_sample,
"confidence_low": confidence_low_bucket,
"confidence_up": confidence_up_bucket,
"bucket_error_case": bucket_error_case})
obj_json["task"] = task_type
obj_json["data"]["language"] = "English"
obj_json["data"]["name"] = dataset_name
obj_json["data"]["bias"] = dict_aspect2bias
obj_json["data"]["output"] = path_comb_output
obj_json["model"]["name"] = model_name
obj_json["model"]["results"]["overall"]["error_case"] = error_case_list
obj_json["model"]["results"]["overall"]["performance"] = holistic_performance
obj_json["model"]["results"]["overall"]["confidence_low"] = confidence_low
obj_json["model"]["results"]["overall"]["confidence_up"] = confidence_up
obj_json["model"]["results"]["fine_grained"] = dict_fine_grained
raise NotImplementedError('RE is not fully implemented yet, see below')
# ece = 0
# dic_calibration = None
# if is_print_ece:
# ece, dic_calibration = process_all(path_text,
# size_of_bin=10, dataset=corpus_type, model=model_name)
# obj_json["model"]["results"]["calibration"] = dic_calibration
# # print(dic_calibration)
# ea.save_json(obj_json, output_filename)
#
# def main():
#
# parser = argparse.ArgumentParser(description='Interpretable Evaluation for NLP')
#
#
# parser.add_argument('--task', type=str, required=True,
# help="absa")
#
# parser.add_argument('--ci', type=str, required=False, default= False,
# help="True|False")
#
# parser.add_argument('--case', type=str, required=False, default= False,
# help="True|False")
#
# parser.add_argument('--ece', type=str, required=False, default= False,
# help="True|False")
#
#
# parser.add_argument('--type', type=str, required=False, default="single",
# help="analysis type: single|pair|combine")
# parser.add_argument('--systems', type=str, required=True,
# help="the directories of system outputs. Multiple one should be separated by comma, for example, system1,system2 (no space)")
#
# parser.add_argument('--output', type=str, required=True,
# help="analysis output file")
# args = parser.parse_args()
#
#
# is_print_ci = args.ci
# is_print_case = args.case
# is_print_ece = args.ece
#
# task = args.task
# analysis_type = args.type
# systems = args.systems.split(",")
# output = args.output
#
#
# print("task", task)
# print("type", analysis_type)
# print("systems", systems)
# # sample_list = file_to_list_re(systems[0])
# # print(sample_list[0])
# evaluate(task_type=task, analysis_type=analysis_type, systems=systems, output=output, is_print_ci = is_print_ci, is_print_case = is_print_case, is_print_ece = is_print_ece)
#
# # python eval_spec.py --task re --systems ./test_re.tsv --output ./a.json
# if __name__ == '__main__':
# main()
def get_bucket_acc_with_error_case(dict_bucket2span, dict_bucket2span_pred, dict_sid2sent, is_print_ci, is_print_case):
# The structure of span_true or span_pred
# 2345|||Positive
# 2345 represents sentence id
# Positive represents the "label" of this instance
dict_bucket2f1 = {}
for bucket_interval, spans_true in dict_bucket2span.items():
spans_pred = []
if bucket_interval not in dict_bucket2span_pred.keys():
raise ValueError("Predict Label Bucketing Errors")
else:
spans_pred = dict_bucket2span_pred[bucket_interval]
# loop over samples from a given bucket
error_case_bucket_list = []
if is_print_case:
for info_true, info_pred in zip(spans_true, spans_pred):
sid_true, label_true = info_true.split("|||")
sid_pred, label_pred = info_pred.split("|||")
if sid_true != sid_pred:
continue
sent_entities = dict_sid2sent[sid_true]
if label_true != label_pred:
error_case_info = label_true + "|||" + label_pred + "|||" + sent_entities
error_case_bucket_list.append(error_case_info)
accuracy_each_bucket = ea.accuracy(spans_pred, spans_true)
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = ea.compute_confidence_interval_acc(spans_pred, spans_true)
dict_bucket2f1[bucket_interval] = [accuracy_each_bucket, len(spans_true), confidence_low, confidence_up,
error_case_bucket_list]
return ea.sort_dict(dict_bucket2f1)
def get_error_case(sent_list, entity_list, true_label_list, pred_label_list):
error_case_list = []
for sent, entities, true_label, pred_label in zip(sent_list, entity_list, true_label_list, pred_label_list):
if true_label != pred_label:
error_case_list.append(true_label + "|||" + pred_label + "|||" + entities + "|||" + ea.format4json2(sent))
return error_case_list
def file_to_list(file_path):
sample_list = []
fin = open(file_path, "r")
true_list = []
pred_list = []
sent_list = []
entity_list = []
for idx, line in enumerate(fin):
if idx == 0:
continue
info_list = line.rstrip("\n").split("\t")
sample_list.append([info for info in info_list])
true_list.append(info_list[3])
pred_list.append(info_list[4])
sent_list.append(info_list[0])
entity_list.append(info_list[1])
return sample_list, sent_list, entity_list, true_list, pred_list |
examples/wsecho.py | VladimirKuzmin/werkzeug | 4,200 | 12788332 | <reponame>VladimirKuzmin/werkzeug
"""Shows how you can implement a simple WebSocket echo server using the
wsproto library.
"""
from werkzeug.exceptions import InternalServerError
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request
from werkzeug.wrappers import Response
from wsproto import ConnectionType
from wsproto import WSConnection
from wsproto.events import AcceptConnection
from wsproto.events import CloseConnection
from wsproto.events import Message
from wsproto.events import Ping
from wsproto.events import Request as WSRequest
from wsproto.events import TextMessage
from wsproto.frame_protocol import CloseReason
@Request.application
def websocket(request):
# The underlying socket must be provided by the server. Gunicorn and
# Werkzeug's dev server are known to support this.
stream = request.environ.get("werkzeug.socket")
if stream is None:
stream = request.environ.get("gunicorn.socket")
if stream is None:
raise InternalServerError()
# Initialize the wsproto connection. Need to recreate the request
# data that was read by the WSGI server already.
ws = WSConnection(ConnectionType.SERVER)
in_data = b"GET %s HTTP/1.1\r\n" % request.path.encode("utf8")
for header, value in request.headers.items():
in_data += f"{header}: {value}\r\n".encode()
in_data += b"\r\n"
ws.receive_data(in_data)
running = True
while True:
out_data = b""
for event in ws.events():
if isinstance(event, WSRequest):
out_data += ws.send(AcceptConnection())
elif isinstance(event, CloseConnection):
out_data += ws.send(event.response())
running = False
elif isinstance(event, Ping):
out_data += ws.send(event.response())
elif isinstance(event, TextMessage):
# echo the incoming message back to the client
if event.data == "quit":
out_data += ws.send(
CloseConnection(CloseReason.NORMAL_CLOSURE, "bye")
)
running = False
else:
out_data += ws.send(Message(data=event.data))
if out_data:
stream.send(out_data)
if not running:
break
in_data = stream.recv(4096)
ws.receive_data(in_data)
# The connection will be closed at this point, but WSGI still
# requires a response.
return Response("", status=204)
if __name__ == "__main__":
run_simple("localhost", 5000, websocket)
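# Testing note (annotation only; assumes some WebSocket client is available):
# with the server running, any WebSocket client pointed at ws://localhost:5000/
# (for example `python -m websockets ws://localhost:5000/`, if the third-party
# `websockets` package is installed) gets its text messages echoed back, and
# sending the text "quit" closes the connection.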
|
desktop/core/ext-py/future-0.16.0/docs/futureext.py | kokosing/hue | 908 | 12788361 | # -*- coding: utf-8 -*-
"""
Python-Future Documentation Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for automatically documenting filters and tests.
Based on the Jinja2 documentation extensions.
:copyright: Copyright 2008 by <NAME>.
:license: BSD.
"""
import collections
import os
import re
import inspect
from itertools import islice
from types import BuiltinFunctionType
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import prepare_docstring
from sphinx.application import TemplateBridge
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
class FutureStyle(Style):
title = 'Future Style'
default_style = ""
styles = {
Comment: 'italic #0B6A94', # was: #0066ff',
Comment.Preproc: 'noitalic #B11414',
Comment.Special: 'italic #505050',
Keyword: 'bold #D15E27',
Keyword.Type: '#D15E27',
Operator.Word: 'bold #B80000',
Name.Builtin: '#333333',
Name.Function: '#333333',
Name.Class: 'bold #333333',
Name.Namespace: 'bold #333333',
Name.Entity: 'bold #363636',
Name.Attribute: '#686868',
Name.Tag: 'bold #686868',
Name.Decorator: '#686868',
String: '#AA891C',
Number: '#444444',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
def setup(app):
pass
# uncomment for inline toc. links are broken unfortunately
##app.connect('doctree-resolved', inject_toc)
|
src/__init__.py | cynth-s/Information_retrieval | 191 | 12788374 | <reponame>cynth-s/Information_retrieval
__author__ = '<NAME>'
__all__ = ['invdx', 'parse', 'query', 'rank']
|
attic/concurrency/flags/getthreadpool.py | matteoshen/example-code | 5,651 | 12788391 | <reponame>matteoshen/example-code
from concurrent import futures
import sys
import requests
import countryflags as cf
import time
from getsequential import fetch
DEFAULT_NUM_THREADS = 100
GLOBAL_TIMEOUT = 300 # seconds
times = {}
def main(source, num_threads):
pool = futures.ThreadPoolExecutor(num_threads)
pending = {}
t0 = time.time()
# submit all jobs
for iso_cc in sorted(cf.cc2name):
print('get:', iso_cc)
times[iso_cc] = [time.time() - t0]
job = pool.submit(fetch, iso_cc, source)
pending[job] = iso_cc
to_download = len(pending)
downloaded = 0
# get results as jobs are done
for job in futures.as_completed(pending, timeout=GLOBAL_TIMEOUT):
try:
octets, file_name = job.result()
times[pending[job]].append(time.time() - t0)
downloaded += 1
print('\t--> {}: {:5d} bytes'.format(file_name, octets))
except Exception as exc:
print('\t***', pending[job], 'generated an exception:', exc)
ratio = downloaded / to_download
print('{} of {} downloaded ({:.1%})'.format(downloaded, to_download, ratio))
for iso_cc in sorted(times):
start, end = times[iso_cc]
print('{}\t{:.6g}\t{:.6g}'.format(iso_cc, start, end))
if __name__ == '__main__':
import argparse
source_names = ', '.join(sorted(cf.SOURCE_URLS))
parser = argparse.ArgumentParser(description='Download flag images.')
parser.add_argument('source', help='one of: ' + source_names)
parser.add_argument('-t', '--threads', type=int, default=DEFAULT_NUM_THREADS,
help='number of threads (default: %s)' % DEFAULT_NUM_THREADS)
args = parser.parse_args()
main(args.source, args.threads)
"""
From CIA, 1 thread:
real 2m0.832s
user 0m4.685s
sys 0m0.366s
"""
|
deep_qa/testing/test_case.py | richarajpal/deep_qa | 459 | 12788397 | # pylint: disable=invalid-name,protected-access
from copy import deepcopy
from unittest import TestCase
import codecs
import gzip
import logging
import os
import shutil
from keras import backend as K
import numpy
from numpy.testing import assert_allclose
from deep_qa.common.checks import log_keras_version_info
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.common.params import Params
class DeepQaTestCase(TestCase): # pylint: disable=too-many-public-methods
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
TRAIN_BACKGROUND = TEST_DIR + 'train_background'
VALIDATION_BACKGROUND = TEST_DIR + 'validation_background'
SNLI_FILE = TEST_DIR + 'snli_file'
PRETRAINED_VECTORS_FILE = TEST_DIR + 'pretrained_glove_vectors_file'
PRETRAINED_VECTORS_GZIP = TEST_DIR + 'pretrained_glove_vectors_file.gz'
def setUp(self):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.DEBUG)
log_keras_version_info()
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.TEST_DIR)
TextInstance.tokenizer = tokenizers["words"](Params({}))
K.clear_session()
def get_model_params(self, additional_arguments=None):
params = Params({})
params['save_models'] = False
params['model_serialization_prefix'] = self.TEST_DIR
params['train_files'] = [self.TRAIN_FILE]
params['validation_files'] = [self.VALIDATION_FILE]
params['embeddings'] = {'words': {'dimension': 6}, 'characters': {'dimension': 2}}
params['encoder'] = {"default": {'type': 'bow'}}
params['num_epochs'] = 1
params['validation_split'] = 0.0
if additional_arguments:
for key, value in additional_arguments.items():
params[key] = deepcopy(value)
return params
def get_model(self, model_class, additional_arguments=None):
params = self.get_model_params(additional_arguments)
return model_class(params)
def ensure_model_trains_and_loads(self, model_class, args: Params):
args['save_models'] = True
# Our loading tests work better if you're not using data generators. Unless you
# specifically request it in your test, we'll avoid using them here, and if you _do_ use
# them, we'll skip some of the stuff below that isn't compatible.
args.setdefault('data_generator', None)
model = self.get_model(model_class, args)
model.train()
# load the model that we serialized
loaded_model = self.get_model(model_class, args)
loaded_model.load_model()
# verify that original model and the loaded model predict the same outputs
if model._uses_data_generators():
# We shuffle the data in the data generator. Instead of making that logic more
# complicated, we'll just pass on the loading tests here. See comment above.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(model.validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
# We should get the same result if we index the data from the original model and the loaded
# model.
_, indexed_validation_arrays = loaded_model.load_data_arrays(model.validation_files)
if model._uses_data_generators():
# As above, we'll just pass on this.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(indexed_validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
return model, loaded_model
@staticmethod
def one_hot(index, length):
vector = numpy.zeros(length)
vector[index] = 1
return vector
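    # Example sketch (annotation added for clarity):
    #   one_hot(2, 4) -> array([0., 0., 1., 0.])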
def write_snli_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ttext 1\thypothesis1\tentails\n')
train_file.write('2\ttext 2\thypothesis2\tcontradicts\n')
train_file.write('3\ttext3\thypothesis3\tentails\n')
train_file.write('4\ttext 4\thypothesis4\tneutral\n')
train_file.write('5\ttext5\thypothesis 5\tentails\n')
train_file.write('6\ttext6\thypothesis6\tcontradicts\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\ttext 1 with extra words\thypothesis1\tentails\n')
validation_file.write('2\ttext 2\tlonger hypothesis 2\tcontradicts\n')
validation_file.write('3\ttext3\thypothesis withreallylongfakeword\tentails\n')
def write_sequence_tagging_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('cats###N\tare###V\tanimals###N\t.###N\n')
train_file.write('dogs###N\tare###V\tanimals###N\t.###N\n')
train_file.write('snakes###N\tare###V\tanimals###N\t.###N\n')
train_file.write('birds###N\tare###V\tanimals###N\t.###N\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('horses###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('blue###N\tcows###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('monkeys###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('caterpillars###N\tare###V\tanimals###N\t.###N\n')
def write_verb_semantics_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
train_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
train_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
validation_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
validation_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
def write_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq1a1\t0\n')
validation_file.write('2\tq1a2\t1\n')
validation_file.write('3\tq1a3\t0\n')
validation_file.write('4\tq1a4\t0\n')
validation_file.write('5\tq2a1\t0\n')
validation_file.write('6\tq2a2\t0\n')
validation_file.write('7\tq2a3\t1\n')
validation_file.write('8\tq2a4\t0\n')
validation_file.write('9\tq3a1\t0\n')
validation_file.write('10\tq3a2\t0\n')
validation_file.write('11\tq3a3\t0\n')
validation_file.write('12\tq3a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence1\t0\n')
train_file.write('2\tsentence2 word2 word3\t1\n')
train_file.write('3\tsentence3 word2\t0\n')
train_file.write('4\tsentence4\t1\n')
train_file.write('5\tsentence5\t0\n')
train_file.write('6\tsentence6\t0\n')
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as test_file:
test_file.write('1\ttestsentence1\t0\n')
test_file.write('2\ttestsentence2 word2 word3\t1\n')
test_file.write('3\ttestsentence3 word2\t0\n')
test_file.write('4\ttestsentence4\t1\n')
test_file.write('5\ttestsentence5 word4\t0\n')
test_file.write('6\ttestsentence6\t0\n')
def write_additional_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq4a1\t0\n')
validation_file.write('2\tq4a2\t1\n')
validation_file.write('3\tq4a3\t0\n')
validation_file.write('4\tq4a4\t0\n')
validation_file.write('5\tq5a1\t0\n')
validation_file.write('6\tq5a2\t0\n')
validation_file.write('7\tq5a3\t1\n')
validation_file.write('8\tq5a4\t0\n')
validation_file.write('9\tq6a1\t0\n')
validation_file.write('10\tq6a2\t0\n')
validation_file.write('11\tq6a3\t0\n')
validation_file.write('12\tq6a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence7\t0\n')
train_file.write('2\tsentence8 word4 word5\t1\n')
train_file.write('3\tsentence9 word4\t0\n')
train_file.write('4\tsentence10\t1\n')
train_file.write('5\tsentence11 word3 word2\t0\n')
train_file.write('6\tsentence12\t0\n')
def write_question_answer_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion1\tanswer1###answer2\t0\n')
with codecs.open(self.VALIDATION_BACKGROUND, 'w', 'utf-8') as validation_background:
validation_background.write('1\tvb1\tvb2\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ta b e i d\tanswer 1###answer2\t0\n')
train_file.write('2\ta b c d\tanswer3###answer4\t1\n')
train_file.write('3\te d w f d s a b\tanswer5###answer6###answer9\t2\n')
train_file.write('4\te fj k w q\tanswer7###answer8\t0\n')
with codecs.open(self.TRAIN_BACKGROUND, 'w', 'utf-8') as train_background:
train_background.write('1\tsb1\tsb2\n')
train_background.write('2\tsb3\n')
train_background.write('3\tsb4\n')
train_background.write('4\tsb5\tsb6\n')
def write_who_did_what_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tHe went to the store to buy goods, because he wanted to.'
'\tHe bought xxxxx\tgoods###store\t0\n')
validation_file.write('1\tShe hiking on the weekend with her friend.'
'\tShe went xxxxx\thiking###friend###weekend###her friend\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tFred hit the ball with the bat.\tHe hit the ball with the xxxxx\tbat###ball\t0\n')
train_file.write('1\tShe walked the dog today.\tThe xxxxx was walked today.\tShe###dog###today\t1\n')
train_file.write('1\tHe kept typing at his desk.\tHe typed at his xxxxx\tdesk###kept\t0\n')
train_file.write('1\tThe pup at the bone but not the biscuit.\tThe pup ate the xxxxx\t'
'bone###biscuit\t0\n')
def write_tuple_inference_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tss<>v f d<>oo o<>c$$$s<>v ff<>o i###ss r<>v<>o e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tss<>v<>oo o<>c$$$s e<>ff<>o ii i###ss r<>rr<>o e<>o ee\t'
'ss<>ve gg<>o sd<>ccs\t0\n')
train_file.write('2\tsg g<>vg<>oo o<>c$$$s e<>v ff<>o ii i###ss<>v rr<>o e<>o ee'
'###hh kk<>hdj d<>hh\tss ss<>ve gg<>o sd<>ccs\t2\n')
train_file.write('3\ts r<>v f d<>o ss<>c$$$s e<>v ff<>o ss i$$$r<>v ss<>s o e<>o ee\t'
'ss ss<>v g<>o sd<>ccs\t0\n')
train_file.write('4\tty y<>cf fv ss<>s ss<>c$$$rt e<>vv f<>oss i i###ss<>v<>os e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t1\n')
def write_span_prediction_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion 1 with extra words\t'
'passage with answer and a reallylongword\t13,18\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tquestion 1\tpassage1 with answer1\t14,20\n')
train_file.write('2\tquestion 2\tpassage2 with answer2\t0,8\n')
train_file.write('3\tquestion 3\tpassage3 with answer3\t9,13\n')
train_file.write('4\tquestion 4\tpassage4 with answer4\t14,20\n')
def write_sentence_selection_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tWhere is Paris?\tParis is the capital of France.###It '
'is by the Seine.###It is quite old###this is a '
'very long sentence meant to test that loading '
'and padding works properly in the model.\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tWho won Super Bowl 50?\tSuper Bowl 50 was in Santa '
'Clara.###The Patriots beat the Broncos.\t1\n')
train_file.write('2\tWhen is Thanksgiving?\tFolk tales tell '
'of the Pilgrims celebrating the holiday.###Many '
'people eat a lot.###It is in November.\t2\n')
train_file.write('3\tWhen were computers invented?\tThe ancient Chinese used '
'abacuses.###Alan Turing cracked Enigma.###It is hard to '
'pinpoint an inventor of the computer.\t2\n')
def write_pretrained_vector_files(self):
# write the file
with codecs.open(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8') as vector_file:
vector_file.write('word2 0.21 0.57 0.51 0.31\n')
vector_file.write('sentence1 0.81 0.48 0.19 0.47\n')
# compress the file
with open(self.PRETRAINED_VECTORS_FILE, 'rb') as f_in:
with gzip.open(self.PRETRAINED_VECTORS_GZIP, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def write_sentence_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write("This is a sentence for language modelling.\n")
train_file.write("Here's another one for language modelling.\n")
def write_original_snli_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# pylint: disable=line-too-long
train_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
# pylint: disable=line-too-long
validation_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
|
cpmoptimize/recompiler.py | borzunov/cpmoptimize | 121 | 12788400 | <reponame>borzunov/cpmoptimize<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import byteplay
from matcode import *
class RecompilationError(Exception):
def __init__(self, message, state):
self.message = "Can't optimize loop: %s" % message
if state.lineno is not None:
self.message += ' at line %s' % state.lineno
self.message += ' in %s' % state.settings['function_info']
def __str__(self):
return self.message
class UnpredictableArgsError(Exception):
pass
class RecompilerState(object):
def __init__(self, settings):
self._settings = settings
self.lineno = settings['head_lineno']
self.stack = []
self._content = []
# List of straight references of all used variables
self._vars_storage = []
# List of indexes of really existing variables in
# self._vars_storage (we need to save their values at the end of
# the loop)
self._real_vars_indexes = []
# Map from a straight variable reference to a pair of variable
# index in a unified storage (actually in
# self._vars_storage) and its effective unified
# reference (VAR, index) or (CONST, const_no)
self._vars_map = {}
        # Storage for folded instruction sets of constants. These
        # instructions will be executed once at run time. Calculated
# values will be inserted into matrices.
self._consts = []
@property
def settings(self):
return self._settings
@property
def content(self):
return self._content
@property
def consts(self):
return self._consts
@property
def vars_storage(self):
return self._vars_storage
@property
def real_vars_indexes(self):
return self._real_vars_indexes
_real_folded_arr = '__cpm::folded'
@property
def real_folded_arr(self):
return self._real_folded_arr
def add_const(self, straight):
arg_type, arg = straight
if arg_type == FOLD_TOS:
lines = self.stack[-arg - 1]
if lines is None:
raise ValueError(
'Unpredictable value to fold for FOLD_TOS'
)
elif arg_type == FOLD:
lines = arg
else:
raise ValueError((
"Can't add constant from argument with type %s " +
"to matrix code"
) % arg_type)
index = len(self._consts)
self._consts.append(lines)
return CONST, index
def add_var(self, straight, mutation):
        # If a variable was changed at least once in the loop's body, we need
        # to mark it as mutable at the beginning of compilation.
# During the compilation its value can become predictable.
try:
index, unified = self._vars_map[straight]
if mutation and unified[0] != VAR:
unified = VAR, index
self.store_var(straight, unified)
except KeyError:
index = len(self._vars_storage)
self._vars_storage.append(straight)
var_type = straight[0]
if var_type in (NAME, GLOBAL, FAST, DEREF):
self._real_vars_indexes.append(index)
if mutation:
unified = VAR, index
else:
load_oper = VARIABLE_OPERATION_MAP[var_type][0]
unified = self.add_const((FOLD, [
(load_oper, straight[1]),
]))
self._vars_map[straight] = [index, unified]
return unified
def _translate_arg(self, arg):
# Translate argument of types used in matcode generation to
# argument with type VALUE, CONST or VAR (make unified
# reference from straight)
arg_type = arg[0]
if arg_type in (VALUE, CONST, PARAM):
return arg
if arg_type == FOLD_TOS:
return self.add_const(arg)
if arg_type not in VARIABLE_OPERATION_MAP.keys() + [COUNTER, TOS]:
raise ValueError((
"Can't add variable from argument with type %s " +
"to matrix code"
) % arg_type)
if arg_type == TOS:
# If argument type was TOS, translate it to argument with
# type STACK first (make absolute reference from relative)
arg = STACK, len(self.stack) - 1 - arg[1]
return self.add_var(arg, True)
def append(self, *instrs):
for instr in instrs:
oper = instr[0]
args = map(self._translate_arg, instr[1:])
self._content.append([oper] + args)
def load_var(self, straight):
return self._vars_map[straight][1]
def store_var(self, straight, unified):
self._vars_map[straight][1] = unified
def handle_nop(state, instr):
pass
def handle_pop_top(state, instr):
state.stack.pop()
def create_rot(count):
def handle_rot(state, instr):
for index in xrange(-1, count - 1):
if state.stack[-index - 2] is None:
state.append(
[MOV, (TOS, index), (TOS, index + 1)],
)
if state.stack[-1] is None:
state.append(
[MOV, (TOS, count - 1), (TOS, -1)],
)
if state.settings['opt_clear_stack']:
            # Stack clearing is needed because the program will work
            # slower if big values remain on the stack
state.append(
[MOV, (TOS, -1), (VALUE, 0)],
)
state.stack[-count:] = (
[state.stack[-1]] + state.stack[-count:-1]
)
return handle_rot
def create_dup(count):
def handle_dup(state, instr):
for index in xrange(count):
if state.stack[-count + index] is None:
state.append(
[MOV, (TOS, index - count), (TOS, index)],
)
state.stack += state.stack[-count:]
return handle_dup
def handle_dup_topx(state, instr):
create_dup(instr[1])(state, instr)
def handle_unary_negative(state, instr):
if state.stack[-1] is not None:
state.stack[-1].append(instr)
else:
state.append(
[MOV, (TOS, -1), (TOS, 0)],
[MOV, (TOS, 0), (VALUE, 0)],
[SUB, (TOS, 0), (TOS, -1)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, -1), (VALUE, 0)],
)
def handle_unary_const(state, instr):
if state.stack[-1] is not None:
state.stack[-1].append(instr)
else:
raise UnpredictableArgsError
def handle_binary_multiply(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
elif state.stack[-2] is not None:
state.append(
[MUL, (TOS, 0), (FOLD_TOS, 1)],
[MOV, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack[-2] = None
state.stack.pop()
elif state.stack[-1] is not None:
state.append(
[MUL, (TOS, 1), (FOLD_TOS, 0)],
)
state.stack.pop()
else:
raise RecompilationError((
'Multiplication of two unpredictable values is unsupported'
), state)
def handle_binary_add(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
elif state.stack[-2] is not None:
state.append(
[ADD, (TOS, 0), (FOLD_TOS, 1)],
[MOV, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack[-2] = None
state.stack.pop()
elif state.stack[-1] is not None:
state.append(
[ADD, (TOS, 1), (FOLD_TOS, 0)],
)
state.stack.pop()
else:
state.append(
[ADD, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack.pop()
def handle_binary_subtract(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
elif state.stack[-2] is not None:
state.append(
[SUB, (TOS, 0), (FOLD_TOS, 1)],
[MOV, (TOS, 1), (VALUE, 0)],
[SUB, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack[-2] = None
state.stack.pop()
elif state.stack[-1] is not None:
state.append(
[SUB, (TOS, 1), (FOLD_TOS, 0)],
)
state.stack.pop()
else:
state.append(
[SUB, (TOS, 1), (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.stack.pop()
def handle_binary_const(state, instr):
if state.stack[-2] is not None and state.stack[-1] is not None:
state.stack[-2] += state.stack[-1] + [instr]
state.stack.pop()
else:
raise UnpredictableArgsError
def handle_load_const(state, instr):
arg = instr[1]
if not isinstance(arg, state.settings['types']):
allowed_types = ', '.join(map(repr, state.settings['types']))
raise RecompilationError((
'Constant %s has an unallowed type %s instead of ' +
'one of allowed types: %s'
) % (repr(arg), type(arg), allowed_types), state)
state.stack.append([instr])
def handle_load_var(state, instr):
oper, name = instr
straight = VARIABLE_TYPE_MAP[oper][0], name
unified = state.load_var(straight)
if unified[0] == CONST:
state.stack.append([
(byteplay.LOAD_FAST, state.real_folded_arr),
(byteplay.LOAD_CONST, unified[1]),
(byteplay.BINARY_SUBSCR, None),
])
else:
state.append(
[MOV, (TOS, -1), straight],
)
state.stack.append(None)
def handle_store_var(state, instr):
oper, name = instr
straight = VARIABLE_TYPE_MAP[oper][0], name
lines = state.stack[-1]
if lines is not None:
if (
len(lines) == 3 and
lines[0] == (byteplay.LOAD_FAST, state.real_folded_arr) and
lines[1][0] == byteplay.LOAD_CONST and
isinstance(lines[1][1], int) and
lines[2] == (byteplay.BINARY_SUBSCR, None)
):
const_ref = CONST, lines[1][1]
else:
const_ref = state.add_const((FOLD_TOS, 0))
state.append(
[MOV, straight, const_ref],
)
state.store_var(straight, const_ref)
else:
state.append(
[MOV, straight, (TOS, 0)],
)
if state.settings['opt_clear_stack']:
state.append(
[MOV, (TOS, 0), (VALUE, 0)],
)
state.store_var(straight, straight)
state.stack.pop()
LOAD_OPERATIONS, STORE_OPERATIONS = zip(*VARIABLE_OPERATION_MAP.values())
BYTECODE_HANDLERS = [
(handle_nop, [byteplay.NOP]),
(handle_pop_top, [byteplay.POP_TOP]),
(create_rot(2), [byteplay.ROT_TWO]),
(create_rot(3), [byteplay.ROT_THREE]),
(create_rot(4), [byteplay.ROT_FOUR]),
(create_dup(1), [byteplay.DUP_TOP]),
(handle_nop, [byteplay.UNARY_POSITIVE]),
(handle_unary_negative, [byteplay.UNARY_NEGATIVE]),
(handle_unary_const, [
byteplay.UNARY_NOT, byteplay.UNARY_INVERT,
]),
(handle_binary_const, [byteplay.BINARY_POWER]),
(handle_binary_multiply, [byteplay.BINARY_MULTIPLY]),
(handle_binary_const, [
byteplay.BINARY_DIVIDE, byteplay.BINARY_FLOOR_DIVIDE,
byteplay.BINARY_TRUE_DIVIDE, byteplay.BINARY_MODULO,
]),
(handle_binary_add, [byteplay.BINARY_ADD]),
(handle_binary_subtract, [byteplay.BINARY_SUBTRACT]),
(handle_binary_const, [
byteplay.BINARY_LSHIFT, byteplay.BINARY_RSHIFT,
byteplay.BINARY_AND, byteplay.BINARY_XOR, byteplay.BINARY_OR,
]),
(handle_binary_const, [byteplay.INPLACE_POWER]),
(handle_binary_multiply, [byteplay.INPLACE_MULTIPLY]),
(handle_binary_const, [
byteplay.INPLACE_DIVIDE, byteplay.INPLACE_FLOOR_DIVIDE,
byteplay.INPLACE_TRUE_DIVIDE, byteplay.INPLACE_MODULO,
]),
(handle_binary_add, [byteplay.INPLACE_ADD]),
(handle_binary_subtract, [byteplay.INPLACE_SUBTRACT]),
(handle_binary_const, [
byteplay.INPLACE_LSHIFT, byteplay.INPLACE_RSHIFT,
byteplay.INPLACE_AND, byteplay.INPLACE_XOR, byteplay.INPLACE_OR,
]),
(handle_dup_topx, [byteplay.DUP_TOPX]),
(handle_load_const, [byteplay.LOAD_CONST]),
(handle_load_var, LOAD_OPERATIONS),
(handle_store_var, STORE_OPERATIONS),
]
SUPPORTED_OPERATIONS = {}
for handler, opers in BYTECODE_HANDLERS:
for oper in opers:
SUPPORTED_OPERATIONS[oper] = handler
def browse_vars(state, body):
    # Browse the variables used in the loop's body to determine their mutability
for oper, arg in body:
try:
arg_type, mutation = VARIABLE_TYPE_MAP[oper]
state.add_var((arg_type, arg), mutation)
except KeyError:
pass
def browse_counter(state, body):
store_instr = body[0]
oper, name = store_instr
try:
arg_type, mutation = VARIABLE_TYPE_MAP[oper]
if not mutation:
raise KeyError
except KeyError:
raise RecompilationError((
            'Unsupported iterator usage in instruction %s' % repr(store_instr)
), state)
load_instr = VARIABLE_OPERATION_MAP[arg_type][0], name
if state.settings['opt_min_rows']:
status = 'n' # A loop counter was not used
for index in xrange(1, len(body)):
instr = body[index]
if instr == store_instr:
status = 'w' # The counter was changed at least once
break
if instr == load_instr:
status = 'r' # The counter was not changed but was read at least once
else:
status = 'w'
return (arg_type, name), status, body[1:]
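# Illustrative examples of the counter statuses above (these loops are assumptions,
# not taken from the original project):
#   for i in xrange(n): acc += 2   ->  status 'n' (the counter i is never used in the body)
#   for i in xrange(n): acc += i   ->  status 'r' (i is read but never reassigned)
#   for i in xrange(n): i = i * 2  ->  status 'w' (i is reassigned inside the body)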
def recompile_body(settings, body):
state = RecompilerState(settings)
elem_straight, counter_status, rem_body = browse_counter(
state, body,
)
if counter_status == 'w':
        # If the real counter is mutable, we need a special variable to
        # store the real counter value
counter_service = COUNTER, None
elif counter_status == 'r':
# If real counter isn't mutable but used, we need to
# maintain its value
counter_service = elem_straight
if counter_status == 'n':
# If real counter isn't used at all, we don't need to
# maintain this variable in the loop, but we need to save
# its final value after the loop
state.manual_store_counter = elem_straight
else:
# We must mark real counter as mutable at the beginning of the
# loop, because first instruction (counter storing) was removed
# from rem_body and system doesn't know that counter is mutable
state.add_var(elem_straight, True)
state.manual_store_counter = None
browse_vars(state, rem_body)
if counter_status != 'n':
state.append(
[MOV, counter_service, (PARAM, 'start')],
)
state.append(
[LOOP, (PARAM, 'iters_count')],
)
if counter_status == 'w':
state.append(
[MOV, elem_straight, (COUNTER, None)],
)
for instr in rem_body:
oper = instr[0]
if oper == byteplay.SetLineno:
state.lineno = instr[1]
continue
try:
SUPPORTED_OPERATIONS[oper](state, instr)
except UnpredictableArgsError:
raise RecompilationError(('All operands of instruction %s must be a constant ' +
'or must have a predictable value') % oper, state)
except IndexError:
raise RecompilationError('Unsupported loop type or invalid stack usage in bytecode', state)
except KeyError:
raise RecompilationError('Unsupported instruction %s' % repr(instr), state)
if counter_status != 'n':
state.append(
[ADD, counter_service, (PARAM, 'step')],
)
state.append(
[END],
)
if counter_status == 'r':
state.append(
[SUB, counter_service, (PARAM, 'step')],
)
return state
|
readability/text/syllables.py | rbamos/py-readability-metrics | 198 | 12788478 | import re
def count(word):
"""
Simple syllable counting
"""
word = word if type(word) is str else str(word)
word = word.lower()
if len(word) <= 3:
return 1
word = re.sub('(?:[^laeiouy]es|[^laeiouy]e)$', '', word) # removed ed|
word = re.sub('^y', '', word)
matches = re.findall('[aeiouy]{1,2}', word)
return len(matches)
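# Quick sanity check of the heuristic above (illustrative only; counts are
# approximate, not dictionary-accurate):
#   count("cat") == 1, count("python") == 2, count("readability") == 5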
|
recc/emulators/python/linux-emulator-example.py | oscourse-tsinghua/OS2018spring-projects-g02 | 249 | 12788485 | <gh_stars>100-1000
# Copyright 2016 <NAME> Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import select
import time
import sys, tty, termios
from op_cpu_package.python_l0_module import OpCPUData
from op_cpu_package.op_cpu_module import OpCPU
# A Linux interface to using the python implementation of the One Page CPU emulator
def main_loop():
loader_data = OpCPUData()
op_cpu = OpCPU(loader_data)
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(sys.stdin.fileno())
input_buffer = [] # Characters to be sent to emulator
while not op_cpu.is_halted():
in_chrs = select.select([sys.stdin], [], [], 0.0001)[0]
if not in_chrs:
for x in range(0, 10000):
r = op_cpu.vm_getc()
if 'chr' in r:
sys.stdout.write(chr(r['chr']))
if r['chr'] == 10:
sys.stdout.write('\r')
sys.stdout.flush()
if len(input_buffer):
inchr = input_buffer.pop()
if op_cpu.vm_putc(inchr): # Not able to input chr
input_buffer = [inchr] + input_buffer
op_cpu.step()
else:
dobreak = False
for file in in_chrs:
c = file.read(1)
input_buffer = input_buffer + [ord(c)]
if ord(c) == 3:
dobreak = True
if dobreak:
break
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
main_loop()
|
train/labeler.py | lvaughn/nnsplit | 248 | 12788502 | from typing import List
from fractions import Fraction
from abc import ABC, abstractmethod
import spacy
import string
import random
import pandas as pd
import numpy as np
import diskcache
import sys
from somajo import SoMaJo
from spacy.lang.tr import Turkish
from spacy.lang.sv import Swedish
from spacy.lang.uk import Ukrainian
NO_MODEL_LANGUAGE_LOOKUP = {
"turkish": Turkish,
"swedish": Swedish,
"ukrainian": Ukrainian,
}
def noise(text, insert_chance, delete_chance, repeat_chance):
assert insert_chance == delete_chance == repeat_chance
chances = np.random.random(len(text) * 3)
    if (chances >= insert_chance).all():  # fast path: no character would be modified
return text
out = ""
for i, char in enumerate(text):
if chances[i * 3] >= delete_chance:
out += char
if chances[(i * 3) + 1] < repeat_chance:
out += char
if chances[(i * 3) + 2] < insert_chance:
out += random.choice(string.ascii_letters)
return out
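# Rough illustration of the function above (behaviour is random, so this is not a doctest):
# with per-character probabilities of 0.001, noise("some text", 0.001, 0.001, 0.001)
# usually returns the text unchanged and occasionally drops, repeats, or inserts a character.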
def get_model(name):
try:
nlp = spacy.load(name, disable=["tagger", "parser", "ner"])
except OSError:
nlp = NO_MODEL_LANGUAGE_LOOKUP[name]()
return nlp
def has_space(text: str) -> bool:
return any(x.isspace() for x in text)
class Tokenizer(ABC):
def __init__(self):
self.training = True
def train(self, mode=True):
self.training = mode
def eval(self):
self.train(False)
@abstractmethod
def tokenize(self, text: str) -> List[str]:
pass
def remove_last_punct(text: str, punctuation) -> str:
for i in range(len(text))[::-1]:
if text[i] in punctuation:
return text[:i] + text[i + 1 :]
elif not text[i].isspace():
return text
return text
class SpacySentenceTokenizer(Tokenizer):
def __init__(
self,
model_name: str,
lower_start_prob: Fraction,
remove_end_punct_prob: Fraction,
punctuation: str,
):
super().__init__()
self.nlp = get_model(model_name)
self.nlp.add_pipe("sentencizer")
self.lower_start_prob = lower_start_prob
self.remove_end_punct_prob = remove_end_punct_prob
self.punctuation = punctuation
def tokenize(self, text: str) -> List[str]:
out_sentences = []
current_sentence = ""
end_sentence = False
for token in self.nlp(text):
text = token.text
whitespace = token.whitespace_
if token.is_sent_start:
end_sentence = True
if end_sentence and not text.isspace():
if self.training and random.random() < self.remove_end_punct_prob:
current_sentence = remove_last_punct(current_sentence, self.punctuation)
out_sentences.append(current_sentence)
current_sentence = ""
end_sentence = False
if (
self.training
and len(current_sentence) == 0
and random.random() < self.lower_start_prob
):
text = text.lower()
current_sentence += text + whitespace
out_sentences.append(current_sentence)
return [x for x in out_sentences if len(x) > 0]
class SpacyWordTokenizer(Tokenizer):
def __init__(self, model_name: str):
super().__init__()
self.tokenizer = get_model(model_name).tokenizer
def tokenize(self, text: str) -> List[str]:
out_tokens = []
current_token = ""
for token in self.tokenizer(text):
if not token.text.isspace():
out_tokens.append(current_token)
current_token = ""
current_token += token.text + token.whitespace_
out_tokens.append(current_token)
return [x for x in out_tokens if len(x) > 0]
class SoMaJoSentenceTokenizer(Tokenizer):
def __init__(self, model_name: str):
super().__init__()
self.tokenizer = SoMaJo(model_name)
def tokenize(self, text: str) -> List[str]:
out_sentences = []
sentences = list(self.tokenizer.tokenize_text([text]))
for i, sentence in enumerate(sentences):
text = ""
for token in sentence:
if "SpaceAfter=No" in token.extra_info:
whitespace = ""
else:
whitespace = " "
text += token.text + whitespace
if i == len(sentences) - 1:
text = text.rstrip()
out_sentences.append(text)
return out_sentences
class SoMaJoWordTokenizer(Tokenizer):
def __init__(self, model_name: str):
super().__init__()
self.tokenizer = SoMaJo(model_name, split_sentences=False)
def tokenize(self, text: str) -> List[str]:
out_tokens = []
tokens = next(self.tokenizer.tokenize_text([text]))
for i, token in enumerate(tokens):
if "SpaceAfter=No" in token.extra_info or i == len(tokens) - 1:
whitespace = ""
else:
whitespace = " "
# sometimes sample more spaces than one space so the model learns to deal with it
while random.random() < 0.05:
whitespace += " "
out_tokens.append(token.text + whitespace)
return [x for x in out_tokens if len(x) > 0]
class WhitespaceTokenizer(Tokenizer):
def tokenize(self, text: str) -> List[str]:
out = None
for i in range(len(text))[::-1]:
if not text[i].isspace():
out = [text[: i + 1], text[i + 1 :]]
break
if out is None:
out = [text, ""]
return out
class SECOSCompoundTokenizer(Tokenizer):
def __init__(self, secos_path: str):
super().__init__()
sys.path.append(secos_path)
import decompound_server
self.decompound = decompound_server.make_decompounder(
[
"decompound_server.py",
f"{secos_path}data/denews70M_trigram__candidates",
f"{secos_path}data/denews70M_trigram__WordCount",
"50",
"3",
"3",
"5",
"3",
"upper",
"0.01",
"2020",
]
)
self.disk_cache = diskcache.Index("secos_cache")
self.cache = {}
for key in self.disk_cache:
self.cache[key] = self.disk_cache[key]
def tokenize(self, text: str) -> List[str]:
if text.isspace():
return [text]
text_bytes = text.encode("utf-8")
compounds = self.cache.get(text_bytes)
if compounds is None:
assert not has_space(text), text
compounds = self.decompound(text)
if len(compounds) == 0:
compounds = text
compound_bytes = compounds.encode("utf-8")
self.disk_cache[text_bytes] = compound_bytes
self.cache[text_bytes] = compound_bytes
else:
compounds = compounds.decode("utf-8")
compounds = compounds.split()
compounds = [noise(x, 0.001, 0.001, 0.001) for x in compounds]
return compounds if len(compounds) > 0 else [noise(text, 0.001, 0.001, 0.001)]
class Labeler:
def __init__(self, tokenizers):
self.tokenizers = tokenizers
def _annotate(self, text: str, tok_index=0):
if tok_index >= len(self.tokenizers):
return [(text, set())]
out = []
for token in self.tokenizers[tok_index].tokenize(text):
out += self._annotate(token, tok_index=tok_index + 1)
out[-1][1].add(tok_index)
return out
def _to_dense_label(self, annotations):
input_bytes = []
label = []
all_zeros = [0] * len(self.tokenizers)
for (token, annotation) in annotations:
token_bytes = token.encode("utf-8")
input_bytes += token_bytes
label += [all_zeros.copy() for _ in range(len(token_bytes))]
if len(label) > 0:
for idx in annotation:
label[-1][idx] = 1
return input_bytes, label
def label(self, text):
return self._to_dense_label(self._annotate(text))
def visualize(self, text):
text, label = self.label(text)
data = []
for char, label_col in zip(text, label):
data.append([char, *label_col])
df = pd.DataFrame(
data, columns=["byte", *[x.__class__.__name__ for x in self.tokenizers]]
).T
df.columns = ["" for _ in range(len(df.columns))]
with pd.option_context(
"display.max_columns",
len(text),
):
print(df)
if __name__ == "__main__":
labeler = Labeler(
[
SpacySentenceTokenizer(
"de_core_news_sm", lower_start_prob=0.7, remove_end_punct_prob=0.7, punctuation=".?!"
),
SpacyWordTokenizer("de_core_news_sm"),
WhitespaceTokenizer(),
SECOSCompoundTokenizer("../../../Experiments/SECOS/"),
]
)
labeler.visualize("KNN (ANN).")
|
scripts/analysis/scheduling_duration_cdf.py | Container-Projects/firmament | 287 | 12788515 | #!/usr/bin/python
import sys, re
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print "usage: scheduling_duration_cdf.py <log file 0> <label 0> " \
"<log file 1> <label 1> ..."
sys.exit(1)
durations = {}
for i in range(1, len(sys.argv), 2):
inputfile = sys.argv[i]
label = sys.argv[i+1]
att_start = None
# read and process log file
for line in open(inputfile).readlines():
rec = re.match("[A-Z][0-9]+ ([0-9:\.]+)\s+[0-9]+ .+\] (.+)", line)
if not rec:
#print "ERROR: failed to match line %s" % (line)
pass
else:
timestamp_str = rec.group(1)
message_str = rec.group(2)
timestamp = datetime.strptime(timestamp_str, "%H:%M:%S.%f")
m = re.match("START SCHEDULING (.+)",
message_str)
if m:
if att_start == None:
job_id = m.group(1)
att_start = timestamp
else:
print "ERROR: overlapping scheduling events?"
m = re.match("STOP SCHEDULING (.+).", message_str)
if m:
if att_start != None:
job_id = m.group(1)
duration = timestamp - att_start
if not label in durations:
durations[label] = []
durations[label].append(duration.total_seconds())
att_start = None
else:
print "ERROR: overlapping scheduling events?"
plt.figure()
for l, d in durations.items():
plt.hist(d, bins=200, label=l)
plt.legend(loc=4)
plt.ylabel("Count")
plt.xlabel("Scheduler runtime [sec]")
plt.savefig("scheduling_duration_hist.pdf", format="pdf", bbox_inches='tight')
plt.clf()
for l, d in durations.items():
plt.hist(d, bins=200, histtype='step', cumulative=True, normed=True, label=l,
lw=2.0)
plt.legend(loc=4)
plt.ylim(0, 1)
plt.xlabel("Scheduler runtime [sec]")
plt.savefig("scheduling_duration_cdf.pdf", format="pdf", bbox_inches='tight')
|
tests/test_fds.py | CyberFlameGO/fds | 322 | 12788527 | <reponame>CyberFlameGO/fds
import unittest
from unittest.mock import patch
import pytest
import re
from fds.version import __version__
from fds.services.fds_service import FdsService
from fds.run import HooksRunner
BOOLS = [True, False]
# NOTE unittest.mock:_Call backport
def patch_unittest_mock_call_cls():
import sys
if sys.version_info.minor >= 8:
return
import unittest.mock
def _get_call_arguments(self):
if len(self) == 2:
args, kwargs = self
else:
name, args, kwargs = self
return args, kwargs
@property
def args(self):
return self._get_call_arguments()[0]
@property
def kwargs(self):
return self._get_call_arguments()[1]
unittest.mock._Call._get_call_arguments = _get_call_arguments
unittest.mock._Call.args = args
unittest.mock._Call.kwargs = kwargs
patch_unittest_mock_call_cls()
class TestFds(unittest.TestCase):
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_init_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.init()
assert mock_git_service.init.called
assert mock_dvc_service.init.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_status_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.status()
assert mock_git_service.status.called
assert mock_dvc_service.status.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_status_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.status.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
self.assertRaises(Exception, mock_git_service.status)
self.assertRaises(Exception, fds_service.status)
assert mock_git_service.status.called
        assert not mock_dvc_service.status.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_status_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.status.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
self.assertRaises(Exception, fds_service.status)
self.assertRaises(Exception, mock_dvc_service.status)
assert mock_git_service.status.called
assert mock_dvc_service.status.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_add_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.add(".")
assert mock_git_service.add.called
assert mock_dvc_service.add.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_add_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.add.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
self.assertRaises(Exception, mock_git_service.add)
with self.assertRaises(Exception):
fds_service.add(".")
assert mock_git_service.add.called
assert mock_dvc_service.add.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_add_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.add.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.add(".")
self.assertRaises(Exception, mock_dvc_service.add)
assert mock_dvc_service.add.called
        assert not mock_git_service.add.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_commit_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.commit("some commit message", True)
assert mock_git_service.commit.called
assert mock_dvc_service.commit.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_commit_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.commit.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.commit("some commit message", True)
self.assertRaises(Exception, mock_git_service.commit)
assert mock_git_service.commit.called
assert mock_dvc_service.commit.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_commit_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.commit.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.commit("some commit message", False)
self.assertRaises(Exception, mock_dvc_service.commit)
assert mock_dvc_service.commit.called
        assert not mock_git_service.commit.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_clone_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.pull.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.clone("https://github.com/dagshub/fds.git", None, None)
self.assertRaises(Exception, mock_dvc_service.pull)
mock_git_service.clone.assert_called_with("https://github.com/dagshub/fds.git", None)
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_clone_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.clone.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.clone("https://github.com/dagshub/fds.git", None, None)
self.assertRaises(Exception, mock_git_service.clone)
        assert not mock_dvc_service.pull.called
class TestFdsHooks:
@pytest.mark.parametrize("dvc_preinstalled", BOOLS)
@pytest.mark.parametrize("install_prompt_accept", BOOLS)
@patch('fds.run.execute_command')
@patch('fds.run.get_confirm_from_user')
@patch('fds.services.fds_service.FdsService')
@patch('fds.run.which')
def test_dvc_installed(
self,
mock_which,
mock_fds_service,
mock_prompt,
mock_execute_command,
dvc_preinstalled: bool,
install_prompt_accept: bool
):
mock_which.return_value = dvc_preinstalled or None
mock_prompt.return_value = install_prompt_accept
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
ret = hooks_runner._ensure_dvc_installed()
mock_which.assert_called_with("dvc")
if dvc_preinstalled:
return
assert mock_prompt.call_count == 1
if not install_prompt_accept:
assert ret != 0
# TODO validate printer containing "install dvc manually"
return
assert ret == 0
assert mock_execute_command.call_count == 1
args = mock_execute_command.call_args_list[0].args[0]
assert re.findall(r"^pip3 install .*'dvc", args[0])
@pytest.mark.parametrize("git_preinstalled", BOOLS)
@patch('fds.run.sys.exit')
@patch('fds.services.fds_service.FdsService')
@patch('fds.run.which')
def test_git_installed(
self,
mock_which,
mock_fds_service,
mock_sys_exit,
git_preinstalled: bool,
):
mock_which.return_value = git_preinstalled or None
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
ret = hooks_runner._ensure_git_installed()
mock_which.assert_called_with("git")
if git_preinstalled:
assert ret == 0
return
assert mock_sys_exit.call_count == 1
        assert all(call.args != (0,) for call in mock_sys_exit.call_args_list)
@pytest.mark.parametrize("is_latest", BOOLS)
@pytest.mark.parametrize("install_prompt_accept", BOOLS)
@patch('fds.run.rerun_in_new_shell_and_exit')
@patch('fds.run.execute_command')
@patch('fds.run.get_confirm_from_user')
@patch('fds.services.fds_service.FdsService')
@patch('fds.run.requests.get')
def test_fds_update(
self,
mock_requests_get,
mock_fds_service,
mock_prompt,
mock_execute_command,
mock_rerun,
is_latest: bool,
install_prompt_accept: bool
):
mock_requests_get.return_value = type(
"Response",
(),
{
"json": lambda self: {
"info": {
"version": __version__ + ("b3" if not is_latest else "")
}
}
}
)()
mock_prompt.return_value = install_prompt_accept
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
ret = hooks_runner._ensure_fds_updated()
mock_requests_get.assert_called_with("https://pypi.python.org/pypi/fastds/json")
assert ret == 0
if is_latest:
return
assert mock_prompt.call_count == 1
# # TODO validate stdout contains "Should we upgrade..."
if not install_prompt_accept:
return
assert mock_execute_command.call_count == 1
lst = mock_execute_command.call_args_list[0]
assert re.findall(r"^pip3 install .*fastds.*--upgrade", lst.args[0][0])
assert mock_rerun.call_count == 1
mock_rerun.assert_called_with()
@pytest.mark.parametrize("raise_on_reject", BOOLS)
@pytest.mark.parametrize("service_preinitialized", BOOLS)
@pytest.mark.parametrize("initialize_prompt_accept", BOOLS)
@pytest.mark.parametrize("service_name", ["git", "dvc"])
@patch('fds.run.sys.exit')
@patch('fds.run.get_confirm_from_user')
@patch('fds.services.fds_service.FdsService')
def test_service_initialized(
self,
mock_fds_service,
mock_prompt,
mock_sys_exit,
raise_on_reject: bool,
service_preinitialized: bool,
initialize_prompt_accept: bool,
service_name: str,
tmpdir,
):
attr_name = f"{service_name}_service"
svc = getattr(mock_fds_service.service, attr_name)
fut_name = f"_ensure_{service_name}_initialized"
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
fut = getattr(hooks_runner, fut_name)
mock_prompt.return_value = initialize_prompt_accept
with patch.object(
svc,
"repo_path",
tmpdir.strpath,
), patch.object(
svc,
"is_initialized",
return_value=service_preinitialized,
), patch.object(
svc,
"init",
):
ret = fut()
assert svc.is_initialized.call_count == 1
if service_preinitialized:
assert ret == 0
return
assert mock_prompt.call_count == 1
if initialize_prompt_accept:
assert svc.init.call_count == 1
assert ret == 0
return
assert re.findall(
r"You can initialize.*{}.*manually by running".format(service_name),
mock_fds_service.printer.warn.call_args_list[0].args[0]
)
if raise_on_reject:
assert mock_sys_exit.call_count == 1
else:
            assert all(call.args != (0,) for call in mock_sys_exit.call_args_list)
|
venv/Lib/site-packages/pdfminer/pslexer.py | richung99/digitizePlots | 202 | 12788528 | import re
import ply.lex as lex
states = (
('instring', 'exclusive'),
)
tokens = (
'COMMENT', 'HEXSTRING', 'INT', 'FLOAT', 'LITERAL', 'KEYWORD', 'STRING', 'OPERATOR'
)
delimiter = r'\(\)\<\>\[\]\{\}\/\%\s'
delimiter_end = r'(?=[%s]|$)' % delimiter
def t_COMMENT(t):
# r'^%!.+\n'
r'%.*\n'
pass
RE_SPC = re.compile(r'\s')
RE_HEX_PAIR = re.compile(r'[0-9a-fA-F]{2}|.')
@lex.TOKEN(r'<[0-9A-Fa-f\s]*>')
def t_HEXSTRING(t):
cleaned = RE_SPC.sub('', t.value[1:-1])
pairs = RE_HEX_PAIR.findall(cleaned)
token_bytes = bytes([int(pair, 16) for pair in pairs])
try:
t.value = token_bytes.decode('ascii')
except UnicodeDecodeError:
# should be kept as bytes
t.value = token_bytes
return t
@lex.TOKEN(r'(\-|\+)?[0-9]+' + delimiter_end)
def t_INT(t):
t.value = int(t.value)
return t
@lex.TOKEN(r'(\-|\+)?([0-9]+\.|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' + delimiter_end)
def t_FLOAT(t):
t.value = float(t.value)
return t
RE_LITERAL_HEX = re.compile(r'#[0-9A-Fa-f]{2}')
@lex.TOKEN(r'/.+?' + delimiter_end)
def t_LITERAL(t):
newvalue = t.value[1:]
# If there's '#' chars in the literal, we much de-hex it
def re_sub(m):
        # convert any hex str to int (without the # char) and then convert that to a char
return bytes.fromhex(m.group(0)[1:]).decode('latin-1')
newvalue = RE_LITERAL_HEX.sub(re_sub , newvalue)
# If there's any lone # char left, remove them
newvalue = newvalue.replace('#', '')
t.value = newvalue
return t
def t_OPERATOR(t):
r'{|}|<<|>>|\[|\]'
return t
t_KEYWORD = r'.+?' + delimiter_end
def t_instring(t):
r'\('
t.lexer.value_buffer = []
t.lexer.string_startpos = t.lexpos
t.lexer.level = 1
t.lexer.begin('instring')
# The parens situation: it's complicated. We can have both escaped parens and unescaped parens.
# If they're escaped, there's nothing special, we unescape them and add them to the string. If
# they're not escaped, we have to count how many of them there are, to know when a rparen is the
# end of the string. The regular expression for this is messed up, so what we do is when we hit
# a paren, we check whether the previous buffer chunk ends with a backslash. If it did, we don't do paren
# balancing.
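# Illustrative example (assumed input, not from the original tests): for the PostScript
# string "(a \( b)", the escaped "\(" is appended to the buffer literally without touching
# t.lexer.level, while the unescaped outer parens are balanced via t.lexer.level.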
def t_instring_lparen(t):
r'\('
is_escaped = t.lexer.value_buffer and t.lexer.value_buffer[-1].endswith('\\')
if is_escaped:
t.lexer.value_buffer[-1] = t.lexer.value_buffer[-1][:-1]
else:
t.lexer.level +=1
t.lexer.value_buffer.append('(')
def t_instring_rparen(t):
r'\)'
is_escaped = t.lexer.value_buffer and t.lexer.value_buffer[-1].endswith('\\')
if is_escaped:
t.lexer.value_buffer[-1] = t.lexer.value_buffer[-1][:-1]
else:
t.lexer.level -=1
if t.lexer.level == 0:
t.value = ''.join(t.lexer.value_buffer)
if any(ord(c) > 0x7f for c in t.value):
t.value = t.value.encode('latin-1')
t.type = "STRING"
t.lexpos = t.lexer.string_startpos
t.lexer.begin('INITIAL')
return t
else:
t.lexer.value_buffer.append(')')
RE_STRING_ESCAPE = re.compile(r'\\[btnfr\\]')
RE_STRING_OCTAL = re.compile(r'\\[0-7]{1,3}')
RE_STRING_LINE_CONT = re.compile(r'\\\n|\\\r|\\\r\n')
ESC_STRING = { 'b': '\b', 't': '\t', 'n': '\n', 'f': '\f', 'r': '\r', '\\': '\\' }
def repl_string_escape(m):
return ESC_STRING[m.group(0)[1]]
def repl_string_octal(m):
i = int(m.group(0)[1:], 8)
    if i <= 0xff: # we never want to go above 255 because it's not encodable in latin-1
return chr(i)
else:
return m.group(0)
def t_instring_contents(t):
r'[^()]+'
s = t.value
s = RE_STRING_ESCAPE.sub(repl_string_escape, s)
s = RE_STRING_OCTAL.sub(repl_string_octal, s)
s = RE_STRING_LINE_CONT.sub('', s)
t.lexer.value_buffer.append(s)
t_instring_ignore = ''
t_ignore = ' \t\r\n'
# Error handling rule
def t_error(t):
print("Illegal character '%r'" % t.value[0])
t.lexer.skip(1)
t_instring_error = t_error
lexer = lex.lex() |
algs4/merge.py | dumpmemory/algs4-py | 230 | 12788536 | """
Sorts a sequence of strings from standard input using merge sort.
% more tiny.txt
S O R T E X A M P L E
% python merge.py < tiny.txt
A E E L M O P R S T X [ one string per line ]
% more words3.txt
bed bug dad yes zoo ... all bad yet
% python merge.py < words3.txt
all bad bed bug dad ... yes yet zoo [ one string per line ]
"""
class Merge:
@classmethod
def merge(cls, arr, lo, mid, hi):
aux = list(arr) # copy to aux
i = lo
j = mid + 1
k = lo
while k <= hi:
if i > mid:
arr[k] = aux[j]
j += 1
elif j > hi:
arr[k] = aux[i]
i += 1
elif aux[i] < aux[j]:
arr[k] = aux[i]
i += 1
else:
arr[k] = aux[j]
j += 1
k += 1
@classmethod
def mergesort(cls, arr, lo, hi):
if lo >= hi:
return
mid = (lo + hi) // 2
cls.mergesort(arr, lo, mid)
cls.mergesort(arr, mid + 1, hi)
cls.merge(arr, lo, mid, hi)
return arr
@classmethod
def sort(cls, arr):
return cls.mergesort(arr, 0, len(arr) - 1)
@classmethod
def is_sorted(cls, arr):
for i in range(1, len(arr)):
if arr[i] < arr[i-1]:
return False
return True
if __name__ == '__main__':
import sys
items = []
for line in sys.stdin:
items.extend(line.split())
print(' items: ', items)
print('sort items: ', Merge.sort(items))
assert Merge.is_sorted(items)
|
observations/r/capm.py | hajime9652/observations | 199 | 12788542 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def capm(path):
"""Stock Market Data
monthly observations from 1960–01 to 2002–12
*number of observations* : 516
A time serie containing :
rfood
excess returns food industry
rdur
excess returns durables industry
rcon
excess returns construction industry
rmrf
excess returns market portfolio
rf
riskfree return
most of the above data are from Kenneth French's data library at
http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `capm.csv`.
Returns:
Tuple of np.ndarray `x_train` with 516 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'capm.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Capm.csv'
maybe_download_and_extract(path, url,
save_file_name='capm.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
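# Illustrative usage (the local path is an assumption; the data is downloaded on first call):
#   x_train, metadata = capm('~/data')
#   x_train.shape              # expected (516, 5), per the docstring above
#   list(metadata['columns'])  # rfood, rdur, rcon, rmrf, rf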
|
Dynamic Programming/741. Cherry Pickup.py | beckswu/Leetcode | 138 | 12788554 | """
741. Cherry Pickup
"""
class Solution:
def cherryPickup(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
n = len(grid)
if grid[0][0] == -1 or grid[n-1][n-1] == -1: return 0
        dp = [[-1,]*n for _ in range(n)]  # important: initialize to -1, not 0
        """
        Take [[1,-1,1],[-1,1,1],[1,1,1]] as an example:
        if dp were initialized to 0, then after k = 1, dp = [[-1,-1,0],[-1,-1,0],[0,0,0]].
        Then for k = 2, i = 2, j = 2 we can only come from (1,0)/(1,0), where dp[1][1] = -1;
        but because of the 0 initialization and the max comparison, dp would end up as 0
        even though it should be -1.
        """
dp[0][0] = grid[0][0]
for k in range(1,2*n-1):
for i in range(min(k,n-1),max(-1, k-n),-1):
for j in range(min(k,n-1),max(-1, k-n),-1):
if grid[i][k-i] == -1 or grid[j][k-j] == -1:
dp[i][j] = -1
continue
                    if i>0 : dp[i][j] = max(dp[i][j], dp[i-1][j]) # down, right
                    if j>0 : dp[i][j] = max(dp[i][j], dp[i][j-1]) # right, down
                    if i>0 and j>0: dp[i][j] = max(dp[i][j], dp[i-1][j-1]) # down, down
if dp[i][j]<0 :continue
dp[i][j] += grid[i][k-i]
if i!=j:
dp[i][j] += grid[j][k-j]
return max(dp[-1][-1],0)
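# Illustrative check (grid taken from the well-known example for this problem; treat it as
# an assumption, not part of the original file):
#   Solution().cherryPickup([[0, 1, -1], [1, 0, -1], [1, 1, 1]]) is expected to return 5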
class Solution:
def cherryPickup(self, grid: List[List[int]]) -> int:
n = len(grid)
if grid[0][0] == -1 or grid[n-1][n-1] == -1: return 0
dp = [[-1,]*n for _ in range(n)]
dp[0][0] = grid[0][0]
for k in range(1,2*n-1):
for i in range(n-1,-1,-1):
for j in range(n-1,-1,-1):
p, q = k-i, k -j
if p < 0 or p >= n or q<0 or q>=n or grid[i][p] == -1 or grid[j][q] == -1:
dp[i][j] = -1
continue
                    if i>0 : dp[i][j] = max(dp[i][j], dp[i-1][j]) # down, right
                    if j>0 : dp[i][j] = max(dp[i][j], dp[i][j-1]) # right, down
                    if i>0 and j>0: dp[i][j] = max(dp[i][j], dp[i-1][j-1]) # down, down
if dp[i][j]<0 :continue
dp[i][j] += grid[i][p]
if i!=j:
dp[i][j] += grid[j][q]
return max(dp[-1][-1],0)
# Top-Down
class Solution:
def cherryPickup(self, grid: List[List[int]]) -> int:
N = len(grid)
lookup = {}
def solve(x1, y1, x2, y2):
# check if we reached bottom right corner
if x1 == N-1 and y1 == N-1:
return grid[x1][y1] if grid[x1][y1] != -1 else float("-inf")
# out of the grid and thorn check
if x1 == N or y1 == N or x2 == N or y2 == N or grid[x1][y1] == -1 or grid[x2][y2] == -1:
return float("-inf")
# memorization check
lookup_key = (x1, y1, x2, y2)
if lookup_key in lookup: return lookup[lookup_key]
# pick your cherries
if x1 == x2 and y1 == y2:
cherries = grid[x1][y1]
else:
cherries = grid[x1][y1] + grid[x2][y2]
res = cherries + max(
solve(x1 + 1, y1, x2 + 1, y2), # right, right
solve(x1, y1 + 1, x2, y2 + 1), # down, down
solve(x1 + 1, y1, x2, y2 + 1), # right, down
solve(x1, y1 + 1, x2 + 1, y2), # down, right
)
lookup[lookup_key] = res
return res
res = solve(0, 0, 0, 0)
return res if res > 0 else 0 |
torchnlp/samplers/noisy_sorted_sampler.py | MPetrochuk/PyTorch-NLP | 2,125 | 12788560 | <reponame>MPetrochuk/PyTorch-NLP
import random
from torch.utils.data.sampler import Sampler
from torchnlp.utils import identity
def _uniform_noise(_):
return random.uniform(-1, 1)
class NoisySortedSampler(Sampler):
""" Samples elements sequentially with noise.
**Background**
``NoisySortedSampler`` is similar to a ``BucketIterator`` found in popular libraries like
`AllenNLP` and `torchtext`. A ``BucketIterator`` pools together examples with a similar size
length to reduce the padding required for each batch. ``BucketIterator`` also includes the
ability to add noise to the pooling.
**AllenNLP Implementation:**
https://github.com/allenai/allennlp/blob/e125a490b71b21e914af01e70e9b00b165d64dcd/allennlp/data/iterators/bucket_iterator.py
**torchtext Implementation:**
https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L225
Args:
data (iterable): Data to sample from.
sort_key (callable): Specifies a function of one argument that is used to extract a
numerical comparison key from each list element.
get_noise (callable): Noise added to each numerical ``sort_key``.
Example:
>>> from torchnlp.random import set_seed
>>> set_seed(123)
>>>
>>> import random
>>> get_noise = lambda i: round(random.uniform(-1, 1))
>>> list(NoisySortedSampler(range(10), sort_key=lambda i: i, get_noise=get_noise))
[0, 1, 2, 3, 5, 4, 6, 7, 9, 8]
"""
def __init__(self, data, sort_key=identity, get_noise=_uniform_noise):
super().__init__(data)
self.data = data
self.sort_key = sort_key
self.get_noise = get_noise
def __iter__(self):
zip_ = []
for i, row in enumerate(self.data):
value = self.get_noise(row) + self.sort_key(row)
zip_.append(tuple([i, value]))
zip_ = sorted(zip_, key=lambda r: r[1])
return iter([item[0] for item in zip_])
def __len__(self):
return len(self.data)
|
care/facility/migrations/0190_auto_20201001_1134.py | gigincg/care | 189 | 12788591 | <filename>care/facility/migrations/0190_auto_20201001_1134.py<gh_stars>100-1000
# Generated by Django 2.2.11 on 2020-10-01 06:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0189_auto_20200929_1258'),
]
operations = [
migrations.AlterField(
model_name='shiftingrequest',
name='status',
field=models.IntegerField(choices=[(10, 'PENDING'), (15, 'ON HOLD'), (20, 'APPROVED'), (30, 'REJECTED'), (40, 'DESTINATION APPROVED'), (50, 'DESTINATION REJECTED'), (60, 'AWAITING TRANSPORTATION'), (70, 'TRANSFER IN PROGRESS'), (80, 'COMPLETED')], default=10),
),
]
|
nuplan/planning/metrics/evaluation_metrics/common/ego_lat_jerk.py | motional/nuplan-devkit | 128 | 12788592 | <gh_stars>100-1000
from typing import List
from nuplan.planning.metrics.evaluation_metrics.base.within_bound_metric_base import WithinBoundMetricBase
from nuplan.planning.metrics.metric_result import MetricStatistics
from nuplan.planning.metrics.utils.state_extractors import extract_ego_jerk
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
class EgoLatJerkStatistics(WithinBoundMetricBase):
"""Ego lateral jerk metric."""
def __init__(self, name: str, category: str) -> None:
"""
Initializes the EgoLatJerkStatistics class
:param name: Metric name
:param category: Metric category.
"""
super().__init__(name=name, category=category)
def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
"""
Returns the lateral jerk metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return the estimated lateral jerk metric.
"""
return self._compute_statistics( # type: ignore
history=history,
scenario=scenario,
statistic_unit_name='meters_per_second_cubed',
extract_function=extract_ego_jerk,
extract_function_params={'acceleration_coordinate': 'y'},
)
|
scale/job/messages/failed_jobs.py | kaydoh/scale | 121 | 12788596 | <filename>scale/job/messages/failed_jobs.py
"""Defines a command message that sets FAILED status for job models"""
from __future__ import unicode_literals
import logging
from collections import namedtuple
from django.db import transaction
from error.models import get_error
from job.models import Job
from messaging.messages.message import CommandMessage
from util.parse import datetime_to_string, parse_datetime
from util.retry import retry_database_query
# This is the maximum number of job models that can fit in one message. This maximum ensures that every message of this
# type is less than 25 KiB long.
MAX_NUM = 100
FailedJob = namedtuple('FailedJob', ['job_id', 'exe_num', 'error_id'])
logger = logging.getLogger(__name__)
def create_failed_jobs_messages(failed_jobs, when):
"""Creates messages to fail the given jobs
:param failed_jobs: The failed jobs
:type failed_jobs: :func:`list`
:param when: When the jobs failed
:type when: :class:`datetime.datetime`
:return: The list of messages
:rtype: :func:`list`
"""
messages = []
message = None
for failed_job in failed_jobs:
if not message:
message = FailedJobs()
message.ended = when
elif not message.can_fit_more():
messages.append(message)
message = FailedJobs()
message.ended = when
message.add_failed_job(failed_job)
if message:
messages.append(message)
return messages
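# Illustrative usage (ids and timestamp below are made up, not from the original code):
#   when = parse_datetime('2020-01-01T00:00:00Z')
#   messages = create_failed_jobs_messages([FailedJob(job_id=12, exe_num=1, error_id=3)], when)
#   # each resulting message holds at most MAX_NUM failed jobs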
class FailedJobs(CommandMessage):
"""Command message that sets FAILED status for job models
"""
def __init__(self):
"""Constructor
"""
super(FailedJobs, self).__init__('failed_jobs')
self._count = 0
self._failed_jobs = {} # {Error ID: [FailedJob]}
self.ended = None
def add_failed_job(self, failed_job):
"""Adds the given failed job to this message
:param failed_job: The failed job
:type failed_job: :class:`job.messages.failed_jobs.FailedJob`
"""
self._count += 1
if failed_job.error_id in self._failed_jobs:
self._failed_jobs[failed_job.error_id].append(failed_job)
else:
self._failed_jobs[failed_job.error_id] = [failed_job]
def can_fit_more(self):
"""Indicates whether more failed jobs can fit in this message
:return: True if more failed jobs can fit, False otherwise
:rtype: bool
"""
return self._count < MAX_NUM
def to_json(self):
"""See :meth:`messaging.messages.message.CommandMessage.to_json`
"""
error_list = []
for error_id, job_list in self._failed_jobs.items():
jobs_list = []
for failed_job in job_list:
jobs_list.append({'id': failed_job.job_id, 'exe_num': failed_job.exe_num})
error_list.append({'id': error_id, 'jobs': jobs_list})
return {'ended': datetime_to_string(self.ended), 'errors': error_list}
@staticmethod
def from_json(json_dict):
"""See :meth:`messaging.messages.message.CommandMessage.from_json`
"""
message = FailedJobs()
message.ended = parse_datetime(json_dict['ended'])
for error_dict in json_dict['errors']:
error_id = error_dict['id']
for job_dict in error_dict['jobs']:
job_id = job_dict['id']
exe_num = job_dict['exe_num']
message.add_failed_job(FailedJob(job_id, exe_num, error_id))
return message
@retry_database_query(max_tries=5, base_ms_delay=1000, max_ms_delay=5000)
def execute(self):
"""See :meth:`messaging.messages.message.CommandMessage.execute`
"""
from queue.messages.queued_jobs import create_queued_jobs_messages, QueuedJob
job_ids = []
for job_list in self._failed_jobs.values():
for failed_job in job_list:
job_ids.append(failed_job.job_id)
root_recipe_ids = set()
with transaction.atomic():
# Retrieve locked job models
job_models = {}
for job in Job.objects.get_locked_jobs(job_ids):
job_models[job.id] = job
if job.root_recipe_id:
root_recipe_ids.add(job.root_recipe_id)
# Get job models with related fields
# TODO: once long running job types are gone, the related fields are not needed
for job in Job.objects.get_jobs_with_related(job_ids):
job_models[job.id] = job
jobs_to_retry = []
all_failed_job_ids = []
for error_id, job_list in self._failed_jobs.items():
error = get_error(error_id)
jobs_to_fail = []
for failed_job in job_list:
job_model = job_models[failed_job.job_id]
# If job cannot be failed or execution number does not match, then this update is obsolete
if not job_model.can_be_failed() or job_model.num_exes != failed_job.exe_num:
# Ignore this job
continue
# Re-try job if error supports re-try and there are more tries left
retry = error.should_be_retried and job_model.num_exes < job_model.max_tries
# Also re-try long running jobs
retry = retry or job_model.job_type.is_long_running
# Do not re-try superseded jobs
retry = retry and not job_model.is_superseded
if retry:
jobs_to_retry.append(QueuedJob(job_model.id, job_model.num_exes))
else:
jobs_to_fail.append(job_model)
# Update jobs that failed with this error
if jobs_to_fail:
failed_job_ids = Job.objects.update_jobs_to_failed(jobs_to_fail, error_id, self.ended)
logger.info('Set %d job(s) to FAILED status with error %s', len(failed_job_ids), error.name)
all_failed_job_ids.extend(failed_job_ids)
# Need to update recipes of failed jobs so that dependent jobs are BLOCKED
if root_recipe_ids:
from recipe.messages.update_recipe import create_update_recipe_messages_from_node
self.new_messages.extend(create_update_recipe_messages_from_node(root_recipe_ids))
# Place jobs to retry back onto the queue
if jobs_to_retry:
self.new_messages.extend(create_queued_jobs_messages(jobs_to_retry, requeue=True))
# Send messages to update recipe metrics
from recipe.messages.update_recipe_metrics import create_update_recipe_metrics_messages_from_jobs
self.new_messages.extend(create_update_recipe_metrics_messages_from_jobs(job_ids))
return True
|
carball/json_parser/actor/team.py | unitedroguegg/carball | 119 | 12788613 | <reponame>unitedroguegg/carball
from .ball import *
class TeamHandler(BaseActorHandler):
@classmethod
def can_handle(cls, actor: dict) -> bool:
return actor['ClassName'] == 'TAGame.Team_Soccar_TA'
def update(self, actor: dict, frame_number: int, time: float, delta: float) -> None:
self.parser.team_dicts[actor['Id']] = actor
self.parser.team_dicts[actor['Id']]['colour'] = 'blue' if actor["TypeName"] == "Archetypes.Teams.Team0" else \
'orange'
|
build_fake_image__build_exe/__injected_code.py | DazEB2/SimplePyScripts | 117 | 12788651 | <reponame>DazEB2/SimplePyScripts
import os.path
from pathlib import Path
file_name = Path(os.path.expanduser("~/Desktop")).resolve() / "README_YOU_WERE_HACKED.txt"
file_name.touch(exist_ok=True)
|
CalibTracker/SiStripCommon/python/ShallowGainCalibration_cfi.py | ckamtsikis/cmssw | 852 | 12788655 | <reponame>ckamtsikis/cmssw<filename>CalibTracker/SiStripCommon/python/ShallowGainCalibration_cfi.py
import FWCore.ParameterSet.Config as cms
shallowGainCalibration = cms.EDProducer("ShallowGainCalibration",
Tracks=cms.InputTag("generalTracks",""),
Prefix=cms.string("GainCalibration"),
Suffix=cms.string(""))
|
docs/source/conf.py | steven-lang/SPFlow | 199 | 12788662 | # -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath("../../src"))
import sphinx_gallery
# -- Project information -----------------------------------------------------
project = "SPFlow"
copyright = "2020, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
author = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
# Get __version__ from _meta
from spn._meta import __version__
version = __version__
release = __version__
extensions = [
"sphinx.ext.linkcode",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx_gallery.gen_gallery",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
exclude_patterns = ["build", "Thumbs.db", ".DS_Store", "env"]
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_logo = "../../Documentation/logo/spflow_logoSquare.png"
# -- Extension configuration -------------------------------------------------
autosummary_generate = True
autodoc_default_options = {"undoc-members": None}
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"sklearn": ("https://scikit-learn.org/stable", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Linkcode extension
def linkcode_resolve(domain, info):
if domain != "py":
return None
if not info["module"]:
return None
filename = info["module"].replace(".", "/")
return "https://github.com/SPFlow/SPFlow/blob/master/src/%s.py" % filename
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# sphinx_gallery.gen_gallery settings
sphinx_gallery_conf = {
"doc_module": "spn",
"backreferences_dir": os.path.join("generated"),
"reference_url": {"spn": None},
"remove_config_comments": True,
}
|
lib-opencc-android/src/main/jni/OpenCC/binding.gyp | huxiaomao/android-opencc | 5,895 | 12788682 | {
"includes": [
"node/global.gypi",
"node/configs.gypi",
"node/dicts.gypi",
"node/node_opencc.gypi",
]
}
|
adadelta.py | morpheusthewhite/twitter-sent-dnn | 314 | 12788688 | <gh_stars>100-1000
"""
Adadelta algorithm implementation
"""
import numpy as np
import theano
import theano.tensor as T
def build_adadelta_updates(params, param_shapes, param_grads, rho=0.95, epsilon=0.001):
# AdaDelta parameter update
# E[g^2]
# initialized to zero
egs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Eg:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
# E[\delta x^2], initialized to zero
exs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Ex:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
new_egs = [
rho * eg + (1 - rho) * g ** 2
for eg, g in zip(egs, param_grads)
]
delta_x = [
-(T.sqrt(ex + epsilon) / T.sqrt(new_eg + epsilon)) * g
for new_eg, ex, g in zip(new_egs, exs, param_grads)
]
new_exs = [
rho * ex + (1 - rho) * (dx ** 2)
for ex, dx in zip(exs, delta_x)
]
    egs_updates = list(zip(egs, new_egs))
    exs_updates = list(zip(exs, new_exs))
param_updates = [
(p, p + dx)
for dx, g, p in zip(delta_x, param_grads, params)
]
updates = egs_updates + exs_updates + param_updates
return updates
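# Usage sketch (illustrative, not from the original repo): wiring the AdaDelta
# updates into a Theano training function for a toy softmax classifier. All
# names, shapes and data below are assumptions made for this example.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = T.matrix('x')
    y = T.ivector('y')
    W = theano.shared(np.zeros((20, 3), dtype=theano.config.floatX), name='W')
    b = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='b')
    p_y = T.nnet.softmax(T.dot(x, W) + b)
    loss = -T.mean(T.log(p_y)[T.arange(y.shape[0]), y])
    params, param_shapes = [W, b], [(20, 3), (3,)]
    param_grads = [T.grad(loss, p) for p in params]
    updates = build_adadelta_updates(params, param_shapes, param_grads)
    train = theano.function([x, y], loss, updates=updates)
    print(train(rng.randn(8, 20).astype(theano.config.floatX),
                rng.randint(0, 3, size=8).astype('int32')))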
|
qtrader/simulation/tests/__init__.py | aaron8tang/qtrader | 381 | 12788691 | <reponame>aaron8tang/qtrader
from qtrader.simulation.tests.arbitrage import Arbitrage
from qtrader.simulation.tests.moments import Moments
|
tests/unittests/analysis/test_lpi.py | obilaniu/orion | 177 | 12788742 | <filename>tests/unittests/analysis/test_lpi.py
# -*- coding: utf-8 -*-
"""Tests :func:`orion.analysis.lpi`"""
import copy
import numpy
import pandas as pd
import pytest
from orion.analysis.base import to_numpy, train_regressor
from orion.analysis.lpi_utils import compute_variances, lpi, make_grid
from orion.core.io.space_builder import SpaceBuilder
data = pd.DataFrame(
data={
"id": ["a", "b", "c", "d"],
"x": [0, 1, 2, 3],
"y": [1, 2, 0, 3],
"objective": [0.1, 0.2, 0.3, 0.5],
}
)
space = SpaceBuilder().build({"x": "uniform(0, 6)", "y": "uniform(0, 3)"})
def test_accept_empty():
"""Tests an empty dataframe is returned if you give an empty dataframe"""
empty_frame = pd.DataFrame()
results = lpi(empty_frame, space)
assert results.columns.tolist() == ["LPI"]
assert results.index.tolist() == list(space.keys())
assert results["LPI"].tolist() == [0, 0]
empty_frame = pd.DataFrame(columns=["x", "y", "objective"])
results = lpi(empty_frame, space)
assert results.columns.tolist() == ["LPI"]
assert results.index.tolist() == list(space.keys())
assert results["LPI"].tolist() == [0, 0]
def test_parameter_not_modified():
"""Tests the original dataframe is not modified"""
original = copy.deepcopy(data)
lpi(data, space)
pd.testing.assert_frame_equal(data, original)
def test_make_grid():
"""Test grid has correct format"""
trials = to_numpy(data, space)
model = train_regressor("RandomForestRegressor", trials)
best_point = trials[numpy.argmin(trials[:, -1])]
grid = make_grid(best_point, space, model, 4)
# Are fixed to anchor value
numpy.testing.assert_equal(grid[0][:, 1], best_point[1])
numpy.testing.assert_equal(grid[1][:, 0], best_point[0])
# Is a grid in search space
numpy.testing.assert_equal(grid[0][:, 0], [0, 2, 4, 6])
numpy.testing.assert_equal(grid[1][:, 1], [0, 1, 2, 3])
def test_make_grid_predictor(monkeypatch):
"""Test grid contains corresponding predictions from the model"""
trials = to_numpy(data, space)
model = train_regressor("RandomForestRegressor", trials)
best_point = trials[numpy.argmin(trials[:, -1])]
# Make sure model is not predicting exactly the original objective
with numpy.testing.assert_raises(AssertionError):
numpy.testing.assert_equal(
best_point[-1], model.predict(best_point[:-1].reshape(1, -1))
)
grid = make_grid(best_point, space, model, 4)
# Verify that grid predictions are those of the model
numpy.testing.assert_equal(grid[0][:, -1], model.predict(grid[0][:, :-1]))
numpy.testing.assert_equal(grid[1][:, -1], model.predict(grid[1][:, :-1]))
# Verify model predictions differ on different points
with numpy.testing.assert_raises(AssertionError):
numpy.testing.assert_equal(grid[0][:, -1], grid[1][:, -1])
def test_compute_variance():
"""Test variance computation over the grid"""
grid = numpy.arange(3 * 5 * 4).reshape(3, 5, 4)
grid[0, :, -1] = 10
grid[1, :, -1] = [0, 1, 2, 3, 4]
grid[2, :, -1] = [0, 10, 20, 30, 40]
variances = compute_variances(grid)
assert variances.shape == (3,)
assert variances[0] == 0
assert variances[1] == numpy.var([0, 1, 2, 3, 4])
assert variances[2] == numpy.var([0, 10, 20, 30, 40])
def test_lpi_results():
"""Verify LPI results in DataFrame"""
results = lpi(data, space, random_state=1)
assert results.columns.tolist() == ["LPI", "STD"]
assert results.index.tolist() == list(space.keys())
# The data is made such that x correlates more strongly with objective than y
assert results["LPI"].loc["x"] > results["LPI"].loc["y"]
def test_lpi_with_categorical_data():
"""Verify LPI can be computed on categorical dimensions"""
data = pd.DataFrame(
data={
"id": ["a", "b", "c", "d"],
"x": [0, 1, 2, 3],
"y": ["b", "c", "a", "d"],
"objective": [0.1, 0.2, 0.3, 0.5],
}
)
space = SpaceBuilder().build(
{"x": "uniform(0, 6)", "y": 'choices(["a", "b", "c", "d"])'}
)
results = lpi(data, space, random_state=1)
assert results.columns.tolist() == ["LPI", "STD"]
assert results.index.tolist() == ["x", "y"]
# The data is made such that x correlates more strongly with objective than y
assert results["LPI"].loc["x"] > results["LPI"].loc["y"]
def test_lpi_with_multidim_data():
"""Verify LPI can be computed on categorical dimensions"""
data = pd.DataFrame(
data={
"id": ["a", "b", "c", "d"],
"x": [[0, 2, 4], [1, 1, 3], [2, 2, 2], [3, 0, 3]],
"y": [["b", "b"], ["c", "b"], ["a", "a"], ["d", "c"]],
"objective": [0.1, 0.2, 0.3, 0.5],
}
)
space = SpaceBuilder().build(
{"x": "uniform(0, 6, shape=3)", "y": 'choices(["a", "b", "c", "d"], shape=2)'}
)
results = lpi(data, space, random_state=1)
assert results.columns.tolist() == ["LPI", "STD"]
assert results.index.tolist() == ["x[0]", "x[1]", "x[2]", "y[0]", "y[1]"]
    # The data is made such that some x correlates more strongly with objective than other x and most y
assert results["LPI"].loc["x[0]"] > results["LPI"].loc["x[1]"]
assert results["LPI"].loc["x[1]"] > results["LPI"].loc["x[2]"]
assert results["LPI"].loc["x[0]"] > results["LPI"].loc["y[0]"]
assert results["LPI"].loc["x[0]"] > results["LPI"].loc["y[1]"]
def test_lpi_n_points(monkeypatch):
"""Verify given number of points is used"""
N_POINTS = numpy.random.randint(2, 50)
def mock_make_grid(*args, **kwargs):
grid = make_grid(*args, **kwargs)
assert grid.shape == (len(space), N_POINTS, len(space) + 1)
return grid
monkeypatch.setattr("orion.analysis.lpi_utils.make_grid", mock_make_grid)
lpi(data, space, random_state=1, n_points=N_POINTS)
def test_lpi_n_runs(monkeypatch):
"""Verify number of runs"""
N_RUNS = 5
seeds = set()
n_runs = 0
def mock_train_regressor(*args, **kwargs):
nonlocal n_runs
n_runs += 1
seeds.add(kwargs["random_state"])
return train_regressor(*args, **kwargs)
monkeypatch.setattr(
"orion.analysis.lpi_utils.train_regressor", mock_train_regressor
)
lpi(data, space, random_state=1, n_runs=N_RUNS)
assert n_runs == N_RUNS
assert len(seeds) > 0
|
esda/tests/test_local_geary_mv.py | jeffcsauer/esda | 145 | 12788744 | import unittest
import libpysal
from libpysal.common import pandas, RTOL, ATOL
from esda.geary_local_mv import Geary_Local_MV
import numpy as np
PANDAS_EXTINCT = pandas is None
class Geary_Local_MV_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(100)
self.w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
self.y1 = np.array(f.by_col['HR8893'])
self.y2 = np.array(f.by_col['HC8488'])
def test_local_geary_mv(self):
lG_mv = Geary_Local_MV(connectivity=self.w).fit([self.y1, self.y2])
print(lG_mv.p_sim[0])
self.assertAlmostEqual(lG_mv.localG[0], 0.4096931479581422)
self.assertAlmostEqual(lG_mv.p_sim[0], 0.211)
suite = unittest.TestSuite()
test_classes = [
Geary_Local_MV_Tester
]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite)
|
pybaseball/cache/file_utils.py | reddigari/pybaseball | 650 | 12788749 | import json
import os
import pathlib
from typing import Any, Dict, List, Union, cast
JSONData = Union[List[Any], Dict[str, Any]]
# Splitting this out for testing with no side effects
def mkdir(directory: str) -> None:
return pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
# Splitting this out for testing with no side effects
def remove(filename: str) -> None:
return os.remove(filename)
def safe_jsonify(directory: str, filename: str, data: JSONData) -> None:
mkdir(directory)
fname = os.path.join(directory, filename)
with open(fname, 'w') as json_file:
json.dump(data, json_file)
def load_json(filename: str) -> JSONData:
with open(filename) as json_file:
return cast(JSONData, json.load(json_file))
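# Minimal round-trip sketch (not part of the original module; the paths are
# arbitrary examples):
if __name__ == "__main__":
    safe_jsonify("/tmp/pybaseball_cache_demo", "demo.json", {"rows": [1, 2, 3]})
    print(load_json("/tmp/pybaseball_cache_demo/demo.json"))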
|
grr/server/grr_response_server/check_lib/__init__.py | khanhgithead/grr | 4,238 | 12788826 | <reponame>khanhgithead/grr
#!/usr/bin/env python
"""This is the check capabilities used to post-process host data."""
# pylint: disable=g-import-not-at-top,unused-import
from grr_response_server.check_lib import checks
from grr_response_server.check_lib import hints
from grr_response_server.check_lib import triggers
|
tests/micropython/opt_level.py | sebi5361/micropython | 181 | 12788851 | <reponame>sebi5361/micropython
import micropython as micropython
# check we can get and set the level
micropython.opt_level(0)
print(micropython.opt_level())
micropython.opt_level(1)
print(micropython.opt_level())
# check that the optimisation levels actually differ
micropython.opt_level(0)
exec('print(__debug__)')
micropython.opt_level(1)
exec('print(__debug__)')
exec('assert 0')
|
vergeml/sources/mnist.py | ss18/vergeml | 324 | 12788866 | <gh_stars>100-1000
from vergeml.img import INPUT_PATTERNS, open_image, fixext, ImageType
from vergeml.io import source, SourcePlugin, Sample
from vergeml.data import Labels
from vergeml.utils import VergeMLError
from vergeml.sources.labeled_image import LabeledImageSource
import random
import numpy as np
from PIL import Image
import os.path
import json
from operator import methodcaller
import io
from typing import List
import gzip
import hashlib
_FILES = ("train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz")
_MNIST_LABELS = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
_FASHION_MNIST_LABELS = ("tshirt_top",
"trouser",
"pullover",
"dress",
"coat",
"sandal",
"shirt",
"sneaker",
"sag",
"ankle_boot")
# we use the md5 to check for fashion mnist, so we can provide the labels
# automatically
_MD5_FASHION = "8d4fb7e6c68d591d4c3dfef9ec88bf0d"
def _md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
@source('image', descr="Load images in MNIST format.")
class InputMnist(SourcePlugin):
data = None
def num_samples(self, split: str) -> int:
return len(self.data[split])
def read_sample(self, split: str, index: int):
return self.data[split][index]
def _check_files(self):
self.data = dict(train=[], val=[], test=[])
samples_dir = self.config["samples_dir"]
files = [os.path.join(samples_dir, file) for file in _FILES]
for path in files:
if not os.path.exists(path):
raise VergeMLError("File not found in samples_dir: {}".format(
os.path.basename(path)))
if _md5(files[0]) == _MD5_FASHION:
self.meta['labels'] = _FASHION_MNIST_LABELS
else:
self.meta['labels'] = _MNIST_LABELS
# preload
for split, images, labels in (('train', files[0], files[1]), ('test', files[2], files[3])):
with gzip.open(images) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), 'B', offset=16)
pixels = pixels.reshape(-1, 28, 28)
with gzip.open(labels) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), 'B', offset=8)
n_cols = integer_labels.max() + 1
for ix, imagearr in enumerate(pixels):
label = integer_labels[ix]
onehot = np.zeros((n_cols), dtype='float32')
onehot[label] = 1.0
self.data[split].append((Image.fromarray(imagearr), onehot,
dict(labels=self.meta['labels'],
filename=images,
split=split,
types=('pil', 'labels'))))
if split == 'train':
n = self.config['val_num']
if self.config['val_perc'] is not None:
n = int(len(self.data['train']) * self.config['val_perc'] // 100)
if n is not None:
if n > len(self.data['train']):
raise VergeMLError("number of test samples is greater than number of available samples.")
rng = random.Random(self.config['random_seed'])
count = len(self.data[split])
indices = rng.sample(range(count), count)
self.data['val'] = [self.data['train'][i] for i in indices[:n]]
self.data['train'] = [self.data['train'][i] for i in indices[n:]]
else:
if self.config['test_num']:
if self.config['test_num'] > len(self.data['test']):
raise VergeMLError("number of test samples is greater than number of available samples.")
rng = random.Random(self.config['random_seed'])
indices = rng.sample(range(len(self.data[split])), len(pixels))
self.data['test'] = [self.data['test'][i] for i in indices[:n]]
plugin = InputMnist
|
pycket/prims/hash.py | namin/pycket | 129 | 12788878 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.hash.base import W_HashTable, W_ImmutableHashTable, w_missing
from pycket.hash.simple import (
W_EqvMutableHashTable, W_EqMutableHashTable,
W_EqvImmutableHashTable, W_EqImmutableHashTable,
make_simple_mutable_table, make_simple_mutable_table_assocs,
make_simple_immutable_table, make_simple_immutable_table_assocs)
from pycket.hash.equal import W_EqualHashTable
from pycket.impersonators.baseline import W_ImpHashTable, W_ChpHashTable
from pycket.cont import continuation, loop_label
from pycket.error import SchemeException
from pycket.prims.expose import default, expose, procedure, define_nyi
from rpython.rlib import jit, objectmodel
_KEY = 0
_VALUE = 1
_KEY_AND_VALUE = 2
_PAIR = 3
PREFIXES = ["unsafe-mutable", "unsafe-immutable"]
def prefix_hash_names(base):
result = [base]
for pre in PREFIXES:
result.append("%s-%s" % (pre, base))
return result
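# For example, prefix_hash_names("hash-iterate-first") returns
# ["hash-iterate-first", "unsafe-mutable-hash-iterate-first",
#  "unsafe-immutable-hash-iterate-first"], so each @expose below registers the
# same primitive under both its checked and unsafe names.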
@expose(prefix_hash_names("hash-iterate-first"), [W_HashTable])
def hash_iterate_first(ht):
if ht.length() == 0:
return values.w_false
return values.W_Fixnum.ZERO
@expose(prefix_hash_names("hash-iterate-next"), [W_HashTable, values.W_Fixnum])
def hash_iterate_next(ht, pos):
return ht.hash_iterate_next(pos)
@objectmodel.specialize.arg(4)
def hash_iter_ref(ht, n, env, cont, returns):
from pycket.interpreter import return_value, return_multi_vals
try:
w_key, w_val = ht.get_item(n)
if returns == _KEY:
return return_value(w_key, env, cont)
if returns == _VALUE:
return return_value(w_val, env, cont)
if returns == _KEY_AND_VALUE:
vals = values.Values._make2(w_key, w_val)
return return_multi_vals(vals, env, cont)
if returns == _PAIR:
vals = values.W_Cons.make(w_key, w_val)
return return_value(vals, env, cont)
assert False, "unknown return code"
except KeyError:
raise SchemeException("hash-iterate-key: invalid position")
except IndexError:
raise SchemeException("hash-iterate-key: invalid position")
@expose(prefix_hash_names("hash-iterate-key"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY)
@expose(prefix_hash_names("hash-iterate-value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_VALUE)
@expose(prefix_hash_names("hash-iterate-key+value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY_AND_VALUE)
@expose(prefix_hash_names("hash-iterate-pair"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_pair(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_PAIR)
@expose("hash-for-each", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_for_each(ht, f, try_order, env, cont):
# FIXME: implmeent try-order? -- see hash-map
return hash_for_each_loop(ht, f, 0, env, cont)
@loop_label
def hash_for_each_loop(ht, f, index, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_for_each_loop(ht, f, index + 1, env, cont)
except IndexError:
return return_value(values.w_void, env, cont)
return f.call([w_key, w_value], env,
hash_for_each_cont(ht, f, index, env, cont))
@continuation
def hash_for_each_cont(ht, f, index, env, cont, _vals):
return hash_for_each_loop(ht, f, index + 1, env, cont)
@expose("hash-map", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_map(h, f, try_order, env, cont):
# FIXME : If try-order? is true, then the order of keys and values
# passed to proc is normalized under certain circumstances, such
# as when the keys are all symbols and hash is not an
# impersonator.
from pycket.interpreter import return_value
acc = values.w_null
return hash_map_loop(f, h, 0, acc, env, cont)
# f.enable_jitting()
# return return_value(w_missing, env,
# hash_map_cont(f, h, 0, acc, env, cont))
@loop_label
def hash_map_loop(f, ht, index, w_acc, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
except IndexError:
return return_value(w_acc, env, cont)
after = hash_map_cont(f, ht, index, w_acc, env, cont)
return f.call([w_key, w_value], env, after)
@continuation
def hash_map_cont(f, ht, index, w_acc, env, cont, _vals):
from pycket.interpreter import check_one_val
w_val = check_one_val(_vals)
w_acc = values.W_Cons.make(w_val, w_acc)
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
@jit.elidable
def from_assocs(assocs, fname):
if not assocs.is_proper_list():
raise SchemeException("%s: expected proper list" % fname)
keys = []
vals = []
while isinstance(assocs, values.W_Cons):
val, assocs = assocs.car(), assocs.cdr()
if not isinstance(val, values.W_Cons):
raise SchemeException("%s: expected list of pairs" % fname)
keys.append(val.car())
vals.append(val.cdr())
return keys[:], vals[:]
@expose("make-weak-hasheq", [default(values.W_List, values.w_null)])
def make_weak_hasheq(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqMutableHashTable, assocs, "make-weak-hasheq")
@expose("make-weak-hasheqv", [default(values.W_List, values.w_null)])
def make_weak_hasheqv(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, assocs, "make-weak-hasheqv")
@expose(["make-weak-hash", "make-late-weak-hasheq"], [default(values.W_List, None)])
def make_weak_hash(assocs):
if assocs is None:
return W_EqualHashTable([], [], immutable=False)
return W_EqualHashTable(*from_assocs(assocs, "make-weak-hash"), immutable=False)
@expose("make-immutable-hash", [default(values.W_List, values.w_null)])
def make_immutable_hash(assocs):
keys, vals = from_assocs(assocs, "make-immutable-hash")
return W_EqualHashTable(keys, vals, immutable=True)
@expose("make-immutable-hasheq", [default(values.W_List, values.w_null)])
def make_immutable_hasheq(assocs):
return make_simple_immutable_table_assocs(W_EqImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("make-immutable-hasheqv", [default(values.W_List, values.w_null)])
def make_immutable_hasheqv(assocs):
return make_simple_immutable_table_assocs(W_EqvImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("hash")
def hash(args):
if len(args) % 2 != 0:
raise SchemeException("hash: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return W_EqualHashTable(keys, vals, immutable=True)
@expose("hasheq")
def hasheq(args):
if len(args) % 2 != 0:
raise SchemeException("hasheq: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqImmutableHashTable, keys, vals)
@expose("hasheqv")
def hasheqv(args):
if len(args) % 2 != 0:
raise SchemeException("hasheqv: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqvImmutableHashTable, keys, vals)
@expose("make-hash", [default(values.W_List, values.w_null)])
def make_hash(pairs):
return W_EqualHashTable(*from_assocs(pairs, "make-hash"))
@expose("make-hasheq", [default(values.W_List, values.w_null)])
def make_hasheq(pairs):
return make_simple_mutable_table_assocs(W_EqMutableHashTable, pairs, "make-hasheq")
@expose("make-hasheqv", [default(values.W_List, values.w_null)])
def make_hasheqv(pairs):
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, pairs, "make-hasheqv")
@expose("hash-set!", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set_bang(ht, k, v, env, cont):
if ht.immutable():
raise SchemeException("hash-set!: given immutable table")
return ht.hash_set(k, v, env, cont)
@continuation
def hash_set_cont(key, val, env, cont, _vals):
from pycket.interpreter import check_one_val
table = check_one_val(_vals)
return table.hash_set(key, val, env, return_table_cont(table, env, cont))
@continuation
def return_table_cont(table, env, cont, _vals):
from pycket.interpreter import return_value
return return_value(table, env, cont)
@expose("hash-set", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set(table, key, val, env, cont):
from pycket.interpreter import return_value
if not table.immutable():
raise SchemeException("hash-set: not given an immutable table")
# Fast path
if isinstance(table, W_ImmutableHashTable):
new_table = table.assoc(key, val)
return return_value(new_table, env, cont)
return hash_copy(table, env,
hash_set_cont(key, val, env, cont))
@continuation
def hash_ref_cont(default, k, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is not w_missing:
return return_value(val, env, cont)
if default is None:
raise SchemeException("key %s not found"%k.tostring())
if default.iscallable():
return default.call([], env, cont)
return return_value(default, env, cont)
@expose("hash-ref", [W_HashTable, values.W_Object, default(values.W_Object, None)], simple=False)
def hash_ref(ht, k, default, env, cont):
return ht.hash_ref(k, env, hash_ref_cont(default, k, env, cont))
@expose("hash-remove!", [W_HashTable, values.W_Object], simple=False)
def hash_remove_bang(ht, k, env, cont):
if ht.immutable():
raise SchemeException("hash-remove!: expected mutable hash table")
return ht.hash_remove_inplace(k, env, cont)
@expose("hash-remove", [W_HashTable, values.W_Object], simple=False)
def hash_remove(ht, k, env, cont):
if not ht.immutable():
raise SchemeException("hash-remove: expected immutable hash table")
return ht.hash_remove(k, env, cont)
@continuation
def hash_clear_cont(ht, env, cont, _vals):
return hash_clear_loop(ht, env, cont)
def hash_clear_loop(ht, env, cont):
from pycket.interpreter import return_value
if ht.length() == 0:
return return_value(values.w_void, env, cont)
w_k, w_v = ht.get_item(0)
return ht.hash_remove_inplace(w_k, env, hash_clear_cont(ht, env, cont))
@expose("hash-clear!", [W_HashTable], simple=False)
def hash_clear_bang(ht, env, cont):
from pycket.interpreter import return_value
if ht.is_impersonator():
ht.hash_clear_proc(env, cont)
return hash_clear_loop(ht, env, cont)
else:
ht.hash_empty()
return return_value(values.w_void, env, cont)
define_nyi("hash-clear", [W_HashTable])
@expose("hash-count", [W_HashTable])
def hash_count(hash):
return values.W_Fixnum(hash.length())
@continuation
def hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is values.w_false:
return return_value(values.w_false, env, cont)
else:
return hash_keys_subset_huh_loop(keys_vals, hash_2, idx + 1, env, cont)
@loop_label
def hash_keys_subset_huh_loop(keys_vals, hash_2, idx, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys_vals):
return return_value(values.w_true, env, cont)
else:
return hash_ref([hash_2, keys_vals[idx][0], values.w_false], env,
hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont))
@jit.elidable
def uses_same_eq_comparison(hash_1, hash_2):
h_1 = hash_1
h_2 = hash_2
if hash_1.is_impersonator() or hash_1.is_chaperone():
h_1 = hash_1.get_proxied()
if hash_2.is_impersonator() or hash_2.is_chaperone():
h_2 = hash_2.get_proxied()
if isinstance(h_1, W_EqualHashTable):
return isinstance(h_2, W_EqualHashTable)
elif isinstance(h_1, W_EqMutableHashTable) or isinstance(h_1, W_EqImmutableHashTable):
return isinstance(h_2, W_EqMutableHashTable) or isinstance(h_2, W_EqImmutableHashTable)
elif isinstance(h_1, W_EqvMutableHashTable) or isinstance(h_1, W_EqvImmutableHashTable):
return isinstance(h_2, W_EqvMutableHashTable) or isinstance(h_2, W_EqvImmutableHashTable)
else:
return False
@expose("hash-keys-subset?", [W_HashTable, W_HashTable], simple=False)
def hash_keys_subset_huh(hash_1, hash_2, env, cont):
if not uses_same_eq_comparison(hash_1, hash_2):
raise SchemeException("hash-keys-subset?: given hash tables do not use the same key comparison -- first table : %s - second table: %s" % (hash_1.tostring(), hash_2.tostring()))
return hash_keys_subset_huh_loop(hash_1.hash_items(), hash_2, 0, env, cont)
@continuation
def hash_copy_ref_cont(keys, idx, src, new, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
return new.hash_set(keys[idx][0], val, env,
hash_copy_set_cont(keys, idx, src, new, env, cont))
@continuation
def hash_copy_set_cont(keys, idx, src, new, env, cont, _vals):
return hash_copy_loop(keys, idx + 1, src, new, env, cont)
@loop_label
def hash_copy_loop(keys, idx, src, new, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys):
return return_value(new, env, cont)
return src.hash_ref(keys[idx][0], env,
hash_copy_ref_cont(keys, idx, src, new, env, cont))
def hash_copy(src, env, cont):
from pycket.interpreter import return_value
if isinstance(src, W_ImmutableHashTable):
new = src.make_copy()
return return_value(new, env, cont)
new = src.make_empty()
if src.length() == 0:
return return_value(new, env, cont)
return hash_copy_loop(src.hash_items(), 0, src, new, env, cont)
expose("hash-copy", [W_HashTable], simple=False)(hash_copy)
# FIXME: not implemented
@expose("equal-hash-code", [values.W_Object])
def equal_hash_code(v):
# only for improper path cache entries
if isinstance(v, values.W_Cons):
if v.is_proper_list():
return values.W_Fixnum.ZERO
nm = v.car()
p = v.cdr()
if isinstance(nm, values_string.W_String) and \
isinstance(p, values.W_Path) and \
isinstance(p.path, str):
return values.W_Fixnum(objectmodel.compute_hash((nm.tostring(), p.path)))
return values.W_Fixnum.ZERO
@expose("equal-secondary-hash-code", [values.W_Object])
def equal_secondary_hash_code(v):
return values.W_Fixnum.ZERO
@expose("eq-hash-code", [values.W_Object])
def eq_hash_code(v):
t = type(v)
if t is values.W_Fixnum:
return v
if t is values.W_Flonum:
hash = objectmodel.compute_hash(v.value)
elif t is values.W_Character:
hash = objectmodel.compute_hash(v.value)
else:
hash = objectmodel.compute_hash(v)
return values.W_Fixnum(hash)
@expose("eqv-hash-code", [values.W_Object])
def eqv_hash_code(v):
hash = v.hash_eqv()
return values.W_Fixnum(hash)
|
linAlgVis.py | testinggg-art/Linear_Algebra_With_Python | 1,719 | 12788895 | <filename>linAlgVis.py
import matplotlib.pyplot as plt
import numpy as np
import numpy
def linearCombo(a, b, c):
'''This function is for visualizing linear combination of standard basis in 3D.
Function syntax: linearCombo(a, b, c), where a, b, c are the scalar multiplier,
also the elements of the vector.
'''
fig = plt.figure(figsize = (10,10))
ax = fig.add_subplot(projection='3d')
######################## Standard basis and Scalar Multiplid Vectors#########################
vec = np.array([[[0, 0, 0, 1, 0, 0]], # e1
[[0, 0, 0, 0, 1, 0]], # e2
[[0, 0, 0, 0, 0, 1]], # e3
[[0, 0, 0, a, 0, 0]], # a* e1
[[0, 0, 0, 0, b, 0]], # b* e2
[[0, 0, 0, 0, 0, c]], # c* e3
[[0, 0, 0, a, b, c]]]) # ae1 + be2 + ce3
colors = ['b','b','b','r','r','r','g']
for i in range(vec.shape[0]):
X, Y, Z, U, V, W = zip(*vec[i,:,:])
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False,
color = colors[i] ,arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3, alpha =.6)
#################################Plot Rectangle Boxes##############################
dlines = np.array([[[a, 0, 0],[a, b, 0]],
[[0, b, 0],[a, b, 0]],
[[0, 0, c],[a, b, c]],
[[0, 0, c],[a, 0, c]],
[[a, 0, c],[a, b, c]],
[[0, 0, c],[0, b, c]],
[[0, b, c],[a, b, c]],
[[a, 0, 0],[a, 0, c]],
[[0, b, 0],[0, b, c]],
[[a, b, 0],[a, b, c]]])
colors = ['k','k','g','k','k','k','k','k','k']
for i in range(dlines.shape[0]):
ax.plot(dlines[i,:,0], dlines[i,:,1], dlines[i,:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
#################################Annotation########################################
ax.text(x = a, y = b, z = c, s= ' $(%0.d, %0.d, %.0d)$'% (a, b, c), size = 18)
ax.text(x = a, y = 0, z = 0, s= ' $%0.d e_1 = (%0.d, 0, 0)$'% (a, a), size = 15)
ax.text(x = 0, y = b, z = 0, s= ' $%0.d e_2 = (0, %0.d, 0)$'% (b, b), size = 15)
ax.text(x = 0, y = 0, z = c, s= ' $%0.d e_3 = (0, 0, %0.d)$' %(c, c), size = 15)
#################################Axis Setting######################################
ax.grid()
ax.set_xlim([0, a+1])
ax.set_ylim([0, b+1])
ax.set_zlim([0, c+1])
ax.set_xlabel('x-axis', size = 18)
ax.set_ylabel('y-axis', size = 18)
ax.set_zlabel('z-axis', size = 18)
ax.set_title('Vector $(%0.d, %0.d, %.0d)$ Visualization' %(a, b, c), size = 20)
ax.view_init(elev=20., azim=15)
if __name__ == '__main__':
a = 7
b = 4
c = 9
linearCombo(a, b, c)
def linearComboNonStd(a, b, c, vec1, vec2, vec3):
'''This function is for visualizing linear combination of non-standard basis in 3D.
Function syntax: linearCombo(a, b, c, vec1, vec2, vec3), where a, b, c are the scalar multiplier,
ve1, vec2 and vec3 are the basis.
'''
fig = plt.figure(figsize = (10,10))
ax = fig.add_subplot(projection='3d')
########################Plot basis##############################
vec1 = np.array([[0, 0, 0, vec1[0], vec1[1], vec1[2]]])
X, Y, Z, U, V, W = zip(*vec1)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'blue',arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
vec2 = np.array([[0, 0, 0, vec2[0], vec2[1], vec2[2]]])
X, Y, Z, U, V, W = zip(*vec2)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'blue',arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
vec3 = np.array([[0, 0, 0, vec3[0], vec3[1], vec3[2]]])
X, Y, Z, U, V, W = zip(*vec3)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'blue',arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
###########################Plot Scalar Muliplied Vectors####################
avec1 = a * vec1
X, Y, Z, U, V, W = zip(*avec1)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'red', alpha = .6,arrow_length_ratio = a/100, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
bvec2 = b * vec2
X, Y, Z, U, V, W = zip(*bvec2)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'red', alpha = .6,arrow_length_ratio = b/100, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
cvec3 = c * vec3
X, Y, Z, U, V, W = zip(*cvec3)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'red', alpha = .6,arrow_length_ratio = c/100, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
combo = avec1 + bvec2 + cvec3
X, Y, Z, U, V, W = zip(*combo)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'green', alpha = .7,arrow_length_ratio = np.linalg.norm(combo)/300, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
#################################Plot Rectangle Boxes##############################
point1 = [avec1[0, 3], avec1[0, 4], avec1[0, 5]]
point2 = [avec1[0, 3]+bvec2[0, 3], avec1[0, 4]+bvec2[0, 4], avec1[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [bvec2[0, 3], bvec2[0, 4], bvec2[0, 5]]
point2 = [avec1[0, 3]+bvec2[0, 3], avec1[0, 4]+bvec2[0, 4], avec1[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [bvec2[0, 3], bvec2[0, 4], bvec2[0, 5]]
point2 = [cvec3[0, 3]+bvec2[0, 3], cvec3[0, 4]+bvec2[0, 4], cvec3[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [cvec3[0, 3], cvec3[0, 4], cvec3[0, 5]]
point2 = [cvec3[0, 3]+bvec2[0, 3], cvec3[0, 4]+bvec2[0, 4], cvec3[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [cvec3[0, 3], cvec3[0, 4], cvec3[0, 5]]
point2 = [cvec3[0, 3]+avec1[0, 3], cvec3[0, 4]+avec1[0, 4], cvec3[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [avec1[0, 3], avec1[0, 4], avec1[0, 5]]
point2 = [cvec3[0, 3]+avec1[0, 3], cvec3[0, 4]+avec1[0, 4], cvec3[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
##
point1 = [avec1[0, 3]+bvec2[0, 3]+cvec3[0, 3],
avec1[0, 4]+bvec2[0, 4]+cvec3[0, 4],
avec1[0, 5]+bvec2[0, 5]+cvec3[0, 5]]
point2 = [cvec3[0, 3]+avec1[0, 3],
cvec3[0, 4]+avec1[0, 4],
cvec3[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
##
point1 = [avec1[0, 3]+bvec2[0, 3]+cvec3[0, 3],
avec1[0, 4]+bvec2[0, 4]+cvec3[0, 4],
avec1[0, 5]+bvec2[0, 5]+cvec3[0, 5]]
point2 = [cvec3[0, 3]+bvec2[0, 3],
cvec3[0, 4]+bvec2[0, 4],
cvec3[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
##
point1 = [avec1[0, 3]+bvec2[0, 3]+cvec3[0, 3],
avec1[0, 4]+bvec2[0, 4]+cvec3[0, 4],
avec1[0, 5]+bvec2[0, 5]+cvec3[0, 5]]
point2 = [bvec2[0, 3]+avec1[0, 3],
bvec2[0, 4]+avec1[0, 4],
bvec2[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
#################################Annotation########################################
    ax.text(x = vec1[0,3], y = vec1[0,4], z = vec1[0,5], s= ' $v_1 =(%0.d, %0.d, %.0d)$'% (vec1[0,3], vec1[0,4], vec1[0,5]), size = 8)
    ax.text(x = vec2[0,3], y = vec2[0,4], z = vec2[0,5], s= ' $v_2 =(%0.d, %0.d, %.0d)$'% (vec2[0,3], vec2[0,4], vec2[0,5]), size = 8)
    ax.text(x = vec3[0,3], y = vec3[0,4], z = vec3[0,5], s= ' $v_3= (%0.d, %0.d, %.0d)$'% (vec3[0,3], vec3[0,4], vec3[0,5]), size = 8)
    ax.text(x = avec1[0,3], y = avec1[0,4], z = avec1[0,5], s= ' $%.0d v_1 =(%0.d, %0.d, %.0d)$'% (a, avec1[0,3], avec1[0,4], avec1[0,5]), size = 8)
    ax.text(x = bvec2[0,3], y = bvec2[0,4], z = bvec2[0,5], s= ' $%.0d v_2 =(%0.d, %0.d, %.0d)$'% (b, bvec2[0,3], bvec2[0,4], bvec2[0,5]), size = 8)
    ax.text(x = cvec3[0,3], y = cvec3[0,4], z = cvec3[0,5], s= ' $%.0d v_3= (%0.d, %0.d, %.0d)$'% (c, cvec3[0,3], cvec3[0,4], cvec3[0,5]), size = 8)
# ax.text(x = 0, y = b, z = 0, s= ' $%0.d e_2 = (0, %0.d, 0)$'% (b, b), size = 15)
# ax.text(x = 0, y = 0, z = c, s= ' $%0.d e_3 = (0, 0, %0.d)$' %(c, c), size = 15)
#################################Axis Setting######################################
ax.grid()
ax.set_xlim([0, 15])
ax.set_ylim([0, 15])
ax.set_zlim([0, 15])
ax.set_xlabel('x-axis', size = 18)
ax.set_ylabel('y-axis', size = 18)
ax.set_zlabel('z-axis', size = 18)
#ax.set_title('Vector $(%0.d, %0.d, %.0d)$ Visualization' %(a, b, c), size = 20)
ax.view_init(elev=20., azim=15)
if __name__ == '__main__':
a = 2
b = 3
c = 4
vec1 = np.array([2,1,0])
vec2 = np.array([0,3,1])
vec3 = np.array([1,2,3])
linearComboNonStd(a, b, c, vec1,vec2,vec3)
|
mmdet/det_core/utils/mAP_utils.py | Karybdis/mmdetection-mini | 834 | 12788937 | import numpy as np
from multiprocessing import Pool
from ..bbox import bbox_overlaps
# https://zhuanlan.zhihu.com/p/34655990
def calc_PR_curve(pred, label):
    pos = label[label == 1]  # positive samples
    threshold = np.sort(pred)[::-1]  # per-sample positive-class scores, in descending order
label = label[pred.argsort()[::-1]]
precision = []
recall = []
tp = 0
fp = 0
    ap = 0  # average precision
for i in range(len(threshold)):
if label[i] == 1:
tp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
            # approximate area under the PR curve
ap += (recall[i] - recall[i - 1]) * precision[i]
else:
fp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
return precision, recall, ap
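# Worked toy example (illustrative, not part of the original file): with
#   pred  = np.array([0.9, 0.8, 0.7, 0.6, 0.5])
#   label = np.array([1, 0, 1, 1, 0])
# the ranked sweep above yields
#   precision = [1, 1/2, 2/3, 3/4, 3/5]
#   recall    = [1/3, 1/3, 2/3, 1, 1]
# and the accumulated approximate area is ap = 2/9 + 1/4 ≈ 0.47.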
def tpfp_voc(det_bboxes, gt_bboxes, iou_thr=0.5):
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
    # tp and fp are sized by the number of predictions, not the number of gt boxes
tp = np.zeros(num_dets, dtype=np.float32)
fp = np.zeros(num_dets, dtype=np.float32)
    # if there are no gt boxes, every predicted box is a false positive, so set fp to 1 everywhere
if gt_bboxes.shape[0] == 0:
fp[...] = 1
return tp, fp
if num_dets == 0:
return tp, fp
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes).numpy()
# print(ious)
    # for each predicted box, the IoU with its best-matching gt
ious_max = ious.max(axis=1)
    # for each predicted box, the index of its best-matching gt
ious_argmax = ious.argmax(axis=1)
    # sort predictions by score in descending order
sort_inds = np.argsort(-det_bboxes[:, -1])
gt_covered = np.zeros(num_gts, dtype=bool)
    # when several predictions match one gt, only the highest-scoring one above the threshold counts as tp; the rest are fp
for i in sort_inds:
        # an IoU above the threshold means a candidate match
if ious_max[i] >= iou_thr:
matched_gt = ious_argmax[i]
            # each gt box is matched at most once, to the highest-scoring prediction, not to the highest IoU
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[i] = 1
else:
fp[i] = 1
else:
fp[i] = 1
return tp, fp
def _average_precision(recalls, precisions, mode='voc2007'):
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'voc2012':  # with the precision smoothed, this is the standard PR-curve area
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
        # vectorized, efficient implementation
        for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])  # within each interval precision takes its maximum, i.e. a horizontal segment
for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]  # indices where recall changes, i.e. the x-axis steps
            ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])  # sum of the segment areas
    elif mode == 'voc2007':  # 11-point interpolation; also needs the precision smoothing
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
            'Unrecognized mode, only "voc2007" and "voc2012" are supported')
return ap
# code ref from mmdetection
def voc_eval_map(results, annotations, iou_thr=0.5, name='voc2007', nproc=4):
"""
    :param results: list[list]; the outer list indexes images and the inner list indexes classes.
        With 20 classes the inner list has length 20, and each entry is an n x 5 numpy array holding
        that image's detections for the class in xyxy+confidence format.
    :param annotations: same layout as results (per-image, per-class ground-truth boxes)
    :param iou_thr: IoU threshold for counting a detection as a TP; the VOC default is 0.5
    :param name: which evaluation protocol to use; voc2007 is 11-point interpolation, voc2012 is the standard PR-curve area
    :return: mean AP over the classes that have at least one gt box
"""
assert len(results) == len(annotations)
    num_imgs = len(results)  # number of images
num_classes = len(results[0]) # positive class num
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
cls_dets = [img_res[i] for img_res in results]
cls_gts = [img_res[i] for img_res in annotations]
tpfp = pool.starmap(
tpfp_voc,
zip(cls_dets, cls_gts, [iou_thr for _ in range(num_imgs)]))
        # tp and fp flags for every predicted bbox
tp, fp = tuple(zip(*tpfp))
        # count the gt boxes
num_gts = 0
for j, bbox in enumerate(cls_gts):
num_gts += bbox.shape[0]
        # concatenate the predicted boxes from all images
cls_dets = np.vstack(cls_dets)
        num_dets = cls_dets.shape[0]  # number of detected boxes
        # tp and fp for every predicted bbox were computed above;
        # precision and recall are now computed in a vectorized way
        sort_inds = np.argsort(-cls_dets[:, -1])  # sort by prediction score, descending
        # note: this is a vectorized, faster equivalent of calc_PR_curve (c3_pr_roc.py)
tp = np.hstack(tp)[sort_inds][None]
fp = np.hstack(fp)[sort_inds][None]
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts, eps)
precisions = tp / np.maximum((tp + fp), eps)
recalls = recalls[0, :]
precisions = precisions[0, :]
# print('recalls', recalls, 'precisions', precisions)
ap = _average_precision(recalls, precisions, name)[0]
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
pool.close()
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
return mean_ap
|
deepscm/experiments/medical/ukbb/sem_vi/conditional_sem.py | mobarakol/deepscm | 183 | 12788960 | import torch
import pyro
from pyro.nn import pyro_method
from pyro.distributions import Normal, Bernoulli, TransformedDistribution
from pyro.distributions.conditional import ConditionalTransformedDistribution
from deepscm.distributions.transforms.affine import ConditionalAffineTransform
from pyro.nn import DenseNN
from deepscm.experiments.medical.ukbb.sem_vi.base_sem_experiment import BaseVISEM, MODEL_REGISTRY
class ConditionalVISEM(BaseVISEM):
context_dim = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
# ventricle_volume flow
ventricle_volume_net = DenseNN(2, [8, 16], param_dims=[1, 1], nonlinearity=torch.nn.LeakyReLU(.1))
self.ventricle_volume_flow_components = ConditionalAffineTransform(context_nn=ventricle_volume_net, event_dim=0)
self.ventricle_volume_flow_transforms = [self.ventricle_volume_flow_components, self.ventricle_volume_flow_constraint_transforms]
# brain_volume flow
brain_volume_net = DenseNN(2, [8, 16], param_dims=[1, 1], nonlinearity=torch.nn.LeakyReLU(.1))
self.brain_volume_flow_components = ConditionalAffineTransform(context_nn=brain_volume_net, event_dim=0)
self.brain_volume_flow_transforms = [self.brain_volume_flow_components, self.brain_volume_flow_constraint_transforms]
@pyro_method
def pgm_model(self):
sex_dist = Bernoulli(logits=self.sex_logits).to_event(1)
_ = self.sex_logits
sex = pyro.sample('sex', sex_dist)
age_base_dist = Normal(self.age_base_loc, self.age_base_scale).to_event(1)
age_dist = TransformedDistribution(age_base_dist, self.age_flow_transforms)
age = pyro.sample('age', age_dist)
age_ = self.age_flow_constraint_transforms.inv(age)
# pseudo call to thickness_flow_transforms to register with pyro
_ = self.age_flow_components
brain_context = torch.cat([sex, age_], 1)
brain_volume_base_dist = Normal(self.brain_volume_base_loc, self.brain_volume_base_scale).to_event(1)
brain_volume_dist = ConditionalTransformedDistribution(brain_volume_base_dist, self.brain_volume_flow_transforms).condition(brain_context)
brain_volume = pyro.sample('brain_volume', brain_volume_dist)
# pseudo call to intensity_flow_transforms to register with pyro
_ = self.brain_volume_flow_components
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
ventricle_context = torch.cat([age_, brain_volume_], 1)
ventricle_volume_base_dist = Normal(self.ventricle_volume_base_loc, self.ventricle_volume_base_scale).to_event(1)
ventricle_volume_dist = ConditionalTransformedDistribution(ventricle_volume_base_dist, self.ventricle_volume_flow_transforms).condition(ventricle_context) # noqa: E501
ventricle_volume = pyro.sample('ventricle_volume', ventricle_volume_dist)
# pseudo call to intensity_flow_transforms to register with pyro
_ = self.ventricle_volume_flow_components
return age, sex, ventricle_volume, brain_volume
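    # Structural summary of the SCM, read off the sampling statements above:
    #   sex, age          -> brain_volume
    #   age, brain_volume -> ventricle_volume
    #   z, ventricle_volume, brain_volume -> x  (see model() below)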
@pyro_method
def model(self):
age, sex, ventricle_volume, brain_volume = self.pgm_model()
ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
z = pyro.sample('z', Normal(self.z_loc, self.z_scale).to_event(1))
latent = torch.cat([z, ventricle_volume_, brain_volume_], 1)
x_dist = self._get_transformed_x_dist(latent)
x = pyro.sample('x', x_dist)
return x, z, age, sex, ventricle_volume, brain_volume
@pyro_method
def guide(self, x, age, sex, ventricle_volume, brain_volume):
with pyro.plate('observations', x.shape[0]):
hidden = self.encoder(x)
ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
hidden = torch.cat([hidden, ventricle_volume_, brain_volume_], 1)
latent_dist = self.latent_encoder.predict(hidden)
z = pyro.sample('z', latent_dist)
return z
MODEL_REGISTRY[ConditionalVISEM.__name__] = ConditionalVISEM
|
keras_maskrcnn/utils/overlap.py | akashdeepjassal/keras-maskrcnn | 432 | 12788972 | <filename>keras_maskrcnn/utils/overlap.py
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
def compute_overlap(a, b):
"""
Args
a: (N, H, W) ndarray of float
b: (K, H, W) ndarray of float
Returns
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
intersection = np.zeros((a.shape[0], b.shape[0]))
union = np.zeros((a.shape[0], b.shape[0]))
for index, mask in enumerate(a):
intersection[index, :] = np.sum(np.count_nonzero(b & mask, axis=1), axis=1)
union[index, :] = np.sum(np.count_nonzero(b + mask, axis=1), axis=1)
return intersection / union
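# Minimal sanity check (illustrative, not part of the original file): two 4x4
# binary masks that share one of three occupied columns give IoU = 4 / 12.
if __name__ == "__main__":
    a = np.zeros((1, 4, 4), dtype=np.uint8)
    b = np.zeros((1, 4, 4), dtype=np.uint8)
    a[0, :, 0:2] = 1  # columns 0-1
    b[0, :, 1:3] = 1  # columns 1-2
    print(compute_overlap(a, b))  # [[0.33333333]]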
|
Python/43. MultiplyStrings.py | nizD/LeetCode-Solutions | 263 | 12788976 | """
Leetcode's Medium challenge #43 - Multiply Strings (Solution)
<https://leetcode.com/problems/multiply-strings/>
Description:
Given two non-negative integers num1 and num2
represented as strings, return the product of num1 and num2,
also represented as a string.
EXAMPLE:
Input: num1 = "2", num2 = "3"
Output: "6"
Author: <Curiouspaul1>
github: https://github.com/Curiouspaul1
"""
def int_(s):
"""
Converts strings to int, raises exception
for non-int literals
"""
    result = 0
    for i in s:
        if ord(i) in range(48, 58):  # checks that the character is one of '0'-'9'
            result = result*10 + (ord(i) - ord('0'))
        else:
            raise ValueError
    return result
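# Example behaviour of int_ (illustrative): int_("403") returns 403, while
# int_("4a3") raises ValueError because 'a' is outside the '0'-'9' range.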
class Solution:
def multiply(self, num1: str, num2: str) -> str:
if len(num1) >= 110 or len(num2) >= 110: # constraints from leetcode
return 0
try:
num1, num2 = int_(num1), int_(num2)
result = num1 * num2
return str(result)
except ValueError:
print("Invalid Entry")
return 0 |