max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/conftest.py | timgates42/guv | 120 | 12732799 | import pytest
from guv.greenio import socket
from guv import listen
@pytest.fixture(scope='session')
def pub_addr():
"""A working public address that is considered always available
"""
return 'gnu.org', 80
@pytest.fixture(scope='session')
def fail_addr():
"""An address that nothing is listening on
"""
return '192.0.0.0', 1000
@pytest.fixture(scope='function')
def gsock():
return socket()
@pytest.fixture(scope='function')
def server_sock():
sock = listen(('', 0))
return sock
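# Editor's illustrative sketch (not part of the original conftest): one way a test module
# could consume these fixtures. It assumes guv's greenio socket mirrors the standard
# socket API (connect/close), which is the usual contract for green socket wrappers.
def example_connect_to_public_address(gsock, pub_addr):
    """Sketch: connecting the green socket to the always-available address should succeed"""
    gsock.connect(pub_addr)
    gsock.close()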
|
QUANTAXIS/QAWebServer/schedulehandler.py | B34nK0/QUANTAXIS | 6,322 | 12732805 |
import datetime
import threading
import pymongo
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.tornado import TornadoScheduler
from qaenv import mongo_ip, mongo_port
from QUANTAXIS.QAWebServer.basehandles import QABaseHandler
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.web import Application, RequestHandler
"""
Add reading of scheduler job data from MongoDB.
"""
scheduler = None
job_ids = []
# Initialization
def init_scheduler(database='qascheduler', collection='jobs'):
jobstores = {
'default': MongoDBJobStore(database=database, collection=collection, client=pymongo.MongoClient(host=mongo_ip, port=mongo_port))
}
global scheduler
scheduler = TornadoScheduler(jobstores=jobstores)
scheduler.start()
print('[QAScheduler Init]Scheduler has been started')
return scheduler
# The scheduled tasks to be executed go here
def task1(options):
print('{} [QASchedule][Task]-{}'.format(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), options))
# print(threading.enumerate())
class QASchedulerHandler(QABaseHandler):
"""
http://0.0.0.0:8010/scheduler/map?job_id=1&action=add
"""
def get(self):
global job_ids
job_id = self.get_query_argument('job_id', None)
action = self.get_query_argument('action', None)
if job_id:
# add
if 'add' == action:
if job_id not in job_ids:
job_ids.append(job_id)
scheduler.add_job(task1, 'interval',
seconds=3, id=job_id, args=(job_id,))
self.write('[TASK ADDED] - {}'.format(job_id))
else:
self.write('[TASK EXISTS] - {}'.format(job_id))
# remove
elif 'remove' == action:
if job_id in job_ids:
scheduler.remove_job(job_id)
job_ids.remove(job_id)
self.write('[TASK REMOVED] - {}'.format(job_id))
else:
self.write('[TASK NOT FOUND] - {}'.format(job_id))
else:
self.write('[INVALID PARAMS] INVALID job_id or action')
def format_joboutput(job):
return {
'id': job.id,
'name': job.name,
'args': job.args,
        'kwargs': job.kwargs,
'coalesce': job.coalesce,
'nextruntime': str(job.next_run_time)
}
class QAScheduleQuery(QABaseHandler):
def get(self):
action = self.get_argument('action', 'queryall')
print(action)
if action == 'queryall':
jobs = scheduler.get_jobs()
print([format_joboutput(x) for x in jobs])
self.write({'res': [format_joboutput(x) for x in jobs]})
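# Editor's illustrative sketch (not in the original file): one way these handlers could be
# mounted in a standalone Tornado application. The /scheduler/map route and port 8010 are
# taken from the QASchedulerHandler docstring above; the /scheduler/query route is an assumption.
if __name__ == "__main__":
    init_scheduler()
    app = Application([
        (r"/scheduler/map", QASchedulerHandler),
        (r"/scheduler/query", QAScheduleQuery),
    ])
    app.listen(8010)
    IOLoop.current().start()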
|
sort/merge.py | x899/algorithms | 472 | 12732848 | """ Implementation of Merge Sort algorithm
"""
def merge(data):
""" MergeSort is a Divide and Conquer algorithm. It divides input array
in two halves, calls itself for the two halves and then merges the
two sorted halves.
:param array: list of elements that needs to be sorted
:type array: list
"""
if len(data) > 1:
mid = len(data) // 2
lefthalf = data[:mid]
righthalf = data[mid:]
merge(lefthalf)
merge(righthalf)
i = j = k = 0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
data[k] = lefthalf[i]
i = i + 1
else:
data[k] = righthalf[j]
j = j + 1
k = k + 1
while i < len(lefthalf):
data[k] = lefthalf[i]
i = i + 1
k = k + 1
while j < len(righthalf):
data[k] = righthalf[j]
j = j + 1
k = k + 1
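# Editor's illustrative sketch (not in the original file): since merge() sorts the list
# in place, a quick sanity check can compare its result against Python's built-in sorted().
def sanity_check(sample=(38, 27, 43, 3, 9, 82, 10)):
    """ verify the in-place merge sort against sorted() on a copy of the data """
    data = list(sample)
    merge(data)
    assert data == sorted(sample), "merge sort output should match sorted()"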
def main():
""" operational function """
arr = [34, 56, 23, 67, 3, 68]
print(f"unsorted array: {arr}")
merge(arr)
print(f" sorted array: {arr}")
if __name__ == "__main__":
main()
|
tests/patterns/test_ParallelPattern.py | josiah-wolf-oberholtzer/supriya | 191 | 12732882 |
import pytest
from supriya.patterns import (
CompositeEvent,
EventPattern,
GroupAllocateEvent,
GroupPattern,
NodeFreeEvent,
NoteEvent,
NullEvent,
ParallelPattern,
SequencePattern,
)
from supriya.patterns.testutils import MockUUID as M
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, patterns, expected, is_infinite",
[
(
None,
[
EventPattern(frequency=SequencePattern([440, 550, 660])),
EventPattern(frequency=SequencePattern([777, 888, 999])),
],
[
CompositeEvent(
[
NoteEvent(M("A"), delta=0.0, frequency=440),
NoteEvent(M("B"), delta=0.0, frequency=777),
],
delta=1.0,
),
CompositeEvent(
[
NoteEvent(M("C"), delta=0.0, frequency=550),
NoteEvent(M("D"), delta=0.0, frequency=888),
],
delta=1.0,
),
CompositeEvent(
[
NoteEvent(M("E"), delta=0.0, frequency=660),
NoteEvent(M("F"), delta=0.0, frequency=999),
],
delta=1.0,
),
],
False,
),
(
None,
[
EventPattern(x=SequencePattern([1, 2, 3]), delta=1.0),
EventPattern(y=SequencePattern([1, 2]), delta=1.5),
],
[
CompositeEvent(
[
NoteEvent(M("A"), delta=0.0, x=1),
NoteEvent(M("B"), delta=0.0, y=1),
],
delta=1.0,
),
NoteEvent(M("C"), delta=0.5, x=2),
NoteEvent(M("D"), delta=0.5, y=2),
NoteEvent(M("E"), delta=1.0, x=3),
],
False,
),
(
1,
[
EventPattern(x=SequencePattern([1, 2, 3]), delta=1.0),
EventPattern(y=SequencePattern([1, 2]), delta=1.5),
],
[
CompositeEvent(
[
NoteEvent(M("A"), delta=0.0, x=1),
NoteEvent(M("B"), delta=0.0, y=1),
],
delta=1.0,
)
],
False,
),
(
None,
[
GroupPattern(EventPattern(x=SequencePattern([1, 2, 3]), delta=1.0)),
GroupPattern(EventPattern(y=SequencePattern([1, 2]), delta=1.5)),
],
[
CompositeEvent(
[
CompositeEvent([GroupAllocateEvent(M("A"))]),
NoteEvent(M("B"), delta=0.0, target_node=M("A"), x=1),
CompositeEvent([GroupAllocateEvent(M("C"))]),
NoteEvent(M("D"), delta=0.0, target_node=M("C"), y=1),
],
delta=1.0,
),
NoteEvent(M("E"), delta=0.5, target_node=M("A"), x=2),
NoteEvent(M("F"), delta=0.5, target_node=M("C"), y=2),
NoteEvent(M("G"), delta=1.0, target_node=M("A"), x=3),
CompositeEvent(
[
CompositeEvent([NullEvent(delta=0.25), NodeFreeEvent(M("A"))]),
CompositeEvent([NullEvent(delta=0.25), NodeFreeEvent(M("C"))]),
]
),
],
False,
),
(
1,
[
GroupPattern(EventPattern(x=SequencePattern([1, 2, 3]), delta=1.0)),
GroupPattern(EventPattern(y=SequencePattern([1, 2]), delta=1.5)),
],
[
CompositeEvent(
[
CompositeEvent([GroupAllocateEvent(M("A"))]),
NoteEvent(M("B"), delta=0.0, target_node=M("A"), x=1),
CompositeEvent([GroupAllocateEvent(M("C"))]),
NoteEvent(M("D"), delta=0.0, target_node=M("C"), y=1),
],
delta=1.0,
),
CompositeEvent(
[NullEvent(delta=0.25), NodeFreeEvent(M("A"))], delta=0.5
),
CompositeEvent([NullEvent(delta=0.25), NodeFreeEvent(M("C"))]),
],
False,
),
],
)
def test(stop_at, patterns, expected, is_infinite):
pattern = ParallelPattern(patterns)
run_pattern_test(pattern, expected, is_infinite, stop_at)
|
hail/python/hail/expr/expressions/__init__.py | tdeboer-ilmn/hail | 789 | 12732921 |
from .indices import Indices, Aggregation
from .base_expression import ExpressionException, Expression, impute_type, \
to_expr, cast_expr, unify_all, unify_types_limited, unify_types, \
unify_exprs
from .typed_expressions import ArrayExpression, ArrayStructExpression, \
ArrayNumericExpression, BooleanExpression, CallExpression, \
CollectionExpression, DictExpression, IntervalExpression, LocusExpression, \
NumericExpression, Int32Expression, Int64Expression, Float32Expression, \
Float64Expression, SetExpression, StringExpression, StructExpression, \
TupleExpression, NDArrayExpression, NDArrayNumericExpression, \
SetStructExpression, \
apply_expr, construct_expr, construct_variable, construct_reference
from .expression_typecheck import expr_any, expr_int32, expr_int64, \
expr_float32, expr_float64, expr_call, expr_bool, expr_str, expr_locus, \
expr_interval, expr_array, expr_ndarray, expr_set, expr_dict, expr_tuple, \
expr_struct, expr_oneof, expr_numeric, coercer_from_dtype
from .expression_utils import analyze, eval_timed, eval, eval_typed, \
extract_refs_by_indices, get_refs, matrix_table_source, table_source, \
check_entry_indexed, check_row_indexed
__all__ = ['Indices',
'Aggregation',
'apply_expr',
'construct_expr',
'construct_variable',
'construct_reference',
'impute_type',
'to_expr',
'cast_expr',
'unify_all',
'unify_types_limited',
'unify_types',
'unify_exprs',
'Expression',
'ExpressionException',
'ArrayExpression',
'ArrayStructExpression',
'ArrayNumericExpression',
'BooleanExpression',
'CallExpression',
'CollectionExpression',
'DictExpression',
'IntervalExpression',
'LocusExpression',
'NumericExpression',
'Int32Expression',
'Int64Expression',
'Float32Expression',
'Float64Expression',
'SetExpression',
'SetStructExpression',
'StringExpression',
'StructExpression',
'TupleExpression',
'NDArrayExpression',
'NDArrayNumericExpression',
'analyze',
'check_entry_indexed',
'check_row_indexed',
'get_refs',
'extract_refs_by_indices',
'eval',
'eval_typed',
'eval_timed',
'expr_any',
'expr_int32',
'expr_int64',
'expr_float32',
'expr_float64',
'expr_bool',
'expr_str',
'expr_call',
'expr_locus',
'expr_struct',
'expr_numeric',
'expr_array',
'expr_ndarray',
'expr_set',
'expr_dict',
'expr_tuple',
'expr_interval',
'expr_oneof',
'coercer_from_dtype',
'matrix_table_source',
'table_source']
|
abusehelper/bots/tailbot/tailbot.py | AbuseSA/abusehelper | 117 | 12732923 | import os
import time
import errno
import idiokit
from abusehelper.core import events, bot, utils
def read(fd, amount=4096):
try:
data = os.read(fd, amount)
except OSError as ose:
if ose.args[0] != errno.EAGAIN:
raise
data = ""
return data
def try_seek(fd, offset):
try:
if offset is None:
os.lseek(fd, 0, os.SEEK_END)
elif offset >= 0:
os.lseek(fd, offset, os.SEEK_SET)
else:
os.lseek(fd, offset, os.SEEK_END)
except OSError as ose:
if ose.args[0] != errno.ESPIPE:
raise
def follow_file(filename):
while True:
try:
fd = os.open(filename, os.O_RDONLY | os.O_NONBLOCK)
except OSError:
yield None
continue
try:
inode = os.fstat(fd).st_ino
first = True
while True:
try:
stat = os.stat(filename)
except OSError:
stat = None
yield first, time.time(), fd
if stat is None or inode != stat.st_ino:
break
first = False
finally:
os.close(fd)
def tail_file(filename, offset=None):
first = True
buffer = []
for result in follow_file(filename):
if first and result is not None:
_, _, fd = result
try_seek(fd, offset)
first = False
if result is None:
yield None
continue
flush, mtime, fd = result
if flush and buffer:
buffer = []
while True:
data = read(fd)
if not data:
break
lines = data.split("\n")
if len(lines) <= 1:
buffer.extend(lines)
continue
lines[0] = "".join(buffer) + lines[0]
for line in lines[:-1]:
if line.endswith("\r"):
line = line[:-1]
yield mtime, line
if not lines[-1]:
buffer = []
else:
buffer = lines[-1:]
yield None
class TailBot(bot.FeedBot):
path = bot.Param("path to the followed file")
offset = bot.IntParam("file offset", default=None)
@idiokit.stream
def feed(self):
for result in tail_file(self.path, self.offset):
if result is None:
yield idiokit.sleep(2.0)
continue
mtime, line = result
keys = self.parse(line, mtime)
if keys is None:
continue
event = events.Event()
for key, value in keys.items():
event.add(key, value)
yield idiokit.send(event)
def parse(self, line, mtime):
line = line.rstrip()
if not line:
return
line = utils.force_decode(line)
return {"line": line}
if __name__ == "__main__":
TailBot.from_command_line().execute()
|
dlnlputils/__init__.py | Rojanson/stepik-dl-nlp | 120 | 12732947 |
from . import data
from . import pipeline
from . import visualization
from . import base
|
experiments/training/__init__.py | selflein/manifold-flow | 199 | 12732963 |
from . import losses
from .trainer import ForwardTrainer, ConditionalForwardTrainer, AdversarialTrainer, ConditionalAdversarialTrainer, SCANDALForwardTrainer
from .alternate import AlternatingTrainer
|
src/python/e2e-test-runner/e2e_test_runner/main.py | inickles/grapl | 313 | 12732981 |
import os
import sys
from pathlib import Path
from grapl_common.debugger.vsc_debugger import wait_for_vsc_debugger
from grapl_common.grapl_logger import get_module_grapl_logger
LOGGER = get_module_grapl_logger()
def main() -> None:
wait_for_vsc_debugger("grapl_e2e_tests")
LOGGER.info("executing pytest")
from grapl_tests_common import setup_tests # import here to limit monkeypatch
# Change to the parent directory so pytest can find the tests
os.chdir(Path(__file__).resolve().parent)
result = setup_tests.exec_pytest()
LOGGER.info(f"tests completed with status code {result}")
sys.exit(result)
|
moya/command/sub/init.py | moyaproject/moya | 129 | 12732988 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from ...command import SubCommand
from ...console import Cell
from ...wsgi import WSGIApplication
from ... import namespaces
from ... import db
try:
import readline
except ImportError:
pass
class Init(SubCommand):
"""initialize a site for first use"""
help = """initialize a site for first use"""
def add_arguments(self, parser):
parser.add_argument(
"-l",
"--location",
dest="location",
default=None,
metavar="PATH",
help="location of the Moya server code",
)
parser.add_argument(
"-i",
"--ini",
dest="settings",
default=None,
metavar="SETTINGSPATH",
help="path to project settings",
)
def run(self):
args = self.args
console = self.console
application = WSGIApplication(
self.location,
self.get_settings(),
validate_db=True,
disable_autoreload=True,
master_settings=self.master_settings,
)
archive = application.archive
self.console.div("syncing database")
db.sync_all(archive, self.console, summary=False)
commands = [
command
for command in archive.get_elements_by_type(namespaces.default, "command")
if command._init
]
commands.sort(key=lambda c: c._priority, reverse=True)
fail = None
for command in commands:
if fail:
break
for app_name in archive.apps_by_lib[command.lib.long_name]:
app = archive.apps[app_name]
app_id = command.get_appid(app=app)
# console.div("running 'moya {}'".format(app_id))
console.div(command._synopsis)
# console.text(command._synopsis, italic=True)
result = self.moya_command.project_invoke(
app_id, application=application, root_vars={"init": True}
)
if result != 0:
fail = result
break
console.nl()
if not fail:
msg = """Site is ready for use!\nRun "moya runserver" from the project directory."""
# console.text(msg, fg="green", bold=True)
console.table([[Cell(msg, fg="green", bold=True)]])
else:
msg = """A command failed to complete -- check above for any error messages."""
console.table([[Cell(msg, fg="red", bold=True)]])
|
src/recommendationservice/client.py | gzhao408/stackdriver-sandbox | 229 | 12732990 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import grpc
import demo_pb2
import demo_pb2_grpc
from opencensus.trace.tracer import Tracer
from opencensus.trace.exporters import stackdriver_exporter
from opencensus.trace.ext.grpc import client_interceptor as oc_client_interceptor
from opentelemetry import trace
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.ext.grpc import client_interceptor
from opentelemetry.ext.grpc.grpcext import intercept_channel
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor
from logger import getJSONLogger
logger = getJSONLogger('recommendationservice-server')
if __name__ == "__main__":
# get port
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = "8080"
# TODO: remove OpenCensus after conversion to OpenTelemetry
try:
exporter = stackdriver_exporter.StackdriverExporter()
tracer = Tracer(exporter=exporter)
oc_interceptor = oc_client_interceptor.OpenCensusClientInterceptor(tracer, host_port='localhost:'+port)
except:
oc_interceptor = oc_client_interceptor.OpenCensusClientInterceptor()
# OpenTelemetry Tracing
trace.set_tracer_provider(TracerProvider())
cloud_trace_exporter = CloudTraceSpanExporter()
trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(cloud_trace_exporter)
)
# set up server stub
channel = grpc.insecure_channel('localhost:'+port)
channel = intercept_channel(channel, client_interceptor(trace.get_tracer_provider()))
stub = demo_pb2_grpc.RecommendationServiceStub(channel)
# form request
request = demo_pb2.ListRecommendationsRequest(user_id="test", product_ids=["test"])
# make call to server
response = stub.ListRecommendations(request)
logger.info(response)
|
baselines/ppo2_rudder/policies.py | jingweiz/baselines-rudder | 281 | 12732992 |
# -*- coding: utf-8 -*-
"""policies.py: Adaption of baselines.ppo2.policies.py for RUDDER for atari games
Author -- <NAME>
Contact -- <EMAIL>
"""
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch
from baselines.common.distributions import make_pdtype
from baselines.ppo2_rudder.reward_redistribution import RewardRedistributionModel, observation_network
from TeLL.layers import StopGradientLayer
def nature_cnn(unscaled_images):
"""Convolutional parts of CNN from Nature paper
Taken from baselines.ppo2.policies.py
Parameters
-------
unscaled_images : tensorflow tensor
Frame of shape (batchsize, x, y, c)
Returns
-------
tensorflow tensor
Output features of last convolutional layer with flattened x/y/c dimensions
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2)))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2)))
h3 = conv_to_fc(h3)
return h3
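# Editor's note (shape sketch, assuming the standard 84x84x4 Atari preprocessing, which this
# file does not state): with baselines' default VALID padding the three conv layers map
# (batch, 84, 84, 4) -> (batch, 20, 20, 32) -> (batch, 9, 9, 64) -> (batch, 7, 7, 64),
# so conv_to_fc returns a feature tensor of shape (batch, 3136).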
def lstm(xs, ms, s, scope, nh):
"""LSTM layer for policy network, using same weight and bias initialization as LSTM in reward redistribution model
Based on baselines.ppo2.policies.py; These initializations were taken directly from the redistribution model LSTM
and could be optimized;
"""
nbatch, nin = [v.value for v in xs[0].get_shape()]
lstm_w_init = lambda scale: lambda *args, **kwargs: tf.truncated_normal(*args, **kwargs) * scale
truncated_normal_init = lambda mean, stddev: \
lambda *args, **kwargs: tf.truncated_normal(mean=mean, stddev=stddev, *args, **kwargs)
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
wx_ig = tf.get_variable("wx_ig", initializer=lstm_w_init(0.1)([nin, nh]))
wx_og = tf.get_variable("wx_og", initializer=lstm_w_init(0.1)([nin, nh]))
wx_ci = tf.get_variable("wx_ci", initializer=lstm_w_init(0.0001)([nin, nh]))
wx_fg = tf.get_variable("wx_fg", initializer=lstm_w_init(0.1)([nin, nh]))
wh_ig = tf.get_variable("wh_ig", initializer=lstm_w_init(0.001)([nh, nh]))
wh_og = tf.get_variable("wh_og", initializer=lstm_w_init(0.001)([nh, nh]))
wh_ci = tf.get_variable("wh_ci", initializer=lstm_w_init(0.001)([nh, nh]))
wh_fg = tf.get_variable("wh_fg", initializer=lstm_w_init(0.001)([nh, nh]))
b_ig = tf.get_variable("b_ig", initializer=truncated_normal_init(mean=-5, stddev=0.1)([nh]))
b_fg = tf.get_variable("b_fg", initializer=truncated_normal_init(mean=12, stddev=0.1)([nh]))
b_og = tf.get_variable("b_og", initializer=truncated_normal_init(mean=-5, stddev=0.1)([nh]))
b_ci = tf.get_variable("b_ci", initializer=truncated_normal_init(mean=0, stddev=0.1)([nh]))
wx = tf.concat([wx_ig, wx_fg, wx_og, wx_ci], axis=1)
wh = tf.concat([wh_ig, wh_fg, wh_og, wh_ci], axis=1)
b = tf.concat([b_ig, b_fg, b_og, b_ci], axis=0)
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c *= (1 - m)
h *= (1 - m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.identity(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
class LstmPolicy(object):
def __init__(self, tf_session, ob_space, ac_space, nbatch,
reward_redistribution_config, observation_network_config, lstm_network_config, training_config,
exploration_config, nsteps, nlstm=64, reuse=False):
"""LSTM policy network, as described in RUDDER paper
        Based on baselines.ppo2.policies.py; LSTM layer sees features from its own trainable observation network and
the features from the reward redistribution observation network;
Parameters
-------
tf_session : tensorflow session
tensorflow session to compute the graph in
ob_space
Baselines ob_space object (see ppo2_rudder.py); must provide .shape attribute for (x, y, c) shapes;
ac_space
Baselines ac_space object (see ppo2_rudder.py); must provide .n attribute for number of possible actions;
nbatch : int
Batchsize
nsteps : int
Fixed number of timesteps to process at once
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
                Chunk size for reward redistribution; chunks overlap by one half each
cont_pred_w : float
                Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
Stepsize for integrated gradients
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
Downscale learningrate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
exploration_config : dict
Dictionary containing config for exploration:
-----
sample_actions_from_softmax : bool
True: Apply softmax to policy network output and use it as probabilities to pick an action
False: Use the max. policy network output as action
temporal_safe_exploration : bool
                Use RUDDER safe exploration
save_pi_threshold : float
Threshold value in range [0,1] for safe actions in RUDDER safe exploration
nlstm : int
Number of LSTM units (=memory cells)
reuse : bool
Reuse tensorflow variables?
"""
#
# Shapes
#
nenv = nbatch // nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
seq_ob_shape = (nenv, -1, nh, nw, 1)
nact = ac_space.n
#
# Placeholders for inputs
#
X = tf.placeholder(tf.uint8, ob_shape) #obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
#
# Prepare input
#
single_frames = tf.cast(tf.reshape(X[..., -1:], shape=seq_ob_shape), dtype=tf.float32)
delta_frames = single_frames - tf.cast(tf.reshape(X[..., -2:-1], shape=seq_ob_shape), dtype=tf.float32)
#
# Get observation features from RR model
#
rr_model = RewardRedistributionModel(reward_redistribution_config=reward_redistribution_config,
observation_network_config=observation_network_config,
lstm_network_config=lstm_network_config, training_config=training_config,
scopename="RR")
self.rr_observation_model = rr_model
rr_observation_layer = rr_model.get_visual_features(single_frame=single_frames, delta_frame=delta_frames,
additional_inputs=[])
#
# Build policy network
#
with tf.variable_scope("model", reuse=reuse):
temperature = tf.get_variable(initializer=tf.constant(1, dtype=tf.float32), trainable=False,
name='temperature')
additional_inputs = [StopGradientLayer(rr_observation_layer)]
observation_layers, observation_features = observation_network(
single_frame=single_frames, delta_frame=delta_frames, additional_inputs=additional_inputs,
observation_network_config=observation_network_config)
self.observation_features_shape = observation_features.get_output_shape()
xs = [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps,
value=tf.reshape(observation_layers[-1].get_output(),
[nenv, nsteps, -1]))]
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
h6 = h5
pi = fc(h6, 'pi', nact)
vf = fc(h6, 'v', 1)
self.pdtype = make_pdtype(ac_space)
self.pd = self.pdtype.pdfromflat(pi)
if exploration_config['sample_actions_from_softmax']:
a0 = self.pd.sample_temp(temperature=temperature)
else:
a0 = tf.argmax(pi, axis=-1)
v0 = vf[:, 0]
neglogp0 = self.pd.neglogp(a0)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
a, v, s, neglogp = tf_session.run([a0, v0, snew, neglogp0], {X:ob, S:state, M:mask})
return a, v, s, neglogp
def value(ob, state, mask):
return tf_session.run(v0, {X:ob, S:state, M:mask})
def action(ob, state, mask, *_args, **_kwargs):
a, s, neglogp = tf_session.run([a0, snew, neglogp0], {X:ob, S:state, M:mask})
return a, s, neglogp
#
# Placeholders for exploration
#
n_envs = pi.shape.as_list()[0]
exploration_timesteps_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
prev_actions_pl = tf.placeholder(dtype=tf.int64, shape=(n_envs,))
gamelengths_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
keep_prev_action_pl = tf.placeholder(dtype=tf.bool, shape=(n_envs,))
prev_action_count_pl = tf.placeholder(dtype=tf.int64, shape=(n_envs,))
exploration_durations_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
#
# Setting up safe exploration
#
explore = tf.logical_and(tf.logical_and(tf.less_equal(exploration_timesteps_pl, gamelengths_pl),
tf.less_equal(gamelengths_pl,
exploration_timesteps_pl + exploration_durations_pl)),
tf.not_equal(exploration_timesteps_pl, tf.constant(-1, dtype=tf.float32)))
safe_pi = pi - tf.reduce_min(pi, axis=-1, keep_dims=True)
safe_pi /= tf.reduce_max(safe_pi, axis=-1, keep_dims=True)
save_pi_thresholds = (1 - (tf.expand_dims(tf.range(n_envs, dtype=tf.float32), axis=1)
/ (n_envs + (n_envs == 1) - 1)) * (1 - exploration_config['save_pi_threshold']))
safe_pi = tf.cast(tf.greater_equal(safe_pi, save_pi_thresholds), dtype=tf.float32)
safe_pi /= tf.reduce_sum(safe_pi)
rand_safe_a = tf.multinomial(safe_pi, 1)[:, 0]
safe_pi_flat = tf.reshape(safe_pi, (-1,))
prev_action_is_safe = tf.gather(safe_pi_flat,
prev_actions_pl + tf.range(safe_pi.shape.as_list()[0], dtype=tf.int64)
* safe_pi.shape.as_list()[1])
prev_action_is_safe = tf.greater(prev_action_is_safe, tf.constant(0, dtype=tf.float32))
a_explore = tf.where(tf.logical_and(tf.logical_and(keep_prev_action_pl,
tf.not_equal(gamelengths_pl, exploration_timesteps_pl)),
prev_action_is_safe),
prev_actions_pl, rand_safe_a)
a_explore = tf.where(explore, a_explore, a0)
# Make sure the actor doesn't repeat an action too often (otherwise screensaver might start)
rand_a = tf.random_uniform(shape=a0.get_shape(), minval=0, maxval=ac_space.n, dtype=a0.dtype)
a_explore = tf.where(tf.greater(prev_action_count_pl, tf.constant(20, dtype=tf.int64)), rand_a, a_explore)
if not exploration_config['temporal_safe_exploration']:
a_explore = a0
neglogp_explore = self.pd.neglogp(a_explore)
def action_exploration(ob, state, mask, *_args, exploration_timesteps, prev_actions, gamelengths,
keep_prev_action, prev_action_count, exploration_durations, **_kwargs):
"""Get actions with exploration for long-term reward"""
a, s, neglogp = tf_session.run([a_explore, snew, neglogp_explore],
{X: ob, S:state, M:mask, exploration_timesteps_pl: exploration_timesteps,
prev_actions_pl: prev_actions,
gamelengths_pl: gamelengths, exploration_durations_pl: exploration_durations,
keep_prev_action_pl: keep_prev_action, prev_action_count_pl: prev_action_count})
return a, s, neglogp
self.X = X
self.M = M
self.S = S
self.pi = pi
self.vf = vf
self.step = step
self.value = value
self.action = action
self.action_exploration = action_exploration
self.seq_ob_shape = seq_ob_shape
self.exploration_config = exploration_config
def get_observation_features(self, frame, delta):
"""Get output features of observation network (to be fed into reward redistribution network)"""
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
_, observation_features = observation_network(
single_frame=frame[..., -1:], delta_frame=delta, additional_inputs=[],
observation_network_config=self.exploration_config['observation_network_config'])
observation_features = observation_features.get_output()
return observation_features
class LstmPolicyDense(object):
def __init__(self, tf_session, ob_space, ac_space, nbatch,
reward_redistribution_config, observation_network_config, lstm_network_config, training_config,
exploration_config, nsteps, nlstm=64, reuse=False):
"""LSTM policy network with additional dense layer after LSTM layer, as described in RUDDER paper
        Based on baselines.ppo2.policies.py; LSTM layer sees features from its own trainable observation network and
the features from the reward redistribution observation network; The additional dense layer after the LSTM
layer contains 128 hidden units;
Parameters
-------
tf_session : tensorflow session
tensorflow session to compute the graph in
ob_space
Baselines ob_space object (see ppo2_rudder.py); must provide .shape attribute for (x, y, c) shapes;
ac_space
Baselines ac_space object (see ppo2_rudder.py); must provide .n attribute for number of possible actions;
nbatch : int
Batchsize
nsteps : int
Fixed number of timesteps to process at once
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
                Chunk size for reward redistribution; chunks overlap by one half each
cont_pred_w : float
                Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
Stepsize for integrated gradients
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
Downscale learningrate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
exploration_config : dict
Dictionary containing config for exploration:
-----
sample_actions_from_softmax : bool
True: Apply softmax to policy network output and use it as probabilities to pick an action
False: Use the max. policy network output as action
temporal_safe_exploration : bool
                Use RUDDER safe exploration
save_pi_threshold : float
Threshold value in range [0,1] for safe actions in RUDDER safe exploration
nlstm : int
Number of LSTM units (=memory cells)
reuse : bool
Reuse tensorflow variables?
"""
#
# Shapes
#
nenv = nbatch // nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
seq_ob_shape = (nenv, -1, nh, nw, 1)
nact = ac_space.n
#
# Placeholders
#
X = tf.placeholder(tf.uint8, ob_shape) #obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
#
# Prepare input
#
single_frames = tf.cast(tf.reshape(X[..., -1:], shape=seq_ob_shape), dtype=tf.float32)
delta_frames = single_frames - tf.cast(tf.reshape(X[..., -2:-1], shape=seq_ob_shape), dtype=tf.float32)
#
# Get observation features from RR model
#
rr_model = RewardRedistributionModel(reward_redistribution_config=reward_redistribution_config,
observation_network_config=observation_network_config,
lstm_network_config=lstm_network_config, training_config=training_config,
scopename="RR")
self.rr_observation_model = rr_model
rr_observation_layer = rr_model.get_visual_features(single_frame=single_frames, delta_frame=delta_frames,
additional_inputs=[])
#
# Build policy network
#
with tf.variable_scope("model", reuse=reuse):
temperature = tf.get_variable(initializer=tf.constant(1, dtype=tf.float32), trainable=False,
name='temperature')
additional_inputs = [StopGradientLayer(rr_observation_layer)]
observation_layers, observation_features = observation_network(
single_frame=single_frames, delta_frame=delta_frames, additional_inputs=additional_inputs,
observation_network_config=observation_network_config)
self.observation_features_shape = observation_features.get_output_shape()
xs = [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps,
value=tf.reshape(observation_layers[-1].get_output(),
[nenv, nsteps, -1]))]
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
h6 = fc(h5, 'fc1', nh=128, init_scale=np.sqrt(2))
pi = fc(h6, 'pi', nact)
vf = fc(h6, 'v', 1)
self.pdtype = make_pdtype(ac_space)
self.pd = self.pdtype.pdfromflat(pi)
if exploration_config['sample_actions_from_softmax']:
a0 = self.pd.sample_temp(temperature=temperature)
else:
a0 = tf.argmax(pi, axis=-1)
v0 = vf[:, 0]
neglogp0 = self.pd.neglogp(a0)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
a, v, s, neglogp = tf_session.run([a0, v0, snew, neglogp0], {X:ob, S:state, M:mask})
return a, v, s, neglogp
def value(ob, state, mask):
return tf_session.run(v0, {X:ob, S:state, M:mask})
def action(ob, state, mask, *_args, **_kwargs):
a, s, neglogp = tf_session.run([a0, snew, neglogp0], {X:ob, S:state, M:mask})
return a, s, neglogp
#
# Placeholders for exploration
#
n_envs = pi.shape.as_list()[0]
exploration_timesteps_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
prev_actions_pl = tf.placeholder(dtype=tf.int64, shape=(n_envs,))
gamelengths_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
keep_prev_action_pl = tf.placeholder(dtype=tf.bool, shape=(n_envs,))
prev_action_count_pl = tf.placeholder(dtype=tf.int64, shape=(n_envs,))
exploration_durations_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
#
# Setting up safe exploration
#
explore = tf.logical_and(tf.logical_and(tf.less_equal(exploration_timesteps_pl, gamelengths_pl),
tf.less_equal(gamelengths_pl,
exploration_timesteps_pl + exploration_durations_pl)),
tf.not_equal(exploration_timesteps_pl, tf.constant(-1, dtype=tf.float32)))
safe_pi = pi - tf.reduce_min(pi, axis=-1, keep_dims=True)
safe_pi /= tf.reduce_max(safe_pi, axis=-1, keep_dims=True)
save_pi_thresholds = (1 - (tf.expand_dims(tf.range(n_envs, dtype=tf.float32), axis=1)
/ (n_envs + (n_envs == 1) - 1)) * (1 - exploration_config['save_pi_threshold']))
safe_pi = tf.cast(tf.greater_equal(safe_pi, save_pi_thresholds), dtype=tf.float32)
safe_pi /= tf.reduce_sum(safe_pi)
rand_safe_a = tf.multinomial(safe_pi, 1)[:, 0]
safe_pi_flat = tf.reshape(safe_pi, (-1,))
prev_action_is_safe = tf.gather(safe_pi_flat,
prev_actions_pl + tf.range(safe_pi.shape.as_list()[0], dtype=tf.int64)
* safe_pi.shape.as_list()[1])
prev_action_is_safe = tf.greater(prev_action_is_safe, tf.constant(0, dtype=tf.float32))
a_explore = tf.where(tf.logical_and(tf.logical_and(keep_prev_action_pl,
tf.not_equal(gamelengths_pl, exploration_timesteps_pl)),
prev_action_is_safe),
prev_actions_pl, rand_safe_a)
a_explore = tf.where(explore, a_explore, a0)
# Make sure the actor doesn't repeat an action too often (otherwise screensaver might start)
rand_a = tf.random_uniform(shape=a0.get_shape(), minval=0, maxval=ac_space.n, dtype=a0.dtype)
a_explore = tf.where(tf.greater(prev_action_count_pl, tf.constant(20, dtype=tf.int64)), rand_a, a_explore)
if not exploration_config['temporal_safe_exploration']:
a_explore = a0
neglogp_explore = self.pd.neglogp(a_explore)
def action_exploration(ob, state, mask, *_args, exploration_timesteps, prev_actions, gamelengths,
keep_prev_action, prev_action_count, exploration_durations, **_kwargs):
"""Get actions with exploration for long-term reward"""
a, s, neglogp = tf_session.run([a_explore, snew, neglogp_explore],
{X: ob, S:state, M:mask, exploration_timesteps_pl: exploration_timesteps,
prev_actions_pl: prev_actions,
gamelengths_pl: gamelengths, exploration_durations_pl: exploration_durations,
keep_prev_action_pl: keep_prev_action, prev_action_count_pl: prev_action_count})
return a, s, neglogp
self.X = X
self.M = M
self.S = S
self.pi = pi
self.vf = vf
self.step = step
self.value = value
self.action = action
self.action_exploration = action_exploration
self.seq_ob_shape = seq_ob_shape
self.exploration_config = exploration_config
def get_observation_features(self, frame, delta):
"""Get output features of observation network (to be fed into reward redistribution network)"""
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
_, observation_features = observation_network(
single_frame=frame[..., -1:], delta_frame=delta, additional_inputs=[],
observation_network_config=self.exploration_config['observation_network_config'])
observation_features = observation_features.get_output()
return observation_features
class CnnPolicy(object):
def __init__(self, tf_session, ob_space, ac_space, nbatch,
reward_redistribution_config, observation_network_config, lstm_network_config, training_config,
exploration_config, reuse=False, **kwargs):
"""CNN policy network, as described in RUDDER paper
        Based on baselines.ppo2.policies.py; Dense layer sees features from its own trainable observation network and
the features from the reward redistribution observation network;
Parameters
-------
tf_session : tensorflow session
tensorflow session to compute the graph in
ob_space
Baselines ob_space object (see ppo2_rudder.py); must provide .shape attribute for (x, y, c) shapes;
ac_space
Baselines ac_space object (see ppo2_rudder.py); must provide .n attribute for number of possible actions;
nbatch : int
Batchsize
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
                Chunk size for reward redistribution; chunks overlap by one half each
cont_pred_w : float
                Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
Stepsize for integrated gradients
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
Downscale learningrate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
exploration_config : dict
Dictionary containing config for exploration:
-----
sample_actions_from_softmax : bool
True: Apply softmax to policy network output and use it as probabilities to pick an action
False: Use the max. policy network output as action
temporal_safe_exploration : bool
                Use RUDDER safe exploration
save_pi_threshold : float
Threshold value in range [0,1] for safe actions in RUDDER safe exploration
reuse : bool
Reuse tensorflow variables?
"""
#
# Shapes
#
nh, nw, nc = ob_space.shape
activ = tf.nn.relu
ob_shape = (nbatch, nh, nw, nc)
nact = ac_space.n
#
# Placeholders
#
X = tf.placeholder(tf.uint8, ob_shape) #obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
#
# Prepare input
#
single_frames = tf.cast(tf.expand_dims(X[..., -1:], axis=1), dtype=tf.float32)
delta_frames = single_frames - tf.cast(tf.expand_dims(X[..., -2:-1], axis=1), dtype=tf.float32)
delta_frames *= tf.reshape(M, shape=(nbatch, 1, 1, 1, 1))
#
# Get observation features from RR model
#
rr_model = RewardRedistributionModel(reward_redistribution_config=reward_redistribution_config,
observation_network_config=observation_network_config,
lstm_network_config=lstm_network_config, training_config=training_config,
scopename="RR")
self.rr_observation_model = rr_model
rr_observation_layer = rr_model.get_visual_features(single_frame=single_frames, delta_frame=delta_frames,
additional_inputs=[])
# Get output tensor
rr_observations = rr_observation_layer.get_output()[:, 0]
#
# Build policy network
#
with tf.variable_scope("model", reuse=reuse):
temperature = tf.get_variable(initializer=tf.constant(1, dtype=tf.float32), trainable=False,
name='temperature')
observation_features = nature_cnn(X)
self.observation_features_shape = tf.expand_dims(observation_features, axis=0).shape
# Concat observation feature from RR model and A2C model
h_for_a2c = tf.concat([observation_features, tf.stop_gradient(rr_observations)], axis=-1)
with tf.variable_scope("model", reuse=reuse):
h_for_a2c = activ(fc(h_for_a2c, 'fc1', nh=512, init_scale=np.sqrt(2)))
with tf.variable_scope("model", reuse=reuse):
pi = fc(h_for_a2c, 'pi', nact, init_scale=0.01)
vf = fc(h_for_a2c, 'v', 1)[:, 0]
self.pdtype = make_pdtype(ac_space)
self.pd = self.pdtype.pdfromflat(pi)
if exploration_config['sample_actions_from_softmax']:
a0 = self.pd.sample_temp(temperature=temperature)
else:
a0 = tf.argmax(pi, axis=-1)
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, state, mask, *_args, **_kwargs):
a, v, neglogp = tf_session.run([a0, vf, neglogp0], {X: ob, M:mask})
return a, v, self.initial_state, neglogp
def value(ob, state, mask, *_args, **_kwargs):
return tf_session.run(vf, {X: ob, M:mask})
def action(ob, state, mask, *_args, **_kwargs):
a, neglogp = tf_session.run([a0, neglogp0], {X:ob, M:mask})
return a, self.initial_state, neglogp
#
# Placeholders for exploration
#
n_envs = pi.shape.as_list()[0]
exploration_timesteps_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
prev_actions_pl = tf.placeholder(dtype=tf.int64, shape=(n_envs,))
gamelengths_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
keep_prev_action_pl = tf.placeholder(dtype=tf.bool, shape=(n_envs,))
prev_action_count_pl = tf.placeholder(dtype=tf.int64, shape=(n_envs,))
exploration_durations_pl = tf.placeholder(dtype=tf.float32, shape=(n_envs,))
#
# Setting up safe exploration
#
explore = tf.logical_and(tf.logical_and(tf.less_equal(exploration_timesteps_pl, gamelengths_pl),
tf.less_equal(gamelengths_pl,
exploration_timesteps_pl + exploration_durations_pl)),
tf.not_equal(exploration_timesteps_pl, tf.constant(-1, dtype=tf.float32)))
safe_pi = pi - tf.reduce_min(pi, axis=-1, keep_dims=True)
safe_pi /= tf.reduce_max(safe_pi, axis=-1, keep_dims=True)
save_pi_thresholds = (1 - (tf.expand_dims(tf.range(n_envs, dtype=tf.float32), axis=1)
/ (n_envs + (n_envs == 1) - 1)) * (1 - exploration_config['save_pi_threshold']))
safe_pi = tf.cast(tf.greater_equal(safe_pi, save_pi_thresholds), dtype=tf.float32)
safe_pi /= tf.reduce_sum(safe_pi)
rand_safe_a = tf.multinomial(safe_pi, 1)[:, 0]
safe_pi_flat = tf.reshape(safe_pi, (-1,))
prev_action_is_safe = tf.gather(safe_pi_flat,
prev_actions_pl + tf.range(safe_pi.shape.as_list()[0], dtype=tf.int64)
* safe_pi.shape.as_list()[1])
prev_action_is_safe = tf.greater(prev_action_is_safe, tf.constant(0, dtype=tf.float32))
a_explore = tf.where(tf.logical_and(tf.logical_and(keep_prev_action_pl,
tf.not_equal(gamelengths_pl, exploration_timesteps_pl)),
prev_action_is_safe),
prev_actions_pl, rand_safe_a)
a_explore = tf.where(explore, a_explore, a0)
# Make sure the actor doesn't repeat an action too often (otherwise screensaver might start)
rand_a = tf.random_uniform(shape=a0.get_shape(), minval=0, maxval=ac_space.n, dtype=a0.dtype)
a_explore = tf.where(tf.greater(prev_action_count_pl, tf.constant(20, dtype=tf.int64)), rand_a, a_explore)
if not exploration_config['temporal_safe_exploration']:
a_explore = a0
neglogp_explore = self.pd.neglogp(a_explore)
def action_exploration(ob, state, mask, *_args, exploration_timesteps, prev_actions, gamelengths,
keep_prev_action, prev_action_count, exploration_durations, **_kwargs):
"""Exploration for long-term reward"""
a, neglogp = tf_session.run([a_explore, neglogp_explore],
{X:ob, M:mask, exploration_timesteps_pl:exploration_timesteps,
prev_actions_pl:prev_actions,
gamelengths_pl:gamelengths, exploration_durations_pl:exploration_durations,
keep_prev_action_pl:keep_prev_action, prev_action_count_pl:prev_action_count})
return a, self.initial_state, neglogp
self.X = X
self.M = M
self.pi = pi
self.vf = vf
self.step = step
self.action = action
self.value = value
self.action_exploration = action_exploration
def get_observation_features(self, frame, delta=None):
"""Get output features of observation network (to be fed into reward redistribution network)"""
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
return tf.expand_dims(nature_cnn(frame[:, 0]), dim=1)
|
recipes/irrxml/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12733009 |
import os
from conans import ConanFile, CMake, tools
class IrrXMLConan(ConanFile):
name = "irrxml"
license = "ZLIB"
homepage = "http://www.ambiera.com/irrxml"
url = "https://github.com/conan-io/conan-center-index"
description = "irrXML is a simple and fast open source xml parser for C++"
topics = ("xml", "xml-parser", "parser", "xml-reader")
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_folder = self.name + "-" + self.version
os.rename(extracted_folder, self._source_subfolder)
def _extract_license(self):
header = tools.load(os.path.join(self.package_folder, "include", "irrXML.h"))
license_contents = header[header.find(r"\section license License")+25:header.find(r"\section history", 1)]
tools.save("LICENSE", license_contents)
def _configure_cmake(self):
cmake = CMake(self)
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self._extract_license()
self.copy(pattern="LICENSE", dst="licenses")
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["m"]
|
posthog/migrations/0122_organization_setup_section_2_completed.py | FarazPatankar/posthog | 7,409 | 12733017 | # Generated by Django 3.0.11 on 2021-02-02 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0121_person_email_index"),
]
operations = [
migrations.AddField(
model_name="organization", name="setup_section_2_completed", field=models.BooleanField(default=True),
),
]
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py | diCagri/content | 799 | 12733023 |
import json
import io
import pytest
from CommonServerPython import DemistoException, FeedIndicatorType, CommandResults
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
SOCRADAR_API_ENDPOINT = 'https://platform.socradar.com/api'
CALCULATE_DBOT_SCORE_INPUTS = [
(900, 3),
(800, 2),
(450, 2),
(300, 1),
(100, 1),
(0, 0),
]
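# Editor's note: these tuples read as (SOCRadar score, expected DBot score) pairs, presumably
# consumed by a parametrized test of the score-mapping helper later in the (truncated) file;
# DBot scores 0-3 correspond to Cortex XSOAR's Unknown/Good/Suspicious/Bad verdicts.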
def test_test_module(requests_mock):
"""Tests the test_module validation command.
"""
from SOCRadarThreatFusion import Client, test_module
mock_socradar_api_key = "APIKey"
suffix = f'threat/analysis/check/auth?key={mock_socradar_api_key}'
mock_response = util_load_json('test_data/check_auth_response.json')
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
response = test_module(client)
assert response == 'ok'
def test_test_module_handles_authorization_error(requests_mock):
"""Tests the test_module validation command authorization error.
"""
from SOCRadarThreatFusion import Client, test_module, MESSAGES
mock_socradar_api_key = "WrongAPIKey"
suffix = f'threat/analysis/check/auth?key={mock_socradar_api_key}'
mock_response = util_load_json('test_data/check_auth_response_auth_error.json')
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response, status_code=401)
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(DemistoException, match=MESSAGES['AUTHORIZATION_ERROR']):
test_module(client)
def test_ip_command(requests_mock):
"""Tests the ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, ip_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_ip_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'ip': '1.1.1.1'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = ip_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_ip_expected_output.json')
expected_context = util_load_json('test_data/score_ip_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for IP: 1.1.1.1' in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_ip_command_handles_incorrect_entity_type():
"""Tests the ip_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, ip_command
mock_socradar_api_key = "APIKey"
mock_args = {'ip': 'INCORRECT IP ADDRESS'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
ip_command(
client=client,
args=mock_args,
)
def test_domain_command(requests_mock):
"""Tests the domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, domain_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_domain_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'domain': 'paloaltonetworks.com'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = domain_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_domain_expected_output.json')
expected_context = util_load_json('test_data/score_domain_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for domain: paloaltonetworks.com' in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_domain_command_handles_incorrect_entity_type():
"""Tests the domain_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, domain_command
mock_socradar_api_key = "APIKey"
mock_args = {'domain': 'INCORRECT DOMAIN'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
domain_command(
client=client,
args=mock_args,
)
def test_file_command(requests_mock):
"""Tests the file_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, file_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_hash_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'file': '3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = file_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_hash_expected_output.json')
expected_context = util_load_json('test_data/score_hash_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792' \
in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_file_command_handles_incorrect_entity_type():
"""Tests the file_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, file_command
mock_socradar_api_key = "APIKey"
mock_args = {'file': 'INCORRECT HASH'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
file_command(
client=client,
args=mock_args,
)
def test_score_ip(requests_mock):
"""Tests the score_ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_ip_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_ip_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'ip': '1.1.1.1'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_ip_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_ip_expected_output.json')
expected_context = util_load_json('test_data/score_ip_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for IP: 1.1.1.1' in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_ip_handles_incorrect_entity_type():
"""Tests the score_ip_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_ip_command
mock_socradar_api_key = "APIKey"
mock_args = {'ip': 'INCORRECT IP ADDRESS'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_ip_command(
client=client,
args=mock_args,
)
def test_score_domain(requests_mock):
"""Tests the score_domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_domain_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_domain_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'domain': 'paloaltonetworks.com'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_domain_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_domain_expected_output.json')
expected_context = util_load_json('test_data/score_domain_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for domain: paloaltonetworks.com' in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_domain_handles_incorrect_entity_type():
"""Tests the score_domain_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_domain_command
mock_socradar_api_key = "APIKey"
mock_args = {'domain': 'INCORRECT DOMAIN'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_domain_command(
client=client,
args=mock_args,
)
def test_score_hash(requests_mock):
"""Tests the score_hash_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_hash_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_hash_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'hash': '3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_hash_command(
client=client,
args=mock_args
)
expected_output = util_load_json('test_data/score_hash_expected_output.json')
expected_context = util_load_json('test_data/score_hash_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792' \
in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_hash_handles_incorrect_entity_type():
"""Tests the score_hash_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_hash_command
mock_socradar_api_key = "APIKey"
mock_args = {'hash': 'INCORRECT HASH'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_hash_command(
client=client,
args=mock_args,
)
@pytest.mark.parametrize('socradar_score, dbot_score', CALCULATE_DBOT_SCORE_INPUTS)
def test_calculate_dbot_score(socradar_score, dbot_score):
from SOCRadarThreatFusion import calculate_dbot_score
assert calculate_dbot_score(socradar_score) == dbot_score
def test_map_indicator_type():
from SOCRadarThreatFusion import map_indicator_type
assert FeedIndicatorType.IP == map_indicator_type('ipv4')
assert FeedIndicatorType.IPv6 == map_indicator_type('ipv6')
assert FeedIndicatorType.Domain == map_indicator_type('hostname')
assert FeedIndicatorType.File == map_indicator_type('hash')
assert None is map_indicator_type('IP')
assert None is map_indicator_type('invalid')
|
dino/web.py | thenetcircle/dino | 150 | 12733036 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from flask import Flask
from flask_socketio import SocketIO
from werkzeug.contrib.fixers import ProxyFix
from dino import environ
from dino.config import ConfigKeys
__author__ = '<NAME> <<EMAIL>>'
logger = logging.getLogger(__name__)
logging.getLogger('amqp').setLevel(logging.INFO)
logging.getLogger('kafka.conn').setLevel(logging.INFO)
logging.getLogger('kafka.client').setLevel(logging.INFO)
logging.getLogger('kafka.metrics').setLevel(logging.INFO)
class ReverseProxied(object):
"""
Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
def create_app():
_app = Flask(
import_name=__name__,
template_folder='admin/templates/',
static_folder='admin/static/')
# used for encrypting cookies for handling sessions
_app.config['SECRET_KEY'] = 'abc492ee-9739-11e6-a174-07f6b92d4a4b'
_app.config['ROOT_URL'] = environ.env.config.get(ConfigKeys.ROOT_URL, domain=ConfigKeys.WEB, default='/')
message_queue_type = environ.env.config.get(ConfigKeys.TYPE, domain=ConfigKeys.QUEUE, default=None)
if message_queue_type is None and not (len(environ.env.config) == 0 or environ.env.config.get(ConfigKeys.TESTING)):
raise RuntimeError('no message queue type specified')
message_queue = 'redis://%s' % environ.env.config.get(ConfigKeys.HOST, domain=ConfigKeys.CACHE_SERVICE, default='')
message_channel = 'dino_%s' % environ.env.config.get(ConfigKeys.ENVIRONMENT, default='test')
logger.info('message_queue: %s' % message_queue)
_socketio = SocketIO(
_app,
logger=logger,
engineio_logger=os.environ.get('DINO_DEBUG', '0') == '1',
async_mode='eventlet',
message_queue=message_queue,
channel=message_channel)
# preferably "emit" should be set during env creation, but the socketio object is not created until after env is
environ.env.out_of_scope_emit = _socketio.emit
_app.wsgi_app = ReverseProxied(ProxyFix(_app.wsgi_app))
return _app, _socketio
app, socketio = create_app()
environ.init_web_auth(environ.env)
# keep this, otherwise flask won't find any routes
import dino.admin.routes
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/FilterIntegerRule.py | htlcnn/ironpython-stubs | 182 | 12733095 |
class FilterIntegerRule(FilterNumericValueRule,IDisposable):
"""
A filter rule that operates on integer values in a Revit project.
FilterIntegerRule(valueProvider: FilterableValueProvider,evaluator: FilterNumericRuleEvaluator,ruleValue: int)
"""
def Dispose(self):
""" Dispose(self: FilterRule,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: FilterRule,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,valueProvider,evaluator,ruleValue):
""" __new__(cls: type,valueProvider: FilterableValueProvider,evaluator: FilterNumericRuleEvaluator,ruleValue: int) """
pass
RuleValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The user-supplied value against which values from a Revit document will be tested.
Get: RuleValue(self: FilterIntegerRule) -> int
Set: RuleValue(self: FilterIntegerRule)=value
"""
|
chapter5_operations/prediction_log_pattern/src/app/routers/routers.py | sudabon/ml-system-in-actions | 133 | 12733118 |
import uuid
from logging import getLogger
from typing import Any, Dict, List
from fastapi import APIRouter, HTTPException
from src.ml.data import Data
from src.ml.outlier_detection import outlier_detector
from src.ml.prediction import classifier
from src.utils.profiler import log_decorator
logger = getLogger(__name__)
router = APIRouter()
@router.get("/health")
def health() -> Dict[str, str]:
return {"health": "ok"}
@router.get("/metadata")
def metadata() -> Dict[str, Any]:
return {
"data_type": "float32",
"data_structure": "(1,4)",
"data_sample": Data().data,
"prediction_type": "float32",
"prediction_structure": "(1,3)",
"prediction_sample": [0.97093159, 0.01558308, 0.01348537],
"outlier_type": "bool, float32",
"outlier_structure": "(1,2)",
"outlier_sample": [False, 0.4],
}
@router.get("/label")
def label() -> Dict[int, str]:
return classifier.label
@log_decorator(endpoint="/predict/test", logger=logger)
def _predict_test(job_id: str) -> Dict[str, Any]:
logger.info(f"execute: [{job_id}]")
prediction = classifier.predict(data=Data().data)
is_outlier, outlier_score = outlier_detector.predict(data=Data().data)
prediction_list = list(prediction)
return {
"job_id": job_id,
"prediction": prediction_list,
"is_outlier": is_outlier,
"outlier_score": outlier_score,
}
@router.get("/predict/test")
def predict_test() -> Dict[str, Any]:
job_id = str(uuid.uuid4())[:6]
return _predict_test(job_id=job_id)
@log_decorator(endpoint="/predict/test/label", logger=logger)
def _predict_test_label(job_id: str) -> Dict[str, Any]:
logger.info(f"execute: [{job_id}]")
prediction = classifier.predict_label(data=Data().data)
is_outlier, outlier_score = outlier_detector.predict(data=Data().data)
return {
"job_id": job_id,
"prediction": prediction,
"is_outlier": is_outlier,
"outlier_score": outlier_score,
}
@router.get("/predict/test/label")
def predict_test_label() -> Dict[str, Any]:
job_id = str(uuid.uuid4())[:6]
return _predict_test_label(job_id=job_id)
@log_decorator(endpoint="/predict", logger=logger)
def _predict(data: Data, job_id: str) -> Dict[str, Any]:
logger.info(f"execute: [{job_id}]")
if len(data.data) != 1 or len(data.data[0]) != 4:
raise HTTPException(status_code=404, detail="Invalid input data")
prediction = classifier.predict(data.data)
is_outlier, outlier_score = outlier_detector.predict(data=data.data)
prediction_list = list(prediction)
return {
"job_id": job_id,
"prediction": prediction_list,
"is_outlier": is_outlier,
"outlier_score": outlier_score,
}
@router.post("/predict")
def predict(data: Data) -> Dict[str, Any]:
job_id = str(uuid.uuid4())[:6]
return _predict(data=data, job_id=job_id)
@log_decorator(endpoint="/predict/label", logger=logger)
def _predict_label(data: Data, job_id: str) -> Dict[str, str]:
logger.info(f"execute: [{job_id}]")
if len(data.data) != 1 or len(data.data[0]) != 4:
raise HTTPException(status_code=404, detail="Invalid input data")
prediction = classifier.predict_label(data.data)
is_outlier, outlier_score = outlier_detector.predict(data=data.data)
return {
"job_id": job_id,
"prediction": prediction,
"is_outlier": is_outlier,
"outlier_score": outlier_score,
}
@router.post("/predict/label")
def predict_label(data: Data) -> Dict[str, Any]:
job_id = str(uuid.uuid4())[:6]
return _predict_label(data=data, job_id=job_id)
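# A hedged usage sketch (host/port and the payload values are assumptions, not
# defined in this file; the expected (1, 4) float32 shape comes from /metadata):
#
#   curl -X POST http://localhost:8000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"data": [[5.1, 3.5, 1.4, 0.2]]}'
#
# The response bundles the job_id, the prediction vector, and the outlier flag/score.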
|
pygears/sim/modules/cosim_base.py | bogdanvuk/pygears | 120 | 12733137 |
from dataclasses import dataclass
from pygears.sim.sim_gear import SimGear
from pygears.sim import delta, timestep, log, clk
from pygears.sim.sim import SimPlugin
from pygears.conf import Inject, inject
from pygears import GearDone, reg
from .cosim_port import CosimNoData, InCosimPort, OutCosimPort
class CosimBase(SimGear):
@inject
def __init__(self, gear, timeout=-1, sim_map=Inject('sim/map')):
super().__init__(gear)
self.timeout = timeout
self.in_cosim_ports = [InCosimPort(self, p) for p in gear.in_ports]
self.out_cosim_ports = [OutCosimPort(self, p) for p in gear.out_ports]
self.eval_needed = False
for p in (self.in_cosim_ports + self.out_cosim_ports):
sim_map[p.port] = p
def cycle(self):
raise NotImplementedError()
def forward(self):
raise NotImplementedError()
def back(self):
raise NotImplementedError()
def read_out(self, port):
if not port in self.handlers:
raise ConnectionResetError
if self.eval_needed:
self.forward()
self.eval_needed = True
hout = self.handlers[port]
hout.reset()
return hout.read()
def ack_out(self, port):
if not port in self.handlers:
raise ConnectionResetError
self.eval_needed = True
hout = self.handlers[port]
hout.ack()
self.activity_monitor = 0
def write_in(self, port, data):
if not port in self.handlers:
raise ConnectionResetError
self.eval_needed = True
hin = self.handlers[port]
return hin.send(data)
def reset_in(self, port):
if not port in self.handlers:
raise ConnectionResetError
self.eval_needed = True
hin = self.handlers[port]
hin.reset()
def ready_in(self, port):
if not port in self.handlers:
raise ConnectionResetError
if self.eval_needed:
self.back()
self.eval_needed = False
hin = self.handlers[port]
if hin.ready():
self.activity_monitor = 0
return True
else:
return False
async def func(self, *args, **kwds):
self.activity_monitor = 0
self.eval_needed = False
try:
while True:
phase = None
while phase != 'back':
phase = await delta()
if self.eval_needed:
self.forward()
self.eval_needed = False
if self.activity_monitor == self.timeout:
raise GearDone
self.cycle()
self.activity_monitor += 1
except (GearDone, BrokenPipeError):
# print(f"SimGear canceling: {self.gear.name}")
for p in self.gear.out_ports:
p.producer.finish()
self._finish()
raise GearDone
@dataclass
class AuxClock:
name: str
frequency: int
class CosimPlugin(SimPlugin):
@classmethod
def bind(cls):
reg.confdef('sim/aux_clock', default=[])
|
shared-data/python/opentrons_shared_data/module/__init__.py | faliester/opentrons | 235 | 12733205 |
""" opentrons_shared_data.module: functions and types for module defs """
import json
from pathlib import Path
from typing import overload, TYPE_CHECKING
from ..load import load_shared_data
if TYPE_CHECKING:
from .dev_types import (
SchemaVersions, ModuleSchema, SchemaV1, SchemaV2,
ModuleDefinitionV1, ModuleDefinitionV2, ModuleModel
)
class ModuleNotFoundError(KeyError):
def __init__(self, version: str, model_or_loadname: str):
super().__init__(model_or_loadname)
self.requested_version = version
self.requested_module = model_or_loadname
def __str__(self) -> str:
return f'No such version {self.requested_version} module '\
f'{self.requested_module}'
def __repr__(self) -> str:
return f'{self.__class__.__name__}: {self.requested_module} '\
f'at version {self.requested_version}'
def load_schema(version: 'SchemaVersions') -> 'ModuleSchema':
path = Path('module') / 'schemas' / f'{version}.json'
return json.loads(load_shared_data(path))
@overload
def load_definition(
version: 'SchemaV1', model_or_loadname: str) -> 'ModuleDefinitionV1':
...
@overload
def load_definition(
version: 'SchemaV2',
model_or_loadname: 'ModuleModel') -> 'ModuleDefinitionV2':
...
def load_definition(version, model_or_definition):
if version == '1':
path = Path('module') / 'definitions' / '1.json'
data = json.loads(load_shared_data(path))
try:
return data[model_or_definition]
except KeyError:
raise ModuleNotFoundError('1', model_or_definition)
else:
path = Path(f'module/definitions/2/{model_or_definition}.json')
try:
data = load_shared_data(path)
except FileNotFoundError:
raise ModuleNotFoundError('2', model_or_definition)
return json.loads(data)
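# A minimal usage sketch (the identifiers below are illustrative assumptions;
# the real load names / models are whatever the bundled definition JSONs define):
#
#   v1_def = load_definition('1', 'magdeck')
#   v2_def = load_definition('2', 'magneticModuleV1')
#
# An unknown module raises ModuleNotFoundError carrying the requested version and name.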
|
pocketsphinx-5prealpha/swig/python/test/continuous_test.py | sowmyavasudeva/SmartBookmark | 139 | 12733220 |
#!/usr/bin/python
from os import environ, path
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
MODELDIR = "../../../model"
DATADIR = "../../../test/data"
config = Decoder.default_config()
config.set_string('-hmm', path.join(MODELDIR, 'en-us/en-us'))
config.set_string('-lm', path.join(MODELDIR, 'en-us/en-us.lm.bin'))
config.set_string('-dict', path.join(MODELDIR, 'en-us/cmudict-en-us.dict'))
config.set_string('-logfn', '/dev/null')
decoder = Decoder(config)
stream = open(path.join(DATADIR, 'goforward.raw'), 'rb')
#stream = open('10001-90210-01803.wav', 'rb')
in_speech_bf = False
decoder.start_utt()
while True:
buf = stream.read(1024)
if buf:
decoder.process_raw(buf, False, False)
if decoder.get_in_speech() != in_speech_bf:
in_speech_bf = decoder.get_in_speech()
if not in_speech_bf:
decoder.end_utt()
print 'Result:', decoder.hyp().hypstr
decoder.start_utt()
else:
break
decoder.end_utt()
|
scripts/ci/list-test-executables.py | Luthaf/lumol | 147 | 12733223 | #!/usr/bin/env python3
# -*- encoding: utf8 -*-
import json
import subprocess
def list_executables(*command):
command = list(command)
command.append("--no-run")
command.append("--message-format=json")
result = subprocess.run(command, check=True, stdout=subprocess.PIPE)
# convert successive JSON documents to a list of JSON objects
stdout = "[" + result.stdout.decode("utf8").replace("}\n{", "},{") + "]"
executables = []
for event in json.loads(stdout):
if "profile" in event and event["profile"]["test"]:
for path in event["filenames"]:
if path.endswith(".dSYM"):
continue
executables.append(path)
return executables
def main():
executables = []
executables.extend(list_executables("cargo", "test", "--all", "--lib"))
executables.extend(
list_executables("cargo", "test", "--package=lumol-input", "--tests")
)
for executable in set(executables):
if "lumol_tutorial" in executable:
continue
print(executable)
if __name__ == "__main__":
main()
|
examples/reflection/server.py | artificial-aidan/grpclib | 754 | 12733241 |
import asyncio
from grpclib.utils import graceful_exit
from grpclib.server import Server
from grpclib.reflection.service import ServerReflection
from helloworld.server import Greeter
async def main(*, host: str = '127.0.0.1', port: int = 50051) -> None:
services = ServerReflection.extend([Greeter()])
server = Server(services)
with graceful_exit([server]):
await server.start(host, port)
print(f'Serving on {host}:{port}')
await server.wait_closed()
if __name__ == '__main__':
asyncio.run(main())
|
plaso/analyzers/__init__.py | nflexfo/plaso | 1,253 | 12733275 | # -*- coding: utf-8 -*-
"""This file imports Python modules that register analyzers."""
from plaso.analyzers import hashing_analyzer
from plaso.analyzers import yara_analyzer
|
tests/unit/test_app.py | paulmassen/kibitzr | 478 | 12733297 | from ..compat import mock
def test_loop_aborts_without_checks(app, settings):
assert app.run() == 1
def test_main_executes_all_checks_before_loop(app, settings):
with mock.patch.object(app, "check_forever", side_effect=app.on_interrupt) as the_loop:
settings.checks.append({
'name': 'A',
'script': {'python': 'ok, content = True, "ok"'}
})
assert app.run() == 1
assert the_loop.call_count == 1
assert the_loop.call_args[0][0][0].check.call_count == 1
def test_main_filters_names(app, settings):
with mock.patch.object(app, "check_forever", side_effect=app.on_interrupt) as the_loop:
settings.checks.extend([
{'name': 'A', 'url': 'A'},
{'name': 'B', 'url': 'B'},
])
assert app.run(names=['B']) == 1
assert the_loop.call_count == 1
assert the_loop.call_args[0][0][0].check.call_count == 1
|
aw_nas/btcs/layer2/controller.py | Harald-R/aw_nas | 195 | 12733364 | """
2-layer controller.
"""
from aw_nas import utils, assert_rollout_type
from aw_nas.utils import DistributedDataParallel
from aw_nas.controller.base import BaseController
from aw_nas.btcs.layer2.search_space import (
Layer2Rollout,
Layer2DiffRollout,
DenseMicroRollout,
DenseMicroDiffRollout,
StagewiseMacroRollout,
StagewiseMacroDiffRollout,
SinkConnectMacroDiffRollout,
)
from collections import OrderedDict
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
try:
# from torch.nn.SyncBatchNorm import convert_sync_batch_norm as convert_sync_bn
from torch.nn import SyncBatchNorm
convert_sync_bn = SyncBatchNorm.convert_sync_batchnorm
except ImportError:
convert_sync_bn = lambda m: m
class Layer2Optimizer(optim.Optimizer):
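    """Composite optimizer: the first two parameter groups (macro alphas and
    width alphas) are handed to a `macro` sub-optimizer, the remaining (micro)
    alphas to a `micro` sub-optimizer, each built from its own sub-config."""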
def __init__(self, params, **opt_cfg):
super(Layer2Optimizer, self).__init__([torch.tensor([])], defaults={})
macro_opt_type = opt_cfg["macro"].pop("type")
micro_opt_type = opt_cfg["micro"].pop("type")
# currently width alphas & macro-alpha share the same optimizer
self.macro_optimizer = getattr(optim, macro_opt_type)(
nn.ParameterList(params[0:2]), **opt_cfg["macro"]
) # after adding width-alphas, as 2nd
self.micro_optimizer = getattr(optim, micro_opt_type)(
nn.ParameterList(params[2:]), **opt_cfg["micro"]
)
def step(self):
self.macro_optimizer.step()
self.micro_optimizer.step()
torch.optim.layer2 = Layer2Optimizer  # monkey-patch torch.optim so configs can reference an optimizer of type "layer2"
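# A sketch of how the patched optimizer might be selected from a config
# (hypothetical YAML; the exact keys depend on the surrounding aw_nas config schema):
#
#   controller_optimizer:
#     type: layer2
#     macro: {type: Adam, lr: 3.e-4}
#     micro: {type: Adam, lr: 1.e-3}
#
# getattr(torch.optim, "layer2") then resolves to the Layer2Optimizer defined above.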
class Layer2DiffController(BaseController, nn.Module):
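    """Two-level differentiable controller.

    Wraps a macro (stagewise / sink-connect) controller and a micro (dense cell)
    controller, and samples Layer2DiffRollout objects that pair one macro rollout
    with one micro rollout.
    """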
NAME = "layer2-differentiable"
def __init__(
self,
search_space,
rollout_type,
mode="eval",
device="cuda",
macro_controller_type="random_sample",
macro_controller_cfg={},
micro_controller_type="random_sample",
micro_controller_cfg={},
inspect_hessian_every=-1,
save_alphas_every=-1,
multiprocess=False,
schedule_cfg=None,
):
super(Layer2DiffController, self).__init__(
search_space, rollout_type, schedule_cfg=schedule_cfg
)
nn.Module.__init__(self)
self.search_space = search_space
self.rollout_type = rollout_type
self.device = device
self.to(self.device)
self.inspect_hessian_every = inspect_hessian_every
self.inspect_hessian = False
self.save_alphas_every = save_alphas_every
self.save_alphas = False
self.saved_dict = {
"macro": [],
"micro": [],
"width": [],
}
self.multiprocess = multiprocess
# the macro/micro controllers
if macro_controller_type == "macro-stagewise-diff":
self.macro_controller = MacroStagewiseDiffController(
self.search_space.macro_search_space,
macro_controller_type,
device=self.device,
multiprocess=self.multiprocess,
**macro_controller_cfg,
)
elif macro_controller_type == "macro-sink-connect-diff":
self.macro_controller = MacroSinkConnectDiffController(
self.search_space.macro_search_space,
macro_controller_type,
device=self.device,
multiprocess=self.multiprocess,
**macro_controller_cfg,
)
else:
raise NotImplementedError()
if micro_controller_type == "micro-dense-diff":
self.micro_controller = MicroDenseDiffController(
self.search_space.micro_search_space,
micro_controller_type,
device=self.device,
multiprocess=self.multiprocess,
**micro_controller_cfg,
)
else:
raise NotImplementedError()
object.__setattr__(self, "parallel_model", self)
self._parallelize()
def _parallelize(self):
if self.multiprocess:
net = convert_sync_bn(self).to(self.device)
object.__setattr__(
self,
"parallel_model",
DistributedDataParallel(
self, (self.device,), find_unused_parameters=True
),
)
def on_epoch_start(self, epoch):
super(Layer2DiffController, self).on_epoch_start(epoch)
if self.inspect_hessian_every >= 0 and epoch % self.inspect_hessian_every == 0:
self.inspect_hessian = True
if self.save_alphas_every >= 0 and epoch % self.save_alphas_every == 0:
self.save_alphas = True
# save alphas every epoch
if self.save_alphas:
self.saved_dict["macro"].append(
[alpha.data.cpu() for alpha in self.macro_controller.cg_alphas]
)
self.saved_dict["micro"].append(
[alpha.data.cpu() for alpha in self.micro_controller.cg_alphas]
)
self.saved_dict["width"].append(
[
width_alpha.cpu()
for width_alpha in self.macro_controller.width_alphas
]
)
self.macro_controller.on_epoch_start(epoch)
self.micro_controller.on_epoch_start(epoch)
def set_device(self, device):
self.device = device
self.to(device)
def set_mode(self, mode):
super(Layer2DiffController, self).set_mode(mode)
if mode == "train":
nn.Module.train(self)
elif mode == "eval":
nn.Module.eval(self)
else:
raise Exception("Unrecognized mode: {}".format(mode))
def parameters(self, recurse=False):
        # FIXME: the standard nn.Module.parameters() uses recurse=True to collect all params
param_list = nn.ParameterList([])
param_list.extend(self.macro_controller.parameters())
param_list.extend(self.micro_controller.parameters())
return param_list
def _entropy_loss(self):
return (
self.macro_controller._entropy_loss()
+ self.micro_controller._entropy_loss()
)
def sample(self, n=1, batch_size=1):
if self.multiprocess:
return self.parallel_model.forward(n=n, batch_size=batch_size)
else:
return self.forward(n=n, batch_size=batch_size)
def forward(self, n=1, batch_size=1):
rollouts = []
macro_rollouts = self.macro_controller.forward(n=n, batch_size=batch_size)
micro_rollouts = self.micro_controller.forward(n=n, batch_size=batch_size)
for i in range(n):
rollouts.append(
Layer2DiffRollout(
macro_rollouts[i], micro_rollouts[i], self.search_space
)
)
return rollouts
def gradient(self, loss, return_grads=True, zero_grads=True):
if zero_grads:
self.zero_grad()
if self.inspect_hessian:
for name, param in self.named_parameters():
max_eig = utils.torch_utils.max_eig_of_hessian(loss, param)
self.logger.info("Max eigenvalue of Hessian of %s: %f", name, max_eig)
_loss = loss + self._entropy_loss()
_loss.backward()
if return_grads:
return utils.get_numpy(_loss), [
(k, v.grad.clone()) for k, v in self.named_parameters()
]
return utils.get_numpy(_loss)
def step_current_gradient(self, optimizer):
self.macro_controller.step_current_gradient(optimizer.macro_optimizer)
self.micro_controller.step_current_gradient(optimizer.micro_optimizer)
def step_gradient(self, gradients, optimizer):
self.macro_controller.step_gradient(gradients[0], optimizer.macro_optimizer)
self.micro_controller.step_gradient(gradients[1], optimizer.micro_optimizer)
def step(self, rollouts, optimizer, perf_name):
macro_rollouts = [r.macro for r in rollouts]
micro_rollouts = [r.micro for r in rollouts]
macro_loss = self.macro_controller.step(
macro_rollouts, optimizer.macro_optimizer, perf_name
)
micro_loss = self.micro_controller.step(
micro_rollouts, optimizer.micro_optimizer, perf_name
)
return macro_loss, micro_loss
def summary(self, rollouts, log=False, log_prefix="", step=None):
macro_rollouts = [r.macro for r in rollouts]
micro_rollouts = [r.micro for r in rollouts]
self.macro_controller.summary(
macro_rollouts, log=log, log_prefix=log_prefix, step=None
)
self.micro_controller.summary(
micro_rollouts, log=log, log_prefix=log_prefix, step=None
)
def save(self, path):
"""Save the parameters to disk."""
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
"""save alphas"""
if self.save_alphas_every is not None:
# os.path.dirname means the parent path of the `PATH`
torch.save(
self.saved_dict,
os.path.join(os.path.dirname(os.path.dirname(path)), "alphas.pth"),
)
def load(self, path):
"""Load the parameters from disk."""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
    # Layer2DiffController.parameters() returns a list of [macro parameters, micro parameters], so we override zero_grad(), whose default implementation iterates model.parameters()
def zero_grad(self):
for param in self.parameters():
for p in param:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
@classmethod
def supported_rollout_types(cls):
return ["layer2", "layer2-differentiable"]
class GetArchMacro(torch.autograd.Function):
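    """Autograd function that scatters the sampled edge weights of one stage
    into a dense [num_nodes, num_nodes] connection matrix at the index pairs
    stored in `search_space.idxes[i_stage]`; backward gathers the incoming
    gradient for exactly those entries back into the flat op_weights vector."""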
@staticmethod
def forward(
ctx,
search_space,
op_weights,
device,
i_stage,
):
stage_conn = torch.zeros(
(
search_space.stage_node_nums[i_stage],
search_space.stage_node_nums[i_stage],
)
).to(device)
stage_conn[search_space.idxes[i_stage]] = op_weights
ctx.save_for_backward(
torch.as_tensor(op_weights), torch.as_tensor(search_space.idxes[i_stage])
)
return stage_conn
@staticmethod
def backward(ctx, grad_output):
op_weights, idxes = ctx.saved_tensors
op_weights_grad = grad_output[idxes[0], idxes[1]]
return None, op_weights_grad, None, None, None
class MacroStagewiseDiffController(BaseController, nn.Module):
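    """Differentiable controller over the stagewise macro connectivity.

    Relaxes the per-stage connection choices (and, optionally, a per-cell or
    per-stage width choice) with Gumbel-softmax / relaxed-Bernoulli sampling.
    """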
NAME = "macro-stagewise-diff"
SCHEDULABLE_ATTRS = [
"gumbel_temperature",
"entropy_coeff",
"force_uniform",
"width_gumbel_temperature",
"width_entropy_coeff",
]
def __init__(
self,
search_space,
rollout_type,
mode="eval",
device="cuda",
use_prob=False,
gumbel_hard=False,
gumbel_temperature=1.0,
use_sigmoid=False,
use_edge_normalization=False,
entropy_coeff=0.01,
max_grad_norm=None,
force_uniform=False,
full_init=False, # use all-one initialization and big flops reg
progressive_pruning_th=None,
multiprocess=False,
per_stage_width=True, # default use per stage width
width_entropy_coeff=0.01,
width_gumbel_temperature=1.0,
schedule_cfg=None,
):
super(MacroStagewiseDiffController, self).__init__(
search_space, rollout_type, schedule_cfg=schedule_cfg
)
nn.Module.__init__(self)
self.device = device
# sampling
self.use_prob = use_prob
self.gumbel_hard = gumbel_hard
self.gumbel_temperature = gumbel_temperature
self.use_sigmoid = use_sigmoid
# use_prob / use_sigmoid should not the True at the same time
# if both false use plain gumbel softmax
assert not (use_prob and use_sigmoid)
# edge normalization
self.use_edge_normalization = use_edge_normalization
# training
self.entropy_coeff = entropy_coeff
self.max_grad_norm = max_grad_norm
self.force_uniform = force_uniform
self.progressive_pruning_th = progressive_pruning_th
self.width_choice = self.search_space.width_choice
self.multiprocess = multiprocess
self.per_stage_width = per_stage_width
self.width_gumbel_temperature = width_gumbel_temperature
self.width_entropy_coeff = width_entropy_coeff
# generate parameters
self.full_init = full_init
if not self.full_init:
init_value = 1.0e-3
else:
init_value = 1.0
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
init_value * torch.randn(sum(self.search_space.num_possible_edges))
)
]
)
# width choices [#cells , #width_choice]
if self.width_choice is not None:
if not self.per_stage_width:
self.width_alphas = nn.ParameterList(
[
nn.Parameter(
init_value
* torch.randn(
len(self.search_space.cell_layout),
len(self.width_choice),
)
)
]
)
else:
self.width_alphas = nn.ParameterList(
[
nn.Parameter(
init_value
* torch.randn(
len(self.search_space.stage_node_nums),
len(self.width_choice),
)
)
]
)
self.stage_num_alphas = (
self.search_space.num_possible_edges
        )  # kept for compatibility with the sink-connecting search space
if self.use_edge_normalization:
raise NotImplementedError("MacroDiffController does not support edge-norm")
else:
self.cg_betas = None
self.get_arch = GetArchMacro()
self.to(self.device)
def set_mode(self, mode):
super(MacroStagewiseDiffController, self).set_mode(mode)
if mode == "train":
nn.Module.train(self)
elif mode == "eval":
nn.Module.eval(self)
else:
raise Exception("Unrecognized mode: {}".format(mode))
def set_device(self, device):
self.device = device
self.to(device)
def progressive_pruning(self):
for alpha in self.cg_alphas:
            # in-place: zero out alphas at or below the pruning threshold (no grad tracked)
alpha.data = alpha * (alpha.gt(self.progressive_pruning_th).float())
def forward(self, n=1, batch_size=1):
return self.sample(n=n, batch_size=batch_size)
def sample(self, n=1, batch_size=1):
if self.progressive_pruning_th is not None:
self.progressive_pruning()
width_arch, width_logits = self.sample_width(n=n, batch_size=batch_size)
rollouts = []
for i_sample in range(n):
# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
op_weights_list = []
edge_norms_list = []
sampled_list = []
logits_list = []
for alphas in self.cg_alphas:
if (
self.progressive_pruning_th is not None
and self.progressive_pruning_th > 0
):
alphas = alphas.clamp(self.progressive_pruning_th, 1.0e4)
else:
pass
if self.force_uniform: # cg_alpha parameters will not be in the graph
                    # NOTE: the `force_uniform` config does not affect edge_norms (betas);
# if one wants a force_uniform search, keep `use_edge_normalization=False`
alphas = torch.zeros_like(alphas)
if batch_size > 1:
expanded_alpha = (
alphas.reshape([alphas.shape[0], 1, alphas.shape[1]])
.repeat([1, batch_size, 1])
.reshape([-1, alphas.shape[-1]])
)
else:
expanded_alpha = alphas
if self.use_prob:
sampled = F.softmax(
expanded_alpha / self.gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
sampled = utils.relaxed_bernoulli_sample(
expanded_alpha, self.gumbel_temperature
)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(
expanded_alpha, self.gumbel_temperature, hard=False
)
if self.gumbel_hard:
op_weights = utils.straight_through(sampled)
else:
op_weights = sampled
if batch_size > 1:
sampled = sampled.reshape([-1, batch_size, op_weights.shape[-1]])
op_weights = op_weights.reshape(
[-1, batch_size, op_weights.shape[-1]]
)
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampled))
# logits_list.append(utils.get_numpy(alphas))
logits_list.append(alphas)
stage_conns = []
split_op_weights = torch.split(op_weights, self.stage_num_alphas)
for i_stage in range(self.search_space.stage_num):
stage_conn = self.get_arch.apply(
self.search_space,
split_op_weights[i_stage],
self.device,
i_stage,
)
stage_conns.append(stage_conn)
rollouts.append(
StagewiseMacroDiffRollout(
arch=stage_conns,
sampled=sampled_list,
logits=logits_list,
width_arch=width_arch[i_sample],
width_logits=width_logits[i_sample],
search_space=self.search_space,
)
)
return rollouts
def sample_width(self, n=1, batch_size=1):
assert batch_size == 1, "sample_width should not have batch size > 1"
width_sampled_list = []
width_logits_list = []
width_op_weights_list = []
for _ in range(n):
# sample the width alphas
for width_alphas in self.width_alphas:
if self.force_uniform: # cg_alpha parameters will not be in the graph
                    # NOTE: the `force_uniform` config does not affect edge_norms (betas);
# if one wants a force_uniform search, keep `use_edge_normalization=False`
width_alphas = torch.zeros_like(width_alphas)
if batch_size > 1:
expanded_width_alpha = (
width_alphas.reshape(
[width_alphas.shape[0], 1, width_alphas.shape[1]]
)
.repeat([1, batch_size, 1])
.reshape([-1, width_alphas.shape[-1]])
)
else:
expanded_width_alpha = width_alphas
if self.use_prob:
width_sampled = F.softmax(
expanded_width_alpha / self.width_gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
width_sampled = utils.relaxed_bernoulli_sample(
expanded_width_alpha, self.width_gumbel_temperature
)
else:
# gumbel sampling
width_sampled, _ = utils.gumbel_softmax(
expanded_width_alpha, self.width_gumbel_temperature, hard=False
)
if self.gumbel_hard:
width_op_weights = utils.straight_through(width_sampled)
else:
width_op_weights = width_sampled
if batch_size > 1:
width_sampled = width_sampled.reshape(
[-1, batch_size, width_op_weights.shape[-1]]
)
width_op_weights = width_op_weights.reshape(
[-1, batch_size, width_op_weights.shape[-1]]
)
if not self.per_stage_width:
width_op_weights_full = width_op_weights
width_sampled_full = width_sampled
width_alphas_full = width_alphas
else:
# the last stage has one more node
node_list = self.search_space.stage_node_nums.copy()
                # decrement the 1st stage's node count by 1 so that every
                # reduction cell uses the width-alphas of the following stage
node_list[0] = node_list[0] - 1
width_op_weights_full = torch.cat(
[
width_op_weights[idx_stage].repeat(num_nodes - 1, 1)
for idx_stage, num_nodes in enumerate(node_list)
]
)
width_sampled_full = torch.cat(
[
width_sampled[idx_stage].repeat(num_nodes - 1, 1)
for idx_stage, num_nodes in enumerate(node_list)
]
)
width_alphas_full = torch.cat(
[
width_alphas[idx_stage].repeat(num_nodes - 1, 1)
for idx_stage, num_nodes in enumerate(node_list)
]
)
width_op_weights_list.append(width_op_weights_full)
width_sampled_list.append(utils.get_numpy(width_sampled_full))
# logits_list.append(utils.get_numpy(alphas))
width_logits_list.append(width_alphas_full)
return width_op_weights_list, width_logits_list
def save(self, path):
"""Save the parameters to disk."""
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
def load(self, path):
"""Load the parameters from disk."""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
def _entropy_loss(self):
ent_loss = 0.0
if self.entropy_coeff > 0:
alphas = self.cg_alphas[0].split(
[i - 1 for i in self.search_space.stage_node_nums]
)
probs = [F.softmax(alpha, dim=-1) for alpha in self.cg_alphas]
ent_loss = (
self.entropy_coeff
* sum(-(torch.log(prob) * prob).sum() for prob in probs)
+ ent_loss
)
if self.width_entropy_coeff > 0:
width_alphas = self.width_alphas
probs = [F.softmax(alpha, dim=-1) for alpha in self.width_alphas]
ent_loss = (
self.width_entropy_coeff
* sum(-(torch.log(prob) * prob).sum() for prob in probs)
+ ent_loss
)
return ent_loss
def gradient(self, loss, return_grads=True, zero_grads=True):
raise NotImplementedError(
"the grad function is implemented in the layer2diffcontroller.gradient()"
)
def step_current_gradient(self, optimizer):
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
optimizer.step()
def step_gradient(self, gradients, optimizer):
self.zero_grad()
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
# clip the gradients
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
    def step(self, rollouts, optimizer, perf_name):  # very memory inefficient
        self.zero_grad()
        losses = [r.get_perf(perf_name) for r in rollouts]
        # accumulate gradients from all rollouts before applying the optimizer step
        [l.backward() for l in losses]
        optimizer.step()
        return np.mean([l.detach().cpu().numpy() for l in losses])
def __getstate__(self):
state = super(MacroStagewiseDiffController, self).__getstate__().copy()
del state["get_arch"]
return state
def __setstate__(self, state):
super(MacroStagewiseDiffController, self).__setstate__(state)
self.get_arch = GetArchMacro()
def summary(self, rollouts, log=False, log_prefix="", step=None):
num = len(rollouts)
logits_list = [
[utils.get_numpy(logits) for logits in r.logits] for r in rollouts
]
_ss = self.search_space
if self.gumbel_hard:
cg_logprobs = [0.0 for _ in range(_ss.num_cell_groups)]
cg_entros = [0.0 for _ in range(_ss.num_cell_groups)]
for rollout, logits in zip(rollouts, logits_list):
for cg_idx, (vec, cg_logits) in enumerate(zip(rollout.arch, logits)):
prob = utils.softmax(cg_logits)
logprob = np.log(prob)
if self.gumbel_hard:
inds = np.argmax(utils.get_numpy(vec.op_weights), axis=-1)
cg_logprobs[cg_idx] += np.sum(logprob[range(len(inds)), inds])
cg_entros[cg_idx] += -(prob * logprob).sum()
# mean across rollouts
if self.gumbel_hard:
cg_logprobs = [s / num for s in cg_logprobs]
total_logprob = sum(cg_logprobs)
cg_logprobs_str = ",".join(["{:.2f}".format(n) for n in cg_logprobs])
cg_entros = [s / num for s in cg_entros]
total_entro = sum(cg_entros)
cg_entro_str = ",".join(["{:.2f}".format(n) for n in cg_entros])
if log:
# maybe log the summary
self.logger.info(
"%s%d rollouts: %s ENTROPY: %2f (%s)",
log_prefix,
num,
"-LOG_PROB: %.2f (%s) ;" % (-total_logprob, cg_logprobs_str)
if self.gumbel_hard
else "",
total_entro,
cg_entro_str,
)
if step is not None and not self.writer.is_none():
if self.gumbel_hard:
self.writer.add_scalar("log_prob", total_logprob, step)
self.writer.add_scalar("entropy", total_entro, step)
stats = [
(n + " ENTRO", entro) for n, entro in zip(_ss.cell_group_names, cg_entros)
]
if self.gumbel_hard:
stats += [
(n + " LOGPROB", logprob)
for n, logprob in zip(_ss.cell_group_names, cg_logprobs)
]
return OrderedDict(stats)
@classmethod
def supported_rollout_types(cls):
return ["macro-stagewise", "macro-stagewise-diff", "macro-sink-connect-diff"]
class GetArchMacroSinkConnect(torch.autograd.Function):
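    """Autograd function for the sink-connect pattern: the chain edges
    (node i+1 <- node i) are fixed to 1, while the last (sink) row receives the
    sampled connection weights; backward reads the gradient from that sink row."""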
@staticmethod
def forward(
ctx,
search_space,
op_weights,
device,
i_stage,
):
stage_conn = torch.zeros(
(
search_space.stage_node_nums[i_stage],
search_space.stage_node_nums[i_stage],
)
).to(device)
stage_conn[np.arange(len(op_weights)) + 1, np.arange(len(op_weights))] = 1
stage_conn[-1, : len(op_weights)] = op_weights
ctx.save_for_backward(
torch.as_tensor(op_weights), torch.as_tensor(search_space.idxes[i_stage])
)
return stage_conn
@staticmethod
def backward(ctx, grad_output):
op_weights, idxes = ctx.saved_tensors
op_weights_grad = grad_output[-1, : len(op_weights)]
return None, op_weights_grad, None, None, None
class MacroSinkConnectDiffController(MacroStagewiseDiffController):
NAME = "macro-sink-connect-diff"
    # The TF-NAS-like macro search space (sink-based connecting):
    # in each stage, before the reduction node, a `sinking point` aggregates every node's output with a softmax.
    # Note that cg_alpha here denotes whether an edge is connected or not.
def __init__(self, *args, **kwargs):
super(MacroSinkConnectDiffController, self).__init__(*args, **kwargs)
if not self.full_init:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
1e-3
* torch.randn(
sum([n - 1 for n in self.search_space.stage_node_nums])
)
)
]
)
else:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
torch.ones(
sum([n - 1 for n in self.search_space.stage_node_nums])
)
)
]
)
assert (
self.use_sigmoid == False
) # sink-connecting should introduce competition in edges
self.get_arch = GetArchMacroSinkConnect()
self.stage_num_alphas = [n - 1 for n in self.search_space.stage_node_nums]
self.to(self.device) # move the newly generated cg_alphas to cuda
    # The only difference from MacroStagewiseDiffController.sample is that the arch is packed into a SinkConnectMacroDiffRollout
def sample(self, n=1, batch_size=1):
# if use progressive pruning
if self.progressive_pruning_th is not None:
self.progressive_pruning()
width_arch, width_logits = self.sample_width(n=n, batch_size=batch_size)
rollouts = []
for i_sample in range(n):
# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
op_weights_list = []
edge_norms_list = []
sampled_list = []
logits_list = []
for alphas in self.cg_alphas:
splits = [i - 1 for i in self.search_space.stage_node_nums]
op_weights = []
sampleds = []
for alpha in alphas.split(splits):
if (
self.force_uniform
): # cg_alpha parameters will not be in the graph
                        # NOTE: the `force_uniform` config does not affect edge_norms (betas);
# if one wants a force_uniform search, keep `use_edge_normalization=False`
alpha = torch.zeros_like(alpha)
if batch_size > 1:
expanded_alpha = (
alpha.reshape([alpha.shape[0], 1, alpha.shape[1]])
.repeat([1, batch_size, 1])
.reshape([-1, alpha.shape[-1]])
)
else:
expanded_alpha = alpha
if self.use_prob:
sampled = F.softmax(
expanded_alpha / self.gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
sampled = utils.relaxed_bernoulli_sample(
expanded_alpha, self.gumbel_temperature
)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(
expanded_alpha, self.gumbel_temperature, hard=False
)
if self.gumbel_hard:
op_weight = utils.straight_through(sampled)
else:
op_weight = sampled
if batch_size > 1:
sampled = sampled.reshape([-1, batch_size, op_weight.shape[-1]])
op_weight = op_weight.reshape(
[-1, batch_size, op_weight.shape[-1]]
)
op_weights.append(op_weight)
sampleds.append(sampled)
op_weights = torch.cat(op_weights)
sampleds = torch.cat(sampleds)
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampleds))
logits_list.append(alphas)
stage_conns = []
split_op_weights = torch.split(op_weights, self.stage_num_alphas)
for i_stage in range(self.search_space.stage_num):
stage_conn = self.get_arch.apply(
self.search_space,
split_op_weights[i_stage],
self.device,
i_stage,
)
stage_conns.append(stage_conn)
rollouts.append(
SinkConnectMacroDiffRollout(
arch=stage_conns,
sampled=sampled_list,
logits=logits_list,
width_arch=width_arch[i_sample],
width_logits=width_logits[i_sample],
search_space=self.search_space,
)
)
return rollouts
def __setstate__(self, state):
super(MacroSinkConnectDiffController, self).__setstate__(state)
self.get_arch = GetArchMacroSinkConnect()
class GetArchMicro(torch.autograd.Function):
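    """Autograd function that scatters per-edge op weights into a dense
    [num_nodes, num_nodes, num_op_choices] cell tensor at `search_space.idx`,
    and gathers the corresponding gradient slice in backward."""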
@staticmethod
def forward(ctx, search_space, op_weights, device):
empty_arch = torch.zeros(
(
search_space._num_nodes,
search_space._num_nodes,
search_space.num_op_choices,
)
).to(device)
empty_arch[search_space.idx] = op_weights
ctx.save_for_backward(
torch.as_tensor(op_weights), torch.as_tensor(search_space.idx)
)
return empty_arch
@staticmethod
def backward(ctx, grad_output):
op_weights, idxes = ctx.saved_tensors
op_weights_grad = grad_output[idxes[0], idxes[1]]
return None, op_weights_grad, None
class MicroDenseDiffController(BaseController, nn.Module):
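    """Differentiable controller over the dense micro (cell) topology: one
    relaxed op-weight vector per edge of every cell group, sampled with
    Gumbel-softmax / relaxed-Bernoulli relaxation."""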
NAME = "micro-dense-diff"
SCHEDULABLE_ATTRS = ["gumbel_temperature", "entropy_coeff", "force_uniform"]
def __init__(
self,
search_space,
rollout_type,
mode="eval",
device="cuda",
use_prob=False,
gumbel_hard=False,
gumbel_temperature=1.0,
use_sigmoid=True,
use_edge_normalization=False,
entropy_coeff=0.01,
max_grad_norm=None,
force_uniform=False,
full_init=False,
progressive_pruning_th=None,
multiprocess=False,
schedule_cfg=None,
):
super(MicroDenseDiffController, self).__init__(
search_space, rollout_type, schedule_cfg=schedule_cfg
)
nn.Module.__init__(self)
self.device = device
# sampling
self.use_prob = use_prob
self.use_sigmoid = use_sigmoid
self.gumbel_hard = gumbel_hard
self.gumbel_temperature = gumbel_temperature
assert not (use_prob and use_sigmoid)
# edge normalization
self.use_edge_normalization = use_edge_normalization
# training
self.entropy_coeff = entropy_coeff
self.max_grad_norm = max_grad_norm
self.force_uniform = force_uniform
self.full_init = full_init
self.progressive_pruning_th = progressive_pruning_th
self.multiprocess = multiprocess
_num_init_nodes = self.search_space.num_init_nodes
_num_edges_list = [
sum(
_num_init_nodes + i
for i in range(self.search_space.get_num_steps(i_cg))
)
for i_cg in range(self.search_space.num_cell_groups)
]
if not self.full_init:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
1e-3
* torch.randn(
_num_edges,
len(self.search_space.cell_shared_primitives[i_cg]),
)
) # shape: [num_edges, num_ops]
for i_cg, _num_edges in enumerate(_num_edges_list)
]
)
else:
self.cg_alphas = nn.ParameterList(
[
nn.Parameter(
1
* torch.ones(
_num_edges,
len(self.search_space.cell_shared_primitives[i_cg]),
)
) # shape: [num_edges, num_ops]
for i_cg, _num_edges in enumerate(_num_edges_list)
]
)
if self.use_edge_normalization:
raise NotImplementedError("MicroDenseController does not support edge-norm")
else:
self.cg_betas = None
self.get_arch = GetArchMicro()
self.to(self.device)
def set_mode(self, mode):
super(MicroDenseDiffController, self).set_mode(mode)
if mode == "train":
nn.Module.train(self)
elif mode == "eval":
nn.Module.eval(self)
else:
raise Exception("Unrecognized mode: {}".format(mode))
def set_device(self, device):
self.device = device
self.to(device)
def progressive_pruning(self):
for alpha in self.cg_alphas:
            # in-place: zero out alphas at or below the pruning threshold (no grad tracked)
alpha.data = alpha * (alpha.gt(self.progressive_pruning_th).float())
def forward(self, n=1, batch_size=1): # pylint: disable=arguments-differ
return self.sample(n=n, batch_size=batch_size)
def sample(self, n=1, batch_size=1):
if self.progressive_pruning_th is not None:
self.progressive_pruning()
rollouts = []
for _ in range(n):
# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
op_weights_list = []
edge_norms_list = []
sampled_list = []
logits_list = []
for alphas in self.cg_alphas:
if self.force_uniform: # cg_alpha parameters will not be in the graph
                    # NOTE: the `force_uniform` config does not affect edge_norms (betas);
# if one wants a force_uniform search, keep `use_edge_normalization=False`
alphas = torch.zeros_like(alphas)
if batch_size > 1:
expanded_alpha = (
alphas.reshape([alphas.shape[0], 1, alphas.shape[1]])
.repeat([1, batch_size, 1])
.reshape([-1, alphas.shape[-1]])
)
else:
expanded_alpha = alphas
if self.use_prob:
# probability as sample
sampled = F.softmax(
expanded_alpha / self.gumbel_temperature, dim=-1
)
elif self.use_sigmoid:
sampled = utils.relaxed_bernoulli_sample(
expanded_alpha, self.gumbel_temperature
)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(
expanded_alpha, self.gumbel_temperature, hard=False
)
if self.gumbel_hard:
op_weights = utils.straight_through(sampled)
else:
op_weights = sampled
if batch_size > 1:
sampled = sampled.reshape([-1, batch_size, op_weights.shape[-1]])
op_weights = op_weights.reshape(
[-1, batch_size, op_weights.shape[-1]]
)
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampled))
# logits_list.append(utils.get_numpy(alphas))
logits_list.append((alphas))
if self.use_edge_normalization:
raise NotImplementedError
else:
arch_list = []
logits_arch_list = []
for op_weights in op_weights_list:
arch = self.get_arch.apply(
self.search_space, op_weights, self.device
)
arch_list.append(arch)
for logits in logits_list:
logits_arch = self.get_arch.apply(
self.search_space, logits, self.device
)
logits_arch_list.append(logits_arch)
rollouts.append(
DenseMicroDiffRollout(
arch_list,
sampled_list,
logits_list,
logits_arch_list,
search_space=self.search_space,
)
)
return rollouts
def save(self, path):
"""Save the parameters to disk."""
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
def load(self, path):
"""Load the parameters from disk."""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
def _entropy_loss(self):
if self.entropy_coeff > 0:
probs = [F.softmax(alpha, dim=-1) for alpha in self.cg_alphas]
return self.entropy_coeff * sum(
-(torch.log(prob) * prob).sum() for prob in probs
)
return 0.0
def gradient(self, loss, return_grads=True, zero_grads=True):
raise NotImplementedError(
"the grad function is implemented in the layer2diffcontroller.gradient()"
)
def step_current_gradient(self, optimizer):
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
optimizer.step()
def step_gradient(self, gradients, optimizer):
self.zero_grad()
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
# clip the gradients
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
# apply the gradients
optimizer.step()
    def step(self, rollouts, optimizer, perf_name):  # very memory inefficient
        self.zero_grad()
        losses = [r.get_perf(perf_name) for r in rollouts]
        # accumulate gradients from all rollouts before applying the optimizer step
        [l.backward() for l in losses]
        optimizer.step()
        return np.mean([l.detach().cpu().numpy() for l in losses])
def __getstate__(self):
state = super(MicroDenseDiffController, self).__getstate__().copy()
del state["get_arch"]
return state
def __setstate__(self, state):
super(MicroDenseDiffController, self).__setstate__(state)
self.get_arch = GetArchMicro()
def summary(self, rollouts, log=False, log_prefix="", step=None):
num = len(rollouts)
logits_list = [
[utils.get_numpy(logits) for logits in r.logits] for r in rollouts
]
_ss = self.search_space
if self.gumbel_hard:
cg_logprobs = [0.0 for _ in range(_ss.num_cell_groups)]
cg_entros = [0.0 for _ in range(_ss.num_cell_groups)]
for rollout, logits in zip(rollouts, logits_list):
for cg_idx, (vec, cg_logits) in enumerate(zip(rollout.arch, logits)):
prob = utils.softmax(cg_logits)
logprob = np.log(prob)
if self.gumbel_hard:
inds = np.argmax(utils.get_numpy(vec), axis=-1)
cg_logprobs[cg_idx] += np.sum(logprob[range(len(inds)), inds])
cg_entros[cg_idx] += -(prob * logprob).sum()
# mean across rollouts
if self.gumbel_hard:
cg_logprobs = [s / num for s in cg_logprobs]
total_logprob = sum(cg_logprobs)
cg_logprobs_str = ",".join(["{:.2f}".format(n) for n in cg_logprobs])
cg_entros = [s / num for s in cg_entros]
total_entro = sum(cg_entros)
cg_entro_str = ",".join(["{:.2f}".format(n) for n in cg_entros])
if log:
# maybe log the summary
self.logger.info(
"%s%d rollouts: %s ENTROPY: %2f (%s)",
log_prefix,
num,
"-LOG_PROB: %.2f (%s) ;" % (-total_logprob, cg_logprobs_str)
if self.gumbel_hard
else "",
total_entro,
cg_entro_str,
)
if step is not None and not self.writer.is_none():
if self.gumbel_hard:
self.writer.add_scalar("log_prob", total_logprob, step)
self.writer.add_scalar("entropy", total_entro, step)
stats = [
(n + " ENTRO", entro) for n, entro in zip(_ss.cell_group_names, cg_entros)
]
if self.gumbel_hard:
stats += [
(n + " LOGPROB", logprob)
for n, logprob in zip(_ss.cell_group_names, cg_logprobs)
]
return OrderedDict(stats)
@classmethod
def supported_rollout_types(cls):
return ["micro-dense", "micro-dense-diff"]
|
tg/configuration/mongo/auth.py | sergiobrr/tg2 | 812 | 12733392 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from tgming.auth import MingAuthenticatorPlugin
def create_default_authenticator(user_class, translations=None, **unused):
mingauth = MingAuthenticatorPlugin(user_class)
return unused, mingauth
|
tools/polly/bin/detail/timer.py | Kondr11/LABA7 | 861 | 12733414 | <reponame>Kondr11/LABA7
# Copyright (c) 2015, <NAME>
# All rights reserved.
import datetime
import sys
import time
perf_counter_available = (sys.version_info >= (3, 3))
class Job:
def __init__(self, job_name):
if perf_counter_available:
self.start = time.perf_counter()
else:
self.start = 0
self.job_name = job_name
self.stopped = False
def stop(self):
if self.stopped:
sys.exit('Already stopped')
self.stopped = True
if perf_counter_available:
self.total = time.perf_counter() - self.start
else:
self.total = 0
def result(self):
if not self.stopped:
sys.exit("Stop the job before result")
print(
'{}: {}s'.format(self.job_name, datetime.timedelta(seconds=self.total))
)
class Timer:
def __init__(self):
self.jobs = []
self.total = Job('Total')
def start(self, job_name):
if job_name == 'Total':
sys.exit('Name reserved')
for i in self.jobs:
if i.job_name == job_name:
sys.exit('Job already exists: {}'.format(job_name))
self.jobs.append(Job(job_name))
def stop(self):
if len(self.jobs) == 0:
sys.exit("No jobs to stop")
self.jobs[-1].stop()
def result(self):
if not perf_counter_available:
print('timer.perf_counter is not available (update to python 3.3+)')
return
for i in self.jobs:
i.result()
print('-')
self.total.stop()
self.total.result()
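# Illustrative usage sketch (assumes Python 3.3+, so time.perf_counter is available):
if __name__ == '__main__':
    timer = Timer()
    timer.start('example job')
    time.sleep(0.05)
    timer.stop()
    timer.result()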
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_8077_binomial_alpha.py | MikolajBak/h2o-3 | 6,098 | 12733417 | <reponame>MikolajBak/h2o-3
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from tests import pyunit_utils
# test binomial family with generate_scoring_history on, alpha arrays, lambda search on and off
def test_binomial_alpha():
training_data = h2o.import_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
Y = 3
X = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
# test with lambda search on, generate_scoring_history on and off
model1 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=True,
generate_scoring_history=True)
model1.train(x=X, y=Y, training_frame=training_data)
model2 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=True,
generate_scoring_history=True)
model2.train(x=X, y=Y, training_frame=training_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
# test with lambda search off, generate_scoring_history on and off
model1 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=False,
generate_scoring_history=True, Lambda=[0, 0.1, 0.01, 0.001])
model1.train(x=X, y=Y, training_frame=training_data)
model2 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=False,
generate_scoring_history=True, Lambda=[0, 0.1, 0.01, 0.001])
model2.train(x=X, y=Y, training_frame=training_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
# test with lambda search on, generate_scoring_history on and off, cv on
model1 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=True,
generate_scoring_history=True, nfolds=2, seed=12345)
model1.train(x=X, y=Y, training_frame=training_data)
model2 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=True,
generate_scoring_history=True, nfolds=2, seed=12345)
model2.train(x=X, y=Y, training_frame=training_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
# test with lambda search off, generate_scoring_history on and off, cv on
model1 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=False,
generate_scoring_history=True, nfolds=2, seed=12345,
Lambda=[0, 0.1, 0.01, 0.001])
model1.train(x=X, y=Y, training_frame=training_data)
model2 = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0,0.2,0.5,0.8,1], lambda_search=False,
generate_scoring_history=True, nfolds=2, seed=12345,
Lambda=[0, 0.1, 0.01, 0.001])
model2.train(x=X, y=Y, training_frame=training_data)
pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())
if __name__ == "__main__":
pyunit_utils.standalone_test(test_binomial_alpha)
else:
test_binomial_alpha()
|
hiddeneye_reborn/core/config.py | Devil4333/HiddenEye | 582 | 12733446 | import yaml
# TODO: you know, work on it...
|
quspin/basis/_reshape_subsys.py | anton-buyskikh/QuSpin | 195 | 12733449 | import numpy as _np
import scipy.sparse as _sp
from ._basis_utils import _shuffle_sites
####################################################
# set of helper functions to implement the partial #
# trace of lattice density matrices. They do not #
# have any checks and states are assumed to be #
# in the non-symmetry reduced basis. #
####################################################
def _lattice_partial_trace_pure(psi,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a dense pure state psi over set of sites sub_sys_A and returns
reduced DM. Vectorisation available.
"""
psi_v=_lattice_reshape_pure(psi,sub_sys_A,L,sps)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
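# Shape sketch (assumed conventions): for L sites with sps states per site and
# len(sub_sys_A) = L_A, a pure state psi of shape (..., sps**L) yields a reduced
# density matrix for subsystem "A" of shape (..., sps**L_A, sps**L_A) after squeezing.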
def _lattice_partial_trace_mixed(rho,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a set of dense mixed states rho over set of sites sub_sys_A
and returns reduced DM. Vectorisation available.
"""
rho_v=_lattice_reshape_mixed(rho,sub_sys_A,L,sps)
if return_rdm == "A":
return _np.einsum("...jlkl->...jk",rho_v),None
elif return_rdm == "B":
return None,_np.einsum("...ljlk->...jk",rho_v.conj())
elif return_rdm == "both":
return _np.einsum("...jlkl->...jk",rho_v),_np.einsum("...ljlk->...jk",rho_v.conj())
def _lattice_partial_trace_sparse_pure(psi,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a sparse pure state psi over set of sites sub_sys_A and returns
reduced DM.
"""
psi=_lattice_reshape_sparse_pure(psi,sub_sys_A,L,sps)
if return_rdm == "A":
return psi.dot(psi.H),None
elif return_rdm == "B":
return None,psi.H.dot(psi)
elif return_rdm == "both":
return psi.dot(psi.H),psi.H.dot(psi)
def _lattice_reshape_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = psi.shape[:-1]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
T_tup = sub_sys_A+sub_sys_B
psi_v = _shuffle_sites(sps,T_tup,psi)
psi_v = psi_v.reshape(extra_dims+(Ns_A,Ns_B))
return psi_v
'''
def _lattice_reshape_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = psi.shape[:-1]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(n_dims + s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(L))
psi_v = psi.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
psi_v = psi_v.transpose(T_tup) # take transpose to reshuffle indices
psi_v = psi_v.reshape(extra_dims+(Ns_A,Ns_B))
return psi_v
'''
def _lattice_reshape_mixed(rho,sub_sys_A,L,sps):
"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = rho.shape[:-2]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
# T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-_tensor rho_{ik,jl} i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(T_tup) + tuple(L+s for s in T_tup)
rho = rho.reshape(extra_dims+(-1,))
rho_v = _shuffle_sites(sps,T_tup,rho)
return rho_v.reshape(extra_dims+(Ns_A,Ns_B,Ns_A,Ns_B))
'''
def _lattice_reshape_mixed(rho,sub_sys_A,L,sps):
"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = rho.shape[:-2]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
# T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-_tensor rho_{ik,jl} i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(s+n_dims for s in T_tup) + tuple(L+n_dims+s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(2*L))
rho_v = rho.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
rho_v = rho_v.transpose(T_tup) # take transpose to reshuffle indices
return rho_v.reshape(extra_dims+(Ns_A,Ns_B,Ns_A,Ns_B))
'''
def _lattice_reshape_sparse_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the sparse pure state psi over the Hilbert space defined by sub_sys_A and its complement.
"""
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
psi = psi.tocoo()
T_tup = sub_sys_A+sub_sys_B
# reshuffle indices for the sub-systems.
# j = sum( j[i]*(sps**i) for i in range(L))
# this reshuffles the j[i] similar to the transpose operation
# on the dense arrays psi_v.transpose(T_tup)
if T_tup != tuple(range(L)):
indx = _np.zeros(psi.col.shape,dtype=psi.col.dtype)
for i_old,i_new in enumerate(T_tup):
indx += ((psi.col//(sps**(L-i_new-1))) % sps)*(sps**(L-i_old-1))
else:
indx = psi.col
# A = _np.array([0,1,2,3,4,5,6,7,8,9,10,11])
# print("make shift way of reshaping array")
# print("A = {}".format(A))
# print("A.reshape((3,4)): \n {}".format(A.reshape((3,4))))
# print("rows: A.reshape((3,4))/4: \n {}".format(A.reshape((3,4))/4))
# print("cols: A.reshape((3,4))%4: \n {}".format(A.reshape((3,4))%4))
psi._shape = (Ns_A,Ns_B)
psi.row[:] = indx / Ns_B
psi.col[:] = indx % Ns_B
return psi.tocsr()
def _tensor_reshape_pure(psi,sub_sys_A,Ns_l,Ns_r):
extra_dims = psi.shape[:-1]
if sub_sys_A == "left":
return psi.reshape(extra_dims+(Ns_l,Ns_r))
else:
n_dims = len(extra_dims)
T_tup = tuple(range(n_dims))+(n_dims+1,n_dims)
psi_v = psi.reshape(extra_dims+(Ns_l,Ns_r))
return psi_v.transpose(T_tup)
def _tensor_reshape_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r):
psi = psi.tocoo()
# make shift way of reshaping array
# j = j_l + Ns_r * j_l
# j_l = j / Ns_r
# j_r = j % Ns_r
if sub_sys_A == "left":
psi._shape = (Ns_l,Ns_r)
psi.row[:] = psi.col / Ns_r
psi.col[:] = psi.col % Ns_r
return psi.tocsr()
else:
psi._shape = (Ns_l,Ns_r)
psi.row[:] = psi.col / Ns_r
psi.col[:] = psi.col % Ns_r
return psi.T.tocsr()
def _tensor_reshape_mixed(rho,sub_sys_A,Ns_l,Ns_r):
extra_dims = rho.shape[:-2]
if sub_sys_A == "left":
return rho.reshape(extra_dims+(Ns_l,Ns_r,Ns_l,Ns_r))
else:
n_dims = len(extra_dims)
T_tup = tuple(range(n_dims))+(n_dims+1,n_dims)+(n_dims+3,n_dims+2)
rho_v = rho.reshape(extra_dims+(Ns_l,Ns_r,Ns_l,Ns_r))
return rho_v.transpose(T_tup)
def _tensor_partial_trace_pure(psi,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
psi_v = _tensor_reshape_pure(psi,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
def _tensor_partial_trace_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
psi = _tensor_reshape_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return psi.dot(psi.H),None
elif return_rdm == "B":
return None,psi.H.dot(psi)
elif return_rdm == "both":
return psi.dot(psi.H),psi.H.dot(psi)
def _tensor_partial_trace_mixed(rho,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
rho_v = _tensor_reshape_mixed(rho,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ijkj->...ik",rho_v)),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...jijk->...ik",rho_v.conj()))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ijkj->...ik",rho_v)),_np.squeeze(_np.einsum("...jijk->...ik",rho_v.conj()))
|
src/peering/azext_peering/generated/custom.py | Mannan2812/azure-cli-extensions | 207 | 12733459 | <gh_stars>100-1000
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines, unused-argument
import json
def peering_legacy_list(cmd, client,
peering_location,
kind,
asn=None):
return client.list(peering_location=peering_location,
kind=kind,
asn=asn)
def peering_asn_list(cmd, client):
return client.list_by_subscription()
def peering_asn_show(cmd, client,
peer_asn_name):
return client.get(peer_asn_name=peer_asn_name)
def peering_asn_create(cmd, client,
peer_asn_name,
peer_asn=None,
peer_contact_detail=None,
peer_name=None,
validation_state=None):
return client.create_or_update(peer_asn_name=peer_asn_name,
peer_asn=peer_asn,
peer_contact_detail=peer_contact_detail,
peer_name=peer_name,
validation_state=validation_state)
def peering_asn_delete(cmd, client,
peer_asn_name):
return client.delete(peer_asn_name=peer_asn_name)
def peering_location_list(cmd, client,
kind,
direct_peering_type=None):
return client.list(kind=kind,
direct_peering_type=direct_peering_type)
def peering_registered_asn_list(cmd, client,
resource_group_name,
peering_name):
return client.list_by_peering(resource_group_name=resource_group_name,
peering_name=peering_name)
def peering_registered_asn_show(cmd, client,
resource_group_name,
peering_name,
registered_asn_name):
return client.get(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_asn_name=registered_asn_name)
def peering_registered_asn_create(cmd, client,
resource_group_name,
peering_name,
registered_asn_name,
asn=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_asn_name=registered_asn_name,
asn=asn)
def peering_registered_asn_update(cmd, client,
resource_group_name,
peering_name,
registered_asn_name,
asn=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_asn_name=registered_asn_name,
asn=asn)
def peering_registered_asn_delete(cmd, client,
resource_group_name,
peering_name,
registered_asn_name):
return client.delete(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_asn_name=registered_asn_name)
def peering_registered_prefix_list(cmd, client,
resource_group_name,
peering_name):
return client.list_by_peering(resource_group_name=resource_group_name,
peering_name=peering_name)
def peering_registered_prefix_show(cmd, client,
resource_group_name,
peering_name,
registered_prefix_name):
return client.get(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_prefix_name=registered_prefix_name)
def peering_registered_prefix_create(cmd, client,
resource_group_name,
peering_name,
registered_prefix_name,
prefix=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_prefix_name=registered_prefix_name,
prefix=prefix)
def peering_registered_prefix_update(cmd, client,
resource_group_name,
peering_name,
registered_prefix_name,
prefix=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_prefix_name=registered_prefix_name,
prefix=prefix)
def peering_registered_prefix_delete(cmd, client,
resource_group_name,
peering_name,
registered_prefix_name):
return client.delete(resource_group_name=resource_group_name,
peering_name=peering_name,
registered_prefix_name=registered_prefix_name)
def peering_peering_list(cmd, client,
resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list_by_subscription()
def peering_peering_show(cmd, client,
resource_group_name,
peering_name):
return client.get(resource_group_name=resource_group_name,
peering_name=peering_name)
def peering_peering_create(cmd, client,
resource_group_name,
peering_name,
sku,
kind,
location,
tags=None,
direct=None,
exchange=None,
peering_location=None):
if isinstance(direct, str):
direct = json.loads(direct)
if isinstance(exchange, str):
exchange = json.loads(exchange)
return client.create_or_update(resource_group_name=resource_group_name,
peering_name=peering_name,
sku=sku,
kind=kind,
location=location,
tags=tags,
direct=direct,
exchange=exchange,
peering_location=peering_location)
def peering_peering_update(cmd, client,
resource_group_name,
peering_name,
tags=None):
return client.update(resource_group_name=resource_group_name,
peering_name=peering_name,
tags=tags)
def peering_peering_delete(cmd, client,
resource_group_name,
peering_name):
return client.delete(resource_group_name=resource_group_name,
peering_name=peering_name)
def peering_received_route_list(cmd, client,
resource_group_name,
peering_name,
prefix=None,
as_path=None,
origin_as_validation_state=None,
rpki_validation_state=None,
skip_token=None):
return client.list_by_peering(resource_group_name=resource_group_name,
peering_name=peering_name,
prefix=prefix,
as_path=as_path,
origin_as_validation_state=origin_as_validation_state,
rpki_validation_state=rpki_validation_state,
skip_token=skip_token)
def peering_service_country_list(cmd, client):
return client.list()
def peering_service_location_list(cmd, client,
country=None):
return client.list(country=country)
def peering_service_prefix_list(cmd, client,
resource_group_name,
peering_service_name,
expand=None):
return client.list_by_peering_service(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
expand=expand)
def peering_service_prefix_show(cmd, client,
resource_group_name,
peering_service_name,
prefix_name,
expand=None):
return client.get(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name,
expand=expand)
def peering_service_prefix_create(cmd, client,
resource_group_name,
peering_service_name,
prefix_name,
prefix=None,
peering_service_prefix_key=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name,
prefix=prefix,
peering_service_prefix_key=peering_service_prefix_key)
def peering_service_prefix_update(cmd, client,
resource_group_name,
peering_service_name,
prefix_name,
prefix=None,
peering_service_prefix_key=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name,
prefix=prefix,
peering_service_prefix_key=peering_service_prefix_key)
def peering_service_prefix_delete(cmd, client,
resource_group_name,
peering_service_name,
prefix_name):
return client.delete(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name)
def peering_service_provider_list(cmd, client):
return client.list()
def peering_service_list(cmd, client,
resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list_by_subscription()
def peering_service_show(cmd, client,
resource_group_name,
peering_service_name):
return client.get(resource_group_name=resource_group_name,
peering_service_name=peering_service_name)
def peering_service_create(cmd, client,
resource_group_name,
peering_service_name,
location,
sku=None,
tags=None,
peering_service_location=None,
peering_service_provider=None):
return client.create_or_update(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
sku=sku,
location=location,
tags=tags,
peering_service_location=peering_service_location,
peering_service_provider=peering_service_provider)
def peering_service_update(cmd, client,
resource_group_name,
peering_service_name,
tags=None):
return client.update(resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
tags=tags)
def peering_service_delete(cmd, client,
resource_group_name,
peering_service_name):
return client.delete(resource_group_name=resource_group_name,
peering_service_name=peering_service_name)
|
pydantic_factories/constraints/constrained_float_handler.py | lyz-code/pydantic-factories | 163 | 12733547 | from pydantic import ConstrainedFloat
from pydantic_factories.value_generators.constrained_number import (
generate_constrained_number,
get_constrained_number_range,
)
from pydantic_factories.value_generators.primitives import create_random_float
def handle_constrained_float(field: ConstrainedFloat) -> float:
"""
Handles 'ConstrainedFloat' instances
"""
multiple_of = field.multiple_of
if multiple_of == 0:
return 0
minimum, maximum = get_constrained_number_range(
gt=field.gt, ge=field.ge, lt=field.lt, le=field.le, t_type=float, multiple_of=multiple_of
)
return generate_constrained_number(
minimum=minimum,
maximum=maximum,
multiple_of=multiple_of,
method=create_random_float,
)
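# Rough usage sketch (assumes pydantic v1's `confloat` constrained-type factory):
if __name__ == "__main__":
    from pydantic import confloat
    value = handle_constrained_float(confloat(ge=0.5, lt=10.0, multiple_of=0.5))
    print(value)  # a float in [0.5, 10.0) that is a multiple of 0.5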
|
plenum/test/plugin/conftest.py | andkononykhin/plenum | 148 | 12733588 | OPERATION_VALIDATION_PLUGIN_PATH_VALUE = "operation_verification"
AUCTION_REQ_VALIDATION_PLUGIN_PATH_VALUE = "auction_req_validation"
AUCTION_REQ_PROCESSOR_PLUGIN_PATH_VALUE = "auction_req_processor"
BANK_REQ_VALIDATION_PLUGIN_PATH_VALUE = "bank_req_validation"
BANK_REQ_PROCESSOR_PLUGIN_PATH_VALUE = "bank_req_processor"
STATS_CONSUMER_PLUGIN_PATH_VALUE = "stats_consumer"
|
String/1417. Reformat The String.py | beckswu/Leetcode | 138 | 12733612 | class Solution:
def reformat(self, s: str) -> str:
str_list = []
digit_list = []
for i in s:
if i.isdigit():
digit_list += i,
elif i.isalpha():
str_list += i,
if abs( len(str_list) - len(digit_list)) > 1:
return ""
res = ""
if len(str_list) < len(digit_list):
str_list, digit_list = digit_list, str_list #str_list always bigger than digit_list
for i, j in zip(str_list,digit_list):
res += i + j
return res + ("" if len(str_list) == len(digit_list) else str_list[-1])
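# Illustrative sanity check for the solution above (expected behavior assumed from the
# problem statement: letters and digits must alternate, otherwise return ""):
if __name__ == "__main__":
    assert Solution().reformat("leetcode") == ""
    out = Solution().reformat("a0b1c2")
    assert all(out[i].isdigit() != out[i + 1].isdigit() for i in range(len(out) - 1))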
import collections
class Solution(object):
def reformat(self, s):
"""
:type s: str
:rtype: str
"""
def char_gen(start, end, count):
for c in range(ord(start), ord(end)+1):
c = chr(c)
for i in range(count[c]):
yield c
yield ''
count = collections.defaultdict(int)
alpha_cnt = 0
for c in s:
count[c] += 1
if c.isalpha():
alpha_cnt += 1
if abs(len(s)-2*alpha_cnt) > 1:
return ""
result = []
it1, it2 = char_gen('a', 'z', count), char_gen('0', '9', count)
if alpha_cnt < len(s)-alpha_cnt:
it1, it2 = it2, it1
while len(result) < len(s):
result.append(next(it1))
result.append(next(it2))
return "".join(result)
class Solution:
def reformat(self, s: str) -> str:
a, b = [], []
for c in s:
if 'a' <= c <= 'z':
a.append(c)
else:
b.append(c)
if len(a) < len(b):
a, b = b, a
if len(a) - len(b) >= 2:
return ''
ans = ''
for i in range(len(a)+len(b)):
if i % 2 == 0:
ans += a[i//2]
else:
ans += b[i//2]
        return ans
|
tests/unit/compiler/__init__.py | gamechanger/dusty | 421 | 12733618 | import os
from pkg_resources import resource_isdir, resource_listdir, resource_string
import yaml
from nose.tools import nottest
from dusty import constants
from dusty.compiler.spec_assembler import get_specs_from_path
@nottest
def get_all_test_configs():
return resource_listdir(__name__, 'test_configs')
@nottest
def resources_for_test_config(test_config):
resources = {}
for key in [constants.CONFIG_BUNDLES_KEY, 'apps', 'libs', 'services']:
key_path = 'test_configs/{}/{}'.format(test_config, key)
if resource_isdir(__name__, key_path):
resources[key] = {resource_name: resource_string(__name__, '{}/{}'.format(key_path, resource_name))
for resource_name in resource_listdir(__name__, key_path)}
return resources
@nottest
def specs_for_test_config(test_config):
case_path = '{}/test_configs/{}/'.format(__path__[0], test_config)
return get_specs_from_path(case_path)
@nottest
def assembled_specs_for_test_config(test_config):
assembled_file = "{}/test_configs/{}/assembled_spec.yml".format(__path__[0], test_config)
with open(assembled_file, 'r') as f:
return yaml.load(f.read())
@nottest
def nginx_config_for_test_config(test_config):
return resource_string(__name__, 'test_configs/{}/nginx.conf'.format(test_config))
@nottest
def docker_compose_yaml_for_test_config(test_config):
return resource_string(__name__, 'test_configs/{}/docker-compose.yml'.format(test_config))
|
plydata/tidy/__init__.py | has2k1/plydata | 247 | 12733630 | from .tidy_verbs import * # noqa
from .. import _get_all_imports
__all__ = _get_all_imports(globals())
|
src/collector/wechat/feddd_start.py | showthesunli/liuli | 139 | 12733679 | """
Created by leeorz.
Description:
    Collector:
        - WeChat official-account article collection based on src/collector/wechat_sougou/items/wechat_item.py
        - RSS parsing of feeddd feeds based on feedparser
Changelog: all notable changes to this file will be documented
"""
import asyncio
import feedparser
from ruia import Response, Spider
from ruia_ua import middleware as ua_middleware
from src.collector.utils import load_data_to_articlles
from src.collector.wechat.items import WechatItem
from src.processor import html_to_text_h2t
from src.utils.log import LOGGER
from src.utils.tools import md5_encryption
class WeiXinSpider(Spider):
    """Spider that scrapes WeChat official-account articles
    Args:
        collect_config (dict, optional): collector configuration
"""
name = "WeiXinSpider"
request_config = {"RETRIES": 3, "DELAY": 3, "TIMEOUT": 5}
concurrency = 10
wechat_name = ""
# aiohttp config
aiohttp_kwargs = {}
    async def parse(self, response: Response):
        """Parse the official-account article metadata"""
html = await response.text()
wechat_item: WechatItem = await WechatItem.get_item(html=html)
wechat_data = {
**wechat_item.results,
**{
"doc_id": md5_encryption(f"{wechat_item.doc_name}_{self.wechat_name}"),
"doc_keywords": "",
"doc_source_name": self.wechat_name,
"doc_link": response.url,
"doc_source": "liuli_wechat",
"doc_source_account_nick": wechat_item.doc_source_account_nick,
"doc_source_account_intro": wechat_item.doc_source_account_intro,
"doc_content": html_to_text_h2t(html),
"doc_html": "",
},
}
await asyncio.coroutine(load_data_to_articlles)(input_data=wechat_data)
def run(collect_config: dict):
    """Parse the RSS feeds, crawl their entries with WeiXinSpider, and persist the results
    Args:
        collect_config (dict, optional): collector configuration
"""
feeds_dict: dict = collect_config.get("feeds_dict")
feeds_name: list = list(feeds_dict)
delta_time = collect_config.get("delta_time", 3)
WeiXinSpider.request_config = {
"RETRIES": 3,
"DELAY": delta_time,
"TIMEOUT": 5,
}
for name in feeds_name:
WeiXinSpider.wechat_name = name
        LOGGER.info(f"RSS source {name}: {feeds_dict[name]}")
fd = feedparser.parse(feeds_dict[name])
urls = []
for entry in fd.entries:
LOGGER.info(entry.link)
urls.append(entry.link)
WeiXinSpider.start_urls = urls
WeiXinSpider.start(middleware=ua_middleware)
|
ai/models/detectron2/_functional/checkpointer.py | MattSkiff/aerial_wildlife_detection | 166 | 12733699 | <filename>ai/models/detectron2/_functional/checkpointer.py
'''
Subclass of Detectron2's Checkpointer, able to load
model states from an in-memory Python dict according
to states as saved in AIDE.
2020-21 <NAME>
'''
from detectron2.checkpoint import DetectionCheckpointer
from typing import List, Optional
class DetectionCheckpointerInMem(DetectionCheckpointer):
def loadFromObject(self, stateDict: dict, checkpointables: Optional[List[str]] = None) -> object:
'''
Customized routine that loads a model state dict
from an object, rather than a file path.
Most of the remaining code is just copied from
https://detectron2.readthedocs.io/_modules/fvcore/common/checkpoint.html#Checkpointer.load
'''
if stateDict is None or 'model' not in stateDict:
# nothing to load; return
return {}
incompatible = self._load_model(stateDict)
if (
incompatible is not None
        ):  # handle some existing subclasses that return None
self._log_incompatible_keys(incompatible)
for key in self.checkpointables if checkpointables is None else checkpointables:
if key in stateDict: # pyre-ignore
self.logger.info("Loading {} from state dict".format(key))
obj = self.checkpointables[key]
obj.load_state_dict(stateDict.pop(key)) # pyre-ignore
# return any further checkpoint data
        return stateDict
|
util/security/crypto.py | giuseppe/quay | 2,027 | 12733755 | <filename>util/security/crypto.py
import base64
from cryptography.fernet import Fernet, InvalidToken
def encrypt_string(string, key):
"""
Encrypts a string with the specified key.
The key must be 32 raw bytes.
"""
f = Fernet(key)
# Fernet() works only on byte objects. Convert the string to bytes.
unencrypted_bytes = string.encode()
encrypted_bytes = f.encrypt(unencrypted_bytes)
# Fernet() returns a byte object. Convert it to a string before returning.
encrypted_string = encrypted_bytes.decode()
return encrypted_string
def decrypt_string(string, key, ttl=None):
"""
Decrypts an encrypted string with the specified key.
The key must be 32 raw bytes.
"""
f = Fernet(key)
# Fernet() works only on byte objects. Convert the string to bytes before decrypting.
encrypted_bytes = string.encode() # str -> bytes
try:
decrypted_bytes = f.decrypt(encrypted_bytes, ttl=ttl)
except InvalidToken:
"""
From the the Cryptography's library documentation:
If the token is in any way invalid, this exception is raised.
A token may be invalid for a number of reasons: it is older than the
ttl, it is malformed, or it does not have a valid signature.
"""
return None # TODO(kmullins): Shall we log this case? Is it expected?
decrypted_string = decrypted_bytes.decode() # bytes -> str
return decrypted_string
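# Illustrative usage sketch: Fernet.generate_key() yields a url-safe base64-encoded
# 32-byte key suitable for the helpers above.
if __name__ == "__main__":
    demo_key = Fernet.generate_key()
    token = encrypt_string("hello", demo_key)
    assert decrypt_string(token, demo_key) == "hello"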
|
tools/traffic_annotation/scripts/traffic_annotation_auditor_tests.py | zipated/src | 2,151 | 12733763 | <reponame>zipated/src<filename>tools/traffic_annotation/scripts/traffic_annotation_auditor_tests.py
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests to ensure annotation tests are working as expected.
"""
import os
import argparse
import sys
import tempfile
from annotation_tools import NetworkTrafficAnnotationTools
# If this test starts failing, please set TEST_IS_ENABLED to "False" and file a
# bug to get this reenabled, and cc the people listed in
# //tools/traffic_annotation/OWNERS.
TEST_IS_ENABLED = True
class TrafficAnnotationTestsChecker():
def __init__(self, build_path=None):
"""Initializes a TrafficAnnotationTestsChecker object.
Args:
build_path: str Absolute or relative path to a fully compiled build
directory.
"""
self.tools = NetworkTrafficAnnotationTools(build_path)
def RunAllTests(self):
"""Runs all tests and returns the result."""
return self.CheckAuditorResults() and self.CheckOutputExpectations()
def CheckAuditorResults(self):
"""Runs auditor using different configurations, expecting to run error free,
and having equal results in the exported TSV file in all cases. The TSV file
provides a summary of all annotations and their content.
Returns:
bool True if all results are as expected.
"""
configs = [
["--test-only", "--error-resilient"], # Similar to trybot.
["--test-only"], # Failing on any runtime error.
["--test-only", "--no-filtering"] # Not using heuristic filtering.
]
last_result = None
for config in configs:
result = self._RunTest(config)
if not result:
print("No output for config: %s" % config)
return False
if last_result and last_result != result:
print("Unexpected different results for config: %s" % config)
return False
last_result = result
return True
def CheckOutputExpectations(self):
# TODO(https://crbug.com/690323): Add tests to check for an expected minimum
# number of items for each type of pattern that auditor extracts. E.g., we
# should have many annotations of each type (complete, partial, ...),
# functions that need annotations, direct assignment to mutable annotations,
# etc.
return True
def _RunTest(self, args):
"""Runs the auditor test with given |args|, and returns the extracted
annotations.
Args:
args: list of str Arguments to be passed to auditor.
Returns:
str Content of annotations.tsv file if successful, otherwise None.
"""
print("Running auditor using config: %s" % args)
temp_file = tempfile.NamedTemporaryFile()
temp_filename = temp_file.name
temp_file.close()
_, stderr_text, return_code = self.tools.RunAuditor(
args + ["--annotations-file=%s" % temp_filename])
if os.path.exists(temp_filename):
annotations = None if (return_code or stderr_text) \
else open(temp_filename).read()
os.remove(temp_filename)
else:
annotations = None
if annotations:
print("Test PASSED.")
else:
print("Test FAILED.\n%s" % stderr_text)
return annotations
def main():
if not TEST_IS_ENABLED:
return 0
parser = argparse.ArgumentParser(
description="Traffic Annotation Tests checker.")
parser.add_argument(
'--build-path',
help='Specifies a compiled build directory, e.g. out/Debug. If not '
'specified, the script tries to guess it. Will not proceed if not '
'found.')
args = parser.parse_args()
checker = TrafficAnnotationTestsChecker(args.build_path)
return 0 if checker.RunAllTests() else 1
if '__main__' == __name__:
    sys.exit(main())
|
setup.py | bcicen/docker-replay | 191 | 12733775 | from setuptools import setup
exec(open('docker_replay/version.py').read())
setup(name='docker-replay',
version=version,
packages=['docker_replay'],
description='Generate docker run commands from running containers',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/bcicen/docker-replay',
install_requires=['docker>=2.4.2'],
license='http://opensource.org/licenses/MIT',
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License ',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
),
keywords='docker docker-py devops',
entry_points = {
'console_scripts' : ['docker-replay = docker_replay:main']
}
)
|
menpo_functions.py | konatasick/face-of-art | 220 | 12733777 | <gh_stars>100-1000
import os
from scipy.io import loadmat
from menpo.shape.pointcloud import PointCloud
from menpo.transform import ThinPlateSplines
import menpo.transform as mt
import menpo.io as mio
from glob import glob
from deformation_functions import *
# landmark indices by facial feature
jaw_indices = np.arange(0, 17)
lbrow_indices = np.arange(17, 22)
rbrow_indices = np.arange(22, 27)
upper_nose_indices = np.arange(27, 31)
lower_nose_indices = np.arange(31, 36)
leye_indices = np.arange(36, 42)
reye_indices = np.arange(42, 48)
outer_mouth_indices = np.arange(48, 60)
inner_mouth_indices = np.arange(60, 68)
# flipped landmark indices
mirrored_parts_68 = np.hstack([
jaw_indices[::-1], rbrow_indices[::-1], lbrow_indices[::-1],
upper_nose_indices, lower_nose_indices[::-1],
np.roll(reye_indices[::-1], 4), np.roll(leye_indices[::-1], 4),
np.roll(outer_mouth_indices[::-1], 7),
np.roll(inner_mouth_indices[::-1], 5)
])
def load_bb_files(bb_file_dirs):
"""load bounding box mat file for challenging, common, full & training datasets"""
bb_files_dict = {}
for bb_file in bb_file_dirs:
bb_mat = loadmat(bb_file)['bounding_boxes']
num_imgs = np.max(bb_mat.shape)
for i in range(num_imgs):
name = bb_mat[0][i][0][0][0][0]
bb_init = bb_mat[0][i][0][0][1] - 1 # matlab indicies
bb_gt = bb_mat[0][i][0][0][2] - 1 # matlab indicies
if str(name) in bb_files_dict.keys():
print (str(name) + ' already exists')
else:
bb_files_dict[str(name)] = (bb_init, bb_gt)
return bb_files_dict
def load_bb_dictionary(bb_dir, mode, test_data='full'):
"""create bounding box dictionary of input dataset: train/common/full/challenging"""
if mode == 'TRAIN':
bb_dirs = \
['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
else:
if test_data == 'common':
bb_dirs = \
['bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
elif test_data == 'challenging':
bb_dirs = ['bounding_boxes_ibug.mat']
elif test_data == 'full':
bb_dirs = \
['bounding_boxes_ibug.mat', 'bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
elif test_data == 'training':
bb_dirs = \
['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
else:
bb_dirs = None
if mode == 'TEST' and test_data not in ['full', 'challenging', 'common', 'training']:
bb_files_dict = None
else:
bb_dirs = [os.path.join(bb_dir, dataset) for dataset in bb_dirs]
bb_files_dict = load_bb_files(bb_dirs)
return bb_files_dict
def center_margin_bb(bb, img_bounds, margin=0.25):
"""create new bounding box with input margin"""
bb_size = ([bb[0, 2] - bb[0, 0], bb[0, 3] - bb[0, 1]])
margins = (np.max(bb_size) * (1 + margin) - bb_size) / 2
bb_new = np.zeros_like(bb)
bb_new[0, 0] = np.maximum(bb[0, 0] - margins[0], 0)
bb_new[0, 2] = np.minimum(bb[0, 2] + margins[0], img_bounds[1])
bb_new[0, 1] = np.maximum(bb[0, 1] - margins[1], 0)
bb_new[0, 3] = np.minimum(bb[0, 3] + margins[1], img_bounds[0])
return bb_new
def crop_to_face_image(img, bb_dictionary=None, gt=True, margin=0.25, image_size=256, normalize=True,
return_transform=False):
"""crop face image using bounding box dictionary, or GT landmarks"""
name = img.path.name
img_bounds = img.bounds()[1]
# if there is no bounding-box dict and GT landmarks are available, use it to determine the bounding box
if bb_dictionary is None and img.has_landmarks:
grp_name = img.landmarks.group_labels[0]
bb_menpo = img.landmarks[grp_name].bounding_box().points
bb = np.array([[bb_menpo[0, 1], bb_menpo[0, 0], bb_menpo[2, 1], bb_menpo[2, 0]]])
elif bb_dictionary is not None:
if gt:
bb = bb_dictionary[name][1] # ground truth
else:
bb = bb_dictionary[name][0] # init from face detector
else:
bb = None
if bb is not None:
# add margin to bounding box
bb = center_margin_bb(bb, img_bounds, margin=margin)
bb_pointcloud = PointCloud(np.array([[bb[0, 1], bb[0, 0]],
[bb[0, 3], bb[0, 0]],
[bb[0, 3], bb[0, 2]],
[bb[0, 1], bb[0, 2]]]))
if return_transform:
face_crop, bb_transform = img.crop_to_pointcloud(bb_pointcloud, return_transform=True)
else:
face_crop = img.crop_to_pointcloud(bb_pointcloud)
else:
# if there is no bounding box/gt landmarks, use entire image
face_crop = img.copy()
bb_transform = None
# if face crop is not a square - pad borders with mean pixel value
h, w = face_crop.shape
diff = h - w
if diff < 0:
face_crop.pixels = np.pad(face_crop.pixels, ((0, 0), (0, -1 * diff), (0, 0)), 'mean')
elif diff > 0:
face_crop.pixels = np.pad(face_crop.pixels, ((0, 0), (0, 0), (0, diff)), 'mean')
if return_transform:
face_crop, rescale_transform = face_crop.resize([image_size, image_size], return_transform=True)
if bb_transform is None:
transform_chain = rescale_transform
else:
transform_chain = mt.TransformChain(transforms=(rescale_transform, bb_transform))
else:
face_crop = face_crop.resize([image_size, image_size])
if face_crop.n_channels == 4:
face_crop.pixels = face_crop.pixels[:3, :, :]
if normalize:
face_crop.pixels = face_crop.rescale_pixels(0., 1.).pixels
if return_transform:
return face_crop, transform_chain
else:
return face_crop
def augment_face_image(img, image_size=256, crop_size=248, angle_range=30, flip=True):
"""basic image augmentation: random crop, rotation and horizontal flip"""
# taken from MDM: https://github.com/trigeorgis/mdm
def mirror_landmarks_68(lms, im_size):
return PointCloud(abs(np.array([0, im_size[1]]) - lms.as_vector(
).reshape(-1, 2))[mirrored_parts_68])
# taken from MDM: https://github.com/trigeorgis/mdm
def mirror_image(im):
im = im.copy()
im.pixels = im.pixels[..., ::-1].copy()
for group in im.landmarks:
lms = im.landmarks[group]
if lms.points.shape[0] == 68:
im.landmarks[group] = mirror_landmarks_68(lms, im.shape)
return im
flip_rand = np.random.random() > 0.5
# rot_rand = np.random.random() > 0.5
# crop_rand = np.random.random() > 0.5
rot_rand = True # like ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
crop_rand = True # like ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
if crop_rand:
lim = image_size - crop_size
min_crop_inds = np.random.randint(0, lim, 2)
max_crop_inds = min_crop_inds + crop_size
img = img.crop(min_crop_inds, max_crop_inds)
if flip and flip_rand:
img = mirror_image(img)
if rot_rand:
rot_angle = 2 * angle_range * np.random.random_sample() - angle_range
img = img.rotate_ccw_about_centre(rot_angle)
img = img.resize([image_size, image_size])
return img
def augment_menpo_img_ns(img, img_dir_ns, p_ns=0.):
"""texture style image augmentation using stylized copies in *img_dir_ns*"""
img = img.copy()
if p_ns > 0.5:
ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '_ns*'))
num_augs = len(ns_augs)
if num_augs > 0:
ns_ind = np.random.randint(0, num_augs)
ns_aug = mio.import_image(ns_augs[ns_ind])
ns_pixels = ns_aug.pixels
img.pixels = ns_pixels
return img
def augment_menpo_img_geom(img, p_geom=0.):
"""geometric style image augmentation using random face deformations"""
img = img.copy()
if p_geom > 0.5:
grp_name = img.landmarks.group_labels[0]
lms_geom_warp = deform_face_geometric_style(img.landmarks[grp_name].points.copy(), p_scale=p_geom, p_shift=p_geom)
img = warp_face_image_tps(img, PointCloud(lms_geom_warp), grp_name)
return img
def warp_face_image_tps(img, new_shape, lms_grp_name='PTS', warp_mode='constant'):
"""warp image to new landmarks using TPS interpolation"""
tps = ThinPlateSplines(new_shape, img.landmarks[lms_grp_name])
try:
img_warp = img.warp_to_shape(img.shape, tps, mode=warp_mode)
img_warp.landmarks[lms_grp_name] = new_shape
return img_warp
except np.linalg.linalg.LinAlgError as err:
print ('Error:'+str(err)+'\nUsing original landmarks for:\n'+str(img.path))
return img
def load_menpo_image_list(
img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
augment_geom=False, p_geom=0, verbose=False, return_transform=False):
"""load images from image dir to create menpo-type image list"""
def crop_to_face_image_gt(img):
return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size,
return_transform=return_transform)
def crop_to_face_image_init(img):
return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size,
return_transform=return_transform)
def crop_to_face_image_test(img):
return crop_to_face_image(img, bb_dictionary=None, margin=margin, image_size=image_size,
return_transform=return_transform)
def augment_menpo_img_ns_rand(img):
return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture)[0])
def augment_menpo_img_geom_rand(img):
return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom)[0])
if mode is 'TRAIN':
if train_crop_dir is None:
img_set_dir = os.path.join(img_dir, 'training')
out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
if bb_type is 'gt':
out_image_list = out_image_list.map(crop_to_face_image_gt)
elif bb_type is 'init':
out_image_list = out_image_list.map(crop_to_face_image_init)
else:
img_set_dir = os.path.join(img_dir, train_crop_dir)
out_image_list = mio.import_images(img_set_dir, verbose=verbose)
# perform image augmentation
if augment_texture and p_texture > 0:
out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
if augment_geom and p_geom > 0:
out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
if augment_basic:
out_image_list = out_image_list.map(augment_face_image)
else: # if mode is 'TEST', load test data
if test_data in ['full', 'challenging', 'common', 'training', 'test']:
img_set_dir = os.path.join(img_dir, test_data)
out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
if bb_type is 'gt':
out_image_list = out_image_list.map(crop_to_face_image_gt)
elif bb_type is 'init':
out_image_list = out_image_list.map(crop_to_face_image_init)
else:
img_set_dir = os.path.join(img_dir, test_data)
out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
out_image_list = out_image_list.map(crop_to_face_image_test)
return out_image_list
|
heraspy/events.py | new-coast-ventures/hc-ai-hera | 368 | 12733802 | <reponame>new-coast-ventures/hc-ai-hera<gh_stars>100-1000
TRAIN_BEGIN = 'TRAIN_BEGIN'
TRAIN_END = 'TRAIN_END'
EPOCH_BEGIN = 'EPOCH_BEGIN'
EPOCH_END = 'EPOCH_END'
BATCH_BEGIN = 'BATCH_BEGIN'
BATCH_END = 'BATCH_END'
|
tests/test_context.py | Hirni-Meshram5/moderngl | 916 | 12733819 | from unittest import TestCase
import moderngl
import numpy
import platform
class ContextTests(TestCase):
def test_create_destroy(self):
"""Create and destroy a context"""
for _ in range(25):
ctx = moderngl.create_context(standalone=True)
ctx.release()
def test_context_switch(self):
"""Ensure context switching is working"""
ctx1 = moderngl.create_context(standalone=True)
ctx2 = moderngl.create_context(standalone=True)
with ctx1 as ctx:
buffer1 = ctx.buffer(reserve=1024)
with ctx2 as ctx:
buffer2 = ctx.buffer(reserve=1024)
self.assertEqual(buffer1.glo, buffer2.glo)
ctx1.release()
ctx2.release()
def test_exit(self):
"""Ensure the previous context was activated on exit"""
ctx1 = moderngl.create_context(standalone=True)
ctx2 = moderngl.create_context(standalone=True)
with ctx1 as ctx:
ctx.buffer(reserve=1024)
# Will error out if no context is active "moderngl.error.Error: cannot create buffer"
ctx1.buffer(reserve=1024)
ctx1.release()
ctx2.release()
def test_share(self):
"""Create resources with shared context"""
if platform.system().lower() in ["darwin", "linux"]:
            self.skipTest('Context sharing not supported on darwin/linux')
data1 = numpy.array([1, 2, 3, 4], dtype='u1')
data2 = numpy.array([4, 3, 2, 1], dtype='u1')
ctx1 = moderngl.create_context(standalone=True)
ctx2 = moderngl.create_context(standalone=True, share=True)
with ctx1 as ctx:
b1 = ctx.buffer(data=data1)
with ctx2 as ctx:
b2 = ctx.buffer(data=data2)
# Because the resources are shared the name should increment
self.assertEqual(b1.glo, 1)
self.assertEqual(b2.glo, 2)
# Ensure we can read the same buffer data in both contexts
with ctx1:
self.assertEqual(b1.read(), b'\x01\x02\x03\x04')
self.assertEqual(b2.read(), b'\x04\x03\x02\x01')
with ctx2:
self.assertEqual(b1.read(), b'\x01\x02\x03\x04')
self.assertEqual(b2.read(), b'\x04\x03\x02\x01')
ctx1.release()
ctx2.release()
def test_extensions(self):
ctx = moderngl.create_context(standalone=True)
# self.assertTrue("GL_ARB_vertex_array_object" in ctx.extensions)
# self.assertTrue("GL_ARB_transform_feedback2" in ctx.extensions)
# self.assertTrue("GL_ARB_shader_subroutine" in ctx.extensions)
self.assertIsInstance(ctx.extensions, set)
self.assertTrue(len(ctx.extensions) > 0)
ctx.release()
def test_attributes(self):
"""Ensure enums are present in the context instance"""
ctx = moderngl.create_context(standalone=True)
# Flags
self.assertIsInstance(ctx.NOTHING, int)
self.assertIsInstance(ctx.BLEND, int)
self.assertIsInstance(ctx.DEPTH_TEST, int)
self.assertIsInstance(ctx.CULL_FACE, int)
self.assertIsInstance(ctx.RASTERIZER_DISCARD, int)
self.assertIsInstance(ctx.PROGRAM_POINT_SIZE, int)
# Primitive modes
self.assertIsInstance(ctx.POINTS, int)
self.assertIsInstance(ctx.LINES, int)
self.assertIsInstance(ctx.LINE_LOOP, int)
self.assertIsInstance(ctx.LINE_STRIP, int)
self.assertIsInstance(ctx.TRIANGLES, int)
self.assertIsInstance(ctx.TRIANGLE_STRIP, int)
self.assertIsInstance(ctx.TRIANGLE_FAN, int)
self.assertIsInstance(ctx.LINES_ADJACENCY, int)
self.assertIsInstance(ctx.LINE_STRIP_ADJACENCY, int)
self.assertIsInstance(ctx.TRIANGLES_ADJACENCY, int)
self.assertIsInstance(ctx.TRIANGLE_STRIP_ADJACENCY, int)
self.assertIsInstance(ctx.PATCHES, int)
# Texture filters
self.assertIsInstance(ctx.LINEAR, int)
self.assertIsInstance(ctx.NEAREST, int)
self.assertIsInstance(ctx.NEAREST_MIPMAP_NEAREST, int)
self.assertIsInstance(ctx.LINEAR_MIPMAP_LINEAR, int)
self.assertIsInstance(ctx.LINEAR_MIPMAP_NEAREST, int)
self.assertIsInstance(ctx.NEAREST_MIPMAP_LINEAR, int)
# Blend functions
self.assertIsInstance(ctx.ZERO, int)
self.assertIsInstance(ctx.ONE, int)
self.assertIsInstance(ctx.SRC_COLOR, int)
self.assertIsInstance(ctx.ONE_MINUS_SRC_COLOR, int)
self.assertIsInstance(ctx.SRC_ALPHA, int)
self.assertIsInstance(ctx.ONE_MINUS_SRC_ALPHA, int)
self.assertIsInstance(ctx.DST_ALPHA, int)
self.assertIsInstance(ctx.ONE_MINUS_DST_ALPHA, int)
self.assertIsInstance(ctx.DST_COLOR, int)
self.assertIsInstance(ctx.ONE_MINUS_DST_COLOR, int)
# Blend shortcuts
self.assertIsInstance(ctx.DEFAULT_BLENDING, tuple)
self.assertIsInstance(ctx.ADDITIVE_BLENDING, tuple)
self.assertIsInstance(ctx.PREMULTIPLIED_ALPHA, tuple)
# Blend equations
self.assertIsInstance(ctx.FUNC_ADD, int)
self.assertIsInstance(ctx.FUNC_SUBTRACT, int)
self.assertIsInstance(ctx.FUNC_REVERSE_SUBTRACT, int)
self.assertIsInstance(ctx.MIN, int)
self.assertIsInstance(ctx.MAX, int)
# Provoking vertex
self.assertIsInstance(ctx.FIRST_VERTEX_CONVENTION, int)
self.assertIsInstance(ctx.LAST_VERTEX_CONVENTION, int)
def test_enable_direct(self):
ctx = moderngl.create_context(standalone=True)
ctx.error # consume error during initialization
# We already support this, but it's a safe value
GL_PROGRAM_POINT_SIZE = 0x8642
ctx.enable_direct(GL_PROGRAM_POINT_SIZE)
self.assertEqual(ctx.error, "GL_NO_ERROR")
ctx.disable_direct(GL_PROGRAM_POINT_SIZE)
self.assertEqual(ctx.error, "GL_NO_ERROR")
|
scripts/artifacts/appGrouplisting.py | xperylabhub/iLEAPP | 325 | 12733834 | <filename>scripts/artifacts/appGrouplisting.py
import biplist
import pathlib
import plistlib
import sys
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, is_platform_windows
def get_appGrouplisting(files_found, report_folder, seeker):
data_list = []
for file_found in files_found:
file_found = str(file_found)
with open(file_found, "rb") as fp:
if sys.version_info >= (3, 9):
plist = plistlib.load(fp)
else:
plist = biplist.readPlist(fp)
bundleid = plist['MCMMetadataIdentifier']
p = pathlib.Path(file_found)
appgroupid = p.parent.name
fileloc = str(p.parents[1])
typedir = str(p.parents[1].name)
data_list.append((bundleid, typedir, appgroupid, fileloc))
if len(data_list) > 0:
filelocdesc = 'Path column in the report'
        description = 'List can include once-installed but not present apps. Each file is named .com.apple.mobile_container_manager.metadata.plist'
report = ArtifactHtmlReport('Bundle ID by AppGroup & PluginKit IDs')
report.start_artifact_report(report_folder, 'Bundle ID by AppGroup & PluginKit IDs', description)
report.add_script()
data_headers = ('Bundle ID','Type','Directory GUID','Path')
report.write_artifact_data_table(data_headers, data_list, filelocdesc)
report.end_artifact_report()
tsvname = 'Bundle ID - AppGroup ID - PluginKit ID'
tsv(report_folder, data_headers, data_list, tsvname)
else:
logfunc('No data on Bundle ID - AppGroup ID - PluginKit ID')
|
src/python/loadGraph.py | neoremind/luceneutil | 164 | 12733837 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import cPickle
import responseTimeGraph
logPoints = ((50, 2),
(75, 4),
(90, 10),
(95, 20),
(97.5, 40),
(99, 100),
(99.9, 1000),
(99.99, 10000),
(99.999, 100000),
(99.9999, 1000000),
(99.99999, 10000000))
def graph(rowPoint, logsDir, warmupSec, names, fileName, maxQPS=None):
allQPS = set()
points = {}
for name in names:
reQPS = re.compile(r'^%s\.qps(\d+)$' % re.escape(name))
for f in os.listdir(logsDir):
m = reQPS.match(f)
resultsFile = '%s/%s/results.pk' % (logsDir, f)
if not os.path.exists(resultsFile):
resultsFile = '%s/%s/results.bin' % (logsDir, f)
if m is not None and os.path.exists(resultsFile):
qps = int(m.group(1))
if maxQPS is not None and qps > maxQPS:
print 'SKIPPING %s qps' % qps
continue
allQPS.add(qps)
results = responseTimeGraph.loadResults(resultsFile)
# Discard first warmupSec seconds:
upto = 0
while results[upto][0] < warmupSec:
upto += 1
results = results[upto:]
responseTimes = [x[2] for x in results]
responseTimes.sort()
if rowPoint == 'min':
t = responseTimes[0]
elif rowPoint == 'max':
t = responseTimes[-1]
else:
pct, minCount = logPoints[rowPoint]
if len(responseTimes) < minCount:
raise RuntimeError('%s doesn\'t have enough enough data' % name)
idx = int(((100.0-pct)/100.0)*len(responseTimes))
# TODO: should we take linear blend of the two points...? Else
# we have a sparseness problem...
t = responseTimes[-idx-1]
points[(name, qps)] = t
if sla is not None and t <= sla:
passesSLA.add(name)
qpsList = list(allQPS)
qpsList.sort()
cleanName = {'OracleCMS': 'CMS',
'OracleCMSMMap': 'CMS + MMap',
'OracleCMSMMapDir': 'CMS + MMap'}
print 'names: %s; cleaned %s' % (names, (', '.join("'%s'" % cleanName.get(x, x) for x in names)))
l = []
w = l.append
w("['QPS', %s],\n" % (', '.join("'%s'" % cleanName.get(x, x) for x in names)))
for qps in qpsList:
row = ['%d' % qps]
for name in names:
try:
s = '%.1f' % points[(name, qps)]
except KeyError:
s = ''
row.append(s)
w('[%s],\n' % ','.join(row))
if rowPoint == 'max':
p = 'Max'
elif rowPoint == 'min':
p = 'Min'
else:
p = '%g%%' % logPoints[rowPoint][0]
html = graphHeader + ''.join(l) + graphFooter % p
open(fileName, 'wb').write(html)
print ' saved %s' % fileName
graphHeader = '''<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
var data = google.visualization.arrayToDataTable([
'''
graphFooter = '''
]);
var options = {
title: '%s Response Time vs QPS',
hAxis: {'title': 'QPS'},
vAxis: {'title': 'Response Time (msec)'},
};
var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
chart.draw(data, options);
}
</script>
</head>
<body>
<div id="chart_div" style="width: 1200px; height: 600px;"></div>
</body>
</html>
'''
if __name__ == '__main__':
logsDir = sys.argv[1]
warmupSec = float(sys.argv[2])
reportDir = sys.argv[3]
for idx in xrange(len(logPoints)):
graph(idx, logsDir, warmupSec, sys.argv[4:], '%s/load%spct.html' % (reportDir, logPoints[idx][0]))
graph('max', logsDir, warmupSec, sys.argv[4:], '%s/loadmax.html' % reportDir)
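# Worked example of the percentile lookup above (illustrative): for rowPoint=2
# the logPoints entry is (90, 10), i.e. the 90th percentile needs at least 10
# samples. With 1000 sorted response times, idx = int(((100.0-90)/100.0)*1000)
# = 100, so t = responseTimes[-101], the response time that roughly 90% of
# queries stayed under.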
|
x2paddle/project_convertor/pytorch/torch2paddle/__init__.py | usertianqin/X2Paddle | 559 | 12733853 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .io import *
from .layer import *
from .tensor import *
from .optimizer import *
from .nn import *
from .nn_utils import *
from .nn_functional import *
from .nn_init import *
from .varbase import *
from .vision_transforms import *
from .device import *
from .vision_utils import *
from .vision_datasets import *
from .ops import *
from .learning_rate_scheduler import *
from .parambase import *
|
amazon-ml-hyperparameter-optimization/fold.py | meghanaravikumar/sigopt-examples | 213 | 12733871 | #!/usr/bin/env python
# Amazon Machine Learning Samples
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
import base64
import boto3
import json
import os
import logging
import config
logger = logging.getLogger(config.APP_NAME)
class Fold(object):
"""
This class represents a 'Fold' in K-fold Cross-validation. A fold
understands the ordinal number in its sequence of Fold objects,
so that it is able to draw the splitting range, and the splitting
complement flag for DataRearrangement string. The instance of a
fold can use Amazon ML's API to create an evaluation datasource,
and a training datasource.
"""
def __init__(self, data_spec=None, this_fold=None, kfolds=None):
"""
Construct this instance of Fold.
Args:
data_spec: the named tuple object that wraps dataset related
parameters.
this_fold: the integer number indicating the ordinal number
of this instance in its sequence of Fold objects.
kfolds: the integer number representing the number of folds.
"""
self.data_spec = data_spec
self.this_fold = this_fold
self.kfolds = kfolds
self.fold_ordinal = self.this_fold + 1 # fold_ordinal counting from 1
self.train_ds_id = None
self.train_ds_rearrange = None
self.eval_ds_id = None
self.eval_ds_rearrange = None
def build(self):
"""
Builds the necessary entities on Amazon ML.
"""
self._ml = boto3.client('machinelearning', region_name='us-east-1')
self.create_datasources()
def cleanup(self):
self._ml.delete_data_source(DataSourceId=self.train_ds_id)
logger.info("Deleted Training Datasource " + self.train_ds_id)
self._ml.delete_data_source(DataSourceId=self.eval_ds_id)
logger.info("Deleted Evaluation Datasource " + self.eval_ds_id)
def __str__(self):
"""
Returns the string representing this fold object. The string
includes the IDs of entities newly created on Amazon ML.
"""
return """\n\
Fold {fold_ordinal} of {kfolds}:
- Training Datasource ID: {train_ds_id}
- Evaluation Datasource ID: {eval_ds_id}""".format(**self.__dict__)
def create_datasources(self):
"""
Creates datasource for model training and evaluation on Amazon ML.
"""
# create training datasource for this fold
self.train_ds_id = "ds-" + base64.b32encode(os.urandom(10)).decode(
"ascii")
self.train_ds_rearrange = self.build_rearrangement_str(
is_complement=True)
self.train_ds_name = self.build_datasource_name(
self.data_spec.name, self.train_ds_rearrange)
self._ml.create_data_source_from_s3(
DataSourceId=self.train_ds_id,
DataSourceName=self.train_ds_name,
DataSpec=dict(
DataLocationS3=self.data_spec.data_s3_url,
DataSchema=self.data_spec.schema,
DataRearrangement=self.train_ds_rearrange,
),
ComputeStatistics=True,
)
logger.info("Created Training Datasource " + self.train_ds_id)
# create evaluation datasource for this fold
self.eval_ds_id = "ds-" + base64.b32encode(os.urandom(10)).decode(
"ascii")
self.eval_ds_rearrange = self.build_rearrangement_str(
is_complement=False)
self.eval_ds_name = self.build_datasource_name(
self.data_spec.name, self.eval_ds_rearrange)
self._ml.create_data_source_from_s3(
DataSourceId=self.eval_ds_id,
DataSourceName=self.eval_ds_name,
DataSpec=dict(
DataLocationS3=self.data_spec.data_s3_url,
DataSchema=self.data_spec.schema,
DataRearrangement=self.eval_ds_rearrange,
),
ComputeStatistics=True,
)
logger.info("Created Evaluation Datasource " + self.eval_ds_id)
def build_rearrangement_str(self, is_complement):
"""
Returns the DataRearrangement string.
Args:
is_complement: the boolean flag to indicate whether the
datasource takes the given splitting range, or the
complement of it.
Returns:
a string of the DataRearrangement
"""
# Use integer division as rearrange API only support percentage
# in integer. Casting self.kfolds to integer for Python 2
# compatibility.
percent_begin = self.this_fold * (100 // int(self.kfolds))
percent_end = (self.this_fold + 1) * (100 // int(self.kfolds))
return json.dumps({
"splitting": {
"percentBegin": percent_begin,
"percentEnd": percent_end,
"complement": is_complement,
"strategy": config.STRATEGY,
"strategyParams": {
"randomSeed": config.RANDOM_STRATEGY_RANDOM_SEED
}
}
})
def build_datasource_name(self, name, rearrangement_str):
"""
Builds the name of datasource to create
Args:
name: the user-provided name of entities on Amazon ML
rearrangement_str: the rearrangement JSON string
Returns:
a string representing the name of datasource to create
"""
rearrangement = json.loads(rearrangement_str)
percent_begin = rearrangement["splitting"]["percentBegin"]
percent_end = rearrangement["splitting"]["percentEnd"]
is_complement = rearrangement["splitting"]["complement"]
return "{name} [percentBegin={pb}, percentEnd={pe}, complement={c}]"\
.format(name=name,
pb=percent_begin,
pe=percent_end,
c=is_complement)
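# Worked example (illustrative) of the splitting math above: for this_fold=1
# and kfolds=5, percentBegin=20 and percentEnd=40, so the evaluation
# datasource covers the 20%-40% slice (complement=False) and the training
# datasource covers everything outside it (complement=True).
# build_rearrangement_str(is_complement=False) then returns roughly:
#
#   {"splitting": {"percentBegin": 20, "percentEnd": 40, "complement": false,
#    "strategy": <config.STRATEGY>,
#    "strategyParams": {"randomSeed": <config.RANDOM_STRATEGY_RANDOM_SEED>}}}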
|
Home Automation/Part 5/homeSecPi.py | NAzT/ESP8266 | 101 | 12733905 | <reponame>NAzT/ESP8266
#!/usr/bin/python
import sys
import os
import paho.mqtt.client as mqtt
import smtplib
import string
#var to track the current status of the system (armed or disarmed)
securityEnStatus = None
#enable/disable email notifications - disable when testing/enable when in normal use
sendEmail = False
#This is like the confirm topic for the security system - it notifies other nodes about the status of the system
statusTopic = "/house/secStatus"
#this is the control topic that this script listens to for messages telling it to arm or disarm
enableTopic = "/house/secEn"
def on_connect(mqttc, obj, flags, rc):
print("rc: "+str(rc))
def on_message(mqttc, obj, msg):
global securityEnStatus
global breachesSinceReset
#if the topic is the enable topic and its to enable the system (1)
#publish to the state topic that the system is armed and enable status to on
if(str(msg.topic) == enableTopic and int(msg.payload) == 1):
securityEnStatus = True
mqttc.publish(statusTopic, "armed_away")
print("Security Enabled")
#if the topic is the enable topic and its to disable the system (0)
#then set the enable status to off
    #and publish to the state topic that we're disarmed
elif(str(msg.topic) == enableTopic and int(msg.payload) == 0):
securityEnStatus = False
mqttc.publish(statusTopic, "disarmed")
print("Security Disabled")
#all other topics are detector nodes such as motion detectors and door sensors
#so as long as the security system is enabled (armed), if one of the detector nodes is triggered
#send out an alert
elif(securityEnStatus == True and int(msg.payload) == 1):
mqttc.publish(statusTopic, "triggered")
securityAlert(str(msg.topic))
def on_publish(mqttc, obj, mid):
print("mid: "+str(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mqttc, obj, level, string):
print(string)
def securityAlert(alertLoc):
global sendEmail
print("Breach at: " + alertLoc)
print("Sending Alert!\n")
if(sendEmail == True):
#to and from addresses
fromaddr = '-----YOUR EMAIL ADDRESS-----'
toaddrs = '-----EMAIL ADDRESS TO ALERT-----'
#form the email header+body
header = 'To:' + toaddrs + '\n' + 'From: ' + fromaddr + '\n' + 'Subject:Home Security Breach! \n'
msg = header + "\nBreach Location: " + alertLoc + "\n\n"
#login credentials
username = '-----YOUR EMAIL ADDRESS USERNAME-----'
password = '-----<PASSWORD>-----'
# send the email
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.ehlo()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.connect("127.0.0.1", 1883, 60)
#Enable Monitor
mqttc.subscribe(enableTopic, 0)
#detector nodes
mqttc.subscribe("/house/door1", 0)
mqttc.loop_forever()
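# Quick manual test from another machine (assumes the mosquitto command-line
# clients are installed; adjust the broker IP if it is not this Pi):
#
#   mosquitto_pub -h 127.0.0.1 -t /house/secEn -m 1    # arm the system
#   mosquitto_pub -h 127.0.0.1 -t /house/door1 -m 1    # simulate a door sensor trip
#   mosquitto_pub -h 127.0.0.1 -t /house/secEn -m 0    # disarm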
|
functions/setting/artificial_generation_setting.py | hsokooti/RegNet | 187 | 12733945 | <filename>functions/setting/artificial_generation_setting.py<gh_stars>100-1000
import copy
def load_deform_exp_setting(selected_deform_exp):
def_setting = dict()
if selected_deform_exp == '3D_max20_D14':
def_setting = dict()
def_setting['MaxDeform'] = 20 # The maximum amplitude of deformations
def_setting['DeformMethods'] = ['respiratory_motion', 'respiratory_motion', 'respiratory_motion', 'respiratory_motion',
'single_frequency', 'single_frequency', 'single_frequency', 'single_frequency', 'single_frequency',
'mixed_frequency', 'mixed_frequency', 'mixed_frequency', 'mixed_frequency',
'zero']
def_setting['UseLungMask'] = True # The peaks of synthetic deformation can only be inside the mask
def_setting['verbose_image'] = False # Detailed writing of images: writing the DVF of the nextFixedImage
def_setting['DVFNormalization'] = True
def_setting['MaskToZero'] = 'Torso'
def_setting['WriteIntermediateIntensityAugmentation'] = False
# stages
def_setting['DeleteStage1Images'] = True # After downsampling, delete all images in the original resolution.
# images
def_setting['Canny_LowerThreshold'] = 50.0
def_setting['Canny_UpperThreshold'] = 100.0
def_setting['Im_NoiseSigma'] = 10 # Sigma for adding noise after deformation
def_setting['Im_NoiseAverage'] = 10 # Mean for adding noise after deformation
# occlusion
def_setting['Occlusion'] = True
def_setting['Occlusion_NumberOfEllipse'] = 10
def_setting['Occlusion_IntensityRange'] = [-800, -780]
def_setting['Occlusion_Max_a'] = 15
def_setting['Occlusion_Max_b'] = 15
def_setting['Occlusion_Max_c'] = 15
# NextIm
        def_setting['NextIm_SigmaN'] = 2  # The intensity noise is lower than for normal deformed images in order to prevent accumulating noise.
# Since we are going to generate several deformed images on the NextIm
def_setting['NextIm_MaxDeform'] = 15
# Single Frequency
def_setting['SingleFrequency_BSplineGridSpacing'] = [[80, 80, 80], [70, 70, 70], [60, 60, 60], [50, 50, 50], [45, 45, 45]] # in mm approximately
def_setting['SingleFrequency_SetGridBorderToZero'] = [[1, 1, 1], [1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
def_setting['SingleFrequency_GridSmoothingSigma'] = [[0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [0.8, 0.8, 0.8]] # in voxel not in mm
def_setting['SingleFrequency_BackgroundSmoothingSigma'] = [8, 8, 8, 8, 8] # in voxel not in mm
def_setting['SingleFrequency_MaxDeformRatio'] = [1, 1, 1, 1, 1]
# Mixed Frequency
def_setting['MixedFrequency_BlockRadius'] = 20 # in voxel not in mm
def_setting['MixedFrequency_Np'] = [200, 150, 150, 150]
def_setting['MixedFrequency_BSplineGridSpacing'] = [[80, 80, 80], [60, 60, 60], [50, 50, 50], [45, 45, 60]]
def_setting['MixedFrequency_SigmaRange'] = [[10, 15], [10, 15], [10, 15], [10, 15]] # in voxel not in mm
def_setting['MixedFrequency_GridSmoothingSigma'] = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]] # in voxel not in mm
def_setting['MixedFrequency_SetGridBorderToZero'] = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]] # in voxel not in mm
def_setting['MixedFrequency_MaxDeformRatio'] = [1, 1, 1, 1]
# Respiratory Motion
def_setting['RespiratoryMotion_t0'] = [30, 30, 30, 30, 30] # in mm
def_setting['RespiratoryMotion_s0'] = [0.12, 0.12, 0.12, 0.12, 0.12]
def_setting['RespiratoryMotion_BSplineGridSpacing'] = [[80, 80, 80], [70, 70, 70], [60, 60, 60], [50, 50, 50], [45, 45, 45]] # in mm approximately
def_setting['RespiratoryMotion_SetGridBorderToZero'] = [[1, 1, 1], [1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
def_setting['RespiratoryMotion_GridSmoothingSigma'] = [[0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [0.8, 0.8, 0.8], [0.8, 0.8, 0.8]] # in voxel not in mm
def_setting['RespiratoryMotion_BackgroundSmoothingSigma'] = [8, 8, 8, 8, 8] # in voxel not in mm
def_setting['RespiratoryMotion_MaxDeformRatio'] = [1, 1, 1, 1, 1]
def_setting['RespiratoryMotion_SingleFrequency_MaxDeformRatio'] = [0.5, 0.5, 0.5, 0.5, 0.5]
# translation
def_setting['Translation_MaxDeformRatio'] = [1, 1, 1, 1]
# translation_old
def_setting['BsplineGridSpacing_translation'] = [[40, 40, 40], [40, 40, 40], [40, 40, 40], [40, 40, 40]]
def_setting['setGridBorderToZero_translation'] = [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]
elif selected_deform_exp == '3D_max20_D14_K':
deform_exp_setting_temp = load_deform_exp_setting('3D_max20_D14')
def_setting = copy.deepcopy(deform_exp_setting_temp)
elif selected_deform_exp in ['3D_max7_D14_K']:
deform_exp_setting_temp = load_deform_exp_setting('3D_max20_D14')
def_setting = copy.deepcopy(deform_exp_setting_temp)
def_setting['MaxDeform'] = 7
def_setting['SingleFrequency_BSplineGridSpacing'] = [[50, 50, 50], [45, 45, 45], [35, 35, 35], [25, 25, 25], [20, 20, 20]]
def_setting['SingleFrequency_MaxDeformRatio'] = [0.5, 1, 1, 1, 1]
def_setting['MixedFrequency_BSplineGridSpacing'] = [[50, 50, 50], [40, 40, 40], [25, 25, 35], [20, 20, 30]]
def_setting['MixedFrequency_SigmaRange'] = [[5, 10], [5, 10], [5, 10], [5, 10]]
def_setting['MixedFrequency_MaxDeformRatio'] = [1, 1, 1, 1]
def_setting['RespiratoryMotion_t0'] = [15, 15, 15, 15, 15] # in mm
def_setting['RespiratoryMotion_s0'] = [0.12, 0.12, 0.12, 0.12, 0.12]
def_setting['RespiratoryMotion_BSplineGridSpacing'] = [[50, 50, 50], [45, 45, 45], [35, 35, 35], [25, 25, 25], [20, 20, 20]]
def_setting['RespiratoryMotion_MaxDeformRatio'] = [1, 1, 1, 1, 1]
def_setting['RespiratoryMotion_SingleFrequency_MaxDeformRatio'] = [0.5, 0.5, 0.5, 0.5, 0.5]
elif selected_deform_exp in ['3D_max15_D14_K']:
deform_exp_setting_temp = load_deform_exp_setting('3D_max20_D14')
def_setting = copy.deepcopy(deform_exp_setting_temp)
def_setting['MaxDeform'] = 15
def_setting['SingleFrequency_BSplineGridSpacing'] = [[60, 60, 60], [50, 50, 50], [45, 45, 45], [40, 40, 40], [35, 35, 35]]
def_setting['SingleFrequency_MaxDeformRatio'] = [0.5, 1, 1, 1, 1]
def_setting['MixedFrequency_BSplineGridSpacing'] = [[60, 60, 60], [50, 50, 40], [40, 40, 80], [35, 35, 80]]
def_setting['MixedFrequency_SigmaRange'] = [[7, 12], [7, 12], [7, 12], [7, 12]]
def_setting['MixedFrequency_MaxDeformRatio'] = [1, 1, 1, 1]
def_setting['RespiratoryMotion_t0'] = [22, 22, 22, 22, 22] # in mm
def_setting['RespiratoryMotion_s0'] = [0.12, 0.12, 0.12, 0.12, 0.12]
def_setting['RespiratoryMotion_BSplineGridSpacing'] = [[60, 60, 60], [50, 50, 50], [45, 45, 45], [40, 40, 40], [35, 35, 35]]
def_setting['RespiratoryMotion_MaxDeformRatio'] = [1, 1, 1, 1, 1]
def_setting['RespiratoryMotion_SingleFrequency_MaxDeformRatio'] = [0.5, 0.5, 0.5, 0.5, 0.5]
else:
print('warning: -------- selected_deform_exp not found')
return def_setting
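if __name__ == '__main__':
    # Minimal usage sketch: load one experiment's settings and inspect a couple
    # of fields (uses only the function defined above).
    _setting = load_deform_exp_setting('3D_max7_D14_K')
    print(_setting['MaxDeform'])                              # expected: 7
    print(_setting['SingleFrequency_BSplineGridSpacing'][0])  # expected: [50, 50, 50]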
|
codigo/Live54/exemplo_1.py | cassiasamp/live-de-python | 572 | 12733952 | <gh_stars>100-1000
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from requests import get
from functools import partial
from os import getpid
l_urls = ['https://google.com'] * 6
# executor = ThreadPoolExecutor(max_workers=3)
# result = executor.map(get, l_urls)
# print(result)
print('Threads')
with ThreadPoolExecutor(max_workers=3) as executor:
"""
executor.__enter__ -> self (ThreadPoolExecutor)
executor.__exit__ -> executor.shutdown(wait=True)
"""
result = executor.map(get, l_urls)
print(result)
print(list(result))
print('Processo')
with ProcessPoolExecutor() as executor:
result = executor.map(partial(print, getpid()))
print(result)
print(list(result))
|
mmflow/models/flow_estimators/liteflownet.py | hologerry/mmflow | 481 | 12733957 | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence, Tuple
from numpy import ndarray
from torch import Tensor
from ..builder import FLOW_ESTIMATORS
from .pwcnet import PWCNet
@FLOW_ESTIMATORS.register_module()
class LiteFlowNet(PWCNet):
"""LiteFlowNet model."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def extract_feat(
self, imgs: Tensor
) -> Tuple[Tensor, Tensor, Dict[str, Tensor], Dict[str, Tensor]]:
"""Extract features from images.
Args:
imgs (Tensor): The concatenated input images.
Returns:
Tuple[Tensor, Tensor, Dict[str, Tensor], Dict[str, Tensor]]: The
first input image, the second input image, the feature pyramid
                of the first input image and the feature pyramid of the second
input image.
"""
in_channels = self.encoder.in_channels
# take from github.com:sniklaus/pytorch-liteflownet.git
imgs_mean = [0.411618, 0.434631, 0.454253]
for ich in range(in_channels):
imgs[:, ich, :, :] = imgs[:, ich, :, :] - imgs_mean[ich]
imgs[:, ich + in_channels, :, :] = (
imgs[:, ich + in_channels, :, :] - imgs_mean[ich])
img1 = imgs[:, :in_channels, ...]
img2 = imgs[:, in_channels:, ...]
return img1, img2, self.encoder(img1), self.encoder(img2)
def forward_train(
self,
imgs: Tensor,
flow_gt: Tensor,
valid: Optional[Tensor] = None,
img_metas: Optional[Sequence[dict]] = None) -> Dict[str, Tensor]:
"""Forward function for LiteFlowNet when model training.
Args:
imgs (Tensor): The concatenated input images.
flow_gt (Tensor): The ground truth of optical flow.
Defaults to None.
valid (Tensor, optional): The valid mask. Defaults to None.
img_metas (Sequence[dict], optional): meta data of image to revert
the flow to original ground truth size. Defaults to None.
Returns:
Dict[str, Tensor]: The losses of output.
"""
img1, img2, feat1, feat2 = self.extract_feat(imgs)
return self.decoder.forward_train(
img1, img2, feat1, feat2, flow_gt=flow_gt, valid=valid)
def forward_test(
self,
imgs: Tensor,
img_metas: Optional[Sequence[dict]] = None) -> Sequence[ndarray]:
"""Forward function for LiteFlowNet when model testing.
Args:
imgs (Tensor): The concatenated input images.
img_metas (Sequence[dict], optional): meta data of image to revert
the flow to original ground truth size. Defaults to None.
Returns:
Sequence[Dict[str, ndarray]]: the batch of predicted optical flow
with the same size of images after augmentation.
"""
img1, img2, feat1, feat2 = self.extract_feat(imgs)
return self.decoder.forward_test(
img1, img2, feat1, feat2, img_metas=img_metas)
|
zoo/policies/cross-rl-agent/cross_rl_agent/train/utils.py | idsc-frazzoli/SMARTS | 554 | 12734045 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The author of this file is: https://github.com/mg2015started
import numpy as np
def get_split_batch(batch):
"""memory.sample() returns a batch of experiences, but we want an array
for each element in the memory (s, a, r, s', done)"""
states_mb = np.array([each[0][0] for each in batch])
# print(states_mb.shape)
actions_mb = np.array([each[0][1] for each in batch])
# print(actions_mb.shape)
rewards_mb = np.array([each[0][2] for each in batch])
# print(rewards_mb.shape)
next_states_mb = np.array([each[0][3] for each in batch])
# print(next_states_mb.shape)
dones_mb = np.array([each[0][4] for each in batch])
return states_mb, actions_mb, rewards_mb, next_states_mb, dones_mb
def OU(action, mu=0, theta=0.15, sigma=0.3):
# noise = np.ones(action_dim) * mu
noise = theta * (mu - action) + sigma * np.random.randn(1)
# noise = noise + d_noise
return noise
def calculate_angle(ego_location, goal_location, ego_direction):
# calculate vector direction
goal_location = np.array(goal_location)
ego_location = np.array(ego_location)
goal_vector = goal_location - ego_location
L_g_vector = np.sqrt(goal_vector.dot(goal_vector))
ego_vector = np.array(
[np.cos(ego_direction * np.pi / 180), np.sin(ego_direction * np.pi / 180)]
)
L_e_vector = np.sqrt(ego_vector.dot(ego_vector))
cos_angle = goal_vector.dot(ego_vector) / (L_g_vector * L_e_vector)
angle = (np.arccos(cos_angle)) * 180 / np.pi
if np.cross(goal_vector, ego_vector) > 0:
angle = -angle
return angle
def calculate_distance(location_a, location_b):
""" calculate distance between a and b"""
return np.linalg.norm(location_a - location_b)
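if __name__ == "__main__":
    # Small numeric sketch (illustrative, uses only the helpers above): an ego
    # vehicle at the origin heading along +x (0 degrees) with a goal 10 m up
    # the +y axis sees the goal at roughly 90 degrees (sign follows the
    # cross-product convention in calculate_angle).
    _angle = calculate_angle(ego_location=[0.0, 0.0],
                             goal_location=[0.0, 10.0],
                             ego_direction=0.0)
    _dist = calculate_distance(np.array([0.0, 0.0]), np.array([0.0, 10.0]))
    print(_angle, _dist)  # approx 90.0 and 10.0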
|
recipes/Python/473881_splitandjoinpy/recipe-473881.py | tdiprima/code | 2,023 | 12734048 | <gh_stars>1000+
import os # give access to os.path & os.remove
import md5 # allows md5.md5(arg).digest() for file signature
import time # gives access to time.sleep
import random # gives access to random.randrange
import thread # to start new threads
import cPickle # to pickle and unpickle the index
import Tkinter # provides GUI tools
import tkFileDialog # provides some GUI dialogs
THREAD_FLAG = False # we'll use this to control the flag
class Application: # this is the main class / [function]
FILE_SIZE = 1024 * 1024 # this is an approximate size of output files
PART_SIZE = 1024 # this is used to tune the "disguising" engine
# main class function
def __init__(self):
self.__root = Tkinter.Tk() # open the main window
self.__root.title('Split & Join') # give it a name
self.__root.resizable(False, False) # disable resizing
# starts the splitter engine
split = Tkinter.Button(self.__root, text='Split', font='Courier 8', command=self.split)
split.grid(row=0, column=0, padx=15, pady=5)
# starts the joiner engine
join = Tkinter.Button(self.__root, text='Join', font='Courier 8', command=self.join)
join.grid(row=0, column=1, padx=15, pady=5)
# used for saving / opening files
self.__open = tkFileDialog.Open()
self.__save = tkFileDialog.Directory()
# don't forget to execute!
self.__root.mainloop()
# wrap the splitter engine
def split(self):
global THREAD_FLAG
if not THREAD_FLAG:
self.__root.withdraw()
self.__start()
try:
self.__do_split()
self.__stop('SPLIT - DONE')
except:
self.__stop('SPLIT - FAIL')
self.__root.deiconify()
# wrap the joiner engine
def join(self):
global THREAD_FLAG
if not THREAD_FLAG:
self.__root.withdraw()
self.__start()
try:
self.__do_join()
self.__stop('JOIN - DONE')
except:
self.__stop('JOIN - FAIL')
self.__root.deiconify()
# remind the user that the program is working
def __working(self):
global THREAD_FLAG
state, key = 0, ['|', '/', '-', '\\']
while THREAD_FLAG:
os.system('cls')
print '.' * (state / 8) + key[state % 4]
state += 1
time.sleep(0.125)
# start the reminder thread
def __start(self):
global THREAD_FLAG
THREAD_FLAG = True
thread.start_new_thread(self.__working, ())
# stop the reminder thread
def __stop(self, message):
global THREAD_FLAG
THREAD_FLAG = False
time.sleep(0.25)
os.system('cls')
print message
# get the signature of the file specified by path
def __signature(self, path):
return md5.md5(file(path, 'rb').read()).digest()
# split string so len(part) == size
def __partition(self, string, size):
if len(string) % size:
parts = len(string) / size + 1
else:
parts = len(string) / size
return [string[index*size:index*size+size] for index in range(parts)]
# get a source file and a destination folder
def __get_source_and_destination(self):
return open(self.__open.show(), 'rb'), self.__save.show()
# create a random key
def __new_key(self):
data = range(256)
key = ''
while data:
index = random.randrange(len(data))
key += chr(data[index])
del data[index]
return key
# encode a string
def __s2c(self, string):
'''s2c(str string)
Convert from string to code.'''
self.__assert_type((str, string))
return self.__n2c(self.__s2n(string))
# convert number to code
def __n2c(self, number):
self.__assert_type((long, number))
code = ''
while number:
code = chr(number % 255 + 1) + code
number /= 255
return code
# convert string to number
def __s2n(self, string):
self.__assert_type((str, string))
number = 1L
for character in string:
number <<= 8
number += ord(character)
return number
# make sure that type checking passes
def __assert_type(self, *tuples):
for types, objects in tuples:
if type(objects) is not types:
raise TypeError
# this is the splitter engine
def __do_split(self):
# get file and folder
source, destination = self.__get_source_and_destination()
# make sure that there is a destination
assert destination != ''
# index will be the master file to the many files, key will be for mangling
index = [os.path.basename(source.name), self.__new_key()]
        # divide the source into the individual files
data = self.__partition(source.read(), self.FILE_SIZE)
# all source data has been collected, so close it
source.close()
# write the individual files
for num, part in enumerate(data):
# figure out what the filename will be
dest_path = os.path.join(destination, '%s.%s.part' % (num, os.path.basename(source.name)))
# open the file for writing
dest_file = open(dest_path, 'wb')
            # mangle part to be indistinguishable
part = part.translate(index[1])
# partition part for futher mangling
part = self.__partition(part, self.PART_SIZE)
# mangle each part again
part = [self.__s2c(x) for x in part]
# write the joined parts after mangling
dest_file.write(chr(0).join(part).translate(index[1]))
# close the destination
dest_file.close()
# add the signature to index
index.append(self.__signature(dest_path))
# write the index
cPickle.dump(index, file(os.path.join(destination, '%s.part' % os.path.basename(source.name)), 'wb'))
# return an inverted key
def __inverse(self, key):
array = range(256)
for num, char in enumerate(key):
array[ord(char)] = chr(num)
return ''.join(array)
# verify unpacking
def __check_index(self, index, dirname, source):
all_path = list()
for num, signature in enumerate(index):
all_path.append(os.path.join(dirname, '%s.%s' % (num, source)))
present = self.__signature(all_path[-1])
assert signature == present
return all_path
# convert from code to string
def __c2s(self, code):
'''c2s(str code)
Convert from code to string.'''
self.__assert_type((str, code))
return self.__n2s(self.__c2n(code))
# convert from code to number
def __c2n(self, code):
self.__assert_type((str, code))
number = 0L
for character in code:
number *= 255
number += ord(character) - 1
return number
# convert from number to string
def __n2s(self, number):
self.__assert_type((long, number))
string = ''
while number > 1:
string = chr(number & 0xFF) + string
number >>= 8
return string
# this is the joiner engine
def __do_join(self):
# get the source file and destination folder
source, destination = self.__get_source_and_destination()
# make sure that there is a destination
assert destination != ''
# reload the index
index = cPickle.load(source)
# close the source
source.close()
# make sure that the name of the source agrees with itself
assert index[0] == os.path.basename(source.name)[:-5]
# save the key
key = index[1]
# do a mild check of the key
assert len(key) == 256
# invert the key for decoding purposes
key = self.__inverse(key)
# get the dirname from source
dirname = os.path.dirname(source.name)
# verify that all files are present and valid
all_path = self.__check_index(index[2:], dirname, os.path.basename(source.name))
# if all files were verfied, they just need to put together now [file]
dest_file = open(os.path.join(destination, index[0]), 'wb')
# go through all of the files
for path in all_path:
# open the source
source2 = open(path, 'rb')
# get the source data
data = source2.read()
# close the source
source2.close()
# automatically clean up the source
os.remove(path)
# translate the data
data = data.translate(key)
# get the parts
parts = data.split(chr(0))
# decode the parts
parts = [self.__c2s(part) for part in parts]
# calculate the string to be written
final = ''.join(parts).translate(key)
# write the data
dest_file.write(final)
# close the destination
dest_file.close()
# cleanup the index
os.remove(source.name)
if __name__ == '__main__':
Application()
|
tests/integration_tests/colors.py | trailofbits/mcsema | 1,301 | 12734053 | # Copyright (c) 2020 Trail of Bits, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Colors:
class c:
green = '\033[92m'
yellow = '\033[93m'
red = '\033[91m'
magneta = '\033[95m'
bg_yellow = '\033[43m'
orange = '\033[38;5;202m'
RESET = '\033[0m'
def get_result_color(total, success):
if total == 0:
return Colors.c.magneta
if total == success:
return Colors.c.green
if success == 0:
return Colors.c.red
return Colors.c.yellow
def get_bin_result(result):
if result == 1:
return Colors.c.green
if result == 0:
return Colors.c.red
return Colors.c.magneta
def clean():
return Colors.RESET
def c(color, message):
return color + message + clean()
def fail():
return Colors.c.red
def succ():
return Colors.c.green
#TODO: Not sure if it's worth generating these for each color dynamically from the attrs
def green(message):
return c(Colors.c.green, message)
def red(message):
return c(Colors.c.red, message)
def yellow(message):
return c(Colors.c.yellow, message)
def magneta(message):
return c(Colors.c.magneta, message)
def bg_yellow(message):
return c(Colors.c.bg_yellow, message)
def orange(message):
return c(Colors.c.orange, message)
def id(message):
return message
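# Minimal usage sketch (illustrative, uses only the helpers above):
#
#   print(green("PASS"), red("FAIL"), yellow("FLAKY"))
#   print(c(get_result_color(total=10, success=7), "7/10 binaries passed"))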
|
typed_python/compiler/type_wrappers/compiler_introspection_wrappers.py | APrioriInvestments/typed_python | 105 | 12734097 | # Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.internals import (
isCompiled,
typeKnownToCompiler,
localVariableTypesKnownToCompiler
)
from typed_python.compiler.type_wrappers.wrapper import Wrapper
import typed_python.compiler.native_ast as native_ast
import typed_python
class IsCompiledWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(isCompiled)
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if args or kwargs:
context.pushException(TypeError, "isCompiled() accepts no arguments")
return context.constant(True)
class TypeKnownToCompiler(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(typeKnownToCompiler)
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if len(args) != 1 or kwargs:
context.pushException(TypeError, "typeKnownToCompiler() accepts 1 positional argument")
return typed_python.compiler.python_object_representation.pythonObjectRepresentation(
context,
args[0].expr_type.typeRepresentation
)
class LocalVariableTypesKnownToCompiler(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(localVariableTypesKnownToCompiler)
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if args or kwargs:
context.pushException(TypeError, "localVariableTypesKnownToCompiler() accepts no arguments")
return context.constant(dict(context.variableStates._types), allowArbitrary=True)
|
glazier/lib/terminator.py | ItsMattL/glazier | 1,233 | 12734110 | <reponame>ItsMattL/glazier
# Lint as: python3
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Terminates currently running Glazier processes."""
import logging
import os
import sys
import traceback
from typing import Optional
from glazier.lib import actions
from glazier.lib import buildinfo
from glazier.lib import constants
from glazier.lib import logs
from glazier.lib import winpe
def log_and_exit(msg: str,
build_info: buildinfo.BuildInfo,
code: int = 4000,
exception: Optional[Exception] = None,
collect: bool = True):
"""Logs a user-facing error message and exits.
This function handles all Glazier Exceptions by sequentially:
- (Optional) Collecting logs to a zip folder on disk
- Logging the full traceback to the debug log
- Constructing the user-facing failure string, consisting of:
* The message to accompany the failure
* (Optional) The exception object, and if available, the file and line
number of the root exception
* The user-facing help message containing where to look for logs and
where to go for further assistance.
- Log the user-facing failure string
- Exit Glazier with code 1
Args:
msg: The error message to accompany the failure.
build_info: The active BuildInfo class.
code: Error code to append to the failure message.
exception: The exception object.
collect: Whether to collect log files.
"""
if collect:
try:
logs.Collect(os.path.join(build_info.CachePath(), r'\glazier_logs.zip'))
except logs.LogError as e:
logging.error('logs collection failed with %s', e)
# Log the full traceback to _BUILD_LOG to assist in troubleshooting
logging.debug(traceback.format_exc())
string = f'{msg}\n\n'
if exception:
# Index 2 contains the traceback from the sys.exc_info() tuple
trace = sys.exc_info()[2]
if trace:
# Index -1 contains the traceback object of the root exception
trace_obj = traceback.extract_tb(trace)[-1]
# The trace object contains the full file path, grab just the file name
file = os.path.split(trace_obj.filename)[1]
lineno = trace_obj.lineno
string += f'Exception: {file}:{lineno}] {exception}\n\n'
else:
string += f'Exception] {exception}\n\n'
build_log = constants.SYS_BUILD_LOG
if winpe.check_winpe():
build_log = constants.WINPE_BUILD_LOG
string += (f'See {build_log} for more info. '
f'Need help? Visit {constants.HELP_URI}#{code}')
logging.critical(string)
sys.exit(1)
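# Typical call site (illustrative sketch; `build_info` is assumed to be an
# already-constructed buildinfo.BuildInfo instance, and the step/exception
# names below are placeholders):
#
#   try:
#       run_install_step()
#   except SomeGlazierError as err:
#       log_and_exit('Install step failed', build_info, code=4000, exception=err)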
|
setup.py | cbalfour/python-qrcode | 2,651 | 12734133 | <filename>setup.py
#!/usr/bin/env python
from setuptools import setup
# See setup.cfg for configuration.
setup(
data_files=[('share/man/man1', ['doc/qr.1'])],
)
|
tests/functional/test_cisco_iosxe.py | verbosemode/scrapli | 404 | 12734140 | <reponame>verbosemode/scrapli<gh_stars>100-1000
def test_non_standard_default_desired_privilege_level(iosxe_conn):
# purpose of this test is to ensure that when a user sets a non-standard default desired priv
# level, that there is nothing in genericdriver/networkdriver that will prevent that from
# actually being set as the default desired priv level
iosxe_conn.close()
iosxe_conn.default_desired_privilege_level = "configuration"
iosxe_conn.open()
current_prompt = iosxe_conn.get_prompt()
assert current_prompt == "csr1000v(config)#"
iosxe_conn.close()
|
mozi/cost.py | hycis/Mozi | 122 | 12734156 |
import theano.tensor as T
import theano
from mozi.utils.utils import theano_unique
from mozi.utils.theano_utils import asfloatX
floatX = theano.config.floatX
if floatX == 'float64':
epsilon = 1.0e-8
else:
epsilon = 1.0e-6
def accuracy(y, y_pred):
L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.mean(L)
# L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
# return T.sum(L) / y.shape[0].astype(floatX)
def mse(y, y_pred):
return T.mean(T.sqr(y-y_pred))
def entropy(y, y_pred):
y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
L = -(y * T.log(y_pred) + (1-y) * T.log(1-y_pred))
return T.mean(L)
# L = - T.sum(y * T.log(y_pred) + (1-y) * T.log(1-y_pred), axis=1)
# return T.mean(L)
def error(y, y_pred):
L = T.neq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.mean(L)
def recall(y, y_pred):
L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.sum(L) / y.shape[0].astype(floatX)
def precision(y, y_pred):
L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.sum(L) / y_pred.shape[0].astype(floatX)
def f1(y, y_pred):
r = recall(y, y_pred)
p = precision(y, y_pred)
return 2 * p * r / (p + r)
def hingeloss(y, y_pred):
y_pred = T.clip(y_pred, 0., 1.0)
    L = T.maximum(0., 1. - y * y_pred)  # element-wise maximum; T.max reduces over an axis and is not what hinge loss needs
return T.mean(L)
def abs(y, y_pred):
return T.mean(T.abs_(y-y_pred))
def SGVB_bin(y, y_pred):
'''
This cost function is for variational autoencoder with binary inputs
'''
ypred, miu_e, logsig_e = y_pred
ypred = T.clip(ypred, epsilon, 1.0 - epsilon)
logpxz = -T.nnet.binary_crossentropy(ypred, y).sum(axis=1)
L = logpxz + 0.5 * (1 + 2*logsig_e - miu_e**2 - T.exp(2*logsig_e)).sum(axis=1)
return L.mean()
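# Minimal usage sketch (illustrative): compile one of the metrics above into a
# callable. Only names from this module plus standard theano/numpy calls are
# used.
#
#   import numpy as np
#   y_true = T.matrix('y_true')
#   y_hat = T.matrix('y_hat')
#   acc_fn = theano.function([y_true, y_hat], accuracy(y_true, y_hat))
#   labels = np.eye(3, dtype=floatX)   # three one-hot rows
#   preds = np.eye(3, dtype=floatX)    # perfect predictions
#   print(acc_fn(labels, preds))       # ~1.0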
|
phasing/utils/plotPhaseCount.py | ArthurDondi/cDNA_Cupcake | 205 | 12734157 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 13:14:28 2020
@author: derek.bickhart-adm
"""
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
from matplotlib import pyplot as plt
from matplotlib.collections import BrokenBarHCollection
from matplotlib import cm
from itertools import cycle
from collections import defaultdict
import argparse
import pandas
import numpy as np
import pysam
def arg_parse():
parser = argparse.ArgumentParser(
description = "A tool to plot bin and contig level read depth differences in strain assignment"
)
parser.add_argument('-f', '--fai',
help="Input reference fasta index file for the bin",
required=True, type=str
)
parser.add_argument('-o', '--output',
help="Output file basename. Output files are {output}.wins and {output}.pdf",
required=True, type=str,
)
parser.add_argument('-b', '--bam',
help="Input CCS read depth bam file",
required=True, type=str
)
    parser.add_argument('-u', '--human',  # '-h' would clash with argparse's built-in -h/--help
help="Input human-readable variant call file",
required=True, type=str
)
parser.add_argument('-i', '--binsize',
help="Bin size in bases [5000 bp]",
type = int, default=5000
)
return parser.parse_args(), parser
def main(args, parser):
# Get the contig length list
ctglens = dict()
with open(args.fai, 'r') as fai:
for l in fai:
s = l.rstrip().split()
            ctglens[s[0]] = int(s[1])  # contig length as int for range() and the offset math below
# Create windows
winlist = defaultdict(list)
# offset bp to add for stitching contigs together in one line
ctgoffset = dict()
lastbp = 0
for c in ctglens:
ctgoffset[c] = lastbp + 100
for i in range(0, ctglens[c], args.binsize):
winlist[c].append(window(c, i, i + args.binsize))
lastbp += ctglens[c]
# read each sam region and count the reads
with pysam.AlignmentFile(args.bam, 'rb') as bamfile:
for c, w in winlist.items():
for i, win in enumerate(w):
count = 0
for s in bamfile.fetch(c, win.start, win.end):
if s.is_secondary:
continue
count += 1
winlist = updateWin(winlist, c, i, count)
# Now, read in the human readable text file and process that
hapset = set()
with open(args.human, 'r') as human:
human.readline()
for l in human:
s = l.rstrip().split()
# determine where the contig start falls
for i, win in enumerate(winlist[s[2]]):
if int(s[3]) < win.end and int(s[3]) >= win.start:
winlist = updateWin(winlist, s[2], i, int(s[6]), s[4])
print(f'Updating window: {s[2]} {win.start} {win.end} to {s[6]} for Hap {s[4]}')
hapset.add(s[4])
# OK, data is in! Let's try plotting
raw = defaultdict(list)
bars = list()
for c, w in winlist.items():
bars.append([ctgoffset[c], ctglens[c]])
        for win in w:  # iterate this contig's windows, not the dict of contigs
for h in hapset:
raw["contig"].append(c)
raw["start"].append(win.start + ctgoffset[c])
raw["end"].append(win.end + ctgoffset[c])
raw["hap"].append(h)
raw["count"].append(win.getCount(h))
df = pandas.DataFrame(raw)
df.to_csv(args.output + '.wins', sep='\t', header=True)
fig = plt.figure(figsize=(6,8))
ax = df[['start', 'hap', 'count']].plot.area(x='start', y='count', colormap='viridis')
ax.add_collection(BrokenBarHCollection(bars, [-1, 1], facecolors=plt.get_cmap('tab20')))
ax.axis('tight')
plt.savefig(args.output + '.pdf')
def updateWin(winlist, contig, winidx, count, haplotype = 'REF'):
    winlist[contig][winidx].count[haplotype] = count  # index the specific window within the contig's list
return winlist
class window:
def __init__(self, contig, start, end):
self.contig = contig
self.start = start
self.end = end
self.count = defaultdict(int)
def getCount(self, hap):
if hap in self.count:
return self.count[hap]
else:
return 0
if __name__ == "__main__":
args, parser = arg_parse()
main(args, parser)
|
octodns/processor/awsacm.py | h3rj4n/octodns | 369 | 12734167 | <filename>octodns/processor/awsacm.py<gh_stars>100-1000
#
# Ignores AWS ACM validation CNAME records.
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
from .base import BaseProcessor
class AwsAcmMangingProcessor(BaseProcessor):
'''
processors:
awsacm:
        class: octodns.processor.awsacm.AwsAcmMangingProcessor
...
zones:
something.com.:
...
processors:
- awsacm
...
'''
log = getLogger('AwsAcmMangingProcessor')
def _ignore_awsacm_cnames(self, zone):
for r in zone.records:
if r._type == 'CNAME' and \
r.name.startswith('_') \
and r.value.endswith('.acm-validations.aws.'):
self.log.info('_process: ignoring %s', r.fqdn)
zone.remove_record(r)
return zone
def process_source_zone(self, desired, *args, **kwargs):
return self._ignore_awsacm_cnames(desired)
def process_target_zone(self, existing, *args, **kwargs):
return self._ignore_awsacm_cnames(existing)
|
test_pyspin.py | crunchex/py-spin | 196 | 12734187 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from pyspin import spin
def test_spinner():
spinner = spin.Spinner(spin.Spin9)
assert spinner.length == 4
assert spinner.frames == spin.Spin9
assert spinner.current() == u'←'
assert spinner.next() == u'←'
assert spinner.next() == u'↑'
assert spinner.next() == u'→'
assert spinner.next() == u'↓'
assert spinner.next() == u'←'
assert spinner.next() == u'↑'
spinner.reset()
assert spinner.position == 0
def test_make_spin():
@spin.make_spin(spin.Default, 'Downloading...')
def fake_download():
time.sleep(2)
fake_download()
def test_make_spin_with_args():
@spin.make_spin(spin.Default, 'Downloading...')
def fake_download(url, retry_times=3):
print("Downloading {0}, will retry {1} times".format(url, retry_times))
time.sleep(2)
fake_download("https://www.example.com/text.txt", retry_times=5)
def test_stop_on_exception():
@spin.make_spin(spin.Default, 'Downloading...')
def fake_download():
1 / 0
try:
fake_download()
except ZeroDivisionError:
print("We catched the exception! Yeah!")
def test_several_calls():
@spin.make_spin(spin.Default, 'Downloading...')
def fake_download():
time.sleep(2)
print("Begin the first download.")
fake_download()
print("Begin the second download.")
fake_download()
|
semseg/__init__.py | Genevievekim/semantic-segmentation-1 | 196 | 12734204 | <gh_stars>100-1000
from tabulate import tabulate
from semseg import models
from semseg import datasets
from semseg.models import backbones, heads
def show_models():
model_names = models.__all__
numbers = list(range(1, len(model_names)+1))
print(tabulate({'No.': numbers, 'Model Names': model_names}, headers='keys'))
def show_backbones():
backbone_names = backbones.__all__
variants = []
for name in backbone_names:
try:
variants.append(list(eval(f"backbones.{name.lower()}_settings").keys()))
except:
variants.append('-')
print(tabulate({'Backbone Names': backbone_names, 'Variants': variants}, headers='keys'))
def show_heads():
head_names = heads.__all__
numbers = list(range(1, len(head_names)+1))
print(tabulate({'No.': numbers, 'Heads': head_names}, headers='keys'))
def show_datasets():
dataset_names = datasets.__all__
numbers = list(range(1, len(dataset_names)+1))
print(tabulate({'No.': numbers, 'Datasets': dataset_names}, headers='keys'))
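if __name__ == '__main__':
    # Quick look at what the package exposes (uses only the helpers above).
    show_models()
    show_backbones()
    show_heads()
    show_datasets()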
|
scripts/elasticsearch/search-reindex.py | ZMaratovna/cloud-pipeline | 126 | 12734214 | <gh_stars>100-1000
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# touch all root tree entities
# touch docker registry
# TODO: touch runs starting from date
from pipeline import PipelineAPI
import os
import json
ROLE = 'SEARCH_UPDATE'
def touch_item(api, id, acl_class):
print('Processing %s [%s]' % (str(id), acl_class))
permissions = {'id': id,
'aclClass': acl_class,
'mask': 0,
'principal': False,
'userName': ROLE}
try:
api.execute_request(str(api.api_url) + '/grant', method='post', data=json.dumps(permissions))
api.execute_request(str(api.api_url) + '/grant?id={id}&aclClass={aclClass}&user={userName}&isPrincipal=false'
.format(**permissions), method='delete')
except BaseException as e:
print(str(e.message))
def run():
api = PipelineAPI(os.environ['API'], 'logs')
result = api.execute_request(str(api.api_url) + '/folder/loadTree', method='get')
children = ['pipelines', 'childFolders', 'storages', 'configurations']
for child_type in children:
if child_type in result:
for item in result[child_type]:
touch_item(api, item['id'], item['aclClass'])
registries = api.docker_registry_load_all()
for registry in registries:
touch_item(api, registry['id'], registry['aclClass'])
if __name__ == '__main__':
run()
|
skmultilearn/ext/__init__.py | emrecncelik/scikit-multilearn | 763 | 12734255 | """
The :mod:`skmultilearn.ext` module provides wrappers for other multi-label
classification libraries. Currently the available classes include:
+--------------------------------------------+------------------------------------------------------------------+
| Name | Description |
+============================================+==================================================================+
| :class:`~skmultilearn.ext.Meka` | Wrapper for the Multilabel Extension to WEKA - |
| | `MEKA <http://meka.sf.net>`_ library |
+--------------------------------------------+------------------------------------------------------------------+
| :class:`~skmultilearn.ext.Keras` | Wrapper for the Python Deep Learning library - |
| | `KERAS <http://https://keras.io/>`_ |
+--------------------------------------------+------------------------------------------------------------------+
| :func:`~skmultilearn.ext.download_meka` | Helper function for installing MEKA |
+--------------------------------------------+------------------------------------------------------------------+
"""
import sys, platform
from .meka import Meka, download_meka
__all__ = ["Meka", 'download_meka']
if not (sys.version_info[0] == 2 or platform.architecture()[0]=='32bit'):
from .keras import Keras
__all__ += ['Keras']
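# Rough usage sketch (hedged: the classifier strings and java path below are
# placeholders -- check the MEKA wrapper documentation for values matching
# your MEKA/WEKA installation):
#
#   from skmultilearn.ext import Meka, download_meka
#   meka = Meka(meka_classifier="meka.classifiers.multilabel.BR",
#               weka_classifier="weka.classifiers.bayes.NaiveBayes",
#               meka_classpath=download_meka(),
#               java_command='/usr/bin/java')
#   meka.fit(X_train, y_train)
#   predictions = meka.predict(X_test)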
|
alipay/aop/api/domain/CloudbusStop.py | antopen/alipay-sdk-python-all | 213 | 12734267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CloudbusStop(object):
def __init__(self):
self._latitude = None
self._longitude = None
self._station_id = None
self._station_name = None
self._station_num = None
self._station_space = None
self._station_volume = None
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
self._latitude = value
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
self._longitude = value
@property
def station_id(self):
return self._station_id
@station_id.setter
def station_id(self, value):
self._station_id = value
@property
def station_name(self):
return self._station_name
@station_name.setter
def station_name(self, value):
self._station_name = value
@property
def station_num(self):
return self._station_num
@station_num.setter
def station_num(self, value):
self._station_num = value
@property
def station_space(self):
return self._station_space
@station_space.setter
def station_space(self, value):
self._station_space = value
@property
def station_volume(self):
return self._station_volume
@station_volume.setter
def station_volume(self, value):
self._station_volume = value
def to_alipay_dict(self):
params = dict()
if self.latitude:
if hasattr(self.latitude, 'to_alipay_dict'):
params['latitude'] = self.latitude.to_alipay_dict()
else:
params['latitude'] = self.latitude
if self.longitude:
if hasattr(self.longitude, 'to_alipay_dict'):
params['longitude'] = self.longitude.to_alipay_dict()
else:
params['longitude'] = self.longitude
if self.station_id:
if hasattr(self.station_id, 'to_alipay_dict'):
params['station_id'] = self.station_id.to_alipay_dict()
else:
params['station_id'] = self.station_id
if self.station_name:
if hasattr(self.station_name, 'to_alipay_dict'):
params['station_name'] = self.station_name.to_alipay_dict()
else:
params['station_name'] = self.station_name
if self.station_num:
if hasattr(self.station_num, 'to_alipay_dict'):
params['station_num'] = self.station_num.to_alipay_dict()
else:
params['station_num'] = self.station_num
if self.station_space:
if hasattr(self.station_space, 'to_alipay_dict'):
params['station_space'] = self.station_space.to_alipay_dict()
else:
params['station_space'] = self.station_space
if self.station_volume:
if hasattr(self.station_volume, 'to_alipay_dict'):
params['station_volume'] = self.station_volume.to_alipay_dict()
else:
params['station_volume'] = self.station_volume
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CloudbusStop()
if 'latitude' in d:
o.latitude = d['latitude']
if 'longitude' in d:
o.longitude = d['longitude']
if 'station_id' in d:
o.station_id = d['station_id']
if 'station_name' in d:
o.station_name = d['station_name']
if 'station_num' in d:
o.station_num = d['station_num']
if 'station_space' in d:
o.station_space = d['station_space']
if 'station_volume' in d:
o.station_volume = d['station_volume']
return o
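# Round-trip sketch (illustrative; field values are made up): build a stop,
# dump it to the dict form used by the AliPay API, and rebuild it.
#
#   stop = CloudbusStop()
#   stop.station_id = "S001"
#   stop.station_name = "East Gate"
#   stop.latitude = "30.2741"
#   stop.longitude = "120.1551"
#   payload = stop.to_alipay_dict()
#   same_stop = CloudbusStop.from_alipay_dict(payload)
#   print(json.dumps(payload))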
|
reid/trainers.py | nuannuanhcc/DomainAdaptiveReID | 200 | 12734273 | <filename>reid/trainers.py
from __future__ import print_function, absolute_import
import time
import torch
from torch.autograd import Variable
from .evaluation_metrics import accuracy
from .loss import OIMLoss, TripletLoss
from .utils.meters import AverageMeter
class BaseTrainer(object):
def __init__(self, model, criterions, print_freq=1):
super(BaseTrainer, self).__init__()
self.model = model
self.criterions = criterions
self.print_freq = print_freq
def train(self, epoch, data_loader, optimizer):
self.model.train()
# for name, param in self.model.named_parameters():
# if 'classifier' in name:
# param.requires_grad = False
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
precisions = AverageMeter()
end = time.time()
for i, inputs in enumerate(data_loader):
data_time.update(time.time() - end)
inputs, targets = self._parse_data(inputs)
loss, prec1 = self._forward(inputs, targets, epoch)
losses.update(loss.data[0], targets.size(0))
precisions.update(prec1, targets.size(0))
optimizer.zero_grad()
loss.backward()
#add gradient clip for lstm
for param in self.model.parameters():
try:
                    param.grad.data.clamp_(-1., 1.)  # in-place clamp; clamp() returns a new tensor, so the gradients were not actually clipped
except:
continue
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % self.print_freq == 0:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%})\t'
.format(epoch, i + 1, len(data_loader),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg,
precisions.val, precisions.avg))
def _parse_data(self, inputs):
raise NotImplementedError
def _forward(self, inputs, targets):
raise NotImplementedError
class Trainer(BaseTrainer):
def _parse_data(self, inputs):
imgs, _, pids, _ = inputs
inputs = [Variable(imgs)]
targets = Variable(pids.cuda())
return inputs, targets
def _forward(self, inputs, targets, epoch):
outputs = self.model(*inputs) #outputs=[x1,x2,x3]
#new added by wc
# x1 triplet loss
loss_tri, prec_tri = self.criterions[0](outputs[0], targets, epoch)
# x2 triplet loss
loss_global, prec_global = self.criterions[1](outputs[1], targets, epoch)
return loss_tri+loss_global, prec_global
|
saleor/order/migrations/0043_auto_20180322_0655.py | elwoodxblues/saleor | 15,337 | 12734324 | <gh_stars>1000+
# Generated by Django 2.0.3 on 2018-03-22 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("order", "0042_auto_20180227_0436")]
operations = [
migrations.AlterModelOptions(
name="order",
options={
"ordering": ("-pk",),
"permissions": (
("view_order", "Can view orders"),
("edit_order", "Can edit orders"),
),
},
),
migrations.AlterField(
model_name="order",
name="language_code",
field=models.CharField(default="en", max_length=35),
),
]
|
moonlight/training/clustering/staffline_patches_dofn.py | lithomas1/moonlight | 288 | 12734348 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extracts non-empty patches of extracted stafflines.
Extracts vertical slices of the image where glyphs are expected
(see `staffline_extractor.py`), and takes horizontal windows of the slice which
will be clustered. Some patches will have a glyph roughly in their center, and
the corresponding cluster centroids will be labeled as such.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import apache_beam as beam
from apache_beam import metrics
from moonlight.staves import staffline_extractor
from moonlight.util import more_iter_tools
import numpy as np
from six.moves import filter
import tensorflow as tf
def _filter_patch(patch, min_num_dark_pixels=10):
unused_patch_name, patch = patch
return np.greater_equal(np.sum(np.less(patch, 0.5)), min_num_dark_pixels)
class StafflinePatchesDoFn(beam.DoFn):
"""Runs the staffline patches graph."""
def __init__(self, patch_height, patch_width, num_stafflines, timeout_ms,
max_patches_per_page):
self.patch_height = patch_height
self.patch_width = patch_width
self.num_stafflines = num_stafflines
self.timeout_ms = timeout_ms
self.max_patches_per_page = max_patches_per_page
self.total_pages_counter = metrics.Metrics.counter(self.__class__,
'total_pages')
self.failed_pages_counter = metrics.Metrics.counter(self.__class__,
'failed_pages')
self.successful_pages_counter = metrics.Metrics.counter(
self.__class__, 'successful_pages')
self.empty_pages_counter = metrics.Metrics.counter(self.__class__,
'empty_pages')
self.total_patches_counter = metrics.Metrics.counter(
self.__class__, 'total_patches')
self.emitted_patches_counter = metrics.Metrics.counter(
self.__class__, 'emitted_patches')
def start_bundle(self):
self.extractor = staffline_extractor.StafflinePatchExtractor(
patch_height=self.patch_height,
patch_width=self.patch_width,
run_options=tf.RunOptions(timeout_in_ms=self.timeout_ms))
self.session = tf.Session(graph=self.extractor.graph)
def process(self, png_path):
self.total_pages_counter.inc()
try:
with self.session.as_default():
patches_iter = self.extractor.page_patch_iterator(png_path)
# pylint: disable=broad-except
except Exception:
logging.exception('Skipping failed music score (%s)', png_path)
self.failed_pages_counter.inc()
return
patches_iter = filter(_filter_patch, patches_iter)
if 0 < self.max_patches_per_page:
# Subsample patches.
patches = more_iter_tools.iter_sample(patches_iter,
self.max_patches_per_page)
else:
patches = list(patches_iter)
if not patches:
self.empty_pages_counter.inc()
self.total_patches_counter.inc(len(patches))
# Serialize each patch as an Example.
for patch_name, patch in patches:
example = tf.train.Example()
example.features.feature['name'].bytes_list.value.append(
patch_name.encode('utf-8'))
example.features.feature['features'].float_list.value.extend(
patch.ravel())
example.features.feature['height'].int64_list.value.append(patch.shape[0])
example.features.feature['width'].int64_list.value.append(patch.shape[1])
yield example
self.successful_pages_counter.inc()
# Patches are sub-sampled by this point.
self.emitted_patches_counter.inc(len(patches))
def finish_bundle(self):
self.session.close()
del self.extractor
del self.session
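# --- Pipeline sketch (added illustration, not part of the original module) ---
# How this DoFn is typically wired into a Beam pipeline; the PNG path, the
# TFRecord output location and the hyperparameter values are only assumptions.
#
#   with beam.Pipeline() as pipeline:
#       _ = (pipeline
#            | beam.Create(['/tmp/score_page_0.png'])
#            | beam.ParDo(StafflinePatchesDoFn(patch_height=18, patch_width=15,
#                                              num_stafflines=5, timeout_ms=60000,
#                                              max_patches_per_page=10))
#            | beam.Map(lambda example: example.SerializeToString())
#            | beam.io.WriteToTFRecord('/tmp/staffline_patches.tfrecord'))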
|
Lib/parallel/test/thread_scratch.py | pyparallel/pyparallel | 652 | 12734396 | <filename>Lib/parallel/test/thread_scratch.py
import ujson
import parallel
import datetime
from parallel import (
timer,
thread,
gmtime,
sys_stats,
socket_stats,
memory_stats,
context_stats,
thread_seq_id,
)
from parallel.http.server import (
quote_html,
text_response,
html_response,
json_serialization,
HttpServer,
)
thr = thread(interval=8, thread_characteristics="Low Latency")
class RateLimitedServer(HttpServer):
http11 = True
rate_limit = datetime.timedelta(milliseconds=16)
def hello(self, transport, data):
return b'Hello, World!'
def stats(self, transport, data):
return t.data or b''
def uni(self, transport, data):
return '<html><body>Works!</body></html>'
def bytearr(self, transport, data):
return bytearray(b'abcd')
def res(self, transport, data):
return thr.system_responsiveness
server1 = parallel.server('0.0.0.0', 8081)
parallel.register(server1, RateLimitedServer)
import parallel
def gmtime():
return parallel.gmtime()
t = parallel.thread(func=gmtime, interval=1000, thread_characteristics="Low Latency")
import parallel
import datetime
def gmtime():
return parallel.gmtime()
t = parallel.timer(func=gmtime, duetime=datetime.timedelta(milliseconds=1000), period=1000)
|
examples/grating_two_level/grating_coupler_2D_apodized.py | jbellevi/lumopt | 101 | 12734421 | <reponame>jbellevi/lumopt
"""
Copyright (c) 2019 Lumerical Inc. """
######## IMPORTS ########
# General purpose imports
import os
import numpy as np
import scipy as sp
# Optimization specific imports
from lumopt.utilities.load_lumerical_scripts import load_from_lsf
from lumopt.utilities.wavelengths import Wavelengths
from lumopt.geometries.polygon import FunctionDefinedPolygon
from lumopt.figures_of_merit.modematch import ModeMatch
from lumopt.optimizers.generic_optimizers import ScipyOptimizers
from lumopt.optimization import Optimization
from lumopt.utilities.materials import Material
from numpy.random import rand
def runGratingOptimization(bandwidth_in_nm, etch_depth, n_grates, params):
bounds = [(0.1, 1)]*4
bounds[0] = (-3,3) #< Starting position
bounds[1] = (0,0.1) #< Scaling parameter R
bounds[2] = (1.5,3) #< Parameter a
bounds[3] = (0,2) #< Parameter b
def grating_params_pos(params, output_waveguide_length = 0.5e-6, height = 220e-9, y0 = 0):
x_begin = -3e-6
y3 = y0+height
y1 = y3-etch_depth
x_start = params[0]*1e-6 #< First parameter is the starting position
x0 = x_start
R = params[1]*1e6 #< second parameter (unit is 1/um)
a = params[2] #< Third parameter (dim-less)
b = params[3] #< Fourth parameter (dim-less)
verts = np.array( [ [x_begin,y0],[x_begin,y3],[x0,y3],[x0,y1] ] )
lambda_c = 1.55e-6
F0 = 0.95
## Iterate over all but the last
for i in range(n_grates-1):
F = F0-R*(x0-x_start)
Lambda = lambda_c / (a+F*b)
x1 = x0 + (1-F)*Lambda #< Width of the etched region
x2 = x0 + Lambda #< Rest of cell
verts = np.concatenate((verts,[[x1,y1],[x1,y3],[x2,y3],[x2,y1]]),axis=0)
x0 = x2
F = F0-R*(x0-x_start)
Lambda = lambda_c / (a+F*b)
x1 = x0 + (1-F)*Lambda #< Width of the etched region
x_end = x1+output_waveguide_length
verts = np.concatenate((verts,[[x1,y1],[x1,y3],[x_end,y3],[x_end,y0]]),axis=0)
return verts
geometry = FunctionDefinedPolygon(func = grating_params_pos, initial_params = params, bounds = bounds, z = 0.0, depth = 110e-9, eps_out = 1.44 ** 2, eps_in = 3.47668 ** 2, edge_precision = 5, dx = 1e-3)
######## DEFINE FIGURE OF MERIT ########
fom = ModeMatch(monitor_name = 'fom', mode_number = 1, direction = 'Backward', target_T_fwd = lambda wl: np.ones(wl.size), norm_p = 1)
######## DEFINE OPTIMIZATION ALGORITHM ########
optimizer = ScipyOptimizers(max_iter = 25, method = 'L-BFGS-B', scaling_factor = 1, pgtol = 1e-6)
######## DEFINE BASE SIMULATION ########
base_script = load_from_lsf(os.path.join(os.path.dirname(__file__), 'grating_coupler_2D_2etch.lsf'))
######## PUT EVERYTHING TOGETHER ########
lambda_start = 1550 - bandwidth_in_nm/2
lambda_end = 1550 + bandwidth_in_nm/2
lambda_pts = int(bandwidth_in_nm/10)+1
wavelengths = Wavelengths(start = lambda_start*1e-9, stop = lambda_end*1e-9, points = lambda_pts)
opt = Optimization(base_script = base_script, wavelengths = wavelengths, fom = fom, geometry = geometry, optimizer = optimizer, hide_fdtd_cad = True, use_deps = True)
######## RUN THE OPTIMIZER ########
opt.run()
if __name__ == "__main__":
bandwidth_in_nm = 0 #< Only optimiza for center frequency of 1550nm
initial_params = [0, 0.03, 2.4, 0.5369]
runGratingOptimization(bandwidth_in_nm=bandwidth_in_nm, etch_depth=80e-9, n_grates = 25, params=initial_params)
|
shopyo/shopyoapi/assets.py | Bnseamster/shopyo | 235 | 12734425 | <reponame>Bnseamster/shopyo
from flask import url_for
from flask import current_app
def get_static(boxormodule, filename):
if current_app.config["DEBUG"] == True:
return url_for("devstatic", boxormodule=boxormodule, filename=filename)
else:
return url_for(
"static", filename="modules/{}/{}".format(boxormodule, filename)
)
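# Illustrative call (the module and asset names are assumptions, not defined here):
#   get_static("box__default/base", "css/style.css")
# resolves to the 'devstatic' endpoint while DEBUG is on, and otherwise to
# static/modules/box__default/base/css/style.css under the regular static route.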
|
Code/KafkaProducer.py | dong-yf/100-Days-Of-ML-Code | 17,496 | 12734462 | #!/usr/bin/python
from kafka import KafkaProducer
kafkaHosts=["kafka01.paas.longfor.sit:9092"
,"kafka02.paas.longfor.sit:9092"
,"kafka03.paas.longfor.sit:9092"]
producer = KafkaProducer(bootstrap_servers=kafkaHosts);
for _ in range(20):
producer.send("testapplog_plm-prototype",b"Hello....")
producer.flush(); |
pretrain/pri3d/main.py | kudo1026/Pri3D | 103 | 12734476 | <reponame>kudo1026/Pri3D
#!/usr/bin/env python3
import hydra
import os
from common.distributed import multi_proc_run
def single_proc_run(config):
from common.pretrain import Pretrain
#dir_path = os.path.dirname(os.path.realpath(__file__))
trainer = Pretrain(config)
trainer.train()
@hydra.main(config_path='config', config_name='default.yaml')
def main(config):
if config.distributed.num_gpus > 1:
multi_proc_run(config.distributed.num_gpus, fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
if __name__ == '__main__':
#os.environ["CUDA_VISIBLE_DEVICES"]="5"
os.environ["OMP_NUM_THREADS"]="12"
main()
|
backend/apps/marquees/admin.py | FroggyTaipei/froggy-service | 174 | 12734486 | from django.contrib import admin
from django.forms import TextInput, ModelForm
from suit.admin import SortableModelAdmin
from .models import MarqueeMessage
class MarqueeMessageForm(ModelForm):
class Meta:
widgets = {
'message': TextInput(attrs={'class': 'input-xxlarge'}),
}
class MarqueeMessageAdmin(SortableModelAdmin):
form = MarqueeMessageForm
sortable = 'order'
list_editable = ('display', 'order')
list_display = ('message', 'display', 'order')
admin.site.register(MarqueeMessage, MarqueeMessageAdmin)
|
config/base_station/gs_gps.py | leozz37/makani | 1,178 | 12734514 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GS GPS parameters."""
from makani.config import mconfig
from makani.control import system_types
import numpy as np
@mconfig.Config(deps={
'gs_model': 'base_station.gs_model',
'test_site': 'common.test_site',
})
def MakeParams(params):
"""Make ground station gps parameters."""
if params['gs_model'] == system_types.kGroundStationModelTopHat:
gps_primary_antenna_dir = [0.0, 0.0, -1.0]
gps_primary_pos = [1.418, -1.657, -2.417]
# TopHat doesn't actually have a secondary gps.
gps_secondary_antenna_dir = gps_primary_antenna_dir
gps_secondary_pos = gps_primary_pos
# Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the perch frame. Note: The TopHat does not have a
# GPS compass, but this value is set for historical consistency.
gps_compass_to_perch_azi = -2.440
elif params['gs_model'] == system_types.kGroundStationModelGSv1:
gps_primary_antenna_dir = [0.0, 0.0, -1.0]
# Position measured on 2015-06-15.
gps_primary_pos = [0.0, 0.0, -2.94]
# GSv1 doesn't actually have a secondary gps.
gps_secondary_antenna_dir = gps_primary_antenna_dir
gps_secondary_pos = gps_primary_pos
# Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the perch frame
gps_compass_to_perch_azi = -2.440
elif params['gs_model'] == system_types.kGroundStationModelGSv2:
gps_primary_antenna_dir = [0.0, 0.0, -1.0]
gps_secondary_antenna_dir = [0.0, 0.0, -1.0]
if params['test_site'] == system_types.kTestSiteParkerRanch:
# See b/137283974 for details.
gps_primary_pos = [-0.002, 0.011, -6.7]
gps_secondary_pos = [-2.450, -0.428, -6.827]
elif params['test_site'] == system_types.kTestSiteNorway:
# See b/137660975 for details.
gps_primary_pos = [-0.002, 0.011, -6.7]
gps_secondary_pos = [-2.450, -0.428, -6.757]
else:
assert False, 'Unsupported test site.'
# Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the platform frame. See b/118710931.
gps_compass_to_perch_azi = np.deg2rad(169.84)
else:
assert False, 'Unsupported ground station model.'
return {
# Position [m] of the GS GPS antenna in the platform frame.
# NOTE: The direction of the antennae is currently not used.
'primary_antenna_p': {
'antenna_dir': gps_primary_antenna_dir,
'pos': gps_primary_pos,
},
'secondary_antenna_p': {
'antenna_dir': gps_secondary_antenna_dir,
'pos': gps_secondary_pos,
},
# Calibration for the ground station compass ([#], [rad], [#]).
# The bias is used to account for the angle between the perch
# frame and the NovAtel differential GPS receiver.
# TODO: Remove this parameter once the computation of
# compass heading from the primary and secondary antennae is implemented.
'heading_cal': {
'scale': 1.0, 'bias': gps_compass_to_perch_azi, 'bias_count': 0}
}
|
options/test_options.py | Yijunmaverick/Im2Pencil | 176 | 12734532 | <filename>options/test_options.py<gh_stars>100-1000
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
self.parser.add_argument('--outline_style', type=int, default=0, help='which edge style')
self.parser.add_argument('--shading_style', type=int, default=0, help='which shading style')
self.parser.add_argument('--Sigma', type=float, default=2.5, help='sigma for XDoG')
self.parser.add_argument('--pad', type=int, default=10)
self.parser.add_argument('--r', type=int, default=11)
self.parser.add_argument('--eps', type=float, default=0.1)
self.isTrain = False
|
fewshot/configs/cnn_config_pb2.py | renmengye/inc-few-shot-attractor-public | 122 | 12734545 | <reponame>renmengye/inc-few-shot-attractor-public
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fewshot/configs/cnn_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='fewshot/configs/cnn_config.proto',
package='fewshot.configs',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n fewshot/configs/cnn_config.proto\x12\x0f\x66\x65wshot.configs\"\xa1\x01\n\tCNNConfig\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x13\n\x0bnum_channel\x18\x03 \x01(\x05\x12\x13\n\x0bnum_filters\x18\x04 \x03(\x05\x12\x0f\n\x07strides\x18\x05 \x03(\x05\x12\x0f\n\x07pool_fn\x18\x06 \x03(\t\x12\x14\n\x0cpool_strides\x18\x07 \x03(\x05\x12\x13\n\x0b\x63onv_act_fn\x18\x08 \x03(\t')
)
_CNNCONFIG = _descriptor.Descriptor(
name='CNNConfig',
full_name='fewshot.configs.CNNConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='fewshot.configs.CNNConfig.height', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='fewshot.configs.CNNConfig.width', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_channel', full_name='fewshot.configs.CNNConfig.num_channel', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_filters', full_name='fewshot.configs.CNNConfig.num_filters', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strides', full_name='fewshot.configs.CNNConfig.strides', index=4,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pool_fn', full_name='fewshot.configs.CNNConfig.pool_fn', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pool_strides', full_name='fewshot.configs.CNNConfig.pool_strides', index=6,
number=7, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conv_act_fn', full_name='fewshot.configs.CNNConfig.conv_act_fn', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=54,
serialized_end=215,
)
DESCRIPTOR.message_types_by_name['CNNConfig'] = _CNNCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CNNConfig = _reflection.GeneratedProtocolMessageType('CNNConfig', (_message.Message,), dict(
DESCRIPTOR = _CNNCONFIG,
__module__ = 'fewshot.configs.cnn_config_pb2'
# @@protoc_insertion_point(class_scope:fewshot.configs.CNNConfig)
))
_sym_db.RegisterMessage(CNNConfig)
# @@protoc_insertion_point(module_scope)
|
xknx/telegram/__init__.py | iligiddi/xknx | 179 | 12734565 | <gh_stars>100-1000
"""
Module for handling KNX primitves.
* KNX Addresses
* KNX Telegrams
"""
# flake8: noqa
from .address import GroupAddress, GroupAddressType, IndividualAddress
from .address_filter import AddressFilter
from .telegram import Telegram, TelegramDirection
__all__ = [
"AddressFilter",
"GroupAddress",
"GroupAddressType",
"IndividualAddress",
"Telegram",
"TelegramDirection",
]
|
resources/lib/ambilight_controller.py | metbosch/script.kodi.http.ambilight | 172 | 12734571 | import lights
from tools import xbmclog
class AmbilightController(lights.Controller):
def __init__(self, *args, **kwargs):
super(AmbilightController, self).__init__(*args, **kwargs)
def on_playback_start(self):
if self.settings.ambilight_start_dim_enable:
self.save_state_as_initial()
xbmclog('Kodi Hue: In AmbilightController.on_playback_start() '
'dimming ambilight group')
self.set_state(
bri=self.settings.ambilight_start_dim,
force_on=self.settings.force_light_on,
)
def on_playback_pause(self):
if self.settings.ambilight_start_dim_enable:
xbmclog('Kodi Hue: In AmbilightController.on_playback_pause() '
'undimming ambilight group')
if self.settings.ambilight_pause_bri_override:
bri = self.settings.ambilight_pause_bri
self.set_state(
bri=bri,
force_on=self.settings.force_light_on,
)
else:
self.restore_initial_state(
force_on=self.settings.force_light_on,
)
def on_playback_stop(self):
if self.settings.ambilight_start_dim_enable:
xbmclog('Kodi Hue: In AmbilightController.on_playback_stop() '
'undimming ambilight group')
if self.settings.ambilight_stop_bri_override:
self.set_state(
bri=self.settings.ambilight_stop_bri,
force_on=self.settings.force_light_on,
)
else:
self.restore_initial_state(
force_on=self.settings.force_light_on,
)
else:
self.restore_initial_state(
force_on=self.settings.force_light_on,
)
|
gobigger/agents/__init__.py | luanshaotong/GoBigger | 189 | 12734649 | from .base_agent import BaseAgent
from .bot_agent import BotAgent
|
integration_test/test_m_flag.py | robintw/recipy | 451 | 12734659 | """
Tests of 'python -m recipy' usage.
This script uses a Python script (run_numpy_no_recipy.py) about
which the following assumptions are made:
* Co-located with this test script, in the same directory.
* Expects two arguments via the command-line: an input file
name and an output file name.
* Reads the input file and creates the output file using a library
which recipy is configured to log.
"""
# Copyright (c) 2016 University of Edinburgh.
import os
import os.path
import shutil
import tempfile
from integration_test import helpers
from integration_test import recipy_environment as recipyenv
class TestMflag:
"""
Tests of 'python -m recipy' usage.
"""
SCRIPT_NAME = "run_numpy_no_recipy.py"
""" Test script assumed to be in same directory as this class. """
script = ""
""" Absolute path to test script. """
original_script = ""
""" Absolute path to original copy of test script. """
directory = ""
""" Absolute path to temporary directory for these tests. """
def setup_method(self, method):
"""
py.test setup function, creates test directory in $TEMP,
sets 'script' with path to SCRIPT_NAME and copies script from
'script' to 'original_script'.
:param method: Test method
:type method: function
"""
TestMflag.directory = tempfile.mkdtemp(TestMflag.__name__)
TestMflag.script = os.path.join(os.path.dirname(__file__),
TestMflag.SCRIPT_NAME)
TestMflag.original_script = TestMflag.script + ".orig"
shutil.copy(TestMflag.script, TestMflag.original_script)
def teardown_method(self, method):
"""
py.test teardown function, deletes test directory in $TEMP,
and moves 'original_script' to 'script'.
"""
if os.path.isdir(TestMflag.directory):
shutil.rmtree(TestMflag.directory)
os.remove(TestMflag.script)
os.rename(TestMflag.original_script, TestMflag.script)
def test_m_recipy(self):
"""
Running 'python -m recipy script' and the same script that
inclues 'import recipy' should give the same results in the
log (aside from their 'unique_id', 'diff', 'date',
'exit_date', 'command_args', 'inputs' and 'outputs').
"""
input_file = os.path.join(TestMflag.directory, "input.csv")
with open(input_file, "w") as csv_file:
csv_file.write("1,4,9,16\n")
output_file = os.path.join(TestMflag.directory, "output.csv")
exit_code, _ = helpers.execute_python(
["-m", "recipy", TestMflag.script,
input_file, output_file])
assert exit_code == 0, ("Unexpected exit code " + str(exit_code))
module_log, _ = helpers.get_log(recipyenv.get_recipydb())
helpers.enable_recipy(TestMflag.original_script, TestMflag.script)
exit_code, _ = helpers.execute_python(
["-m", "recipy", TestMflag.script,
input_file, output_file])
assert exit_code == 0, ("Unexpected exit code " + str(exit_code))
import_log, _ = helpers.get_log(recipyenv.get_recipydb())
for key in ["inputs", "outputs"]:
assert len(module_log[key]) == len(import_log[key]),\
("Expected same number of " + key + " files")
for index in range(0, len(module_log[key])):
[import_file, _] = module_log[key][index]
[module_file, _] = import_log[key][index]
assert os.path.basename(import_file) ==\
os.path.basename(module_file),\
"Expected local file names to be equal"
# Remove fields that are specific to a run.
for key in ["unique_id", "diff", "date", "exit_date",
"command_args", "inputs", "outputs"]:
if key in module_log:
del module_log[key]
del import_log[key]
assert module_log == import_log,\
("Expected " + str(module_log) + " to equal " +
str(import_log))
|
source/test_proxe_s2.py | jiyeonkim127/PSI | 138 | 12734681 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys, os, glob
import json
import argparse
import numpy as np
import open3d as o3d
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch.optim as optim
from torch.optim import lr_scheduler
import smplx
from human_body_prior.tools.model_loader import load_vposer
import chamfer_pytorch.dist_chamfer as ext
from cvae import BodyParamParser, HumanCVAES2, GeometryTransformer
from batch_gen import BatchGeneratorTest
class TestOP:
def __init__(self, testconfig):
for key, val in testconfig.items():
setattr(self, key, val)
if not os.path.exists(self.ckpt_dir):
print('--[ERROR] checkpoints do not exist')
sys.exit()
if self.use_cont_rot:
n_dim_body=72+3
else:
n_dim_body=72
self.model_h_latentD = 256
self.model_h = HumanCVAES2(latentD_g=self.model_h_latentD,
latentD_l=self.model_h_latentD,
n_dim_body=n_dim_body,
n_dim_scene=self.model_h_latentD,
test=True)
### body mesh model
self.vposer, _ = load_vposer(self.vposer_ckpt_path, vp_model='snapshot')
self.body_mesh_model = smplx.create(self.human_model_path, model_type='smplx',
gender='neutral', ext='npz',
num_pca_comps=12,
create_global_orient=True,
create_body_pose=True,
create_betas=True,
create_left_hand_pose=True,
create_right_hand_pose=True,
create_expression=True,
create_jaw_pose=True,
create_leye_pose=True,
create_reye_pose=True,
create_transl=True,
batch_size=self.n_samples
)
def test(self, batch_gen):
self.model_h.eval()
self.model_h.to(self.device)
self.vposer.to(self.device)
self.body_mesh_model.to(self.device)
## load checkpoints
ckp_list = sorted(glob.glob(os.path.join(self.ckpt_dir,'epoch-*.ckp')),
key=os.path.getmtime)
ckp_path = ckp_list[-1]
checkpoint = torch.load(ckp_path)
print('[INFO] load checkpoints: ' + ckp_path)
self.model_h.load_state_dict(checkpoint['model_h_state_dict'])
## get a batch of data for testing
batch_gen.reset()
test_data = batch_gen.next_batch(batch_size=1)
depth_batch = test_data[0]
seg_batch = test_data[1]
max_d_batch = test_data[2]
cam_int_batch = test_data[3]
cam_ext_batch = test_data[4]
## pass data to network
xs = torch.cat([depth_batch, seg_batch],dim=1)
xs_n = xs.repeat(self.n_samples, 1,1,1)
noise_batch_g = torch.randn([self.n_samples, self.model_h_latentD], dtype=torch.float32,
device=self.device)
noise_batch_l = torch.randn([self.n_samples, self.model_h_latentD], dtype=torch.float32,
device=self.device)
if self.use_cont_rot:
xhnr_gen = self.model_h.sample(xs_n, noise_batch_g, noise_batch_l)
xhn_gen = GeometryTransformer.convert_to_3D_rot(xhnr_gen)
else:
            xhnr_gen = self.model_h.sample(xs_n, noise_batch_g, noise_batch_l)
            xhn_gen = xhnr_gen  # no continuous-rotation conversion needed in this branch
xh_gen = GeometryTransformer.recover_global_T(xhn_gen, cam_int_batch, max_d_batch)
body_param_list = BodyParamParser.body_params_encapsulate(xh_gen)
scene_name = os.path.abspath(self.scene_file_path).split("/")[-2].split("_")[0]
outdir = os.path.join(self.output_dir, scene_name)
if not os.path.exists(outdir):
os.makedirs(outdir)
print('[INFO] save results to: '+outdir)
for ii, body_param in enumerate(body_param_list):
body_param['cam_ext'] = cam_ext_batch.detach().cpu().numpy()
body_param['cam_int'] = cam_int_batch.detach().cpu().numpy()
outfilename = os.path.join(outdir, 'body_gen_{:06d}.pkl'.format(ii+900))
outfile = open(outfilename, 'wb')
pickle.dump(body_param, outfile)
outfile.close()
if __name__ == '__main__':
proxe_path = '/is/cluster/yzhang/PROXE'
test_file_list = ['MPH16_00157_01', 'N0SittingBooth_00162_01', 'MPH1Library_00034_01', 'N3OpenArea_00157_01']
for fileid in range(len(test_file_list)):
testconfig={
'human_model_path': '/is/ps2/yzhang/body_models/VPoser',
'scene_3d_path': os.path.join(proxe_path, 'scenes/'),
'vposer_ckpt_path': '/is/ps2/yzhang/body_models/VPoser/vposer_v1_0',
'device': torch.device("cuda" if torch.cuda.is_available() else "cpu"),
'ckpt_dir': 'checkpoints/checkpoints_virtualcams3_modelS2_batch32_epoch30_LRS0.0003_LRH0.0003_LossVposer0.001_LossKL0.1_LossContact0.001_LossCollision0.01',
'test_data_path': os.path.join(proxe_path, 'snapshot/'+test_file_list[fileid]),
'scene_file_path':os.path.join(proxe_path, 'snapshot/'+test_file_list[fileid]+'/rec_000000.mat'),
'n_samples': 300,
'use_cont_rot':True,
'output_dir': 'results_proxe_stage2_sceneloss/virtualcams'
}
batch_gen = BatchGeneratorTest(dataset_path=testconfig['test_data_path'],
device=testconfig['device'])
test_op = TestOP(testconfig)
test_op.test(batch_gen)
|
NAS-Models/lib/models/nas_net.py | zhangting2020/AutoDL | 155 | 12734691 | import paddle
import paddle.fluid as fluid
from .operations import OPS
def AuxiliaryHeadCIFAR(inputs, C, class_num):
print('AuxiliaryHeadCIFAR : inputs-shape : {:}'.format(inputs.shape))
temp = fluid.layers.relu(inputs)
temp = fluid.layers.pool2d(
temp, pool_size=5, pool_stride=3, pool_padding=0, pool_type='avg')
temp = fluid.layers.conv2d(
temp,
filter_size=1,
num_filters=128,
stride=1,
padding=0,
act=None,
bias_attr=False)
temp = fluid.layers.batch_norm(input=temp, act='relu', bias_attr=None)
temp = fluid.layers.conv2d(
temp,
filter_size=1,
num_filters=768,
stride=2,
padding=0,
act=None,
bias_attr=False)
temp = fluid.layers.batch_norm(input=temp, act='relu', bias_attr=None)
print('AuxiliaryHeadCIFAR : last---shape : {:}'.format(temp.shape))
predict = fluid.layers.fc(input=temp, size=class_num, act='softmax')
return predict
def InferCell(name, inputs_prev_prev, inputs_prev, genotype, C_prev_prev,
C_prev, C, reduction, reduction_prev):
print(
'[{:}] C_prev_prev={:} C_prev={:}, C={:}, reduction_prev={:}, reduction={:}'.
format(name, C_prev_prev, C_prev, C, reduction_prev, reduction))
print('inputs_prev_prev : {:}'.format(inputs_prev_prev.shape))
print('inputs_prev : {:}'.format(inputs_prev.shape))
inputs_prev_prev = OPS['skip_connect'](inputs_prev_prev, C_prev_prev, C, 2
if reduction_prev else 1)
inputs_prev = OPS['skip_connect'](inputs_prev, C_prev, C, 1)
print('inputs_prev_prev : {:}'.format(inputs_prev_prev.shape))
print('inputs_prev : {:}'.format(inputs_prev.shape))
if reduction: step_ops, concat = genotype.reduce, genotype.reduce_concat
else: step_ops, concat = genotype.normal, genotype.normal_concat
states = [inputs_prev_prev, inputs_prev]
for istep, operations in enumerate(step_ops):
op_a, op_b = operations
# the first operation
#print ('-->>[{:}/{:}] [{:}] + [{:}]'.format(istep, len(step_ops), op_a, op_b))
stride = 2 if reduction and op_a[1] < 2 else 1
tensor1 = OPS[op_a[0]](states[op_a[1]], C, C, stride)
stride = 2 if reduction and op_b[1] < 2 else 1
tensor2 = OPS[op_b[0]](states[op_b[1]], C, C, stride)
state = fluid.layers.elementwise_add(x=tensor1, y=tensor2, act=None)
assert tensor1.shape == tensor2.shape, 'invalid shape {:} vs. {:}'.format(
tensor1.shape, tensor2.shape)
print('-->>[{:}/{:}] tensor={:} from {:} + {:}'.format(
istep, len(step_ops), state.shape, tensor1.shape, tensor2.shape))
states.append(state)
states_to_cat = [states[x] for x in concat]
outputs = fluid.layers.concat(states_to_cat, axis=1)
print('-->> output-shape : {:} from concat={:}'.format(outputs.shape,
concat))
return outputs
# NASCifarNet(inputs, 36, 6, 3, 10, 'xxx', True)
def NASCifarNet(ipt, C, N, stem_multiplier, class_num, genotype, auxiliary):
# cifar head module
C_curr = stem_multiplier * C
stem = fluid.layers.conv2d(
ipt,
filter_size=3,
num_filters=C_curr,
stride=1,
padding=1,
act=None,
bias_attr=False)
stem = fluid.layers.batch_norm(input=stem, act=None, bias_attr=None)
print('stem-shape : {:}'.format(stem.shape))
# N + 1 + N + 1 + N cells
layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
reduction_prev = False
auxiliary_pred = None
cell_results = [stem, stem]
for index, (C_curr,
reduction) in enumerate(zip(layer_channels, layer_reductions)):
xstr = '{:02d}/{:02d}'.format(index, len(layer_channels))
cell_result = InferCell(xstr, cell_results[-2], cell_results[-1],
genotype, C_prev_prev, C_prev, C_curr,
reduction, reduction_prev)
reduction_prev = reduction
C_prev_prev, C_prev = C_prev, cell_result.shape[1]
cell_results.append(cell_result)
if auxiliary and reduction and C_curr == C * 4:
auxiliary_pred = AuxiliaryHeadCIFAR(cell_result, C_prev, class_num)
global_P = fluid.layers.pool2d(
input=cell_results[-1], pool_size=8, pool_type='avg', pool_stride=1)
predicts = fluid.layers.fc(input=global_P, size=class_num, act='softmax')
print('predict-shape : {:}'.format(predicts.shape))
if auxiliary_pred is None:
return predicts
else:
return [predicts, auxiliary_pred]
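# --- Usage sketch (added illustration, not part of the original module) ---
# Building the CIFAR network inside a Paddle static graph; `genotype` is assumed
# to be a DARTS-style namedtuple with normal/normal_concat/reduce/reduce_concat
# fields provided elsewhere in this repository.
#
#   images = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')
#   predicts, aux_predicts = NASCifarNet(images, C=36, N=6, stem_multiplier=3,
#                                        class_num=10, genotype=genotype,
#                                        auxiliary=True)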
|
tests/test_statistics.py | MORE-EU/matrixprofile | 262 | 12734720 | <reponame>MORE-EU/matrixprofile<filename>tests/test_statistics.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import os
import pytest
import numpy as np
from matrixprofile.algorithms.statistics import statistics
def test_invalid_ts_not_1d():
ts = np.array([[1, 1], [1, 1]])
w = 2
with pytest.raises(ValueError) as excinfo:
statistics(ts, w)
assert 'The time series must be 1D' in str(excinfo.value)
def test_invalid_ts_not_array():
ts = None
w = 2
with pytest.raises(ValueError) as excinfo:
statistics(ts, w)
assert 'ts must be array like' in str(excinfo.value)
def test_invalid_window_size_not_int():
ts = np.arange(10)
w = 's'
with pytest.raises(ValueError) as excinfo:
statistics(ts, w)
assert 'Expecting int for window_size' in str(excinfo.value)
def test_invalid_window_size_too_large():
ts = np.arange(10)
w = 11
with pytest.raises(ValueError) as excinfo:
statistics(ts, w)
assert 'Window size cannot be greater than len(ts)' in str(excinfo.value)
def test_invalid_window_size_too_small():
ts = np.arange(10)
w = 2
with pytest.raises(ValueError) as excinfo:
statistics(ts, w)
assert 'Window size cannot be less than 3' in str(excinfo.value)
def test_valid():
ts = np.array([1, 3, 2, 4, 5, 1, 1, 1, 2, 4, 9, 7])
w = 4
ts_stats = statistics(ts, w)
assert(ts_stats['min'] == 1)
assert(ts_stats['max'] == 9)
np.testing.assert_almost_equal(ts_stats['mean'], 3.3333333)
np.testing.assert_almost_equal(ts_stats['std'], 2.494438257)
assert(ts_stats['median'] == 2.5)
np.testing.assert_almost_equal(ts_stats['moving_min'], np.array([1, 2, 1, 1, 1, 1, 1, 1, 2]))
np.testing.assert_almost_equal(ts_stats['moving_max'], np.array([4, 5, 5, 5, 5, 2, 4, 9, 9]))
np.testing.assert_almost_equal(ts_stats['moving_mean'], np.array([2.5, 3.5, 3.0, 2.75, 2.0, 1.25, 2.0, 4.0, 5.5]))
np.testing.assert_almost_equal(ts_stats['moving_std'], np.array([1.11803399, 1.11803399, 1.58113883, 1.78535711, 1.73205081, 0.4330127, 1.22474487, 3.082207, 2.6925824]))
np.testing.assert_almost_equal(ts_stats['moving_median'], np.array([2.5, 3.5, 3.0, 2.5, 1.0, 1.0, 1.5, 3.0, 5.5]))
np.testing.assert_equal(ts_stats['ts'], ts)
assert(ts_stats['window_size'] == w)
assert(ts_stats['class'] == 'Statistics')
|
combinations/solution.py | mahimadubey/leetcode-python | 528 | 12734723 | """
Given two integers n and k, return all possible combinations of k numbers out
of 1 ... n.
For example,
If n = 4 and k = 2, a solution is:
[
[2,4],
[3,4],
[2,3],
[1,2],
[1,3],
[1,4],
]
"""
class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
a = range(1, n + 1)
return self.combine_aux(a, k)
def combine_aux(self, a, k):
if k == 0:
return [[]]
else:
res = []
for i, e in enumerate(a):
rest_comb = self.combine_aux(a[i + 1:], k - 1)
for comb in rest_comb:
comb.insert(0, e)
res += rest_comb
return res
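# Quick check (added illustration): reproduces the n = 4, k = 2 case from the
# docstring above.
if __name__ == '__main__':
    print(Solution().combine(4, 2))
    # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]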
|
tests/syntax/unmatched_closing_bracket_2.py | matan-h/friendly | 287 | 12734771 | <filename>tests/syntax/unmatched_closing_bracket_2.py
"""Should raise SyntaxError: invalid syntax"""
x = (1,
2,
3]
|
bumpversion/__main__.py | jaap3/bump2version | 1,289 | 12734803 | <filename>bumpversion/__main__.py<gh_stars>1000+
__import__('bumpversion').main()
|
release/scripts/presets/framerate/50.py | rbabari/blender | 365 | 12734815 | import bpy
bpy.context.scene.render.fps = 50
bpy.context.scene.render.fps_base = 1
|
ci/bootstrap_dockerignore.py | decentral1se/purerpc | 143 | 12734843 | #! /usr/bin/env python
import os
def main():
if os.path.exists(".dockerignore"):
print(".dockerignore already exists, remove it to proceed")
exit(-1)
with open(".gitignore", "r") as fin, open(".dockerignore", "w") as fout:
fout.write("# This file was automatically generated by ./ci/bootstrap_dockerignore.py\n")
fout.write("# based on the contents of .gitignore\n\n")
for line in fin:
if line[0] in "#!/\n":
fout.write(line)
else:
fout.write("**/" + line)
if __name__ == "__main__":
main()
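# Example of the rewrite performed above (illustrative .gitignore lines):
#   '*.pyc'      -> '**/*.pyc'    (patterns are made to match at any depth)
#   '/build'     -> '/build'      (root-anchored entries are copied unchanged)
#   '# comment'  -> '# comment'   (comments, negations and blank lines pass through)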
|
src/python/e2e-test-runner/e2e_test_runner/test_web_auth.py | inickles/grapl | 313 | 12734863 | """
TODO (wimax July 2020): I don't see anything in here that screams
"e2e test"; this certainly seems like more of an integration test.
There's nothing here that does anything cross-service.
Perhaps it's just "does it work in AWS?"
"""
from grapl_tests_common.clients.grapl_web_client import GraplWebClient
def test_real_user_fake_password() -> None:
# Exercises the PasswordVerification case in grapl-web-ui login.rs
resp = GraplWebClient().real_user_fake_password()
assert resp.status_code == 401
def test_nonexistent_user() -> None:
# Exercises the UserRecordNotFound case in grapl-web-ui login.rs
resp = GraplWebClient().nonexistent_user()
assert resp.status_code == 401
def test_check__empty_creds() -> None:
resp = GraplWebClient().empty_creds()
assert resp.status_code == 500
# TODO: https://github.com/grapl-security/issue-tracker/issues/686
# Add a `test_no_content_type()` (it currently 200s for some reason)
|
scripts/data/kitti_extract_ground_plane.py | wuzzh/master_thesis_code | 206 | 12734928 | """
Script for extracting the ground plane from the KITTI dataset.
We need to determine the ground plane position and orientation in order to be able to reconstruct
points on it, which we are trying to detect.
We will collect all the points on the ground plane from the dataset and then fit a plane to them
with RANSAC.
----------------------------------------------------------------------------------------------------
python kitti_extract_ground_plane.py path_labels
----------------------------------------------------------------------------------------------------
"""
__date__ = '04/13/2017'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import argparse
import os
import numpy as np
import random
# import matplotlib
# matplotlib.use('Agg') # Prevents from using X interface for plotting
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from shared.geometry import R3x3_y, t3x1, Rt4x4
####################################################################################################
# DEFINITIONS #
####################################################################################################
# Parameter for RANSAC
# Distance from the plane (in meters), which is considered as an inlier region
INLIER_TRHESHOLD = 1.0
# Number of estimation iterations carried out by RANSAC
RANSAC_ITERS = 10000
####################################################################################################
# FUNCTIONS #
####################################################################################################
def plane_3p(p1, p2, p3):
"""
Computes the equation of a plane passing through the 3 given points.
Input:
p1, p2, p3: 3x1 np.matrix coordinates of points in the plane
Returns:
[a, b, c, d] coefficients as a 1x4 np.matrix
"""
l1 = p2 - p1
l2 = p3 - p1
normal = np.cross(l1, l2, axis=0)
d = - (normal[0,0]*p1[0,0] + normal[1,0]*p1[1,0] + normal[2,0]*p1[2,0])
return np.asmatrix([normal[0,0], normal[1,0], normal[2,0], d])
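# Quick check (added illustration): the plane through (0,0,0), (1,0,0) and
# (0,1,0) is z = 0, so the returned coefficients are [0, 0, 1, 0].
#
#   p1 = np.asmatrix([[0.0], [0.0], [0.0]])
#   p2 = np.asmatrix([[1.0], [0.0], [0.0]])
#   p3 = np.asmatrix([[0.0], [1.0], [0.0]])
#   plane_3p(p1, p2, p3)  # -> matrix([[0., 0., 1., 0.]])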
def show_X_and_gp(gp_X_4xn, gp_1x4):
"""
Show a 3D plot of the estimated ground plane.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal')
ax.scatter(np.array(gp_X_4xn[2,0:1000]), np.array(gp_X_4xn[0,0:1000]), np.array(-gp_X_4xn[1,0:1000]), color='red')
X = np.arange(-20, 20, 1)
Y = np.arange(-1, 10, 1)
X, Y = np.meshgrid(X, Y)
Z = - (gp_1x4[0,0]*X + gp_1x4[0,1]*Y + gp_1x4[0,3]) / gp_1x4[0,2]
ax.plot_surface(Z, X, -Y, linewidth=0, alpha=0.5, antialiased=True)
# Bounding box of the car
ax.plot([3,3,3,3,3], [1.5, 1.5, -1.5, -1.5, 1.5], [0,-1.9,-1.9,0,0], color='green')
ax.plot([-3,-3,-3,-3,-3], [1.5, 1.5, -1.5, -1.5, 1.5], [0,-1.9,-1.9,0,0], color='red')
ax.plot([3, -3], [1.5, 1.5], [0,0], color='blue')
ax.plot([3, -3], [1.5, 1.5], [-1.9,-1.9], color='blue')
ax.plot([3, -3], [-1.5, -1.5], [0,0], color='blue')
ax.plot([3, -3], [-1.5, -1.5], [-1.9,-1.9], color='blue')
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
ax.set_zlim(-100, 100)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
plt.show()
####################################################################################################
# CLASSES #
####################################################################################################
class GroundPlaneEstimator(object):
"""
Takes care of the estimation of the ground plane position in the KITTI dataset.
"""
def __init__(self, path_labels):
"""
Input:
path_labels: Path to the "label_2" folder of the KITTI dataset
"""
super(GroundPlaneEstimator, self).__init__()
self.path_labels = path_labels
self.gp_points = []
def run_estimation(self):
"""
Runs the whole process of estimating the ground plane.
"""
print('-- ESTIMATING GROUND PLANE POSITION')
# Read label files and get all ground plane points
print('-- Reading label files')
self._read_label_files()
print('-- Label files contain ' + str(len(self.gp_points)) + ' points')
# Create a matrix from all the points for easier computation
self.gp_X_4xn = np.asmatrix(np.ones((4, len(self.gp_points))))
        for i in range(len(self.gp_points)):  # range instead of xrange for Python 3 compatibility
self.gp_X_4xn[0:3,i] = self.gp_points[i]
# plt.scatter(self.gp_X_4xn[2,:], self.gp_X_4xn[1,:])
# plt.show()
# Run RANSAC on those points
print('-- Running RANSAC plane estimation')
self._ransac_plane()
def _read_label_files(self):
"""
Reads all label files and extract the points on the ground plane.
"""
filenames = [f for f in os.listdir(self.path_labels)
if os.path.isfile(os.path.join(self.path_labels, f))]
if len(filenames) != 7481:
print('Wrong number (%d) of files in the KITTI dataset! Should be 7481.'%(len(filenames)))
exit(1)
# Read each label file
# i = 0
for f in filenames:
path_label_file = os.path.join(self.path_labels, f)
self._process_label_file(path_label_file)
# i += 1
# if i == 1000: break
def _process_label_file(self, path_label_file):
"""
Processes one label file.
Input:
path_label_file: Path to the TXT label file in KITTI format to be processed.
"""
with open(path_label_file, 'r') as infile_label:
# Read the objects
for line in infile_label:
line = line.rstrip('\n')
data = line.split(' ')
# First element of the data is the label. We don't want to process 'Misc' and
# 'DontCare' labels
if data[0] == 'Misc' or data[0] == 'DontCare': continue
# Extract the points of this object on the ground plane
self._extract_ground_plane_pts(data)
def _extract_ground_plane_pts(self, data):
"""
Extract 3D points from the object bounding box, which lie on the ground plane.
Input:
data: One split line of the label file (line.split(' '))
"""
# Object dimensions
h = float(data[8])
w = float(data[9])
l = float(data[10])
# Position of the center point on the ground plane (xz plane)
cx = float(data[11])
cy = float(data[12])
cz = float(data[13])
# Rotation of the object around y
ry = float(data[14])
# 3D box corners on the ground plane. Careful, the coordinate system of the car is that
# x points forward, not z! (It is rotated by 90deg with respect to the camera one)
# fbr, rbr, fbl, rbl
X = np.asmatrix([[l/2, -l/2, l/2, -l/2],
[0, 0, 0, 0 ],
[-w/2, -w/2, w/2, w/2 ],
[1, 1, 1, 1 ]])
# Rotate the 3D box around y axis and translate it to the correct position in the cam. frame
X = Rt4x4(R3x3_y(ry), t3x1(cx, cy, cz)) * X
self.gp_points.append(X[0:3,0])
self.gp_points.append(X[0:3,1])
self.gp_points.append(X[0:3,2])
self.gp_points.append(X[0:3,3])
def _ransac_plane(self):
"""
Finds "optimal" ground plane position given the points.
Returns:
[a, b, c, d] plane equation ax+by+cz+d=0 coefficients as a 1x4 np.matrix
"""
num_points = len(self.gp_points)
# Variables for storing minimum distance sum from the estimated plane
dist2_sum_min = 99999999999999999
gp_1x4_max = np.asmatrix(np.zeros((1,4)))
for i in range(RANSAC_ITERS):
rp = random.sample(range(0, num_points), 3)
# Compute the equation of the ground plane
gp_1x4 = plane_3p(self.gp_points[rp[0]], self.gp_points[rp[1]], self.gp_points[rp[2]])
# Check that the plane gives small errors on the original points - when we have some
# close to singular situation we have to be careful
            if abs(gp_1x4 * self.gp_X_4xn[:,rp[0]]) > 0.000000001 or \
               abs(gp_1x4 * self.gp_X_4xn[:,rp[1]]) > 0.000000001 or \
               abs(gp_1x4 * self.gp_X_4xn[:,rp[2]]) > 0.000000001:
print('WARNING: Solution not precise, skipping...')
continue
# Compute the sum of distances from this plane
distances2 = np.power(gp_1x4 * self.gp_X_4xn, 2)
dist2_sum = np.sum(distances2, axis=1)
if dist2_sum[0,0] < dist2_sum_min:
print('New min distance sum: ' + str(dist2_sum[0,0]))
dist2_sum_min = dist2_sum[0,0]
gp_1x4_max = gp_1x4
print('-- RANSAC FINISHED')
print('Estimated ground plane: ' + str(gp_1x4_max))
print('Sum of distances: ' + str(dist2_sum_min) + ', ' + str(dist2_sum_min/num_points) + ' per point')
# Show a plot of the plane
show_X_and_gp(self.gp_X_4xn, gp_1x4_max)
return gp_1x4_max
####################################################################################################
# MAIN #
####################################################################################################
def parse_arguments():
"""
Parse input options of the script.
"""
parser = argparse.ArgumentParser(description='Convert KITTI label files into BBTXT.')
parser.add_argument('path_labels', metavar='path_labels', type=str,
help='Path to the "label_2" folder of the KITTI dataset')
args = parser.parse_args()
if not os.path.exists(args.path_labels):
print('Input path "%s" does not exist!'%(args.path_labels))
parser.print_help()
exit(1)
return args
def main():
args = parse_arguments()
gpe = GroundPlaneEstimator(args.path_labels)
gpe.run_estimation()
if __name__ == '__main__':
main()
|
corehq/ex-submodules/fluff/exceptions.py | dimagilg/commcare-hq | 471 | 12734932 | <reponame>dimagilg/commcare-hq
class EmitterTypeError(Exception):
pass
class EmitterValidationError(Exception):
pass
|
.modules/.CMSeeK/cmseekdb/header.py | termux-one/EasY_HaCk | 1,103 | 12734975 | <gh_stars>1000+
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# This is a part of CMSeeK, check the LICENSE file for more information
# Copyright (c) 2018 Tuhinshubhra
# This file contains all the methods of detecting cms via http Headers
# Version: 1.0.0
# Return a list with ['1'/'0','ID of CMS'/'na'] 1 = detected 0 = not detected
import re
def check(h):
if h == "":
return ['0', 'na']
else:
hstring = h
# harray = h.split("\n") # will use whenever necessary
#### START DETECTION FROM HERE
if '/wp-json/' in hstring:
## WordPress
return ['1','wp']
elif 'X-Drupal-' in hstring or '19 Nov 1978 05' in hstring:
## Drupal [the date is interesting isn't it? just google for it ;) ]
return ['1', 'dru']
elif 'Expires: Wed, 17 Aug 2005 00:00:00 GMT' in hstring:
## This is the only weird but common header i noticed in joomla Sites
return ['1', 'joom']
elif 'X-Wix-' in hstring:
return ['1', 'wix']
elif 'Set-Cookie: ushahidi' in hstring:
return ['1', 'ushahidi']
elif 'X-Generated-By: UMI.CMS' in hstring:
return ['1', 'umi']
elif 'x-generator: Sulu' in hstring:
return ['1', 'sulu']
elif 'X-Powered-CMS: Subrion CMS' in hstring:
return ['1', 'subcms']
elif 'Set-Cookie: SQ_SYSTEM_SESSION' in hstring or 'squizedge.net' in hstring:
return ['1', 'sqm']
elif 'spincms' in hstring:
return ['1', 'spin']
elif 'solodev_session' in hstring:
return ['1', 'sdev']
elif 'SC_ANALYTICS_GLOBAL_COOKIE' in hstring:
return ['1', 'score']
elif 'X-ServedBy: simplebo' in hstring or '_simplebo_tool_session' in hstring:
return ['1', 'spb']
elif 'X-Blog: Serendipity' in hstring or 'Set-Cookie: serendipity[' in hstring or 'Set-Cookie: s9y_' in hstring:
return ['1', 'spity']
elif 'Set-Cookie: SEAMLESS_IDENTIFIER' in hstring:
return ['1', 'slcms']
elif 'X-Powered-By: Roadiz CMS' in hstring:
return ['1', 'roadz']
elif 'X-Powered-By: pimcore' in hstring:
return ['1', 'pcore']
elif 'x-powered-by: PencilBlue' in hstring:
return ['1', 'pblue']
elif 'x-powered-by: Ophal' in hstring:
return ['1', 'ophal']
elif 'Server: OpenCms' in hstring:
return ['1', 'ocms']
elif 'X-Odoo-' in hstring:
return ['1', 'odoo']
elif 'X-SharePointHealthScore' in hstring or 'SPIisLatency' in hstring or 'SPRequestGuid' in hstring or 'MicrosoftSharePointTeamServices' in hstring or 'SPRequestDuration' in hstring:
return ['1', 'share']
elif 'october_session' in hstring:
return ['1', 'octcms']
elif 'Generator: Mura CMS' in hstring:
return ['1', 'mura']
elif 'X-Powered-By: MODX' in hstring:
return ['1', 'modx']
elif 'X-KoobooCMS-Version' in hstring:
return ['1', 'kbcms']
elif 'X-Jimdo-' in hstring:
return ['1', 'jimdo']
elif 'Set-Cookie: ndxz_' in hstring:
return ['1', 'ibit']
elif 'X-Jcms-Ajax-Id' in hstring:
return ['1', 'jcms']
elif 'Set-Cookie: grav-site-' in hstring:
return ['1', 'grav']
elif 'X-Powered-By: FlexCMP' in hstring or 'X-Flex-Tag:' in hstring or 'X-Flex-Lang:' in hstring or 'X-Flex-Lastmod:' in hstring or 'X-Flex-Community:' in hstring or 'X-Flex-Evstart' in hstring:
return ['1', 'flex']
elif 'X-Powered-By: eZ Publish' in hstring or 'Set-Cookie: eZSESSID' in hstring:
return ['1', 'ezpu']
elif 'Set-Cookie: exp_tracker' in hstring or 'Set-Cookie: exp_last_activity' in hstring or 'Set-Cookie: exp_last_visit' in hstring or 'Set-Cookie: exp_csrf_token=' in hstring:
return ['1', 'exen']
elif 'X-Powered-By: e107' in hstring or 'Set-Cookie: SESSE107COOKIE' in hstring:
return ['1', 'e107']
elif 'Set-Cookie: dnn_IsMobile' in hstring or 'DNNOutputCache' in hstring or 'DotNetNuke' in hstring:
return ['1', 'dnn']
elif 'X-Powered-By: CMS Danneo' in hstring:
return ['1', 'dncms']
elif 'X-Powered-By: Craft CMS' in hstring or 'Set-Cookie: CraftSessionId' in hstring:
return ['1', 'craft']
elif 'X-Powered-By: Dragonfly CMS' in hstring:
return ['1', 'dragon']
elif 'X-Generator: Orchard' in hstring:
return ['1', 'orchd']
elif 'X-Powered-By: ContentBox' in hstring or 'Set-Cookie: LIGHTBOXSESSION' in hstring:
return ['1', 'cbox']
elif 'Set-Cookie: CONCRETE5' in hstring:
return ['1', 'con5']
elif 'X-Discourse-Route' in hstring:
return ['1', 'dscrs']
elif 'Set-Cookie: flarum_session=' in hstring:
return ['1', 'flarum']
elif 'IPSSessionFront' in hstring or 'ipbWWLmodpids' in hstring or 'ipbWWLsession_id' in hstring:
return ['1', 'ipb']
elif 'X-Powered-By: NodeBB' in hstring:
return ['1', 'nodebb']
elif 'X-Garden-Version: Vanilla' in hstring or 'Maybe you should be reading this instead: https://www.vanillaforums.com/en/careers' in hstring:
return ['1', 'vanilla']
elif 'Set-Cookie: xf_session=' in hstring or 'Set-Cookie: xf_csrf=' in hstring:
return ['1', 'xf']
elif '[aefsid]' in hstring:
return ['1', 'aef']
elif 'Set-Cookie: fud_session_' in hstring:
return ['1', 'fudf']
elif 'Set-Cookie: phorum_session' in hstring:
return ['1', 'phorum']
elif 'Set-Cookie: yazdLastVisited=' in hstring:
return ['1', 'yazd']
elif 'Set-Cookie: ubbt_' in hstring:
return ['1', 'ubbt']
####################################################
# REGEX DETECTIONS STARTS FROM HERE #
####################################################
ybb_regex = re.search(r'Set-Cookie: (YaBBusername=|YaBBpassword=|YaBBSession|Y2User-(\d.*?)|Y2Pass-(\d.*?)|Y2Sess-(\d.*?))=', hstring)
if ybb_regex != None:
return ['1', 'yabb']
xmb_regex = re.search(r'Set-Cookie: xmblv(a|b)=(\d.*?)\n',hstring)
if xmb_regex != None:
return ['1', 'xmb']
wind_regex = re.search(r'Set-Cookie: [a-zA-Z0-9]{5}_(lastpos|lastvisit)=', hstring)
if wind_regex != None:
return ['1', 'pwind']
myb_regex = re.search(r'Set-Cookie: mybb\[(.*?)\]=', hstring)
if myb_regex != None:
return ['1', 'mybb']
bb_regex = re.search(r'Set-Cookie: wcf(.*?)_cookieHash=', hstring)
if bb_regex != None:
return ['1', 'bboard']
epis_regex = re.search(r'X-XRDS-Location: (.*?)EPiServerCommunity', hstring)
if epis_regex != None:
return ['1', 'epis']
lep_regex = re.search(r'lep(.*?)sessionid', hstring)
if lep_regex != None:
return ['1', 'lepton']
pb_regex = re.search(r'Set-Cookie: phpbb(.*?)=', hstring)
if pb_regex != None:
return ['1', 'phpbb']
ses_regex = re.search(r'Set-Cookie: ses(\d+)=', hstring)
if ses_regex != None:
return ['1', 'impage']
else:
return ['0', 'na']
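# Illustrative calls (the header strings below are made-up samples):
#   check('X-Drupal-Cache: HIT\nContent-Type: text/html\n')  # -> ['1', 'dru']
#   check('Server: nginx\nContent-Type: text/html\n')        # -> ['0', 'na']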
|
src/encoded/tests/test_upgrade_atac_alignment_enrichment_quality_metric.py | procha2/encoded | 102 | 12735006 | def test_upgrade_atac_alignment_enrichment_quality_metric_1_2(
upgrader, atac_alignment_enrichment_quality_metric_1
):
value = upgrader.upgrade(
'atac_alignment_enrichment_quality_metric',
atac_alignment_enrichment_quality_metric_1,
current_version='1',
target_version='2',
)
assert value['schema_version'] == '2'
assert 'fri_blacklist' not in value
assert value['fri_exclusion_list'] == 0.0013046877081284722
|
examples/i18nurls/__init__.py | adityavs/werkzeug | 4,200 | 12735014 | from .application import Application as make_app
|
2020/06/24/How to Create a Celery Task Progress Bar in Django/djangoprogressbar/progress/example/tasks.py | kenjitagawa/youtube_video_code | 492 | 12735028 | from celery import shared_task
from celery_progress.backend import ProgressRecorder
from time import sleep
@shared_task(bind=True)
def go_to_sleep(self, duration):
progress_recorder = ProgressRecorder(self)
for i in range(100):
sleep(duration)
progress_recorder.set_progress(i + 1, 100, f'On iteration {i}')
return 'Done' |
tests/test_day_periods.py | PLSV/babel | 5,079 | 12735031 | # -- encoding: UTF-8 --
from datetime import time
import babel.dates as dates
import pytest
@pytest.mark.parametrize("locale, time, expected_period_id", [
("de", time(7, 42), "morning1"), # (from, before)
("de", time(3, 11), "night1"), # (after, before)
("fi", time(0), "midnight"), # (at)
("en_US", time(12), "noon"), # (at)
("agq", time(10), "am"), # no periods defined
("agq", time(22), "pm"), # no periods defined
("am", time(14), "afternoon1"), # (before, after)
])
def test_day_period_rules(locale, time, expected_period_id):
assert dates.get_period_id(time, locale=locale) == expected_period_id
|