from utils.operator_identifier import *
def parse_decomposition(qdmr):
"""Parses the decomposition into an ordered list of steps
Parameters
----------
qdmr : str
String representation of the QDMR
Returns
-------
list
returns ordered list of qdmr steps
"""
# parse commas as separate tokens
qdmr = qdmr.replace(",", " , ")
crude_steps = qdmr.split(DELIMITER)
steps = []
for i in range(len(crude_steps)):
step = crude_steps[i]
tokens = step.split()
step = ""
# remove 'return' prefix
for tok in tokens[1:]:
step += tok.strip() + " "
step = step.strip()
steps += [step]
return steps
class QDMRStep:
def __init__(self, step_text, operator, arguments):
self.step = step_text
self.operator = operator
self.arguments = arguments
def __str__(self):
ARG_SEP = ' @@ARG_SEP@@ '
OP_SEP = ' @@OP_SEP@@ '
# print(self.arguments)
arguments = ARG_SEP.join(self.arguments)
# return "%s%a" % (self.operator.upper(), self.arguments)
return OP_SEP.join([self.operator, arguments])
class StepIdentifier(object):
def __init__(self):
self.identifiers = {"select": IdentifyOperatorSelect(),
"filter": IdentifyOperatorFilter(),
"project": IdentifyOperatorProject(),
"aggregate": IdentifyOperatorAggregate(),
"group": IdentifyOperatorGroup(),
"superlative": IdentifyOperatorSuperlative(),
"comparative": IdentifyOperatorComparative(),
"union": IdentifyOperatorUnion(),
"intersection": IdentifyOperatorIntersect(),
"discard": IdentifyOperatorDiscard(),
"sort": IdentifyOperatorSort(),
"boolean": IdentifyOperatorBoolean(),
"arithmetic": IdentifyOperatorArithmetic(),
"comparison": IdentifyOperatorComparison()}
self.operator = None
def step_type(self, step_text):
potential_operators = set()
for op in self.identifiers:
identifier = self.identifiers[op]
if identifier.identify_op(step_text):
potential_operators.add(op)
# no matching operator found
if len(potential_operators) == 0:
return None
operators = potential_operators.copy()
# duplicate candidates
if len(operators) > 1:
# avoid project duplicity with aggregate
if "project" in operators:
operators.remove("project")
            # avoid filter duplicity with comparative, superlative, sort, discard
if "filter" in operators:
operators.remove("filter")
# return boolean (instead of intersect)
if "boolean" in operators:
operators = {"boolean"}
# return intersect (instead of filter)
if "intersect" in operators:
operators = {"intersect"}
# return superlative (instead of comparative)
if "superlative" in operators:
operators = {"superlative"}
# return group (instead of arithmetic)
if "group" in operators:
operators = {"group"}
# return comparative (instead of discard)
if "comparative" in operators:
operators = {"comparative"}
# return intersection (instead of comparison)
if "intersection" in operators:
operators = {"intersection"}
assert (len(operators) == 1)
operator = list(operators)[0]
self.operator = operator
return operator
def step_args(self, step_text):
self.operator = self.step_type(step_text)
identifier = self.identifiers[self.operator]
args = identifier.extract_args(step_text)
return args
def identify(self, step_text):
self.operator = self.step_type(step_text)
args = self.step_args(step_text)
return QDMRStep(step_text, self.operator, args)
class QDMRProgramBuilder(object):
def __init__(self, qdmr_text):
self.qdmr_text = qdmr_text
self.steps = None
self.operators = None
self.program = None
def build(self):
try:
self.get_operators()
self.build_steps()
except:
# print("Unable to identify all steps: %s" % self.qdmr_text)
pass
return True
def build_steps(self):
self.steps = []
steps = parse_decomposition(self.qdmr_text)
step_identifier = StepIdentifier()
for step_text in steps:
try:
step = step_identifier.identify(step_text)
except:
# print("Unable to identify step: %s" % step_text)
step = None
self.steps += [step]
return self.steps
def get_operators(self):
self.operators = []
steps = parse_decomposition(self.qdmr_text)
step_identifier = StepIdentifier()
for step_text in steps:
try:
op = step_identifier.step_type(step_text)
except:
print("Unable to identify operator: %s" % step_text)
op = None
self.operators += [op]
return self.operators
def build_program(self):
raise NotImplementedError
return True
def __str__(self):
SEP = ' @@SEP@@ '
return SEP.join([str(step) for step in self.steps])
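# --- Illustrative usage sketch (not part of the original module) ---
# Assumes DELIMITER from utils.operator_identifier is the ';' step separator
# used by BREAK-style QDMR annotations; the example decomposition is made up.
if __name__ == "__main__":
    example_qdmr = ("return flights ;"
                    "return #1 from boston ;"
                    "return number of #2")
    builder = QDMRProgramBuilder(example_qdmr)
    builder.build()
    print(builder.operators)  # one operator name (or None) per step
    print(builder)            # step strings joined with ' @@SEP@@ '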
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import CdnManagementClientConfiguration
from .operations import ProfilesOperations
from .operations import EndpointsOperations
from .operations import OriginsOperations
from .operations import OriginGroupsOperations
from .operations import CustomDomainsOperations
from .operations import CdnManagementClientOperationsMixin
from .operations import ResourceUsageOperations
from .operations import Operations
from .operations import EdgeNodesOperations
from .operations import PoliciesOperations
from .operations import ManagedRuleSetsOperations
from .. import models
class CdnManagementClient(CdnManagementClientOperationsMixin):
"""Cdn Management Client.
:ivar profiles: ProfilesOperations operations
:vartype profiles: azure.mgmt.cdn.aio.operations.ProfilesOperations
:ivar endpoints: EndpointsOperations operations
:vartype endpoints: azure.mgmt.cdn.aio.operations.EndpointsOperations
:ivar origins: OriginsOperations operations
:vartype origins: azure.mgmt.cdn.aio.operations.OriginsOperations
:ivar origin_groups: OriginGroupsOperations operations
:vartype origin_groups: azure.mgmt.cdn.aio.operations.OriginGroupsOperations
:ivar custom_domains: CustomDomainsOperations operations
:vartype custom_domains: azure.mgmt.cdn.aio.operations.CustomDomainsOperations
:ivar resource_usage: ResourceUsageOperations operations
:vartype resource_usage: azure.mgmt.cdn.aio.operations.ResourceUsageOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.cdn.aio.operations.Operations
:ivar edge_nodes: EdgeNodesOperations operations
:vartype edge_nodes: azure.mgmt.cdn.aio.operations.EdgeNodesOperations
:ivar policies: PoliciesOperations operations
:vartype policies: azure.mgmt.cdn.aio.operations.PoliciesOperations
:ivar managed_rule_sets: ManagedRuleSetsOperations operations
:vartype managed_rule_sets: azure.mgmt.cdn.aio.operations.ManagedRuleSetsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Azure Subscription ID.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = CdnManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.profiles = ProfilesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.endpoints = EndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.origins = OriginsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.origin_groups = OriginGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.custom_domains = CustomDomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_usage = ResourceUsageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.edge_nodes = EdgeNodesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.policies = PoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.managed_rule_sets = ManagedRuleSetsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "CdnManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
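# Illustrative usage sketch (not part of the generated client); assumes the
# azure-identity package is installed and "<subscription-id>" is replaced with
# a real subscription id:
#
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def list_cdn_profiles() -> None:
#         async with DefaultAzureCredential() as credential:
#             async with CdnManagementClient(credential, "<subscription-id>") as client:
#                 async for profile in client.profiles.list():
#                     print(profile.name)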
|
import warnings
warnings.filterwarnings('ignore')
import torch
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from dataset import DataBuilder, Dataset, Vocab, load_embedding
from dataset_daily import DataBuilder, Dataset, Vocab, load_embedding
from model import Seq2Seq
class Trainer:
def __init__(self, model, device, train_dataloader, n_epoch, optim, tb_dir,
case_interval, vocab,
valid_dataloader=None, test_dataloader=None):
self.device = device
self.model = model.to(device)
self.train_dataloader = train_dataloader
self.valid_dataloader = valid_dataloader
self.test_dataloader = test_dataloader
self.optim = optim
self.case_interval = case_interval
self.n_epoch = n_epoch
self.global_t = None
self.vocab = vocab
self.writer = SummaryWriter(tb_dir, flush_secs=1)
def loss_fn(self, input, target):
input = input.reshape(-1, input.size(-1))
target = target.reshape(-1)
loss = torch.nn.functional.cross_entropy(
input=input, target=target,
ignore_index=0, reduction='mean')
return loss
def batch2sents(self, batch):
sents = []
for data in batch.tolist():
for _ in range(data.count(self.vocab.pad_value)):
data.remove(self.vocab.pad_value)
if self.vocab.eos_value in data:
tail = len(data) - data[::-1].index(self.vocab.eos_value)
data = data[:tail]
sent = [self.vocab[x] for x in data]
sents.append(' '.join(sent))
return sents
def show_case(self, x, y, y_preds):
post = self.batch2sents(x.t())[1]
targ = self.batch2sents(y.t())[1]
pred = y_preds.argmax(dim=2)
pred = self.batch2sents(pred.t())[1]
texts = [
f'[Post] {post}',
f'[Targ] {targ}',
f'[Pred] {pred}'
]
texts = '\n\n'.join(texts)
self.writer.add_text('case', texts, self.global_t)
def train_batch(self, batch):
x, y = batch[0].to(self.device), batch[1].to(self.device)
y_preds = self.model(x, y)
loss = self.loss_fn(input=y_preds, target=y)
self.model.zero_grad()
loss.backward()
self.optim.step()
self.global_t += 1
if self.global_t % self.case_interval == 0:
self.show_case(x, y, y_preds)
return {'loss': loss.item()}
def overfit_one_batch(self, n_step):
self.model.train()
batch = next(iter(self.train_dataloader))
pbar = tqdm(range(n_step), desc='Overfit')
self.global_t = 0
for i in pbar:
state = self.train_batch(batch)
pbar.set_postfix(state)
self.writer.add_scalars('overfit', state, self.global_t)
def fit(self):
self.global_t = 0
for epoch in tqdm(range(1, self.n_epoch + 1), desc='Total'):
self.train_epoch(epoch)
if self.valid_dataloader is not None:
self.valid_epoch(epoch)
if self.test_dataloader is not None:
self.test_epoch(epoch)
def train_epoch(self, epoch):
self.model.train()
pbar = tqdm(self.train_dataloader, desc=f'Train Epoch {epoch}')
for batch in pbar:
state = self.train_batch(batch)
pbar.set_postfix(state)
self.writer.add_scalars('train', state, self.global_t)
if __name__ == '__main__':
# data_dir = 'data'
data_dir = 'data-daily-train'
embedding_path = 'embedding/glove.42B.300d.txt'
tb_dir = 'runs/fair'
case_interval = 10
gpu_id = 7
max_len = 30
vocab_size = 10000
n_epoch = 30
learning_rate = 0.001
batch_size = 64
embed_size = 300
hidden_size = 200
if not torch.cuda.is_available() or gpu_id == -1:
device = torch.device('cpu')
else:
device = torch.device('cuda:' + str(gpu_id))
pairs, vocab = DataBuilder.build(
data_dir=data_dir,
max_len=max_len,
vocab_size=vocab_size,
)
dataset = Dataset(pairs, vocab)
embedding = load_embedding(embedding_path, vocab)
train_dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
)
model = Seq2Seq(
embedding=embedding,
hidden_size=hidden_size,
pad_value=vocab.pad_value,
sos_value=vocab.sos_value
)
adam = torch.optim.Adam(
model.parameters(),
lr=learning_rate,
)
trainer = Trainer(
model=model,
device=device,
train_dataloader=train_dataloader,
n_epoch=n_epoch,
optim=adam,
tb_dir=tb_dir,
case_interval=case_interval,
vocab=vocab,
)
# trainer.overfit_one_batch(500)
    trainer.fit()
|
import tensorflow as tf
import os
import pickle
import numpy as np
base_seq = [1,1,1,1,2,1,1,1,3,3,1,3,2] + ([0]*95)
current_struc = [1,1,1,1,1,1,1,1,1,1,1,1,1] + ([0]*95)
target_struc = [2,1,1,2,2,1,1,1,3,3,1,1,3] + ([0]*95)
current_energy = [0.0] + ([0]*107)
target_energy = [7.9] + ([0]*107)
locks = [1,1,1,1,1,2,2,2,1,1,1,1,1] + ([0]*95)
TF_SHAPE = 648
inputs = np.array([base_seq,current_struc,target_struc,current_energy,target_energy,locks])
inputs = inputs.reshape([-1,TF_SHAPE])
sess = tf.Session()
saver = tf.train.import_meta_graph(os.getcwd()+'/models/locationDNN.meta')
saver.restore(sess,os.getcwd()+'/models/locationDNN')
graph = tf.get_default_graph()
x = graph.get_tensor_by_name('x_placeholder:0')
y = graph.get_tensor_by_name('y_placeholder:0')
keep_prob = graph.get_tensor_by_name('keep_prob_placeholder:0')
feed_dict={x:inputs,keep_prob:1.0}
op7 = graph.get_tensor_by_name('op7:0')
print(sess.run(op7, feed_dict)[0])
print(np.argmax(sess.run(op7, feed_dict)[0]) + 1)
print(sess.run(tf.argmax(op7), feed_dict))
|
import simplejson
import sys
import util.net as net
import urllib2
from util.primitives import funcs
from operator import itemgetter
import random
import util.callbacks as callbacks
import util.threads as threads
import common.asynchttp
from logging import getLogger
log = getLogger('loadbalance')
class DigsbyLoadBalanceAPI(object):
__version__ = (1, 0)
def __init__(self, profile, username, host="login1.digsby.org", port=80, mode='async', initial=None, **_k):
self.profile = profile
self.username = username
self.host = host
self.port = port
self.mode = mode
self.httpmaster = common.asynchttp.HttpMaster()
self.initial = initial
def copy(self):
ret = type(self)(**self.__dict__)
return ret
@property
def console(self):
new = self.copy()
new.mode = "console"
return new
@property
def stringversion(self):
return '.'.join(['%d']*len(self.__version__)) % self.__version__
@callbacks.callsback
def get(self, callback=None, **k):
version = self.stringversion
from gui.native.helpers import GetUserIdleTime
from AccountManager import SECONDS_FOR_IDLE
idle = GetUserIdleTime() > (1000 * SECONDS_FOR_IDLE)
local_load_exc = getattr(self.profile, 'local_load_exc', None)
log.debug('loaded: %s initial: %s', local_load_exc, self.initial)
have_acct_data = not bool(local_load_exc)
button_clicked = bool(self.initial)
log.debug('have_data: %s button_clicked: %s', have_acct_data, button_clicked)
if button_clicked and not have_acct_data:
state = 'initial_nocache'
elif button_clicked:
state = 'initial'
elif idle:
state = 'reconnect_idle'
else:
state = 'reconnect'
url = net.UrlQueryObject('http://%s:%s/load/all/json' % (self.host, self.port),
revision = getattr(sys, 'REVISION', 'unknown'),
tag = getattr(sys, 'TAG', 'unknown'),
username = self.username,
version = version,
state = state,
v = version,
**k)
log.debug('calling loadbalance URL: %s', url)
if self.mode == 'async':
return self.call_async(url, callback=callback)
elif self.mode == 'console':
return self.get_urllib(url)
elif self.mode == 'threaded':
return threads.threaded(self.get_urllib)(url, callback=callback)
else:
return callbacks.callback_adapter(self.get_urllib)(url, callback=callback)
def get_urllib(self, url):
res = urllib2.urlopen(url)
return self.clean(res.read())
@callbacks.callsback
def call_async(self, url, callback=None):
return self.httpmaster.open(url,
success=(lambda *a, **k:
callbacks.callback_adapter(self.parse_response, do_return=False)(callback=callback, *a, **k)),
error = callback.error,
timeout = callback.timeout)
def parse_response(self, _req, resp):
if hasattr(resp, 'read'):
response = resp.read()
#if not asynchttp:
if hasattr(resp, 'close'):
resp.close()
else:
raise TypeError('failed to parse: %r', resp)
return self.clean(response)
def clean(self, val):
log.debug("Got loadbalance result data: %r", val)
info = simplejson.loads(val)
if not isinstance(info, dict):
return self.clean_0_0(val)
return getattr(self, 'clean_%s' % info['version'].replace('.', '_'))(info)
def clean_0_0(self, val):
info = simplejson.loads(val)
return DigsbyLoadBalanceInfo(nodes = info)
def clean_1_0(self, info):
info['version'] = map(int, info['version'].split('.'))
return DigsbyLoadBalanceInfo(**info)
class DigsbyLoadBalanceInfo(object):
def __init__(self, nodes = None, reconnect_strategy = None, version = (0, 0), **k):
self.nodes = nodes
self.state = k.get('state', None)
self.reconnect_strategy = reconnect_strategy
self.version = version
@property
def addresses(self):
if not self.nodes:
return None
else:
grouped = dict(funcs.groupby(self.nodes, itemgetter('load')))
sorts = sorted(grouped.items())
addresses = []
for _load, hosts in sorts:
addys = []
for host in hosts:
addys.extend(host.get('addresses', []))
random.shuffle(addys)
addresses.extend(addys)
addresses = [a.encode('idna') for a in addresses]
return addresses or None
def __repr__(self):
return "<%(name)s version:%(version)s state:'%(state)s' reconnect_strategy:%(reconnect_strategy)r nodes:%(nodes)r>" % dict(name = type(self).__name__, **self.__dict__)
class DigsbyLoadBalanceManager(object):
def __init__(self, profile, username, servers, success, error, timeout=None, load_server=None, initial=None):
self.servers = servers
self.pos = 0
self.success = success
self.error = error
self.timeout = timeout
self.username = username
self.load_server = load_server
self.profile = profile
self.initial = initial
def process_one(self):
if self.pos >= len(self.servers):
return self.error(self)
h,p = self.servers[self.pos]
api = DigsbyLoadBalanceAPI(profile = self.profile, username=self.username, host=h, port=p, mode = 'async', initial = self.initial)
api.get(success = self.response, error = self.api_error)
def response(self, val):
self.success(self, val)
def api_error(self, *_a, **_k):
self.pos += 1
self.process_one()
if __name__ == '__main__':
print DigsbyLoadBalanceAPI('foo', host='192.168.99.71', mode='console').get()
|
import os
import fire
from src.utils.directory import CONFIG_YAML_PATH, EXPENSE_YAML_PATH, INCOME_YAML_PATH, FINAL_PATH
from src.utils.io import load_yaml
from src.utils.transactions import (
load_transaction,
get_accounts,
remove_accounts,
remove_keywords,
map_category,
map_description,
map_type,
get_emoji,
get_filename,
)
# TODO: Add logger for process, use logsensei
def main(year: int = None, month: int = None):
expense_dict = load_yaml(EXPENSE_YAML_PATH)
config_dict = load_yaml(CONFIG_YAML_PATH)
income_dict = load_yaml(INCOME_YAML_PATH)
full_df = load_transaction(year, month)
accounts = get_accounts(year, month) + config_dict["FILTER_ACCOUNTS"]
full_df = full_df[["id", "account_id", "made_on", "amount", "description", "category", "type"]]
full_df = remove_accounts(full_df, accounts)
full_df = remove_keywords(full_df, config_dict["FILTER_KEYWORDS"])
expense_df = full_df[full_df["amount"] < 0.0].copy().reset_index(drop=True)
expense_df["true_category"] = "OTHER_EXPENSES"
expense_df = map_type(expense_df)
expense_df = map_category(expense_df, expense_dict)
expense_df = map_description(expense_df, expense_dict)
expense_df = get_emoji(expense_df, expense_dict)
income_df = full_df[full_df["amount"] > 0].copy().reset_index(drop=True)
income_df["true_category"] = "OTHER_INCOME"
income_df = map_type(income_df)
income_df = map_category(income_df, income_dict)
income_df = map_description(income_df, income_dict)
income_df = get_emoji(income_df, income_dict)
expense_path = os.path.join(FINAL_PATH, get_filename("expense", year, month))
expense_df.to_csv(expense_path, index=False)
income_path = os.path.join(FINAL_PATH, get_filename("income", year, month))
income_df.to_csv(income_path, index=False)
if __name__ == "__main__":
fire.Fire(main)
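# Illustrative invocation (the script name below is a placeholder, not taken
# from the repository); python-fire maps the keyword arguments to CLI flags:
#
#     python make_report.py --year=2021 --month=3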
|
from django.contrib import admin
from .models import Sensor, Order, Employee, Responsable
admin.site.site_header = 'Administration Panel'
admin.site.site_title = 'Dustbin IoT'
admin.site.index_title = 'Admin'
admin.site.site_url = '/home/'
admin.site.register([Sensor, Order, Employee, Responsable])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Toolbox used to interact with the DebugDriver using a designated port. It's
intended to be used for debugging.
"""
import time
import sys
import struct
from cflib.crtp.crtpstack import CRTPPacket, CRTPPort
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import Qt, pyqtSlot, pyqtSignal, QThread, SIGNAL
__author__ = 'Bitcraze AB'
__all__ = ['DebugDriverToolbox']
debugdriver_tab_class = uic.loadUiType(
sys.path[0] +
"/cfclient/ui/toolboxes/debugDriverToolbox.ui")[0]
class DebugDriverToolbox(QtGui.QWidget, debugdriver_tab_class):
"""Used to interact with the DebugDriver toolbox"""
connectionDoneSignal = pyqtSignal(str)
disconnectedSignal = pyqtSignal(str)
def __init__(self, helper, *args):
super(DebugDriverToolbox, self).__init__(*args)
self.setupUi(self)
self.helper = helper
# Connected / disconnected signals
self.helper.cf.connected.add_callback(
self.connectionDoneSignal.emit)
self.connectionDoneSignal.connect(self.connectionDone)
self.helper.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.disconnectedSignal.connect(self.disconnected)
self.linkQuality.valueChanged.connect(self.linkQualityChanged)
        self.forceDisconnect.pressed.connect(self.forceDisconnectPressed)
    def forceDisconnectPressed(self):
if (self.helper.cf.link is not None):
p = CRTPPacket()
p.set_header(CRTPPort.DEBUGDRIVER, 0)
p.data = struct.pack('<B', 1) # Force disconnect
self.helper.cf.send_packet(p)
def linkQualityChanged(self, value):
if (self.helper.cf.link is not None):
p = CRTPPacket()
p.set_header(CRTPPort.DEBUGDRIVER, 0)
p.data = struct.pack('<BB', 0, value) # Set link quality
self.helper.cf.send_packet(p)
def disconnected(self, linkURI):
if ("debug" in linkURI):
self.linkQuality.setEnabled(False)
self.forceDisconnect.setEnabled(False)
def connectionDone(self, linkURI):
if ("debug" in linkURI):
self.linkQuality.setEnabled(True)
self.forceDisconnect.setEnabled(True)
def getName(self):
return 'Debug driver'
def getTabName(self):
return 'Debug driver'
def enable(self):
return
def disable(self):
return
def preferedDockArea(self):
return Qt.RightDockWidgetArea
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
def _add_suffixes(tensor_str, suffixes, indent):
tensor_strs = [tensor_str]
last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
linewidth = 80
for suffix in suffixes:
suffix_len = len(suffix)
if last_line_len + suffix_len + 2 > linewidth:
tensor_strs.append(",\n" + " " * indent + suffix)
last_line_len = indent + suffix_len
else:
tensor_strs.append(", " + suffix)
last_line_len += suffix_len + 2
tensor_strs.append(")")
return "".join(tensor_strs)
def _gen_tensor_str(tensor):
prefix = "tensor("
indent = len(prefix)
suffixes = []
if tensor.device.type != "cpu" or (
tensor.device.type == "cuda" and tensor.device.index != 0
):
suffixes.append("device='" + str(tensor.device) + "'")
suffixes.append("dtype=" + str(tensor.dtype))
if tensor.grad_fn is not None:
name = tensor.grad_fn.name()
suffixes.append("grad_fn=<{}>".format(name))
elif tensor.requires_grad:
suffixes.append("requires_grad=True")
tensor_str = np.array2string(
tensor.numpy(), precision=4, separator=", ", prefix=prefix
)
return _add_suffixes(prefix + tensor_str, suffixes, indent)
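# Illustrative sketch (not part of the original module): formatting a small
# tensor that requires grad; the exact string depends on numpy's array2string
# output and on the tensor's dtype/device.
#
#     t = flow.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
#     print(_gen_tensor_str(t))
#     # e.g. tensor([[1., 2.],
#     #              [3., 4.]], dtype=oneflow.float32, requires_grad=True)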
|
# DO NOT USE THIS MODULE.
# This module contians a legacy API, an early approach to composing plans that
# was little used and finally deprecated in v0.10.0. It will be removed in a
# future release. It should not be used.
from functools import wraps
from contextlib import contextmanager
import warnings
from .utils import (normalize_subs_input, root_ancestor,
separate_devices,
Msg, single_gen)
from .plan_stubs import (broadcast_msg, trigger_and_read)
def planify(func):
"""Turn a function that returns a list of generators into a coroutine.
Parameters
----------
func : callable
expected to return a list of generators that yield messages (`Msg`
objects) the function may have an arbitrary signature
Returns
-------
gen : generator
a single generator that yields messages. The return value from
the generator is the return of the last plan in the plan
stack.
"""
@wraps(func)
def wrapped(*args, **kwargs):
gen_stack = func(*args, **kwargs)
ret = None
for g in gen_stack:
ret = yield from g
return ret
return wrapped
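# Illustrative sketch (not from the original API): a function that returns a
# list of plan generators, turned into a single plan by ``planify``. ``det``
# stands for any readable device.
#
# @planify
# def open_read_close(det, md=None):
#     plan_stack = []
#     plan_stack.append(single_gen(Msg('open_run', None, **dict(md or {}))))
#     plan_stack.append(trigger_and_read([det]))
#     plan_stack.append(single_gen(Msg('close_run')))
#     return plan_stack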
@contextmanager
def subs_context(plan_stack, subs):
"""
Subscribe callbacks to the document stream; then unsubscribe on exit.
.. deprecated:: 0.10.0
Use :func:`subs_wrapper` or :func:`subs_decorator` instead.
Parameters
----------
plan_stack : list-like
appendable collection of generators that yield messages (`Msg` objects)
subs : callable, list of callables, or dict of lists of callables
Documents of each type are routed to a list of functions.
Input is normalized to a dict of lists of functions, like so:
None -> {'all': [], 'start': [], 'stop': [], 'event': [],
'descriptor': []}
func -> {'all': [func], 'start': [], 'stop': [], 'event': [],
'descriptor': []}
[f1, f2] -> {'all': [f1, f2], 'start': [], 'stop': [], 'event': [],
'descriptor': []}
{'event': [func]} -> {'all': [], 'start': [], 'stop': [],
'event': [func], 'descriptor': []}
    Signature of functions must conform to `f(name, doc)` where
name is one of {'all', 'start', 'stop', 'event', 'descriptor'} and
doc is a dictionary.
"""
warnings.warn("subs_context is deprecated. "
"Use subs_wrapper or subs_decorator.")
subs = normalize_subs_input(subs)
tokens = set()
def _subscribe():
for name, funcs in subs.items():
for func in funcs:
token = yield Msg('subscribe', None, func, name)
tokens.add(token)
def _unsubscribe():
for token in tokens:
yield Msg('unsubscribe', None, token=token)
plan_stack.append(_subscribe())
try:
yield plan_stack
finally:
# The RunEngine might never process these if the execution fails,
# but it keeps its own cache of tokens and will try to remove them
# itself if this plan fails to do so.
plan_stack.append(_unsubscribe())
@contextmanager
def run_context(plan_stack, *, md=None):
"""Enclose in 'open_run' and 'close_run' messages.
.. deprecated:: 0.10.0
Use :func:`run_wrapper` or :func:`run_decorator` instead.
Parameters
----------
plan_stack : list-like
appendable collection of generators that yield messages (`Msg` objects)
md : dict, optional
metadata to be passed into the 'open_run' message
"""
warnings.warn(
"run_context is deprecated. Use run_wrapper or run_decorator.")
plan_stack.append(single_gen(Msg('open_run', None, **dict(md or {}))))
yield plan_stack
plan_stack.append(single_gen(Msg('close_run')))
@contextmanager
def event_context(plan_stack, name='primary'):
"""Bundle readings into an 'event' (a datapoint).
This encloses the contents in 'create' and 'save' messages.
.. deprecated:: 0.10.0
Use the :func:`create` and :func:`save` plans directly. Also,
:func:`trigger_and_read` addresses the common case of reading one or
more devices into one Event.
Parameters
----------
plan_stack : list-like
appendable collection of generators that yield messages (`Msg` objects)
name : string, optional
name of event stream; default is 'primary'
"""
warnings.warn(
"event_context is deprecated. Use create, save, or trigger_and_read.")
plan_stack.append(single_gen(Msg('create', None, name=name)))
yield plan_stack
plan_stack.append(single_gen(Msg('save')))
@contextmanager
def stage_context(plan_stack, devices):
"""
Stage devices upon entering context and unstage upon exiting.
.. deprecated:: 0.10.0
Use :func:`stage_wrapper` or :func:`stage_decorator`.
Parameters
----------
plan_stack : list-like
appendable collection of generators that yield messages (`Msg` objects)
devices : collection
list of devices to stage immediately on entrance and unstage on exit
See Also
--------
:func:`bluesky.plans.lazily_stage`
"""
warnings.warn("stage_context is deprecated. "
"Use stage_wrapper or stage_decorator.")
# Resolve unique devices, avoiding redundant staging.
devices = separate_devices(root_ancestor(device) for device in devices)
def stage():
# stage devices explicitly passed to 'devices' argument
yield from broadcast_msg('stage', devices)
def unstage():
# unstage devices explicitly passed to 'devices' argument
yield from broadcast_msg('unstage', reversed(devices))
plan_stack.append(stage())
yield plan_stack
plan_stack.append(unstage())
@contextmanager
def baseline_context(plan_stack, devices, name='baseline'):
"""
Read every device once upon entering and exiting the context.
.. deprecated:: 0.10.0
Use :func:`baseline_wrapper` or :func:`baseline_decorator`.
The readings are designated for a separate event stream named 'baseline'
by default.
Parameters
----------
plan_stack : list-like
appendable collection of generators that yield messages (`Msg` objects)
devices : collection
collection of Devices to read
name : string, optional
name for event stream; by default, 'baseline'
"""
warnings.warn("baseline_context is deprecated. Use baseline_wrapper or "
"baseline_decorator.")
plan_stack.append(trigger_and_read(devices, name=name))
yield
plan_stack.append(trigger_and_read(devices, name=name))
@contextmanager
def monitor_context(plan_stack, signals):
"""
Asynchronously monitor signals, generating separate event streams.
.. deprecated:: 0.10.0
Use :func:`monitor_wrapper` or :func:`monitor_decorator`.
Upon exiting the context, stop monitoring.
Parameters
----------
plan_stack : list-like
appendable collection of generators that yield messages (`Msg` objects)
signals : dict or list
either a dict mapping Signals to event stream names or simply a list
of Signals, in which case the event stream names default to None
name : string, optional
name for event stream; by default, None
Examples
--------
>>> plan_stack = deque()
With custom event stream names
>>> with monitor_context(plan_stack, {sig1: 'sig1', sig2: 'sig2'}):
...
With no event stream names
>>> with monitor_context(plan_stack, [sig1, sig2]):
...
"""
warnings.warn("monitor_context is deprecated. Use monitor_wrapper or "
"monitor_decorator.")
if hasattr(signals, 'items'):
# interpret input as dict of signals mapped to event stream names
pass
else:
        # interpret input as list of signals
signals = {sig: None for sig in signals}
for sig, name in signals.items():
plan_stack.append(single_gen(Msg('monitor', sig, name=name)))
yield
for sig, name in signals.items():
plan_stack.append(single_gen(Msg('unmonitor', sig)))
|
"""
NOTE: these functions are copied from "gpu_extract.py" in the hackathon branch;
the pieces have not yet been put together into a working GPU extraction
in this branch.
"""
import math
import numpy as np
import numpy.polynomial.legendre
from numba import cuda
import cupy as cp
import cupy.prof
import cupyx
import cupyx.scipy.special
from .cpu import get_spec_padding
from .both import xp_ex2d_patch
from ..io import native_endian
from ..util import Timer
from ..linalg import (
cholesky_solve,
matrix_sqrt,
diag_block_matrix_sqrt,
)
from ..polynomial import (
hermevander,
legvander,
)
default_weight_scale = 1e-4
@cupy.prof.TimeRangeDecorator("evalcoeffs")
def evalcoeffs(psfdata, wavelengths, specmin=0, nspec=None):
'''
evaluate PSF coefficients parameterized as Legendre polynomials
Args:
psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
wavelengths: 1D array of wavelengths
Options:
specmin: first spectrum to include
nspec: number of spectra to include (default: all)
Returns a dictionary params[paramname] = value[nspec, nwave]
The Gauss Hermite coefficients are treated differently:
params['GH'] = value[i,j,nspec,nwave]
The dictionary also contains scalars with the recommended spot size
2*(HSIZEX, HSIZEY)+1 and Gauss-Hermite degrees GHDEGX, GHDEGY
(which is also derivable from the dimensions of params['GH'])
'''
if nspec is None:
nspec = psfdata['PSF']['COEFF'].shape[1]
p = dict(WAVE=wavelengths)
#- Evaluate X and Y which have different dimensionality from the
#- PSF coefficients (and might have different WAVEMIN, WAVEMAX)
meta = psfdata['XTRACE'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
# TODO: Implement cuda legval
p['X'] = cp.asarray(numpy.polynomial.legendre.legval(ww, psfdata['XTRACE']['X'][specmin:specmin+nspec].T))
meta = psfdata['YTRACE'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
# TODO: Implement cuda legval
p['Y'] = cp.asarray(numpy.polynomial.legendre.legval(ww, psfdata['YTRACE']['Y'][specmin:specmin+nspec].T))
#- Evaluate the remaining PSF coefficients with a shared dimensionality
#- and WAVEMIN, WAVEMAX
meta = psfdata['PSF'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
L = legvander(ww, meta['LEGDEG'])
nparam = psfdata['PSF']['COEFF'].shape[0]
ndeg = psfdata['PSF']['COEFF'].shape[2]
nwave = L.shape[0]
nghx = meta['GHDEGX']+1
nghy = meta['GHDEGY']+1
p['GH'] = cp.zeros((nghx, nghy, nspec, nwave))
coeff_gpu = cp.array(native_endian(psfdata['PSF']['COEFF']))
for name, coeff in zip(psfdata['PSF']['PARAM'], coeff_gpu):
name = name.strip()
coeff = coeff[specmin:specmin+nspec]
if name.startswith('GH-'):
i, j = map(int, name.split('-')[1:3])
p['GH'][i,j] = L.dot(coeff.T).T
else:
p[name] = L.dot(coeff.T).T
#- Include some additional keywords that we'll need
for key in ['HSIZEX', 'HSIZEY', 'GHDEGX', 'GHDEGY']:
p[key] = meta[key]
return p
@cupy.prof.TimeRangeDecorator("calc_pgh")
def calc_pgh(ispec, wavelengths, psfparams):
'''
Calculate the pixelated Gauss Hermite for all wavelengths of a single spectrum
ispec : integer spectrum number
wavelengths : array of wavelengths to evaluate
psfparams : dictionary of PSF parameters returned by evalcoeffs
returns pGHx, pGHy
where pGHx[ghdeg+1, nwave, nbinsx] contains the pixel-integrated Gauss-Hermite polynomial
    for all degrees at all wavelengths across nbinsx bins spanning the PSF spot, and similarly
for pGHy. The core PSF will then be evaluated as
PSFcore = sum_ij c_ij outer(pGHy[j], pGHx[i])
'''
#- shorthand
p = psfparams
#- spot size (ny,nx)
nx = 2*p['HSIZEX'] + 1
ny = 2*p['HSIZEY'] + 1
nwave = len(wavelengths)
#- convert to cupy arrays
for k in ['X', 'Y', 'GHSIGX', 'GHSIGY']:
p[k] = cp.asarray(p[k])
#- x and y edges of bins that span the center of the PSF spot
xedges = cp.repeat(cp.arange(nx+1) - nx//2 - 0.5, nwave).reshape(nx+1, nwave)
yedges = cp.repeat(cp.arange(ny+1) - ny//2 - 0.5, nwave).reshape(ny+1, nwave)
#- Shift to be relative to the PSF center and normalize
#- by the PSF sigma (GHSIGX, GHSIGY).
#- Note: x,y = 0,0 is center of pixel 0,0 not corner
#- Dimensions: xedges[nx+1, nwave], yedges[ny+1, nwave]
dx = (p['X'][ispec]+0.5)%1 - 0.5
dy = (p['Y'][ispec]+0.5)%1 - 0.5
xedges = ((xedges - dx)/p['GHSIGX'][ispec])
yedges = ((yedges - dy)/p['GHSIGY'][ispec])
#- Degree of the Gauss-Hermite polynomials
ghdegx = p['GHDEGX']
ghdegy = p['GHDEGY']
#- Evaluate the Hermite polynomials at the pixel edges
#- HVx[ghdegx+1, nwave, nx+1]
#- HVy[ghdegy+1, nwave, ny+1]
HVx = hermevander(xedges, ghdegx).T
HVy = hermevander(yedges, ghdegy).T
#- Evaluate the Gaussians at the pixel edges
#- Gx[nwave, nx+1]
#- Gy[nwave, ny+1]
Gx = cp.exp(-0.5*xedges**2).T / cp.sqrt(2. * cp.pi)
Gy = cp.exp(-0.5*yedges**2).T / cp.sqrt(2. * cp.pi)
#- Combine into Gauss*Hermite
GHx = HVx * Gx
GHy = HVy * Gy
#- Integrate over the pixels using the relationship
# Integral{ H_k(x) exp(-0.5 x^2) dx} = -H_{k-1}(x) exp(-0.5 x^2) + const
#- pGHx[ghdegx+1, nwave, nx]
#- pGHy[ghdegy+1, nwave, ny]
pGHx = cp.zeros((ghdegx+1, nwave, nx))
pGHy = cp.zeros((ghdegy+1, nwave, ny))
pGHx[0] = 0.5 * cp.diff(cupyx.scipy.special.erf(xedges/cp.sqrt(2.)).T)
pGHy[0] = 0.5 * cp.diff(cupyx.scipy.special.erf(yedges/cp.sqrt(2.)).T)
pGHx[1:] = GHx[:ghdegx,:,0:nx] - GHx[:ghdegx,:,1:nx+1]
pGHy[1:] = GHy[:ghdegy,:,0:ny] - GHy[:ghdegy,:,1:ny+1]
return pGHx, pGHy
@cuda.jit()
def _multispot(pGHx, pGHy, ghc, spots):
nx = pGHx.shape[-1]
ny = pGHy.shape[-1]
nwave = pGHx.shape[1]
#this is the magic step
iwave = cuda.grid(1)
n = pGHx.shape[0]
m = pGHy.shape[0]
if (0 <= iwave < nwave):
#yanked out the i and j loops in lieu of the cuda grid of threads
for i in range(pGHx.shape[0]):
px = pGHx[i,iwave]
for j in range(0, pGHy.shape[0]):
py = pGHy[j,iwave]
c = ghc[i,j,iwave]
for iy in range(len(py)):
for ix in range(len(px)):
spots[iwave, iy, ix] += c * py[iy] * px[ix]
@cupy.prof.TimeRangeDecorator("multispot")
def multispot(pGHx, pGHy, ghc):
nx = pGHx.shape[-1]
ny = pGHy.shape[-1]
nwave = pGHx.shape[1]
blocksize = 256
numblocks = (nwave + blocksize - 1) // blocksize
spots = cp.zeros((nwave, ny, nx)) #empty every time!
_multispot[numblocks, blocksize](pGHx, pGHy, ghc, spots)
cuda.synchronize()
return spots
@cupy.prof.TimeRangeDecorator("get_spots")
def get_spots(specmin, nspec, wavelengths, psfdata):
'''Calculate PSF spots for the specified spectra and wavelengths
Args:
specmin: first spectrum to include
nspec: number of spectra to evaluate spots for
wavelengths: 1D array of wavelengths
psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
Returns:
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
'''
nwave = len(wavelengths)
p = evalcoeffs(psfdata, wavelengths, specmin, nspec)
nx = 2*p['HSIZEX']+1
ny = 2*p['HSIZEY']+1
spots = cp.zeros((nspec, nwave, ny, nx))
for ispec in range(nspec):
pGHx, pGHy = calc_pgh(ispec, wavelengths, p)
spots[ispec] = multispot(pGHx, pGHy, p['GH'][:,:,ispec,:])
# spots[ispec] = cp.einsum('lmk,mkj,lki->kji',
# p['GH'][:,:,ispec,:], pGHy, pGHx, optimize='greedy')
# spots = cp.einsum('lmnk,mkj,lki->nkji', p['GH'], pGHy, pGHx, optimize='greedy')
#- ensure positivity and normalize
#- TODO: should this be within multispot itself?
spots = spots.clip(0.0)
norm = cp.sum(spots, axis=(2,3)) #- norm[nspec, nwave] = sum over each spot
spots = (spots.T / norm.T).T #- transpose magic for numpy array broadcasting
#- Define corners of spots
#- extra 0.5 is because X and Y are relative to center of pixel not edge
xc = np.floor(p['X'] - p['HSIZEX'] + 0.5).astype(int)
yc = np.floor(p['Y'] - p['HSIZEY'] + 0.5).astype(int)
corners = (xc, yc)
return spots, corners, p
@cuda.jit()
def _cuda_projection_matrix(A, xc, yc, xmin, ymin, ispec, iwave, nspec, nwave, spots):
#this is the heart of the projection matrix calculation
ny, nx = spots.shape[2:4]
i, j = cuda.grid(2)
#no loops, just a boundary check
if (0 <= i < nspec) and (0 <= j <nwave):
ixc = xc[ispec+i, iwave+j] - xmin
iyc = yc[ispec+i, iwave+j] - ymin
#A[iyc:iyc+ny, ixc:ixc+nx, i, j] = spots[ispec+i,iwave+j]
#this fancy indexing is not allowed in numba gpu (although it is in numba cpu...)
#try this instead
for iy, y in enumerate(range(iyc,iyc+ny)):
for ix, x in enumerate(range(ixc,ixc+nx)):
temp_spot = spots[ispec+i, iwave+j][iy, ix]
A[y, x, i, j] += temp_spot
@cupy.prof.TimeRangeDecorator("get_xyrange")
def get_xyrange(ispec, nspec, iwave, nwave, spots, corners):
"""
Find xy ranges that these spectra cover
Args:
ispec: starting spectrum index
nspec: number of spectra
iwave: starting wavelength index
nwave: number of wavelengths
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
Returns (xmin, xmax, ymin, ymax)
spots[ispec:ispec+nspec,iwave:iwave+nwave] touch pixels[ymin:ymax,xmin:xmax]
"""
ny, nx = spots.shape[2:4]
xc = corners[0][ispec:ispec+nspec, iwave:iwave+nwave]
yc = corners[1][ispec:ispec+nspec, iwave:iwave+nwave]
xmin = np.min(xc)
xmax = np.max(xc) + nx
ymin = np.min(yc)
ymax = np.max(yc) + ny
return xmin, xmax, ymin, ymax
@cupy.prof.TimeRangeDecorator("projection_matrix")
def projection_matrix(ispec, nspec, iwave, nwave, spots, corners, corners_cpu):
'''
Create the projection matrix A for p = Af
Args:
ispec: starting spectrum index
nspec: number of spectra
iwave: starting wavelength index
nwave: number of wavelengths
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
Returns (A[iy, ix, ispec, iwave], (xmin, xmax, ymin, ymax))
'''
xc, yc = corners
xmin, xmax, ymin, ymax = get_xyrange(ispec, nspec, iwave, nwave, spots, corners_cpu)
cp.cuda.nvtx.RangePush('allocate A')
A = cp.zeros((ymax-ymin,xmax-xmin,nspec,nwave), dtype=np.float64)
cp.cuda.nvtx.RangePop()
cp.cuda.nvtx.RangePush('blocks_per_grid')
threads_per_block = (16, 16)
blocks_per_grid_y = math.ceil(A.shape[0] / threads_per_block[0])
blocks_per_grid_x = math.ceil(A.shape[1] / threads_per_block[1])
blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
cp.cuda.nvtx.RangePop()
cp.cuda.nvtx.RangePush('_cuda_projection_matrix')
_cuda_projection_matrix[blocks_per_grid, threads_per_block](
A, xc, yc, xmin, ymin, ispec, iwave, nspec, nwave, spots)
cuda.synchronize()
cp.cuda.nvtx.RangePop()
return A, (xmin, xmax, ymin, ymax)
@cp.memoize()
def _rdiags_mask(ndiag, nspecpad, nwave, wavepad):
nwavetot = 2*wavepad + nwave
n = nspecpad*nwavetot
ii = cp.c_[cp.arange(n)]
# select elements near diagonal
mask = cp.abs(ii + -ii.T) <= ndiag
# select elements in core wavelength regions
mask &= (cp.abs((2 * (ii % nwavetot) - (nwavetot - 0.5))) <= nwave)
return mask
@cupy.prof.TimeRangeDecorator("get_resolution_diags")
def get_resolution_diags(R, ndiag, nspecpad, nwave, wavepad):
"""Returns the diagonals of R in a form suited for creating scipy.sparse.dia_matrix
Args:
R: dense resolution matrix
ndiag: number of diagonal elements to keep in the resolution matrix
nspec: number of spectra to extract (not including padding)
nwave: number of wavelengths to extract (not including padding)
wavepad: number of extra wave bins to extract (and discard) on each end
Returns:
Rdiags (nspec, 2*ndiag+1, nwave): resolution matrix diagonals
"""
mask = _rdiags_mask(ndiag, nspecpad, nwave, wavepad)
Rdiags = R.T[mask].reshape(nspecpad, nwave, -1).swapaxes(-2, -1)
# NOTE: I think this is actually correct but need to compare with specter
# Rdiags = R[mask].reshape(nspecpad, nwave, -1).swapaxes(-2, -1)
return Rdiags
@cupy.prof.TimeRangeDecorator("ex2d_padded")
def ex2d_padded(image, imageivar, patch, spots, corners, pixpad_frac, regularize, model, psferr):
"""
Extracts a patch with border padding, but only return results for patch
Args:
image: full image (not trimmed to a particular xy range)
imageivar: image inverse variance (same dimensions as image)
ispec: starting spectrum index relative to `spots` indexing
nspec: number of spectra to extract (not including padding)
iwave: starting wavelength index
nwave: number of wavelengths to extract (not including padding)
spots: array[nspec, nwave, ny, nx] pre-evaluated PSF spots
corners: tuple of arrays xcorners[nspec, nwave], ycorners[nspec, nwave]
wavepad: number of extra wave bins to extract (and discard) on each end
Options:
bundlesize: size of fiber bundles; padding not needed on their edges
"""
ispec = patch.ispec - patch.bspecmin
nspec = patch.nspectra_per_patch
iwave = patch.iwave
nwave = patch.nwavestep
wavepad = patch.wavepad
#- Yikes, pulling this out from get_xyrange
corners_cpu = (corners[0].get(), corners[1].get())
#- Get patch pixels and projection matrix
specmin, nspectot = get_spec_padding(ispec, nspec, patch.bundlesize)
patchpixels, patchivar, patchA4, xyslice = _prepare_patch(
image, imageivar, specmin, nspectot, iwave, nwave, wavepad, spots, corners, corners_cpu, pixpad_frac
)
#- Standardize problem size
icov, y = _apply_weights(
patchpixels.ravel(), patchivar.ravel(), patchA4.reshape(patchpixels.size, -1),
regularize=regularize
)
#- Perform the extraction
nwavetot = nwave + 2*wavepad
flux, fluxivar, resolution = _batch_extraction(icov, y, nwavetot)
#- Finalize the output for this patch
ndiag = spots.shape[2]//2
result = _finalize_patch(
patchpixels, patchivar, patchA4, xyslice,
flux, fluxivar, resolution,
ispec-specmin, nspec,
nwave, wavepad, ndiag, psferr, patch, model=model
)
return result
@cupy.prof.TimeRangeDecorator("_prepare_patch")
def _prepare_patch(image, imageivar, specmin, nspectot, iwave, nwave, wavepad, spots, corners, corners_cpu, pixpad_frac):
"""This is essentially the preamble of `gpu_specter.extract.gpu.ex2d_padded`"""
#- Get the projection matrix for the full wavelength range with padding
# specmin, nspectot = get_spec_padding(ispec, nspec, bundlesize)
wavemin, nwavetot = iwave-wavepad, nwave+2*wavepad
A4, xyrange = projection_matrix(specmin, nspectot, wavemin, nwavetot, spots, corners, corners_cpu)
xmin, xmax, ypadmin, ypadmax = xyrange
#- But we only want to use the pixels covered by the original wavelengths
#- TODO: this unnecessarily also re-calculates xranges
xlo, xhi, ymin, ymax = get_xyrange(specmin, nspectot, iwave, nwave, spots, corners_cpu)
# ypadlo = ymin - ypadmin
# ypadhi = ypadmax - ymax
# A4 = A4[ypadlo:-ypadhi]
#- TODO: for ypadmax=ymax the above logic will not work
ypadlo = int((ymin - ypadmin) * (1 - pixpad_frac))
ypadhi = int((ymax - ypadmin) + (ypadmax - ymax) * (pixpad_frac))
A4 = A4[ypadlo:ypadhi]
#- use padded pixel boundaries
# ymin, ymax = ypadmin, ypadmax
#- Number of image pixels in y and x
ny, nx = A4.shape[0:2]
ymin = ypadmin+ypadlo
ymax = ypadmin+ypadhi
#- Check dimensions
assert A4.shape[2] == nspectot
assert A4.shape[3] == nwavetot
if (0 <= ymin) & (ymin+ny <= image.shape[0]):
xyslice = np.s_[ymin:ymin+ny, xmin:xmin+nx]
patchpixels = image[xyslice]
patchivar = imageivar[xyslice]
elif ymin+ny > image.shape[0]:
ny = image.shape[0] - ymin
A4 = A4[:ny]
xyslice = np.s_[ymin:ymin+ny, xmin:xmin+nx]
patchpixels = image[xyslice]
patchivar = imageivar[xyslice]
else:
#- TODO: this zeros out the entire patch if any of it is off the edge
#- of the image; we can do better than that
#print('offedge:', ymin, ymin+ny, image.shape[0], flush=True)
xyslice = None
patchivar = cp.zeros((ny, nx))
patchpixels = cp.zeros((ny, nx))
return patchpixels, patchivar, A4, xyslice
@cupy.fuse()
def _regularize(ATNinv, regularize, weight_scale):
fluxweight = ATNinv.sum(axis=-1)
minweight = weight_scale*cp.max(fluxweight)
ibad = fluxweight <= minweight
lambda_squared = ibad*(minweight - fluxweight) + ~ibad*regularize*regularize
return lambda_squared
@cupy.prof.TimeRangeDecorator("_apply_weights")
def _apply_weights(pixel_values, pixel_ivar, A, regularize, weight_scale=default_weight_scale):
"""This is essentially the preamble of of `gpu_specter.extract.both.xp_deconvolve`
The outputs of this will be uniform shape for a subbundle.
"""
ATNinv = A.T * pixel_ivar
icov = ATNinv.dot(A)
y = ATNinv.dot(pixel_values)
fluxweight = ATNinv.sum(axis=-1)
minweight = weight_scale*cp.max(fluxweight)
ibad = fluxweight <= minweight
#- TODO: regularize vs regularize**2 ?
lambda_squared = regularize*regularize*cp.ones_like(y)
lambda_squared[ibad] = minweight - fluxweight[ibad]
icov += cp.diag(lambda_squared)
#- TODO: is cupy.fuse() faster?
# icov += cp.diag(_regularize(ATNinv, regularize, weight_scale))
return icov, y
@cupy.prof.TimeRangeDecorator("_batch_apply_weights")
def _batch_apply_weights(batch_pixels, batch_ivar, batch_A4, regularize, weight_scale=default_weight_scale):
"""Turns a list of subbundle patch inputs into batch arrays of unifom shape
"""
batch_size = len(batch_A4)
ny, nx, nspecpad, nwavetot = batch_A4[0].shape
nbin = nspecpad * nwavetot
batch_icov = cp.zeros((batch_size, nbin, nbin))
batch_y = cp.zeros((batch_size, nbin))
for i, (pix, ivar, A4) in enumerate(zip(batch_pixels, batch_ivar, batch_A4)):
# Note that each patch can have a different number of pixels
batch_icov[i], batch_y[i] = _apply_weights(
pix.ravel(), ivar.ravel(), A4.reshape(-1, nbin),
regularize=regularize, weight_scale=weight_scale)
return batch_icov, batch_y
@cupy.prof.TimeRangeDecorator("_batch_apply_resolution")
def _batch_apply_resolution(deconvolved, Q):
"""Compute and apply resolution to deconvolved flux"""
s = cp.einsum('...ij->...i', Q)
resolution = Q/s[..., cp.newaxis]
fluxivar = s*s
flux = cp.einsum('...ij,...j->...i', resolution, deconvolved)
return flux, fluxivar, resolution
@cupy.prof.TimeRangeDecorator("_batch_extraction")
def _batch_extraction(icov, y, nwavetot):
"""Performs batch extraction given a batch of patches from a subbundle.
Note that the inputs are lists of ndarrays because the patches on the ccd are not
the same size.
"""
# batch_size = len(A4)
# ny, nx, nspecpad, nwavetot = A4[0].shape
# cp.cuda.nvtx.RangePush('apply_weights')
# icov, y = _batch_apply_weights(pixel_values, pixel_ivar, A4, regularize=regularize)
# cp.cuda.nvtx.RangePop() # apply_weights
cp.cuda.nvtx.RangePush('deconvolve')
deconvolved = cholesky_solve(icov, y)
cp.cuda.nvtx.RangePop() # deconvolve
cp.cuda.nvtx.RangePush('decorrelate')
Q = diag_block_matrix_sqrt(icov, nwavetot)
#- TODO: implement alternate noise decorrelation path
# Q = matrix_sqrt(icov)
cp.cuda.nvtx.RangePop() # decorrelate
cp.cuda.nvtx.RangePush('apply_resolution')
flux, fluxivar, resolution = _batch_apply_resolution(deconvolved, Q)
cp.cuda.nvtx.RangePop() # apply_resolution
return flux, fluxivar, resolution
@cp.fuse()
def compute_chisq(patchpixels, patchivar, patchmodel, psferr):
modelsigma = psferr*patchmodel
ii = (modelsigma > 0 ) & (patchivar > 0)
totpix_ivar = ii*cp.reciprocal(~ii + ii*modelsigma*modelsigma + ii*cp.reciprocal(ii*patchivar+~ii))
chi = (patchpixels - patchmodel)*cp.sqrt(totpix_ivar)
return chi*chi
@cp.fuse()
def reweight_chisq(chi2pix, weight):
bad = weight == 0
return (chi2pix * ~bad) / (weight + bad)
@cupy.prof.TimeRangeDecorator("_finalize_patch")
def _finalize_patch(patchpixels, patchivar, A4, xyslice, fx, ivarfx, R,
ispec, nspec, nwave, wavepad, ndiag, psferr, patch, model=None):
"""This is essentially the postamble of gpu_specter.extract.gpu.ex2d_padded."""
ny, nx, nspectot, nwavetot = A4.shape
#- Select the non-padded spectra x wavelength core region
specslice = np.s_[ispec:ispec+nspec,wavepad:wavepad+nwave]
specflux = fx.reshape(nspectot, nwavetot)[specslice]
specivar = ivarfx.reshape(nspectot, nwavetot)[specslice]
#- Diagonals of R in a form suited for creating scipy.sparse.dia_matrix
Rdiags = get_resolution_diags(R, ndiag, nspectot, nwave, wavepad)[specslice[0]]
# if cp.any(cp.isnan(specflux[:, patch.keepslice])):
# # raise RuntimeError('Found NaN in extracted flux')
# print(f'nanflux: {patch.bspecmin}, {patch.ispec}, {patch.iwave}, {xyslice}', flush=True)
# if cp.any(specflux[:, patch.keepslice] == 0):
# # raise RuntimeError('Found zero in extracted flux')
# print(specflux.shape, patch.keepslice, flush=True)
# print(f'zeroflux: ({patch.bspecmin}, {patch.ispec}, {ispec}), {patch.iwave}, {xyslice}', flush=True)
# where = np.where(specflux[:, patch.keepslice] == 0)
# print(f'where: {where}', flush=True)
patchpixels = patchpixels.ravel()
patchivar = patchivar.ravel()
cp.cuda.nvtx.RangePush('pixmask_fraction')
Apatch = A4[:, :, specslice[0], specslice[1]]
Apatch = Apatch.reshape(ny*nx, nspec*nwave)
pixmask_fraction = Apatch.T.dot(patchivar == 0)
pixmask_fraction = pixmask_fraction.reshape(nspec, nwave)
cp.cuda.nvtx.RangePop() # pixmask_fraction
#- Weighted chi2 of pixels that contribute to each flux bin;
#- only use unmasked pixels and avoid dividing by 0
cp.cuda.nvtx.RangePush('chi2pix')
cp.cuda.nvtx.RangePush('modelpadded')
Apadded = A4.reshape(ny*nx, nspectot*nwavetot)
patchmodel = Apadded.dot(fx.ravel())
cp.cuda.nvtx.RangePop()
cp.cuda.nvtx.RangePush('chi2')
chi2 = compute_chisq(patchpixels, patchivar, patchmodel, psferr)
cp.cuda.nvtx.RangePop()
cp.cuda.nvtx.RangePush('Apadded dot chi2')
chi2pix = Apadded.T.dot(chi2)
cp.cuda.nvtx.RangePop()
cp.cuda.nvtx.RangePush('psfweight')
psfweight = Apadded.T.dot(chi2 > 0)
chi2pix = reweight_chisq(chi2pix, psfweight)
cp.cuda.nvtx.RangePop()
chi2pix = chi2pix.reshape(nspectot, nwavetot)[specslice]
cp.cuda.nvtx.RangePop() # chi2pix
if model:
#TODO: divide flux by wavelength grid spacing?
modelimage = Apatch.dot(specflux.ravel()*(specivar.ravel() > 0)).reshape(ny, nx)
else:
#modelimage = cp.zeros((ny, nx))
modelimage = None
result = dict(
flux = specflux,
ivar = specivar,
Rdiags = Rdiags,
modelimage = modelimage,
xyslice = xyslice,
pixmask_fraction = pixmask_fraction,
chi2pix = chi2pix,
)
return result
def ex2d_subbundle(image, imageivar, patches, spots, corners, pixpad_frac, regularize, model, psferr):
"""Extract an entire subbundle of patches. The patches' output shape (nspec, nwave) must be aligned.
Args:
image: full image (not trimmed to a particular xy range)
imageivar: image inverse variance (same dimensions as image)
patches: list contain gpu_specter.core.Patch objects for extraction
spots: array[nspec, nwave, ny, nx] pre-evaluated PSF spots
corners: tuple of arrays xcorners[nspec, nwave], ycorners[nspec, nwave]
pixpad_frac: padded pixel fraction to use (value between 0 and 1)
regularize: added to diagonal of icov
model: compute image pixel model using extracted flux
psferr: value of error to assume in psf model
Returns:
results: list of (patch, result) tuples
"""
batch_pixels = list()
batch_ivar = list()
batch_A4 = list()
batch_xyslice = list()
# batch_icov = list()
# batch_y = list()
corners_cpu = (corners[0].get(), corners[1].get())
cp.cuda.nvtx.RangePush('batch_prepare')
# Use the first patch to determine spec padding, must be the same for all patches
# in this subbundle
p = patches[0]
specmin, nspectot = get_spec_padding(p.ispec-p.bspecmin, p.nspectra_per_patch, p.bundlesize)
nwavetot = p.nwavestep + 2*p.wavepad
batch_size = len(patches)
n = nspectot*nwavetot
batch_icov = cp.zeros((batch_size, n, n))
batch_y = cp.zeros((batch_size, n))
for i, patch in enumerate(patches):
patchpixels, patchivar, patchA4, xyslice = _prepare_patch(
image, imageivar, specmin, nspectot,
patch.iwave, patch.nwavestep, patch.wavepad,
spots, corners, corners_cpu, pixpad_frac
)
patch.xyslice = xyslice
batch_pixels.append(patchpixels)
batch_ivar.append(patchivar)
batch_A4.append(patchA4)
batch_xyslice.append(xyslice)
batch_icov[i], batch_y[i] = _apply_weights(
patchpixels.ravel(),
patchivar.ravel(),
patchA4.reshape(patchpixels.size, nspectot*nwavetot),
regularize=regularize
)
# batch_icov.append(icov)
# batch_y.append(y)
# batch_icov = cp.array(batch_icov)
# batch_y = cp.array(batch_y)
cp.cuda.nvtx.RangePop()
# perform batch extraction
cp.cuda.nvtx.RangePush('batch_extraction')
batch_flux, batch_fluxivar, batch_resolution = _batch_extraction(
batch_icov, batch_y, nwavetot
)
cp.cuda.nvtx.RangePop()
# finalize patch results
cp.cuda.nvtx.RangePush('batch_finalize')
results = list()
for i, patch in enumerate(patches):
result = _finalize_patch(
batch_pixels[i], batch_ivar[i], batch_A4[i], batch_xyslice[i],
batch_flux[i], batch_fluxivar[i], batch_resolution[i],
patch.ispec-patch.bspecmin-specmin, patch.nspectra_per_patch,
patch.nwavestep, patch.wavepad, patch.ndiag, psferr, patch, model=model
)
results.append( (patches[i], result) )
cp.cuda.nvtx.RangePop()
return results
|
# gridDataFormats --- python modules to read and write gridded data
# Copyright (c) 2009-2014 Oliver Beckstein <[email protected]>
# Released under the GNU Lesser General Public License, version 3 or later.
# See the files COPYING and COPYING.LESSER for details.
"""
:mod:`gridData` -- Handling grids of data
=========================================
Overview
--------
This module contains classes that allow importing and exporting of
simple gridded data. A grid is an N-dimensional array that represents
a discrete mesh over a region of space. The array axes are taken to be
parallel to the cartesian axes of this space. Together with this array
we also store the edges, which are (essentially) the cartesian
coordinates of the intersections of the grid (mesh) lines on the
axes. In this way the grid is anchored in space.
The :class:`~gridData.core.Grid` object can be resampled at arbitrary resolution (by
interpolating the data). Standard algebraic operations are defined for
grids on a point-wise basis (same as for :class:`numpy.ndarray`).
Description
-----------
The package reads grid data from files, makes them available as a
:class:`~gridData.core.Grid` object, and allows one to write out the data again.
A :class:`~gridData.core.Grid` consists of a rectangular, regular, N-dimensional
array of data. It contains
(1) The position of the array cell edges.
(2) The array data itself.
This is equivalent to knowing
(1) The origin of the coordinate system (i.e. which data cell
    corresponds to (0,0,...,0))
(2) The spacing of the grid in each dimension.
(3) The data on a grid.
:class:`~gridData.core.Grid` objects have some convenient properties:
* The data is represented as a :class:`numpy.ndarray` and thus shares
all the advantages coming with this sophisticated and powerful
library.
* They can be manipulated arithmetically, e.g. one can simply add or
subtract two of them and get another one, or multiply by a
constant. Note that all operations are defined point-wise (see the
:mod:`numpy` documentation for details) and that only grids defined
on the same cell edges can be combined.
* A :class:`~gridData.core.Grid` object can also be created from within python code
e.g. from the output of the :func:`numpy.histogramdd` function.
* The representation of the data is abstracted from the format that
the files are saved in. This makes it straightforward to add
additional readers for new formats.
* The data can be written out again in formats that are understood by
other programs such as VMD or PyMOL.
Reading grid data files
-----------------------
Some Formats_ can be read directly from a file on disk::
g = Grid(filename)
*filename* could be, for instance, "density.dx".
Constructing a Grid
-------------------
Data from an n-dimensional array can be packaged as a :class:`~gridData.core.Grid`
for convenient handling (especially export to other formats). The
:class:`~gridData.core.Grid` class acts as a universal constructor::
g = Grid(ndarray, edges=edges) # from histogramdd
g = Grid(ndarray, origin=origin, delta=delta) # from arbitrary data
  g.export(filename, format)   # export to the desired format
See the doc string for :class:`~gridData.core.Grid` for details.
Formats
-------
The following formats are available (:ref:`supported-file-formats`):
:mod:`~gridData.OpenDX`
IBM's Data Explorer, http://www.opendx.org/
:mod:`~gridData.gOpenMol`
http://www.csc.fi/gopenmol/
:mod:`~gridData.CCP4`
CCP4 format http://www.ccp4.ac.uk/html/maplib.html#description
pickle
python pickle file (:mod:`pickle`)
"""
from .core import Grid
from . import OpenDX
from . import gOpenMol
from . import CCP4
__all__ = ['Grid', 'OpenDX', 'gOpenMol', 'CCP4']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
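# A minimal usage sketch of the workflow described in the module docstring above:
# build a Grid from numpy.histogramdd output, do point-wise arithmetic, and export
# to OpenDX. The bin counts and the output filename "out.dx" are illustrative
# placeholders, not part of the package.
if __name__ == "__main__":  # pragma: no cover
    import numpy as np

    sample = np.random.random((1000, 3))
    histogram, edges = np.histogramdd(sample, bins=(10, 10, 10))
    g = Grid(histogram, edges=edges)   # anchor the counts in space via the edges
    g2 = g * 2                         # point-wise arithmetic with a constant
    g2.export("out.dx")                # format inferred from the .dx extension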
|
# Local
from .helpers import config
import discord
from discord.ext import commands
class AdminTools(commands.Cog):
def __init__(self, client):
self.client = client
self.prefix = config.load()['bot']['prefix']
@commands.has_permissions(administrator=True)
    @commands.command(aliases=['config', 'settings'], help='🔒Change the bot settings. [Requires the "administrator" permission]', usage='Provide no argument for info and help.')
async def configure(self, ctx, *args):
args = ' '.join(args)
if args.strip(' '):
path = args.split(' = ')[0].split()
to = args.split(' = ')[1]
config.edit(path=path, to=to)
            await ctx.send(embed=discord.Embed(title='Setting applied!', description=f'> `{" ".join(path)}` was set to **`{to}`**!', color=config.load()['design']['colors']['primary']))
else:
            await ctx.send(embed=discord.Embed(title='How the settings work', description=f'> **Arguments:** `{self.prefix}config <path, separated by spaces> <value>`,\n> **Example:** `{self.prefix}config bot prefix $`\n(Some settings require a restart!)\n**__Current settings__**\n\n```yml\n{open("src/config.yml").read()}```'))
def setup(client):
    client.add_cog(AdminTools(client))
|
from maya.app.renderSetup.views.lightEditor.lightSource import *
from maya.app.renderSetup.views.lightEditor.group import GroupAttributes
from maya.app.renderSetup.views.lightEditor.group import Group
import PySide2.QtCore as _QtCore
class ItemModel(_QtCore.QAbstractItemModel):
"""
This class defines the view's model which is represented by a tree of items.
"""
def __del__(self):
pass
def __init__(self):
pass
def addGroup(self, mayaObj, parentIndex='<PySide2.QtCore.QModelIndex(-1,-1,0x0,QObject(0x0)) >'):
pass
def addLightSource(self, transformObj, shapeObj, parentIndex='<PySide2.QtCore.QModelIndex(-1,-1,0x0,QObject(0x0)) >'):
pass
def allowOverride(self):
pass
def columnCount(self, parent='<PySide2.QtCore.QModelIndex(-1,-1,0x0,QObject(0x0)) >'):
pass
def data(self, index, role):
"""
Returns the data stored under the given role for the item referred to by the index.
"""
pass
def dispose(self):
pass
def dropMimeData(self, data, action, row, column, parentIndex):
"""
Handles the data supplied by a drag and drop operation that ended with the given action.
"""
pass
def emitDataChanged(self, idx1, idx2):
pass
def findNode(self, mayaObj):
pass
def flags(self, index):
"""
Returns the item flags for the given index.
"""
pass
def getRenderLayer(self):
pass
    def headerData(self, section, orientation, role=_QtCore.Qt.DisplayRole):
pass
def index(self, row, column, parentIndex='<PySide2.QtCore.QModelIndex(-1,-1,0x0,QObject(0x0)) >'):
"""
Returns the index of the item in the model specified by the given row, column and parent index.
"""
pass
def indexFromNode(self, node):
pass
def isResetting(self):
pass
def loadScene(self):
pass
def mimeData(self, indexes):
"""
Returns an object that contains serialized items of data corresponding to the list of indexes specified.
"""
pass
def mimeTypes(self):
"""
Returns the list of allowed MIME types.
"""
pass
def nodeFromIndex(self, index):
"""
Returns the node specified by index, if the index is invalid, returns the root node.
"""
pass
def parent(self, index):
"""
Returns the parent index of the model item with the given index. If the item has no parent, an invalid QModelIndex is returned.
"""
pass
def rowCount(self, parentIndex='<PySide2.QtCore.QModelIndex(-1,-1,0x0,QObject(0x0)) >'):
"""
Returns the number of rows under the given parent
"""
pass
    def setData(self, index, value, role=_QtCore.Qt.EditRole):
"""
Sets a new value for this index.
"""
pass
def setModelContext(self, layer, canOverride):
pass
def startReset(self):
pass
def supportedDropActions(self):
"""
Returns the drop actions supported by this model.
"""
pass
staticMetaObject = None
valueEditedByUser = None
ENABLE_LIGHT_EDITOR_GROUP_CMD = []
MODEL_ROOT_NODE_NAME = 'lightEditorRoot'
|
"""
1088. Confusing Number II
We can rotate digits by 180 degrees to form new digits. When 0, 1, 6, 8, 9 are rotated 180 degrees, they become 0, 1, 9, 8, 6 respectively. When 2, 3, 4, 5 and 7 are rotated 180 degrees, they become invalid.
A confusing number is a number that, when rotated 180 degrees, becomes a different number with each digit valid. (Note that the rotated number can be greater than the original number.)
Given a positive integer N, return the number of confusing numbers between 1 and N inclusive.
Example 1:
Input: 20
Output: 6
Explanation:
The confusing numbers are [6,9,10,16,18,19].
6 converts to 9.
9 converts to 6.
10 converts to 01 which is just 1.
16 converts to 91.
18 converts to 81.
19 converts to 61.
Example 2:
Input: 100
Output: 19
Explanation:
The confusing numbers are [6,9,10,16,18,19,60,61,66,68,80,81,86,89,90,91,98,99,100].
"""
# Backtracking
# it's not obvious at first that a backtracking enumeration applies here
# Runtime: 2904 ms, faster than 43.07% of Python3 online submissions for Confusing Number II.
# Memory Usage: 95.6 MB, less than 33.33% of Python3 online submissions for Confusing Number II.
class Solution:
def confusingNumberII(self, N: int) -> int:
useful = [0, 1, 6, 8, 9]
res = 0
self.change = {"0":"0", "1":"1", "6":"9", "9":"6", "8":"8"}
res = self.dfs(N, useful, [1, 6, 8, 9])
return res
def dfs(self, N, useful, curr_list):
if len(curr_list) == 0:
return 0
new_list = []
res = 0
for num in curr_list:
if self.is_confusing(num):
res += 1
for ele in useful:
new_num = num * 10 + ele
if new_num > N:
continue
new_list.append(new_num)
res += self.dfs(N, useful, new_list)
return res
def is_confusing(self, num):
str_num = str(num)
left, right = 0, len(str_num) - 1
while left <= right:
if str_num[left] != self.change[str_num[right]]:
return True
left += 1
right -= 1
return False
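# Quick sanity checks against the two examples in the problem statement above
# (run this file directly to exercise them).
if __name__ == "__main__":
    solution = Solution()
    assert solution.confusingNumberII(20) == 6
    assert solution.confusingNumberII(100) == 19
    print("confusingNumberII examples passed")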
|
import numpy
import disba
import helpers
def test_ellipticity():
velocity_model = helpers.velocity_model(5)
t = numpy.logspace(0.0, 1.0, 20)
ell = disba.Ellipticity(*velocity_model)
rel = ell(t)
assert numpy.allclose(14.038, rel.ellipticity.sum(), atol=0.001)
|
"""Support for Axis devices."""
import logging
from homeassistant.const import (
CONF_DEVICE,
CONF_HOST,
CONF_MAC,
CONF_PASSWORD,
CONF_PORT,
CONF_TRIGGER_TIME,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from .const import CONF_CAMERA, CONF_EVENTS, DEFAULT_TRIGGER_TIME, DOMAIN
from .device import AxisNetworkDevice, get_device
LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Old way to set up Axis devices."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up the Axis component."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
if not config_entry.options:
await async_populate_options(hass, config_entry)
device = AxisNetworkDevice(hass, config_entry)
if not await device.async_setup():
return False
# 0.104 introduced config entry unique id, this makes upgrading possible
if config_entry.unique_id is None:
hass.config_entries.async_update_entry(
config_entry, unique_id=device.api.vapix.params.system_serialnumber
)
hass.data[DOMAIN][config_entry.unique_id] = device
await device.async_update_device_registry()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.shutdown)
return True
async def async_unload_entry(hass, config_entry):
"""Unload Axis device config entry."""
device = hass.data[DOMAIN].pop(config_entry.data[CONF_MAC])
return await device.async_reset()
async def async_populate_options(hass, config_entry):
"""Populate default options for device."""
device = await get_device(
hass,
host=config_entry.data[CONF_HOST],
port=config_entry.data[CONF_PORT],
username=config_entry.data[CONF_USERNAME],
password=config_entry.data[CONF_PASSWORD],
)
supported_formats = device.vapix.params.image_format
camera = bool(supported_formats)
options = {
CONF_CAMERA: camera,
CONF_EVENTS: True,
CONF_TRIGGER_TIME: DEFAULT_TRIGGER_TIME,
}
hass.config_entries.async_update_entry(config_entry, options=options)
async def async_migrate_entry(hass, config_entry):
"""Migrate old entry."""
LOGGER.debug("Migrating from version %s", config_entry.version)
    # Flatten configuration but keep old data in case the user rolls back HASS
if config_entry.version == 1:
config_entry.data = {**config_entry.data, **config_entry.data[CONF_DEVICE]}
config_entry.version = 2
LOGGER.info("Migration to version %s successful", config_entry.version)
return True
|
"""plots saliency maps of images to determine
which pixels most contriute to the final output"""
import os
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as T
from PIL import Image
import cocpit.config as config
plt_params = {
"axes.labelsize": "large",
"axes.titlesize": "large",
"xtick.labelsize": "large",
"ytick.labelsize": "large",
}
plt.rcParams["font.family"] = "serif"
plt.rcParams.update(plt_params)
def which_ax(x):
    """Map a flat class index to (column, image-row, saliency-row) axes indices."""
y = 0
y1 = 1
if x >= 3 and x < 6:
x -= 3
y = 2
y1 = 3
if x >= 6 and x < 9:
x -= 6
y = 4
y1 = 5
if x >= 9:
x -= 9
return x, y, y1
def plot_on_axis(image, ax, x, class_, model):
'''plot saliency map on axis'''
x, y, y1 = which_ax(x)
ax[y, x].imshow(image)
ax[y, x].set_title(class_[1])
ax[y, 0].set_ylabel("Original Image")
ax[y, x].axes.xaxis.set_ticks([])
ax[y, x].axes.yaxis.set_ticks([])
image = preprocess(image)
saliency = get_saliency(image, model)
# code to plot the saliency map as a heatmap
ax[y1, x].imshow(saliency[0], cmap=plt.cm.hot)
ax[y1, 0].set_ylabel("Saliency Map")
ax[y1, x].axes.xaxis.set_ticks([])
ax[y1, x].axes.yaxis.set_ticks([])
def plot_saliency(model, class_names, savefig=True):
"""The saliency map will show the strength
for each pixel contribution to the final output"""
fig, ax = plt.subplots(6, 3, figsize=(5, 12))
for x, class_ in enumerate(class_names.items()):
open_dir = (
f"{config.BASE_DIR}/cpi_data/training_datasets/hand_labeled_resized_v1.3.0_no_blank/%s/"
% class_[0]
)
file = os.listdir(open_dir)[21]
image = Image.open(open_dir + file).convert("RGB")
plot_on_axis(image, ax, x, class_, model)
if savefig:
fig.savefig(f"{config.BASE_DIR}/plots/saliency_maps.png")
def preprocess(image, size=224):
"""Preprocess the image, convert to tensor, normalize,
and convert to correct shape"""
transform = T.Compose(
[
T.Resize((size, size)),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
tensor = transform(image).unsqueeze(0)
tensor.requires_grad = True
return tensor
def get_saliency(image, model):
    """Find the gradient of the top class score with respect to the input image."""
    model.eval()
    # ensure gradients are tracked for the input image
    image.requires_grad_()
"""
forward pass through the model to get the scores
note that VGG-19 model doesn't perform softmax at the end
we also don't need softmax, just need scores
"""
scores = model(image)
"""Get the index corresponding to the
maximum score and the maximum score itself."""
score_max_index = scores.argmax()
score_max = scores[0, score_max_index]
"""
backward function on score_max performs the backward
pass in the computation graph and calculates the gradient of
score_max with respect to nodes in the computation graph
"""
score_max.backward()
"""
Saliency would be the gradient with respect to the input image now.
But note that the input image has 3 channels,
R, G and B. To derive a single class saliency
value for each pixel (i, j), we take the maximum magnitude
across all colour channels.
"""
saliency, _ = torch.max(image.grad.data.abs(), dim=1)
return saliency
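# A minimal, self-contained sketch of the saliency pipeline above. Assumptions:
# torchvision is installed, an untrained VGG-19 stands in for the project's trained
# model, and the random image below is a placeholder for a real CPI image.
if __name__ == "__main__":
    import numpy as np
    import torchvision.models as models

    dummy_model = models.vgg19()  # untrained weights; only the plumbing is shown
    dummy_image = Image.fromarray(
        np.random.randint(0, 255, (300, 300, 3), dtype=np.uint8)
    )
    tensor = preprocess(dummy_image)
    saliency = get_saliency(tensor, dummy_model)
    print(saliency.shape)  # expected: torch.Size([1, 224, 224])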
|
# -*- coding: iso-8859-1 -*-
from sqlalchemy import create_engine
import fdb
# SIMPLE DIRECT DB ACCESS
# con = fdb.connect(dsn='/threads/proyectos/academia/db/ACADEMIA.DB', user='SYSDBA', password='masterkey')
# cur = con.cursor()
# cur.execute("SELECT CODIGO, NOMBRE FROM UBICACIONES")
# print(cur.fetchall())
engine = create_engine('firebird+fdb://sysdba:masterkey@localhost:3050/ACADEMIA')
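# A minimal usage sketch for the engine above. It assumes the ACADEMIA database is
# reachable with these credentials and contains the UBICACIONES table referenced in
# the comments; the query is illustrative only.
from sqlalchemy import text

with engine.connect() as con:
    rows = con.execute(text("SELECT CODIGO, NOMBRE FROM UBICACIONES")).fetchall()
    print(rows)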
|
#########################
### ACTIVITY CALENDAR ###
#########################
SHEET_SERVER_COLUMN = 1
SHEET_TIMESTAMP_COLUMN = 2
SHEET_ACTIVITY_COLUMN = 3
SHEET_DESCRIPTION_COLUMN = 4
SHEET_LINK_COLUMN = 5
ACTIVITY_CALENDAR_CHANNEL_ID = 390394020851089408
|
"""PatchmatchNet dataset module
reference: https://github.com/FangjinhuaWang/PatchmatchNet
"""
|
from qutipy.states.MaxEnt_state import MaxEnt_state
from qutipy.states.GHZ_state import GHZ_state
from qutipy.states.graph_state import graph_state
from qutipy.states.isotropic_state import isotropic_state
from qutipy.states.isotropic_twirl_state import isotropic_twirl_state
from qutipy.states.MaxMix_state import MaxMix_state
from qutipy.states.singlet_state import singlet_state
from qutipy.states.Werner_state import Werner_state
from qutipy.states.Werner_twirl_state import Werner_twirl_state
from qutipy.states.RandomDensityMatrix import RandomDensityMatrix
from qutipy.states.RandomStateVector import RandomStateVector
from qutipy.states.Bell_state import Bell_state
from qutipy.states.log_negativity import log_negativity
from qutipy.states.check_kext import check_kext
|
# -*- coding: utf-8 -*-
import os
from simmate.conftest import copy_test_files, make_dummy_files
from simmate.calculators.vasp.inputs import Incar
from simmate.calculators.vasp.error_handlers import Eddrmm
def test_eddrmm(tmpdir):
copy_test_files(
tmpdir,
test_directory=__file__,
test_folder="eddrmm",
)
    # we reference these files in several spots below, so we grab their paths up front
incar_filename = os.path.join(tmpdir, "INCAR")
chgcar_filename = os.path.join(tmpdir, "CHGCAR")
wavecar_filename = os.path.join(tmpdir, "WAVECAR")
# init class with default settings
error_handler = Eddrmm()
# Confirm an error IS NOT found
error_handler.filename_to_check = "vasp.no_error"
assert error_handler.check(tmpdir) == False
# Confirm an error IS found
error_handler.filename_to_check = "vasp.out"
assert error_handler.check(tmpdir) == True
# Make first attempt at fixing the error
make_dummy_files(chgcar_filename, wavecar_filename)
fix = error_handler.correct(tmpdir)
assert fix == "switched ALGO to Normal and deleted CHGCAR + WAVECAR"
assert Incar.from_file(incar_filename)["ALGO"] == "Normal"
assert not os.path.exists(chgcar_filename)
assert not os.path.exists(wavecar_filename)
# Make second attempt at fixing the error
make_dummy_files(chgcar_filename, wavecar_filename)
fix = error_handler.correct(tmpdir)
assert fix == "switch POTIM from 0.5 to 0.25 and deleted CHGCAR + WAVECAR"
assert Incar.from_file(incar_filename)["POTIM"] == 0.25
assert not os.path.exists(chgcar_filename)
assert not os.path.exists(wavecar_filename)
def test_eddrmm_neb(tmpdir):
copy_test_files(
tmpdir,
test_directory=__file__,
test_folder="eddrmm_neb",
)
# This test is identical to test_eddrmm but with an NEB folder organization
    # we reference these files in several spots below, so we grab their paths up front
incar_filename = os.path.join(tmpdir, "INCAR")
# These files exist within a series of directories 00, 01,..., 05
chgcar_filenames = [
os.path.join(tmpdir, str(n).zfill(2), "CHGCAR") for n in range(5)
]
wavecar_filenames = [
os.path.join(tmpdir, str(n).zfill(2), "WAVECAR") for n in range(5)
]
# init class with default settings
error_handler = Eddrmm()
# Confirm an error IS NOT found
error_handler.filename_to_check = "vasp.no_error"
assert error_handler.check(tmpdir) == False
# Confirm an error IS found
error_handler.filename_to_check = "vasp.out"
assert error_handler.check(tmpdir) == True
# Make first attempt at fixing the error
make_dummy_files(*chgcar_filenames, *wavecar_filenames)
fix = error_handler.correct(tmpdir)
assert (
fix == "switched ALGO to Normal and deleted CHGCARs + WAVECARs for all images"
)
assert Incar.from_file(incar_filename)["ALGO"] == "Normal"
assert not any([os.path.exists(f) for f in chgcar_filenames])
assert not any([os.path.exists(f) for f in wavecar_filenames])
# Make second attempt at fixing the error
make_dummy_files(*chgcar_filenames, *wavecar_filenames)
fix = error_handler.correct(tmpdir)
assert (
fix
== "switch POTIM from 0.5 to 0.25 and deleted CHGCARs + WAVECARs for all images"
)
assert Incar.from_file(incar_filename)["POTIM"] == 0.25
assert not any([os.path.exists(f) for f in chgcar_filenames])
assert not any([os.path.exists(f) for f in wavecar_filenames])
|
from django.contrib.auth.models import Group, Permission
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from meiduo_admin.serializer.groups import GroupSerializer
from meiduo_admin.serializer.permission import PermissionSerializer
from meiduo_admin.utils import PageNum
class GroupView(ModelViewSet):
    # specify the serializer
    serializer_class = GroupSerializer
    # specify the queryset
    queryset = Group.objects.all()
    # specify the paginator
    pagination_class = PageNum
    # specify the permission classes
    permission_classes = [IsAdminUser]
    # fetch the permission table data
    def simple(self, request):
        # fetch all permissions
        permission = Permission.objects.all()
        # serialize and return
        ser = PermissionSerializer(permission, many=True)
        return Response(ser.data)
|
# -*- coding: utf-8 -*-
import torch
import torch.onnx
import torch.onnx.symbolic_helper
import torch.onnx.utils
import torch.nn as nn
import numpy as np
from collections import OrderedDict
from . import register
from . import mask_utils
from . import function_module
from typing import Dict, List
from torchpruner.operator import operator
from torchpruner.model_tools import *
import time
import copy
def create_operator(node):
op_kind = node.kind().split("::")
type_name = op_kind[1]
operator_class = register.operator_reg.get(type_name)
if operator_class is None:
raise RuntimeError("Can not find operator " + str(type_name))
return operator_class(node)
max_search_time = 100000
def _operator_params_combine(params_list):
params_dict = {}
for params in params_list:
for key in params:
if not isinstance(params[key], int):
raise RuntimeError("The change params must be integer")
if key not in params_dict:
params_dict[key] = params[key]
else:
params_dict[key] += params[key]
return params_dict
def _get_all_modules(module_dict, prefix, names):
module_list = []
for name in names:
prefix = prefix + "." + name
module_list.append(module_dict[prefix])
return module_list
def _get_common(scope1, scope2):
prefix = ""
i = 0
while i < len(scope1) and i < len(scope2):
if scope1[i] == scope2[i]:
if prefix == "":
prefix += scope1[i]
else:
prefix = prefix + "." + scope1[i]
i += 1
else:
break
list1 = []
list2 = []
for j in range(i, len(scope1)):
list1.append(scope1[j])
for j in range(i, len(scope2)):
list2.append(scope2[j])
return prefix, list1, list2
def _cat_names(names):
name = names[0]
for i in range(1, len(names)):
name = name + "." + names[i]
return name
def _find_module_list(module_list, target):
keys = None
if isinstance(module_list, nn.ModuleList):
keys = range(0, len(module_list))
if isinstance(module_list, nn.ModuleDict):
keys = module_list.keys()
for key in keys:
if module_list[key] is target:
return [target._get_name(), str(key)]
if isinstance(module_list[key], (nn.ModuleList, nn.ModuleDict)):
return [module_list[key]._get_name(), str(key)] + _find_module_list(
module_list[key], target
)
return None
def _get_object_to_name_dict(model):
to_name_dict = {}
stack = []
stack.append([model, "self"])
while len(stack) != 0:
obj, name = stack.pop()
if isinstance(obj, nn.Module):
to_name_dict[id(obj)] = name
for key in obj._modules.keys():
stack.append([obj._modules[key], name + "." + key])
return to_name_dict
class scope_name_workaround(object):
def __init__(self, model):
self.backup = None
self.to_name_dict = _get_object_to_name_dict(model)
self.scope_stack = []
def __enter__(self):
def _tracing_name(self_, tracing_state):
if not tracing_state._traced_module_stack:
return None
module = tracing_state._traced_module_stack[-1]
for name, child in module.named_children():
if child is self_:
return name
if isinstance(child, (nn.ModuleList, nn.ModuleDict)):
search_result = _find_module_list(child, self_)
if search_result is not None:
search_result = [child._get_name(), name] + search_result
return search_result
return None
def _slow_forward(self_, *input, **kwargs):
tracing_state = torch._C._get_tracing_state()
if not tracing_state or isinstance(self_.forward, torch._C.ScriptMethod):
return self_.forward(*input, **kwargs)
if tracing_state.current_scope() != "":
self.scope_stack.append(tracing_state.current_scope())
tracing_state.pop_scope()
if id(self_) in self.to_name_dict:
tracing_state.push_scope(self.to_name_dict[id(self_)])
try:
result = self_.forward(*input, **kwargs)
finally:
if tracing_state.current_scope() != "":
tracing_state.pop_scope()
if len(self.scope_stack) != 0:
tracing_state.push_scope(self.scope_stack[-1])
self.scope_stack.pop()
return result
self.backup = torch.nn.Module._slow_forward
setattr(torch.nn.Module, "_slow_forward", _slow_forward)
def __exit__(self, type, value, tb):
setattr(torch.nn.Module, "_slow_forward", self.backup)
# DataNode
class DataNode(object):
def __init__(self, node):
# basic info
self.name = "self." + node.debugName()
self._type = None
self._size = None
self.kind = str(node.type().kind())
if self.kind == "TensorType" or self.kind == "CompleteTensorType":
sizes = node.type().sizes()
if sizes is not None:
self._size = list(sizes)
self._type = str(node.type().scalarType())
self._is_terminal = False
self._is_input = False
self._is_output = False
# operator related
self.in_operator: operator.OperatorNode = None
self.out_operators: List[operator.OperatorNode] = []
        # data filled in later (e.g. by ONNXGraph.forward)
        self.data = None
        # flag indicating whether this node can be changed (pruned)
        self._changeable = True
# set the graph
self.graph: ONNXGraph = None
def get(self, indexs, dim):
dict_tuple = []
for _ in range(0, dim):
dict_tuple.append(slice(None, None, None))
dict_tuple.append(indexs)
return self.data[tuple(dict_tuple)]
def __str__(self):
return_str = "%" + self.name + ": "
if self._type is None:
return return_str + "Unknown()"
return_str += self._type
return_str += "("
if len(self._size) == 0:
return_str += ")"
return return_str
for s in self._size:
return_str += str(s)
return_str += ", "
return_str = return_str[:-2]
return_str += ")"
return return_str
def __repr__(self):
return self.__str__()
def is_terminal(self):
return self._is_terminal
def is_input(self):
return self._is_input
def is_output(self):
return self._is_output
def is_changeable(self):
return self._changeable
def size(self, dim=None):
if dim is None:
return self._size
if dim >= len(self._size):
raise RuntimeError("the dim out of index")
return self._size[dim]
def type(self):
return self._type
def __len__(self):
if self._size is None:
return 0
else:
if len(self._size) == 0:
return 0
return self._size[0]
def cut_analysis(self, index, dim):
mask = mask_utils.Mask(self._size)
if not isinstance(index, (list, np.ndarray)):
raise RuntimeError("The index must be a list or a ndarray")
mask.set_mask([index], [dim])
return self.cut_analysis_with_mask(mask)
def cut_analysis_with_mask(self, mask):
times = 0
mask_dict = OrderedDict()
mask_dict[self.name] = mask
operator_dict = OrderedDict()
stack = []
stack.append((self, mask, None))
while len(stack) != 0:
node, mask, push_operator = stack.pop()
operators = node.out_operators
operators = operators[:]
if node.in_operator is not None:
operators.append(node.in_operator)
            # remove the operator that pushed this node onto the stack
if push_operator is not None:
for i in range(0, len(operators)):
if id(operators[i]) == id(push_operator):
del operators[i]
break
# run analysis for operator
for operator in operators:
return_masks, operator_params = operator.analysis(node, mask)
# handle operator_dict
if operator_params is not None:
if operator.name not in operator_dict:
operator_dict[operator.name] = [operator_params]
else:
operator_dict[operator.name].append(operator_params)
# handle return_dict
for name in return_masks.keys():
return_node = self.graph.nodes[name]
if name in mask_dict.keys():
if mask_dict[name].include(return_masks[name]):
continue
mask_dict[name] = mask_utils.combine_mask(
[mask_dict[name], return_masks[name]]
)
else:
mask_dict[name] = return_masks[name]
# push stack
stack.append((return_node, return_masks[name].copy(), operator))
times += 1
if times >= max_search_time:
raise RuntimeError("max search time exceed")
conbine_dict = {}
conbine_dict["terminal"] = {}
conbine_dict["iner"] = {}
conbine_dict["operator"] = {}
for key in mask_dict.keys():
node = self.graph.nodes[key]
result = mask_dict[key].indexs()
if not node.is_terminal():
conbine_dict["iner"][key] = result
else:
conbine_dict["terminal"][key] = result
for key in operator_dict.keys():
conbine_dict["operator"][key] = _operator_params_combine(operator_dict[key])
return conbine_dict
# the module class
class Module(object):
def __init__(self):
self.name = ""
self.sub_modules: Dict[str, Module] = OrderedDict() # store the sub modules
self.in_data: List[DataNode] = [] # the data may be used different times
self.out_data: List[DataNode] = [] # the data may produced different times
self.operators: List[
operator.OperatorNode
        ] = []  # save the operators in the current module
self.nn_object: nn.Module = None # bounding the actual object
self.terminal_node: DataNode = None
def cut_analysis(self, attribute_name, index, dim):
attrs = attribute_name.split(".")
current_module = self
for attr in attrs:
if attr in current_module.sub_modules:
current_module = current_module.sub_modules[attr]
else:
raise RuntimeError("Can not find attribute " + str(attribute_name))
if current_module.terminal_node is None:
raise RuntimeError("The target attribute is not cuttable")
return current_module.terminal_node.cut_analysis(index, dim)
def __str__(self):
return_string = ""
class_string = str(self.nn_object.__class__)[8:-2]
return_string += class_string
return_string += "["
if self.terminal_node is not None:
return_string += "self."
terminal_string = str(getattr(self, "terminal_node"))[1:]
split_string = terminal_string.split(":")
return_string += split_string[0]
return_string += "]:"
return_string += split_string[1][1:]
else:
return_string += self.name
return_string += "]"
return return_string
def __repr__(self):
return self.__str__()
class ONNXGraph(object):
def __init__(self, model, onnx_device="CPU"):
if isinstance(
model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
):
print(
"WARNING: The model is warped with the DataParallel, the Graph object just binding the model.module part"
)
self._model: nn.Module = model.module
else:
self._model: nn.Module = model
self.modules: Dict[str, Module] = OrderedDict()
self.inputs: Dict[str, DataNode] = OrderedDict()
self.nodes: Dict[str, DataNode] = OrderedDict()
self.outputs: Dict[str, DataNode] = OrderedDict()
self.operators: Dict[str, operator.OperatorNode] = OrderedDict()
self._device: str = onnx_device
def __str__(self):
out_string = ""
for node in self.nodes.keys():
out_string += str(self.nodes[node])
out_string += "\n"
for operator in self.operators.keys():
out_string += str(self.operators[operator])
out_string += "\n"
return out_string
def forward(self, inputs):
# change the inputs
if len(self.inputs.keys()) != len(inputs):
raise RuntimeError(
"The inputs numbers is wrong expected "
+ str(self.inputs.keys())
+ " but got "
+ str(len(inputs))
)
input_keys = list(self.inputs.keys())
for i in range(0, len(inputs)):
if list(inputs[i].size()) != list(self.inputs[input_keys[i]].size()):
raise RuntimeError(
"The inputs must as the same size as the origin input"
)
self.inputs[input_keys[i]].data = inputs[i].numpy()
for operator in self.operators:
self.operators[operator].fill_shape()
self.operators[operator].fill_value()
def set_device(self, device):
self._device = device
for operator in self.operators:
self.operators[operator].set_device(device)
def get_device(self):
return self._device
def flops(self):
total_flops = 0
for operator in self.operators:
total_flops += self.operators[operator].flops()
return total_flops / 1000000
def get_module_by_object(self, obj):
for module_name in self.modules:
c_module = self.modules[module_name]
if id(c_module.nn_object) == id(obj):
return c_module
return None
def build_graph(self, inputs, fill_value=True, training=False):
# prepare the data structure
data_node_dict: Dict[str, DataNode] = OrderedDict()
# input node dict
input_node_dict: Dict[str, DataNode] = OrderedDict()
# terminal_node_dict
terminal_node_dict: Dict[str, DataNode] = OrderedDict()
# output node dict
output_node_dict: Dict[str, DataNode] = OrderedDict()
# operator list
operator_dict: Dict[str, DataNode] = OrderedDict()
# module dict
module_dict: Dict[str, Module] = OrderedDict()
# check the function_module
if function_module.function_module_activate() and not hasattr(
self._model, "_init_function_module_ok"
):
raise RuntimeError("Call the init_function_module in function_module mode")
# deep copy the model
model = copy.deepcopy(self._model)
model = model.cpu()
# preprocess the quantization node
for module in model.modules():
if isinstance(module, torch.quantization.FakeQuantize):
module.calculate_qparams()
model.apply(torch.quantization.disable_observer)
with scope_name_workaround(model):
torch.onnx.symbolic_helper._set_opset_version(11)
graph, params_dict, torch_out = torch.onnx.utils._model_to_graph(
model,
inputs,
_retain_param_name=True,
do_constant_folding=False,
training=training,
)
torch.onnx.symbolic_helper._set_opset_version(9)
# create the inputs and the terminals
inputs_number = len(inputs)
input_nodes = list(graph.inputs())
total_number = len(input_nodes)
for i in range(0, total_number):
data_node = DataNode(input_nodes[i])
if i < inputs_number:
data_node._is_input = True
data_node._changeable = False
data_node.data = inputs[i].numpy()
input_node_dict[data_node.name] = data_node
else:
data_node._is_terminal = True
data_node.data = params_dict[
".".join(data_node.name.split(".")[1:])
].numpy()
terminal_node_dict[data_node.name] = data_node
data_node_dict[data_node.name] = data_node
# create the iner node and the operator node
body_nodes = list(graph.nodes())
for i in range(0, len(body_nodes)):
# create the operator node
node = body_nodes[i]
operator_node = create_operator(node)
operator_node.set_device(self._device)
# create the outputs node
outputs = list(node.outputs())
for out_node in outputs:
data_node = DataNode(out_node)
data_node.in_operator = operator_node
data_node_dict[data_node.name] = data_node
operator_node.out_data.append(data_node)
# link the inputs node
inputs = list(node.inputs())
for in_node in inputs:
in_node_name = "self." + in_node.debugName()
data_node = data_node_dict[in_node_name]
operator_node.in_data.append(data_node)
data_node.out_operators.append(operator_node)
operator_dict[str(i)] = operator_node
            # if the data node is an output, mark it as unchangeable and flag it as an output
outputs = list(node.outputs())
for out_node in outputs:
out_node_name = "self." + out_node.debugName()
data_node = data_node_dict[out_node_name]
data_node._changeable = False
data_node._is_output = True
output_node_dict[out_node_name] = data_node
# binding the graph to node
for key in data_node_dict.keys():
data_node_dict[key].graph = self
# create the module
for key in operator_dict:
operator = operator_dict[key]
obj_list = operator.obj_list
current = ""
parent = None
for i in range(0, len(obj_list)):
name = obj_list[i]
if current == "":
current = name
else:
current = current + "." + name
actual_obj = get_object(self._model, current)
if current not in module_dict.keys():
module_dict[current] = Module()
module_dict[current].name = current
module_dict[current].graph = graph
module_dict[current].nn_object = actual_obj
if parent is not None:
parent.sub_modules[name] = module_dict[current]
parent = module_dict[current]
if i == len(obj_list) - 1:
module_dict[current].operators.append(operator)
# add the terminal node
for node_name in terminal_node_dict.keys():
node = terminal_node_dict[node_name]
obj_names = node_name.split(".")
if len(obj_names) == 2 and intable(obj_names[1]):
continue
current = "self"
parent = None
for i in range(1, len(obj_names)):
name = obj_names[i]
current = current + "." + name
actual_obj = get_object(self._model, current)
if current not in module_dict.keys():
if i == len(obj_names) - 1:
if not isinstance(actual_obj, (nn.Parameter, torch.Tensor)):
raise RuntimeError(
"The terminal node must be the nn.Parameter or torch.Tensor"
)
module_dict[current] = Module()
module_dict[current].terminal_node = node
module_dict[current].name = current
module_dict[current].graph = graph
module_dict[current].nn_object = actual_obj
module_dict[current].nn_type = type(actual_obj)
if parent is not None:
parent.sub_modules[name] = module_dict[current]
parent = module_dict[current]
# bind the in_data and out_data for modules
for node_name in data_node_dict.keys():
node = data_node_dict[node_name]
if node.is_terminal() and not node.is_input():
continue
if node.is_input():
out_operators = node.out_operators
for operator in out_operators:
obj_names = operator.obj_list[1:]
prefix = "self"
modules_list = _get_all_modules(module_dict, prefix, obj_names)
for module in modules_list:
if node not in module.in_data:
module.in_data.append(node)
continue
in_operator = node.in_operator
in_scope = in_operator.obj_list
out_operators = node.out_operators[:]
if not node.is_output() and len(out_operators) == 0:
module_name = _cat_names(in_operator.obj_list)
module_dict[module_name].out_data.append(node)
continue
output_scope_list = []
for out_operator in out_operators:
output_scope_list.append(out_operator.obj_list)
            if node.is_output():
output_scope_list.append(["self"])
for scope in output_scope_list:
prefix, in_scope_names, out_scope_names = _get_common(in_scope, scope)
in_modules_list = _get_all_modules(module_dict, prefix, in_scope_names)
for module in in_modules_list:
if node not in module.out_data:
module.out_data.append(node)
out_modules_list = _get_all_modules(
module_dict, prefix, out_scope_names
)
for module in out_modules_list:
if node not in module.in_data:
module.in_data.append(node)
self.nodes = data_node_dict
self.inputs = input_node_dict
self.outputs = output_node_dict
self.modules = module_dict
self.operators = operator_dict
        # fill the shapes and values
if fill_value:
for operator in operator_dict:
operator_dict[operator].fill_shape()
operator_dict[operator].fill_value()
else:
for operator in operator_dict:
operator_dict[operator].fill_shape()
self.nodes = data_node_dict
self.inputs = input_node_dict
self.outputs = output_node_dict
self.modules = module_dict
self.operators = operator_dict
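# A minimal end-to-end sketch of the graph API defined above. It assumes the rest of
# the torchpruner package (register, operator, model_tools) is importable and a torch
# version compatible with the private ONNX export hooks used in build_graph; the toy
# convolutional model and input size are placeholders.
if __name__ == "__main__":
    toy_model = nn.Sequential(
        nn.Conv2d(3, 8, 3, padding=1),
        nn.ReLU(),
        nn.Conv2d(8, 4, 3, padding=1),
    )
    graph = ONNXGraph(toy_model)
    graph.build_graph((torch.randn(1, 3, 16, 16),))
    print("FLOPs (M):", graph.flops())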
|
from dragonfly import (IntegerRef, Integer)
from dragonfly.grammar.elements import RuleWrap, Choice
from dragonfly.language.base.integer_internal import MapIntBuilder
from dragonfly.language.loader import language
from castervoice.lib import settings
from castervoice.rules.core.numbers_rules.numeric_support import numbers_map_1_to_9 # Conditional import load from user directory?
'''
Integer Remap feature needs to be rewritten:
- allow customization
- make it language sensitive (can this be done without eval?)
'''
if not settings.settings(["miscellaneous", "integer_remap_crash_fix"]):
class IntegerRefST(RuleWrap):
def __init__(self, name, min, max, default=None):
if not settings.settings(["miscellaneous", "short_integer_opt_out"]):
content = language.ShortIntegerContent
else:
content = language.IntegerContent
if "en" in language.language_map and settings.settings(["miscellaneous", "integer_remap_opt_in"]):
content.builders[1] = MapIntBuilder(numbers_map_1_to_9())
element = Integer(None, min, max, content=content)
RuleWrap.__init__(self, name, element, default=default)
else:
print("Integer Remap switch: OFF")
class IntegerRefST(IntegerRef):
''''''
class Boolean(Choice):
def __init__(self, spec):
Choice.__init__(self, spec, {spec: True})
|
sal = float(input('Enter your current salary:\n'))
if sal >= 1200:
    aum = sal + (sal*0.15)
    print('Your salary goes from R${} to R${}.'.format(sal, aum))
else:
    aum = sal + (sal*0.2)
    print('Your salary goes from R${} to R${}.'.format(sal, aum))
print('Now go out and celebrate!')
|
"""Kernel density estimate tissue mode normalization CLI
Author: Jacob Reinhold ([email protected])
Created on: 13 Oct 2021
"""
__all__ = ["kde_main", "kde_parser"]
from intensity_normalization.normalize.kde import KDENormalize
# main functions and parsers for CLI
kde_parser = KDENormalize.parser()
kde_main = KDENormalize.main(kde_parser)
|
"""
Sum numeric strings a + b without adding them up directly.
Examples
“123” + "1" = “124”
"999" + "1" = "1000"
SOLUTION
Time O(N)
Space O(N): store the result, else O(1).
"""
from typing import List
def solve(a: str, b: str) -> str:
res: str = ""
digits: List[str] = [str(i) for i in range(10)]
longer = a
shorter = b
if len(a) < len(b):
longer, shorter = b, a
carry_over = 0
for i in range(len(longer)):
li = len(longer) - 1 - i
si = len(shorter) - 1 - i
d = int(longer[li]) + carry_over
if si >= 0:
d += int(shorter[si])
res = digits[d % 10] + res
if d <= 9:
carry_over = 0
else:
carry_over = 1
if carry_over == 1:
res = "1" + res
return res
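# Quick checks against the examples in the module docstring above.
if __name__ == "__main__":
    assert solve("123", "1") == "124"
    assert solve("999", "1") == "1000"
    print("all examples passed")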
|
import os
import time
import threading
LK = threading.Lock()
Ni = 20 + 1
Nj = 20 + 1
Np = 4
FF = []
CC = []
for i in range( Ni ):
for j in range( Nj ):
CC.append( ( i, j ) )
FF.append( not os.path.isfile( "pes.%d.%d"%( i, j ) ) )
NN = len( FF )
def worker( num ):
global LK, CC, FF, NN
fd = open( "run.%d.log"%( num ), "wt" )
while( sum( FF ) > 0 ):
LK.acquire()
w = 0
Wi = None
Wj = None
while( w < NN and ( Wi == None or Wj == None ) ):
if( FF[w] and os.path.isfile( "pes.%d.%d"%( CC[w][0], CC[w][1] - 1 ) ) ):
Wi = CC[w][0]; Oi = CC[w][0]
Wj = CC[w][1]; Oj = CC[w][1] - 1
elif( FF[w] and os.path.isfile( "pes.%d.%d"%( CC[w][0] - 1, CC[w][1] ) ) ):
Wi = CC[w][0]; Oi = CC[w][0] - 1
Wj = CC[w][1]; Oj = CC[w][1]
elif( FF[w] and os.path.isfile( "pes.%d.%d"%( CC[w][0] - 1, CC[w][1] - 1 ) ) ):
Wi = CC[w][0]; Oi = CC[w][0] - 1
Wj = CC[w][1]; Oj = CC[w][1] - 1
else:
w += 1
if( Wi != None and Wj != None ):
FF[w] = False
LK.release()
if( Wi != None and Wj != None ):
fd.write( "%d.%d >> %d.%d"%( Oi, Oj, Wi, Wj ) ); fd.flush()
os.system( "python3 test.scan %d %d %d %d"%( Oi, Oj, Wi, Wj ) )
fd.write( " >> done!\n" ); fd.flush()
time.sleep( 1 )
pid = []
for i in range( Np ):
pid.append( threading.Thread( target = worker, args = ( i, ) ) )
pid[-1].start()
time.sleep( 1 )
for i in range( Np ):
pid[i].join()
|
#!/usr/bin/python
# (C) Copyright IBM Corp. 2015, 2016
# The source code for this program is not published or
# otherwise divested of its trade secrets, irrespective of
# what has been deposited with the US Copyright Office.
from abstract_qpylib import AbstractQpylib
import json
import os
import os.path
import sys
import getpass
import logging
dev_auth_file = ".qradar_appfw.auth"
dev_console_file = ".qradar_appfw.console"
yes = ("y", "yes")
no = ("no", "n")
api_auth_user = 0
api_auth_password = 0
consoleIP = 0
handler_added = 0
manifest_location = 'manifest.json'
class SdkQpylib(AbstractQpylib):
def get_manifest_location(self):
global manifest_location
return manifest_location
def get_app_id(self):
return "DEV_APP"
def get_app_name(self):
return "SDK_APP"
def get_console_address(self):
global consoleIP
global dev_console_file
home = os.path.expanduser("~")
console_file_path = os.path.join(home, dev_console_file)
if os.path.isfile(console_file_path):
print("Loading console details from file: " + str(console_file_path))
sys.stdout.flush()
with open(console_file_path) as consolefile:
console_json = json.load(consolefile)
consoleIP = console_json["console"]
else:
if consoleIP == 0:
console_data = {}
print("What is the IP of QRadar console"),
print("required to make this API call:")
sys.stdout.flush()
consoleIP = raw_input()
console_data['console'] = consoleIP
print("Do you want to store the console IP at:" + console_file_path)
print("[y/n]:")
sys.stdout.flush()
do_store = raw_input()
if do_store in yes:
with open(console_file_path, 'w+') as console_file:
json.dump(console_data, console_file)
return consoleIP
def get_api_auth(self):
auth = None
global dev_auth_file
global api_auth_user
global api_auth_password
home = os.path.expanduser("~")
auth_file_path = os.path.join(home, dev_auth_file)
if os.path.isfile(auth_file_path):
print("Loading user details from file: " + str(auth_file_path))
sys.stdout.flush()
with open(auth_file_path) as authfile:
auth_json = json.load(authfile)
auth = (auth_json["user"], auth_json["password"])
else:
auth_data = {}
consoleAddress = self.get_console_address()
print("QRadar credentials for " + consoleAddress + " are required to make this API call:")
if api_auth_user == 0:
print( "User:" )
sys.stdout.flush()
api_auth_user = raw_input()
if api_auth_password == 0:
api_auth_password = getpass.getpass("Password:")
auth_data['user'] = api_auth_user
auth_data['password'] = api_auth_password
print("Store credentials credentials at:" + auth_file_path)
print("WARNING: credentials will be stored in clear.")
print("[y/n]:")
sys.stdout.flush()
do_store = raw_input()
if do_store in yes:
with open(auth_file_path, 'w+') as auth_file:
json.dump(auth_data, auth_file)
auth = (api_auth_user, api_auth_password)
print( "Using Auth: " + str(auth) )
return auth
def REST(self, RESTtype, requestURL, headers=None, data=None, params=None, json_inst=None, version=None):
if headers is None:
headers={}
if version is not None:
headers['Version'] = version
auth = self.get_api_auth()
fullURL = "https://" + str(self.get_console_address()) + "/" + str(requestURL)
rest_func = self.chooseREST(RESTtype)
return rest_func(URL=fullURL, headers=headers, data=data, auth=auth, params=params, json_inst=json_inst)
def add_log_handler(self, loc_logger):
global handler_added
if 0 == handler_added:
loc_logger.setLevel(self.map_log_level('debug'))
handler = logging.StreamHandler()
loc_logger.addHandler(handler)
handler_added=1
def root_path(self):
return os.getenv('QRADAR_APPFW_WORKSPACE', '~')
def get_app_base_url(self):
return "http://localhost:5000"
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# Twisted is an event-driven networking engine.
#
# This is the test for qt4reactor - Twisted is driven by the Qt mainloop.
import sys
# Workaround to remove the reactor module created by PyInstaller twisted rthook.
# Otherwise you will get error
# twisted.internet.error.ReactorAlreadyInstalledError: reactor already installed
if 'twisted.internet.reactor' in sys.modules:
del sys.modules['twisted.internet.reactor']
# Code to init Qt.
from PyQt4 import QtCore
app = QtCore.QCoreApplication(sys.argv)
# Install reactor.
import qt4reactor
qt4reactor.install()
def main():
"""Run application."""
# Hook up Qt application to Twisted.
from twisted.internet import reactor
# Make sure stopping twisted event also shuts down QT.
reactor.addSystemEventTrigger('after', 'shutdown', app.quit)
# Shutdown twisted when window is closed.
app.connect(app, QtCore.SIGNAL("lastWindowClosed()"), reactor.stop)
# Do not block test to finish.
reactor.runReturn()
if __name__ == '__main__':
main()
|
from .app_builder_message import AppBuilderMessage
from .app_builder_save_theme import AppBuilderSaveTheme
from .app_builder_create_app import AppBuilderCreateApp
from .app_builder_settings import Settings
from .app_builder_functions import UIFunctions
from qt_core import *
Gen_Class, Base_Class = loadUiType(UIFunctions().resource_path("./builder/uis/app_builder_right.ui"))
class AppBuilderRight(Base_Class, Gen_Class):
def __init__(self, parent=None, ui=None, apps_path=None, app_name=None):
super(self.__class__, self).__init__(parent)
#############################################################
# Flags
#############################################################
self.setWindowFlag(Qt.FramelessWindowHint)
#############################################################
# Initial
#############################################################
self.ui = ui
self.parent = parent
self.apps_path = apps_path
self.app_name = app_name
self.setupUi(self)
self.builder_settings = None
def setup(self):
self.themeSaver = AppBuilderSaveTheme(self, self.ui)
self.appCreator = AppBuilderCreateApp(self, self.ui)
self.message_box = AppBuilderMessage(self)
self.initFormControl()
self.resize_window()
self.show()
def resize_window(self):
screen = QApplication.primaryScreen()
self.size = screen.size()
self.resize(
self.builder_settings.items['right']['width']
+
1,
self.size.height()
-
self.builder_settings.items['right']['bottom']
-
self.builder_settings.items['right']['top']
)
self.move(
self.size.width()
-
self.builder_settings.items['right']['width']
-
1,
self.builder_settings.items['right']['top']
)
def initFormControl(self):
self.saveCurrentTheme.setCheckable(True)
self.saveCurrentTheme.setToolTip("Save current style as theme")
self.saveCurrentTheme.setCursor(QCursor(Qt.PointingHandCursor))
self.saveCurrentTheme.clicked.connect(self.save_current_theme)
self.saveCurrentTheme.enterEvent = lambda x: self.highlighter(self.saveCurrentTheme, "enter")
self.saveCurrentTheme.leaveEvent = lambda x: self.highlighter(self.saveCurrentTheme, "leave")
self.add_icon(self.saveCurrentTheme, "ei.css")
self.saveAppBuilder.setToolTip('Save all changes')
self.saveAppBuilder.setCursor(QCursor(Qt.PointingHandCursor))
self.saveAppBuilder.clicked.connect(self.parent.saveAll)
self.saveAppBuilder.enterEvent = lambda x: self.highlighter(self.saveAppBuilder, "enter")
self.saveAppBuilder.leaveEvent = lambda x: self.highlighter(self.saveAppBuilder, "leave")
self.add_icon(self.saveAppBuilder, "mdi.content-save-all-outline")
self.reloadApp.setToolTip('Reload App')
self.reloadApp.setCursor(QCursor(Qt.PointingHandCursor))
self.reloadApp.clicked.connect(self.parent.reload_app)
self.add_icon(self.reloadApp, "mdi.reload")
self.createNewApp.setCheckable(True)
self.createNewApp.setToolTip('Create new app from template')
self.createNewApp.setCursor(QCursor(Qt.PointingHandCursor))
self.createNewApp.clicked.connect(self.create_new_app)
self.createNewApp.enterEvent = lambda x: self.highlighter(self.createNewApp, "enter")
self.createNewApp.leaveEvent = lambda x: self.highlighter(self.createNewApp, "leave")
self.add_icon(self.createNewApp, "mdi.new-box")
self.compileApp.setCheckable(True)
        self.compileApp.setToolTip('Build selected app')
self.compileApp.setCursor(QCursor(Qt.PointingHandCursor))
#self.compileApp.clicked.connect(self.create_new_app)
self.add_icon(self.compileApp, "ph.buildings-bold")
self.adjustApp.setCheckable(True)
self.adjustApp.setToolTip('Adjust app')
self.adjustApp.setCursor(QCursor(Qt.PointingHandCursor))
self.adjustApp.clicked.connect(self.adjust_app)
self.add_icon(self.adjustApp, "ei.adjust-alt")
#self.setAppsPath.setCheckable(True)
self.setAppsPath.setToolTip('Set applications path ')
self.setAppsPath.setCursor(QCursor(Qt.PointingHandCursor))
self.setAppsPath.clicked.connect(self.parent.setAppsPath)
self.setAppsPath.enterEvent = lambda x: self.highlighter(self.setAppsPath, "enter")
self.setAppsPath.leaveEvent = lambda x: self.highlighter(self.setAppsPath, "leave")
self.add_icon(self.setAppsPath, "mdi.folder-table-outline")
def adjust_app(self):
btn = self.sender()
if self.builder_settings.items['selected_app'] == "":
self.message_box.notify("warning", "Adjust App", "No App selected!")
timer=QTimer.singleShot(2000, lambda: self.message_box.close())
btn.toggle()
return
if btn.isChecked():
self.parent.app.move(
self.builder_settings.items['left']['width']
+
self.builder_settings.items['center']['left'],
self.builder_settings.items['center']['top'],
)
self.parent.app.resize(
self.size.width()
-
self.builder_settings.items['left']['width']
-
self.builder_settings.items['right']['width']
-
self.builder_settings.items['center']['right'],
self.size.height()
-
self.builder_settings.items['bottom']['height']
-
self.builder_settings.items['center']['bottom']
-
self.builder_settings.items['center']['top']
)
else:
self.parent.app.resize(self.parent.settings.items['window']['initial_width'], self.parent.settings.items['window']['initial_height'])
self.parent.move_app_to_center()
def add_icon(self, btn, icon_name):
if 'icons_color' in self.builder_settings.items:
if self.builder_settings.items['icons_color'] != "":
icon_color = self.builder_settings.items['icons_color']
else:
icon_color = "white"
icon = qta.icon(icon_name, color=icon_color)
btn.setIcon(icon)
btn.setIconSize(QSize(40, 40))
def save_current_theme(self):
if self.builder_settings.items['selected_app'] == "":
self.message_box.notify("warning", "Save Settings", "No App selected!")
timer=QTimer.singleShot(2000, lambda: self.message_box.close())
self.sender().toggle()
return
if self.themeSaver.isVisible():
return
if not self.saveCurrentTheme.isChecked():
return
self.themeSaver.show()
def take_screenshot(self, name=None):
screen = QApplication.primaryScreen()
screenshot = screen.grabWindow(
QApplication.desktop().winId(),
self.ui.pos().x(),
self.ui.pos().y(),
self.ui.width(),
self.ui.height()
)
screenshot.save(f'{self.apps_path}/{self.app_name}/gui/resources/imgs/themes/{name}.png', 'png')
theme_settings = Settings('theme', self.apps_path, self.app_name)
theme_settings.items['themes'][name] = theme_settings.items['theme']
theme_settings.serialize()
self.parent.builder_bottom.loadThemesButtons()
def create_new_app(self):
btn = self.sender()
if self.appCreator.isVisible():
return
if not btn.isChecked():
return
self.appCreator.show()
def highlighter(self, btn, e):
btn_name = btn.objectName()
if btn_name == "setAppsPath":
if e == "enter":
self.parent.builder_center_header.apps_path.setStyleSheet("color: orange")
else:
self.parent.builder_center_header.apps_path.setStyleSheet("color: white")
if btn_name == "createNewApp":
if e == "enter":
self.parent.builder_center.scrollArea.setStyleSheet("QScrollArea{border-bottom: 1px solid cyan}")
else:
self.parent.builder_center.scrollArea.setStyleSheet("QScrollArea{border: 0px solid cyan}")
if btn_name == "saveCurrentTheme":
if e == "enter":
self.parent.builder_bottom.themes_label.setStyleSheet("color: orange")
else:
self.parent.builder_bottom.themes_label.setStyleSheet("color: white")
if btn_name == "saveAppBuilder":
if e == "enter":
self.parent.builder_left.setStyleSheet("#BuilderLeft QTabBar::tab{color: orange}")
else:
self.parent.builder_left.setStyleSheet("#BuilderLeft QTabBar::tab{color: white}")
|
import calendar
from datetime import datetime
print(calendar.day_name[0])
print(calendar.day_name[1])
print(calendar.day_name[6])
dt = datetime(2019, 8, 17, 6, 00, 00)
h = dt.time().hour
print(h)
# datetime comparison
d1 = datetime(2020, 2, 11, 8, 52, 40)
d2 = datetime(2020, 2, 11, 9, 52, 40)
print(f'{d1} > {d2}') if d1 > d2 else print(f'{d1} <= {d2}')
|
from .feed_filter_tags import * # NOQA
|
from mamba import describe, included_context, it
NEW_EXPORTED_CONTEXT = 'New Exported Context'
with describe('Real tests'):
with included_context(NEW_EXPORTED_CONTEXT):
with it('added example'):
pass
|
from . import pyrender_wrapper
# from pyrender_wrapper import CustomShaderCache
import numpy as np
import pyrender
from skimage import io
# Camera transform from position and look direction
def get_camera_transform(position, look_direction):
camera_forward = -look_direction / np.linalg.norm(look_direction)
camera_right = np.cross(camera_forward, np.array((0, 0, -1)))
if np.linalg.norm(camera_right) < 0.5:
camera_right = np.array((0, 1, 0), dtype=np.float32)
camera_right /= np.linalg.norm(camera_right)
camera_up = np.cross(camera_forward, camera_right)
camera_up /= np.linalg.norm(camera_up)
rotation = np.identity(4)
rotation[:3, 0] = camera_right
rotation[:3, 1] = camera_up
rotation[:3, 2] = camera_forward
translation = np.identity(4)
translation[:3, 3] = position
return np.matmul(translation, rotation)
class Scan():
"""
A virtual laser scan of an object from one point in space.
This renders a normal and depth buffer and reprojects it into a point cloud.
The resulting point cloud contains a point for every pixel in the buffer that hit the model.
"""
def __init__(self, color, depth, camera_transform, projection_matrix, resolution, z_near, z_far, calculate_normals=False):
self.camera_transform = camera_transform
self.camera_position = np.matmul(self.camera_transform, np.array([0, 0, 0, 1]))[:3]
self.resolution = resolution
self.projection_matrix = projection_matrix
self.normal_buffer = color if calculate_normals else None
self.depth_buffer = depth.copy()
indices = np.argwhere(depth != 0)
depth[depth == 0] = float('inf')
# This reverts the processing that pyrender does and calculates the original depth buffer in clipping space
self.depth = (z_far + z_near - (2.0 * z_near * z_far) / depth) / (z_far - z_near)
points = np.ones((indices.shape[0], 4))
points[:, [1, 0]] = indices.astype(float) / (resolution -1) * 2 - 1
points[:, 1] *= -1
points[:, 2] = self.depth[indices[:, 0], indices[:, 1]]
clipping_to_world = np.matmul(self.camera_transform, np.linalg.inv(self.projection_matrix))
points = np.matmul(points, clipping_to_world.transpose())
points /= points[:, 3][:, np.newaxis]
self.points = points[:, :3]
if calculate_normals:
normals = color[indices[:, 0], indices[:, 1]] / 255 * 2 - 1
camera_to_points = self.camera_position - self.points
normal_orientation = np.einsum('ij,ij->i', camera_to_points, normals)
normals[normal_orientation < 0] *= -1
self.normals = normals
else:
self.normals = None
def convert_world_space_to_viewport(self, points):
half_viewport_size = 0.5 * self.resolution
clipping_to_viewport = np.array([
[half_viewport_size, 0.0, 0.0, half_viewport_size],
[0.0, -half_viewport_size, 0.0, half_viewport_size],
[0.0, 0.0, 1.0, 0.0],
[0, 0, 0.0, 1.0]
])
world_to_clipping = np.matmul(self.projection_matrix, np.linalg.inv(self.camera_transform))
world_to_viewport = np.matmul(clipping_to_viewport, world_to_clipping)
world_space_points = np.concatenate([points, np.ones((points.shape[0], 1))], axis=1)
viewport_points = np.matmul(world_space_points, world_to_viewport.transpose())
viewport_points /= viewport_points[:, 3][:, np.newaxis]
return viewport_points
def is_visible(self, points):
viewport_points = self.convert_world_space_to_viewport(points)
pixels = viewport_points[:, :2].astype(int)
# This only has an effect if the camera is inside the model
in_viewport = (pixels[:, 0] >= 0) & (pixels[:, 1] >= 0) & (pixels[:, 0] < self.resolution) & (pixels[:, 1] < self.resolution) & (viewport_points[:, 2] > -1)
result = np.zeros(points.shape[0], dtype=bool)
result[in_viewport] = viewport_points[in_viewport, 2] < self.depth[pixels[in_viewport, 1], pixels[in_viewport, 0]]
return result
def show(self):
scene = pyrender.Scene()
scene.add(pyrender.Mesh.from_points(self.points, normals=self.normals))
pyrender.Viewer(scene, use_raymond_lighting=True, point_size=2)
def save(self, filename_depth, filename_normals=None):
if filename_normals is None and self.normal_buffer is not None:
items = filename_depth.split('.')
filename_normals = '.'.join(items[:-1]) + "_normals." + items[-1]
depth = self.depth_buffer / np.max(self.depth_buffer) * 255
io.imsave(filename_depth, depth.astype(np.uint8))
if self.normal_buffer is not None:
io.imsave(filename_normals, self.normal_buffer.astype(np.uint8))
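# Minimal usage sketch (illustrative only, not part of the original module):
# build a camera transform for a camera on the +x axis looking back at the
# origin. Only numpy is needed, so no pyrender scene has to be created.
if __name__ == '__main__':
    camera_position = np.array((2.0, 0.0, 0.0))
    look_direction = -camera_position  # look towards the origin
    print(get_camera_transform(camera_position, look_direction))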
|
"""
Interfacing with External Environments
- Matlab and Octave
"""
import statsmodels.api as sm
from scipy.io import savemat
data_loader = sm.datasets.sunspots.load_pandas()
df = data_loader.data
savemat("sunspots", {"sunspots": df.values})
|
"""
Crie um programa que leia o nome e o preço de varios produtos.
O programa devera perguntar se o usuario vai continuar. no final
Mostre:
A) qual é o total gasto na compra
B) quantos produtos custam mais de R$ 1000
C) qual é o nome do produto mais barato.
"""
total = maior_1000 = cont = menor = 0
nome_mais_barato = ''
while True:
print('-=' * 25)
produto = str(input('Qual nome do produto? '))
preco = float(input('Qual preço do produto? R$ '))
total += preco
cont += 1
if preco > 1000:
maior_1000 += 1
    if cont == 1 or preco < menor:  # use cont and menor to track the cheapest item instead of a 9999 sentinel hack
menor = preco
nome_mais_barato = produto
continuar = ' '
while continuar not in 'sn':
continuar = str(input('Quer continuar? [S/N] ')).strip().lower()[0]
if continuar == 'n':
print('-=' * 25)
break
print(f'O total é de R${total}')
print(f'O produto mais barato é {nome_mais_barato} e custou R${menor:.2f}')
print(f'há {maior_1000} produtos maior que R$1000 ')
|
from amundsenatlastypes import Initializer
init = Initializer()
init.create_required_entities()
"""
DB: [db]@[cluster]
Table: [db].[table]@[cluster]
Column: [db].[table].[column]@[cluster]
TableMetadata: [db].[table].metadata@[cluster]
ColumnMetadata: [db].[table].[column].metadata@[cluster]
Reader: [db].[table].[CK].reader@[cluster]
Partition: SuperType
hive_table_partition: [db].[table].partition.[partitionName]@[cluster]
"""
|
#!/usr/bin/env python
import glob
import os
import markdown
import time
def get_output_filename(filename):
items = os.path.splitext(filename)
return items[0] + '.tex'
def compile():
os.system("make")
files = glob.glob("*.tx") + glob.glob("*/*.tx") + glob.glob("*/*/*.tx")
if len(files) == 0:
    print("no file to track. (only looks at 3 levels)")
    quit()
else:
    print("tracking", len(files), "files")
while True:
for f in files:
oname = get_output_filename(f)
if (not os.path.exists(oname) or
(os.path.exists(f) and
(os.path.getmtime(f) > os.path.getmtime(oname)))):
compile()
print "Built"
time.sleep(1)
|
FLOWER_FEATURE_CHOICES = (
("sepal_length", "sepal_length"),
("sepal_width", "sepal_width"),
("petal_length", "petal_length"),
("petal_width", "petal_width"),
("flower_type", "flower_type"),
)
|
from rest_framework.views import APIView
from rest_framework.response import Response
import logging
from .. import utils
logger = logging.getLogger(__name__)
class ScheduleSettingView(APIView):
def get(self, request, format=None):
response = {
"week_settings": utils.WEEK_CHOICES,
"timespan_settings": utils.TIME_CHOICES,
}
return Response(response)
|
from azurlane.common_fight import CommonMap
from simulator.win32_tools import rand_click
import time
import sys
s = CommonMap("Games")
start_time = time.time()
last = {}
def check():
global last
name = "HandleMark"
t = time.time()
s.make_screen_shot()
found, pos = s.search_resource(name)
if not found:
last = {}
# print("Wait...")
time.sleep(0.1)
return
x = pos[0]
if last:
speed = (x - last["x"]) / (t - last["t"])
# print("Speed", speed)
last["t"] = t
last["x"] = x
if 682 < x < 698:
# print("CLICK AT", x)
rand_click(s.hwnd, (300, 300, 400, 400))
time.sleep(0.2)
return True
# print("Skip AT", x)
n = int(sys.argv[1])
clicked = 0
while clicked < n:
did = check()
if did:
clicked += 1
print(time.time() - start_time, "Clicked", clicked, n)
|
from contrastive_learner.contrastive_learner import ContrastiveLearner
|
# Python 3.0
# Bartosz Wolak
# created 23.07.2021
# updated 23.07.2021
"""
Module responsible for keeping data for buttons, dependent on pygame
"""
import pygame
import src.Drawable
import src.loading
class Button(pygame.sprite.Sprite):
def __init__(self, pos: (int, int), label: str, size: (int, int) = (200, 50)):
pygame.sprite.Sprite.__init__(self)
self.label = label
self.rect = pygame.Rect(pos, size)
self.static_image = pygame.Surface(size)
self.static_image.fill(src.loading.colours['light_gray'])
self.hover_image = pygame.Surface(size)
self.hover_image.fill(src.loading.colours['dark_gray'])
self.image = pygame.Surface(size)
self.image.blit(self.static_image, (0, 0))
self.font = pygame.font.SysFont('Corbel', 25)
self.pygame_rendered_label = self.font.render(self.label, True, src.loading.colours['font_colour'])
self.image.blit(self.pygame_rendered_label, (self.rect.width / 2 - 5 * len(self.label), self.rect.height / 2 - 15))
self.hover_image.blit(self.pygame_rendered_label, (self.rect.width / 2 - 5 * len(self.label), self.rect.height / 2 - 15))
self.static_image.blit(self.pygame_rendered_label, (self.rect.width / 2 - 5 * len(self.label), self.rect.height / 2 - 15))
self.state = 'static'
self.function = None
def hover(self):
if self.state == 'static':
self.image.blit(self.hover_image, (0, 0))
self.state = 'hover'
def static(self):
if self.state == 'hover':
self.image.blit(self.static_image, (0, 0))
self.state = 'static'
def bind(self, func):
self.function = func
def trigger(self):
self.function()
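# Minimal usage sketch (our own addition, not part of the original module).
# It assumes pygame has been initialised and that src.loading.colours defines
# 'light_gray', 'dark_gray' and 'font_colour', as the constructor requires.
if __name__ == '__main__':
    pygame.init()
    demo_button = Button((100, 100), 'Start')
    demo_button.bind(lambda: print('Start clicked'))
    demo_button.hover()    # swap to the hover image
    demo_button.static()   # swap back to the static image
    demo_button.trigger()  # calls the bound function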
|
import ifstool
if __name__ == "__main__":
ifstool.run()
|
import torch
class TrimHandler:
def __init__(self,num_nodes,initial_graph=None,crop_every=100,eps=0.5):
self.graph = initial_graph if initial_graph else torch.ones(num_nodes,num_nodes)
self.crop_every=crop_every
self.num_nodes,self.n=num_nodes,num_nodes**2
self.eps=eps
self.cache=torch.zeros(num_nodes,num_nodes)
self.t=0
def push(self,attns):
self.t+=attns.shape[0]
with torch.no_grad():
self.cache+=attns.sum(axis=0).cpu()
#self.log[-1]+=attns.data() ##MWW
"""if self.t%self.cache_every==0:
self.log[-1]/=self.cache_every ##maybe will create disappearing of attentions in graph?
self.log.append(self.cache=torch.zeros(num_nodes,num_nodes))
"""
if self.t>=self.crop_every:
self.cache/=self.cache.sum()
self.t=0
print("Cache\n",self.cache)
idx=self.cache<(self.eps/(self.n**2)) ##eps * 1/n
self.graph[idx]=0
self.n=self.num_nodes**2-int(idx.sum())
print("Graph Updated\n",self.graph)
print("num_connections",self.n)
self.cache=torch.zeros(self.num_nodes,self.num_nodes)
def get_mask(self,batch_size):
with torch.no_grad():
return (1-self.graph).type(torch.bool).unsqueeze(0).cuda().repeat(batch_size,1,1)
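# Minimal usage sketch (illustrative only; attention batches are assumed to
# have shape (batch, num_nodes, num_nodes), as push() above implies). With
# crop_every=4 and two batches of size 2, the second push triggers a crop.
if __name__ == '__main__':
    handler = TrimHandler(num_nodes=3, crop_every=4, eps=0.5)
    for _ in range(2):
        handler.push(torch.rand(2, 3, 3))
    if torch.cuda.is_available():  # get_mask() moves the mask onto the GPU
        print(handler.get_mask(batch_size=2).shape)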
|
#!/usr/bin/env python3
from datetime import datetime, timedelta
from PIL import Image, ImageOps
import argparse
import re
import srt
import time
import win32print
import struct
MAXWIDTH = 190
WIDTHCORRECTION = 1.5
ESC = b'\x1B'
GS = b'\x1D'
RESET = ESC + b'@'
SETBOLD = ESC + b'E\x01'
EXITBOLD = ESC + b'E\x00'
CENTER = ESC + b'a\x01'
PAGEFEED = ESC + b'd\x05'
CHARSIZE = GS + b'!'
# 16 pixel line spacing
LS16 = ESC + b'3\x10'
parser = argparse.ArgumentParser(description='Prints text on POS printers.')
parser.add_argument('printer', help='printer name')
parser.add_argument('script', help='script file', type=argparse.FileType('r', encoding='utf-8'))
args = parser.parse_args()
subs = args.script.read()
# Trim UTF-8 BOM if present, or the SRT parser chokes
if len(subs) > 0 and subs[0] == '\ufeff':
subs = subs[1:]
subs = list(srt.parse(subs))
# Just making sure the entries are sorted in chronological order
subs.sort(key=lambda x: x.start)
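# Example of a script entry using the inline tags handled below (illustrative,
# not taken from the original project):
#
#   1
#   00:00:01,000 --> 00:00:03,000
#   [center][b]HELLO[/b]
#   [size=2]BIG TEXT
#   [img=logo.png]
#   [pagefeed]
#
# A line containing only '_' prints an empty line.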
class Line:
def __init__(self, time, data):
self.time = time
self.data = data
def __repr__(self):
return 'Line(time=%s, data=%s)' % (self.time, self.data)
startTime = datetime.now()
lines = list()
for sub in subs:
isFirst = True
for line in sub.content.split('\n'):
line = line.lstrip('\r')
imageMatch = re.match(r'[ \t]*\[img=(.*)\][ \t]*$', line)
if imageMatch:
image = Image.open(imageMatch.group(1))
# Convert into grayscale if not already so we can invert it
image = image.convert('L')
# Rescale to fix aspect ratio and make it fit
if image.width * WIDTHCORRECTION > MAXWIDTH:
correctheight = round((image.height * MAXWIDTH) / (image.width * WIDTHCORRECTION))
image = image.resize((MAXWIDTH, correctheight))
else:
image = image.resize((round(image.width * WIDTHCORRECTION), image.height))
# Invert now, as in ESC/POS a 1 is black and 0 is white
image = ImageOps.invert(image)
# Create a new black and white image
bwimage = Image.new('1', (MAXWIDTH, image.height or 7), 0)
# Paste image centered
pastepos = (
round(bwimage.width / 2.0 - image.width / 2.0),
round(bwimage.height / 2.0 - image.height / 2.0)
)
bwimage.paste(image, pastepos)
# Rotate for slicing
bwimage = bwimage.transpose(Image.ROTATE_270)
bwimage = bwimage.transpose(Image.FLIP_LEFT_RIGHT)
isFirst = True
header = ESC + b'*\x00' + struct.pack('<H', MAXWIDTH)
for rowStart in range(0, bwimage.width, 8):
rowimage = bwimage.crop((rowStart, 0, rowStart + 8, bwimage.height))
rowdata = bytearray()
if isFirst:
# 16 pixels of line spacing (8 pixels of image due to half resolution)
rowdata.extend(RESET + LS16)
isFirst = False
rowdata.extend(header)
rowdata.extend(rowimage.tobytes())
rowdata.extend(b'\n')
lines.append(Line(sub.start, rowdata))
elif line == '[pagefeed]':
for i in range(8):
lines.append(Line(sub.start, b'\n'))
else:
if line == '_':
line = b''
else:
line = line.encode('ascii')
line = line.replace(b'[pagefeed]', PAGEFEED)
line = line.replace(b'[center]', CENTER)
line = line.replace(b'[b]', SETBOLD)
line = line.replace(b'[/b]', EXITBOLD)
            # This is to account for big text that spans more than one line
dummylines = 0
def sizeCallback(match):
global dummylines
size = int(match.group(1)) - 1
dummylines = max(dummylines, size)
size = size << 4 | size
return CHARSIZE + bytes([size])
line = re.sub(br'\[size=([1-8])\]', sizeCallback, line)
for x in range(dummylines):
print('Adding dummy %d' % x)
lines.append(Line(sub.start, b''))
if isFirst:
line = RESET + LS16 + line
isFirst = False
lines.append(Line(sub.start, line + b'\n'))
print(lines)
# First "n" lines aren't immediately visible, we have to print ahead of time
timebuffer = [timedelta()] * 3
for line in lines:
timebuffer.append(line.time)
line.time = timebuffer.pop(0)
for timestamp in timebuffer:
lines.append(Line(timestamp, b'\n'))
# Merge lines with common times, so we don't have to open and close the printer so often
curline = lines[0]
mergedlines = list()
for line in lines[1:]:
if line.time == curline.time:
curline.data += line.data
else:
mergedlines.append(curline)
curline = line
mergedlines.append(curline)
print(mergedlines)
if True:
startTime = datetime.now()
p = win32print.OpenPrinter(args.printer)
for line in mergedlines:
delay = line.time + startTime - datetime.now()
if delay.days >= 0:
time.sleep(delay.total_seconds())
win32print.StartDocPrinter(p, 1, ('Line document', None, 'raw'))
print(line)
win32print.WritePrinter(p, line.data)
#win32print.FlushPrinter(p, bytes(line.data), 0)
win32print.EndDocPrinter(p)
win32print.ClosePrinter(p)
|
# coding=utf-8
"""
Tests for deepreg/model/loss/label.py in
pytest style
"""
from test.unit.util import is_equal_tf
import numpy as np
import pytest
import tensorflow as tf
from deepreg.loss.util import (
NegativeLossMixin,
cauchy_kernel1d,
gaussian_kernel1d_sigma,
gaussian_kernel1d_size,
rectangular_kernel1d,
separable_filter,
triangular_kernel1d,
)
@pytest.mark.parametrize("sigma", [1, 3, 2.2])
def test_cauchy_kernel1d(sigma):
"""
Testing the 1-D cauchy kernel
:param sigma: float
:return:
"""
tail = int(sigma * 5)
expected = [1 / ((x / sigma) ** 2 + 1) for x in range(-tail, tail + 1)]
expected = expected / np.sum(expected)
got = cauchy_kernel1d(sigma)
assert is_equal_tf(got, expected)
@pytest.mark.parametrize("sigma", [1, 3, 2.2])
def test_gaussian_kernel1d_sigma(sigma):
"""
Testing the 1-D gaussian kernel given sigma as input
:param sigma: float
:return:
"""
tail = int(sigma * 3)
expected = [np.exp(-0.5 * x ** 2 / sigma ** 2) for x in range(-tail, tail + 1)]
expected = expected / np.sum(expected)
got = gaussian_kernel1d_sigma(sigma)
assert is_equal_tf(got, expected)
@pytest.mark.parametrize("kernel_size", [3, 7, 11])
def test_gaussian_kernel1d_size(kernel_size):
"""
Testing the 1-D gaussian kernel given size as input
:param kernel_size: int
:return:
"""
mean = (kernel_size - 1) / 2.0
sigma = kernel_size / 3
grid = tf.range(0, kernel_size, dtype=tf.float32)
expected = tf.exp(-tf.square(grid - mean) / (2 * sigma ** 2))
got = gaussian_kernel1d_size(kernel_size)
assert is_equal_tf(got, expected)
@pytest.mark.parametrize("kernel_size", [3, 7, 11])
def test_rectangular_kernel1d(kernel_size):
"""
Testing the 1-D rectangular kernel
:param kernel_size: int
:return:
"""
expected = tf.ones(shape=(kernel_size,), dtype=tf.float32)
got = rectangular_kernel1d(kernel_size)
assert is_equal_tf(got, expected)
@pytest.mark.parametrize("kernel_size", [3, 5, 7, 9])
def test_triangular_kernel1d(kernel_size):
"""
Testing the 1-D triangular kernel
:param kernel_size: int (odd number)
:return:
"""
expected = np.zeros(shape=(kernel_size,), dtype=np.float32)
expected[kernel_size // 2] = kernel_size // 2 + 1
for it_k in range(kernel_size // 2):
expected[it_k] = it_k + 1
expected[-it_k - 1] = it_k + 1
got = triangular_kernel1d(kernel_size)
assert is_equal_tf(got, expected)
def test_separable_filter():
"""
    Testing the separable filter in the case where a
    non-zero-length tensor is passed to the function.
"""
k = np.ones((3, 3, 3, 3, 1), dtype=np.float32)
array_eye = np.identity(3, dtype=np.float32)
tensor_pred = np.zeros((3, 3, 3, 3, 1), dtype=np.float32)
tensor_pred[:, :, 0, 0, 0] = array_eye
tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)
k = tf.convert_to_tensor(k, dtype=tf.float32)
expect = np.ones((3, 3, 3, 3, 1), dtype=np.float32)
expect = tf.convert_to_tensor(expect, dtype=tf.float32)
get = separable_filter(tensor_pred, k)
assert is_equal_tf(get, expect)
class MinusClass(tf.keras.losses.Loss):
def __init__(self):
super().__init__()
self.name = "MinusClass"
def call(self, y_true, y_pred):
return y_true - y_pred
class MinusClassLoss(NegativeLossMixin, MinusClass):
pass
@pytest.mark.parametrize("y_true,y_pred,expected", [(1, 2, 1), (2, 1, -1), (0, 0, 0)])
def test_negative_loss_mixin(y_true, y_pred, expected):
"""
Testing NegativeLossMixin class that
inverts the sign of any value
returned by a function
:param y_true: int
:param y_pred: int
:param expected: int
:return:
"""
y_true = tf.constant(y_true, dtype=tf.float32)
y_pred = tf.constant(y_pred, dtype=tf.float32)
got = MinusClassLoss().call(
y_true,
y_pred,
)
assert is_equal_tf(got, expected)
|
from task_manager import TaskManager
class TaskOpenproject (TaskManager):
"""Subclass of TaskManager implementing HMGM task management with OpenProject platforms."""
def __init__(self, root_dir):
self.root_dir = root_dir
# TODO add logic to set up Open Project task manager
def create_task(self, task_type, context, input_path, task_json):
# TODO replace with real logic, make sure the task_json contains ID, key, URL
return None
def close_task(self, task_json):
# TODO replace with real logic
return None |
# Helper Code
from collections import defaultdict
class Graph:
def __init__(self):
        self.nodes = set()  # A set cannot contain duplicate nodes
        self.neighbours = defaultdict(list)  # defaultdict is a dict subclass that provides a default value for a key that does not exist
        self.distances = {}  # Dictionary. An example record ('A', 'B'): 6 means the distance from 'A' to 'B' is 6 units
def add_node(self, value):
self.nodes.add(value)
def add_edge(self, from_node, to_node, distance):
self.neighbours[from_node].append(to_node)
self.neighbours[to_node].append(from_node)
self.distances[(from_node, to_node)] = distance
        self.distances[(to_node, from_node)] = distance  # let's make the graph undirected / bidirectional
def print_graph(self):
print("Set of Nodes are: ", self.nodes)
print("Neighbours are: ", self.neighbours)
print("Distances are: ", self.distances)
# my recursive solution
def dijkstra_rec(unvisited, results, node, graph, path):
min_dist = -1
to_visit = [n for n in graph.neighbours[node]]
closest_node = None
dist_source = results.get(node, 0)
for n in to_visit:
dist = graph.distances[(node, n)]
results[n] = min(results.get(n, dist + dist_source), dist + dist_source)
if (min_dist == -1 or graph.distances[(node, n)] < min_dist) and n in unvisited:
min_dist = graph.distances[(node, n)]
closest_node = n
path[node] = closest_node
unvisited.remove(node)
return unvisited, results, closest_node, graph, path
def dijkstra(graph, source):
results = {source: 0}
unvisited = list(graph.nodes)
dist_source = 0
path = {}
node = source
# Declare and initialize result, unvisited, and path
# As long as unvisited is non-empty
while unvisited:
unvisited, results, node, graph, path = dijkstra_rec(unvisited,
results, node, graph, path)
return results
# udacity solution O(N^2) as well
import sys
'''Find the shortest path from the source node to every other node in the given graph'''
def dijkstra(graph, source):
result = {}
result[source] = 0
for node in graph.nodes:
if (node != source):
            # assign a default "infinite" distance to every node other than the source
result[node] = sys.maxsize
unvisited = set(graph.nodes)
path = {}
'''THE GREEDY APPROACH'''
# As long as unvisited is non-empty
while unvisited:
min_node = None
# 1. Find the unvisited node having smallest known distance from the source node.
for node in unvisited:
if node in result:
if min_node is None:
min_node = node
elif result[node] < result[min_node]:
min_node = node
if min_node is None:
break
# known distance of min_node
current_distance = result[min_node]
# 2. For the current node, find all the unvisited neighbours.
        # For this, you have to calculate the distance of each unvisited neighbour.
for neighbour in graph.neighbours[min_node]:
if neighbour in unvisited:
distance = current_distance + graph.distances[(min_node, neighbour)]
# 3. If the calculated distance of the unvisited neighbour is less than the already known distance in result dictionary, update the shortest distance in the result dictionary.
if ((neighbour not in result) or (distance < result[neighbour])):
result[neighbour] = distance
# 4. If there is an update in the result dictionary, you need to update the path dictionary as well for the same key.
path[neighbour] = min_node
# 5. Remove the current node from the unvisited set.
unvisited.remove(min_node)
return result
# Test 1
testGraph = Graph()
for node in ['A', 'B', 'C', 'D', 'E']:
testGraph.add_node(node)
testGraph.add_edge('A','B',3)
testGraph.add_edge('A','D',2)
testGraph.add_edge('B','D',4)
testGraph.add_edge('B','E',6)
testGraph.add_edge('B','C',1)
testGraph.add_edge('C','E',2)
testGraph.add_edge('E','D',1)
testGraph.print_graph()
print(dijkstra(testGraph, 'A')) # {'A': 0, 'D': 2, 'B': 3, 'E': 3, 'C': 4} |
import sys
handle = open(sys.argv[1])
data = handle.readlines()
handle.close()
extracted = []
outputFile = sys.argv[1].replace("opt1","csv") # Keep the original MultEval file
handle = open(outputFile, "w")
handle.write("id\ttext\tbleu4\tmeteor\tter\n");
for line in data:
line = line.replace("\n", "")
line = line.split("|||")
num = line[0].strip()
description = line[1].strip()
scores = line[2] # Extract the BLEU4, Meteor, and TER scores
scores = scores.split(" ")
bleu4 = scores[1].split("=")[1]
meteor = scores[2].split("=")[1]
ter = scores[3].split("=")[1]
handle.write("%s\t%s\t%s\t%s\t%s\n" % (num, description, bleu4, meteor, ter))
handle.close()
|
class PIDController():
"""Updated version of PIDController"""
def __init__(self, Kp: float = 0.0, Ki: float = 0.0, Kd: float = 0.0,
limitMin: float = 0.0, limitMax: float = 0.0,
tau: float = 0.0, dt: float = 0.0):
"""Version 2 of PIDController, with updated math
Args:
Kp (float, optional): gain for proportional. Defaults to 0.0.
Ki (float, optional): gain for integrator. Defaults to 0.0.
Kd (float, optional): gain for derivator. Defaults to 0.0.
            limitMin (float, optional): minimum output value. Defaults to 0.0.
            limitMax (float, optional): maximum output value. Defaults to 0.0.
            tau (float, optional): gain for low pass filter. Defaults to 0.0.
            dt (float, optional): timestep. Defaults to 0.0.
"""
self.kp = Kp
self.ki = Ki
self.kd = Kd
self.limitMax = limitMax
self.limitMin = limitMin
self.tau = tau
self.dt = dt
self.use_low_pass_filter = False
self.use_kick_avoidance = False
self._integral = 0.0
self._derivative = 0.0
self._previousError = 0.0
self._previousMeasurement = 0.0
self._error = 0.0
def _clamp(self, value):
lower = self.limitMin
upper = self.limitMax
if value is None:
return None
elif (upper is not None) and (value > upper):
return upper
elif (lower is not None) and (value < lower):
return lower
return value
def compute(self, target: float,
measurement: float, dt: float = None) -> float:
"""Calculate output through PID interface using gain value set via
self.Kp, self.Ki, self.Kd
Args:
target (float): target value to be reached
measurement (float): actual value
            dt (float, optional): Timestep since the last call. If None,
                self.dt is used. Defaults to None.
        Returns:
            float: The output of the PID clamped to
                [self.limitMin, self.limitMax]
"""
if dt is None:
dt = self.dt
self._error = target - measurement
d_error = self._error - self._previousError
proportional = self._error * self.kp
self._integral += self.ki * self._error * dt
self._integral = self._clamp(self._integral)
        self._derivative = self.kd * d_error / dt
output = proportional + self._integral + self._derivative
output = self._clamp(output)
self._previousError = self._error
self._previousMeasurement = measurement
return output
def get_kpe(self):
return self.kp * self._error
    def get_kie(self):
        # self._integral already includes the ki gain (see compute above)
        return self._integral
    def get_kde(self):
        # self._derivative already includes the kd gain (see compute above)
        return self._derivative
# def get_kie(self):
# return self.ki * self._integrator
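# Minimal usage sketch (our own addition, with made-up gains and a toy plant):
# drive a simple first-order system towards a setpoint of 1.0 using the
# compute() interface defined above.
if __name__ == '__main__':
    pid = PIDController(Kp=2.0, Ki=0.5, Kd=0.05,
                        limitMin=-10.0, limitMax=10.0, dt=0.01)
    state = 0.0
    for _ in range(1000):
        control = pid.compute(target=1.0, measurement=state)
        state += control * pid.dt  # toy integrator standing in for a plant
    print('final state: %.3f' % state)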
|
"""Documetação oficial"""
variavel_1 = 'valor 1'
def soma(x, y):
    # sums x and y
return x + y
def multiplica(x, y, z=None):
"""Soma x, y, z
Multiplica x, y, z o programador por omitir a variavel z caso não tenha
necessidade de usa-la
"""
if z:
return x * y
else:
return x * y * z
variavel_2 = 'valor 2'
variavel_3 = 'valor 3'
variavel_4 = 'valor 4' |
import requests
from flask import url_for, session, Blueprint, redirect
from flask import request
from apikit import jsonify
from nomenklatura import authz
from nomenklatura.core import db, github
from nomenklatura.model import Account, Dataset
section = Blueprint('sessions', __name__)
@section.route('/sessions')
def status():
return jsonify({
'logged_in': authz.logged_in(),
'api_key': request.account.api_key if authz.logged_in() else None,
'account': request.account,
'base_url': url_for('index', _external=True)
})
@section.route('/sessions/authz')
def get_authz():
permissions = {}
dataset_name = request.args.get('dataset')
if dataset_name is not None:
dataset = Dataset.find(dataset_name)
permissions[dataset_name] = {
'view': True,
'edit': authz.dataset_edit(dataset),
'manage': authz.dataset_manage(dataset)
}
return jsonify(permissions)
@section.route('/sessions/login')
def login():
callback = url_for('sessions.authorized', _external=True)
return github.authorize(callback=callback)
@section.route('/sessions/logout')
def logout():
authz.require(authz.logged_in())
session.clear()
return redirect('/')
@section.route('/sessions/callback')
@github.authorized_handler
def authorized(resp):
if 'access_token' not in resp:
return redirect(url_for('index'))
access_token = resp['access_token']
session['access_token'] = access_token, ''
res = requests.get('https://api.github.com/user?access_token=%s' % access_token,
verify=False)
data = res.json()
for k, v in data.items():
session[k] = v
account = Account.by_github_id(data.get('id'))
if account is None:
account = Account.create(data)
db.session.commit()
return redirect('/')
|
from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
# Create your views here.
|
#!/usr/bin/env python3.8
#
# Copyright (c) 2020 by Ron Frederick <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-License-Identifier: MIT
import asyncio
import websockets
class WSRelay:
_relays = {}
def __init__(self):
self._sockets = {}
self._next_id = 1
async def _send(self, source, target, message):
"""Send a message on one or more associated WebSockets"""
message = f'{source} {message}'
if target == '*':
await asyncio.gather(*(sock.send(message) for (target, sock) in
self._sockets.items() if target != source))
else:
try:
await self._sockets[target].send(message)
except KeyError:
pass
async def _recv(self, sock, path):
"""Receive and relay messages on a WebSocket"""
source = str(self._next_id)
self._next_id += 1
await sock.send(f'{source} self')
for peer in self._sockets.keys():
await sock.send(f'{peer} join')
await self._send(source, '*', 'join')
self._sockets[source] = sock
try:
async for message in sock:
target, message = message.split(None, 1)
await self._send(source, target, message)
except websockets.WebSocketException:
pass
finally:
del self._sockets[source]
await self._send(source, '*', 'quit')
if not self._sockets:
del self._relays[path]
@classmethod
async def _accept(cls, sock, path):
"""Accept a new WebSocket connection"""
try:
relay = cls._relays[path]
except KeyError:
relay = cls()
cls._relays[path] = relay
await relay._recv(sock, path)
@classmethod
def listen(cls, host, port):
"""Set up a listener for WebSocket connections"""
return websockets.serve(cls._accept, host, port)
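# Minimal client sketch (our own addition, not part of the original module):
# connect to a relay, read the initial "<id> self" greeting, then broadcast
# one message to every other peer by addressing it to "*".
async def _demo_client(uri='ws://localhost:7927/demo'):
    async with websockets.connect(uri) as sock:
        greeting = await sock.recv()  # e.g. "1 self"
        my_id = greeting.split()[0]
        await sock.send('* hello from %s' % my_id)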
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(WSRelay.listen('localhost', 7927))
loop.run_forever()
|
class MassLevelData(Element,IDisposable):
"""
MassLevelData is a conceptual representation of an occupiable floor (Mass Floor) in a conceptual building model.
It is defined by associating a particular level with a particular mass element in a Revit project.
"""
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def IsEmpty(self):
"""
IsEmpty(self: MassLevelData) -> bool
Indicates if the MassLevelData (Mass Floor) has a geometrical representation.
        It may not have one if the level does not intersect the mass geometry.
        Returns: Returns True if MassLevelData is dimensionless, False otherwise.
"""
pass
@staticmethod
def IsMassFamilyInstance(document,id):
"""
IsMassFamilyInstance(document: Document,id: ElementId) -> bool
Checks if the ElementId is a mass family instance.
document: The document.
id: The ElementId to be checked.
Returns: True if the ElementId is a mass family instance,false otherwise.
"""
pass
def IsValidConceptualConstructionTypeElement(self,id):
"""
IsValidConceptualConstructionTypeElement(self: MassLevelData,id: ElementId) -> bool
Checks if the ElementId is an acceptable conceptual construction type ElementId
for the MassLevelData (Mass Floor).
id: The ElementId to be checked.
Returns: True if the ElementId is an acceptable conceptual construction type ElementId,
false otherwise.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
ConceptualConstructionId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The ElementId of the conceptual construction associated with the MassLevelData (Mass Floor).
Get: ConceptualConstructionId(self: MassLevelData) -> ElementId
Set: ConceptualConstructionId(self: MassLevelData)=value
"""
ConceptualConstructionIsByEnergyData=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates if the ConceptualConstructionType of the MassLevelData (Mass Floor) is synchronized
with the EnergyDataSettings or if it overrides those settings.
Get: ConceptualConstructionIsByEnergyData(self: MassLevelData) -> bool
Set: ConceptualConstructionIsByEnergyData(self: MassLevelData)=value
"""
MaterialId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The ElementId of the visualization material used for the MassLevelData (Mass Floor)
Get: MaterialId(self: MassLevelData) -> ElementId
Set: MaterialId(self: MassLevelData)=value
"""
MaterialType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates if the material used for the graphical appearance is by category or a specific material,or
if the material to be used should be taken from the ConceptualConstructionType of the MassLevelData.
Get: MaterialType(self: MassLevelData) -> MassSurfaceDataMaterialType
Set: MaterialType(self: MassLevelData)=value
"""
NExteriorSurfaceArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The exterior surface area of the volume of the mass between the level of this MassLevelData (Mass Floor) to the next in the mass.
Get: NExteriorSurfaceArea(self: MassLevelData) -> float
"""
NLevelFafArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The surface area of the intersection of the MassLevelData's level with the mass geometry.
Get: NLevelFafArea(self: MassLevelData) -> float
"""
NLevelPerimeter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The perimeter of the intersection of the MassLevelData's level with the mass geometry.
Get: NLevelPerimeter(self: MassLevelData) -> float
"""
NVolume=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The volume of from the level of this MassLevelData (Mass Floor) to the next in the mass.
Get: NVolume(self: MassLevelData) -> float
"""
OwningMassId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The ElementId of the mass that the MassLevelData (Mass Floor) is associated with.
Get: OwningMassId(self: MassLevelData) -> ElementId
"""
StrUsage=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""A String which describes the usage or occupancy type of the level of the MassLevelData.
Get: StrUsage(self: MassLevelData) -> str
Set: StrUsage(self: MassLevelData)=value
"""
|
import datetime
import six
from eventsourcing.domain.model.entity import EventSourcedEntity, EntityRepository
from eventsourcing.domain.model.events import publish
from quantdsl.priceprocess.base import datetime_from_date
class SimulatedPrice(EventSourcedEntity):
class Created(EventSourcedEntity.Created):
pass
class Discarded(EventSourcedEntity.Discarded):
pass
def __init__(self, value, **kwargs):
super(SimulatedPrice, self).__init__(**kwargs)
self._value = value
@property
def value(self):
return self._value
def register_simulated_price(market_simulation_id, market_name, fixing_date, delivery_date, price_value):
simulated_price_id = make_simulated_price_id(market_simulation_id, market_name, fixing_date, delivery_date)
created_event = SimulatedPrice.Created(entity_id=simulated_price_id, value=price_value)
simulated_price = SimulatedPrice.mutator(event=created_event)
publish(created_event)
return simulated_price
def make_simulated_price_id(simulation_id, market_name, fixing_date, delivery_date):
assert isinstance(market_name, six.string_types), market_name
assert isinstance(fixing_date, (datetime.datetime, datetime.date)), (fixing_date, type(fixing_date))
assert isinstance(delivery_date, (datetime.datetime, datetime.date)), (delivery_date, type(delivery_date))
fixing_date = datetime_from_date(fixing_date)
delivery_date = datetime_from_date(delivery_date)
price_id = ("PriceId(simulation_id='{}' commodity_name='{}' fixing_date='{}', delivery_date='{}')"
"".format(simulation_id, market_name, fixing_date, delivery_date))
return price_id
class SimulatedPriceRepository(EntityRepository):
pass
|
from typing import Optional, Tuple
import math
__all__ = [
'Angle',
'Distance',
'Speed',
'Vector3',
'angle_z_to_quaternion',
'Matrix44',
'Quaternion',
'Pose',
'hex_dump',
'hex_load',
'frange',
]
class Angle:
"""
Angle representation.
Args:
radians (float): The number of radians the angle should represent
(cannot be combined with ``degrees``)
        degrees (float): The number of degrees the angle should represent
(cannot be combined with ``radians``)
"""
__slots__ = '_radians'
def __init__(self, radians: Optional[float] = None, degrees: Optional[float] = None):
if radians is None and degrees is None:
raise ValueError("Expected either the degrees or radians keyword argument")
if radians and degrees:
raise ValueError("Expected either the degrees or radians keyword argument, not both")
if degrees is not None:
radians = degrees * math.pi / 180
self._radians = float(radians)
def __repr__(self):
return "<%s %.2f radians (%.2f degrees)>" % (self.__class__.__name__, self.radians, self.degrees)
def __add__(self, other):
if not isinstance(other, Angle):
raise TypeError("Unsupported type for + expected Angle")
return Angle(radians=self.radians + other.radians)
def __sub__(self, other):
if not isinstance(other, Angle):
raise TypeError("Unsupported type for - expected Angle")
return Angle(radians=self.radians - other.radians)
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported type for * expected number")
return Angle(radians=self.radians * other)
def __truediv__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported type for / expected number")
return Angle(radians=self.radians / other)
def _cmp_int(self, other):
if not isinstance(other, Angle):
raise TypeError("Unsupported type for comparison expected Angle")
return self.radians - other.radians
def __eq__(self, other):
return self._cmp_int(other) == 0
def __ne__(self, other):
return self._cmp_int(other) != 0
def __gt__(self, other):
return self._cmp_int(other) > 0
def __lt__(self, other):
return self._cmp_int(other) < 0
def __ge__(self, other):
return self._cmp_int(other) >= 0
def __le__(self, other):
return self._cmp_int(other) <= 0
@property
def radians(self) -> float:
""" Returns the angle in radians. """
return self._radians
@property
def degrees(self) -> float:
""" Returns the angle in degrees. """
return self._radians / math.pi * 180
@property
def abs_value(self):
""":class:`cozmo.util.Angle`: The absolute value of the angle.
If the Angle is positive then it returns a copy of this Angle, otherwise it returns -Angle.
"""
return Angle(radians=abs(self._radians))
class Distance:
"""
Represents a distance.
The class allows distances to be returned in either millimeters or inches.
Args:
        mm (float): The number of millimeters the distance should
            represent (cannot be combined with ``inches``).
        inches (float): The number of inches the distance should
            represent (cannot be combined with ``mm``).
"""
__slots__ = '_mm'
def __init__(self, mm: Optional[float] = None, inches: Optional[float] = None):
if mm is None and inches is None:
raise ValueError("Expected either the mm or inches keyword argument")
if mm and inches:
raise ValueError("Expected either the mm or inches keyword argument, not both")
if inches is not None:
mm = inches * 25.4
self._mm = mm
def __repr__(self):
return "<%s %.2f mm (%.2f inches)>" % (self.__class__.__name__, self.mm, self.inches)
def __add__(self, other):
if not isinstance(other, Distance):
raise TypeError("Unsupported operand for + expected Distance")
return Distance(mm=self.mm + other.mm)
def __sub__(self, other):
if not isinstance(other, Distance):
raise TypeError("Unsupported operand for - expected Distance")
return Distance(mm=self.mm - other.mm)
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for * expected number")
return Distance(mm=self.mm * other)
def __truediv__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for / expected number")
return Distance(mm=self.mm / other)
@property
def mm(self) -> float:
""" The distance in millimeters. """
return self._mm
@property
def inches(self) -> float:
""" The distance in inches. """
return self._mm / 25.4
class Speed:
"""
Speed representation.
Args:
mmps (float): The number of millimeters per second the speed should represent.
"""
__slots__ = '_mmps'
def __init__(self, mmps: float):
self._mmps = mmps
def __repr__(self):
return "<%s %.2f mmps>" % (self.__class__.__name__, self.mmps)
def __add__(self, other):
if not isinstance(other, Speed):
raise TypeError("Unsupported operand for + expected Speed")
return Speed(mmps=self.mmps + other.mmps)
def __sub__(self, other):
if not isinstance(other, Speed):
raise TypeError("Unsupported operand for - expected Speed")
return Speed(mmps=self.mmps - other.mmps)
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for * expected number")
return Speed(mmps=self.mmps * other)
def __truediv__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for / expected number")
return Speed(mmps=self.mmps / other)
@property
def mmps(self) -> float:
""" Returns the speed in millimeters per second (mmps). """
return self._mmps
class Vector3:
"""
Represents a 3D Vector (type/units aren't specified).
Args:
x (float): X component
y (float): Y component
z (float): Z component
"""
__slots__ = ('_x', '_y', '_z')
def __init__(self, x: float, y: float, z: float):
self._x = x
self._y = y
self._z = z
def set_to(self, rhs):
"""
Copy the x, y and z components of the given vector.
Args:
rhs (:class:`Vector3`): The right-hand-side of this assignment - the
source vector to copy into this vector.
"""
self._x = rhs.x
self._y = rhs.y
self._z = rhs.z
@property
def x(self) -> float:
""" The x component. """
return self._x
@property
def y(self) -> float:
""" The y component. """
return self._y
@property
def z(self) -> float:
""" The z component. """
return self._z
@property
def x_y_z(self) -> Tuple[float, float, float]:
""" The X, Y, Z elements of the Vector3 (x,y,z). """
return self._x, self._y, self._z
def __repr__(self):
return "<%s x: %.2f y: %.2f z: %.2f>" % (self.__class__.__name__, self.x, self.y, self.z)
def __add__(self, other):
if not isinstance(other, Vector3):
raise TypeError("Unsupported operand for + expected Vector3")
return Vector3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
if not isinstance(other, Vector3):
raise TypeError("Unsupported operand for - expected Vector3")
return Vector3(self.x - other.x, self.y - other.y, self.z - other.z)
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for * expected number")
return Vector3(self.x * other, self.y * other, self.z * other)
def __truediv__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for / expected number")
return Vector3(self.x / other, self.y / other, self.z / other)
def angle_z_to_quaternion(angle_z: Angle) -> Tuple[float, float, float, float]:
"""
Converts an angle in the z axis (Euler angle z component) to a quaternion.
"""
# Define the quaternion to be converted from a Euler angle (x,y,z) of 0,0,angle_z
    # The original equation for each component is shown above its simplified implementation
# q0 = cos(x/2)*cos(y/2)*cos(z/2) + sin(x/2)*sin(y/2)*sin(z/2)
q0 = math.cos(angle_z.radians / 2)
# q1 = sin(x/2)*cos(y/2)*cos(z/2) - cos(x/2)*sin(y/2)*sin(z/2)
q1 = 0
# q2 = cos(x/2)*sin(y/2)*cos(z/2) + sin(x/2)*cos(y/2)*sin(z/2)
q2 = 0
# q3 = cos(x/2)*cos(y/2)*sin(z/2) - sin(x/2)*sin(y/2)*cos(z/2)
q3 = math.sin(angle_z.radians / 2)
return q0, q1, q2, q3
class Matrix44:
"""
A 4x4 Matrix for representing the rotation and/or position of an object in the world.
Can be generated from a Quaternion for a pure rotation matrix, or
combined with a position for a full translation matrix, as done by Pose.to_matrix().
"""
__slots__ = ('m00', 'm10', 'm20', 'm30',
'm01', 'm11', 'm21', 'm31',
'm02', 'm12', 'm22', 'm32',
'm03', 'm13', 'm23', 'm33')
def __init__(self,
m00, m10, m20, m30,
m01, m11, m21, m31,
m02, m12, m22, m32,
m03, m13, m23, m33):
self.m00 = m00
self.m10 = m10
self.m20 = m20
self.m30 = m30
self.m01 = m01
self.m11 = m11
self.m21 = m21
self.m31 = m31
self.m02 = m02
self.m12 = m12
self.m22 = m22
self.m32 = m32
self.m03 = m03
self.m13 = m13
self.m23 = m23
self.m33 = m33
def __repr__(self):
return ("<%s: "
"%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f "
"%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f>" % (self.__class__.__name__, *self.in_row_order))
@property
def tabulated_string(self):
"""str: A multi-line string formatted with tabs to show the matrix contents."""
return ("%.1f\t%.1f\t%.1f\t%.1f\n"
"%.1f\t%.1f\t%.1f\t%.1f\n"
"%.1f\t%.1f\t%.1f\t%.1f\n"
"%.1f\t%.1f\t%.1f\t%.1f" % self.in_row_order)
@property
def in_row_order(self) -> Tuple[float, float, float, float,
float, float, float, float,
float, float, float, float,
float, float, float, float]:
"""
Returns the contents of the matrix in row order.
"""
return self.m00, self.m01, self.m02, self.m03, \
self.m10, self.m11, self.m12, self.m13, \
self.m20, self.m21, self.m22, self.m23, \
self.m30, self.m31, self.m32, self.m33
@property
def in_column_order(self) -> Tuple[float, float, float, float,
float, float, float, float,
float, float, float, float,
float, float, float, float]:
"""
Returns the contents of the matrix in column order.
"""
return self.m00, self.m10, self.m20, self.m30, \
self.m01, self.m11, self.m21, self.m31, \
self.m02, self.m12, self.m22, self.m32, \
self.m03, self.m13, self.m23, self.m33
@property
def forward_xyz(self) -> Tuple[float, float, float]:
"""
Returns the x,y,z components representing the matrix's forward vector.
"""
return self.m00, self.m01, self.m02
@property
def left_xyz(self) -> Tuple[float, float, float]:
"""
Returns the x,y,z components representing the matrix's left vector.
"""
return self.m10, self.m11, self.m12
@property
def up_xyz(self) -> Tuple[float, float, float]:
"""
Returns the x,y,z components representing the matrix's up vector.
"""
return self.m20, self.m21, self.m22
@property
def pos_xyz(self) -> Tuple[float, float, float]:
"""
Returns the x,y,z components representing the matrix's position vector.
"""
return self.m30, self.m31, self.m32
def set_forward(self, x: float, y: float, z: float):
"""
Set the x,y,z components representing the matrix's forward vector.
"""
self.m00 = x
self.m01 = y
self.m02 = z
def set_left(self, x: float, y: float, z: float):
"""
Set the x,y,z components representing the matrix's left vector.
"""
self.m10 = x
self.m11 = y
self.m12 = z
def set_up(self, x: float, y: float, z: float):
"""
Set the x,y,z components representing the matrix's up vector.
"""
self.m20 = x
self.m21 = y
self.m22 = z
def set_pos(self, x: float, y: float, z: float) -> None:
"""
Set the x,y,z components representing the matrix's position vector.
"""
self.m30 = x
self.m31 = y
self.m32 = z
class Quaternion:
"""
Represents rotation.
"""
__slots__ = ('_q0', '_q1', '_q2', '_q3')
def __init__(self,
q0: Optional[float] = None,
q1: Optional[float] = None,
q2: Optional[float] = None,
q3: Optional[float] = None,
angle_z: Optional[Angle] = None):
is_quaternion = (q0 is not None) and (q1 is not None) and (q2 is not None) and (q3 is not None)
if not is_quaternion and angle_z is None:
raise ValueError("Expected either the q0 q1 q2 and q3 or angle_z keyword arguments")
if is_quaternion and angle_z:
raise ValueError("Expected either the q0 q1 q2 and q3 or angle_z keyword argument, not both")
if angle_z is not None:
if not isinstance(angle_z, Angle):
raise TypeError("Unsupported type for angle_z expected Angle")
q0, q1, q2, q3 = angle_z_to_quaternion(angle_z)
self._q0 = q0
self._q1 = q1
self._q2 = q2
self._q3 = q3
def __repr__(self):
return ("<%s q0: %.2f q1: %.2f q2: %.2f q3: %.2f (angle_z: %s)>" %
(self.__class__.__name__, self.q0, self.q1, self.q2, self.q3, self.angle_z))
def to_matrix(self, pos_x: float = 0.0, pos_y: float = 0.0, pos_z: float = 0.0):
"""
Convert the Quaternion to a 4x4 matrix representing this rotation.
A position can also be provided to generate a full translation matrix.
"""
# See https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
q0q0 = self.q0 * self.q0
q1q1 = self.q1 * self.q1
q2q2 = self.q2 * self.q2
q3q3 = self.q3 * self.q3
q0x2 = self.q0 * 2.0 # saves 2 multiplies
q0q1x2 = q0x2 * self.q1
q0q2x2 = q0x2 * self.q2
q0q3x2 = q0x2 * self.q3
q1x2 = self.q1 * 2.0 # saves 1 multiply
q1q2x2 = q1x2 * self.q2
q1q3x2 = q1x2 * self.q3
q2q3x2 = 2.0 * self.q2 * self.q3
m00 = (q0q0 + q1q1 - q2q2 - q3q3)
m01 = (q1q2x2 + q0q3x2)
m02 = (q1q3x2 - q0q2x2)
m10 = (q1q2x2 - q0q3x2)
m11 = (q0q0 - q1q1 + q2q2 - q3q3)
m12 = (q0q1x2 + q2q3x2)
m20 = (q0q2x2 + q1q3x2)
m21 = (q2q3x2 - q0q1x2)
m22 = (q0q0 - q1q1 - q2q2 + q3q3)
return Matrix44(m00, m10, m20, pos_x,
m01, m11, m21, pos_y,
m02, m12, m22, pos_z,
0.0, 0.0, 0.0, 1.0)
# These are only for angle_z because quaternion addition/subtraction is not relevant here
def __add__(self, other):
if not isinstance(other, Quaternion):
raise TypeError("Unsupported operand for + expected Quaternion")
angle_z = self.angle_z + other.angle_z
return Quaternion(angle_z=angle_z)
def __sub__(self, other):
if not isinstance(other, Quaternion):
raise TypeError("Unsupported operand for - expected Quaternion")
angle_z = self.angle_z - other.angle_z
return Quaternion(angle_z=angle_z)
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for * expected number")
angle_z = self.angle_z * other
return Quaternion(angle_z=angle_z)
def __truediv__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for / expected number")
angle_z = self.angle_z / other
return Quaternion(angle_z=angle_z)
@property
def q0(self) -> float:
return self._q0
@property
def q1(self) -> float:
return self._q1
@property
def q2(self) -> float:
return self._q2
@property
def q3(self) -> float:
return self._q3
@property
def q0_q1_q2_q3(self) -> Tuple[float, float, float, float]:
return self._q0, self._q1, self._q2, self._q3
@property
def angle_z(self) -> Angle:
radians = math.atan2(2 * (self.q1 * self.q2 + self.q0 * self.q3),
1 - 2 * (self.q2 ** 2 + self.q3 ** 2))
return Angle(radians=radians)
@property
def euler_angles(self) -> Tuple[float, float, float]:
"""
Returns the pitch, yaw, roll Euler components of the object's
rotation defined as rotations in the x, y, and z axis respectively.
:return:
"""
# convert to matrix
matrix = self.to_matrix()
# normalize the magnitudes of cos(roll)*sin(pitch) (i.e. m12) and
# cos(roll)*cos(pitch) (ie. m22), to isolate cos(roll) to be compared
# against -sin(roll) (m02). Unfortunately, this omits results with an
# absolute angle larger than 90 degrees on roll.
absolute_cos_roll = math.sqrt(matrix.m12 * matrix.m12 + matrix.m22 * matrix.m22)
near_gimbal_lock = absolute_cos_roll < 1e-6
if not near_gimbal_lock:
# general case euler decomposition
pitch = math.atan2(matrix.m22, matrix.m12)
yaw = math.atan2(matrix.m00, matrix.m01)
roll = math.atan2(absolute_cos_roll, -matrix.m02)
else:
# special case euler angle decomposition near gimbal lock
pitch = math.atan2(matrix.m11, -matrix.m21)
yaw = 0
roll = math.atan2(absolute_cos_roll, -matrix.m02)
# adjust roll to be consistent with how the device is oriented
roll = math.pi * 0.5 - roll
if roll > math.pi:
roll -= math.pi * 2
return pitch, yaw, roll
class Pose:
"""
A combination of position (vector) and rotation (quaternion).
"""
__slots__ = ('_position', '_rotation', '_origin_id', '_is_accurate')
def __init__(self,
x: float, y: float, z: float,
q0: Optional[float] = None, q1: Optional[float] = None,
q2: Optional[float] = None, q3: Optional[float] = None,
angle_z: Optional[Angle] = None, origin_id: int = -1, is_accurate: bool = True):
self._position = Vector3(x, y, z)
self._rotation = Quaternion(q0, q1, q2, q3, angle_z)
self._origin_id = origin_id
self._is_accurate = is_accurate
@classmethod
def _create_from_clad(cls, pose):
return cls(pose.x, pose.y, pose.z,
q0=pose.q0, q1=pose.q1, q2=pose.q2, q3=pose.q3,
origin_id=pose.originID)
@classmethod
def _create_default(cls):
return cls(0.0, 0.0, 0.0,
q0=1.0, q1=0.0, q2=0.0, q3=0.0,
origin_id=-1)
def __repr__(self):
return "<%s %s %s origin_id=%d>" % (self.__class__.__name__, self.position, self.rotation, self.origin_id)
def __add__(self, other):
if not isinstance(other, Pose):
raise TypeError("Unsupported operand for + expected Pose")
pos = self.position + other.position
rot = self.rotation + other.rotation
return Pose(pos.x, pos.y, pos.z, rot.q0, rot.q1, rot.q2, rot.q3)
def __sub__(self, other):
if not isinstance(other, Pose):
raise TypeError("Unsupported operand for - expected Pose")
pos = self.position - other.position
rot = self.rotation - other.rotation
return Pose(pos.x, pos.y, pos.z, rot.q0, rot.q1, rot.q2, rot.q3)
def __mul__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for * expected number")
pos = self.position * other
rot = self.rotation * other
return Pose(pos.x, pos.y, pos.z, rot.q0, rot.q1, rot.q2, rot.q3)
def __truediv__(self, other):
if not isinstance(other, (int, float)):
raise TypeError("Unsupported operand for / expected number")
pos = self.position / other
rot = self.rotation / other
return Pose(pos.x, pos.y, pos.z, rot.q0, rot.q1, rot.q2, rot.q3)
def define_pose_relative_this(self, new_pose):
"""
Creates a new pose such that new_pose's origin is now at the location of this pose.
"""
if not isinstance(new_pose, Pose):
raise TypeError("Unsupported type for new_origin, must be of type Pose")
x, y, z = self.position.x_y_z
angle_z = self.rotation.angle_z
new_x, new_y, new_z = new_pose.position.x_y_z
new_angle_z = new_pose.rotation.angle_z
cos_angle = math.cos(angle_z.radians)
sin_angle = math.sin(angle_z.radians)
res_x = x + (cos_angle * new_x) - (sin_angle * new_y)
res_y = y + (sin_angle * new_x) + (cos_angle * new_y)
res_z = z + new_z
res_angle = angle_z + new_angle_z
return Pose(res_x, res_y, res_z, angle_z=res_angle, origin_id=self._origin_id)
def invalidate(self) -> None:
"""
Mark this pose as being invalid (unusable).
"""
self._origin_id = -1
def is_comparable(self, other_pose) -> bool:
"""
Are these two poses comparable.
Poses are comparable if they're valid and having matching origin IDs.
"""
return (self.is_valid and other_pose.is_valid and
(self.origin_id == other_pose.origin_id))
@property
def is_valid(self) -> bool:
"""
Checks whether a pose is valid (usable).
"""
return self.origin_id >= 0
@property
def position(self) -> Vector3:
"""
Returns the position component of this pose.
"""
return self._position
@property
def rotation(self) -> Quaternion:
"""
Returns the rotation component of this pose.
"""
return self._rotation
def to_matrix(self):
"""
Convert the Pose to a Matrix44.
"""
return self.rotation.to_matrix(*self.position.x_y_z)
@property
def origin_id(self) -> int:
"""
Returns an ID maintained by the robot (engine) which represents which coordinate frame this pose is in.
"""
return self._origin_id
@origin_id.setter
def origin_id(self, value: int) -> None:
"""
Change the ID of a pose.
"""
if not isinstance(value, int):
raise TypeError("The type of origin_id must be int")
self._origin_id = value
@property
def is_accurate(self) -> bool:
"""
Returns True if this pose is valid and accurate.
Poses are marked as inaccurate if we detect movement via accelerometer,
or if they were observed from far enough away that we're less certain
of the exact pose.
"""
return self.is_valid and self._is_accurate
def hex_dump(data: bytes) -> str:
res = ":".join("{:02x}".format(b) for b in data)
return res
def hex_load(data: str) -> bytes:
res = bytearray.fromhex(data.replace(":", ""))
return res
def frange(start, stop, step):
x = start
while x < stop:
yield x
x += step
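# Minimal usage sketch (our own addition, not part of the original module):
# compose two poses and inspect the resulting position and heading.
if __name__ == '__main__':
    base = Pose(100.0, 0.0, 0.0, angle_z=Angle(degrees=90), origin_id=0)
    offset = Pose(10.0, 0.0, 0.0, angle_z=Angle(degrees=0), origin_id=0)
    combined = base.define_pose_relative_this(offset)
    print(combined)  # position ~(100, 10, 0), heading 90 degrees
    print(Angle(degrees=45) + Angle(degrees=45))
    print(list(frange(0.0, 1.0, 0.25)))  # [0.0, 0.25, 0.5, 0.75]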
|
from .laundry_cycle_converter import LaundryCycleConverter
from .machine_state_converter import MachineStateConverter
from .laundry_door_status_converter import LaundryDoorStatusConverter
from .laundry_sub_cycle_converter import LaundrySubCycleConverter
from .rinse_option_converter import RinseOptionConverter
from .temperature_option_converter import TemperatureOptionConverter, TemperatureNewOptionConverter
from .washtemp_level_converter import WashTempLevelConverter
from .dryness_level_converter import DrynessLevelConverter, DrynessNewLevelConverter
from .spintime_level_converter import SpinTimeLevelConverter
from .soil_level_converter import SoilLevelConverter
from .tank_selected_converter import TankSelectedConverter
from .tumble_status_converter import TumbleStatusConverter
from .sheet_usage_configuration_converter import SheetUsageConfigurationConverter
from .ecodry_status_converter import EcoDryStatusConverter
from .smart_dispense_converter import SmartDispenseConverter
from .smart_dispense_tank_status_converter import SmartDispenseTankStatusConverter
|
class ValidationError(Exception):
"""Empty directory exception"""
def __init__(self, msg):
self.msg = msg
super(ValidationError, self).__init__(msg)
class MissingDecorator(Exception):
"""Empty directory exception"""
def __init__(self, msg):
self.msg = msg
super(MissingDecorator, self).__init__(msg)
class MissingArguments(Exception):
"""Empty directory exception"""
def __init__(self, msg):
self.msg = msg
super(MissingArguments, self).__init__(msg)
class MissingUniqueField(Exception):
"""Empty directory exception"""
def __init__(self, msg):
self.msg = msg
super(MissingUniqueField, self).__init__(msg)
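# Hedged usage sketch (illustrative only): callers are expected to raise these with a
# human-readable message and can read it back from the .msg attribute, e.g.
#     try:
#         raise MissingUniqueField("model 'User' defines no unique field")
#     except MissingUniqueField as err:
#         print(err.msg)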
|
from __future__ import division
import tensorflow as tf
import numpy as np
from utils import *
from sklearn.externals.joblib import Memory
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from sklearn import metrics
############# define input options ################
flags = tf.flags
flags.DEFINE_string(
"dataset", None,
"Choose one of the datasets: (1) phishing, (2) real-sim, (3) skin_nonskin, (4) SUSY, (5) epsilon_normalized, (6) covtype.libsvm.binary.scale, (7) ijcnn1, (8) HIGGS, (9) diabetes_scale, (10) heart_scale, (11) rcv1_train.binary")
flags.DEFINE_string("output_file", None,
"Where the training/test experiment data is stored.")
flags.DEFINE_float("request_ratio", 0.7, "Positive / Negative data ratio. Default value 0.7")
flags.DEFINE_integer("batch_size", 32, "batch_size (default = 32).")
flags.DEFINE_integer("num_time_steps", 20000, "number of time steps for the AUC optimization")
flags.DEFINE_integer("num_epochs", 10, "number of times to repeat the same experiment")
FLAGS = flags.FLAGS
###################################################
def get_data():
data = load_svmlight_file('./data/' + FLAGS.dataset)
return data[0], data[1]
print 'load data'
X, y = get_data()
print 'todense'
X = X.todense()
data_num, feat_num = np.shape(X)
# compute (+/-) ratio of dataset:
data_ratio = 0
for i in range(data_num):
if y[i] == 1:
data_ratio = (i * data_ratio + y[i]) / ((i + 1) * 1.0)
else:
y[i] = 0
data_ratio = (i * data_ratio + y[i]) / ((i + 1) * 1.0)
print 'data_ratio=', data_ratio
print 'relabel y=1/0 & reset (+/-) ratio:'
X_new = np.array([]).reshape(-1, X.shape[1])
y_new = np.array([])
X_pos = X[y == 1].reshape(-1, X.shape[1])
X_neg = X[y == 0].reshape(-1, X.shape[1])
C = FLAGS.request_ratio * (1 - data_ratio) / (data_ratio * (1 - FLAGS.request_ratio))
if FLAGS.request_ratio > data_ratio:
X_new = np.r_[X_new, X_pos]
y_new = np.r_[y_new, np.ones(X_pos.shape[0])]
neg_idx = np.arange(0, X_neg.shape[0], np.ceil(C)).astype(np.int32)
X_new = np.r_[X_new, X_neg[neg_idx].reshape(-1, X.shape[1])]
y_new = np.r_[y_new, np.zeros(neg_idx.size)]
else:
X_new = np.r_[X_new, X_neg]
y_new = np.r_[y_new, np.zeros(X_neg.shape[0])]
pos_idx = np.arange(0, X_pos.shape[0], np.ceil(C)).astype(np.int32)
X_new = np.r_[X_new, X_pos[pos_idx].reshape(-1, X.shape[1])]
y_new = np.r_[y_new, np.ones(pos_idx.size)]
print 'X_new.shape', X_new.shape
print 'y_new.shape', y_new.shape
new_data_num, feat_num = np.shape(X_new)
# mean 0
X_new_mean = np.mean(X_new, axis=0)
X_new = X_new - np.stack([X_new_mean for _ in range(new_data_num)])
# norm 1
for i in range(new_data_num):
X_new[i, :] = X_new[i, :] / np.linalg.norm(X_new[i, :])
p = np.mean(y_new)
X_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.2, random_state=42)
print 'X_train shape', X_train.shape
print 'X_test shape', X_test.shape
new_idx = np.random.permutation(np.shape(y_train)[0])
# shuffle training set:
X_train = X_train[new_idx]
y_train = np.asarray(y_train)[new_idx]
train_num = X_train.shape[0]
test_num = X_test.shape[0]
# ---------------------------------------------------------------------------
W_range = 1.0
batch_size = FLAGS.batch_size
# AUC neural net model
class AUCModel(object):
global p
def __init__(self):
self._build_model()
def _build_model(self):
self.X = tf.placeholder(tf.float64, [None, feat_num])
self.y_sing = tf.placeholder(tf.float64, [None])
with tf.variable_scope('weight'):
# current copy of w
self.w = tf.Variable(tf.zeros([feat_num, 1], dtype=tf.float64), name="w")
# average version of w
self.w_ave = tf.Variable(tf.zeros([feat_num, 1], dtype=tf.float64), name="w_ave")
self.inner_prod = tf.matmul(self.X, self.w)
self.inner_prod_ave = tf.matmul(self.X, self.w_ave)
self.pred = 0.5 * tf.sign(self.inner_prod) + 0.5
with tf.variable_scope('network'):
# current copies of (a,b)
self.a = tf.Variable(tf.zeros([1], dtype=tf.float64), name="a")
self.b = tf.Variable(tf.zeros([1], dtype=tf.float64), name="b")
self.alpha = tf.Variable(tf.zeros([1], dtype=tf.float64), name="alpha")
# average versions of (a,b)
self.a_ave = tf.Variable(tf.zeros([1], dtype=tf.float64), name="a_ave")
self.b_ave = tf.Variable(tf.zeros([1], dtype=tf.float64), name="b_ave")
self.alpha_ave = tf.Variable(tf.zeros([1], dtype=tf.float64), name="alpha_ave")
self.loss = tf.reduce_mean(
(1 - p) * tf.multiply(
self.y_sing,
tf.square(self.inner_prod - tf.tile(self.a, [batch_size])))
+ p * tf.multiply(
1 - self.y_sing,
tf.square(self.inner_prod - tf.tile(self.b, [batch_size])))
+ 2 * (1 + self.alpha) *
(p * tf.multiply(1 - self.y_sing, self.inner_prod) -
(1 - p) * tf.multiply(self.y_sing, self.inner_prod)) - p *
(1 - p) * tf.square(self.alpha))
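# Note (descriptive): the loss above appears to follow the saddle-point surrogate of
# stochastic online AUC maximization: with p = P(y = 1), the per-example objective
#   (1-p)*y*(w.x - a)^2 + p*(1-y)*(w.x - b)^2
#     + 2*(1+alpha)*(p*(1-y) - (1-p)*y)*w.x - p*(1-p)*alpha^2
# is minimized over (w, a, b) and maximized over alpha, which is why the graph below
# pairs a gradient-descent step on (w, a, b) with an ascent step on alpha.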
# Build the model graph
graph = tf.get_default_graph()
with graph.as_default():
model = AUCModel()
learning_rate = tf.placeholder(tf.float64, [])
weighted_coeff = tf.placeholder(tf.float64, [])
fraction = tf.divide(learning_rate, weighted_coeff)
# assign new weighted-averages of (w,a,b,alpha)
save_w_op = tf.assign(model.w_ave, (1-fraction)*model.w_ave+fraction*model.w)
save_a_op = tf.assign(model.a_ave, (1-fraction)*model.a_ave+fraction*model.a)
save_b_op = tf.assign(model.b_ave, (1-fraction)*model.b_ave+fraction*model.b)
save_alpha_op = tf.assign(model.alpha_ave, (1-fraction)*model.alpha_ave+fraction*model.alpha)
# define min optimizer
min_train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# define max optimizer
max_train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# stochastic descent
t_vars = tf.trainable_variables()
# compute the gradients of a list of vars: w,a,b
grads_and_vars_min = min_train_op.compute_gradients(model.loss, [v for v in t_vars if(v.name == 'weight/w:0' or v.name == 'network/a:0' or v.name == 'network/b:0')])
min_op = min_train_op.apply_gradients(grads_and_vars_min)
clip_a_op = tf.assign(model.a, tf.clip_by_value(model.a, clip_value_min=-W_range, clip_value_max=W_range))
clip_b_op = tf.assign(model.b, tf.clip_by_value(model.b, clip_value_min=-W_range, clip_value_max=W_range))
clip_w_op = tf.assign(model.w, tf.clip_by_norm(model.w, clip_norm=W_range, axes=[0]))
# stochastic ascent
# compute the gradients of a list of vars: alpha
grads_and_vars_max = max_train_op.compute_gradients(tf.negative(model.loss), [v for v in t_vars if v.name == 'network/alpha:0'])
max_op = max_train_op.apply_gradients(grads_and_vars_max)
clip_alpha_op = tf.assign(model.alpha,
tf.clip_by_value(model.alpha, clip_value_min=-2*W_range, clip_value_max=2*W_range))
train_op = tf.group(max_op, clip_alpha_op, min_op, clip_a_op, clip_b_op, clip_w_op,
save_w_op, save_a_op, save_b_op, save_alpha_op)
# Evaluation
correct_label_pred = tf.equal(tf.cast(model.y_sing,tf.int64), tf.cast(tf.transpose(model.pred), tf.int64))
label_accuracy = tf.reduce_mean(tf.cast(correct_label_pred, tf.float64))
# Params
num_steps = FLAGS.num_time_steps
def train_and_evaluate(training_mode, graph, model, verbose=True):
"""Helper to run the model with different training modes."""
wc = 0
with tf.Session(graph=graph) as sess:
tf.global_variables_initializer().run()
# global X_train, X_test, y_train, y_test
gen_train_batch = batch_generator([X_train, y_train], batch_size)
gen_test_batch = batch_generator([X_test, y_test], batch_size)
# Training loop
prediction_list = []
label_list = []
for i in range(num_steps):
# Learning rate as in Stochastic Online AUC optimization
# Training step
if training_mode == 'auc':
lr = 1e1 / np.sqrt(i + 1)
wc = wc + lr
# lr = 0
X, y_sing = gen_train_batch.next()
# fetch loss
accuracy, batch_total_loss, prediction, frac, W, A, B, Alpha, inner_product, gvmin, gvmax, correct, _ = sess.run(
[label_accuracy, model.loss, model.pred, fraction, model.w, model.a, model.b, model.alpha,
model.inner_prod_ave, grads_and_vars_min, grads_and_vars_max, correct_label_pred, train_op],
feed_dict={
model.X: X,
model.y_sing: y_sing,
learning_rate: lr,
weighted_coeff: wc
}
)
prediction_list.extend(prediction.reshape([batch_size]))
label_list.extend(y_sing)
# print(np.array(label_list).shape)
# print(np.array(prediction_list).shape)
if verbose and i % 2000 == 1999:
print '\n\nAUC optimization training, (+/-) ratio %f', p, 1 - p
print 'epoch', i, '/', num_steps
AUC = tf.contrib.metrics.streaming_auc(prediction, y_sing)
sess.run(tf.local_variables_initializer(
)) # try commenting this line and you'll get the error
train_AUC = sess.run(AUC)
gmin, vmin = zip(*gvmin)
gmax, vmax = zip(*gvmax)
print 'batch_total_loss', batch_total_loss
print 'learning_rate', lr
print 'fraction', frac
print 'gradient_a', gmin[1]
print 'gradient_b', gmin[2]
print 'gradient_alpha', gmax[0]
print 'A', A
print 'B', B
print 'Alpha', Alpha
# print('weighted_coeff',weight)
print 'train_auc', train_AUC
print 'train_acc', accuracy
# cumulative_AUC = metrics.roc_auc_score(np.array(label_list),np.array(prediction_list))
# print 'cumulative AUC', cumulative_AUC
print 'inner_product', inner_product.T
print 'prediction ', prediction.reshape(
[batch_size]).astype(int)
print 'groundtruth', y_sing.astype(int)
print 'correct ', correct.reshape(
[batch_size]).astype(int)
# Compute final evaluation on test data
acc, prediction = sess.run(
[label_accuracy, model.pred],
feed_dict={model.X: X_test,
model.y_sing: y_test})
test_prediction = prediction.reshape([test_num])
cumulative_auc = metrics.roc_auc_score(y_test, test_prediction)
return acc, cumulative_auc # train_auc, train_pre, train_rec
if not FLAGS.dataset:
raise ValueError("Must set --dataset for experiments")
if not FLAGS.output_file:
raise ValueError("Must set --output_file for experiments")
fout = open('./output/' + FLAGS.output_file, 'a')
fout.write('dataset: ' + FLAGS.dataset)
fout.write('\noutput_file: ' + FLAGS.output_file)
fout.write('\n(+/-) ratio: ' + str(FLAGS.request_ratio) + ':' +
str(1 - FLAGS.request_ratio))
fout.write('\nAUC optimization with ' + str(FLAGS.num_time_steps) +
' training steps')
fout.close()
print 'dataset:', FLAGS.dataset
print 'output_file:', FLAGS.output_file
print '(+/-) ratio:', FLAGS.request_ratio, ' : ', 1 - FLAGS.request_ratio
print '\nauc optimization training'
acc_ave = 0.0
auc_ave = 0.0
for i in range(FLAGS.num_epochs):
print 'epoch', i, '/', str(FLAGS.num_epochs)
fopen = open('./output/' + FLAGS.output_file, 'a')
fopen.write('\nNumber of experiment ' + str(i) + ' / ' +
str(FLAGS.num_epochs) + '\n')
acc, auc = train_and_evaluate('auc', graph, model)
print 'testing data accuracy:', acc
fopen.write('testing data accuracy: ' + str(acc) + '\n')
print 'testing data cumulative AUC', auc
fopen.write('testing data cumulative AUC: ' + str(auc) + '\n')
auc_ave = (i * auc_ave + auc) / ((i + 1) * 1.0)
acc_ave = (i * acc_ave + acc) / ((i + 1) * 1.0)
fopen.close()
fopen = open('./output/' + FLAGS.output_file, 'a')
fopen.write('testing data average ACC over ' + str(FLAGS.num_epochs) +
' epochs: ' + str(acc_ave) + '\n')
fopen.write('testing data average AUC over ' + str(FLAGS.num_epochs) +
' epochs: ' + str(auc_ave) + '\n')
fopen.close()
|
"""
Preprocess wetlands dataset.
TOOTCHI
https://doi.org/10.1594/PANGAEA.892657
In:
Spatial: 0.066 deg
Out:
Spatial: 0.033 deg
Steps:
1) Harmonize
"""
import os
import xarray as xr
import logging
from utils.pyutils import rm_existing
from dataprocessing.plotting import plot_var
from dataprocessing.datasets.config import \
dir_source, \
dir_target
logging.info('Processing dataset: wetlands')
file_in = os.path.join(
dir_source, '0d06_static/wetlands/Tootchi_2019/Data/CW_TCI.fractions.10800.5400.nc'
)
file_out = os.path.join(
dir_target, 'processed/0d033/static/wetlands.nc'
)
rm_existing(file_out)
os.makedirs(os.path.dirname(file_out), exist_ok=True)
ds = xr.open_dataset(file_in).rename({'latitude': 'lat', 'longitude': 'lon'})
ds_out = xr.Dataset()
ds_out['data'] = ds.to_array().rename({'variable': 'var'})
ds_out.attrs = ds.attrs
# Fill missing values with zeros.
ds_out = ds_out.where(ds_out.sel(var='none').notnull(), 0.0)
ds_out.to_netcdf(file_out)
plot_path = __file__.replace('.py', '.jpg')
plot_var(path=file_out, plot_path=plot_path)
logging.info('Done processing dataset: wetlands')
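# Hedged mini-example (synthetic values, not the Tootchi file): Dataset.where keeps
# entries where the condition holds and substitutes the fill value elsewhere, which
# is how the missing wetland fractions are zero-filled above:
#     demo = xr.Dataset({"data": ("x", [1.0, float("nan"), 3.0])})
#     demo.where(demo["data"].notnull(), 0.0)   # NaN becomes 0.0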
|
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-pythonpath", "--pythonpath", type=str)
parser.add_argument("-tomo_name", "--tomo_name", type=str)
parser.add_argument("-fold", "--fold", type=str, default="None")
parser.add_argument("-config_file", "--config_file", help="yaml_file", type=str)
args = parser.parse_args()
pythonpath = args.pythonpath
sys.path.append(pythonpath)
import os
import ast
import torch
from os.path import join
import numpy as np
import pandas as pd
from constants.dataset_tables import DatasetTableHeader
from file_actions.writers.csv import motl_writer
from networks.utils import build_prediction_output_dir
from performance.statistics_utils import pr_auc_score, \
f1_score_calculator, precision_recall_calculator, get_max_F1
from tomogram_utils.peak_toolbox.utils import read_motl_coordinates_and_values
from constants.statistics import write_statistics_pp
from constants.config import Config, model_descriptor_from_config
from plotting.statistics import generate_performance_plots
from constants.config import get_model_name
from networks.io import get_device
config_file = args.config_file
config = Config(user_config_file=config_file)
tomo_name = args.tomo_name
fold = ast.literal_eval(args.fold)
model_path, model_name = get_model_name(config, fold)
print("model_name", model_name)
snakemake_pattern = config.output_dir + "/predictions/" + model_name + "/" + tomo_name + "/" + config.pred_class + \
"/pr_radius_" + str(config.pr_tolerance_radius) + \
"/detected/.{fold}.done_pp_snakemake".format(fold=str(fold))
from networks.utils import get_training_testing_lists
if isinstance(fold, int):
tomo_training_list, tomo_testing_list = get_training_testing_lists(config=config, fold=fold)
if tomo_name in tomo_testing_list:
run_job = True
else:
run_job = False
else:
run_job = True
if run_job:
DTHeader = DatasetTableHeader(semantic_classes=config.semantic_classes)
df = pd.read_csv(config.dataset_table)
df[DTHeader.tomo_name] = df[DTHeader.tomo_name].astype(str)
print("Processing tomo", tomo_name)
pred_output_dir = os.path.join(config.output_dir, "predictions")
tomo_output_dir = build_prediction_output_dir(base_output_dir=pred_output_dir,
label_name="", model_name=model_name,
tomo_name=tomo_name, semantic_class=config.pred_class)
print(tomo_output_dir)
os.makedirs(tomo_output_dir, exist_ok=True)
motl_in_dir = [file for file in os.listdir(tomo_output_dir) if 'motl_' == file[:5]]
assert len(motl_in_dir) == 1, "only one motive list can be filtered; we got {}.".format(len(motl_in_dir))
path_to_motl_predicted = os.path.join(tomo_output_dir, motl_in_dir[0])
tomo_df = df[df[DTHeader.tomo_name] == tomo_name]
path_to_motl_true = tomo_df.iloc[0][DTHeader.clean_motls[config.pred_class_number]]
figures_dir = os.path.join(tomo_output_dir, "figures")
os.makedirs(figures_dir, exist_ok=True)
predicted_values, predicted_coordinates = read_motl_coordinates_and_values(
path_to_motl=path_to_motl_predicted)
true_values, true_coordinates = read_motl_coordinates_and_values(
path_to_motl=path_to_motl_true)
unique_peaks_number = len(predicted_values)
predicted_coordinates = np.array(predicted_coordinates)
prec, recall, tp_true, tp_pred, fp_pred, tp_pred_scores, fp_pred_scores, fn, *_ = \
precision_recall_calculator(
predicted_coordinates=predicted_coordinates,
value_predicted=predicted_values,
true_coordinates=true_coordinates,
radius=config.pr_tolerance_radius)
F1_score = f1_score_calculator(prec, recall)
auPRC = pr_auc_score(precision=prec, recall=recall)
max_F1, optimal_peak_number = get_max_F1(F1_score)
print("auPRC = ", auPRC, "; max_F1 = ", max_F1, "; final F1 = ", F1_score[-1])
tomo_output_dir = os.path.join(tomo_output_dir, "pr_radius_" + str(config.pr_tolerance_radius))
path_to_detected = join(tomo_output_dir, "detected")
path_to_detected_true = join(path_to_detected, "in_true_motl")
path_to_detected_predicted = join(path_to_detected, "in_pred_motl")
path_to_undetected_predicted = join(tomo_output_dir, "undetected")
os.makedirs(path_to_detected_predicted, exist_ok=True)
os.makedirs(path_to_detected_true, exist_ok=True)
os.makedirs(path_to_undetected_predicted, exist_ok=True)
motl_writer(path_to_output_folder=path_to_detected_predicted,
list_of_peak_coords=tp_pred,
list_of_peak_scores=tp_pred_scores,
in_tom_format=True)
motl_writer(path_to_output_folder=path_to_detected_true,
list_of_peak_coords=tp_true,
list_of_peak_scores=[1 for n in tp_true],
in_tom_format=True)
motl_writer(path_to_output_folder=path_to_undetected_predicted,
list_of_peak_coords=fp_pred,
list_of_peak_scores=fp_pred_scores,
in_tom_format=True)
# save performance figures
generate_performance_plots(recall=recall, prec=prec, F1_score=F1_score, predicted_values=predicted_values,
tp_pred_scores=tp_pred_scores, fp_pred_scores=fp_pred_scores, figures_dir=figures_dir)
statistics_file = os.path.join(config.output_dir, "pp_statistics.csv")
device = get_device()
checkpoint = torch.load(model_path, map_location=device)
if 'model_descriptor' in checkpoint.keys():
model_descriptor = checkpoint["model_descriptor"]
else:
print("WARNING: model without model descriptor")
model_descriptor = model_descriptor_from_config(config)
checkpoint["model_descriptor"] = model_descriptor
torch.save({
'model_descriptor': model_descriptor,
'epoch': checkpoint['epoch'],
'model_state_dict': checkpoint['model_state_dict'],
'optimizer_state_dict': checkpoint['optimizer_state_dict'],
'loss': checkpoint['loss'],
}, model_path)
print(statistics_file)
write_statistics_pp(statistics_file=statistics_file, tomo_name=tomo_name, model_descriptor=model_descriptor,
statistic_variable="auPRC",
statistic_value=round(auPRC, 4), pr_radius=config.pr_tolerance_radius,
min_cluster_size=config.min_cluster_size, max_cluster_size=config.max_cluster_size,
threshold=config.threshold, prediction_class=config.pred_class,
clustering_connectivity=config.clustering_connectivity, processing_tomo=config.processing_tomo,
region_mask=config.region_mask)
write_statistics_pp(statistics_file=statistics_file, tomo_name=tomo_name, model_descriptor=model_descriptor,
statistic_variable="max_F1",
statistic_value=round(max_F1, 4), pr_radius=config.pr_tolerance_radius,
min_cluster_size=config.min_cluster_size, max_cluster_size=config.max_cluster_size,
threshold=config.threshold, prediction_class=config.pred_class,
clustering_connectivity=config.clustering_connectivity, processing_tomo=config.processing_tomo,
region_mask=config.region_mask)
write_statistics_pp(statistics_file=statistics_file, tomo_name=tomo_name, model_descriptor=model_descriptor,
statistic_variable="F1",
statistic_value=round(F1_score[-1], 4), pr_radius=config.pr_tolerance_radius,
min_cluster_size=config.min_cluster_size, max_cluster_size=config.max_cluster_size,
threshold=config.threshold, prediction_class=config.pred_class,
clustering_connectivity=config.clustering_connectivity, processing_tomo=config.processing_tomo,
region_mask=config.region_mask)
# For snakemake:
snakemake_pattern_dir = os.path.dirname(snakemake_pattern)
os.makedirs(snakemake_pattern_dir, exist_ok=True)
with open(file=snakemake_pattern, mode="w") as f:
print("Creating snakemake pattern", snakemake_pattern)
|
#!/usr/bin/env python3
# Copyright (c) 2016 Fabian Schuiki
#
# This script provides the means to prepare, execute, and analyze the results of
# a propagation and transition time characterization.
import sys, os, argparse
from potstill.char.util import *
from potstill.char.tpd import Input, Run
# Parse the command line arguments.
parser = argparse.ArgumentParser(prog="potstill char-tpd", description="Prepare, execute, and analyze the results of a propagation and transition time characterization.")
argparse_init_macro(parser)
parser.add_argument("TSLEW", type=float, help="input transition time [s]")
parser.add_argument("CLOAD", type=float, help="output load capacitance [F]")
parser.add_argument("--spectre", action="store_true", help="write SPECTRE input file to stdout")
parser.add_argument("--ocean", action="store_true", help="write OCEAN input file to stdout")
args = parser.parse_args()
# Create the input files.
macro = argparse_get_macro(args)
inp = Input(macro, args.TSLEW, args.CLOAD)
if args.spectre:
sys.stdout.write(inp.make_spectre())
sys.exit(0)
if args.ocean:
sys.stdout.write(inp.make_ocean())
sys.exit(0)
# Execute the run.
run = Run(inp)
run.run()
|
import unittest
import requests
from assertpy import assert_that
from requests.exceptions import Timeout
from unittest.mock import Mock, patch
from src.Api import Api
class TestApiMonkeyPatch(unittest.TestCase):
@patch('src.Api.Api', autospec=True)
def test_method_api_put_raises_timeout(self, mock_class):
mock_id = Mock()
mock_id.return_value = 1
mock_data = Mock()
mock_data.return_value = {"key": "value"}
mock_class.api_put.side_effect = Timeout
with self.assertRaises(Timeout):
mock_class.api_put(mock_id, mock_data)
def test_method_api_put_assert_that_called_once(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_data = Mock()
mock_data.return_value = {"key": "value"}
mock_api.api_put(mock_id, mock_data)
mock_api.api_put.assert_called_once()
def test_method_api_put_assert_that_called(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_data2 = Mock()
mock_data2.return_value = {
"userId": 2,
"title": "Lorem ipsum",
"completed": True
}
mock_api.api_put(mock_id, mock_data)
mock_api.api_put(mock_id2, mock_data2)
mock_api.api_put.assert_called()
def test_method_api_put_assert_that_not_called(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_api.api_put.assert_not_called()
def test_method_api_put_assert_that_called_with_id_1_and_mock_data_userId_1_title_Lorem(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put(mock_id, mock_data)
mock_api.api_put.assert_called_with(mock_id, mock_data)
def test_method_api_put_assert_that_called_once_with_id_1_and_mock_data_userId_1_title_Lorem(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put(mock_id, mock_data)
mock_api.api_put.assert_called_once_with(mock_id, mock_data)
def test_method_api_put_assert_that_response_has_status_code_200(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"put_id": todo_id,
"put_data": todo, "status_code": 200}
response = mock_api.api_put(todo_id, todo)
assert_that(response).has_status_code(200)
def test_method_api_post_assert_that_response_status_code_is_not_200(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
response = mock_api.api_put(todo_id, todo)
assert_that(response["status_code"]).is_not_equal_to(200)
def test_method_api_post_assert_that_response_returns_put_data(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"put_id": todo_id,
"put_data": todo, "status_code": 200}
response = mock_api.api_put(todo_id, todo)
assert_that(response["put_data"]).is_equal_to(todo)
def test_method_api_post_assert_that_response_put_data_contain_all_keys_userId_title_completed(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"put_id": todo_id,
"put_data": todo, "status_code": 200}
response = mock_api.api_put(todo_id, todo)
assert_that(response["put_data"]).contains_key("userId", "title", "completed")
def test_method_api_put_assert_that_response_is_instance_of_dict(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"put_id": todo_id,
"put_data": todo, "status_code": 200}
response = mock_api.api_put(todo_id, todo)
assert_that(response).is_instance_of(dict)
def test_method_api_put_assert_that_response_has_key_put_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"put_id": todo_id,
"put_data": todo, "status_code": 200}
response = mock_api.api_put(todo_id, todo)
assert_that(response).has_put_id(1)
def test_method_api_put_assert_that_not_called_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put(mock_id, mock_data)
with self.assertRaises(AssertionError):
mock_api.api_put.assert_not_called()
def test_method_api_put_assert_that_called_once_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_data2 = Mock()
mock_data2.return_value = {
"userId": 2,
"title": "Lorem ipsum",
"completed": True
}
mock_api.api_put(mock_id, mock_data)
mock_api.api_put(mock_id2, mock_data2)
with self.assertRaises(AssertionError):
mock_api.api_put.assert_called_once()
def test_method_api_put_assert_that_called_with_id_1_and_mock_data_userId_1_title_Lorem_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_data2 = Mock()
mock_data2.return_value = {
"userId": 2,
"title": "Lorem ipsum",
"completed": True
}
mock_api.api_put(mock_id2, mock_data2)
with self.assertRaises(AssertionError):
mock_api.api_put.assert_called_with(mock_id, mock_data)
def test_method_api_put_assert_that_called_once_with_id_1_and_mock_data_userId_1_title_Lorem_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_data = Mock()
mock_data.return_value = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_data2 = Mock()
mock_data2.return_value = {
"userId": 2,
"title": "Lorem ipsum",
"completed": True
}
mock_api.api_put(mock_id, mock_data)
mock_api.api_put(mock_id2, mock_data2)
with self.assertRaises(AssertionError):
mock_api.api_put.assert_called_once_with(mock_id, mock_data)
def test_method_api_put_no_parameter_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
with self.assertRaises(TypeError):
mock_api.api_put()
def test_method_api_put_only_one_parameter_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
with self.assertRaises(TypeError):
mock_api.api_put(mock_id)
def test_method_api_put_assert_that_response_returns_ValueError_when_called_with_id_0_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 0
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = ValueError
assert_that(mock_api.api_put).raises(ValueError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_ValueError_when_called_with_id_300_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 300
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = ValueError
assert_that(mock_api.api_put).raises(ValueError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_TypeError_when_called_with_id_not_int_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = "1"
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = TypeError
assert_that(mock_api.api_put).raises(TypeError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_AttributeError_when_called_with_id_None_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = None
todo = {
"userId": 1,
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = AttributeError
assert_that(mock_api.api_put).raises(AttributeError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_ValueError_when_called_with_empty_obj_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = ValueError
assert_that(mock_api.api_put).raises(ValueError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_ValueError_when_called_with_obj_without_key_userId_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"title": "Lorem",
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = ValueError
assert_that(mock_api.api_put).raises(ValueError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_ValueError_when_called_with_obj_without_key_title_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"completed": False
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = ValueError
assert_that(mock_api.api_put).raises(ValueError).when_called_with(todo_id, todo)
def test_method_api_put_assert_that_response_returns_ValueError_when_called_with_obj_without_key_completed_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
todo = {
"userId": 1,
"title": "Lorem",
}
mock_api.api_put.return_value = {"status_code": 408}
mock_api.api_put.side_effect = ValueError
assert_that(mock_api.api_put).raises(ValueError).when_called_with(todo_id, todo)
if __name__ == '__main__':
unittest.main()
|
import gym.envs.classic_control as gym_classic
import gym.envs.box2d as gym_box
from .abstract_environments import *
from helpers import sin_and_cos_to_radians
class DiscreteActionMountainCar(GroundTruthSupportEnv, DiscreteActionReshaper, gym_classic.MountainCarEnv):
goal_state = np.array([[0.5, 0.0]])
goal_mask = np.array([[1.0, 0.0]])
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
setattr(self.action_space, "low", self.low)
setattr(self.action_space, "high", self.high)
def get_GT_state(self):
return self.state
def set_GT_state(self, state):
# noinspection PyAttributeOutsideInit
self.state = state
def set_state_from_observation(self, observation):
self.set_GT_state(observation)
class DiscreteActionCartPole(DiscreteActionReshaper, gym_classic.CartPoleEnv):
goal_state = np.array([[0.0, 0.0, 0.0, 0.0]])
goal_mask = np.array([[1.0, 1.0, 1.0, 1.0]])
class ContinuousMountainCar(GroundTruthSupportEnv, gym_classic.Continuous_MountainCarEnv):
goal_state = np.array([[0.5, 0.0]])
goal_mask = np.array([[1.0, 0.0]])
def get_GT_state(self):
return self.state
def set_GT_state(self, state):
# noinspection PyAttributeOutsideInit
self.state = state
def set_state_from_observation(self, observation):
self.set_GT_state(observation)
class ContinuousLunarLander(EnvWithDefaults, gym_box.LunarLanderContinuous):
goal_state = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1, 1]])
goal_mask = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]])
class ContinuousPendulum(GroundTruthSupportEnv, gym_classic.PendulumEnv):
def set_GT_state(self, state):
cos_theta, sin_theta, theta_dot = state
theta = sin_and_cos_to_radians(sin_theta, cos_theta)
# noinspection PyAttributeOutsideInit
self.state = theta, theta_dot
def get_GT_state(self):
return self.state
def set_state_from_observation(self, observation):
self.set_GT_state(observation)
@staticmethod
def angle_normalize(x):
return ((x + np.pi) % (2 * np.pi)) - np.pi
def cost_fn(self, observation, action, next_obs):
cos_theta, sin_theta, th_dot = observation.T
th = sin_and_cos_to_radians(sin_theta, cos_theta)
costs = self.angle_normalize(th) ** 2 + 0.1 * th_dot ** 2 + 0.001 * (np.squeeze(action) ** 2)
return costs
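# Hedged sketch (assumption, not the actual helpers module): a sin/cos -> angle
# conversion compatible with the calls above would be
#     def sin_and_cos_to_radians(sin_theta, cos_theta):
#         return np.arctan2(sin_theta, cos_theta)
# i.e. the angle in (-pi, pi] whose sine and cosine match the observation.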
|
import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(CURRENT_PATH)
PAR_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir))
sys.path.append(PAR_PATH)
from src.dataset import Dataset
from src.server.server import Server
from src.algo.baselines.randomP.randomP import RandomP
from dataset import DATASET_PATH
import random
import time
import os
from src.dataset import read_file_to_dict
from log.graphs.random import RANDOM_GRAPH_PATH
def main():
network_dataset = Dataset('twitters2')
nl = read_file_to_dict(os.path.join(DATASET_PATH, 'TwitterSample2.txt'))
# sample the first half of the node list
nbunch = nl[0:int(len(nl) // 2)]
network_dataset.graph = network_dataset.graph.subgraph(nbunch)
server_list = [Server(k) for k in range(0, 512)]
vp_number = 0
node_list = list(network_dataset.graph.nodes)
random.shuffle(node_list)
print('Dataset information: TwitterSample2\nNodes Number:', network_dataset.graph.order(), '\nEdge Number:',
network_dataset.graph.size())
print('Using Random Partitioning Method...\nServer Number:', len(server_list), '\nVirtual Primary Copy Number:',
vp_number,
'\nWrite Frequency of Nodes: 1')
start = time.time()
m = RandomP(server_list, network_dataset, node_list)
m.add_new_primary_node(server_list, vp_number)
m.check_server_load()
m.check_locality()
end = time.time()
print('Random Partitioning Time:', end - start, 'seconds')
m.compute_inter_sever_cost()
path = RANDOM_GRAPH_PATH
m.save_all(path)
main()
|
# stdlib
from typing import Any
# relative
from ...core.common import UID
from ...core.common.serde.serializable import Serializable
class PyPrimitive(Serializable):
def __init__(self, temporary_box: bool = False) -> None:
self._id: UID
# sometimes we need to serialize a python primitive in such a way that we can
# deserialize it back as that primitive later. This flag allows us to do that.
self.temporary_box = temporary_box
def upcast(self) -> Any:
pass
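# Hedged illustration (hypothetical subclass, not part of the library): a concrete
# primitive wrapper would typically hold the underlying value and return it from
# upcast(), e.g.
#     class Int(PyPrimitive):
#         def __init__(self, value: int, temporary_box: bool = False) -> None:
#             super().__init__(temporary_box=temporary_box)
#             self.value = value
#         def upcast(self) -> int:
#             return self.value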
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
import csv
import os
import math
import re
from hltbapi import HtmlScraper
import datetime
import parameters
os.getcwd()
# Export file to JSON
def exportJSON(file, name):
with open(name + '_data.json', 'w', encoding='utf-8') as jsonfile:
json.dump(file, jsonfile, ensure_ascii=False, indent=4)
# Return review numbers of owned STEAM games
def getGameReviews(gameID):
try:
gameReviews = requests.get('https://store.steampowered.com/appreviews/' + gameID
+ '?json=1&language=all&purchase_type=all').json()
except:
return 'Error', 'Error', 'Error', 'Error', 'Error'
totalVotes = gameReviews['query_summary']['total_reviews']
if totalVotes == 0:
return 0, 0, 0, 0, 0
else:
totalPositive = gameReviews['query_summary']['total_positive']
totalNegative = gameReviews['query_summary']['total_negative']
reviewScore = totalPositive / totalVotes
rating = reviewScore - (reviewScore - 0.5) * math.pow(2, - math.log10(totalVotes + 1))
return totalVotes, totalPositive, totalNegative, (round(reviewScore * 100)), (round(rating * 100))
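# Note on the rating above (descriptive): it follows the commonly cited SteamDB-style
# adjustment rating = score - (score - 0.5) * 2**(-log10(totalVotes + 1)), which pulls
# titles with few reviews toward 50% and converges to the raw score as votes grow.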
# Get the game tags from the Steam product page
def getGameTags(gameID):
gameTags = []
try:
getGameDetails = requests.get(parameters.gamePageData + gameID).json()
gameTag = getGameDetails[gameID]['data']['categories']
for tag in gameTag:
gameTags.append(tag['description'])
except:
gameTags.append('Page not found')
finally:
return ", ".join(gameTags)
# Scrape expected game length times per play style from the HowLongToBeat website
def getHowLongToBeat(name):
# Use checkList to verify Steam name searches against the HowLongToBeat database
# Using regex to exclude mismatches between steam and HowLongToBeat game names
name = re.sub(parameters.keyWords, ' ', name)
# Remove utf-8 coded text for better search results
encodeName = name.encode(encoding='ascii', errors='ignore')
decodeName = encodeName.decode()
try:
howLongToBeat = HtmlScraper().search(decodeName)[0]
avgSum = []
print(howLongToBeat.timeLabels) #############################################################################
gameplayMain = round(howLongToBeat.gameplayMain)
gameplayExtra = round(howLongToBeat.gameplayMainExtra)
gameplayComplete = round(howLongToBeat.gameplayCompletionist)
if gameplayMain != 0:
avgSum.append(gameplayMain)
if gameplayExtra != 0:
avgSum.append(gameplayExtra)
if gameplayComplete != 0:
avgSum.append(gameplayComplete)
avgMedian = round(sum(avgSum)/len(avgSum))
return gameplayMain, gameplayExtra, gameplayComplete, avgMedian, \
decodeName + ' -> ' + howLongToBeat.gameName
except:
print("ERROR! ITEM NOT FOUND -> " + decodeName)
return 'Error', 'Error', 'Error', 'Error', decodeName + ' -> NOT FOUND!'
# Get optional JSON lists for various Steam account statistics
def optionalLists():
# Get the Steam user and badge level
getSteamLvl = requests.get(parameters.steamLink + parameters.steamLevel + parameters.steamID
+ parameters.steamKey)
getBadgeLvl = requests.get(parameters.steamLink + parameters.badgeLevel + parameters.steamID
+ parameters.steamKey)
exportJSON([getSteamLvl.json(), getBadgeLvl.json()], name='user_level_steam')
# Get recently played STEAM games
getRecentlyPlayed = requests.get(parameters.steamLink + parameters.recentlyPlayed + parameters.steamID
+ parameters.steamKey)
exportJSON(getRecentlyPlayed.json(), name='recently_played_steam')
# Get game's Steam page details
getGameDetails = requests.get(parameters.gamePageData + parameters.gamePageID)
exportJSON(getGameDetails.json(), name='game_details')
# Bucket the game times into length ranges
def gameTimeRange(gameTime):
if gameTime == 'Error':
return 'Error'
elif gameTime == 0:
return '0'
elif gameTime > 0 and gameTime <= 5:
return '5'
elif gameTime > 5 and gameTime <= 10:
return '10'
elif gameTime > 10 and gameTime <= 20:
return '20'
elif gameTime > 20 and gameTime <= 30:
return '30'
elif gameTime > 30 and gameTime <= 40:
return '40'
elif gameTime > 40 and gameTime <= 50:
return '50'
elif gameTime > 50 and gameTime <= 60:
return '60'
elif gameTime > 60 and gameTime <= 70:
return '70'
elif gameTime > 70 and gameTime <= 80:
return '80'
elif gameTime > 80 and gameTime <= 90:
return '90'
elif gameTime > 90 and gameTime <= 100:
return '100'
else: # gameTime > 100:
return '100+'
def main():
begin_time = datetime.datetime.now() # TODO use timeit
# Return owned STEAM games list
getOwnedGames = (requests.get(parameters.steamLink + parameters.ownedGames + parameters.steamKey
+ parameters.steamID + parameters.textFormat + parameters.appInfo
+ parameters.freeGames)).json()
exportJSON(getOwnedGames, name='owned_games_steam')
# Get the total count of Steam games
gamesTotal = ["Total Steam game count: " + str(getOwnedGames['response']['game_count'])]
rowTags = ['count', 'ID', 'Game Name', 'Steam Playtime(h)', 'Main Story(h)', 'Extra Content(h)', 'Complete(h)',
'Average(h)', 'Total Votes', 'Total Positive', 'Total Negative', 'Score(%)', 'Steam db Score(%)',
'Tags',
'Story Range(h)', 'Extra Range(h)', 'Complete Range(h)', 'Average Range(h)']
# Get the game IDs, names, total playtime per title, total vote numbers, total positive and negative numbers
gameItems = []
checkList = []
gameList = (getOwnedGames['response']['games'])
oldCount = 0
print("Going through the user's Steam library")
for count, item in enumerate(gameList, 1):
# Show the progress of the list done in percentage till 100%
newCount = int(count / len(gameList) * 100)
if newCount != oldCount:
oldCount = newCount
print('--->List done: ' + str(newCount) + '%<---')
appID = item['appid']
name = item['name']
print(count, name) ############################################################################### remove this
steamMin = item['playtime_forever']
# Turn total playtime minutes into hours
# steamTime = str(int(steamMin / 60)) + 'h ' + str(steamMin % 60) + 'min' # use this to get hours and minutes
steamTime = str(round(steamMin/60))
story, extra, complete, avgMedian, gameName = getHowLongToBeat(name=(str(item['name'])))
total, positive, negative, score, steamScore = getGameReviews(str(item['appid']))
gameTags = getGameTags(str(item['appid']))
# Form the data
gameItems.append([count, appID, name, steamTime,
story, extra, complete, avgMedian,
total, positive, negative, score, steamScore,
gameTags,
gameTimeRange(story), gameTimeRange(extra), gameTimeRange(complete), gameTimeRange(avgMedian)
])
checkList.append(gameName)
# Export to JSON file
exportJSON((rowTags, gameItems), name='game_list')
exportJSON(checkList, name='check_list')
# Export to CSV file
with open('steam_catalog_list_data.csv', 'w', encoding='UTF8', newline='') as csvfile:
writer = csv.writer(csvfile)
# writer.writerow(gamesTotal)
writer.writerow(rowTags)
writer.writerows(gameItems)
print(datetime.datetime.now() - begin_time) # TODO use timeit
if __name__ == '__main__':
if parameters.optionalLists:
optionalLists()
if parameters.mainList:
main() |
from __future__ import print_function
from pyspark import SparkContext
import sys
def mapper(w):
arr = w.split(" ")
return (arr[0], int(broadcastVectorSmall.value[int(arr[1])-1].split(" ")[1]) * int(arr[2]))
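# Hedged note on the assumed input format (inferred from the parsing above, not
# documented here): each matrix line is "row col value" and each vector line is
# "index value", whitespace separated with 1-based indices; e.g. the matrix line
# "1 3 7" contributes 7 * (value of the vector entry with index 3) to output row "1".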
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage: matrix_small_c.py vectorfile matrixfile outputfile ", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="MyTestJob")
vector_small = sc.textFile(sys.argv[1])
# Give the vector in a correct order
broadcastVectorSmall = sc.broadcast(sorted(vector_small.collect(),key=lambda val: int(val.split( )[0])))
matrix_small = sc.textFile(sys.argv[2])
matrix_vector_small = matrix_small.map(mapper).reduceByKey(lambda x,y: int(x) + int(y))
matrix_vector_small.saveAsTextFile(sys.argv[3])
sc.stop() |
#!/usr/bin/python
'''
Created on Apr 16, 2014
@author: Lee Khan-Bourne
Template for calling Elasticsearch from within a CherryPy website
Modify functions 'objects', 'search' and 'setup_routes' to suit your needs
'''
import sys
sys.stdout = sys.stderr
import os, os.path
import string
import json
import atexit
import threading
import cherrypy
import pystache
from pystache import Renderer
renderer = Renderer()
import elasticsearch
from elasticsearch import Elasticsearch
es = Elasticsearch()
# Set up some globals (these should be put in a config file)
rootDir = os.path.dirname(os.path.realpath(__file__))
esIndex = 'myIndex'
cherrypy.config.update({'environment': 'embedded'})
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
cherrypy.engine.start(blocking=False)
atexit.register(cherrypy.engine.stop)
def api():
return file(rootDir + '/static/index.html')
def http_error_page(status, message, traceback, version):
'''
Serve up a standard http error screen with a customised message embedded
'''
fileHandle = open(rootDir + '/templates/http_error.html' ,'r')
contents = renderer.render(fileHandle.read(), {'responseCode': status, 'responseMessage': message})
return contents
def objects(objectType,objectId,*args,**kw):
'''
Search for and return an Elasticsearch document by id
'''
debug = application.config['global']['debug.on']
if debug:
cherrypy.log("Fetching %s with id %s" % (objectType, objectId))
res = es.get(index=esIndex, doc_type=objectType, id=objectId)
if res['found'] == True:
return json.dumps(res['_source'])
else:
raise cherrypy.HTTPError(404, message="The %s you have requested does not exist" % objectType)
def unicode_string(param):
return param.replace("%u","\u").decode('unicode-escape')
def search(objectType,*args,**kw):
'''
Search for and return one or more documents based on a free text string passed in as an argument called 'text'
'''
debug = application.config['global']['debug.on']
if 'text' not in kw:
raise cherrypy.HTTPError(404, message="Please provide some text to search by")
# Perform search on free text
cherrypy.log('Searching by programme name - for related objects')
queryObj = {
"query": {
"match" : {
"_all" : unicode_string(kw['text'])
}
}
}
if debug:
cherrypy.log(json.dumps(queryObj))
res = es.search(index=esIndex, body=queryObj)
if res['hits']['total'] > 0:
return json.dumps(res['hits']['hits'])
else:
raise cherrypy.HTTPError(404, message="Could not find any documents matching your criteria")
dispatcher = None
def setup_routes():
'''
Set up a number of regexp style url patterns that this application can handle
'''
d = cherrypy.dispatch.RoutesDispatcher()
d.connect('objects', '/{objectType:(type_one|type_two|type_three)}s/{objectId:([0-9]+)}', objects)
d.connect('search', '/search', search)
d.connect('root', '/', api)
dispatcher = d
return dispatcher
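# Hedged examples (illustrative values) of how the routes above resolve once the
# application is mounted at '/elastic' further down:
#   GET /elastic/type_ones/42      -> objects(objectType='type_one', objectId='42')
#   GET /elastic/search?text=query -> search(), with the free text arriving in **kw
#   GET /elastic/                  -> api()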
cherrypy.config.update({
'error_page.400' : http_error_page,
'error_page.404' : http_error_page,
'error_page.500' : http_error_page,
})
serverConf = {
'global': {
}
}
# Read the config defined above
# then override it with any entries in server.conf
cherrypy.config.update(serverConf)
cherrypy.config.update(rootDir + "/server.conf")
appConf = {
'/': {
'tools.sessions.on': True,
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
'request.dispatch': setup_routes()
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': rootDir + '/static'
},
'/favicon.ico': {
'tools.staticfile.on': True,
'tools.staticfile.filename': rootDir + '/static/favicon.ico'
}
}
application = cherrypy.Application(None, '/elastic', appConf)
application.merge(rootDir + '/app.conf')
|
import torch
import time
from collections import OrderedDict
# Using a Ryzen 3600 for the benchmark (single core), compared to the Snapdragon 850 on HL2
# https://gadgetversus.com/processor/qualcomm-sdm850-snapdragon-850-vs-amd-ryzen-5-3600/
def test_cpu_inference(test_generator, model, params, best_model_str=None):
# Force batch size to 1 for inference time computations
params['batch_size'] = 1
CPU = torch.device('cpu')
batch_progress = 0
total_time = 0
frame_count = 0
test_acc = 0
test_num_of_images = 0
# Using cross entropy loss function
loss_fn = torch.nn.CrossEntropyLoss(
reduction='sum')
# Limit evaluation to 500 test samples to keep the CPU timing run short
total_num_test = 500 # use a short sequence of test data for speed (will not replicate the actual accuracy)
print('total_num_test:', total_num_test)
all_preds = []
all_gts = []
# To CPU
model.to(CPU)
# Load the best weights from training if not none
if best_model_str != None:
state_dict = torch.load(best_model_str)
# https://discuss.pytorch.org/t/solved-keyerror-unexpected-key-module-encoder-embedding-weight-in-state-dict/1686/13
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove 'module.' of dataparallel
new_state_dict[name]=v
model.load_state_dict(new_state_dict)
# Testing inference speed on CPU
model.eval()
with torch.no_grad():
for local_batch, local_labels in test_generator:
# If we have reached frame count, break
if frame_count >= total_num_test:
break
local_batch, local_labels = \
local_batch.to(CPU), \
local_labels.to(CPU)
begin_time = time.time()
# Model inference to get output
local_output = model(local_batch)
time_diff = time.time() - begin_time
# Reshape to [batch_size, n_classes]
local_output = local_output[params['seq_len'] - 1::params['seq_len']]
# Scale the raw output probabilities
Sm = torch.nn.Softmax(dim=1)
local_output = Sm(local_output)
# print('local_output_softmax:', local_output)
# Get max value of model as prediction
possibility, preds = torch.max(local_output.data, 1)
loss = loss_fn(local_output, local_labels)
# Append the predictions and labels
for i in range(len(preds)):
all_preds.append(preds[i])
all_gts.append(local_labels.data[i])
# Increment counter
frame_count += 1
# Increment correct result count and loss
test_acc += torch.sum(preds == local_labels.data)
total_time += time_diff
test_num_of_images += total_num_test
batch_progress += 1
# If we have reached the end of a batch
if batch_progress * params['batch_size'] >= total_num_test:
percent = 100.0
print('\rTest progress: {:.2f} % [{} / {}]'.format(
percent,
total_num_test,
total_num_test),
end='\n')
else:
percent = batch_progress * params['batch_size'] / total_num_test * 100
print('\rTest progress: {:.2f} % [{} / {}]'.format(
percent,
batch_progress * params['batch_size'],
total_num_test),
end='')
# Each loop inference time
# print('Inference time: {:.3f}ms'.format(time_diff / params['batch_size'] * 1000))
test_acc = float(test_acc) / total_num_test
print('Average inference: [{:.3f} ms] Acc: [{:.4f}]'.format(
(total_time / (batch_progress * params['batch_size']))*1000,
test_acc))
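# Hedged usage sketch (hypothetical names; the generator, model and params come from
# the caller's training setup):
#     params = {'batch_size': 1, 'seq_len': 8}
#     test_cpu_inference(test_generator, trained_model, params,
#                        best_model_str='checkpoints/best_model.pth')
# This forces batch_size to 1, strips the 'module.' DataParallel prefix from the
# checkpoint keys, and reports mean per-frame CPU latency and accuracy over the
# first 500 test frames.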
|
# bubble
def bubble(list1):
tmpLen = len(list1)
for i in range(tmpLen):
for j in range(1, tmpLen - i):
if list1[j] < list1[j-1]:
list1[j], list1[j-1] = list1[j-1], list1[j]
print(i, j, list1)
return list1
# Find the first non-repeating character in a string
def findFirstAlpha(s):
tmps = []
for i in range(len(s)):
print(tmps)
if s[i] not in s[:i]:
tmps.append(s[i])
else:
tmps.remove(s[i])
ret = tmps[0] if len(tmps) else None
print(ret)
return ret
def isCardStraight(cards):
tmpCards = [0] * 10 ## todo JQK
if not isinstance(cards, list) or len(cards) != 5:
raise ValueError("please correct cards value")
for card in cards:
tmpCards[card-1] = card
print(tmpCards)
for i, v in enumerate(tmpCards):
print(i, v)
if v != 0 and i < 6 and 0 not in tmpCards[i:i+5]: # i < 6 keeps the 5-card window inside the 10-slot list
return True
return False
def isStraight(cards):
if 1 in cards and 13 in cards:
cards[cards.index(1)]=14
if 3 == sum(set([abs(x-sum(cards)/5) for x in cards])):
return True
else:
return False
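# Note on isStraight (descriptive): for five consecutive values the mean equals the
# middle card, so the absolute deviations from the mean are 2, 1, 0, 1, 2; as a set
# that is {0, 1, 2}, whose sum is 3. The ace (1) is promoted to 14 when a 13 (king)
# is present so that 10-J-Q-K-A is also recognized. This condition is necessary but
# not strictly sufficient; some non-consecutive hands can also sum to 3.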
if __name__ == "__main__":
tmplist = [5, 4, 3, 2, 1]
tmplist = [11, 10, 12, 13, 1]
# print(bubble((tmplist)))
# s = '1234123'
# findFirstAlpha(s)
# print(isCardStraight(tmplist))
print(isStraight(tmplist))
|
import bisect
import decimal
from collections import namedtuple
from decimal import Decimal
from typing import Counter, Dict, List, Tuple
ArithmeticEncodingReturn = namedtuple(
"ArithmeticEncodingReturn", "decimal probability_table end_of_sequence")
def __init_range_table(probability_table:Dict[str,Decimal]):
range_table = {}
prev = Decimal(0)
for symbol, probability in probability_table.items():
range_table[symbol] = [prev, prev + probability]
prev += probability
return range_table
def __update_range_table(range_table: Dict[str, List[Decimal]],
probability_table: Dict[str, Decimal],
symbol: str) -> Tuple[Decimal, Decimal]:
"""
Update the range_table (in place), rescaling every range to the width of the given symbol's range
Parameters
----------
range_table : Dict[str, List[Decimal]]
A Dictionary mapping symbols to ranges:
Example:
{
"A":[Decimal(0.4), Decimal(0.5)],
"B":[Decimal(0.5), Decimal(0.7)],
}
probability_table : Dict[str, Decimal]
symbol : str
Returns
-------
Tuple[Decimal, Decimal]
[description]
"""
lower, upper = range_table[symbol]
width = upper - lower
prev = lower
for symbol, probability in probability_table.items():
range_table[symbol] = [prev, prev + probability * width]
prev += probability * width
upper = prev
return lower, upper
def __minimize_entropy(lower: Decimal, upper: Decimal) -> Decimal:
"""
Given the lower and upper bounds, find an x with lower <= x < upper
that has the shortest decimal representation
Parameters
----------
lower : Decimal
upper : Decimal
Returns
-------
Decimal
"""
jump = Decimal("0.1")
while True:
start = lower.quantize(jump)
while start < upper:
if lower <= start < upper: return start
start += jump
jump /= Decimal(10)
def __create_probability_table(symbol_list):
frequency_table = Counter(symbol_list)
N = sum(frequency_table.values())
assert N != 0
return {
symbol: Decimal(freq) / N
for symbol, freq in frequency_table.items()
}
def encode(
symbol_list: List[str],
end_of_sequence: str,
precision: int = 100,
probability_table: Dict[str,
float] = None) -> ArithmeticEncodingReturn:
"""
Uses the Arithmetic Coding algorithm
### Note
The method relies on 'precision' of Decimal from the python Standard Library.
If data is large, complete decoding might not be possible
Parameters
----------
symbol_list : List[str]
List of symbols
end_of_sequence : str
A special symbol that occurs only at the end of the sequence.
Both probability_table (if given) and symbol_list must contain this symbol
precision : int, optional, by default 100
The precision of Decimal, will be set as:
getcontext().prec = precision
probability_table : Dict, optional, by default None
A dictionary mapping symbols to their probabilities
Example:
{
"R": 0.4,
"G": 0.5,
"B": 0.1,
}
If None, one will be calculated for you by using the frequency of input data
Returns
-------
ArithmeticEncodingReturn
= namedtuple("ArithmeticEncodingReturn", "decimal probability_table end_of_sequence")
decimal = A string containing the decimal representation of a fraction, with prefix "0." removed
probability_table = Same as input, with floats casted to Decimal
end_of_sequence = Same as input
"""
assert end_of_sequence in symbol_list
decimal.getcontext().prec = precision
if probability_table is None:
decimal_probability_table = __create_probability_table(symbol_list)
else:
# Since the user has given probability_table, it might not contain end_of_sequence character
assert end_of_sequence in probability_table
decimal_probability_table = {
k: Decimal(str(v))
for k, v in probability_table.items()
}
# Check if sum of probabilities is 1
assert abs(sum(decimal_probability_table.values()) -
Decimal("1")) < Decimal("1e-5")
range_table = __init_range_table(decimal_probability_table)
for symbol in symbol_list:
lower, upper = __update_range_table(range_table,
decimal_probability_table, symbol)
return ArithmeticEncodingReturn(
str(__minimize_entropy(lower, upper))[2:], decimal_probability_table,
end_of_sequence)
def decode(encoded: ArithmeticEncodingReturn,
precision: int = 100) -> List[str]:
"""
Decode an Arithmetic Encoded sequence and return a list of symbols
# Warning:
If precision is too low, the sequence will not reach end_of_sequence and
hence run into an infinite loop
Parameters
----------
encoded : ArithmeticEncodingReturn
ArithmeticEncodingReturn = namedtuple(
"ArithmeticEncodingReturn", "decimal probability_table end_of_sequence")
precision : int
The precision of Decimal, will be set as:
getcontext().prec = precision
Returns
-------
List[str]
List containing symbols (including end_of_sequence) from encoded.probability_table
"""
decimal.getcontext().prec = precision
fraction = Decimal("0." + encoded.decimal)
probability_table = encoded.probability_table
range_lower = [] # Stores the CDF
range_symbol = [] # Stores the corresponding symbol
prev = Decimal(0)
for symbol, probability in probability_table.items():
range_lower.append(prev)
range_symbol.append(symbol)
prev += probability
output = []
symbol = None
while symbol != encoded.end_of_sequence:
# Use Binary Search to find closest symbol
v = bisect.bisect_right(range_lower, fraction) - 1
symbol = range_symbol[v]
output.append(symbol)
fraction -= range_lower[v]
fraction /= probability_table[symbol]
return output
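# Hedged usage sketch (illustrative only): a tiny encode/decode round trip using a
# frequency-derived probability table and "$" as the end-of-sequence marker.
if __name__ == "__main__":
    message = list("ABRACADABRA") + ["$"]
    encoded = encode(message, end_of_sequence="$")
    decoded = decode(encoded)
    print("encoded digits:", encoded.decimal)
    print("round trip ok:", decoded == message)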
|
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2020
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
#
# Site: https://python.nilo.pro.br/
#
# File: exercicios3\capitulo 05\exercicio-05-13.py
##############################################################################
dívida = float(input("Dívida: "))
taxa = float(input("Juros (Ex.: 3 para 3%): "))
pagamento = float(input("Pagamento mensal:"))
mês = 1
if (dívida * (taxa/100) > pagamento):
print("Sua dívida não será paga nunca, pois os juros são superiores ao pagamento mensal.")
else:
saldo = dívida
juros_pago = 0
while saldo > pagamento:
juros = saldo * taxa / 100
saldo = saldo + juros - pagamento
juros_pago = juros_pago + juros
print(f"Saldo da dívida no mês {mês} é de R${saldo:6.2f}.")
mês = mês + 1
print(f"Para pagar uma dívida de R${dívida:8.2f}, a {taxa:5.2f} % de juros,")
print(f"você precisará de {mês - 1} meses, pagando um total de R${juros_pago:8.2f} de juros.")
print(f"No último mês, você teria um saldo residual de R${saldo:8.2f} a pagar.")
|
import unittest
from deep_find_all import DFS, BFS
class TestDeepSearch(unittest.TestCase):
graph = {'A': ['D', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C']}
def test_depth_first_search_when_no_such_vertice_in_the_graph_then_return_empty_list(self):
dfs = DFS()
expected = []
self.assertEqual(dfs.depth_first_search_all(self.graph, 'Z'), expected)
def test_breadth_first_search_when_no_such_vertice_then_return_empty_list(self):
bfs = BFS()
expected = []
self.assertEqual(bfs.breadth_first_search_all(self.graph, 'Z'), expected)
def test_breadth_first_search_when_there_is_such_vertice_then_return_list_of_all_values_containing_it(self):
bfs = BFS()
expected = [['A', 'B', 'D'], ['A', 'F', 'G'], ['A', 'D', 'E']]
self.assertEqual(bfs.breadth_first_search_all(self.graph, 'A'), expected)
def test_depth_first_search_when_there_is_such_vertice_then_return_list_of_all_values_containing_it(self):
dfs = DFS()
expected = [['A', 'D', 'E'], ['A', 'F', 'G'], ['A', 'B', 'D']]
self.assertEqual(dfs.depth_first_search_all(self.graph, 'A'), expected)
def test_breadth_and_depth_first_search_for_the_same_vertice_and_compare_the_results_then_return_true(self):
bfs = BFS()
dfs = DFS()
bfs_result = bfs.breadth_first_search_all(self.graph, 'A')
dfs_result = dfs.depth_first_search_all(self.graph, 'A')
        # To compare for equality regardless of traversal order, compare sorted copies.
        # (list.sort() sorts in place and returns None, so asserting on its return
        # value would always pass trivially.)
        self.assertEqual(sorted(bfs_result), sorted(dfs_result))
if __name__ == '__main__':
unittest.main() |
# -*- coding: utf-8 -*-
#
# voldemort_manipulate.py
#
# Jan/27/2012
import voldemort
import json
import sys
#
# ------------------------------------------------------------
def voldemort_to_dict_proc (client):
keys = {"t3051","t3052","t3053",
"t3054","t3055","t3056",
"t3057","t3058","t3059"}
dict_aa = {}
for key in keys:
resp = client.get (key)
if (0 < len (resp)):
json_str = resp[0][0]
unit_aa = json.loads (json_str)
dict_aa[key] = unit_aa
# name = unit_aa['name'].encode ('utf-8')
# print key,name,unit_aa['population'],unit_aa['date_mod']
return dict_aa
# ------------------------------------------------------------
|
# this test file takes an encoded email, decodes it, parses out order information and saves it to the database
# it also changes the status of the messages db from "need to scrape" to "scraped"
#
#
#
#required encoding for scraping, otherwise defaults to unicode and screws things up
from bs4 import BeautifulSoup
import requests
import sys
import json
#reload(sys);
#sys.setdefaultencoding("utf8")
import re
import pandas as pd
import pprint
import numpy as np
import csv, sys
import base64
import datefinder
import pymongo
from pymongo import MongoClient
uri = 'mongodb://heroku_4jtg3rvf:[email protected]:61503/heroku_4jtg3rvf'
client = MongoClient(uri)
db = client['heroku_4jtg3rvf']
#client = MongoClient('mongodb://localhost:27017/test')
#db = client.test
#Read data from stdin
def read_in():
lines = sys.stdin.readlines()
# Since our input would only be having one line, parse our JSON data from that
return json.loads(lines[0])
def main():
#get our data as an array from read_in()
lines = read_in()
print(lines[0] + lines[1])
db.order_info_item_scrapes.update_many(
{"order_num": lines[0], "item_name": lines[1]},
{"$set": {"status": "contacted"}}
)
print('MIMZEY did it')
# Start process
if __name__ == '__main__':
main()
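# Hedged usage sketch (editorial note): the script expects a single JSON array on
# stdin holding [order_num, item_name]; the script file name below is illustrative only.
#   echo '["12345", "Blue Widget"]' | python scrape_orders.py
# This would mark that order/item pair as "contacted" in order_info_item_scrapes.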
|
# LEARNING HOW TO USE AXES IN NUMPY
import numpy as np
def titulo(texto=''):
print('-' * 40)
print(texto.upper().center(40))
print('-' * 40)
##########################
# 1-DIMENSIONAL ARRAYS   #
##########################
titulo('uma dimensão')
one_dimensional = np.array([1, 1, 2, 3, 5])
print('Matriz:\n', one_dimensional)
sum_one_dimensional = one_dimensional.sum(0)
print('Soma dos elementos (axis=None):', sum_one_dimensional)
# returns the same result:
sum_one_dimensional = one_dimensional.sum(axis=0)
print('Soma dos elementos (axis=0):', sum_one_dimensional)
# raises an error:
# sum_one_dimensional = one_dimensional.sum(axis=1)
# print(sum_one_dimensional)
# >>> numpy.AxisError: axis 1 is out of bounds for array of dimension 1
###########################
# 2-DIMENSIONAL ARRAYS    #
###########################
titulo('duas dimensões')
two_dimensional = np.array(
[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
]
)
print('Matriz:\n', two_dimensional)
# sums every element of the array:
sum_two_dimensional = two_dimensional.sum()
print('Soma de todos os elementos da matriz (axis=None):', sum_two_dimensional)
# sums the elements of each column
sum_two_dimensional = two_dimensional.sum(0)
print('Soma dos elementos das colunas (axis=0):', sum_two_dimensional)
# sums the elements of each row:
sum_two_dimensional = two_dimensional.sum(1)
print('Soma dos elementos das linhas (axis=1):', sum_two_dimensional)
# raises an error
# sum_two_dimensional = two_dimensional.sum(2)
# print(sum_two_dimensional)
# >>> numpy.AxisError: axis 2 is out of bounds for array of dimension 2
###########################
# 3-DIMENSIONAL ARRAYS    #
###########################
titulo('três dimensões')
three_dimensional = np.array(
[
[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
],
[
[9, 10, 11],
[12, 13, 14],
[15, 16, 17]
]
]
)
print('Matriz:\n', three_dimensional)
# sums every element of the array
sum_three_dimensional = three_dimensional.sum()
print('\nSoma de todos os elementos da matriz (axis=None):', sum_three_dimensional)
# soma "de cima para baixo"
sum_three_dimensional = three_dimensional.sum(0)
print('''\nSoma "de cima para baixo" (axis=0):\n''', sum_three_dimensional)
# soma "de frente para o fundo"
sum_three_dimensional = three_dimensional.sum(1)
print('''\nSoma "de frente para o fundo" (axis=1):\n''', sum_three_dimensional)
# soma "da esquerda para a direita"
sum_three_dimensional = three_dimensional.sum(2)
print('''\nSoma "da esquerda para a direita" (axis=2):\n''', sum_three_dimensional)
# raises an error
# sum_three_dimensional = three_dimensional.sum(3)
# print(sum_three_dimensional)
# >>> numpy.AxisError: axis 3 is out of bounds for array of dimension 3
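# Editorial addition: the same axis rule stated in terms of shapes.
# Summing over an axis removes that axis from the result; a tuple of axes
# removes all of them (plain NumPy behaviour, nothing specific to this file).
titulo('shapes')
print('Shape of three_dimensional:', three_dimensional.shape)                      # (2, 3, 3)
print('Shape after sum(axis=0):', three_dimensional.sum(axis=0).shape)             # (3, 3)
print('Shape after sum(axis=(1, 2)):', three_dimensional.sum(axis=(1, 2)).shape)   # (2,)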
|
# Dynamic Programming Python implementation of Min Cost Path
# problem
R = 3
C = 3
def minCost(cost, m, n):
    # Instead of the following line, we could use int tc[m+1][n+1] or
    # dynamically allocate memory to save space. The following
    # line is used to keep the program simple and make it work
    # on all compilers.
tc = [[0 for x in range(C)] for x in range(R)]
tc[0][0] = cost[0][0]
# Initialize first column of total cost(tc) array
for i in range(1, m+1):
tc[i][0] = tc[i-1][0] + cost[i][0]
# Initialize first row of tc array
for j in range(1, n+1):
tc[0][j] = tc[0][j-1] + cost[0][j]
# Construct rest of the tc array
for i in range(1, m+1):
for j in range(1, n+1):
tc[i][j] = min(tc[i-1][j-1], tc[i-1][j], tc[i][j-1]) + cost[i][j]
return tc[m][n]
# Driver program to test above functions
cost = [[1, 2, 3],
[4, 8, 2],
[1, 5, 3]]
print(minCost(cost, 2, 2))
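# Worked trace for the driver input above (editorial note): the filled tc table is
#   tc = [[1, 3,  6],
#         [5, 9,  5],
#         [6, 10, 8]]
# so minCost(cost, 2, 2) prints 8, via the path 1 -> 2 -> 2 -> 3 (right, diagonal, down).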
#O(mn) |
# encoding: utf-8
"""
math
~~~~
This is an example for a parser that parses and evaluates mathematical
expressions.
:copyright: 2015 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import print_function
import re
import sys
from operator import itemgetter
from pratt import Grammar, Parser
token_re = re.compile(r"""
(?P<int>\d+)|
(?P<add>\+)|
(?P<sub>-)|
(?P<mul>\*)|
(?P<div>/)|
(?P<left_paren>\()|
(?P<right_paren>\))|
(?P<whitespace>\s+)
""", re.VERBOSE)
def tokenize(string):
"""
This returns an iterator yielding tuples consisting of a type and a lexeme.
Possible types are `int`, `add`, `sub`, `mul`, `div`, `left_paren`,
`right_paren` and `end`. Lexemes are always strings.
"""
for match in token_re.finditer(string):
for type, lexeme in match.groupdict().items():
if lexeme is None or type == 'whitespace':
continue
yield type, lexeme
break
yield 'end', ''
class SyntaxError(Exception):
pass
def handle_unexpected_token(token):
"""
Called when the parser encounters a token it doesn't know about.
"""
raise SyntaxError('unexpected token: {!r}'.format(token[0]))
grammar = Grammar(itemgetter(0), handle_unexpected_token)
# The end token exists only as an indicator, we are not using it anywhere and
# are therefore not associating it with anything. Nevertheless we have to tell
# the parser that it's a token that exists and might appear.
grammar.symbol('end')
@grammar.literal('int')
def handle_int(token):
# We evaluate the mathematical expression as part of the parsing process,
# therefore we simply turn the lexeme (remember, that's the second element in
# the tuple) into a Python int.
#
# In the "real world" we would probably want our parser to return an AST,
# that can be inspected instead.
return int(token[1])
@grammar.prefix('add', 100)
def prefix_add(token, operand):
# Called when + is used as a prefix operator. `operand` is something that
# was returned by one of our parsing functions.
#
# We define this operator to have a left binding power of 100 because we
# want it to bind more tightly than infix operators and we don't want to
# bother carefully considering how much "space" we need below that for
# other operators.
return +operand
@grammar.prefix('sub', 100)
def prefix_sub(token, operand):
return -operand
@grammar.infix('add', 10)
def infix_add(token, left, right):
# This function implements the addition operator. We define addition to
# have a left binding power of 10.
#
# The default binding power is 0, so we could have gone with 1 as well but
# using a larger one allows us to potentially squeeze in something else at
# a later date without having to change binding powers everywhere.
return left + right
@grammar.infix('sub', 10)
def infix_sub(token, left, right):
return left - right
@grammar.infix('mul', 20)
def infix_mul(token, left, right):
# This is almost the same as our addition and subtraction operators. The
    # only real difference is that the left binding power is higher at 20.
# This tells the parser that multiplication binds more tightly, so that
# 1 + 1 * 2 is evaluated to 3 as opposed to 4.
return left * right
@grammar.infix('div', 20)
def infix_div(token, left, right):
return left // right
@grammar.enclosing('left_paren', 'right_paren', 0)
def parenthesis(left_paren, right_paren, body):
return body
def evaluate(string):
"""
    Evaluates a mathematical expression. Available operators are `+`, `-`, `*`
    and `/`. Parentheses are supported. The only available numbers are
integers. Whitespace is ignored.
"""
tokenizer = tokenize(string)
parser = Parser(grammar, tokenizer)
return parser.parse()
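# A few expected results given the binding powers above (editorial sketch):
#   evaluate("1 + 2 * 3")    -> 7   (mul binds tighter than add)
#   evaluate("(1 + 2) * 3")  -> 9   (parentheses override precedence)
#   evaluate("7 / 2")        -> 3   (division is integer division via //)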
if __name__ == '__main__':
expression = ' '.join(sys.argv[1:])
print('> {}'.format(expression))
print(evaluate(expression))
|
# -*- coding: utf-8 -*-
import sys
import random
def makerandomline(n = None):
if n is None:
n = random.randrange(1, 11)
s = [random.randrange(100) for i in range(n)]
s = [sum(s)] + s
s = [str(x) for x in s]
return ('\t'.join(s) + '\n').encode('utf-8')
def generate():
with open('test1.txt', 'wb') as f:
for _ in range(20000):
line = makerandomline()
f.write(line)
def generatemore():
with open('test2.txt', 'wb') as f:
for _ in range(19999):
line = makerandomline()
f.write(line)
line = makerandomline()
f.write(line[:-1])
with open('test3.txt', 'wb') as f:
line = makerandomline(2000)
f.write(line)
with open('test4.txt', 'wb') as f:
pass
with open('test5.txt', 'wb') as f:
f.write(b'\n')
with open('test6.txt', 'wb') as f:
line = makerandomline(2000000)
f.write(line)
with open('test7.txt', 'wb') as f:
for _ in range(500):
line = makerandomline(3000)
f.write(line)
def generateevenmore():
with open('test8.txt', 'wb') as f:
for _ in range(20000000):
line = makerandomline()
f.write(line)
if __name__ == '__main__':
generate()
generatemore()
#generateevenmore()
|
from django.db import models
class ToDoList(models.Model):
name = models.CharField(max_length=256, unique=True, db_index=True) # Natural key.
def __str__(self):
return self.name
class ToDoItem(models.Model):
title = models.CharField(max_length=256) # Allowing duplicates in the list.
completed = models.BooleanField(default=False)
parent_list = models.ForeignKey(ToDoList, on_delete=models.CASCADE,
related_name='todos')
class Meta:
order_with_respect_to = 'parent_list'
def __str__(self):
return self.title
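# Hedged usage sketch (editorial note): order_with_respect_to makes Django add
# get_todoitem_order()/set_todoitem_order() to ToDoList for per-list ordering, e.g.:
#   lst = ToDoList.objects.create(name="Groceries")
#   a = ToDoItem.objects.create(title="Milk", parent_list=lst)
#   b = ToDoItem.objects.create(title="Eggs", parent_list=lst)
#   lst.set_todoitem_order([b.pk, a.pk])  # reorder items within this list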
|
from . import asynconnect
def set_timeout (timeout):
for each in (asynconnect.AsynConnect, asynconnect.AsynSSLConnect, asynconnect.AsynSSLProxyConnect):
each.keep_alive = timeout
each.zombie_timeout = timeout
|
import os
from django.conf import settings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, preprocessing
import datetime as dt
companies_list = [
{'value':"AMBUJACEM", 'name':"Ambuja Cement"},
{'value':"ASIANPAINT", 'name':"Asian Paints"},
{'value':"BANKBARODA", 'name':"Bank Of Baroda"},
{'value':"HDIL", 'name':"Housing Development & Infrastructure Ltd."},
{'value':"HEROMOTOCO", 'name':"Hero Motor Corporation"},
{'value':"HINDUNILVR", 'name':"Hindustan Unilever"},
{'value':"INFY", 'name':"Infosys"},
{'value':"ITC", 'name':"ITC"},
{'value':"MARUTI", 'name':"Maruti Suzuki Ltd."},
{'value':"TCS", 'name':"Tata Consultancy Services"},
]
company_namees = {
"AMBUJACEM":"Ambuja Cement",
"ASIANPAINT" :"Asian Paints",
"BANKBARODA" :"Bank Of Baroda",
"HDIL" :"Housing Development & Infrastructure Ltd.",
"HEROMOTOCO" :"Hero Motor Corporation",
"HINDUNILVR" :"Hindustan Unilever",
"INFY" :"Infosys",
"ITC" :"ITC",
"MARUTI" :"Maruti Suzuki Ltd.",
"TCS" :"Tata Consultancy Services"
}
def predict(company):
    # Prediction entry point: receives the company code as input, e.g. TCS, INFY, HEROMOTOCO
'''
:param company: company code
:return: list
Output is a list with dictionaries
Eg:
[
{'day':"Tomorrow", 'value':"58.55"},
{'day':"5 days later", 'value':"58.55"},
{'day':"10 days later", 'value':"58.55"},
{'day':"15 days later", 'value':"58.55"},
.
.
.
.
]
'''
print("Company name"+company)
#dict1={'AMBUJACEM':'.csv','INFY':'infosys2.csv'}
FEATURES =['Close','X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','X11','X12','X13','X14','X15','X16','X17','X18','X19','X20','X21','X22','X23','X24','M5','M10','M15','M20','One Day Momentum','Five Day Momentum','Ten Day Momentum','Fifteen Day Momentum','Twenty Day Momentum']
#df = pd.DataFrame.from_csv(dict1[company])
name=company+".csv"
#df=pd.DataFrame.from_csv(name)
df = pd.read_csv(settings.MEDIA_ROOT + name)
#test_size = 200
df=df.replace([np.inf,-np.inf],np.nan)
df=df.replace('#DIV/0!',np.nan)
df=df.dropna()
predictions=[]
list_index=['Next Day Price','5 Day Price','10 Day Price','15 Day Price','20 Day Price']
'''
for h in list_index:
if(h=='Next Day Price'):
X=np.array(df[FEATURES].values)
y=(df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-1],y[40:len(df)-1])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
elif(h=='5 Day Price'):
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-5],y[40:len(df)-5])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
elif(h=='10 Day Price'):
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-10],y[40:len(df)-10])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
elif(h=='15 Day Price'):
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-15],y[40:len(df)-15])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
else:
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-20],y[40:len(df)-20])
price=reg.predict(X[len(df)-1])[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
print(predictions)
'''
for h in list_index:
X = np.array(df[FEATURES].values)
y = (df[h].values)
reg=linear_model.Lasso(alpha=0.1)
reg.fit(X[40:len(df)-20],y[40:len(df)-20])
data = np.array(X[len(df)-1])
data = data.reshape(1, -1)
price=reg.predict(data)[0]
dict2={'day':"",'value':""}
print(X[len(df)-1])
dict2['day']=h
dict2['value']=price
predictions.append(dict2)
'''
predictions = [
{'day':"Tomorrow", 'value':"58.55"},
{'day':"5 days later", 'value':"58.55"},
{'day':"10 days later", 'value':"58.55"},
{'day':"15 days later", 'value':"58.55"},
]
'''
return predictions
def stock_prices(company):
    # return the stock prices and dates as a list of dicts with 'day' and 'value' keys
# dict1={'AMB':'ambuja2.csv','INFY':'infosys2.csv'}
name=company+'.csv'
    df = pd.read_csv(settings.MEDIA_ROOT + name)
#test_size = 200
#df=df.replace([np.inf,-np.inf],np.nan)
#df=df.replace('#DIV/0!',np.nan)
#df=df.dropna()
#print(df.keys())
#print(df['Date'][1])
prices=[]
for i in range(0,len(df)):
dict2={'day':"",'value':""}
dict2['day']=df['Date'][i]
dict2['value']=df['Close'][i]
prices.append(dict2)
# print(prices)
return prices
def company_latest(company):
#details = {'name': "AMNUJA CEMENTS", 'price':"222.5", 'change': "+", 'change_price':"22.5"}
details={'name':"",'price':"",'change':"","change_price":"","stock_name":""}
name=company+".csv"
#df=pd.DataFrame.from_csv(name)
df = pd.read_csv(settings.MEDIA_ROOT + name)
#df=pd.DataFrame.from_csv("ambuja2.csv")
df1=df.tail(1)
x=df1.index[0]
print(x)
print(df['Date'][x])
p1=df['Close'][x]
p2=df['Close'][x-1]
diff=p1-p2
sign=""
if(p1-p2>=0):
sign="+"
elif(p1-p2<0):
sign="-"
details['name']=company_namees[company]
details['stock_name']=company+".NS"
details['price']=p1
details['change']=sign
details['change_price']=diff
return details
def plot(company):
#dict1={'AMB':'ambuja2.csv','INFY':'infosys2.csv'}
name=company+".csv"
df = pd.read_csv(settings.MEDIA_ROOT + name)
print(df.keys())
print(settings.MEDIA_ROOT)
df=df.replace([np.inf,-np.inf],np.nan)
df=df.replace('#DIV/0!',np.nan)
df=df.dropna()
dates=[]
prices=[]
for i in range(0,len(df)):
dates.append(df['Date'][i])
prices.append(df['Close'][i])
x = [dt.datetime.strptime(d,'%d-%m-%Y').date() for d in dates]
fig = plt.figure(1)
ax1 = fig.add_subplot(111)
ax1.plot(x,prices)
#plt.plot(x,prices)
#plt.show()
fig.savefig("prices.png")
print("plot")
def recommend(amount):
price_change = []
for commpany in companies_list:
temp = {}
predictions = predict(commpany['value'])
twenty_day = float((predictions[4]['value']))
next_day = float((predictions[0]['value']))
temp['company'] = commpany['value']
temp['change'] = twenty_day - next_day
temp['curr']=next_day
price_change.append(temp)
sorted_price_change = sorted(price_change, key=lambda k: k['change'], reverse=True)
initial_seed = float(amount)
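    # Split the seed capital 50/30/20 across the three stocks with the largest
    # predicted 20-day gain, then buy whole shares at the predicted next-day price.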
x_50=initial_seed*0.5
x_30=initial_seed*0.3
x_20=initial_seed*0.2
val1=int(x_50/sorted_price_change[0]['curr'])
val2=int(x_30/sorted_price_change[1]['curr'])
val3=int(x_20/sorted_price_change[2]['curr'])
suggestions = []
if float(sorted_price_change[2]['change']) > 0:
for c in companies_list:
temp = {}
if c['value'] == sorted_price_change[0]['company']:
temp['company_name'] = c['name']
temp['amount'] = val1*sorted_price_change[0]['curr']
temp['number_of_stocks']=val1
temp['profit'] = (sorted_price_change[0]['change'] * val1)
suggestions.append(temp)
elif c['value'] == sorted_price_change[1]['company']:
temp['company_name'] = c['name']
temp['amount'] = val2*sorted_price_change[1]['curr']
temp['number_of_stocks']=val2
temp['profit'] = (sorted_price_change[1]['change'] * val2)
suggestions.append(temp)
elif c['value'] == sorted_price_change[2]['company']:
temp['company_name'] = c['name']
temp['amount'] = val3*sorted_price_change[2]['curr']
temp['number_of_stocks']=val3
temp['profit'] = (sorted_price_change[2]['change'] * val3)
suggestions.append(temp)
elif float(sorted_price_change[1]['change']) > 0 and float(sorted_price_change[2]['change']) <= 0:
for c in companies_list:
temp = {}
if c['value'] == sorted_price_change[0]['company']:
temp['company_name'] = c['name']
temp['amount'] = val1*sorted_price_change[0]['curr']
temp['number_of_stocks']=val1
temp['profit'] = (sorted_price_change[0]['change'] * val1)
suggestions.append(temp)
elif c['value'] == sorted_price_change[1]['company']:
temp['company_name'] = c['name']
temp['amount'] = val2*sorted_price_change[1]['curr']
temp['number_of_stocks']=val2
temp['profit'] = (sorted_price_change[1]['change'] * val2)
suggestions.append(temp)
elif float(sorted_price_change[1]['change']) <= 0 and float(sorted_price_change[2]['change']) <= 0:
for c in companies_list:
temp = {}
if c['value'] == sorted_price_change[0]['company']:
temp['company_name'] = c['name']
temp['amount'] = val1*sorted_price_change[0]['curr']
temp['number_of_stocks']=val1
temp['profit'] = (sorted_price_change[0]['change'] * val1)
suggestions.append(temp)
elif float(sorted_price_change[0]['change']) < 0 or len(suggestions)<2:
temp = {}
temp['company_name'] = "Don't Invest in anymore companies"
temp['amount'] = 0
temp['number_of_stocks'] = 0
temp['profit'] = -1
suggestions.append(temp)
print(len(suggestions))
total_amount_invested = 0
for i in suggestions:
total_amount_invested+=float(i['amount'])
i['amount'] = round(i['amount'], 2)
i['profit'] = round(i['profit'], 2)
amount_not_invested = float(amount) - float(total_amount_invested)
output = {}
output['total_amount_invested'] = round(total_amount_invested, 2)
output['amount_not_invested'] = round(amount_not_invested, 2)
output['suggestions'] = suggestions
return output
#recommend(5000)
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import IntegerField, SubmitField
from wtforms.validators import InputRequired, NumberRange
class UploadImage(FlaskForm):
image = FileField(
"Your Image",
validators=[
FileRequired(),
FileAllowed(
["jpg", "jpeg", "png", "gif"], "Please upload image files only!"
),
InputRequired(),
],
)
upload = SubmitField("Upload Image")
class ResampleImage(FlaskForm):
colors = IntegerField(
"Max Colors", validators=[InputRequired(), NumberRange(min=1, max=20)]
)
sensitivity = IntegerField(
"Sensitivity", validators=[InputRequired(), NumberRange(min=0, max=250)]
)
resample = SubmitField("Resample Colors", validators=[InputRequired()])
|
from django.test import TestCase
from dojo.tools.fortify.parser import FortifyXMLParser
from dojo.models import Test
class TestFortifyParser(TestCase):
def test_fortify_many_findings(self):
testfile = "dojo/unittests/scans/fortify/fortify_many_findings.xml"
parser = FortifyXMLParser(testfile, Test())
self.assertEqual(324, len(parser.items))
def test_fortify_few_findings(self):
testfile = "dojo/unittests/scans/fortify/fortify_few_findings.xml"
parser = FortifyXMLParser(testfile, Test())
self.assertEqual(2, len(parser.items))
def test_fortify_few_findings_count_chart(self):
testfile = "dojo/unittests/scans/fortify/fortify_few_findings_count_chart.xml"
parser = FortifyXMLParser(testfile, Test())
self.assertEqual(3, len(parser.items))
|
# /*==========================================================*\
# | /=============================================\ |
# | || - Code develop to compara two hands at - || |
# | || - poker game and print who is the winner - || |
# | || - Desafio Python - DATA H - || |
# | || - Created by: Thiago Piovesan - || |
# | || - Versao atual: 1.0.0 - || |
# | \=============================================/ |
# \*==========================================================*/
# GitHub link: https://github.com/ThiagoPiovesan
#==================================================================================================#
# Libraries used:
from PokerHand import PokerHand as PH
#==================================================================================================#
# S = Spades, H = Hearts, D = Diamonds, C = Clubs
NAIPES: list = ["S", "H", "D", "C"]
# T = 10, J = Jack, Q = Queen, K = King, A = Ace
CARDS: list = ["2", "3", "4", "5", "6", "7", "8", "9", "T", "J", "Q", "K", "A"]
#==================================================================================================#
if __name__ == '__main__':
# Test:
hand1 = "TS JS QS KS AS"
hand2 = "AC AH AS AS KS"
hand1 = PH.PokerHand(hand1)
hand2 = PH.PokerHand(hand2)
hand1.compare_with(hand2)
print(hand1)
|
import numpy as np
def calculate_shift(void, wall, field, err_field):
'''
Calculate the average and median shifts between the void and wall
populations.
Parameters:
===========
void : astropy table of length n_void
Table containing void galaxy parameters
wall : astropy table of length n_wall
Table containing wall galaxy parameters
field : string
Name of the field in the void and wall tables that refers to the
characteristic currently being analyzed.
err_field : string
Name of the field containing the error of the characteristic
currently being analyzed.
'''
#######################################################################
# Calculate averages, shift between voids and walls
#----------------------------------------------------------------------
v_mean = np.mean(void[field])
w_mean = np.mean(wall[field])
v_median = np.median(void[field])
w_median = np.median(wall[field])
mean_diff = v_mean - w_mean
median_diff = v_median - w_median
#######################################################################
#######################################################################
# Calculate uncertainties in the averages and shifts
#----------------------------------------------------------------------
# Preserve only finite elements for error calculation
v_finite = void[err_field][np.isfinite(void[err_field])]
w_finite = wall[err_field][np.isfinite(wall[err_field])]
# Uncertainties in the mean
v_mean_err = np.sqrt(np.sum(v_finite**2))/len(v_finite)
w_mean_err = np.sqrt(np.sum(w_finite**2))/len(w_finite)
mean_diff_err = np.sqrt(v_mean_err**2 + w_mean_err**2)
# Uncertainties in the median
v_median_err = v_mean_err*np.sqrt(np.pi*len(v_finite)/(4*0.5*(len(v_finite) - 1)))
w_median_err = w_mean_err*np.sqrt(np.pi*len(w_finite)/(4*0.5*(len(w_finite) - 1)))
median_diff_err = np.sqrt(v_median_err**2 + w_median_err**2)
#######################################################################
print('There are', len(void), 'void galaxies and', len(wall), 'wall galaxies in this sample.')
print('The average ratio for voids is', v_mean, 'pm', v_mean_err, 'and for walls is', w_mean, 'pm', w_mean_err)
print('The average difference between the two populations is', mean_diff, 'pm', mean_diff_err)
print('The median ratio for voids is', v_median, 'pm', v_median_err, 'and for walls is', w_median, 'pm', w_median_err)
print('The median difference between the two populations is', median_diff, 'pm', median_diff_err)
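# Hedged usage sketch (editorial note; column names are illustrative only):
#   from astropy.table import Table
#   void = Table({'ratio': [0.9, 1.1, 1.0], 'ratio_err': [0.05, 0.04, 0.06]})
#   wall = Table({'ratio': [1.2, 1.3, 1.1], 'ratio_err': [0.05, 0.05, 0.04]})
#   calculate_shift(void, wall, 'ratio', 'ratio_err')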
|
def middle(t):
print("Array ", t)
if len(t)>1:
t.pop(0)
t.pop()
elif len(t)>0:
t.pop()
print("Middle of Array ",t)
n = int(input("Size of array: "))
arr = []
for _ in range(n):
arr.append(int(input("Array Element: ")))
middle(arr) |
import sys
from os.path import dirname, abspath, join
path = dirname(dirname(abspath(__file__)))
sys.path.append(join(path, 'predict'))
from flask import request, render_template, make_response, flash, redirect, url_for
from flask import current_app as app
from flask import Blueprint
from flask_login import login_required, current_user
from sqlalchemy.sql import func
from sqlalchemy import or_, and_
import pandas as pd
from .models import db, Race
from .forms import PredictForm
from predict import predict_runner
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('index.html')
@main.route('/profile')
@login_required
def profile():
return render_template('profile.html', name=current_user.name)
@main.route('/predict', methods=['GET', 'POST'])
def predict_race_time():
search = PredictForm(request.form)
result = ''
if request.method == 'POST':
name = search.data['search']
df = pd.read_sql(db.session.query(Race).statement, con=db.engine)
if name:
result = predict_runner(name, df)
else:
flash('Please enter a name')
if not result:
flash('No result found')
return redirect(url_for('main.predict_race_time'))
return render_template('predict.html',
title='Gugs DB',
form=search,
result=result)
|
import json
import os.path
from json import JSONDecodeError
file_path = 'storage\\users.json'
def create_user():
if not os.path.exists(file_path):
with open(file_path, "w"):
pass
username = str(input('Username : ')).lower()
password = str(input('Password : '))
try:
access_level = int(input('Access Level: 1-5 : '))
if access_level <= 0:
print('Must be above 0')
return
elif access_level > 5:
access_level = 5
except ValueError:
print('Only numbers allowed...')
return
with open(file_path, 'r') as f:
try:
data = json.load(f)
data[username] = {"role_id": access_level, "password": password}
except JSONDecodeError:
data = {username:{"role_id": access_level, "password": password}}
with open(file_path, 'w') as f:
json.dump(data, f)
print('User created')
create_user()
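# Hedged sketch of storage\users.json after one run (values illustrative only):
#   {"alice": {"role_id": 3, "password": "s3cret"}}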
|
import frappe
import json
def get_context(context):
if frappe.session.user != 'Guest':
frappe.local.flags.redirect_location = '/'
raise frappe.Redirect
return context |
from enum import Enum
class Direction(Enum):
LEFT = 1
UP = 2
RIGHT = 3
DOWN = 4
class Packet:
xOffsets = {
Direction.LEFT: -1,
Direction.UP: 0,
Direction.RIGHT: 1,
Direction.DOWN: 0
}
yOffsets = {
Direction.LEFT: 0,
Direction.UP: -1,
Direction.RIGHT: 0,
Direction.DOWN: 1
}
def __init__(self, map):
self.map = map
self.path = ""
def start(self):
self.direction = Direction.DOWN
self.stepCount = 0
self.y = 0
self.x = self.map[0].index("|")
while self.move():
pass
return self.path, self.stepCount
def nextChar(self, direction):
"""
Return the next character in the given direction. Return
None if we'd fall off the map
"""
testX = self.x + Packet.xOffsets[direction]
testY = self.y + Packet.yOffsets[direction]
try:
mapChar = self.map[testY][testX]
except IndexError:
mapChar = None
return mapChar
def nextDirection(self):
testDirections = [Direction.LEFT, Direction.RIGHT] if self.direction in [Direction.UP, Direction.DOWN] \
else [Direction.UP, Direction.DOWN]
for direction in testDirections:
if self.nextChar(direction) not in [None, " "]:
newDirection = direction
break
else:
raise ValueError
return newDirection
def move(self):
reachedEnd = False
try:
mapChar = self.map[self.y][self.x]
self.stepCount += 1
except IndexError:
mapChar = None
reachedEnd = True
if mapChar is None:
pass
elif mapChar in "|-":
# Keep going in the same direction
pass
elif mapChar == "+":
# Figure out which direction to go next
nextChar = self.nextChar(self.direction)
if nextChar in [None, " "]:
# Can't keep going in the same direction
try:
self.direction = self.nextDirection()
except ValueError:
# There is no valid next direction
reachedEnd = True
elif mapChar == " ":
self.stepCount -= 1
reachedEnd = True
else:
self.path += mapChar
if not reachedEnd:
self.x += Packet.xOffsets[self.direction]
self.y += Packet.yOffsets[self.direction]
return not reachedEnd
def day19(fileName):
map = []
with open(fileName) as infile:
for line in infile:
map.append(line.rstrip())
packet = Packet(map)
return packet.start()
if __name__ == "__main__":
path, stepCount = day19("19.txt")
print(path)
print(f"{stepCount} steps")
# 16493 is wrong
|
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField
from logbook.models import Message, Category, Attachment
class CategorySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Category
fields = [
"id",
"keywords",
"type",
]
class AttachmentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Attachment
fields = [
"id",
"file",
"url",
]
class MessageSerializer(serializers.HyperlinkedModelSerializer):
# files = FileSerializer()
# files = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
attachments = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name="attachment-detail")
user = PrimaryKeyRelatedField(queryset=User.objects.all())
class Meta:
model = Message
fields = [
"id",
"user",
"attachments",
"status",
"time",
"text",
"source",
"created_at",
"updated_at",
]
|
"""
@Time :2020/2/15 21:20
@Author : 梁家熙
@Email: [email protected]
"""
import json
import random
import os
import logging
import collections
from pathlib import Path
import torch
from allennlp.modules.span_extractors import SpanExtractor, EndpointSpanExtractor
from tqdm import tqdm
from pprint import pprint
from typing import List, Dict, Tuple
from allennlp.nn import util
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
@SpanExtractor.register("my_endpoint")
class MyEndpointSpanExtractor(EndpointSpanExtractor):
def __init__(
self,
input_dim: int,
combination: str = "x,y",
num_width_embeddings: int = None,
span_width_embedding_dim: int = None,
bucket_widths: bool = False,
use_exclusive_start_indices: bool = False,
) -> None:
super(MyEndpointSpanExtractor, self).__init__(input_dim, combination, num_width_embeddings, span_width_embedding_dim, bucket_widths, use_exclusive_start_indices)
self.linear = torch.nn.Linear(input_dim * len(combination.split(',')), input_dim)
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.LongTensor = None,
span_indices_mask: torch.LongTensor = None,
):
# shape (batch_size, num_spans)
span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]
if span_indices_mask is not None:
# It's not strictly necessary to multiply the span indices by the mask here,
# but it's possible that the span representation was padded with something other
# than 0 (such as -1, which would be an invalid index), so we do so anyway to
# be safe.
span_starts = span_starts * span_indices_mask
span_ends = span_ends * span_indices_mask
if not self._use_exclusive_start_indices:
if sequence_tensor.size(-1) != self._input_dim:
raise ValueError(
f"Dimension mismatch expected ({sequence_tensor.size(-1)}) "
f"received ({self._input_dim})."
)
start_embeddings = util.batched_index_select(sequence_tensor, span_starts)
end_embeddings = util.batched_index_select(sequence_tensor, span_ends)
else:
# We want `exclusive` span starts, so we remove 1 from the forward span starts
# as the AllenNLP `SpanField` is inclusive.
# shape (batch_size, num_spans)
exclusive_span_starts = span_starts - 1
# shape (batch_size, num_spans, 1)
start_sentinel_mask = (exclusive_span_starts == -1).long().unsqueeze(-1)
exclusive_span_starts = exclusive_span_starts * (1 - start_sentinel_mask.squeeze(-1))
# We'll check the indices here at runtime, because it's difficult to debug
# if this goes wrong and it's tricky to get right.
if (exclusive_span_starts < 0).any():
raise ValueError(
f"Adjusted span indices must lie inside the the sequence tensor, "
f"but found: exclusive_span_starts: {exclusive_span_starts}."
)
start_embeddings = util.batched_index_select(sequence_tensor, exclusive_span_starts)
end_embeddings = util.batched_index_select(sequence_tensor, span_ends)
# We're using sentinels, so we need to replace all the elements which were
# outside the dimensions of the sequence_tensor with the start sentinel.
float_start_sentinel_mask = start_sentinel_mask.float()
start_embeddings = (
start_embeddings * (1 - float_start_sentinel_mask)
+ float_start_sentinel_mask * self._start_sentinel
)
combined_tensors = util.combine_tensors(
self._combination, [start_embeddings, end_embeddings]
)
combined_tensors = self.linear(combined_tensors)
if self._span_width_embedding is not None:
# Embed the span widths and concatenate to the rest of the representations.
if self._bucket_widths:
span_widths = util.bucket_values(
span_ends - span_starts, num_total_buckets=self._num_width_embeddings
)
else:
span_widths = span_ends - span_starts
span_width_embeddings = self._span_width_embedding(span_widths)
combined_tensors = torch.cat([combined_tensors, span_width_embeddings], -1)
if span_indices_mask is not None:
return combined_tensors * span_indices_mask.unsqueeze(-1).float()
return combined_tensors
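# Hedged usage sketch (editorial note; shapes follow the AllenNLP SpanExtractor contract):
#   extractor = MyEndpointSpanExtractor(input_dim=768, combination="x,y")
#   sequence = torch.randn(2, 20, 768)              # (batch, seq_len, input_dim)
#   spans = torch.tensor([[[0, 3], [5, 7]]] * 2)    # (batch, num_spans, 2), inclusive ends
#   out = extractor(sequence, spans)                # (batch, num_spans, input_dim) after self.linear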
|
"""Sigmoid-Bernoulli Restricted Boltzmann Machine.
"""
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from learnergy.models.bernoulli import RBM
from learnergy.utils import logging
logger = logging.get_logger(__name__)
class SigmoidRBM(RBM):
"""A SigmoidRBM class provides the basic implementation for
Sigmoid-Bernoulli Restricted Boltzmann Machines.
References:
G. Hinton. A practical guide to training restricted Boltzmann machines.
Neural networks: Tricks of the trade (2012).
"""
def __init__(
self,
n_visible: Optional[int] = 128,
n_hidden: Optional[int] = 128,
steps: Optional[int] = 1,
learning_rate: Optional[float] = 0.1,
momentum: Optional[float] = 0.0,
decay: Optional[float] = 0.0,
temperature: Optional[float] = 1.0,
use_gpu: Optional[bool] = False,
) -> None:
"""Initialization method.
Args:
n_visible: Amount of visible units.
n_hidden: Amount of hidden units.
steps: Number of Gibbs' sampling steps.
learning_rate: Learning rate.
momentum: Momentum parameter.
decay: Weight decay used for penalization.
temperature: Temperature factor.
use_gpu: Whether GPU should be used or not.
"""
logger.info("Overriding class: RBM -> SigmoidRBM.")
super(SigmoidRBM, self).__init__(
n_visible,
n_hidden,
steps,
learning_rate,
momentum,
decay,
temperature,
use_gpu,
)
logger.info("Class overrided.")
def visible_sampling(
self, h: torch.Tensor, scale: Optional[bool] = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Performs the visible layer sampling, i.e., P(v|h).
Args:
h: A tensor incoming from the hidden layer.
scale: A boolean to decide whether temperature should be used or not.
Returns:
(Tuple[torch.Tensor, torch.Tensor]): The states and probabilities of the visible layer sampling.
"""
# Calculating neurons' activations
activations = F.linear(h, self.W, self.a)
# If scaling is true
if scale:
# Calculate probabilities with temperature
probs = torch.sigmoid(torch.div(activations, self.T))
# If scaling is false
else:
# Calculate probabilities as usual
probs = torch.sigmoid(activations)
# Copying states as current probabilities
states = probs
return states, probs
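# Hedged usage sketch (editorial note; relies on learnergy's RBM interface as used above):
#   model = SigmoidRBM(n_visible=784, n_hidden=128, steps=1, learning_rate=0.1)
#   h = torch.rand(4, 128)                                  # a batch of hidden activations
#   states, probs = model.visible_sampling(h, scale=True)   # each of shape (4, 784)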
|