#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import sys
# Plots the impulse, frequency and phase responses of the lowpass, highpass, bandstop and bandpass filters
def plot_if(figno,name,figtitle):
plt.figure(figno)
plt.suptitle(figtitle)
fs = 250
y = np.loadtxt(name);
plt.subplot(311)
plt.title("Impulse response")
plt.plot(y);
#
# Fourier Transform
yf = np.fft.fft(y)
plt.subplot(312)
fx = np.linspace(0,fs,len(yf))
plt.plot(fx,20*np.log10(abs(yf)))
plt.xlim(0,fs/2)
plt.title("Frequency response")
plt.xlabel("f/Hz")
plt.ylabel("gain/dB")
plt.subplot(313)
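# Group delay: the negative derivative of the unwrapped phase with respect to angular frequency, in seconds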
p = -np.diff(np.unwrap(np.angle(yf))) / np.diff(fx * 2 * np.pi)
plt.plot(np.linspace(0,fs,len(yf)-1),p)
plt.xlim(0,fs/2)
plt.ylim(-0.075,0.075)
plt.title("Phase response")
plt.xlabel("f/Hz")
plt.ylabel("delay/secs")
if len(sys.argv) < 2:
print("Specify which filter shall be plotted: bessel, butterworth, chebyshevI, chebyshevII.")
quit()
prefix = "target/surefire-reports/"+sys.argv[1]+"/"
plot_if(1,prefix+"lp.txt","Lowpass")
plot_if(2,prefix+"hp.txt","Highpass")
plot_if(3,prefix+"bs.txt","Bandstop")
plot_if(4,prefix+"bp.txt","Bandpass")
plt.show()
|
import numpy as np
import multiprocessing
import timeit
def start():
a = np.random.rand(1000, 1000)
b = np.random.rand(1000, 1000)
np.multiply(a,b)
start2 = timeit.default_timer()  # wall-clock start; timeit.timeit() with no arguments only times an empty statement
for i in range(500):
start()
end = timeit.default_timer()
print(end-start2)
start2 = timeit.default_timer()
pool = multiprocessing.Pool(multiprocessing.cpu_count())
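# apply_async submits each call to a worker process and returns an AsyncResult; .get() below blocks until that call has finished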
liste = [pool.apply_async(start, ()) for i in range(500)]
[p.get() for p in liste]
end = timeit.default_timer()
print(end - start2)
|
import unittest
import ray
from ray.rllib.algorithms import sac
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check_compute_single_action, framework_iterator
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class TestRNNSAC(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_rnnsac_compilation(self):
"""Test whether a R2D2Trainer can be built on all frameworks."""
config = (
sac.RNNSACConfig()
.rollouts(num_rollout_workers=0)
.training(
# Wrap with an LSTM and use a very simple base-model.
model={"max_seq_len": 20},
policy_model_config={
"use_lstm": True,
"lstm_cell_size": 64,
"fcnet_hiddens": [10],
"lstm_use_prev_action": True,
"lstm_use_prev_reward": True,
},
q_model_config={
"use_lstm": True,
"lstm_cell_size": 64,
"fcnet_hiddens": [10],
"lstm_use_prev_action": True,
"lstm_use_prev_reward": True,
},
replay_buffer_config={
"type": "MultiAgentPrioritizedReplayBuffer",
"replay_burn_in": 20,
"zero_init_states": True,
},
lr=5e-4,
)
)
num_iterations = 1
# Test building an RNNSAC agent in all frameworks.
for _ in framework_iterator(config, frameworks="torch"):
trainer = config.build(env="CartPole-v0")
for i in range(num_iterations):
results = trainer.train()
print(results)
check_compute_single_action(
trainer,
include_state=True,
include_prev_action_reward=True,
)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
import unittest
from conans.test.tools import TestClient
import os
from conans.paths import CONANINFO
import platform
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from nose.plugins.attrib import attr
from conans.util.files import load
from conans.model.info import ConanInfo
import time
@attr("slow")
class BasicBuildTest(unittest.TestCase):
def complete_build_flow_test(self):
"""In local user folder"""
client = TestClient()
command = os.sep.join([".", "bin", "say_hello"])
for install, lang, static in [("install", 0, True),
("install -o language=1", 1, True),
("install -o language=1 -o static=False", 1, False),
("install -o static=False", 0, False)]:
dll_export = client.default_compiler_visual_studio and not static
files = cpp_hello_conan_files("Hello0", "0.1", dll_export=dll_export)
client.save(files)
client.run(install)
time.sleep(1) # necessary so the conaninfo.txt is flushed to disc
client.run('build')
client.runner(command, client.current_folder)
msg = "Hello" if lang == 0 else "Hola"
self.assertIn("%s Hello0" % msg, client.user_io.out)
conan_info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.loads(load(conan_info_path))
self.assertTrue(conan_info.full_options.language == lang)
if static:
self.assertTrue(conan_info.full_options.static)
else:
self.assertFalse(conan_info.full_options.static)
|
"""
If in interact mode (see :func:`vcs.Canvas.Canvas.interact`), these attributes can be configured interactively, via the
method described in the **Interact Mode** section of the attribute description.
"""
# @author: tpmaxwel
from . import VCS_validation_functions
import multiprocessing
import vcs
import time
import warnings
try:
from DV3D.ConfigurationFunctions import ConfigManager
HAS_DV3D = True
except Exception:
HAS_DV3D = False
from .xmldocs import toggle_surface, toggle_volume, xslider, yslider, zslider, verticalscaling, scalecolormap # noqa
from .xmldocs import scaletransferfunction, toggleclipping, isosurfacevalue, scaleopacity, basemapopacity, camera, scriptdocs # noqa
class Gfdv3d(object):
__doc__ = """
Gfdv3d is the class from which Gf3Dvector, Gf3Dscalar, and Gf3DDualScalar
inherit. It sets up properties and functions common to all of the 3d
graphics method objects.
Attributes
----------
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
.. pragma: skip-doctest
""" % (toggle_surface, toggle_volume, xslider, yslider, zslider, verticalscaling, scalecolormap,
scaletransferfunction, toggleclipping, isosurfacevalue, scaleopacity, basemapopacity, camera)
__slots__ = [
'g_name',
'ncores',
'plot_attributes'
]
def _getname(self):
return self._name
def _setname(self, value):
value = VCS_validation_functions.checkname(self, 'name', value)
if value is not None:
self._name = value
name = property(_getname, _setname)
def _getaxes(self):
return self._axes
def _setaxes(self, value):
# value=VCS_validation_functions.checkOnOff(self,'axes',value)
self._axes = value
axes = property(_getaxes, _setaxes)
def _getNumCores(self):
return self.ncores
def _setNumCores(self, nc):
self.ncores = nc
NumCores = property(_getNumCores, _setNumCores)
def script(self, script_filename=None, mode=None):
if (script_filename is None):
raise ValueError(
'Error - Must provide an output script file name.')
if (mode is None):
mode = 'a'
elif (mode not in ('w', 'a')):
raise ValueError(
'Error - Mode can only be "w" for replace or "a" for append.')
# By default, save file in json
scr_type = script_filename.split(".")
if len(scr_type) == 1 or len(scr_type[-1]) > 5:
scr_type = "json"
if script_filename != "initial.attributes":
script_filename += ".json"
else:
scr_type = scr_type[-1]
if scr_type == 'scr':
raise vcs.VCSDeprecationWarning("scr scripts are no longer generated")
elif scr_type == 'py':
mode = mode + '+'
py_type = script_filename[
len(script_filename) -
3:len(script_filename)]
if (py_type != '.py'):
script_filename = script_filename + '.py'
# Write to file
fp = open(script_filename, mode)
if (fp.tell() == 0): # Must be a new file, so include below
fp.write("#####################################\n")
fp.write("# #\n")
fp.write("# Import and Initialize VCS #\n")
fp.write("# #\n")
fp.write("#############################\n")
fp.write("import vcs\n")
fp.write("v=vcs.init()\n\n")
gtype = 'xyt' if (self._axes == "Hovmoller3D") else 'default'
unique_name = 'gm3d_%s' % str(time.time() % 1)[2:]
fp.write('%s = vcs.get%s("%s")\n' % (unique_name, self.g_name, gtype))
for param_name in self.parameter_names:
fp.write('%s.%s = %s\n' % (unique_name, param_name, self.cfgManager.getParameterValue(param_name)))
else:
# Json type
mode += "+"
f = open(script_filename, mode)
vcs.utils.dumpToJson(self, f)
f.close()
# can we add a scriptdocs[g_name] here and have each derived class pick up the documentation correctly?
def __init__(self, Gfdv3d_name, Gfdv3d_name_src='default'):
if not HAS_DV3D:
warnings.warn("Could not find DV3D module, you will not be able to use DV3D's graphic methods")
return
if not isinstance(Gfdv3d_name, str):
raise ValueError("DV3D name must be a string")
if Gfdv3d_name in list(vcs.elements[self.g_name].keys()):
raise ValueError(
"DV3D graphic method '%s' already exists" %
Gfdv3d_name)
self._name = Gfdv3d_name
self.plot_attributes = {}
self.projection = 'default'
self.provenanceHandler = None
vcs.elements[self.g_name][Gfdv3d_name] = self
self._axes = "xyz"
# Use parent config values if possible
if isinstance(Gfdv3d_name_src, str):
# Make sure we aren't inheriting from ourself
if Gfdv3d_name_src != Gfdv3d_name:
parent_cfg = vcs.elements[self.g_name][Gfdv3d_name_src].cfgManager
self._axes = vcs.elements[self.g_name][Gfdv3d_name_src]._axes
else:
parent_cfg = None
else:
# Make sure we aren't inheriting from ourself
if Gfdv3d_name_src.name != self.name:
parent_cfg = Gfdv3d_name_src.cfgManager
self._axes = Gfdv3d_name_src._axes
else:
parent_cfg = None
self.cfgManager = ConfigManager(cm=parent_cfg)
if Gfdv3d_name == "Hovmoller3D":
self._axes = "xyt"
self.ncores = multiprocessing.cpu_count()
self.addParameters()
self.plot_attributes['name'] = self.g_name
self.plot_attributes['template'] = Gfdv3d_name
def setProvenanceHandler(self, provenanceHandler):
self.provenanceHandler = provenanceHandler
def getStateData(self):
return self.cfgManager.getStateData()
def getConfigurationData(self, **args):
return self.cfgManager.getConfigurationData(**args)
def getConfigurationParms(self, **args):
return self.cfgManager.getConfigurationParms(**args)
def getConfigurationState(self, pname, **args):
return self.cfgManager.getConfigurationState(pname, **args)
def add_property(self, name):
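# Dynamically expose a DV3D configuration parameter as a Python property that reads from and writes to the ConfigManager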
def fget(self):
return self.getParameter(name)
def fset(self, value):
self.setParameter(name, value)
setattr(self.__class__, name, property(fget, fset))
if name not in Gfdv3d.__slots__:
Gfdv3d.__slots__.append(name)
def addPlotAttribute(self, name, value):
self.plot_attributes[name] = value
def getPlotAttribute(self, name):
return self.plot_attributes.get(name, None)
def getPlotAttributes(self):
return self.plot_attributes
@staticmethod
def getParameterList():
from DV3D.DV3DPlot import PlotButtonNames
cfgManager = ConfigManager()
parameterList = cfgManager.getParameterList(extras=PlotButtonNames)
return parameterList
def addParameters(self):
self.parameter_names = []
for pname in self.getParameterList():
self.add_property(pname)
self.parameter_names.append(pname)
# print " ------------->> Adding parameter: ", pname
def getParameter(self, param_name, **args):
return self.cfgManager.getParameterValue(param_name, **args)
def setParameter(self, param_name, data, **args):
self.cfgManager.setParameter(param_name, data, **args)
def restoreState(self):
self.cfgManager.restoreState()
def initDefaultState(self):
self.cfgManager.initDefaultState()
def list(self):
print('---------- DV3D (Gfdv3d) member (attribute) listings ----------')
print('name =', self.name)
print('axes =', self.axes)
for pname in self.parameter_names:
pval = self.getParameter(pname)
print(pname, '=', repr(pval))
class Gf3Dvector(Gfdv3d):
"""
Gf3Dvector
"""
def __init__(self, Gfdv3d_name, Gfdv3d_name_src='default'):
self.g_name = '3d_vector'
Gfdv3d.__init__(self, Gfdv3d_name, Gfdv3d_name_src=Gfdv3d_name_src)
class Gf3Dscalar(Gfdv3d):
"""
Gf3Dscalar
"""
def __init__(self, Gfdv3d_name, Gfdv3d_name_src='default'):
self.g_name = '3d_scalar'
Gfdv3d.__init__(self, Gfdv3d_name, Gfdv3d_name_src=Gfdv3d_name_src)
self.VectorDisplay = Gfdv3d_name
class Gf3DDualScalar(Gfdv3d):
"""
Gf3DDualScalar
"""
def __init__(self, Gfdv3d_name, Gfdv3d_name_src='default'):
self.g_name = '3d_dual_scalar'
Gfdv3d.__init__(self, Gfdv3d_name, Gfdv3d_name_src=Gfdv3d_name_src)
|
from rdflib import Namespace
from pycali.vocabulary import ODRL
# Namespace
CALI = Namespace('http://cali.priloo.univ-nantes.fr/ontology#')
ODRS = Namespace('http://schema.theodi.org/odrs#')
# Classes
Status = CALI['Status']
# Properties
lessRestrictiveThan = CALI['lessRestrictiveThan']
compatibleWith = ODRS['compatibleWith']
# Resources
# Status
Undefined = CALI['Undefined']
Recommendation = CALI['Recommendation']
Dispensation = CALI['Dispensation']
Permission = ODRL['Permission']
Prohibition = ODRL['Prohibition']
Duty = ODRL['Duty']
|
# just here so fixtures/initial_data.json gets loaded.
|
"""
remote interpreter functions
"""
import sys
from Helpers import RedirectedStd
__all__ = ["remote_interpreter", "remote_pm"]
def remote_interpreter(conn, namespace = None):
"""starts an interactive interpreter on the server"""
if namespace is None:
namespace = {"conn" : conn}
std = RedirectedStd(conn)
try:
std.redirect()
conn.modules[__name__]._remote_interpreter_server_side(**namespace)
finally:
std.restore()
def _remote_interpreter_server_side(**namespace):
import code
namespace.update(globals())
code.interact(local = namespace)
def remote_pm(conn):
"""a version of pdb.pm() that operates on exceptions at the remote side of the connection"""
import pdb
pdb.post_mortem(conn.modules.sys.last_traceback)
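# Usage sketch (assumes an rpyc-style connection object `conn` obtained elsewhere):
#   remote_interpreter(conn)   # interactive console running on the server, with `conn` in its namespace
#   remote_pm(conn)            # post-mortem debugging of the last exception raised on the remote side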
|
#How to visualise data using t-SNE - lesson 1
import pandas as pd #data analysis library to help read .CSV files
import numpy as np #helps transform data into a format a machine learning model can understand
from sklearn.preprocessing import LabelEncoder #scikit-learn helps create the machine learning model
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split # sklearn.cross_validation was removed; model_selection is the current module
from sklearn.metrics import accuracy_score
from sklearn.manifold import TSNE #standard scikit-learn module to reduce high dimensional data so it can be visualised in 2D or 3D
import matplotlib.pyplot as plt #helps to visualise the data
#Step 1 - download the data
dataframe_all = pd.read_csv('') #insert a filepath within the single quotation marks to point to the .CSV file
num_rows = dataframe_all.shape[0] # shape[0] is the number of rows
#Step 2 - clean the data
# count the number of missing elements (NaN) in each column
counter_nan = dataframe_all.isnull().sum()
counter_without_nan = counter_nan[counter_nan==0]
# remove the columns with missing elements
dataframe_all = dataframe_all[counter_without_nan.keys()]
# remove the first 7 columns which contain no discriminative information
dataframe_all = dataframe_all.iloc[:, 7:] # .ix was removed from pandas; use positional indexing
# the list of columns (the last column is the class label)
columns = dataframe_all.columns
print(columns)
#Step 3 - get features (x) and scale the features
# get x and convert it to numpy array
x = dataframe_all.iloc[:, :-1].values
standard_scaler = StandardScaler()
x_std = standard_scaler.fit_transform(x)
#Step 4 - get class labels y and then encode it into number
# get class label data
y = dataframe_all.iloc[:, -1].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
#Step 5 - split the data into training set and test set
test_percentage = 0.1
x_train, x_test, y_train, y_test = train_test_split(x_std, y, test_size = test_percentage, random_state = 0)
# t-distributed Stochastic Neighbor Embedding (t-SNE) visualization
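# n_components=2 projects the scaled features onto a 2D embedding; random_state fixes the otherwise stochastic layout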
tsne = TSNE(n_components=2, random_state=0)
x_test_2d = tsne.fit_transform(x_test)
# scatter plot the sample points among 5 classes
markers=('s', 'd', 'o', '^', 'v')
color_map = {0:'red', 1:'blue', 2:'lightgreen', 3:'purple', 4:'cyan'}
plt.figure()
for idx, cl in enumerate(np.unique(y_test)):
plt.scatter(x=x_test_2d[y_test==cl,0], y=x_test_2d[y_test==cl,1], c=color_map[idx], marker=markers[idx], label=cl)
plt.xlabel('X in t-SNE')
plt.ylabel('Y in t-SNE')
plt.legend(loc='upper left')
plt.title('t-SNE visualization of test data')
plt.show()
|
from util.tipo import tipo
class S_USER_LOCATION(object):
def __init__(self, tracker, time, direction, opcode, data):
dic = {}
id = data.read(tipo.uint64)
dic['pos1'] = data.read(tipo.float, 3)
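# the raw angle is a 16-bit value; scale it so that 0x10000 corresponds to a full 360-degree turn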
dic['angle'] = data.read(tipo.angle) * 360. / 0x10000
unk2 = data.read(tipo.int16) # maybe w is int32?
dic['speed'] = data.read(tipo.int16)
dic['pos'] = data.read(tipo.float, 3)
dic['type'] = data.read(tipo.int32) # 0 = Move, 7= Rotate standing
unk = data.read(tipo.byte)
tracker.get_entity(id).location(dic)
|
# Copyright 2019 Wilhelm Putz
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from celery import Celery
from jinjamator.task import JinjamatorTask
import importlib
import sys
import os
from time import sleep
import collections
import logging
import json
from celery.exceptions import Ignore
from jinjamator.daemon import celery
from jinjamator.task.celery.loghandler import CeleryLogHandler, CeleryLogFormatter
from jinjamator.task import TaskletFailed
from copy import deepcopy
@celery.task(bind=True)
def run_jinjamator_task(self, path, data, output_plugin, user_id):
"""
Jinjamator Celery Task runner.
"""
self.update_state(
state="PROGRESS",
meta={
"status": "setting up jinjamator task run",
"configuration": {"root_task_path": path, "created_by_user_id": user_id},
},
)
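# Custom PROGRESS states are written to the Celery result backend, so API clients can poll the job status while it runs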
formatter = CeleryLogFormatter()
log_handler = CeleryLogHandler()
formatter.created_by_user_id = user_id
log_handler.created_by_user_id = user_id
log_handler.setLevel(logging.DEBUG)
log_handler.setFormatter(formatter)
log_handler.set_celery_task(self)
log_handler.formatter.set_root_task_path(path)
# if "jinjamator_pre_run_tasks" in data:
# for pre_run_task in data["jinjamator_pre_run_tasks"]:
# task = JinjamatorTask()
# task._configuration._data["jinjamator_job_id"] = self.request.id
# log_handler.formatter.set_jinjamator_task(task)
# task._scheduler = self
# task._log.addHandler(log_handler)
# task._log.setLevel(logging.DEBUG)
# if "output_plugin" in pre_run_task["task"]:
# task.load_output_plugin(pre_run_task["task"]["output_plugin"])
# else:
# task.load_output_plugin("console")
# task.configuration.merge_dict(pre_run_task["task"]["data"])
# task._configuration.merge_dict(
# celery.conf["jinjamator_private_configuration"]
# )
# task.configuration.merge_dict(deepcopy(data))
# task.load(pre_run_task["task"]["path"])
# task._log.info(
# "running pre run task {}".format(pre_run_task["task"]["path"])
# )
# if not task.run():
# raise Exception("task failed")
# task._log.handlers.remove(log_handler)
# log_handler._task = None
# del task
self.update_state(
state="PROGRESS",
meta={
"status": "running main task",
"configuration": {"root_task_path": path, "created_by_user_id": user_id},
},
)
task = JinjamatorTask()
task._configuration._data["jinjamator_job_id"] = self.request.id
task._scheduler = self
log_handler.formatter.set_jinjamator_task(task)
task._log.setLevel(logging.DEBUG)
task._log.addHandler(log_handler)
task.load_output_plugin(
output_plugin,
celery.conf["jinjamator_private_configuration"][
"global_output_plugins_base_dirs"
],
)
task._configuration.merge_dict(celery.conf["jinjamator_private_configuration"])
task.configuration.merge_dict(data)
task.load(path)
try:
task.run()
except TaskletFailed:
raise Exception("task failed")
return {
"status": "finished task",
"stdout": task._stdout.getvalue(),
"log": log_handler.contents,
}
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: [email protected]
@site: https://www.lylinux.net/
@software: PyCharm
@file: context_processors.py
@time: 2016/11/6 4:23 PM
"""
from blog.models import Category, Article
from website.utils import cache, get_web_setting, get_contact_info
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
def seo_processor(requests):
key = 'seo_processor'
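# the assembled template context is cached under this key for 10 hours (see cache.set below) to avoid hitting the settings tables on every request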
value = cache.get(key)
if value:
return value
else:
logger.info('set processor cache.')
setting = get_web_setting()
contact = get_contact_info()
value = {
'SITE_ADDRESS': setting.site_address,
'SITE_NAME': setting.sitename,
'SHOW_GOOGLE_ADSENSE': setting.show_google_adsense,
'GOOGLE_ADSENSE_CODES': setting.google_adsense_codes,
'SITE_SEO_DESCRIPTION': setting.site_seo_description,
'SITE_DESCRIPTION': setting.site_description,
'SITE_KEYWORDS': setting.site_keywords,
'COPYRIGHT': setting.copyright,
'BLOG_NAME': setting.blogname,
'BLOG_SEO_DESCRIPTION': setting.blog_seo_description,
'BLOG_DESCRIPTION': setting.blog_description,
'BLOG_KEYWORDS': setting.blog_keywords,
'SITE_BASE_URL': requests.scheme + '://' + requests.get_host() + '/',
'BLOG_BASE_URL': requests.scheme + '://' + requests.get_host() + '/blog/',
'ARTICLE_SUB_LENGTH': setting.article_sub_length,
'nav_category_list': Category.objects.all(),
'nav_pages': Article.objects.filter(type='p', status='p'),
'OPEN_BLOG_COMMENT': setting.open_blog_comment,
'BEIAN_CODE': setting.beiancode,
'ANALYTICS_CODE': setting.analyticscode,
"BEIAN_CODE_GONGAN": setting.gongan_beiancode,
"SHOW_GONGAN_CODE": setting.show_gongan_code,
"CURRENT_YEAR": datetime.now().year,
"LOGO_IMG": setting.logo_img.url,
"LOGO_FOOTER_IMG": setting.logo_footer_img.url,
"PHONE_IMG": setting.phone_img.url,
"WECHART_IMG": setting.wechart_img.url,
"COMPANY": contact.company,
"PHONE": contact.phone,
"PHONE_USER": contact.phone_user,
"WECHART": contact.wechart,
"QQ": contact.qq,
"PHONE_AFTER_SALE": contact.phone_after_sale,
"PHONE_AFTER_SALE_USER": contact.phone_after_sale_user,
"EMAIL": contact.email,
"EMAIL_HR": contact.email_hr,
"ADDRESS": contact.address,
}
cache.set(key, value, 60 * 60 * 10)
return value
|
from vyper.compiler import (
compile_codes,
mk_full_signature,
)
def test_only_init_function():
code = """
x: int128
@public
def __init__():
self.x = 1
"""
code_init_empty = """
x: int128
@public
def __init__():
pass
"""
empty_sig = [{
'outputs': [],
'inputs': [],
'constant': False,
'payable': False,
'type': 'constructor'
}]
assert mk_full_signature(code) == empty_sig
assert mk_full_signature(code_init_empty) == empty_sig
def test_default_abi():
default_code = """
@payable
@public
def __default__():
pass
"""
assert mk_full_signature(default_code) == [{
'constant': False,
'payable': True,
'type': 'fallback'
}]
def test_method_identifiers():
code = """
x: public(int128)
@public
def foo(x: uint256) -> bytes[100]:
return b"hello"
"""
out = compile_codes(
codes={'t.vy': code},
output_formats=['method_identifiers'],
output_type='list'
)[0]
assert out['method_identifiers'] == {
'foo(uint256)': '0x2fbebd38',
'x()': '0xc55699c'
}
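# Each method identifier is the first four bytes of the keccak256 hash of the canonical signature, e.g. keccak256("foo(uint256)")[:4]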
|
import abc
import itertools
import os
import pickle
import re
from collections import defaultdict
from copy import deepcopy
from itertools import chain
from typing import Union, List
import nltk
import json
from tqdm import tqdm
import numpy as np
from attacks.glue_datasets import get_dataset_dir_name
from common.utils import get_possible_perturbations, clean_sent_for_syns
import operator as op
from data.constants import stop_words
_loaded_nltk = False
_wordtags = None
def _load_nltk_once():
global _loaded_nltk, _wordtags
if not _loaded_nltk:
nltk.download('brown')
nltk.download('universal_tagset')
_wordtags = nltk.ConditionalFreqDist((w.lower(), t) for w, t in nltk.corpus.brown.tagged_words(tagset='universal'))
def _get_poses(w, min_occurences=10):
_load_nltk_once()
# res = vb.part_of_speech(w) # apparently this can fail because of the api request
# return set() if not res else {v['text'] for v in json.loads(res)}
# possible nltk universal pos tags: {'NOUN', 'CONJ', 'NUM', 'PRT', 'ADP', 'ADV', 'PRON', 'DET', 'ADJ', 'VERB'}
# which almost matches spacy tags: https://spacy.io/api/annotation#pos-universal
return {k for k, v in _wordtags[w].items() if v >= min_occurences}
def _create_english_only_pos_neighbors_json(neighbors_dict):
# nlp = spacy.load("en_core_web_sm") # python3 -m spacy download en_core_web_sm
filtered_neighbors = {}
# def _is_english(w):
# return bool(vb.part_of_speech(w))
import concurrent.futures
with concurrent.futures.ProcessPoolExecutor(12) as executor:
all_words = list(set(neighbors_dict.keys()).union(chain.from_iterable(neighbors_dict.values()))) # use the function argument, not the module-level synonyms
word_to_pos = {w: v for w, v in tqdm(zip(all_words, executor.map(_get_poses, all_words)), total=len(all_words)) if len(v) > 0}
for k, v in tqdm(neighbors_dict.items()):
if len(v) == 0:
continue
key_poses = word_to_pos.get(k, set())
if len(key_poses) == 0:
continue
value_poses = defaultdict(list)
for w in v:
for pos in word_to_pos.get(w, set()):
value_poses[pos].append(w)
value_poses = {k: v for k, v in value_poses.items() if k in key_poses}
if len(value_poses) > 0:
filtered_neighbors[k] = value_poses
return filtered_neighbors
class SynonymsCreator(metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_possible_perturbations(self, sent: Union[str, List[str]], append_self=True, raise_on_failure=True) -> List[List[str]]:
raise NotImplementedError
class EncodedSynonyms(SynonymsCreator):
def __init__(self, synonyms_path):
# Note - hashes are lower cased and pre-processed, but not cleaned. The vocabulary contains only lower-case words. Punctuation and
# capitalization must come at eval time from the given sentence and are not pre-computed
data_path = os.path.dirname((os.path.dirname(synonyms_path)))
# vocab_path = os.path.join(data_path, 'text_fooler_synonyms_vocab.json')
vocab_path = os.path.join(data_path, 'counterfitted_neighbors_vocab.json')
hashes_path = synonyms_path.replace('synonyms', 'hashes')
with open(synonyms_path) as f:
self.synonyms = {int(k): {int(k_):v_ for k_, v_ in v.items()} for k,v in json.load(f).items()}
with open(vocab_path) as f:
self.rev_vocab = json.load(f)
self.vocab = np.array(list(zip(*sorted(self.rev_vocab.items(), key=op.itemgetter(1))))[0])
with open(hashes_path) as f:
self.hashes = json.load(f)
def get_possible_perturbations(self, sent: Union[str, List[str]], append_self=True, raise_on_failure=True):
if not isinstance(sent, str):
split_sent = sent
sent = ' '.join(sent)
else:
split_sent = sent.split(' ')
sent_id = self.hashes.get(sent.lower(), None)
split_sent, punct, capitalized = clean_sent_for_syns(split_sent) # remove punctuations to find synonyms
if sent_id is None:
# raise ValueError(f'Given sentence is not in the expected attack space encoding: {sent}')
print(f'Given sentence does not have encoded synonyms: {sent}') # hopefully that won't happen often
codes = {}
else:
codes = self.synonyms.get(sent_id, None)
if codes is None:
raise ValueError(f'Given sentence does not have encoded synonyms: {sent}')
if not append_self:
perts = [self.vocab[codes.get(i, [])].tolist() for i in range(len(split_sent))]
else:
perts = [self.vocab[codes.get(i, [])].tolist() + [w] for i, w in enumerate(split_sent)]
for k in capitalized:
perts[k] = [w.capitalize() for w in perts[k]]
for k, v in punct.items():
perts[k] = [v[0] + w + v[1] for w in perts[k]] # add the punctuations back
return perts
class CachedSynonyms(SynonymsCreator):
def __init__(self):
self._sig_to_idx_dict = {}
self._idx_to_sent_dict = {}
self._idx_to_syns_dict = {}
self._idx_to_cached_idx_dict = {}
self._idx_to_sent_len = {}
self._stripped_sig_to_idx_dict = None # computed in runtime only if needed
self._black_list = set()
self._known_sents = {} # computed in runtime only if needed
@staticmethod
def from_pkl(syn_pkl_path):
with open(syn_pkl_path, 'rb') as f:
return pickle.load(f)
def get_possible_perturbations(self, sent: Union[str, List[str]], append_self=True, raise_on_failure=True) -> List[List[str]]:
# the following should have been a dedicated _get_sent_idx, but since we already pickled the synonyms, adding new
# functions would require a converter
sig = CachedSynonyms.get_sent_signature(sent)
try:
idx = self._sig_to_idx_dict[sig]
except KeyError:
sent_ = ' '.join(sent) if isinstance(sent, list) else sent
if not hasattr(self, '_black_list'):
self._black_list = set()
if not hasattr(self, '_known_sents'):
self._known_sents = {}
if sent_ in self._known_sents:
idx = self._known_sents[sent_]
elif sent_ in self._black_list:
# no need to check it again, we already know it's not here
if raise_on_failure:
raise KeyError(f'Signature for the sentence was not found at all and is blacklisted: {sent}')
else:
split_sent = sent.split(' ') if isinstance(sent, str) else sent
syns = [[] for _ in range(len(split_sent))] if not append_self else [[w] for w in split_sent]
return syns
else:
# there is an issue with the BERT tokenizer transforming "do not" into "don't", so we give it one last go before giving up here
new_sig = sig.replace("don't", "donot")
if new_sig in self._sig_to_idx_dict:
idx = self._sig_to_idx_dict[new_sig]
else:
# there is still the option of the input being too long and clipped by the tokenizer. the following will take care of it,
# though very wasteful in cpu time.. (we need to both look for the sig as a start of one of the existing signatures,
# and also take care of both "do not" and clipping issue appearing together
# There is also a potential problem that happen in BoolQ for example with special characters being messed up causing a
# failure to find the correct signature, in which case we can ignore
potential_indices = []
success = False
for long_sig, idx in self._sig_to_idx_dict.items():
if len(long_sig) > len(sig) and (long_sig.startswith(sig) or long_sig.startswith(new_sig)):
potential_indices.append(idx)
if len(potential_indices) == 1:
idx = potential_indices[0]
success = True
elif append_self:
# let's make a last attempt since some non-ascii characters may interfere with the signatures. note that this might
# alter the sentence's punctuation and thus can only be used if append_self is on (to allow for replacement)
if not hasattr(self, '_stripped_sig_to_idx_dict') or self._stripped_sig_to_idx_dict is None:
self._stripped_sig_to_idx_dict = {re.sub('[^a-z0-9]', '', sig): idx for sig, idx in self._sig_to_idx_dict.items()}
stripped_sig = re.sub('[^a-z0-9]', '', sig)
stripped_new_sig = re.sub('[^a-z0-9]', '', new_sig)
if stripped_sig in self._stripped_sig_to_idx_dict:
idx = self._stripped_sig_to_idx_dict[stripped_sig]
success = True
elif stripped_new_sig in self._stripped_sig_to_idx_dict:
idx = self._stripped_sig_to_idx_dict[stripped_new_sig]
success = True
if success:
pass # otherwise, we failed
elif not raise_on_failure:
split_sent = sent.split(' ') if isinstance(sent, str) else sent
syns = [[] for _ in range(len(split_sent))] if not append_self else [[w] for w in split_sent]
# print(f'Warning - got a sample with no matching signature: {sent}')
print(f'Warning - got a sample with no matching signature..')
self._black_list.add(sent_)
return syns
elif len(potential_indices) == 0:
raise KeyError(f'Signature for the sentence was not found at all: {sent}')
elif len(potential_indices) > 1:
raise KeyError(f'Signature for the sentence was found at multiple ({len(potential_indices)}) places: {sent}')
self._known_sents[sent_] = idx
syns = deepcopy(self.get_possible_perturbations_by_sent_idx(idx))
if append_self:
for s, w in zip(syns, self._idx_to_sent_dict[idx].split()):
s.append(w)
return syns
def get_possible_perturbations_by_sent_idx(self, idx: int):
if idx not in self._idx_to_syns_dict:
idx = self._idx_to_cached_idx_dict[idx]
syns = self._idx_to_syns_dict[idx]
return [syns.get(j, []) for j in range(self._idx_to_sent_len[idx])]
def get_sent_by_sent_idx(self, idx: int):
# Note that this is the index in the cache, and may not correspond with the current shuffle of the dataset, so it shouldn't be used directly
if idx not in self._idx_to_syns_dict:
idx = self._idx_to_cached_idx_dict[idx]
return self._idx_to_sent_dict[idx]
@staticmethod
def get_sent_signature(sent: Union[str, List[str]]):
if not isinstance(sent, str):
sent = ' '.join(sent)
return re.sub(r'\s', '', sent.lower())
def is_sig_already_exists(self, sent, idx=None, sig=None):
sig = CachedSynonyms.get_sent_signature(sent) if sig is None else sig
if idx is not None and idx in self._idx_to_sent_dict:
assert CachedSynonyms.get_sent_signature(self.get_sent_by_sent_idx(idx)) == sig
if sig not in self._sig_to_idx_dict:
return False
if idx is not None and idx not in self._idx_to_syns_dict and idx not in self._idx_to_cached_idx_dict:
# link this idx to the already existing one
cached_idx = self._sig_to_idx_dict[sig]
self._idx_to_cached_idx_dict[idx] = cached_idx
return True
def add_new_sent(self, sent, syns, idx, sig=None):
sig = CachedSynonyms.get_sent_signature(sent) if sig is None else sig
if sig in self._sig_to_idx_dict:
raise ValueError('Adding a new sent but signature already exists')
self._sig_to_idx_dict[sig] = idx
self._idx_to_sent_dict[idx] = sent
self._idx_to_syns_dict[idx] = syns
self._idx_to_sent_len[idx] = len(sent.split(' '))
def _run_sanity_check(self):
for sig, idx in self._sig_to_idx_dict.items():
assert sig == CachedSynonyms.get_sent_signature(self._idx_to_sent_dict[idx])
def _increase_indices_by(self, addition):
self._sig_to_idx_dict = {sig: idx+addition for sig, idx in self._sig_to_idx_dict.items()}
self._idx_to_sent_dict = {idx+addition: sent for idx, sent in self._idx_to_sent_dict.items()}
self._idx_to_syns_dict = {idx+addition: syns for idx, syns in self._idx_to_syns_dict.items()}
self._idx_to_cached_idx_dict = {idx+addition: cidx+addition for idx, cidx in self._idx_to_cached_idx_dict.items()}
self._idx_to_sent_len = {idx+addition: l for idx, l in self._idx_to_sent_len.items()}
@staticmethod
def merge_multiple_cached_synonyms(*cached_synonyms) -> 'CachedSynonyms':
# there might be indices clashes here, so we would make sure it won't happen, even though we may not use some indices. If we want,
# we can correct it later but it's not crucial.
# changes are made in place so be warned!
largest_index = [max(cs._sig_to_idx_dict.values())+1 for cs in cached_synonyms[:-1]]
additions = np.cumsum(largest_index).tolist()
orig_cached_synonyms = cached_synonyms[0]
orig_cached_synonyms._run_sanity_check()
for cs, addition in zip(cached_synonyms[1:], additions):
cs._run_sanity_check()
cs._increase_indices_by(addition)
# copy/link the cached sentences
for sig, idx in cs._sig_to_idx_dict.items():
if orig_cached_synonyms.is_sig_already_exists('', idx, sig):
continue
orig_cached_synonyms.add_new_sent(sent=cs._idx_to_sent_dict[idx], syns=cs._idx_to_syns_dict[idx], idx=idx, sig=sig)
# now link the sentences which were linked before
for idx, cached_idx in cs._idx_to_cached_idx_dict.items():
assert orig_cached_synonyms.is_sig_already_exists(cs.get_sent_by_sent_idx(idx), idx)
orig_cached_synonyms._run_sanity_check()
return orig_cached_synonyms
def __len__(self):
return len(self._sig_to_idx_dict) + len(self._idx_to_cached_idx_dict)
class RandomEncodedSynonyms(SynonymsCreator):
def __init__(self, vocabulary_path, n_syns=50): # TODO XXX
raise NotImplementedError("This probably won't work as is after the punctuation and capitalization fixes")
with open(vocabulary_path) as f:
rev_vocab = set(json.load(f).keys()).difference(stop_words)
self.rev_vocab_list = np.array(list(rev_vocab))
self.rev_vocab = {w: i for i, w in enumerate(self.rev_vocab_list)}
self.n_syns = n_syns
def get_possible_perturbations(self, sent: Union[str, List[str]], append_self=True, raise_on_failure=True) -> List[List[str]]:
split_sent = sent.split(' ') if isinstance(sent, str) else sent
rep_words = [i for i, w in enumerate(split_sent) if w in self.rev_vocab]
res = [[w] if append_self else [] for w in split_sent]
inds = np.random.randint(0, len(self.rev_vocab), (len(rep_words), self.n_syns + int(append_self)))
if append_self:
inds[:, -1] = [self.rev_vocab[split_sent[i]] for i in rep_words]
for i, w_is in zip(rep_words, inds):
res[i] = self.rev_vocab_list[w_is].tolist()
return res
_encoded_synonyms = None
_random_synonyms = None
def load_synonym_for_attacks(synonyms_path, dataset=None, train=None):
if synonyms_path is None:
assert dataset is not None and train is not None
syn_suffix = 'train' if train else 'dev'
synonyms_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data',
get_dataset_dir_name(dataset), f'synonyms_{syn_suffix}.pkl')
global _encoded_synonyms
if _encoded_synonyms is None:
_encoded_synonyms = CachedSynonyms.from_pkl(synonyms_path)
return _encoded_synonyms, None
elif 'vocab' in synonyms_path:
global _random_synonyms
if _random_synonyms is None:
_random_synonyms = RandomEncodedSynonyms(synonyms_path)
return _random_synonyms, None
with open(synonyms_path) as f:
synonyms = defaultdict(list)
synonyms.update(json.load(f))
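# peek at one non-empty entry to report whether the synonyms are POS-keyed dicts or plain lists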
for k, v in synonyms.items():
if len(v) > 0:
return synonyms, isinstance(v, dict)
MOCK_SIZES = False
CAP_AT = None
attack_size_cache = {}
def set_mock_sizes(mock=True):
global MOCK_SIZES
MOCK_SIZES = mock
def set_cap_at(cap_at=None):
global CAP_AT
CAP_AT = cap_at
def _get_synonyms_attack_space_size(orig_sentence, edit_distance, synonyms, requires_pos, allow_smaller_ed, syn_name):
global MOCK_SIZES, CAP_AT
if MOCK_SIZES:
return 1, 1
def _inner():
possible_modifications = sorted([len(ps) for i, ps in
enumerate(get_possible_perturbations(orig_sentence, synonyms, requires_pos, False))
if len(ps) > 0])[::-1]
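# attack space size: for every choice of `edit_distance` positions, multiply the number of alternatives at each chosen position, then sum over all choices (+1 for the unmodified sentence)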
pos = len(possible_modifications)
if not allow_smaller_ed and len(possible_modifications) < edit_distance:
return 1, pos
if not allow_smaller_ed:
if CAP_AT is None: # compute exact result which may take a very very long time for large edit distance and many attack positions
return np.product(np.asarray(list(itertools.combinations(possible_modifications, edit_distance))), axis=1).sum() + 1, pos
total = 0
for c in itertools.combinations(possible_modifications, edit_distance):
total += np.product(c)
if total >= CAP_AT:
break
return total+1, pos
if len(possible_modifications) == 1:
return possible_modifications[0] + 1, pos
# TODO - prepare estimates here as it takes way too long
if CAP_AT is None: # compute exact result which may take a very very long time for large edit distance and many attack positions
sz = sum(np.product(np.asarray(list(itertools.combinations(possible_modifications, eds))), axis=1).sum()
for eds in list(range(1, min(len(possible_modifications), edit_distance) + 1))) + 1
# there may be an overflow
if sz < 0:
sz = 1e18
return sz, pos
total = 0
for eds in range(1, min(len(possible_modifications), edit_distance) + 1):
for c in itertools.combinations(possible_modifications, eds):
total += np.product(c)
if total >= CAP_AT:
break
return total+1, pos
key = ' '.join(orig_sentence) + str(edit_distance) + syn_name + str(requires_pos) + str(allow_smaller_ed)
if key not in attack_size_cache:
attack_size_cache[key] = _inner()
return attack_size_cache[key]
SYN_NAMES = {
'counterfitted_neighbors': 'Full',
'english_pos_counterfitted_neighbors': 'POS',
'filtered_counterfitted_neighbors': 'Token',
}
def synonym_file_short_name(syn_path):
if syn_path is None:
return "PhiPrime"
syn_name = os.path.splitext(os.path.basename(syn_path))[0]
return SYN_NAMES.get(syn_name, syn_name)
if __name__ == '__main__':
with open('./../data/counterfitted_neighbors.json') as f:
synonyms = json.load(f)
filtered_results = _create_english_only_pos_neighbors_json(synonyms)
with open('./../data/english_pos_counterfitted_neighbors.json', 'w') as f:
json.dump(filtered_results, f)
|
# Import Person_detection_tan
from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten
import testdata as testdata
import logging
from pathlib import Path
# import cv2
from skimage.filters import threshold_otsu, threshold_multiotsu
from matplotlib import image
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import matplotlib.patches as patches
try:
from PIL import Image
except ImportError:
import Image
# Set Constants
FILENAME = 'RAISE_all.csv'
RAISE_DIR = '/original/RAISE/'
# Variables
global MODEL_DIR
# global CHECKPOINT_DIR
global FIRST_ONLY
global TOTAL_IMAGES
MODEL_DIR = os.path.dirname(os.path.abspath(__file__))
MODEL_DIR = '/scratch/projekt1/demo/lr-0.000001_bs-4_ep-160_ti-8155_nm-True_is-1_us-Tan_kr-0.01/tmp/'
# CHECKPOINT_DIR = os.path.dirname(os.path.abspath(__file__))
# PLOT_DIR = '/scratch/projekt1/submitSkript/plots/'
PLOT_DIR = 'plots/'
plot_path = ''
# Set up logging
# local_logger = logging.getLogger('Localization')
# local_logger.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s', datefmt='%m-%d %H:%M', filename='output_loc.log', filemode='w')
# logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s', datefmt='%m-%d %H:%M', filename='/scratch/projekt1/Source/localization.log', filemode='w')
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# formatter = logging.Formatter('%(asctime)s: %(message)s')
# console.setFormatter(formatter)
# logging.getLogger().addHandler(console)
# local_logger = logging.getLogger()
local_logger = logging.getLogger('localization')
for hdlr in local_logger.handlers[:]: # remove all old handlers
local_logger.removeHandler(hdlr)
def preprocess_raise_test_path(total_images = 0):
all_img_path_vector = []
raise_db = pd.read_csv(RAISE_DIR + FILENAME)
image_paths = raise_db.File
total_img = image_paths.shape[0]
if total_images > 0:
total_img = total_images
if FIRST_ONLY:
total_img = 50
for row in tqdm(range(total_img)):
for root, dirs, files in os.walk(RAISE_DIR + 'RaiseTest'):
for file in files:
if file.endswith(str(image_paths.loc[row]) + '.TIF'):
all_img_path_vector.append(root+'/'+str(file))
return all_img_path_vector
def preprocess_raise_test_binary(total_images = 0):
all_img_binary_vector = []
raise_db = pd.read_csv(RAISE_DIR + FILENAME)
keywords = raise_db.Keywords
image_paths = raise_db.File
total_img = image_paths.shape[0]
if total_images > 0:
total_img = total_images
if FIRST_ONLY:
total_img = 50
for row in tqdm(range(total_img)):
for root, dirs, files in os.walk(RAISE_DIR + 'RaiseTest'):
for file in files:
if file.endswith(str(image_paths.loc[row]) + '.TIF'):
if('people' in str(keywords.loc[row]) ):
all_img_binary_vector.append([1, 0])
else:
all_img_binary_vector.append([0, 1])
return all_img_binary_vector
# Load model
# def load_model(model_dir = MODEL_DIR, checkpoint_dir = CHECKPOINT_DIR):
# # # Load existing model
# # model = tf.keras.models.load_model(MODEL_PATH + model_name, compile=False)
# model = tf.keras.models.load_model(model_dir)
# latest = tf.train.latest_checkpoint(checkpoint_dir)
# model.load_weights(latest)
# # # Print summary
# local_logger.info(model.summary())
# return model
def get_weights(model):
# local_logger.info(model.trainable_variables)
# trainable_variables = tf.Variable(model.trainable_variables)
# model_weights = trainable_variables.eval()
# local_logger.info('Layer weight: ')
# local_logger.info('Trainable variables: ' + repr(model.trainable_variables))
# local_logger.info('[0]: ' + repr(model.trainable_variables[0]))
# local_logger.info('[1]: ' + repr(model.trainable_variables[1]))
model_weights = np.asarray(model.trainable_variables[0].numpy())
# local_logger.info('Model weights: ' + repr(model_weights))
# local_logger.info(model_weights.shape)
model_weights_2 = model_weights[:,1]
# local_logger.info('w2: ' + repr(model_weights_2))
# local_logger.info(model_weights_2.shape)
model_weights_1 = model_weights[:,0]
# local_logger.info('w1: ' + repr(model_weights_1))
# local_logger.info(model_weights_1.shape)
if WEIGHT_MATRIX == 'w1':
# local_logger.info('weight w1')
return model_weights_1
else:
# local_logger.info('weight w2')
return model_weights_2
# Get flatten layer output
def get_flat_img(img):
if (FIRST_ONLY):
layer_last_model = tf.keras.Model(model.inputs, model.layers[-1].output)
layer_last_out = layer_last_model(img, training=False)
local_logger.info('Layer [-1]: ' + repr(layer_last_out))
local_logger.info('Layer [-1] shape: ' + repr(layer_last_out.numpy().shape))
flatten_layer_model = tf.keras.Model(model.inputs, model.layers[-2].output)
flatten_layer_out = flatten_layer_model(img, training=False)
# local_logger.info('Layer [-2]: ' + repr(flatten_layer_out))
# local_logger.info('Layer [-2] shape: ' + repr(flatten_layer_out.numpy().shape))
if (FIRST_ONLY):
layer_first_out = image_features_extract_model(img, training=False)
local_logger.info('Layer [-3]: ' + repr(layer_first_out))
local_logger.info('Layer [-3] shape: ' + repr(layer_first_out.numpy().shape))
flat_img = flatten_layer_out.numpy()[0]
# features_extract = image_features_extract_model(img)
# flat_img = image_flatten(features_extract).numpy()[0]
# local_logger.info(flat_img)
return flat_img
# # Calculate the weight matrix to locate people
def get_2d_sum_mat(weight_matrix, layer_matrix):
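# Class-activation-map style localization: weight each of the 2048 feature channels by its classifier weight,
# reshape to the 9x15 spatial grid, and sum over channels to get a per-cell score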
# local_logger.info('Weight matrix shape: ' + repr(weight_matrix.shape))
# local_logger.info('Layer matrix shape: ' + repr(layer_matrix.shape))
flat_product_mat = [a*b for a, b in zip(weight_matrix , layer_matrix)]
flat_product_mat = np.asarray(flat_product_mat)
# cubic_product_mat = flat_product_mat.reshape((9, 15, 2048))
cubic_product_mat = flat_product_mat.reshape(9, 15, 2048)
sum_mat = np.sum(cubic_product_mat, axis=2)
if (FIRST_ONLY):
# For debugging only
cubic_layer_matrix = layer_matrix.reshape(9, 15, 2048)
local_logger.info('Cubic layer after reshape: ' + repr(cubic_layer_matrix))
local_logger.info('Cubic layer shape: ' + repr(cubic_layer_matrix.shape))
local_logger.info('Flat product shape: ' + repr(flat_product_mat.shape))
local_logger.info('Cubic product: ' + repr(cubic_product_mat))
local_logger.info('Cubic product shape: ' + repr(cubic_product_mat.shape))
local_logger.info('Result matrix: ' + repr(sum_mat))
local_logger.info('Result matrix shape: ' + repr(sum_mat.shape))
return sum_mat
# Detect people in one image
def detect_people(img, img_path):
# local_logger.info(img.shape)
# First, predict if image has people
prediction = model(img, training=False).numpy()
# local_logger.info(prediction)
# Second, if the image has people then do the localization
if prediction[0][0] > prediction[0][1]:
flat_img = get_flat_img(img)
sum_mat = get_2d_sum_mat(model_weights, flat_img)
# replace all negative values with 0
no_neg_sum_mat = sum_mat.copy()
no_neg_sum_mat[no_neg_sum_mat < 0] = 0
# Using Otsu threshold to locate people
# sum_mat = sum_mat.astype('float32')
# max = np.max(sum_mat)
# min = np.min(sum_mat)
# raw_th = max - 0.4*(max - min)
# otsu_th, otsu_mat = cv2.threshold(sum_mat, raw_th, max, cv2.THRESH_TOZERO+cv2.THRESH_OTSU)
# local_logger.info(max, min, otsu_th)
# draw_img_plot(img_path, otsu_mat, otsu_th, PLOT_DIR)
# thresh = threshold_otsu(sum_mat)
thresh_arr = threshold_multiotsu(no_neg_sum_mat, classes=3)
thresh = thresh_arr[-1]
# local_logger.info('Otsu threshold value: ' + repr(thresh))
# Considering threshold_multiotsu for better localization?
draw_img_plot(img_path, sum_mat, thresh, plot_path)
return 1
else:
return 0
# Get 1st image containing people
def get_first_detection(path_vector, class_vector, all_img_name_vector):
# img_pos = -1
# for index in range(len(class_vector)):
# if class_vector[index] == [1,0]:
# img_pos = index
# break
# img = all_img_name_vector[img_pos]
# return detect_people(img, path_vector[img_pos])
total_imgs = len(path_vector)
positive = 0
for i in tqdm(range(total_imgs)):
img_path = path_vector[i]
img = all_img_name_vector[i]
positive = detect_people(img, img_path)
if positive == 1:
break
return positive
# Detect people in all test dataset
def detect_people_all(path_vector, class_vector, all_img_name_vector):
total_imgs = len(path_vector)
total_positives = 0
for i in tqdm(range(total_imgs)):
img_path = path_vector[i]
# local_logger.info(img_path)
img = all_img_name_vector[i]
pos = detect_people(img, img_path)
total_positives += pos
return total_positives
# # Draw image and plot
def draw_img_plot(img_path, result, thres = 0.4, plot_dir = PLOT_DIR):
my_dpi=100.
temp_img = Image.open(img_path)
temp_img = temp_img.resize((600, 360))
# temp_img = np.asarray(temp_img)
# local_logger.info(temp_img.shape)
img_filename = os.path.basename(img_path)
# local_logger.info(img_filename)
fig = plt.figure(figsize=(15, 9),dpi=my_dpi)
ax=fig.add_subplot(111)
# Remove whitespace from around the image
# fig.subplots_adjust(left=0,right=1,bottom=0,top=1)
# Set the gridding interval: here we use the major tick interval
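# 40 px cells over the 600x360 preview give the 15x9 grid that matches the feature-map resolution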
myInterval=40.
loc = plticker.MultipleLocator(base=myInterval)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
# Add the grid
ax.set_xticks(np.arange(0, 15*myInterval, myInterval))
ax.grid(which='major', axis='both', linestyle='-')
# Add the image
ax.imshow(temp_img)
# fig.savefig(PLOT_DIR + '_img.jpg', dpi=my_dpi)
for (i, j), z in np.ndenumerate(result):
row = (i + 0.5)*myInterval
col = (j + 0.5)*myInterval
if z < thres:
text_color = 'w'
else:
text_color = 'r'
rect = patches.Rectangle((j*myInterval,i*myInterval),myInterval,myInterval,linewidth=2,edgecolor='r',facecolor=(0,1,0,0.3))
ax.add_patch(rect)
ax.text(col, row, '{:0.4f}'.format(z), ha='center', va='center', color=text_color)
# plt.show()
fig.savefig(plot_dir + img_filename + '_plot.jpg', dpi=my_dpi)
plt.close("all")
if (__name__ == "__main__"):
PARSER = argparse.ArgumentParser()
# Adding arguments for parser
PARSER.add_argument('--model_dir', type=str, default=MODEL_DIR, help='Model and checkpoint dir')
# PARSER.add_argument('--checkpoint', type=str, default=os.path.dirname(os.path.abspath(__file__)), help='Checkpoint dir')
PARSER.add_argument('--weight_matrix', type=str, default='w1', help='Which weight matrix to use: w1 or w2')
PARSER.add_argument('--first_only', type=str2bool, default=False, help='Get first result only')
PARSER.add_argument('--total_images', type=int, default=150, help='Defining size of loaded dataset')
PARSER.add_argument('--plot_dir', type=str, default=PLOT_DIR, help='Plot dir')
args = PARSER.parse_args()
for name, value in args._get_kwargs():
variable_name = name.upper()
exec(variable_name + " = value")
# if name=='model_dir':
# MODEL_DIR=value
# continue
# if name=='checkpoint':
# CHECKPOINT_DIR=value
# continue
# if name=='first_only':
# FIRST_ONLY=value
# continue
# if name=='total_images':
# TOTAL_IMAGES=value
# continue
# if name=='plot_dir':
# PLOT_DIR=value
# continue
# Create plots directory
plot_parent_dir = MODEL_DIR.rstrip('//').replace('/tmp', '/')
plot_path = plot_parent_dir + PLOT_DIR
Path(plot_path).mkdir(parents=True, exist_ok=True)
# Logging
local_logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
if FIRST_ONLY:
fh = logging.FileHandler(plot_parent_dir + 'localization_first.log')
else:
fh = logging.FileHandler(plot_parent_dir + 'localization.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
local_logger.addHandler(ch)
local_logger.addHandler(fh)
# local_logger.info(FIRST_ONLY)
# local_logger.info(TOTAL_IMAGES)
# local_logger.info(MODEL_DIR)
# Get model weights
model = load_model(model_dir = MODEL_DIR, checkpoint_dir = MODEL_DIR)
model_weights = get_weights(model)
# Get test dataset vector
local_logger.info('Processing dataset')
total_images = TOTAL_IMAGES
start_index = 300 #For demo purpose
# path_vector = preprocess_raise_test_path(total_images)
# class_vector = preprocess_raise_test_binary(total_images)
# all_img_name_vector = preprocess_raise_img_vector(path_vector)
path_vector = testdata.path_vector[start_index:total_images+start_index]
class_vector = testdata.class_vector[start_index:total_images+start_index]
image_vector = []
for i in tqdm(range(len(path_vector))):
img_path = path_vector[i]
img = image.imread(img_path)
img = tf.image.resize(img, (IMG_HEIGHT, IMG_WIDTH))
img = tf.expand_dims(img, axis=0)
img = tf.cast(img, tf.float32)
img = tf.keras.applications.inception_v3.preprocess_input(img)
# local_logger.info(img.shape)
image_vector.append(img)
# image_vector = np.asarray(image_vector)
# local_logger.info(len(path_vector))
# local_logger.info(len(class_vector))
# local_logger.info(image_vector.shape)
if FIRST_ONLY:
first = get_first_detection(path_vector, class_vector, image_vector)
else:
total_pos = detect_people_all(path_vector, class_vector, image_vector)
local_logger.info('Localization is complete')
local_logger.info('Number of true positives: '+repr(class_vector.count([1,0])))
local_logger.info('Number of predicted positives: '+repr(total_pos))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
######################################################################
# Tools
######################################################################
from os.path import join, dirname
import sys
from pathlib import Path
sys.path.insert(0, str(Path.home()) + '/Z01-Tools/scripts/')
from config import *
class tstHW(object):
def __init__(self):
self.pwd = os.path.dirname(os.path.abspath(__file__))
self.rtl = os.path.join(self.pwd, 'src/')
self.tst = os.path.join(self.pwd,'')
self.log = os.path.join(TOOL_PATH,'log','logF.xml')
self.work = vhdlScript(self.log)
def addSrc(self, work):
work.addSrc(self.rtl)
work.addSrcFile(self.rtl+'Dispositivos/RAM/RAM16K.vho')
work.addSrcFile(self.rtl+'Dispositivos/Screen/Screen.vho')
def addTst(self, work):
if work.addTstConfigFile(self.tst) is False:
sys.exit(-1)
def add(self, work):
self.addSrc(work)
self.addTst(work)
if __name__ == "__main__":
# initialize notification
noti = notificacao(PROJ_F_NAME)
# Init ALU
tstCpu = tstHW()
tstLogiComb = tstLogiComb()
tstUla = tstUla()
tstSeq = tstLogiSeq()
tstCpu.addSrc(tstCpu.work)
tstUla.addSrc(tstCpu.work)
tstLogiComb.addSrc(tstCpu.work)
tstSeq.addSrc(tstCpu.work)
tstCpu.add(tstCpu.work)
if tstCpu.work.run() < 0:
noti.error('\n Erro de compilação VHDL')
sys.exit(-1)
print("===================================================")
r = report(tstCpu.log, 'F', 'HW')
# notification / test log
noti.hw(r)
#print("Reporting test result to server")
#r.send()
sys.exit(r.error)
print("===================================================")
|
class WillBaseEncryptionBackend(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def encrypt_to_b64(raw):
raise NotImplementedError
@staticmethod
def decrypt_from_b64(enc):
raise NotImplementedError
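# A minimal sketch of a concrete backend for the interface above (illustrative only, not Will's bundled AES
# backend): it merely base64-encodes the payload and performs no real encryption.
import base64


class Base64OnlyEncryptionBackend(WillBaseEncryptionBackend):
    @staticmethod
    def encrypt_to_b64(raw):
        # accept str or bytes; return base64 text
        if isinstance(raw, str):
            raw = raw.encode("utf-8")
        return base64.b64encode(raw).decode("ascii")

    @staticmethod
    def decrypt_from_b64(enc):
        # returns the original bytes
        return base64.b64decode(enc)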
|
import tempfile
from os.path import dirname, join
from os import environ
GENERATED_DIR = join(dirname(__file__), 'generated')
MATRIX_FILE_NAMING = 'universal_automaton_%s.csv'
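# When PYFFS_SETTINGS_MODE=test, generated matrices are written to a throwaway temporary directory instead of the package tree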
if environ.get('PYFFS_SETTINGS_MODE', 'release') == 'test':
temp_dir = tempfile.TemporaryDirectory()
GENERATED_DIR = temp_dir.name
|
##########################################################################
## Imports & Configuration
##########################################################################
from math import pi
from bokeh.io import show, output_file
from bokeh.models import ColumnDataSource, HoverTool, LinearColorMapper, CategoricalColorMapper
from bokeh.plotting import figure
import pandas as pd
#Allow modules to import each other at parallel file structure (TODO clean up this configuration in a refactor, it's messy...)
from inspect import getsourcefile
import os.path, sys
current_path = os.path.abspath(getsourcefile(lambda:0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
repo_dir = parent_dir[:parent_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import database_management
##########################################################################
## Functions
##########################################################################
decisions_filename_noending = "random200_decisions"
connection = database_management.get_database_connection('database')
#Get our data from the CSV export. TODO -we can later switch this to run the SQL queries directly.
#decisions_path = "summary_outputs/" + decisions_filename_noending + ".csv"
#decisions_df = pd.read_csv(decisions_path, encoding="utf8")
# Open and read the SQL command file as a single buffer
fd = open('decisions_table_only.sql', 'r')
sqlFile = fd.read()
fd.close()
decisions_df = pd.read_sql(sqlFile,connection,parse_dates=['tracs_overall_expiration_date','previous_expiration_date'])
decisions_df.fillna('none', inplace=True)
source = ColumnDataSource(data = decisions_df)
contracts = decisions_df.contract_number.unique().tolist()
snapshots = decisions_df.snapshot_id.unique().tolist()
snapshots.sort()
# Start making the plot
output_name = decisions_filename_noending + '.html'
output_file(output_name)
# Six colors, mapping one-to-one onto the decision factors listed below, in the same order
colors = ['#CD5C5C','#C0D9AF', '#738269', '#d3d3d3', '#0099CC', '#d3bfe0']
mapper = CategoricalColorMapper(factors=['out','in', 'restored', 'no change', 'suspicious','first'], palette=colors)
TOOLS = "hover,save,pan,box_zoom,wheel_zoom"
plot_height = len(contracts)*30
p = figure(title="Snapshot tracking",
x_range=snapshots, y_range=contracts,
x_axis_location="above", plot_width=900, plot_height=plot_height,
tools=TOOLS)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "9pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = pi / 3
p.rect(x='snapshot_id', y='contract_number', width=0.95, height=0.9,
#x=["c2006-02","c2006-02"], y=[1,2], width=1, height=1, #
source=source,
fill_color={'field': 'decision', 'transform': mapper}, #'#0099CC',
line_color='#d3d3d3')
p.select_one(HoverTool).tooltips = [
('snapshot', '@snapshot_id'),
('decision', '@decision'),
('expiration_extended_test', '@expiration_extended_test'),
('status_test', '@status_test'),
('expiration_passed_test', '@expiration_passed_test'),
('tracs_overall_expiration_date', '@tracs_overall_expiration_date'),
('previous_expiration_date', '@previous_expiration_date'),
('change in expiration from previous snapshot', '@time_diff'),
('current contract duration (months)', '@contract_term_months_qty'),
('tracs_status_name', '@tracs_status_name'),
('previous_status', '@previous_status'),
('contract_number', '@contract_number')
]
show(p) # show the plot
|
#MenuTitle: Rotate Around Anchor
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Rotate selected glyphs (or selected paths and components) around a 'rotate' anchor.
"""
import vanilla, math
from Foundation import NSPoint, NSAffineTransform, NSAffineTransformStruct, NSMakePoint, NSRect
rotateAnchorName = "rotate"
def centerOfRect(rect):
"""
Returns the center of NSRect rect as an NSPoint.
"""
center = NSPoint( rect.origin.x + rect.size.width/2, rect.origin.y + rect.size.height/2 )
return center
class Rotator(object):
"""GUI for rotating selected glyphs."""
def __init__(self):
# Window 'self.w':
windowWidth = 320
windowHeight = 85
windowWidthResize = 0 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.RotateAroundAnchor.mainwindow" # stores last window position and size
)
linePos, inset, lineHeight = 10, 12, 22
self.w.anchor_text = vanilla.TextBox( (inset, linePos+3, 120, 15), "Set 'rotate' anchor:", sizeStyle = 'small')
self.w.anchor_x = vanilla.EditText( (inset+110, linePos, 40, 19), "0", sizeStyle = 'small', callback=self.SavePreferences)
self.w.anchor_y = vanilla.EditText( (inset+110+45, linePos, 40, 19), "0", sizeStyle = 'small', callback=self.SavePreferences)
self.w.updateButton = vanilla.SquareButton( (inset+110+90, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.updateRotateAnchor )
self.w.anchor_button = vanilla.Button( (inset+110+120, linePos+1, -inset, 19), "Insert", sizeStyle = 'small', callback = self.insertRotateAnchor)
linePos += lineHeight
self.w.rotate_text1 = vanilla.TextBox((inset, linePos+3, 55, 19 ), "Rotate by", sizeStyle = 'small')
self.w.rotate_degrees = vanilla.EditText((inset+60, linePos, 35, 19 ), "10", sizeStyle = 'small', callback = self.SavePreferences)
self.w.rotate_text2 = vanilla.TextBox((inset+60+40, linePos+3, 50, 19 ), "degrees:", sizeStyle = 'small')
self.w.rotate_ccw = vanilla.Button((-150, linePos+1, -70-inset, 19 ), u"↺ ccw", sizeStyle = 'small', callback = self.rotate )
self.w.rotate_cw = vanilla.Button((-80, linePos+1, -inset, 19 ), u"↻ cw", sizeStyle = 'small', callback = self.rotate )
linePos += lineHeight
self.w.stepAndRepeat_text1 = vanilla.TextBox((inset, linePos+3, 55, 19), "Repeat", sizeStyle = 'small')
self.w.stepAndRepeat_times = vanilla.EditText((inset+60, linePos, 35, 19), "5", sizeStyle = 'small', callback = self.SavePreferences)
self.w.stepAndRepeat_text2 = vanilla.TextBox((inset+60+40, linePos+3, 50, 19), "times:", sizeStyle = 'small')
self.w.stepAndRepeat_ccw = vanilla.Button((-150, linePos+1, -70-inset, 19), u"↺+ ccw", sizeStyle = 'small', callback = self.rotate )
self.w.stepAndRepeat_cw = vanilla.Button((-80, linePos+1, -inset, 19), u"↻+ cw", sizeStyle = 'small', callback = self.rotate )
if not self.LoadPreferences():
print( "Rotate Around Anchor: Could not load prefs, will resort to defaults." )
self.updateRotateAnchor()
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender=None ):
try:
Glyphs.defaults["com.mekkablue.rotateAroundAnchor.rotate_degrees"] = self.w.rotate_degrees.get()
Glyphs.defaults["com.mekkablue.rotateAroundAnchor.stepAndRepeat_times"] = self.w.stepAndRepeat_times.get()
Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_x"] = self.w.anchor_x.get()
Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_y"] = self.w.anchor_y.get()
return True
except:
return False
def LoadPreferences( self ):
try:
self.w.rotate_degrees.set( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.rotate_degrees"] )
self.w.stepAndRepeat_times.set( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.stepAndRepeat_times"] )
self.w.anchor_x.set( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_x"] )
self.w.anchor_y.set( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_y"] )
return True
except:
return False
def insertRotateAnchor(self, sender=None):
try:
selectedLayers = Glyphs.currentDocument.selectedLayers()
myRotationCenter = NSPoint()
myRotationCenter.x = int( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_x"] )
myRotationCenter.y = int( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_y"] )
myRotationAnchor = GSAnchor( "#%s"%rotateAnchorName, myRotationCenter )
for thisLayer in selectedLayers:
# adds '#rotate' if it doesn't exist, resets it if it exists:
thisLayer.addAnchor_( myRotationAnchor )
except Exception as e:
import traceback
print(traceback.format_exc())
def updateRotateAnchor(self, sender=None):
try:
# (#)rotate anchor present, including those shining through from components:
selectedLayer = Glyphs.currentDocument.selectedLayers()[0]
for anchorName in ("#%s"%rotateAnchorName, rotateAnchorName):
for thisAnchor in selectedLayer.anchorsTraversingComponents():
if thisAnchor.name == anchorName:
self.w.anchor_x.set( int(thisAnchor.x) )
self.w.anchor_y.set( int(thisAnchor.y) )
return
# no anchor present:
selectionRect = selectedLayer.boundsOfSelection()
if selectionRect:
# take center of selection:
selectionCenter = centerOfRect(selectionRect)
self.w.anchor_x.set( int(selectionCenter.x) )
self.w.anchor_y.set( int(selectionCenter.y) )
self.insertRotateAnchor()
else:
# no selection either: take origin
self.w.anchor_x.set( 0 )
self.w.anchor_y.set( 0 )
self.insertRotateAnchor()
except Exception as e:
import traceback
print(traceback.format_exc())
def rotationTransform( self, rotationCenter, rotationDegrees, rotationDirection ):
try:
rotationX = rotationCenter.x
rotationY = rotationCenter.y
rotation = rotationDegrees * rotationDirection
RotationTransform = NSAffineTransform.transform()
RotationTransform.translateXBy_yBy_( rotationX, rotationY )
RotationTransform.rotateByDegrees_( rotation )
RotationTransform.translateXBy_yBy_( -rotationX, -rotationY )
return RotationTransform
except Exception as e:
import traceback
print(traceback.format_exc())
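	# Note: rotationTransform composes translate(+center) -> rotate -> translate(-center),
	# i.e. the rotation happens around rotationCenter rather than around the origin.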
def rotate(self, sender):
# update settings to the latest user input:
if not self.SavePreferences( self ):
print("Note: 'Rotate Around Anchor' could not write preferences.")
selectedLayers = Glyphs.currentDocument.selectedLayers()
originatingButton = sender.getTitle()
if "ccw" in originatingButton:
rotationDirection = 1
else:
rotationDirection = -1
if "+" in originatingButton:
repeatCount = int( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.stepAndRepeat_times"] )
else:
repeatCount = 0
rotationDegrees = float( Glyphs.defaults["com.mekkablue.rotateAroundAnchor.rotate_degrees"] )
rotationCenter = NSPoint(
int(Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_x"]),
int(Glyphs.defaults["com.mekkablue.rotateAroundAnchor.anchor_y"]),
)
if len(selectedLayers) == 1:
selectionCounts = True
else:
selectionCounts = False
for thisLayer in selectedLayers:
# rotate individually selected nodes and components
try:
thisGlyph = thisLayer.parent
selectionCounts = selectionCounts and bool(thisLayer.selection) # True only if both are True
RotationTransform = self.rotationTransform( rotationCenter, rotationDegrees, rotationDirection )
print("rotationCenter, rotationDegrees, rotationDirection:", rotationCenter, rotationDegrees, rotationDirection)
RotationTransformMatrix = RotationTransform.transformStruct()
thisGlyph.beginUndo()
				if repeatCount == 0: # simple rotation
					# apply the rotation once; the method itself honors the current selection
					thisLayer.transform_checkForSelection_doComponents_( RotationTransform, selectionCounts, True )
else: # step and repeat paths and components
newPaths, newComps = [], []
for i in range(repeatCount):
for thisPath in thisLayer.paths:
if thisPath.selected or not selectionCounts:
rotatedPath = thisPath.copy()
for j in range(i+1):
rotatedPath.applyTransform( RotationTransformMatrix )
newPaths.append(rotatedPath)
for thisComp in thisLayer.components:
if thisComp.selected or not selectionCounts:
rotatedComp = thisComp.copy()
for j in range(i+1):
rotatedComp.applyTransform( RotationTransformMatrix )
newComps.append(rotatedComp)
for newPath in newPaths:
thisLayer.paths.append(newPath)
for newComp in newComps:
thisLayer.components.append(newComp)
thisGlyph.endUndo()
except Exception as e:
import traceback
print(traceback.format_exc())
Rotator() |
import os
import sys
import subprocess
from rq import Queue
from redis import Redis
from builder_queue_lib import build
RQ_REDIS_HOST = os.environ.get('RQ_REDIS_HOST') or 'localhost'
RQ_REDIS_PORT = int(os.environ.get('RQ_REDIS_PORT') or '6379')
RQ_REDIS_DB = int(os.environ.get('RQ_REDIS_DB') or '5')
RQ_JOB_TIMEOUT_SECONDS = int(os.environ.get('RQ_JOB_TIMEOUT') or '240')
def main(arch, github_user, tag_name):
r = Redis(RQ_REDIS_HOST, RQ_REDIS_PORT, db=RQ_REDIS_DB)
q = Queue(connection=r)
try:
if arch == 'all':
print("Queueing all supported OS architectures")
out = subprocess.check_output(['./entrypoint.sh', '--list'])
for arch in (line.strip().decode() for line in out.splitlines() if line.strip()):
main(arch, github_user, tag_name)
else:
print("Queueing OS architecture (arch={} github_user={} tag_name={})".format(arch, github_user, tag_name))
q.enqueue(build, arch, github_user, tag_name, job_timeout=RQ_JOB_TIMEOUT_SECONDS)
finally:
r.close()
if __name__ == "__main__":
main(*sys.argv[1:])
|
import haiku as hk
import pytest
from jax import numpy as np
from jax import random
from numpyro import distributions as dist
from ramsey.attention import MultiHeadAttention
from ramsey.covariance_functions import exponentiated_quadratic
from ramsey.models import ANP, DANP, NP
# pylint: disable=too-many-locals,invalid-name,redefined-outer-name
@pytest.fixture()
def simple_data_set():
key = random.PRNGKey(0)
batch_size = 10
n, p = 50, 1
n_context = 20
key, sample_key = random.split(key, 2)
x = random.normal(key, shape=(n * p,)).reshape((n, p))
ys = []
for _ in range(batch_size):
key, sample_key1, sample_key2, sample_key3 = random.split(key, 4)
rho = dist.InverseGamma(5, 5).sample(sample_key1)
sigma = dist.InverseGamma(5, 5).sample(sample_key2)
K = exponentiated_quadratic(x, x, sigma, rho)
y = random.multivariate_normal(
sample_key3, mean=np.zeros(n), cov=K + np.diag(np.ones(n)) * 0.05
).reshape((1, n, 1))
ys.append(y)
x_target = np.tile(x, [batch_size, 1, 1])
y_target = np.vstack(np.array(ys))
key, sample_key = random.split(key, 2)
idxs_context = random.choice(
sample_key, np.arange(n), shape=(n_context,), replace=False
)
x_context = x_target[:, idxs_context, :]
y_context = y_target[:, idxs_context, :]
return x_context, y_context, x_target, y_target
def __lnp(**kwargs):
np = NP(
decoder=hk.nets.MLP([3, 2]),
latent_encoder=(hk.nets.MLP([3, 3]), hk.nets.MLP([3, 6])),
)
return np(**kwargs)
def __np(**kwargs):
np = NP(
decoder=hk.nets.MLP([3, 2]),
deterministic_encoder=hk.nets.MLP([4, 4]),
latent_encoder=(hk.nets.MLP([3, 3]), hk.nets.MLP([3, 6])),
)
return np(**kwargs)
def __anp(**kwargs):
np = ANP(
decoder=hk.nets.MLP([3, 2]),
deterministic_encoder=(
hk.nets.MLP([4, 4]),
MultiHeadAttention(8, 8, hk.nets.MLP([8, 8])),
),
latent_encoder=(hk.nets.MLP([3, 3]), hk.nets.MLP([3, 6])),
)
return np(**kwargs)
def __danp(**kwargs):
np = DANP(
decoder=hk.nets.MLP([3, 2]),
deterministic_encoder=(
hk.nets.MLP([4, 4]),
MultiHeadAttention(8, 8, hk.nets.MLP([8, 8])),
MultiHeadAttention(8, 8, hk.nets.MLP([8, 8])),
),
latent_encoder=(
hk.nets.MLP([3, 3]),
MultiHeadAttention(8, 8, hk.nets.MLP([8, 8])),
hk.nets.MLP([3, 6]),
),
)
return np(**kwargs)
@pytest.fixture(params=[__lnp, __np, __anp, __danp])
def module(request):
yield request.param
|
#!/usr/bin/python2
from generate import vehicle_offline
import argparse
import uuid
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=argparse.FileType('a'), default='/data/vehicles')
parser.add_argument('VEHICLE_ID')
args = parser.parse_args()
vehicle_offline(args.file, args.VEHICLE_ID)
|
#!/usr/pkg/bin/python2.7
from __future__ import print_function
import sys
import py_compile
def main(argv):
if len(argv) != 2:
print('Usage: ./gpyc.py <file_to_compile.py>')
return
file = argv[1]
py_compile.compile(file)
if __name__ == '__main__':
main(sys.argv) |
import re
from babel import numbers
from .utils import AnyRE
class NumberRE(AnyRE):
def __init__(self, locale):
self._locale = locale
self._element_re = {
"int" : "([0-9]+)",
"sign" : "(" + "|".join((re.escape(x) for x in (numbers.get_plus_sign_symbol(locale), numbers.get_minus_sign_symbol(locale)))) + ")",
"frac" : "(" + re.escape(numbers.get_decimal_symbol(locale)) + "[0-9]+)",
"exp" : "(" + re.escape(numbers.get_exponential_symbol(locale)) + "(" + "|".join((re.escape(x) for x in (numbers.get_plus_sign_symbol(locale), numbers.get_minus_sign_symbol(locale)))) + ")?" + "[0-9]+)",
"hex" : "([0-9a-fA-F]+)",
"?": "?",
}
@property
def common_decimal_formats(self):
return [
"%int",
"%sign%?%int",
"%sign%?%int%?%frac",
"%sign%?%int%?%frac%exp%?",
]
@property
def common_hex_formats(self):
return [
"%hex",
"0x%hex",
] |
from django.http import HttpResponse
from .models import Note, GGITUser, NoteElement
from .serializers import NoteSerializer, NoteElementSerializer, UserSerializer
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404, render
def index(request):
return HttpResponse('Hello Git')
@api_view(['GET', 'POST'])
@permission_classes([IsAuthenticated])
def note_list(request):
if request.method == 'GET':
list_of_notes = Note.objects.filter(user=request.user)
serializer_notes = NoteSerializer(list_of_notes, many=True)
return Response(serializer_notes.data)
elif request.method == "POST":
new_data = request.data
new_data["user"] = request.user.id
if not new_data.get('note_elements'):
new_data['note_elements'] = []
note_serializer = NoteSerializer(data=new_data)
if note_serializer.is_valid():
note_serializer.save()
return Response(note_serializer.data)
else:
return Response(note_serializer.errors)
@api_view(['POST'])
def username_is_unique(request):
if GGITUser.objects.filter(username=request.data['username']).exists():
return Response(status=400)
else:
return Response(status=200)
@api_view(["GET",'DELETE', 'PUT'])
@permission_classes([IsAuthenticated])
def note_details(request, note_id):
note = get_object_or_404(Note, pk = note_id)
serialized_note = NoteSerializer(note)
if request.method == "GET":
return Response(serialized_note.data)
elif request.method == 'PUT':
request_data=request.data
request_data["user"] = request.user.id
serialized_note = NoteSerializer(note, request_data)
if serialized_note.is_valid():
serialized_note.save()
return Response(serialized_note.data)
else:
return Response(serialized_note.errors)
else:
note.delete()
return Response(status= 200)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def users(request):
list_of_users = GGITUser.objects.all()
serializer_list_of_users = UserSerializer(list_of_users, many = True)
return Response(serializer_list_of_users.data)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def note_elements_list(request):
if request.method == 'GET':
list_of_elements = NoteElement.objects.all()
serializer_elements = NoteElementSerializer(list_of_elements, many=True)
return Response(serializer_elements.data)
@api_view(['GET','PUT'])
@permission_classes([IsAuthenticated])
def user_details(request, user_id):
user = get_object_or_404(GGITUser, pk=user_id)
if request.method == 'GET':
serialized_user = UserSerializer(user)
return Response(serialized_user.data)
elif request.method == 'PUT':
request_data = request.data
serialized_user = UserSerializer(user, request_data)
if serialized_user.is_valid():
serialized_user.save()
return Response(serialized_user.data)
else:
return Response(serialized_user.errors)
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def publish_note(request, note_id):
note = get_object_or_404(Note, pk=note_id)
note.is_published = True
note.save()
return Response(status=200)
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def done_note(request, note_id):
note = get_object_or_404(Note, pk=note_id)
note.is_done = True
note.save()
serialized_done_note = NoteSerializer(note)
return Response(serialized_done_note.data,status=200)
@api_view(['POST'])
def register(request):
user_data = request.data
password = user_data['password']
register_serialized = UserSerializer(data=user_data)
if register_serialized.is_valid():
user_instance = register_serialized.save()
user_instance.set_password(password)
user_instance.save()
return Response(register_serialized.data, status = 201)
else:
return Response(register_serialized.errors, status= 406)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def user_me(request):
user_data = request.user
serialized_user_me = UserSerializer(user_data)
return Response(serialized_user_me.data)
@api_view(['GET'])
def notes_public(request):
list_notes = Note.objects.filter(is_published=True)
serialized_list_notes = NoteSerializer(list_notes, many=True)
return Response(serialized_list_notes.data)
|
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from axians_netbox_pdu.choices import PDUUnitChoices
from axians_netbox_pdu.models import PDUConfig, PDUStatus
from dcim.models import Device, DeviceType
class PDUConfigSerializer(serializers.ModelSerializer):
"""Serializer for the PDUConfig model."""
def validate(self, data):
if DeviceType.objects.get(slug=data["device_type"]).poweroutlettemplates.count() == 0:
raise serializers.ValidationError({"device_type": "Device Type does not contain any Power Outlets."})
return data
device_type = serializers.SlugRelatedField(
many=False,
read_only=False,
queryset=DeviceType.objects.all(),
slug_field="slug",
required=True,
help_text="Netbox DeviceType 'slug' value",
validators=[UniqueValidator(PDUConfig.objects.all())],
)
power_usage_oid = serializers.CharField(required=True, help_text="OID string to collect power usage",)
power_usage_unit = serializers.ChoiceField(
choices=PDUUnitChoices.CHOICES, help_text="The unit of power to be collected",
)
class Meta:
model = PDUConfig
fields = [
"id",
"device_type",
"power_usage_oid",
"power_usage_unit",
]
class PDUStatusSerializer(serializers.ModelSerializer):
"""Serializer for the PSUStatus model."""
def validate(self, data):
if data["device"].poweroutlets.count() == 0:
raise serializers.ValidationError({"device": "Device does not contain any Power Outlets."})
return data
device = serializers.PrimaryKeyRelatedField(
many=False,
read_only=False,
queryset=Device.objects.all(),
validators=[UniqueValidator(queryset=PDUStatus.objects.all())],
required=True,
help_text="Netbox Device 'id' value",
)
power_usage = serializers.IntegerField(read_only=False, required=True, help_text="Power Usage Value")
class Meta:
model = PDUStatus
fields = ["id", "device", "power_usage"]
|
from __future__ import absolute_import
from asyncio import get_event_loop
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, Callable, Optional
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative.api import DeclarativeMeta
from asyncalchemy.global_executor import set_global_executor
from asyncalchemy.session_committer import SessionCommitter
# Callable doesn't support optional parameters (reuse_session in our case),
# therefore the "..." convention is used.
SessionFactoryType = Callable[..., SessionCommitter]
SQLITE_DRIVER_NAME = 'pysqlite'
def create_session_factory_from_engine(engine: Engine) -> SessionFactoryType:
"""
Create an SQLAlchemy session factory from an engine instance.
:param engine: An instance of an SQLAlchemy engine.
:returns: A function of (reuse_session=None) -> SessionCommitter
"""
session_maker = sessionmaker(bind=engine)
# SQLite workaround: SQLite objects cannot be shared between threads
if engine.driver == SQLITE_DRIVER_NAME:
print('WARNING: Sqlite backend detected. Using single threaded executor '
'- DO NOT USE IN PRODUCTION!')
executor = ThreadPoolExecutor(max_workers=1)
set_global_executor(global_executor=executor)
def factory(reuse_session: Optional[Session] = None) -> SessionCommitter:
"""
Create a session.
:param reuse_session: If set to an existing session, will reduce the SessionCommitter
to a noop and return that session instead. Useful for with statements that might
be within other with statements, but not necessarily.
:returns: A SessionCommitter
"""
return SessionCommitter(session_maker, reuse_session)
return factory
def create_session_factory(uri: str, base: Optional[DeclarativeMeta] = None,
**engine_kwargs: Dict[Any, Any]) -> SessionFactoryType:
"""
Create an SQLAlchemy session factory.
:param uri: The URI to the database to use
:param base: The declarative base class, if any
:param engine_kwargs: Keyword arguments to be passed to SQLAlchemy's create_engine
:returns: A function of (reuse_session=None) -> SessionCommitter
"""
engine = create_engine(uri, **engine_kwargs)
# Create tables.
if base is not None:
base.metadata.create_all(engine)
return create_session_factory_from_engine(engine)
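# Example usage (a minimal sketch; assumes SessionCommitter behaves as a context manager
# yielding an SQLAlchemy session, as the docstrings above describe; Base and MyModel are
# hypothetical declarative classes):
#
#     session_factory = create_session_factory('sqlite://', base=Base)
#     with session_factory() as session:
#         session.add(MyModel(name='example'))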
|
"""
- Edited by b3kc4t to use the stated references from the structure file.
"""
from __future__ import division
from __future__ import print_function
from struct import pack, unpack, calcsize
from six import b, PY3
from pentestui.pentest_api.attacks.kerberos.attackstatus.statusasreproast import StructAsrror
class Structure(StructAsrror):
"""
    Data type values and processing for Kerberos authentication
"""
commonHdr = ()
structure = ()
debug = 0
def __init__(self, data = None, alignment = 0):
if not hasattr(self, 'alignment'):
self.alignment = alignment
self.fields = {}
self.rawData = data
if data is not None:
self.fromString(data)
else:
self.data = None
@classmethod
def fromFile(self, file):
answer = self()
answer.fromString(file.read(len(answer)))
return answer
def setAlignment(self, alignment):
self.alignment = alignment
def setData(self, data):
self.data = data
def packField(self, fieldName, format = None):
if self.debug:
print("packField( %s | %s )" % (fieldName, format))
if format is None:
format = self.formatForField(fieldName)
if fieldName in self.fields:
ans = self.pack(format, self.fields[fieldName], field = fieldName)
else:
ans = self.pack(format, None, field = fieldName)
if self.debug:
print("\tanswer %r" % ans)
return ans
def getData(self):
if self.data is not None:
return self.data
data = bytes()
for field in self.commonHdr+self.structure:
try:
data += self.packField(field[0], field[1])
except Exception as e:
if field[0] in self.fields:
e.args += ("When packing field '%s | %s | %r' in %s" % (field[0], field[1], self[field[0]], self.__class__),)
else:
e.args += ("When packing field '%s | %s' in %s" % (field[0], field[1], self.__class__),)
if self.alignment:
if len(data) % self.alignment:
data += (b'\x00'*self.alignment)[:-(len(data) % self.alignment)]
return data
def fromString(self, data):
self.rawData = data
for field in self.commonHdr+self.structure:
if self.debug:
print("fromString( %s | %s | %r )" % (field[0], field[1], data))
size = self.calcUnpackSize(field[1], data, field[0])
if self.debug:
print(" size = %d" % size)
dataClassOrCode = b
if len(field) > 2:
dataClassOrCode = field[2]
try:
self[field[0]] = self.unpack(field[1], data[:size], dataClassOrCode = dataClassOrCode, field = field[0])
except Exception as e:
e.args += ("When unpacking field '%s | %s | %r[:%d]'" % (field[0], field[1], data, size),)
size = self.calcPackSize(field[1], self[field[0]], field[0])
if self.alignment and size % self.alignment:
size += self.alignment - (size % self.alignment)
data = data[size:]
return self
def __setitem__(self, key, value):
self.fields[key] = value
self.data = None
def __getitem__(self, key):
return self.fields[key]
def __delitem__(self, key):
del self.fields[key]
def __str__(self):
return self.getData()
def __len__(self):
return len(self.getData())
def pack(self, format, data, field = None):
if self.debug:
print(" pack( %s | %r | %s)" % (format, data, field))
if field:
addressField = self.findAddressFieldFor(field)
if (addressField is not None) and (data is None):
return b''
# void specifier
if format[:1] == '_':
return b''
# quote specifier
if format[:1] == "'" or format[:1] == '"':
return b(format[1:])
# code specifier
two = format.split('=')
if len(two) >= 2:
try:
return self.pack(two[0], data)
except:
fields = {'self':self}
fields.update(self.fields)
return self.pack(two[0], eval(two[1], {}, fields))
# address specifier
two = format.split('&')
if len(two) == 2:
try:
return self.pack(two[0], data)
except:
if (two[1] in self.fields) and (self[two[1]] is not None):
return self.pack(two[0], id(self[two[1]]) & ((1<<(calcsize(two[0])*8))-1) )
else:
return self.pack(two[0], 0)
# length specifier
two = format.split('-')
if len(two) == 2:
try:
return self.pack(two[0],data)
except:
return self.pack(two[0], self.calcPackFieldSize(two[1]))
# array specifier
two = format.split('*')
if len(two) == 2:
answer = bytes()
for each in data:
answer += self.pack(two[1], each)
if two[0]:
if two[0].isdigit():
if int(two[0]) != len(data):
self.errortag()
else:
return self.pack(two[0], len(data))+answer
return answer
if format[:1] == '%':
return b(format % data)
# asciiz specifier
if format[:1] == 'z':
if isinstance(data,bytes):
return data + b('\0')
return bytes(b(data)+b('\0'))
# unicode specifier
if format[:1] == 'u':
return bytes(data+b('\0\0') + (len(data) & 1 and b('\0') or b''))
# DCE-RPC/NDR string specifier
if format[:1] == 'w':
if len(data) == 0:
data = b('\0\0')
elif len(data) % 2:
data = b(data) + b('\0')
l = pack('<L', len(data)//2)
return b''.join([l, l, b('\0\0\0\0'), data])
if data is None:
self.errortag()
# literal specifier
if format[:1] == ':':
if isinstance(data, Structure):
return data.getData()
elif hasattr(data, "getData"):
return data.getData()
elif isinstance(data, int):
return bytes(data)
elif isinstance(data, bytes) is not True:
return bytes(b(data))
else:
return data
if format[-1:] == 's':
if isinstance(data, bytes) or isinstance(data, bytearray):
return pack(format, data)
else:
return pack(format, b(data))
return pack(format, data)
def unpack(self, format, data, dataClassOrCode = b, field = None):
if self.debug:
print(" unpack( %s | %r )" % (format, data))
if field:
addressField = self.findAddressFieldFor(field)
if addressField is not None:
if not self[addressField]:
return
if format[:1] == '_':
if dataClassOrCode != b:
fields = {'self':self, 'inputDataLeft':data}
fields.update(self.fields)
return eval(dataClassOrCode, {}, fields)
else:
return None
# quote specifier
if format[:1] == "'" or format[:1] == '"':
answer = format[1:]
if b(answer) != data:
self.errortag()
return answer
# address specifier
two = format.split('&')
if len(two) == 2:
return self.unpack(two[0],data)
# code specifier
two = format.split('=')
if len(two) >= 2:
return self.unpack(two[0],data)
# length specifier
two = format.split('-')
if len(two) == 2:
return self.unpack(two[0],data)
# array specifier
two = format.split('*')
if len(two) == 2:
answer = []
sofar = 0
if two[0].isdigit():
number = int(two[0])
elif two[0]:
sofar += self.calcUnpackSize(two[0], data)
number = self.unpack(two[0], data[:sofar])
else:
number = -1
while number and sofar < len(data):
nsofar = sofar + self.calcUnpackSize(two[1],data[sofar:])
answer.append(self.unpack(two[1], data[sofar:nsofar], dataClassOrCode))
number -= 1
sofar = nsofar
return answer
if format[:1] == '%':
# format string like specifier
return format % data
# asciiz specifier
if format == 'z':
if data[-1:] != b('\x00'):
self.errortag()
if PY3:
return data[:-1].decode('latin-1')
else:
return data[:-1]
# unicode specifier
if format == 'u':
if data[-2:] != b('\x00\x00'):
self.errortag()
return data[:-2]
# DCE-RPC/NDR string specifier
if format == 'w':
l = unpack('<L', data[:4])[0]
return data[12:12+l*2]
# literal specifier
if format == ':':
if isinstance(data, bytes) and dataClassOrCode is b:
return data
return dataClassOrCode(data)
# struct like specifier
return unpack(format, data)[0]
def calcPackSize(self, format, data, field = None):
if field:
addressField = self.findAddressFieldFor(field)
if addressField is not None:
if not self[addressField]:
return 0
# void specifier
if format[:1] == '_':
return 0
# quote specifier
if format[:1] == "'" or format[:1] == '"':
return len(format)-1
# address specifier
two = format.split('&')
if len(two) == 2:
return self.calcPackSize(two[0], data)
# code specifier
two = format.split('=')
if len(two) >= 2:
return self.calcPackSize(two[0], data)
# length specifier
two = format.split('-')
if len(two) == 2:
return self.calcPackSize(two[0], data)
# array specifier
two = format.split('*')
if len(two) == 2:
answer = 0
if two[0].isdigit():
if int(two[0]) != len(data):
self.errortag()
elif two[0]:
answer += self.calcPackSize(two[0], len(data))
for each in data:
answer += self.calcPackSize(two[1], each)
return answer
if format[:1] == '%':
return len(format % data)
# asciiz specifier
if format[:1] == 'z':
return len(data)+1
        # unicode specifier
if format[:1] == 'u':
l = len(data)
return l + (l & 1 and 3 or 2)
# DCE-RPC/NDR string specifier
if format[:1] == 'w':
l = len(data)
return 12+l+l % 2
# literal specifier
if format[:1] == ':':
return len(data)
# struct like specifier
return calcsize(format)
def calcUnpackSize(self, format, data, field = None):
if self.debug:
print(" calcUnpackSize( %s | %s | %r)" % (field, format, data))
# void specifier
if format[:1] == '_':
return 0
addressField = self.findAddressFieldFor(field)
if addressField is not None:
if not self[addressField]:
return 0
try:
lengthField = self.findLengthFieldFor(field)
return int(self[lengthField])
except Exception:
pass
# quote specifier
if format[:1] == "'" or format[:1] == '"':
return len(format)-1
# address specifier
two = format.split('&')
if len(two) == 2:
return self.calcUnpackSize(two[0], data)
# code specifier
two = format.split('=')
if len(two) >= 2:
return self.calcUnpackSize(two[0], data)
# length specifier
two = format.split('-')
if len(two) == 2:
return self.calcUnpackSize(two[0], data)
# array specifier
two = format.split('*')
if len(two) == 2:
answer = 0
if two[0]:
if two[0].isdigit():
number = int(two[0])
else:
answer += self.calcUnpackSize(two[0], data)
number = self.unpack(two[0], data[:answer])
while number:
number -= 1
answer += self.calcUnpackSize(two[1], data[answer:])
else:
while answer < len(data):
answer += self.calcUnpackSize(two[1], data[answer:])
return answer
if format[:1] == '%':
self.errortag()
# asciiz specifier
if format[:1] == 'z':
return data.index(b('\x00'))+1
        # unicode specifier
if format[:1] == 'u':
l = data.index(b('\x00\x00'))
return l + (l & 1 and 3 or 2)
# DCE-RPC/NDR string specifier
if format[:1] == 'w':
l = unpack('<L', data[:4])[0]
return 12+l*2
# literal specifier
if format[:1] == ':':
return len(data)
# struct like specifier
return calcsize(format)
def calcPackFieldSize(self, fieldName, format = None):
if format is None:
format = self.formatForField(fieldName)
return self.calcPackSize(format, self[fieldName])
def formatForField(self, fieldName):
for field in self.commonHdr+self.structure:
if field[0] == fieldName:
return field[1]
self.errortag()
def findAddressFieldFor(self, fieldName):
descriptor = '&%s' % fieldName
l = len(descriptor)
for field in self.commonHdr+self.structure:
if field[1][-l:] == descriptor:
return field[0]
return None
def findLengthFieldFor(self, fieldName):
descriptor = '-%s' % fieldName
l = len(descriptor)
for field in self.commonHdr+self.structure:
if field[1][-l:] == descriptor:
return field[0]
return None
def zeroValue(self, format):
two = format.split('*')
if len(two) == 2:
if two[0].isdigit():
return (self.zeroValue(two[1]),)*int(two[0])
if not format.find('*') == -1:
return ()
if 's' in format:
return b''
if format in ['z',':','u']:
return b''
if format == 'w':
return b('\x00\x00')
return 0
def clear(self):
for field in self.commonHdr + self.structure:
self[field[0]] = self.zeroValue(field[1])
def dump(self, msg = None, indent = 0):
if msg is None:
msg = self.__class__.__name__
ind = ' '*indent
print("\n%s" % msg)
fixedFields = []
for field in self.commonHdr+self.structure:
i = field[0]
if i in self.fields:
fixedFields.append(i)
if isinstance(self[i], Structure):
self[i].dump('%s%s:{' % (ind,i), indent = indent + 4)
print("%s}" % ind)
else:
print("%s%s: {%r}" % (ind,i,self[i]))
# Do we have remaining fields not defined in the structures? let's
# print them
remainingFields = list(set(self.fields) - set(fixedFields))
for i in remainingFields:
if isinstance(self[i], Structure):
self[i].dump('%s%s:{' % (ind,i), indent = indent + 4)
print("%s}" % ind)
else:
print("%s%s: {%r}" % (ind,i,self[i]))
def pretty_print(x):
if chr(x) in '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ':
return chr(x)
else:
return u'.'
def hexdump(data, indent = ''):
if data is None:
return
if isinstance(data, int):
data = str(data).encode('utf-8')
x=bytearray(data)
strLen = len(x)
i = 0
while i < strLen:
line = " %s%04x " % (indent, i)
for j in range(16):
if i+j < strLen:
line += "%02X " % x[i+j]
else:
line += u" "
if j%16 == 7:
line += " "
line += " "
line += ''.join(pretty_print(x) for x in x[i:i+16] )
print (line)
i += 16
def parse_bitmask(dict, value):
ret = ''
for i in range(0, 31):
flag = 1 << i
if value & flag == 0:
continue
if flag in dict:
ret += '%s | ' % dict[flag]
else:
ret += "0x%.8X | " % flag
if len(ret) == 0:
return '0'
else:
return ret[:-3]
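# Examples (sketch; the values follow directly from the function above):
#     parse_bitmask({0x1: 'READ', 0x2: 'WRITE'}, 0x3)  ->  'READ | WRITE'
#     parse_bitmask({0x1: 'READ'}, 0x5)                ->  'READ | 0x00000004'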
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import sys
import pytest
import pandas
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import shutil
assert (
"modin.utils" not in sys.modules
), "Do not import modin.utils before patching, or tests could fail"
# every import under this assert has to be postfixed with 'noqa: E402'
# as flake8 complains about that... but we _have_ to make sure we
# monkey-patch at the right spot, otherwise testing doc URLs might
# not catch all of them
import modin.utils # noqa: E402
_generated_doc_urls = set()
def _saving_make_api_url(token, _make_api_url=modin.utils._make_api_url):
url = _make_api_url(token)
_generated_doc_urls.add(url)
return url
modin.utils._make_api_url = _saving_make_api_url
import modin # noqa: E402
import modin.config # noqa: E402
from modin.config import IsExperimental, TestRayClient # noqa: E402
from modin.backends import PandasQueryCompiler, BaseQueryCompiler # noqa: E402
from modin.engines.python.pandas_on_python.io import PandasOnPythonIO # noqa: E402
from modin.data_management.factories import factories # noqa: E402
from modin.utils import get_current_backend # noqa: E402
from modin.pandas.test.utils import ( # noqa: E402
_make_csv_file,
get_unique_filename,
teardown_test_files,
NROWS,
IO_OPS_DATA_DIR,
)
# create the test data dir if it does not exist yet
if not os.path.exists(IO_OPS_DATA_DIR):
os.mkdir(IO_OPS_DATA_DIR)
def pytest_addoption(parser):
parser.addoption(
"--simulate-cloud",
action="store",
default="off",
help="simulate cloud for testing: off|normal|experimental",
)
parser.addoption(
"--backend",
action="store",
default=None,
help="specifies backend to run tests on",
)
parser.addoption(
"--extra-test-parameters",
action="store_true",
help="activate extra test parameter combinations",
)
class Patcher:
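    """Temporarily replace the given (module, attribute) callables with wrappers that
    pull every argument through conn.obtain() before invoking the originals (used by
    the simulate_cloud fixture below)."""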
def __init__(self, conn, *pairs):
self.pairs = pairs
self.originals = None
self.conn = conn
def __wrap(self, func):
def wrapper(*a, **kw):
return func(
*(tuple(self.conn.obtain(x) for x in a)),
**({k: self.conn.obtain(v) for k, v in kw.items()}),
)
return func, wrapper
def __enter__(self):
self.originals = []
for module, attrname in self.pairs:
orig, wrapped = self.__wrap(getattr(module, attrname))
self.originals.append((module, attrname, orig))
setattr(module, attrname, wrapped)
return self
def __exit__(self, *a, **kw):
for module, attrname, orig in self.originals:
setattr(module, attrname, orig)
def set_experimental_env(mode):
from modin.config import IsExperimental
IsExperimental.put(mode == "experimental")
@pytest.fixture(scope="session", autouse=True)
def simulate_cloud(request):
mode = request.config.getoption("--simulate-cloud").lower()
if mode == "off":
yield
return
if mode not in ("normal", "experimental"):
raise ValueError(f"Unsupported --simulate-cloud mode: {mode}")
assert IsExperimental.get(), "Simulated cloud must be started in experimental mode"
from modin.experimental.cloud import create_cluster, get_connection
import pandas._testing
import pandas._libs.testing as cyx_testing
with create_cluster("local", cluster_type="local"):
get_connection().teleport(set_experimental_env)(mode)
with Patcher(
get_connection(),
(pandas._testing, "assert_class_equal"),
(pandas._testing, "assert_series_equal"),
(cyx_testing, "assert_almost_equal"),
):
yield
@pytest.fixture(scope="session", autouse=True)
def enforce_config():
"""
A fixture that ensures that all checks for MODIN_* variables
are done using modin.config to prevent leakage
"""
orig_env = os.environ
modin_start = os.path.dirname(modin.__file__)
modin_exclude = [os.path.dirname(modin.config.__file__)]
class PatchedEnv:
@staticmethod
def __check_var(name):
if name.upper().startswith("MODIN_"):
frame = sys._getframe()
try:
# get the path to module where caller of caller is defined;
# caller of this function is inside PatchedEnv, and we're
# interested in whomever called a method on PatchedEnv
caller_file = frame.f_back.f_back.f_code.co_filename
finally:
del frame
pkg_name = os.path.dirname(caller_file)
if pkg_name.startswith(modin_start):
assert any(
pkg_name.startswith(excl) for excl in modin_exclude
), "Do not access MODIN_ environment variable bypassing modin.config"
def __getitem__(self, name):
self.__check_var(name)
return orig_env[name]
def __setitem__(self, name, value):
self.__check_var(name)
orig_env[name] = value
def __delitem__(self, name):
self.__check_var(name)
del orig_env[name]
def pop(self, name, default=object()):
self.__check_var(name)
return orig_env.pop(name, default)
def get(self, name, default=None):
self.__check_var(name)
return orig_env.get(name, default)
def __contains__(self, name):
self.__check_var(name)
return name in orig_env
def __getattr__(self, name):
return getattr(orig_env, name)
def __iter__(self):
return iter(orig_env)
os.environ = PatchedEnv()
yield
os.environ = orig_env
BASE_BACKEND_NAME = "BaseOnPython"
class TestQC(BaseQueryCompiler):
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def finalize(self):
self._modin_frame.finalize()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
def free(self):
pass
to_pandas = PandasQueryCompiler.to_pandas
default_to_pandas = PandasQueryCompiler.default_to_pandas
class BaseOnPythonIO(PandasOnPythonIO):
query_compiler_cls = TestQC
class BaseOnPythonFactory(factories.BaseFactory):
@classmethod
def prepare(cls):
cls.io_cls = BaseOnPythonIO
def set_base_backend(name=BASE_BACKEND_NAME):
setattr(factories, f"{name}Factory", BaseOnPythonFactory)
modin.set_backends(engine="python", partition=name.split("On")[0])
def pytest_configure(config):
if config.option.extra_test_parameters is not None:
import modin.pandas.test.utils as utils
utils.extra_test_parameters = config.option.extra_test_parameters
backend = config.option.backend
if backend is None:
return
if backend == BASE_BACKEND_NAME:
set_base_backend(BASE_BACKEND_NAME)
else:
partition, engine = backend.split("On")
modin.set_backends(engine=engine, partition=partition)
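# Example (sketch): running pytest with --backend=PandasOnRay would split the value into
# partition="Pandas" and engine="Ray" before the set_backends call above.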
def pytest_runtest_call(item):
custom_markers = ["xfail", "skip"]
    # dynamically add custom markers to tests
for custom_marker in custom_markers:
for marker in item.iter_markers(name=f"{custom_marker}_backends"):
backends = marker.args[0]
if not isinstance(backends, list):
backends = [backends]
current_backend = get_current_backend()
reason = marker.kwargs.pop("reason", "")
item.add_marker(
getattr(pytest.mark, custom_marker)(
condition=current_backend in backends,
reason=f"Backend {current_backend} does not pass this test. {reason}",
**marker.kwargs,
)
)
@pytest.fixture(scope="class")
def TestReadCSVFixture():
filenames = []
files_ids = [
"test_read_csv_regular",
"test_read_csv_blank_lines",
"test_read_csv_yes_no",
"test_read_csv_nans",
"test_read_csv_bad_lines",
]
    # each xdist worker is spawned in a separate process with its own namespace and dataset
pytest.csvs_names = {file_id: get_unique_filename() for file_id in files_ids}
# test_read_csv_col_handling, test_read_csv_parsing
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_regular"],
)
# test_read_csv_parsing
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_yes_no"],
additional_col_values=["Yes", "true", "No", "false"],
)
# test_read_csv_col_handling
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_blank_lines"],
add_blank_lines=True,
)
# test_read_csv_nans_handling
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_nans"],
add_blank_lines=True,
additional_col_values=["<NA>", "N/A", "NA", "NULL", "custom_nan", "73"],
)
# test_read_csv_error_handling
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_bad_lines"],
add_bad_lines=True,
)
yield
# Delete csv files that were created
teardown_test_files(filenames)
@pytest.fixture
def make_csv_file():
"""Pytest fixture factory that makes temp csv files for testing.
Yields:
Function that generates csv files
"""
filenames = []
yield _make_csv_file(filenames)
# Delete csv files that were created
teardown_test_files(filenames)
@pytest.fixture
def make_parquet_file():
"""Pytest fixture factory that makes a parquet file/dir for testing.
Yields:
Function that generates a parquet file/dir
"""
filenames = []
def _make_parquet_file(
filename,
row_size=NROWS,
force=True,
directory=False,
partitioned_columns=[],
):
"""Helper function to generate parquet files/directories.
Args:
filename: The name of test file, that should be created.
row_size: Number of rows for the dataframe.
force: Create a new file/directory even if one already exists.
directory: Create a partitioned directory using pyarrow.
partitioned_columns: Create a partitioned directory using pandas.
Will be ignored if directory=True.
"""
df = pandas.DataFrame(
{"col1": np.arange(row_size), "col2": np.arange(row_size)}
)
if os.path.exists(filename) and not force:
pass
elif directory:
if os.path.exists(filename):
shutil.rmtree(filename)
else:
os.mkdir(filename)
table = pa.Table.from_pandas(df)
pq.write_to_dataset(table, root_path=filename)
elif len(partitioned_columns) > 0:
df.to_parquet(filename, partition_cols=partitioned_columns)
else:
df.to_parquet(filename)
filenames.append(filename)
    # Return the function that generates parquet files
yield _make_parquet_file
# Delete parquet file that was created
for path in filenames:
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
@pytest.fixture
def make_sql_connection():
"""Sets up sql connections and takes them down after the caller is done.
Yields:
Factory that generates sql connection objects
"""
filenames = []
def _sql_connection(filename, table=""):
# Remove file if exists
if os.path.exists(filename):
os.remove(filename)
filenames.append(filename)
# Create connection and, if needed, table
conn = "sqlite:///{}".format(filename)
if table:
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5, 6],
"col2": [7, 8, 9, 10, 11, 12, 13],
"col3": [14, 15, 16, 17, 18, 19, 20],
"col4": [21, 22, 23, 24, 25, 26, 27],
"col5": [0, 0, 0, 0, 0, 0, 0],
}
)
df.to_sql(table, conn)
return conn
yield _sql_connection
# Teardown the fixture
teardown_test_files(filenames)
@pytest.fixture(scope="class")
def TestReadGlobCSVFixture():
filenames = []
base_name = get_unique_filename(extension="")
pytest.glob_path = "{}_*.csv".format(base_name)
pytest.files = ["{}_{}.csv".format(base_name, i) for i in range(11)]
for fname in pytest.files:
# Glob does not guarantee ordering so we have to remove the randomness in the generated csvs.
_make_csv_file(filenames)(fname, row_size=11, remove_randomness=True)
yield
teardown_test_files(filenames)
@pytest.fixture
def get_generated_doc_urls():
return lambda: _generated_doc_urls
ray_client_server = None
def pytest_sessionstart(session):
if TestRayClient.get():
import ray
import ray.util.client.server.server as ray_server
addr = "localhost:50051"
global ray_client_server
ray_client_server = ray_server.serve(addr)
ray.util.connect(addr)
def pytest_sessionfinish(session, exitstatus):
if TestRayClient.get():
import ray
ray.util.disconnect()
if ray_client_server:
ray_client_server.stop(0)
|
from multiprocessing import Pool
import shutil
from django.core.management import BaseCommand
from django.conf import settings
from tqdm import tqdm
from data.models import Officer
from xlsx.utils import export_officer_xlsx
from shared.aws import aws
def upload_xlsx_files(officer):
tmp_dir = f'tmp/{officer.id}'
file_names = export_officer_xlsx(officer, tmp_dir)
for file_name in file_names:
aws.s3.upload_file(
f'{tmp_dir}/{file_name}',
settings.S3_BUCKET_OFFICER_CONTENT,
f'{settings.S3_BUCKET_XLSX_DIRECTORY}/{officer.id}/{file_name}'
)
shutil.rmtree(tmp_dir, ignore_errors=True)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('officer_ids', nargs='*')
def handle(self, officer_ids, *args, **kwargs):
if officer_ids:
officers = Officer.objects.filter(id__in=officer_ids)
else:
officers = Officer.objects.all()
with Pool(20) as p:
list(tqdm(p.imap(upload_xlsx_files, officers), desc='Uploading officer xlsx', total=officers.count()))
|
# Copyright (c) 2017-2020 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from .default_listener import DefaultListener
from .dispatching_listener import DispatchingListener
from .generator import depthcontrol, Generator
from .serializer import simple_space_serializer
from .tree import BaseRule, Tree, UnlexerRule, UnparserRule
|
import logging
from aspire.source import ImageSource
logger = logging.getLogger(__name__)
class OrientEstSource(ImageSource):
"""
    A derived ImageSource class for updating orientation information
"""
def __init__(self, src, orient_method):
"""
Initialize an Orientation ImageSource object from original ImageSource
:param src: Original ImageSource object after 2D classification
:param orient_method: object specifying orientation estimation method
"""
super().__init__(src.L, src.n, dtype=src.dtype, metadata=src._metadata.copy())
self._im = None
self.orient_method = orient_method
self.rots = orient_method.rotations
|
soma = 0
quant = 0
for n in range(3, 501, 6):
soma += n
quant += 1
print(f'A soma dos {quant} números ímpares multiplos de 3 é {soma}')
|
from kivymd.app import MDApp
from kivymd.uix.bottomnavigation import MDBottomNavigationItem
from kivy.uix.recycleview import RecycleView
from kivy.clock import Clock
from kivymd.uix.button import MDRectangleFlatButton
from kivy.properties import StringProperty
from Mdialog import InfoDialog
from decor import db_dialog
import re
texts_in = {
'in_error': 'Niepoprawny format kosztów. Wprowadź koszt zgodnie z wzorcem: np: dziesięć złotych i pięćdziesiat '
'groszy to: 10.50'
}
cost_input = []
class FirstScreen(MDBottomNavigationItem):
text_in = StringProperty()
def _input(self):
if self.text_in:
global cost_input
cost_input.append(self.text_in)
class CatProView(RecycleView):
""" RV for Categories """
def __init__(self, **kwargs):
super(CatProView, self).__init__(**kwargs)
Clock.schedule_once(self.populate_view)
def populate_view(self, *args):
store = MDApp.get_running_app().store
if store['category']['cat'] or store['project']['pro']:
# join lists
new_store = [*store['category']['cat'], *store['project']['pro']]
try:
self.data = [{'text': str(i)} for i in new_store]
except Exception as e:
                pass  # TODO: v.2 - logging module with e, email client, internet permission
else:
self.data = []
return self.data
class CatProButton(MDRectangleFlatButton):
""" add new cost in to db """
def add_to_db(self):
global cost_input
if cost_input:
cost = cost_input[-1]
cost = self.validate(cost)
if cost is not None and cost != 0.0:
cost, item = str(cost), self.text
store = MDApp.get_running_app().store
if item in store['category']['cat']:
self.ins_cost(cost, None, item)
elif item in store['project']['pro']:
self.ins_cost(cost, item, None)
else:
InfoDialog(text=f'{texts_in["in_error"]}').dialog_()
else:
InfoDialog(text=f'{texts_in["in_error"]}').dialog_()
cost_input[:] = []
def validate(self, cost):
re_obj = re.compile(r'(^[0-9]+(\.|\,)?((\d)?\d)?$)') # (^[0-9]+(\.| \,)(?:\d\d?)$)
match_obj = re_obj.match(cost)
if match_obj is not None:
if ',' in cost:
cost = float(cost.replace(',', '.'))
return float(cost)
@db_dialog
def ins_cost(self, *args):
app = MDApp.get_running_app().db
return app.insert_cost(*args)
|
def front_times(str, n):
return str[:3] * n |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.api.base import DataAPI, DataDRFAPISet, DRFActionAPI
from conf import dataapi_settings
META_API_URL = "http://{host}:{port}/v3/meta/".format(
host=getattr(dataapi_settings, "META_API_HOST"), port=getattr(dataapi_settings, "META_API_PORT")
)
class _MetaApi:
def __init__(self):
self.result_tables = DataDRFAPISet(
url=META_API_URL + "result_tables/",
primary_key="result_table_id",
module="meta",
description="获取结果表元信息",
custom_config={
"storages": DRFActionAPI(method="get", detail=True),
"fields": DRFActionAPI(method="get", detail=True)
                # When detail=True the endpoint targets a single sub-resource (i.e. it is generated
                # via detail_route), so the parameter matching primary_key must be passed on the call;
                # otherwise it is a list_route endpoint and no primary_key is needed.
},
            default_timeout=60,  # set the default timeout
)
self.sub_instances = DataDRFAPISet(
url="http://127.0.0.1:8000/v3/demo/instances/{instance_id}/sub_instances/",
primary_key="sub_instance_id",
url_keys=["instance_id"],
module="demo",
description="二级资源DEMO",
)
        # non-REST style API
self.get_result_table = DataAPI(
url=META_API_URL + "result_tables/{result_table_id}/",
method="GET",
module="demo",
url_keys=["result_table_id"],
description="获取结果表元信息",
)
        # the result is returned in a non-standard format
self.nonstandard_api = DataAPI(
url="http://i_am_a_nonstandard_api.com/aaa/bb", method="GET", module="demo", description="xx"
)
MetaApi = _MetaApi()
|
import os
import glob
import lsst.afw.image as afwImage
from lsst.afw.cameraGeom import utils as cgu
import lsst.obs.lsst as obs_lsst
import lsst_camera.cp_pipe_drivers as cpd
camera = obs_lsst.LsstCamMapper().camera
det_names = ['R22_S00', 'R22_S01', 'R22_S02',
'R22_S10', 'R22_S11', 'R22_S12',
'R22_S20', 'R22_S21', 'R22_S22']
image_source = cpd.CalibImageSource('CALIB/bias/2019-10-18')
bias_image = cgu.makeImageFromCamera(camera, detectorNameList=det_names,
imageSource=image_source,
imageFactory=afwImage.ImageF)
bias_image.writeFits('BOT_R22_6813D_bias_mosaic.fits')
image_source = cpd.CalibImageSource('CALIB/dark/2019-10-18')
#image_source = cpd.CalibImageSource(
# 'calib_products/rerun/dark_calibs_nobias/dark/2019-10-18')
dark_image = cgu.makeImageFromCamera(camera, detectorNameList=det_names,
imageSource=image_source,
imageFactory=afwImage.ImageF)
dark_image.writeFits('BOT_R22_6813D_dark_mosaic.fits')
#dark_image.writeFits('BOT_R22_6813D_dark_mosaic_nobias.fits')
|
sandboxHost = 'https://api.sandbox.namecheap.com/xml.response'
sandboxAuthentication = 'ApiUser=aUserName&ApiKey=apiKeyString&UserName=aUserName'
realHost = 'https://api.namecheap.com/xml.response'
realAuthentication = 'ApiUser=aUserName&ApiKey=apiKeyString&UserName=aUserName'
domainInfo = 'SLD=planet-ignite&TLD=net'
clientIP='clientIP=171.67.92.194'
autoDomainName = '.planet-ignite.net'
|
#!/usr/bin/python3
""" """
from tests.test_models.test_base_model import test_basemodel
from models.state import State
import os
class test_state(test_basemodel):
""" states test class"""
def __init__(self, *args, **kwargs):
""" state test class init"""
super().__init__(*args, **kwargs)
self.name = "State"
self.value = State
def test_name3(self):
""" testing state name attr"""
new = self.value()
self.assertEqual(type(new.name), str if
os.getenv('HBNB_TYPE_STORAGE') != 'db' else
type(None))
|
"""The Edge Histogram kernel as defined in :cite:`sugiyama2015halting`."""
from warnings import warn
from collections import Counter
from collections import Iterable
from grakel.graph import Graph
from numpy import zeros
from scipy.sparse import csr_matrix
from six import iteritems
from six import itervalues
from .vertex_histogram import VertexHistogram
class EdgeHistogram(VertexHistogram):
"""Edge Histogram kernel as found in :cite:`sugiyama2015halting`.
Parameters
----------
sparse : bool, or 'auto', default='auto'
Defines if the data will be stored in a sparse format.
Sparse format is slower, but less memory consuming and in some cases the only solution.
If 'auto', uses a sparse matrix when the number of zeros is more than the half of the matrix size.
        In all cases, if the dense matrix doesn't fit in system memory, a sparse approach will be tried.
Attributes
----------
None.
"""
def parse_input(self, X):
"""Parse and check the given input for EH kernel.
Parameters
----------
X : iterable
For the input to pass the test, we must have:
Each element must be an iterable with at most three features and at
least one. The first that is obligatory is a valid graph structure
(adjacency matrix or edge_dictionary) while the second is
node_labels and the third edge_labels (that fitting the given graph
format).
Returns
-------
out : np.array, shape=(len(X), n_labels)
A np array for frequency (cols) histograms for all Graphs (rows).
"""
if not isinstance(X, Iterable):
raise TypeError('input must be an iterable\n')
else:
rows, cols, data = list(), list(), list()
if self._method_calling in [1, 2]:
labels = dict()
self._labels = labels
elif self._method_calling == 3:
labels = dict(self._labels)
ni = 0
for (i, x) in enumerate(iter(X)):
is_iter = isinstance(x, Iterable)
if is_iter:
x = list(x)
if is_iter and len(x) in [0, 3]:
if len(x) == 0:
warn('Ignoring empty element on index: '+str(i))
continue
else:
# Our element is an iterable of exactly 3 elements; edge labels are the third
L = x[2]
elif type(x) is Graph:
# get labels in any existing format
L = x.get_labels(purpose="any", label_type="edge")
else:
raise TypeError('each element of X must be either a ' +
'graph object or a list with at least ' +
'a graph like object and edge labels ' +
'dict \n')
if L is None:
raise ValueError("Invalid graph entry at location " + str(i) + "!")
# construct the data input for the numpy array
for (label, frequency) in iteritems(Counter(itervalues(L))):
# for the row that corresponds to that graph
rows.append(ni)
# and to the value that this label is indexed
col_idx = labels.get(label, None)
if col_idx is None:
# if not indexed, add the new index (the next)
col_idx = len(labels)
labels[label] = col_idx
# designate the certain column information
cols.append(col_idx)
# as well as the frequency value to data
data.append(frequency)
ni += 1
# Initialise the feature matrix
if self._method_calling in [1, 2]:
if self.sparse == 'auto':
self.sparse_ = (len(cols)/float(ni * len(labels)) <= 0.5)
else:
self.sparse_ = bool(self.sparse)
if self.sparse_:
features = csr_matrix((data, (rows, cols)), shape=(ni, len(labels)), copy=False)
else:
# Initialise the feature matrix
try:
features = zeros(shape=(ni, len(labels)))
features[rows, cols] = data
except MemoryError:
warn('memory-error: switching to sparse')
self.sparse_, features = True, csr_matrix((data, (rows, cols)), shape=(ni, len(labels)), copy=False)
if ni == 0:
raise ValueError('parsed input is empty')
return features
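# Hedged usage sketch (illustrative only, not part of the original module): parse_input
# expects an iterable of graphs that carry edge labels, e.g. [adjacency, node_labels,
# edge_labels] triples, and the kernel matrix comes from the fit_transform inherited
# via VertexHistogram. The toy graph below is an assumption for demonstration.
#
#   adjacency = {0: [1], 1: [0]}
#   node_labels = {0: 'a', 1: 'b'}
#   edge_labels = {(0, 1): 'x', (1, 0): 'x'}
#   kernel = EdgeHistogram()
#   K = kernel.fit_transform([[adjacency, node_labels, edge_labels]])  # 1x1 kernel matrix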
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from xml.etree import ElementTree
from fingerprinting.common import utils
from fingerprinting.application import Application
from fingerprinting.common.tshark import FileCapture
from processpcaps.analyze import process_single_capture
if __name__ == "__main__":
if len(sys.argv) == 2 and os.path.isdir(sys.argv[1]):
utils.walk_directory(sys.argv[1], process_single_capture)
elif len(sys.argv) == 2 and utils.validate_extension(sys.argv[1], "pcap"):
process_single_capture(os.path.abspath(sys.argv[1]))
else:
print("usage: %s [<pcap_directory_or_file>]" % sys.argv[0])
|
# Ex080
values = []
for v in range(0, 5):
number = int(input('Enter a value: '))
if v == 0:
values.append(number)
print(f'Value {number} added to the end of the list')
else:
for p, c in enumerate(values):
if number >= c:
if c == values[-1]:
values.append(number)
print(f'Value {number} added to the end of the list')
break
elif number <= values[p + 1]:
values.insert(p + 1, number)
print(f'Value {number} added at position {p + 1}')
break
else:
values.insert(0, number)
print(f'Value {number} added to the beginning of the list')
break
print('-' * 45)
print(f'The values entered were: \033[32m{values}\033[m')
|
import os
import json
from .contracts import RevertError
from .wrappers import (
ETHCall, AddressBook, MAXUINT256, ETHWrapper, SKIP_PROXY, register_provider, BaseProvider
)
from environs import Env
from eth_account.account import Account, LocalAccount
from eth_account.signers.base import BaseAccount
from web3.exceptions import ExtraDataLengthError
from web3.middleware import geth_poa_middleware
from eth_event import get_topic_map, decode_logs
env = Env()
CONTRACT_JSON_PATH = env.list("CONTRACT_JSON_PATH", [], delimiter=":")
W3_TRANSACT_MODE = env.str("W3_TRANSACT_MODE", "transact")
W3_ADDRESS_BOOK_PREFIX = env.str("W3_ADDRESS_BOOK_PREFIX", "W3_ADDR_")
W3_ADDRESS_BOOK_CREATE_UNKNOWN = env.str("W3_ADDRESS_BOOK_CREATE_UNKNOWN", "")
class W3TimeControl:
def __init__(self, w3):
self.w3 = w3
def fast_forward(self, secs):
self.w3.provider.make_request("evm_increaseTime", [secs])
# Not tested!
@property
def now(self):
return self.w3.get_block("latest").timestamp
def register_w3_provider(provider_key="w3", tester=None, provider_kwargs={}):
if tester is None:
try:
import eth_tester # noqa
except ImportError:
tester = False
if tester:
from web3 import Web3
w3 = Web3(Web3.EthereumTesterProvider())
else:
from web3.auto import w3
assert w3.isConnected()
try:
w3.eth.get_block("latest")
except ExtraDataLengthError:
w3.middleware_onion.inject(geth_poa_middleware, layer=0)
# If address_book not provided and there are envs with W3_ADDRESS_BOOK_PREFIX,
# use W3EnvAddressBook
if "address_book" not in provider_kwargs and not tester:
if [k for k in os.environ.keys() if k.startswith(W3_ADDRESS_BOOK_PREFIX)]:
provider_kwargs["address_book"] = W3EnvAddressBook(
w3, create_unknown_name=W3_ADDRESS_BOOK_CREATE_UNKNOWN
)
provider = W3Provider(w3, **provider_kwargs)
register_provider(provider_key, provider)
return provider
def transact(provider, function, tx_kwargs):
if W3_TRANSACT_MODE == "transact":
# uses eth_sendTransaction
tx_hash = function.transact({**provider.tx_kwargs, **tx_kwargs})
elif W3_TRANSACT_MODE == "sign-and-send":
tx_kwargs = {**provider.tx_kwargs, **tx_kwargs}
from_ = tx_kwargs.pop("from")
if isinstance(from_, BaseAccount):
tx_kwargs["from"] = from_.address
else: # it's a string, I try to get the PK from the environment
from_ = provider.address_book.get_signer_account(from_)
tx_kwargs["from"] = from_.address
tx = function.buildTransaction(
{**tx_kwargs, **{"nonce": provider.w3.eth.get_transaction_count(from_.address)}}
)
signed_tx = from_.sign_transaction(tx)
tx_hash = provider.w3.eth.send_raw_transaction(signed_tx.rawTransaction)
elif W3_TRANSACT_MODE == "defender-async":
from .defender_relay import send_transaction
tx_kwargs = {**provider.tx_kwargs, **tx_kwargs}
tx = function.buildTransaction(tx_kwargs)
return send_transaction(tx)
return provider.w3.eth.wait_for_transaction_receipt(tx_hash)
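# Hedged configuration sketch (not part of the original module): transact() above switches
# on W3_TRANSACT_MODE, and W3EnvAddressBook (below) reads signer keys from environment
# variables prefixed with W3_ADDRESS_BOOK_PREFIX (default "W3_ADDR_"). An illustrative
# environment for the "sign-and-send" mode might look like this; all values are placeholders:
#
#   W3_TRANSACT_MODE=sign-and-send
#   W3_ADDR_OWNER=<private key for the account named "OWNER">
#   W3_ADDR_OWNER_ADDR=<optional address cross-check for "OWNER">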
class W3AddressBook(AddressBook):
def __init__(self, w3, eth_accounts=None):
self.w3 = w3
self._eth_accounts = eth_accounts
self.name_to_address = {}
self.last_account_used = -1
@property
def eth_accounts(self):
if self._eth_accounts is None:
self._eth_accounts = self.w3.eth.accounts
return self._eth_accounts
def get_account(self, name):
if isinstance(name, (Account, LocalAccount)):
return name
if name is None:
return self.ZERO
if type(name) == str and name.startswith("0x"):
return name
if name not in self.name_to_address:
self.last_account_used += 1
try:
self.name_to_address[name] = self.eth_accounts[self.last_account_used]
except IndexError:
self.name_to_address[name] = self.w3.eth.account.create().address
return self.name_to_address[name]
def get_name(self, account_or_address):
if isinstance(account_or_address, (LocalAccount, )):
account_or_address = account_or_address.address
for name, addr in self.name_to_address.items():
if addr == account_or_address:
return name
return None
class W3EnvAddressBook(AddressBook):
def __init__(self, w3, env_prefix=W3_ADDRESS_BOOK_PREFIX, create_unknown_name=False):
"""Creates an address book read from environment variables
@param create_unknown_name "addr-only" means if name not found, creates an address but doesn't store
the PK (can't sign)
"yes" or True means if name not found, local address is created
"no" or False doesn't create addresses for unknown names
"""
self.w3 = w3
self.signers = {}
self.name_to_address = {}
self.create_unknown_name = create_unknown_name
for k, value in os.environ.items():
if not k.startswith(env_prefix):
continue
if k.endswith("_ADDR"):
continue # Addresses linked to names
addr = k[len(env_prefix):]
if addr.startswith("0x"):
account = w3.eth.account.from_key(value)
assert account.address == addr
else: # It's a name
name = addr
account = Account.from_key(value)
if f"{env_prefix}{name}_ADDR" in os.environ:
address = os.environ[f"{env_prefix}{name}_ADDR"]
assert account.address == address
self.name_to_address[name] = account.address
self.signers[account.address] = account
def get_account(self, name):
if isinstance(name, (Account, LocalAccount)):
return name
if name is None:
return self.ZERO
if type(name) == str and name.startswith("0x"):
return name
if name in self.name_to_address:
return self.name_to_address[name]
if self.create_unknown_name:
account = self.w3.eth.account.create()
self.name_to_address[name] = account.address
if self.create_unknown_name != "addr-only":
self.signers[account.address] = account
return account.address
raise RuntimeError(f"No account found for name {name}")
def get_name(self, account_or_address):
if isinstance(account_or_address, (LocalAccount, )):
account_or_address = account_or_address.address
for name, addr in self.name_to_address.items():
if addr == account_or_address:
return name
return None
def get_signer_account(self, address):
return self.signers[address]
class ReceiptWrapper:
"""Class that makes w3 receipts more user friendly"""
def __init__(self, receipt, contract):
self._receipt = receipt
self._contract = contract
@property
def events(self):
if not hasattr(self, "_events"):
topic_map = get_topic_map(self._contract.abi)
logs = decode_logs(self._receipt.logs, topic_map, allow_undecoded=True)
evts = {}
for evt in logs:
evt_name = evt["name"]
evt_params = dict((d["name"], d["value"]) for d in evt["data"])
if evt_name not in evts:
evts[evt_name] = evt_params
elif type(evts[evt_name]) == dict:
evts[evt_name] = [evts[evt_name], evt_params] # start a list
else: # it's already a list
evts[evt_name].append(evt_params)
self._events = evts
return self._events
def __getattr__(self, attr_name):
return getattr(self._receipt, attr_name)
class W3ETHCall(ETHCall):
@classmethod
def find_function_abi(cls, contract, eth_method, eth_variant):
abis = [x for x in contract.abi if "name" in x and x["name"] == eth_method]
if len(abis) == 1:
return abis[0]
# TODO: eth_variant
raise RuntimeError(f"Method {eth_method} not found")
@classmethod
def get_eth_function_and_mutability(cls, wrapper, eth_method, eth_variant=None):
function = getattr(wrapper.contract.functions, eth_method) # TODO: eth_variant
function_abi = cls.find_function_abi(wrapper.contract, eth_method, eth_variant)
if function_abi["stateMutability"] in ("pure", "view"):
def eth_function(*args):
if args and type(args[-1]) == dict:
args = args[:-1] # remove dict with {from: ...}
return function(*args).call()
else: # Mutable function, need to send and wait transaction
def eth_function(*args):
if args and type(args[-1]) == dict:
transact_args = args[-1]
args = args[:-1] # remove dict with {from: ...}
else:
transact_args = None # Will use default_account??
return transact(wrapper.provider, function(*args), (transact_args or {}))
return eth_function, function_abi["stateMutability"]
def normalize_receipt(self, wrapper, receipt):
if W3_TRANSACT_MODE == "defender-async":
return receipt  # Don't do anything because the receipt is just a dict for a not-yet-mined tx
return ReceiptWrapper(receipt, wrapper.contract)
def _handle_exception(self, err):
if str(err).startswith("execution reverted: "):
raise RevertError(str(err)[len("execution reverted: "):])
super()._handle_exception(err)
@classmethod
def parse(cls, wrapper, value_type, value):
if value_type.startswith("(") and value_type.endswith(")"):
# It's a tuple / struct
value_types = [t.strip() for t in value_type[1:-1].split(",")]
return tuple(
cls.parse(wrapper, vt, value[i]) for i, vt in enumerate(value_types)
)
if value_type == "address":
if isinstance(value, (LocalAccount, Account)):
return value
# elif isinstance(value, (Contract, ProjectContract)):
# return value.address
elif isinstance(value, ETHWrapper):
return value.contract.address
elif isinstance(value, str) and value.startswith("0x"):
return value
return wrapper.provider.address_book.get_account(value)
if value_type == "keccak256":
return cls._parse_keccak256(value)
if value_type == "contract":
if isinstance(value, ETHWrapper):
return value.contract.address
elif value is None:
return AddressBook.ZERO
raise RuntimeError(f"Invalid contract: {value}")
if value_type == "amount" and value is None:
return MAXUINT256
return value
class W3Provider(BaseProvider):
eth_call = W3ETHCall
def __init__(self, w3, address_book=None, contracts_path=None, tx_kwargs=None):
self.w3 = w3
self.contracts_path = contracts_path or CONTRACT_JSON_PATH
self.contract_def_cache = {}
self.address_book = address_book or W3AddressBook(w3)
self.time_control = W3TimeControl(w3)
self.tx_kwargs = tx_kwargs or {}
def get_contract_def(self, eth_contract):
if eth_contract not in self.contract_def_cache:
json_file = None
for contract_path in self.contracts_path:
for sub_path, _, files in os.walk(contract_path):
if f"{eth_contract}.json" in files:
json_file = os.path.join(sub_path, f"{eth_contract}.json")
break
if json_file is not None:
break
else:
raise RuntimeError(f"{eth_contract} JSON definition not found in {self.contracts_path}")
self.contract_def_cache[eth_contract] = json.load(open(json_file))
return self.contract_def_cache[eth_contract]
def get_contract_factory(self, eth_contract):
contract_def = self.get_contract_def(eth_contract)
return self.w3.eth.contract(abi=contract_def["abi"], bytecode=contract_def.get("bytecode", None))
def deploy(self, eth_contract, init_params, from_, **kwargs):
factory = self.get_contract_factory(eth_contract)
kwargs["from"] = from_
return self.construct(factory, init_params, kwargs)
def get_events(self, eth_wrapper, event_name, filter_kwargs={}):
"""Returns a list of events given a filter, like this:
>>> provider.get_events(currencywrapper, "Transfer", dict(fromBlock=0))
[AttributeDict({
'args': AttributeDict(
{'from': '0x0000000000000000000000000000000000000000',
'to': '0x56Cd397bAA08F2339F0ae470DEA99D944Ac064bB',
'value': 6000000000000000000000}),
'event': 'Transfer',
'logIndex': 0,
'transactionIndex': 0,
'transactionHash': HexBytes(
'0x406b2cf8de2f12f4d0958e9f0568dc0919f337ed399f8d8d78ddbc648c01f806'
),
'address': '0xf8BedC7458fb8cAbD616B5e90F57c34c392e7168',
'blockHash': HexBytes('0x7b23c6ea49759bcee769b1a357dec7f63f03bdb1dd13f1ee19868925954134b3'),
'blockNumber': 23
})]
"""
contract = eth_wrapper.contract
event = getattr(contract.events, event_name)
if "fromBlock" not in filter_kwargs:
filter_kwargs["fromBlock"] = self.get_first_block(eth_wrapper)
event_filter = event.createFilter(**filter_kwargs)
return event_filter.get_all_entries()
def init_eth_wrapper(self, eth_wrapper, owner, init_params, kwargs):
eth_wrapper.owner = self.address_book.get_account(owner)
assert not eth_wrapper.libraries_required, "Not supported"
eth_contract = self.get_contract_factory(eth_wrapper.eth_contract)
if eth_wrapper.proxy_kind is None:
eth_wrapper.contract = self.construct(eth_contract, init_params, {"from": eth_wrapper.owner})
elif eth_wrapper.proxy_kind == "uups" and not SKIP_PROXY:
constructor_params, init_params = init_params
real_contract = self.construct(eth_contract, constructor_params, {"from": eth_wrapper.owner})
ERC1967Proxy = self.get_contract_factory("ERC1967Proxy")
init_data = real_contract.functions.initialize(
*init_params
).buildTransaction({**self.tx_kwargs, **{"from": eth_wrapper.owner}})["data"]
proxy_contract = self.construct(
ERC1967Proxy,
(real_contract.address, init_data),
{**self.tx_kwargs, **{"from": eth_wrapper.owner}}
)
eth_wrapper.contract = self.w3.eth.contract(
abi=eth_contract.abi,
address=proxy_contract.address
)
elif eth_wrapper.proxy_kind == "uups" and SKIP_PROXY:
constructor_params, init_params = init_params
eth_wrapper.contract = self.construct(eth_contract, constructor_params,
{"from": eth_wrapper.owner})
transact(
self,
eth_wrapper.contract.functions.initialize(*init_params),
{"from": eth_wrapper.owner}
)
def construct(self, contract_factory, constructor_args=(), transact_kwargs={}):
receipt = transact(
self,
contract_factory.constructor(*constructor_args),
transact_kwargs
)
return self.w3.eth.contract(abi=contract_factory.abi, address=receipt.contractAddress)
def build_contract(self, contract_address, contract_factory, contract_name=None):
return self.w3.eth.contract(abi=contract_factory.abi, address=contract_address)
|
import sys
import numpy as np
from panda3d.bullet import *
from panda3d.core import Point3
def obj2TriangleMesh(filename,matrix):
print "Obj->Mesh:"+filename
mesh = BulletTriangleMesh()
vetexArray=[]
# dummy
vetexArray.append(Point3(0,0,0))
with open(filename,"r") as file:
for line in file.readlines():
if line.startswith('v '):
values=line.split()
vec=np.array([float(values[1]),float(values[2]),float(values[3]),1])
vec=np.dot(matrix,vec)
vetexArray.append(Point3(vec[0],vec[1],vec[2]))
elif line.startswith('f'):
values=line.split()
if(values[1].find("/")!=-1):
values = [v[0:v.find("/")] for v in values]  # list comprehension so the indexing below works on Python 3
mesh.add_triangle(vetexArray[int(values[1])] , vetexArray[int(values[2])] , vetexArray[int(values[3])])
return mesh
def obj2ConvexHull(filename,matrix):
print "Obj->ConvexHull:"+filename
mesh = BulletConvexHullShape()
with open(filename,"r") as file:
for line in file.readlines():
if line.startswith('v '):
values=line.split()
vec=np.array([float(values[1]),float(values[2]),float(values[3]),1])
vec=np.dot(matrix,vec)
mesh.add_point(Point3(vec[0],vec[1],vec[2]))
return mesh
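# Hedged usage sketch (not part of the original script): both helpers expect a 4x4
# homogeneous transform applied to every vertex, for example:
#
#   mesh = obj2TriangleMesh('model.obj', np.eye(4))
#   shape = BulletTriangleMeshShape(mesh, dynamic=False)   # static collision shape
#   hull = obj2ConvexHull('model.obj', np.eye(4))          # already a BulletConvexHullShape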
|
#!/usr/bin/python
# DS4A Project
# Group 84
# using node/edge info to create network graph
# and do social network analysis
from os import path
import pandas as pd
bacon_graph_data_path = '../data/oracle_of_bacon_data/clean_data/graph_data.csv'
nominee_count_data_path = '../data/nominations_count.csv'
nominee_count_degree_data_path = '../data/nominee_degree_counts_data.csv'
def load_dataset(filepath):
df = pd.read_csv(filepath)
return df
def write_to_csv(df,filepath):
'''
input: df - a pandas DataFrame
filepath - an output filepath as a string
writes to a csv file
in the same directory as this script
returns: nothing
'''
# if no csv exists
if not path.exists(filepath):
df.to_csv(filepath,index=False)
else:
df.to_csv(filepath, mode='a', header=False,index=False)
degree_df = load_dataset(bacon_graph_data_path)
count_df = load_dataset(nominee_count_data_path)
print(degree_df.info())
print(count_df.info())
df = pd.merge(count_df, degree_df, how='left', left_on=['name','year_film'], right_on = ['name','year'])
print(len(df))
# Count total NaN at each column in a DataFrame
print(" \nCount total NaN at each column in a DataFrame : \n\n",
df.isnull().sum())
# only 33 rows are missing degree and year
df1 = df[df.year.notnull()]
print(len(df1))
print(" \nCount total NaN at each column in a DataFrame : \n\n",
df1.isna().sum())
null_data = df[df.isnull().any(axis=1)]
print(null_data)
df1.year = df1.year.astype(int)
df1.degree = df1.degree.astype(int)
#write_to_csv(df1,nominee_count_degree_data_path)
a = df['name'].drop_duplicates()
print(a)
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
import re
have_symbol = re.compile('[^a-zA-Z0-9._-]+')
wrong_ip = re.compile(r'^0\.|^255\.')
wrong_name = re.compile('[^a-zA-Z0-9._-]+')
def validate_hostname(value):
sym = have_symbol.match(value)
wip = wrong_ip.match(value)
if sym:
raise ValidationError(_('Hostname must contain only letters, numbers and the characters ".", "_" or "-"'))
elif wip:
raise ValidationError(_('Wrong IP address'))
def validate_name(value):
have_symbol = wrong_name.match(value)
if have_symbol:
raise ValidationError(_('The hostname must not contain any special characters'))
|
"""
Functions for aggregating and analyzing exam-related data, such as calculating
student exam performance.
"""
import csv
import os
import pandas as pd
def grade_scantron(
input_scantron, correct_answers, drops=[], item_value=1, incorrect_threshold=0.5
):
"""
Calculate student grades from scantron data.
Compiles data collected from a scantron machine (5-option multiple choice
exam) and calculates grades for each student. Also provides descriptive
statistics of exam performance, as well as a list of the questions "most"
students got incorrect, and saves the distribution of answers for those
poorly performing questions.
This function receives 1 scantron text file and produces 2 output files.
Splitting of the scantron data is specific to each scantron machine. The
indices used in this function are correct for the scantron machine in the
UBC Psychology department as of 2015. Indices need to be adjusted for
different machines.
Scantron exams can be finicky. Students who incorrectly fill out scantrons
need to be considered. Make sure to manually inspect the text file output
by the scantron machine for missing answers before running this. This
function does not correct for human error when filling out the scantron.
Parameters
----------
input_scantron : string
Path to the .txt file produced by the scantron machine.
correct_answers : list
A list of strings containing the *correct* exam answers. For example:
["A", "E", "D", "A", B"]. The order must match the order of
presentation on the exam (i.e. the first list item must correspond
to the first exam question)
drops : list, optional
List of integers containing question numbers that should be excluded
from calculation of grades. For example: [1, 5] will not include
questions 1 and 5 when calculating exam scores.
item_value : int, optional
Integer representing how many points each exam question is worth.
incorrect_threshold : float between [0., 1.], optional
Poorly performing questions are those where few students got the
correct answer. This parameter sets the threshold at which an item is
considered poor. For example, a threshold of 0.4 means that a poor
item is considered to be one where less than 40% of students
chose the correct answer.
"""
# Start and end locations of various pieces of information in the scantron text file.
# These need to be adjusted for different scantron machine.
# Currently set for the machine used in UBC Psychology
surname_idx = (0, 12)
first_name_idx = (12, 21)
student_num_idx = (21, 30)
answers_idx = 30
# output directory
directory, filename = os.path.split(input_scantron)
filename = os.path.splitext(filename)[0]
# calculate total number of points available on the exam
total_points = (len(correct_answers) * item_value) - len(drops)
# create a pandas dataframe to hold the scantron data
summary = ["surname", "first_name", "student_number", "points", "percent"]
questions = ["Q-{}".format(i + 1) for i in range(len(correct_answers))]
df = pd.DataFrame(columns=summary + questions)
# calculate grades
with open(input_scantron, "r") as f:
scantron_data = csv.reader(f)
# loop through every row (student) in the input scantron file
for row in scantron_data:
surname = row[0][surname_idx[0] : surname_idx[1]].lstrip().rstrip()
first_name = row[0][first_name_idx[0] : first_name_idx[1]].lstrip().rstrip()
student_num = (
row[0][student_num_idx[0] : student_num_idx[1]].lstrip().rstrip()
)
answers = row[0][answers_idx : (answers_idx + len(correct_answers))]
points = 0
for i, pair in enumerate(zip(answers, correct_answers)):
if i + 1 not in drops:
if pair[0] == pair[1]:
points += item_value
df_summary = {
"surname": surname,
"first_name": first_name,
"student_number": student_num,
"points": points,
"percent": (points / total_points) * 100,
}
df_questions = {"Q-{}".format(i + 1): a for i, a in enumerate(answers)}
df = df.append([{**df_summary, **df_questions}], ignore_index=True)
df.to_excel(
os.path.join(directory, "{}.xls".format(filename + "_grades")),
sheet_name="grades",
index=False,
)
# write summary statistics
with open(
os.path.join(directory, "{}.txt".format(filename + "_summary")), "w"
) as f:
# calculate descriptive statistics
N = df.shape[0]
mean_percent = df["percent"].mean()
mean_points = df["points"].mean()
std_points = df["points"].std()
range_points = (df["points"].min(), df["points"].max())
f.writelines(
[
"Descriptive Statistics: \n\n",
"N: {}\n".format(N),
"Mean %: {:.2f}%\n".format(mean_percent),
"Mean score (out of {} points): {:.2f}\n".format(
total_points, mean_points
),
"Score SD: {:.2f}\n".format(std_points),
"Range: {} (Min: {}, Max: {})\n\n\n".format(
range_points[1] - range_points[0], range_points[0], range_points[1]
),
]
)
if len(drops) > 0:
f.writelines(
["Dropped questions: {}\n\n\n".format(", ".join(map(str, drops)))]
)
f.writelines(
[
"Problem Items (questions that less "
"than {}% of students got correct):\n\n".format(
incorrect_threshold * 100
)
]
)
problems = False
for i, item in enumerate(questions):
cur_q = df[item]
if len(cur_q[cur_q == correct_answers[i]]) < (N * incorrect_threshold):
problems = True
distribution = cur_q.value_counts()
f.writelines(
[
"{} (A: {}, B: {}, C: {}, D: {}, E: {})\n".format(
cur_q.name,
distribution.get("A", 0),
distribution.get("B", 0),
distribution.get("C", 0),
distribution.get("D", 0),
distribution.get("E", 0),
)
]
)
if not problems:
f.writelines(["None"])
|
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import json
import os
import pytest
import platform
import subprocess
import sys
from distutils.version import LooseVersion
ROOT_DEV_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
if ROOT_DEV_PATH not in sys.path:
sys.path.append(ROOT_DEV_PATH)
from cmake.Tools import common
from cmake.Tools.Platform.Android import android_support, generate_android_project
@pytest.mark.parametrize(
"from_override, version_str, expected_result", [
pytest.param(False, b"Gradle 4.10.1", LooseVersion('4.10.1'), id='equalMinVersion'),
pytest.param(False, b"Gradle 5.6.4", LooseVersion('5.6.4'), id='equalMaxVersion'),
pytest.param(False, b"Gradle 1.0", common.LmbrCmdError('error', common.ERROR_CODE_ENVIRONMENT_ERROR), id='lessThanMinVersion'),
pytest.param(False, b"Gradle 26.3", common.LmbrCmdError('error', common.ERROR_CODE_ENVIRONMENT_ERROR), id='greaterThanMaxVersion'),
pytest.param(True, b"Gradle 4.10.1", LooseVersion('4.10.1')),
pytest.param(True, b"Gradle 5.6.4", LooseVersion('5.6.4')),
pytest.param(True, b"Gradle 1.0", common.LmbrCmdError('error', common.ERROR_CODE_ENVIRONMENT_ERROR)),
pytest.param(True, b"Gradle 26.3", common.LmbrCmdError('error', common.ERROR_CODE_ENVIRONMENT_ERROR))
]
)
def test_verify_gradle(tmpdir, from_override, version_str, expected_result):
orig_check_output = subprocess.check_output
if from_override:
gradle_script = 'gradle.bat' if platform.system() == 'Windows' else 'gradle'
tmpdir.ensure(f'gradle/bin/{gradle_script}')
override_gradle_install_path = str(tmpdir.join('gradle').realpath())
else:
override_gradle_install_path = None
def _mock_check_output(args, shell):
assert args
assert shell is True
if from_override:
assert args[0] == os.path.normpath(f'{override_gradle_install_path}/bin/{gradle_script}')
assert args[1] == '-v'
return version_str
subprocess.check_output = _mock_check_output
try:
result_version, result_override_path = generate_android_project.verify_gradle(override_gradle_install_path)
assert isinstance(expected_result, LooseVersion)
assert result_version == expected_result
if from_override:
assert os.path.normpath(result_override_path) == os.path.normpath(os.path.join(override_gradle_install_path, 'bin', gradle_script))
else:
assert result_override_path is None
except common.LmbrCmdError:
assert isinstance(expected_result, common.LmbrCmdError)
finally:
subprocess.check_output = orig_check_output
@pytest.mark.parametrize(
"from_override, version_str, expected_result", [
pytest.param(False, b"cmake version 3.17.0\nKit Ware", LooseVersion('3.17.0'), id='equalMinVersion'),
pytest.param(False, b"cmake version 4.0.0\nKit Ware", LooseVersion('4.0.0'), id='greaterThanMinVersion'),
pytest.param(False, b"cmake version 1.0.0\nKit Ware", common.LmbrCmdError('error', common.ERROR_CODE_ENVIRONMENT_ERROR), id='lessThanMinVersion'),
pytest.param(True, b"cmake version 3.17.0\nKit Ware", LooseVersion('3.17.0'), id='override_equalMinVersion'),
pytest.param(True, b"cmake version 4.0.0\nKit Ware", LooseVersion('4.0.0'), id='override_greaterThanMinVersion'),
pytest.param(True, b"cmake version 1.0.0\nKit Ware", common.LmbrCmdError('error', common.ERROR_CODE_ENVIRONMENT_ERROR), id='override_lessThanMinVersion'),
]
)
def test_verify_cmake(tmpdir, from_override, version_str, expected_result):
orig_check_output = subprocess.check_output
if from_override:
cmake_exe = 'cmake.exe' if platform.system() == 'Windows' else 'cmake'
tmpdir.ensure(f'cmake/bin/{cmake_exe}')
override_cmake_install_path = str(tmpdir.join('cmake').realpath())
else:
override_cmake_install_path = None
def _mock_check_output(args, shell):
assert args
assert shell is True
if from_override:
assert args[0] == os.path.normpath(f'{override_cmake_install_path}/bin/{cmake_exe}')
assert args[1] == '--version'
return version_str
subprocess.check_output = _mock_check_output
try:
result_version, result_override_path = generate_android_project.verify_cmake(override_cmake_install_path)
assert isinstance(expected_result, LooseVersion)
assert result_version == expected_result
if from_override:
assert os.path.normpath(result_override_path) == os.path.normpath(os.path.join(override_cmake_install_path, 'bin', cmake_exe))
else:
assert result_override_path is None
except common.LmbrCmdError:
assert isinstance(expected_result, common.LmbrCmdError)
finally:
subprocess.check_output = orig_check_output
@pytest.mark.parametrize(
"from_override, version_str, expected_result", [
pytest.param(False, b"1.0.0", LooseVersion('1.0.0')),
pytest.param(False, b"1.10.0", LooseVersion('1.10.0')),
pytest.param(True, b"1.0.0", LooseVersion('1.0.0')),
pytest.param(True, b"1.10.0", LooseVersion('1.10.0'))
]
)
def test_verify_ninja(tmpdir, from_override, version_str, expected_result):
orig_check_output = subprocess.check_output
if from_override:
ninja_exe = 'ninja.exe' if platform.system() == 'Windows' else 'ninja'
tmpdir.ensure(f'ninja/{ninja_exe}')
override_cmake_install_path = str(tmpdir.join('ninja').realpath())
else:
override_cmake_install_path = None
def _mock_check_output(args, shell):
assert args
assert shell is True
if from_override:
assert args[0] == os.path.normpath(f'{override_cmake_install_path}/{ninja_exe}')
assert args[1] == '--version'
return version_str
subprocess.check_output = _mock_check_output
try:
result_version, result_override_path = generate_android_project.verify_ninja(override_cmake_install_path)
assert isinstance(expected_result, LooseVersion)
assert result_version == expected_result
if from_override:
assert os.path.normpath(result_override_path) == os.path.normpath(os.path.join(override_cmake_install_path, ninja_exe))
else:
assert result_override_path is None
except common.LmbrCmdError:
assert isinstance(expected_result, common.LmbrCmdError)
finally:
subprocess.check_output = orig_check_output
|
import numpy as np
import scipy.signal as signal
from q1pulse.instrument import Q1Instrument
from init_pulsars import qcm0, qrm1
from plot_util import plot_output
instrument = Q1Instrument('q1')
instrument.add_qcm(qcm0)
instrument.add_qrm(qrm1)
instrument.add_control('P1', qcm0.name, [0])
instrument.add_control('P2', qrm1.name, [1])
instrument.add_control('P2c', qcm0.name, [1])
instrument.add_readout('R1', qrm1.name, [])
# gain = 0: +/- 0.5V; gain -6 dB: +/- 1.0 V
vmax_in = 0.5
qrm1.in0_gain(0)
qrm1.in1_gain(0)
p = instrument.new_program('acquire')
p.repetitions = 1
P1 = p.P1
P2 = p.P2
P2c = p.P2c
R1 = p.R1
N = 5
n_acq = N*N*p.repetitions
R1.add_acquisition_bins('non-weighed', n_acq)
R1.add_acquisition_bins('weighed', n_acq)
R1.add_weight('gaus100', signal.gaussian(100, 12))
#R1.add_weight('gaus100', np.ones(100))
R1.integration_length_acq = 100
amplitude = 0.125
# output range QCM (P1): +/- 2.5 V
# output range QRM (P2): +/- 0.5 V
# input range QRM (R1): +/- 0.5 V (gain = 0 dB)
v1_max = 0.5/2.5*0.9
v2_max = 0.5/0.5*0.9
with p.loop_linspace(-v1_max, v1_max, N) as v1:
with p.loop_linspace(-v2_max, v2_max, N) as v2:
with p.parallel():
P1.block_pulse(500, v1)
P2.block_pulse(500, v2)
P2c.block_pulse(500, v1)
# delay from output to input is ~108 ns
R1.acquire('non-weighed', 'increment', t_offset=112)
with p.parallel():
P1.block_pulse(500, v1)
P2c.block_pulse(500, v1)
P2.block_pulse(500, v2)
R1.acquire_weighed('weighed', 'increment', 'gaus100', t_offset=120)
p.wait(1100)
#p.describe()
#print()
p.compile(listing=True, annotate=True)
instrument.run_program(p)
plot_output([qcm0, qrm1])
# @@@ program should return data.
data_n = instrument.get_acquisition_bins('R1', 'non-weighed')
data_w = instrument.get_acquisition_bins('R1', 'weighed')
dn0 = np.array(data_n['integration']['path0']).reshape((p.repetitions,N,N))/R1.integration_length_acq*vmax_in
dn1 = np.array(data_n['integration']['path1']).reshape((p.repetitions,N,N))/R1.integration_length_acq*vmax_in
dw0 = np.array(data_w['integration']['path0']).reshape((p.repetitions,N,N))*vmax_in
dw1 = np.array(data_w['integration']['path1']).reshape((p.repetitions,N,N))*vmax_in
with np.printoptions(precision=2, threshold=1000):
print('non-weighed')
print(dn0)
print(dn1)
print('weighed')
print(dw0)
print(dw1)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from djangocms_bootstrap4.helpers import concat_classes
from .models import Bootstrap4Spacing
class Bootstrap4SpacingPlugin(CMSPluginBase):
"""
Components > "Card" Plugin
https://getbootstrap.com/docs/4.0/components/card/
"""
model = Bootstrap4Spacing
name = _('Spacing')
module = _('Bootstrap 4')
render_template = 'djangocms_bootstrap4/spacing.html'
change_form_template = 'djangocms_bootstrap4/admin/spacing.html'
allow_children = True
fieldsets = [
(None, {
'fields': (
'space_property',
'space_sides',
'space_size',
'space_device',
)
}),
(_('Advanced settings'), {
'classes': ('collapse',),
'fields': (
'tag_type',
'attributes',
)
}),
]
def render(self, context, instance, placeholder):
spacing = '{}'.format(instance.space_property)
if instance.space_sides:
spacing += '{}'.format(instance.space_sides)
if instance.space_device:
spacing += '-{}'.format(instance.space_device)
spacing += '-{}'.format(instance.space_size)
classes = concat_classes([
spacing,
instance.attributes.get('class'),
])
instance.attributes['class'] = classes
return super(Bootstrap4SpacingPlugin, self).render(
context, instance, placeholder
)
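# For illustration (not part of the original plugin): with space_property="m",
# space_sides="t", space_device="md" and space_size=3, the concatenation above yields the
# Bootstrap utility class "mt-md-3", which is then merged into the tag's class attribute.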
plugin_pool.register_plugin(Bootstrap4SpacingPlugin)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 15:54:15 2020
@author: Leo Turowski
@executive author: Noah Becker
"""
import csv
import os
import ntpath
from zipfile import ZipFile
import pandas as pd
import numpy as np
import pickle
import typing
import math
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import GramianAngularField as GAF
from numpy.lib import recfunctions as rfn
from pyts.datasets import load_gunpoint
def zip_to_csv(path:str):
"""
**Convert a packaged sql file into a csv file**
This function unpacks the given zip file at its location and invokes the given sql_to_csv
function on the sql file, with the same name as the zip file
param path: as the absolute path to the zip file with the sql in it, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='zip'):
print("this is not a zip file")
return
os.chdir(os.path.dirname(path))
filename = ntpath.basename(path)
with ZipFile(filename, 'r') as zip:
zip.extractall()
sql_to_csv(path[:-4])
def zip_to_npy(path:str):
"""
**Convert a packaged sql file into a npy file**
This function unpacks the given zip file at its location and invokes the given sql_to_npy
function on the sql file, with the same name as the zip file
param path: as the absolute path to the zip file with the sql in it, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='zip'):
print("this is not a zip file")
return
os.chdir(os.path.dirname(path))
filename = ntpath.basename(path)
with ZipFile(filename, 'r') as zip:
zip.extractall()
sql_to_npy(path[:-4])
def sql_to_csv(path:str,delimiter:str='\n'):
"""
**Convert a set of INSERT statement into csv format**
Extracting the Data from a set of INSERT statements saved in a sql file, this function
converts the data into a csv file where every non-INSERT line is saved in a separate pickle
file and the data of the INSERT statements is stored line after line, with the given delimiter
at the end of each line.
param path: as the absolute path to the sql file, type str
param delimiter: as the delimiter at the end of each line, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='sql'):
print("this is not an sql file")
return
os.chdir(os.path.dirname(path))
filename = ntpath.basename(path)
with open(filename, 'r') as oldfile:
with open(filename[:-3] + 'csv', 'w',newline='') as newfile:
content = oldfile.readlines()
data = []
picklelist = []
for line in content:
if(line.startswith('I')):
line = line.split('(')
line = line[1]  # cut off the INSERT part of the sql statement
line = line.split(')')  # cut off the ");\n" end of the sql statement
line = line[0]
line = line.replace("'","")
data.append(line)
else:
picklelist.append(line)
write = csv.writer(newfile,delimiter=delimiter)
write.writerow(data)
pickle.dump(picklelist, open((filename[:-3] + 'p'),'wb'))
def sql_to_npy(path:str,delimiter:str= ','):
"""
**Convert a set of INSERT statement into a numpy array**
Similar to the csv this function also stores unused data in a pickle file and creates
a brand new file with the extracted data, this time in an npy format, however this time
the delimiter has to be the delimiter used in the sql file
param path: as the absolute path to the sql file, type str
param delimiter: as the string used in the sql file to separate the data, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!="sql"):
print("this is not an sql file")
return
os.chdir(os.path.dirname(path))
filename = ntpath.basename(path)
with open(filename, "r") as oldfile:
newfilename = filename[:-3]
content = oldfile.readlines()
data = []
picklelist = []
for line in content:
if(line.startswith("I")):
line = line.split("(")
line = line[1]  # cut off the INSERT part of the sql statement
line = line.split(')')  # cut off the ");\n" end of the sql statement
line = line[0]
line = line.replace("'","")
data.append(line)
else:
picklelist.append(line)
nparray = np.loadtxt(data, dtype=str,delimiter=delimiter,encoding = 'ASCII',ndmin=2)
np.save(newfilename + "npy", nparray)
pickle.dump(picklelist, open(newfilename + "p","wb"))
def csv_to_sql(path:str,delimiter:str='\n'):
"""
**Convert a csv file into a set of INSERT statements**
This function converts each set of data divided by the given delimiter
of a csv file into an INSERT statement. It also adds data
stored in a pickle file, with the same name as the csv file,
as a commentary at the beginning, so as not to impede the functionality
param path: as the absolute path to the csv file, type str
param delimiter: as the string used to detect the different data sets, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='csv'):
print("this is not a csv file")
return
os.chdir(os.path.dirname(path))
filename = ntpath.basename(path)
with open(path, newline='') as oldfile:
newfilename = filename[:-3]
picklelist = pickle.load(open(newfilename + "p","rb"))
table = picklelist[0]
table = table[table.rfind(" ") + 1:-1]
reader = csv.reader(oldfile,delimiter=delimiter)
with open(newfilename+ "sql", "w") as newfile:
newfile.writelines(picklelist)
for line in reader:
line=''.join(line)
line = line.replace(",","','")
newfile.write("INSERT INTO %s VALUES('" %table)
newfile.write("%s');\n" % line)
def csv_to_npy(path:str,delimiter:str=','):
"""
**Convert a csv file into a numpy array representation**
This function converts a csv file into a 2-dimensional numpy representation,
while every set of data divided by the given delimiter is interpreted as a new row
param path: as the absolute path to the csv file, type str
param delimiter: the string used to determine the rows of the numpy array, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='csv'):
print("this is not a csv file")
return
os.chdir(os.path.dirname(path))
filename = ntpath.basename(path)
newfilename = filename[:-3] + 'npy'
data = np.loadtxt(path, dtype=str,delimiter=delimiter,encoding = 'ASCII',ndmin=2)
np.save(newfilename,data)
def npy_to_sql(path:str):
"""
**Convert a npy file into a set of INSERT statements**
this function is the reverse function to sql_to_npy and when used in conjunction
you end up with the same file in the end as you had in the beginning
param path: as the absolute path to the npy file, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='npy'):
print("this is not an npy file")
return
os.chdir(os.path.dirname(path))
np_array = np.load(path, 'r')
filename = ntpath.basename(path)
with open(filename[:-3] + 'sql', 'w') as newfile:
picklelist = pickle.load(open(filename[:-3] + "p","rb"))
newfile.writelines(picklelist)
table = picklelist[0]
table = table[table.rfind(" ") + 1:-1]
for row in np_array:
data = ','.join(row)
data += "'"
data = data.replace(",","','")
data = data.replace("'NULL'","NULL")
newfile.write("INSERT INTO {0} VALUES('{1});\n".format(table,data))
def npy_to_csv(path:str):
"""
**Converts a npy file into a csv representation of the data**
Similar to npy_to_sql this function is the reverse function to csv_to_npy
param path: as the absolute path to the npy file, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='npy'):
print("this is not an npy file")
return
os.chdir(os.path.dirname(path))
np_array = np.load(path, 'r')
filename = ntpath.basename(path)
with open(filename[:-3] + 'csv', 'w') as newfile:
for row in np_array:
data = ','.join(row)
newfile.write("{0}\n".format(data))
def gen_GAF(path:str):
"""
**Generate a Gramian Angular Field with User input**
this function gets the input from the user through the console to generate
either a Gramian Angular Summation Field or a Gramian Angular Difference Field
from the data of a numpy array using the gen_GAF_exec function
param path: as the absolute path to the npy file, type str
"""
path=checkpath(path)
if(not(os.path.isfile(path))):
print("this path does not lead to a file")
return
if(path[-3:]!='npy'):
print("this is not an npy file")
return
os.chdir(os.path.dirname(path))
np_array = np.load(path, encoding = 'ASCII')
method = int(input("Enter if you either want a Summation field(1) or a Difference field(2):\n"))
if(method == 1):
method = 'summation'
else:
method='difference'
null_value = input("Enter the number you want to represent missing/NULL values (Default: 0):\n") or '0'
gen_GAF_exec(np_array,(-1,1), method,null_value)
def gen_GAF_exec(data:list, sample_range:None or tuple = (-1,1), method:str = 'summation',null_value:str='0'):
"""
**Generate a Gramian angular Field**
this is the actual function when it comes to generating a Gramian Angular Field
out of the data of a numpy array. This function takes different variables to determine
how the Field should be scaled, what its size should be
and if it is either a summation or difference Field
param data: as the content of a npy file , type list
param sample_range: as the range the data should be scaled to, type None or tuple
param method: as the type of field it should be, type 'summation' or 'difference'
param null_value: as the number to use instead of NULL, type str
"""
gaf = GAF(sample_range=sample_range,method=method)
data = np.where(data=='NULL', null_value , data)
data = data[:,3:].astype(dtype=float)
data_gaf = gaf.fit_transform(data)
plt.imshow(data_gaf[0],cmap='rainbow',origin='lower')
plt.show()
def false_input(path:str):
"""
**Print error and return to main**
this function prints an error message to the console and returns to main
param path: this parameter is only there so the function has a proper form, type str
"""
print("this is an invalid option")
main()
def exit(path:str):
"""
**Print Message and end program**
This function prints a message to the console and ends the program
param path: this parameter is only there so the function has a proper form, type str
"""
print("thank you for using shapy, the converter of your choice")
def switchoption(n:int,path:str):
"""
**Invoke a function**
this function invokes one of the functions of this program corresponding to n
and gives it the path as input
param n: this number specifies which function should be invoked, type int
param path: this is the path to the file used for the function to be invoked, type str
"""
switcher = {
1: zip_to_csv,
2: zip_to_npy,
3: sql_to_csv,
4: sql_to_npy,
5: csv_to_sql,
6: csv_to_npy,
7: npy_to_sql,
8: npy_to_csv,
9: gen_GAF,
0:exit,
}
function = switcher.get(n,false_input)
function(path)
def main():
"""
**Get User input and invoke functions**
this function uses the console to get input from the user, as to which function
should be invoked and where to find the corresponding file
"""
path = input("enter path:\n")
print("to exit (0)\n")
print("zip_to_csv(1)\n")
print("zip_to_npy(2)\n")
print("sql_to_csv(3)\n")
print("sql_to_npy(4)\n")
print("csv_to_sql(5)\n")
print("csv_to_npy(6)\n")
print("npy_to_sql(7)\n")
print("npy_to_csv(8)\n")
print("gen_GAF(9)\n")
n = int(input("what do you want to do:"))
switchoption(n,path)
def checkpath(path:str):
"""
**check the path for relativity**
this function removes any quotation from a path and checks if it is relative or absolute
it returns a *cleansed* path being the absolute representation of the given path
param path: the string to be used as a path, type str
return path: the absolute path, type str
"""
path=path.replace('"','')
path=path.replace("'","")
if(os.path.isabs(path)):
return path
return os.path.join(os.getcwd(), path)
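# Hedged usage sketch (not part of the original module): besides the interactive main()
# menu below, the converters can be called directly; the paths are placeholders.
#
#   sql_to_csv(r"C:\data\dump.sql")   # writes dump.csv plus dump.p (pickled non-INSERT lines)
#   csv_to_npy(r"C:\data\dump.csv")   # writes dump.npy
#   gen_GAF(r"C:\data\dump.npy")      # interactive Gramian Angular Field plot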
if __name__ == "__main__":
main()
|
from ..core import *
from ..callback import *
from ..basic_train import Learner, LearnerCallback
__all__ = ['GeneralScheduler', 'TrainingPhase']
@dataclass
class TrainingPhase():
"Schedule hyper-parameters for a phase of `length` iterations."
length:int
def __post_init__(self): self.scheds = dict()
def schedule_hp(self, name, vals, anneal=None):
"Adds a schedule for `name` between `vals` using `anneal`."
self.scheds[name] = Scheduler(vals, self.length, anneal)
return self
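# Hedged usage sketch (not part of the original module), assuming the usual fastai v1
# training loop; `learn`, the phase lengths and `annealing_cos` (re-exported from
# fastai.callback) are placeholders/assumptions. GeneralScheduler is defined below.
#
#   n = len(learn.data.train_dl)
#   phases = [
#       TrainingPhase(n * 2).schedule_hp('lr', (1e-4, 1e-2), anneal=annealing_cos),
#       TrainingPhase(n * 3).schedule_hp('lr', (1e-2, 1e-5), anneal=annealing_cos),
#   ]
#   learn.callbacks.append(GeneralScheduler(learn, phases))
#   learn.fit(5)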
class GeneralScheduler(LearnerCallback):
"Schedule multiple `TrainingPhase` for a `Learner`."
def __init__(self, learn:Learner, phases:Collection[TrainingPhase], start_epoch:int=None):
super().__init__(learn)
self.phases,self.start_epoch = phases,start_epoch
def on_train_begin(self, epoch:int, **kwargs:Any)->None:
"Initialize the schedulers for training."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.scheds = [p.scheds for p in self.phases]
self.opt = self.learn.opt
for k,v in self.scheds[0].items():
v.restart()
self.opt.set_stat(k, v.start)
self.idx_s = 0
return res
def jump_to_epoch(self, epoch:int)->None:
for _ in range(len(self.learn.data.train_dl) * epoch):
self.on_batch_end(True)
def on_batch_end(self, train, **kwargs:Any)->None:
"Take a step in lr,mom sched, start next stepper when the current one is complete."
if train:
if self.idx_s >= len(self.scheds): return {'stop_training': True, 'stop_epoch': True}
sched = self.scheds[self.idx_s]
for k,v in sched.items(): self.opt.set_stat(k, v.step())
if list(sched.values())[0].is_done: self.idx_s += 1
|
#!/usr/bin/env python
"""
globifest/globitest/testBoundedStatefulParser.py - Tests for BoundedStatefulParser module
Copyright 2018, Daniel Kristensen, Garmin Ltd, or its subsidiaries.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import io
import os
import sys
import unittest
from GlobifestLib import Log, BoundedStatefulParser
from GlobifestLib.StatefulParser import PARSE_STATUS, FLAGS
class TestBoundedStatefulParser(unittest.TestCase):
def create_string_parser(self, text):
self.parser = BoundedStatefulParser.new(text, "'", flags=FLAGS.DEBUG)
return self.parser
def create_dq_string_parser(self, text):
self.parser = BoundedStatefulParser.new(text, "\"", flags=FLAGS.DEBUG)
return self.parser
def create_parenthases_parser(self, text):
self.parser = BoundedStatefulParser.new(text, "(", ")", flags=FLAGS.DEBUG | FLAGS.MULTI_LEVEL)
return self.parser
def doCleanups(self):
Log.Logger.set_err_pipe(sys.stderr)
if not self._outcome.success:
if hasattr(self, "parser"):
print(self.parser.get_debug_log());
self.parser = None
if self.pipe:
print(self.pipe.getvalue().rstrip())
self.pipe = None
def setUp(self):
self.pipe = io.StringIO()
Log.Logger.set_err_pipe(self.pipe)
def test_begin_lbound1(self):
p = self.create_parenthases_parser("")
# Verify no error if stream has lbound at beginning
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("(foobaz"))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.get_status())
self.assertEqual("foobaz", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_begin_lbound2(self):
p = self.create_parenthases_parser("")
# Verify error if stream has lbound in middle
self.assertEqual(PARSE_STATUS.ERROR, p.parse("foobaz("))
self.assertEqual(PARSE_STATUS.ERROR, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("foobaz(", p.get_remaining_text())
def test_begin_rbound1(self):
p = self.create_parenthases_parser("")
# Verify error if stream has rbound at beginning
self.assertEqual(PARSE_STATUS.ERROR, p.parse(")foobaz"))
self.assertEqual(PARSE_STATUS.ERROR, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual(")foobaz", p.get_remaining_text())
def test_begin_rbound2(self):
p = self.create_parenthases_parser("")
# Verify error if stream has rbound in middle
self.assertEqual(PARSE_STATUS.ERROR, p.parse("foobaz)"))
self.assertEqual(PARSE_STATUS.ERROR, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("foobaz)", p.get_remaining_text())
def test_begin_non_matching1(self):
p = self.create_string_parser("")
# Verify error if boundary is not present
self.assertEqual(PARSE_STATUS.ERROR, p.parse("foobaz"))
self.assertEqual(PARSE_STATUS.ERROR, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("foobaz", p.get_remaining_text())
def test_parenthases_complete1(self):
# Verify section is found in initial load
p = self.create_parenthases_parser("(hi)")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_complete2(self):
p = self.create_parenthases_parser("")
# Verify section is found on first parse
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("(there)"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_complete3(self):
p = self.create_parenthases_parser("")
# Verify section is found on subsequent parse
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse(""))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("(there)"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_complete4(self):
p = self.create_parenthases_parser("")
# Verify section is found split across parses with no intermediate text
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("(hi "))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("there)"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_complete5(self):
p = self.create_parenthases_parser("")
# Verify section is found split across parses with intermediate text
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("(hi "))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("there"))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse(" guys)"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi there guys", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_create(self):
p = self.create_parenthases_parser("")
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse(""))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_nested_text(self):
p = self.create_parenthases_parser("")
# Verify nested parenthases are parsed correctly
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("((hi) there) guys"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("(hi) there", p.get_parsed_text())
self.assertEqual(" guys", p.get_remaining_text())
def test_parenthases_remaining_text1(self):
p = self.create_parenthases_parser("")
# Verify text after section is still present on single parse
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("(hi) there"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual(" there", p.get_remaining_text())
def test_parenthases_remaining_text2(self):
p = self.create_parenthases_parser("")
# Verify text after section is still present on subsequent parse
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("(h"))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("i) there"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual(" there", p.get_remaining_text())
def test_parenthases_with_string1(self):
# Verify parentheses with an embedded string
p = self.create_parenthases_parser("('hi' there)")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("'hi' there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string2(self):
# Verify parentheses with an embedded string, using alternate string character
p = self.create_parenthases_parser('("hi" there)')
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual('"hi" there', p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string3(self):
# Verify parentheses with an embedded string, with nesting levels before and after string
p = self.create_parenthases_parser("((hi) 'there' (guys))")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("(hi) 'there' (guys)", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string4(self):
# Verify parentheses with an embedded string, with unmatched parentheses inside the string
p = self.create_parenthases_parser("('th(ere')")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("'th(ere'", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string5(self):
# Verify parentheses with an embedded string, with unmatched parentheses inside the string
p = self.create_parenthases_parser("('the)re')")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("'the)re'", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string6(self):
# Verify a string with parentheses after the match
p = self.create_parenthases_parser("(hi)'(there)'")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual("'(there)'", p.get_remaining_text())
def test_parenthases_with_string7(self):
# Verify a string with parentheses and escapes
p = self.create_parenthases_parser("(hi'(\\'there)')")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi'(\\'there)'", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string8(self):
# Verify a string with parentheses and escapes
p = self.create_parenthases_parser("")
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("(hi '"))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("\\')there(\\'"))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("' guys)"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi '\\')there(\\'' guys", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_parenthases_with_string9(self):
        # Verify a string with parentheses and mixed string delimiters
p = self.create_parenthases_parser("(hi 'there\\'\") guys')")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi 'there\\'\") guys'", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_alternate_bound(self):
p = self.create_dq_string_parser("")
# Verify string with different boundary characters
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("\"hi there'\""))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi there'", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_complete1(self):
# Verify string is found in initial load
p = self.create_string_parser("'hi'")
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_complete2(self):
p = self.create_string_parser("")
# Verify string is found on first parse
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("'there'"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_complete3(self):
p = self.create_string_parser("")
# Verify string is found on subsequent parse
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse(""))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("'there'"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_complete4(self):
p = self.create_string_parser("")
# Verify string is found split across parses with no intermediate text
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("'hi "))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("there'"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi there", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_complete5(self):
p = self.create_string_parser("")
# Verify string is found split across parses with intermediate text
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("'hi "))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("there"))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse(" guys'"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi there guys", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_create(self):
p = self.create_string_parser("")
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse(""))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.get_status())
self.assertEqual("", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_remaining_text1(self):
p = self.create_string_parser("")
# Verify text after string is still present on single parse
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("'hi' there"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual(" there", p.get_remaining_text())
def test_string_remaining_text2(self):
p = self.create_string_parser("")
# Verify text after string is still present on subsequent parse
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("'hi"))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("' there"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi", p.get_parsed_text())
self.assertEqual(" there", p.get_remaining_text())
def test_string_with_escape1(self):
p = self.create_string_parser("'hi \\'there\\''")
# Verify string parsing with escaped string delimiters
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi \\'there\\'", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_with_escape2(self):
p = self.create_string_parser("'hi \\\"there\\\"'")
# Verify string parsing with escaped string delimiter which is not the boundary
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi \\\"there\\\"", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_with_escape3(self):
p = self.create_string_parser("")
        # Verify string parsing with an escaped delimiter split across parses
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse("'hi"))
self.assertEqual(PARSE_STATUS.INCOMPLETE, p.parse(" there "))
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("\\'guys'"))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi there \\'guys", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
def test_string_with_escape4(self):
p = self.create_dq_string_parser("")
# Verify string with different boundary characters and escapes
self.assertEqual(PARSE_STATUS.FINISHED, p.parse("\"hi \\\"guys\""))
self.assertEqual(PARSE_STATUS.FINISHED, p.get_status())
self.assertEqual("hi \\\"guys", p.get_parsed_text())
self.assertEqual("", p.get_remaining_text())
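# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original test suite): the parser factory
# methods used above (create_parenthases_parser / create_string_parser) and
# the PARSE_STATUS enum are defined elsewhere and are not shown here. The
# minimal class below illustrates one way an incremental parenthesis matcher
# with the same parse/get_status/get_parsed_text/get_remaining_text interface
# could work. It only handles nesting and trailing text; the string-literal
# and escape handling exercised by the later tests is deliberately omitted.
# ---------------------------------------------------------------------------
from enum import Enum
class SKETCH_PARSE_STATUS(Enum):
    INCOMPLETE = 0
    FINISHED = 1
class SketchParenthesesParser:
    """Illustrative incremental matcher for a single parenthesised section."""
    def __init__(self, text=""):
        self._depth = 0
        self._parsed = ""
        self._remaining = ""
        self._status = SKETCH_PARSE_STATUS.INCOMPLETE
        if text:
            self.parse(text)
    def parse(self, text):
        # Once finished, any further input is treated as remaining text.
        if self._status is SKETCH_PARSE_STATUS.FINISHED:
            self._remaining += text
            return self._status
        for i, ch in enumerate(text):
            if ch == "(":
                self._depth += 1
                if self._depth == 1:
                    continue  # the outermost '(' is not part of the parsed text
            elif ch == ")":
                self._depth -= 1
                if self._depth == 0:
                    self._status = SKETCH_PARSE_STATUS.FINISHED
                    self._remaining = text[i + 1:]
                    return self._status
            self._parsed += ch
        return self._status
    def get_status(self):
        return self._status
    def get_parsed_text(self):
        return self._parsed
    def get_remaining_text(self):
        return self._remaining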
|
#!/usr/bin/env python
from projections.atlas import atlas
from projections.rasterset import RasterSet, Raster
import projections.predicts as predicts
import projections.rds as rds
# Read Katia's abundance model (mainland)
mod = rds.read('../models/ab-mainland.rds')
predicts.predictify(mod)
# Import standard PREDICTS rasters
rasters = predicts.rasterset('rcp', 'aim', 2020, 'medium')
rs = RasterSet(rasters)
rs[mod.output()] = mod
data = rs.eval(mod.output())
# Display the raster
atlas(data, mod.output(), 'viridis')
|
from django.core.management.base import BaseCommand
from apps.api.notice.models import GroupEvent
from django.contrib.auth import get_user_model
class Command(BaseCommand):
    help = 'Sandbox command for manual testing'
def handle(self, *args, **options):
ge = GroupEvent.objects.first()
print(ge.get_l_count(user=get_user_model().objects.first()))
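        # Usage note (assumption): Django exposes this as
        # "python manage.py <module_name>", where the command name is the
        # file name of this module (not shown in this snippet).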
|
"""
Test the MicroPython driver for M5Stack U097, 4 relays I2C grove unit.
In SYNC mode, the LED is controlled by the RELAY state.
* Author(s):
28 May 2021: Meurisse D. (shop.mchobby.be) - port to MicroPython
https://github.com/m5stack/M5Stack/blob/master/examples/Unit/4-RELAY/4-RELAY.ino
"""
from machine import I2C
from m4relay import Relays
from time import sleep
# Pico - I2C(0) - sda=GP8, scl=GP9
i2c = I2C(0)
# M5Stack core
# i2c = I2C( sda=Pin(21), scl=Pin(22) )
rel = Relays(i2c)
# The LED is controlled with the relay
# Switch all relays ON
for i in range(4): # relays 0 to 3
rel.relay( i, True )
sleep( 1 )
# Switch all relays OFF
for i in range(4): # relays 0 to 3
rel.relay( i, False )
sleep( 1 )
|
import json
from enum import Enum
from eva.models.storage.batch import Batch
class ResponseStatus(str, Enum):
FAIL = -1
SUCCESS = 0
class ResponseEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Batch):
return {"__batch__": obj.to_json()}
return json.JSONEncoder.default(self, obj)
def as_response(d):
if "__batch__" in d:
return Batch.from_json(d["__batch__"])
else:
return d
class Response:
"""
Data model for EVA server response
"""
def __init__(self, status: ResponseStatus, batch: Batch, metrics=None):
self._status = status
self._batch = batch
self._metrics = metrics
def to_json(self):
obj = {'status': self.status,
'batch': self.batch,
'metrics': self.metrics}
return json.dumps(obj, cls=ResponseEncoder)
@classmethod
def from_json(cls, json_str: str):
obj = json.loads(json_str, object_hook=as_response)
return cls(**obj)
def __eq__(self, other: 'Response'):
return self.status == other.status and \
self.batch == other.batch and \
self.metrics == other.metrics
def __str__(self):
return 'Response Object:\n' \
'@status: %s\n' \
'@batch: %s\n' \
'@metrics: %s' \
% (self.status, self.batch, self.metrics)
@property
def status(self):
return self._status
@property
def batch(self):
return self._batch
@property
def metrics(self):
return self._metrics
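# Hedged round-trip sketch (not part of the original module). A None batch is
# used here so no real Batch object needs to be constructed; field values are
# illustrative only.
if __name__ == "__main__":
    original = Response(status=ResponseStatus.SUCCESS, batch=None)
    restored = Response.from_json(original.to_json())
    assert restored == original
    print(restored)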
|
import pandas as pd
import numpy as np
import boost_histogram as bh
from mpi4py import MPI
class Spectrum:
"""Represents a histogram of some quantity."""
def __init__(self, loader, cut, var, weight=None):
# Associate this spectrum with the loader
loader.add_spectrum(self)
self._cut = cut
self._var = var
self._wgt = weight
self._dfvars = []
self._dfwgts = []
def fill(self, tables):
# Compute the var and complete cut
dfvar = self._var(tables)
dfcut = self._cut(tables)
# We allow the cut to have any subset of the indices used in the var
# The two dataframes need to be aligned in this case
if not dfvar.index.equals(dfcut.index):
# When aligning, the cut and var have to be of the same type
if isinstance(dfvar, pd.DataFrame):
dfvar, dfcut = dfvar.align(dfcut.to_frame(), axis=0, join="inner")
else:
dfvar, dfcut = dfvar.align(dfcut, axis=0, join="inner")
dfvar = dfvar.loc[dfcut.to_numpy()]
# Compute weights
if self._wgt is not None:
dfwgt = self._wgt(tables)
# align the weights to the var
# TODO: Is 0 the right fill?
dfwgt, _ = dfwgt.align(dfvar, axis=0, join="right", fill_value=0)
else:
dfwgt = pd.Series(1, dfvar.index, name="weight")
self._dfvars.append(dfvar)
self._dfwgts.append(dfwgt)
def finish(self):
assert len(self._dfvars) == len(self._dfwgts)
if len(self._dfvars) > 1:
self._df = pd.concat(self._dfvars, axis=0)
self._weight = pd.concat(self._dfwgts, axis=0)
else:
self._df = self._dfvars[0]
self._weight = self._dfwgts[0]
def df(self):
return self._df
def weight(self):
return self._weight
def entries(self):
return self._df.shape[0]
def histogram(self, bins, range=None, mpireduce=False, root=0):
n, bins = bh.numpy.histogram(
self._df, bins, range, weights=self._weight, storage=bh.storage.Double()
)
if mpireduce:
n = MPI.COMM_WORLD.reduce(n, MPI.SUM, root=root)
return n, bins
def integral(self):
return self._weight.sum()
def to_text(self, file_name, sep=" ", header=False):
self._df.to_csv(file_name, sep=sep, index=True, header=header)
def __add__(self, other):
df = pd.concat([self._df, other._df])
wgt = pd.concat([self._weight, other._weight])
return FilledSpectrum(df, wgt)
class FilledSpectrum(Spectrum):
"""Construct a spectrum directly from a Series or DataFrame"""
def __init__(self, df, weight):
self._df = df
self._weight = weight
def fill(self):
print("This spectrum was constructed already filled.")
# Save spectra to an HDF5 file. Takes a single spectrum or a list of spectra
def save_spectra(filename, spectra, groups):
if not isinstance(spectra, list):
spectra = [spectra]
if not isinstance(groups, list):
groups = [groups]
assert len(spectra) == len(groups), "Each spectrum must have a group name."
    # Write each spectrum's dataframe and weights under its group key
store = pd.HDFStore(filename, "w")
for spectrum, group in zip(spectra, groups):
store[group + "/dataframe"] = spectrum.df()
store[group + "/weights"] = spectrum.weight()
store.close()
def load_spectra(filename, groups):
    """Load one or more spectra from a file."""
if not isinstance(groups, list):
groups = [groups]
    # Read each group's dataframe and weights back and rebuild FilledSpectrum objects
store = pd.HDFStore(filename, "r")
ret = []
for group in groups:
df = store[group + "/dataframe"]
weight = store[group + "/weights"]
ret.append(FilledSpectrum(df, weight))
store.close()
if len(groups) == 1:
return ret[0]
return ret
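# Hedged usage sketch (not part of the original module): build a spectrum
# directly from pandas objects, histogram it, and round-trip it through an
# HDF5 file. Names and numbers are illustrative; it assumes mpi4py (imported
# above) and pytables (for HDFStore) are installed.
if __name__ == "__main__":
    values = pd.Series(np.random.normal(size=1000), name="energy")
    weights = pd.Series(1.0, index=values.index, name="weight")
    spec = FilledSpectrum(values, weights)
    counts, edges = spec.histogram(bins=50, range=(-4, 4))
    print("entries:", spec.entries(), "integral:", spec.integral())
    save_spectra("spectra_demo.h5", spec, "demo")
    reloaded = load_spectra("spectra_demo.h5", "demo")
    print("reloaded integral:", reloaded.integral())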
|
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
"""defines some object graph."""
import datetime
import typing
class SomeGraph:
"""defines some object graph."""
def __init__(
self,
some_time_zone: datetime.tzinfo) -> None:
"""
initializes an instance of SomeGraph with the given values.
:param some_time_zone: defines some time zone.
"""
self.some_time_zone = some_time_zone
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
|
"""Conversion to a dictionary
Write a program that converts the list below into a dictionary.
l = [
    ['carlos', 15, 12],
    ['alberto', 9, 15],
    ['maria', 18, 19]
]
Format -> name: grade1, grade2
"""
l = [
['carlos', 15, 12],
['alberto', 9, 15],
['maria', 18, 19]
]
d = {}
for row in l:
d[row[0]] = [row[1], row[2]]
print(d)
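# The same conversion written as a dict comprehension (equivalent to the loop above):
d2 = {name: grades for name, *grades in l}
print(d2)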
|
#!/usr/bin/env python
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import humanize
import isodate
import requests_cache
from paasta_tools import marathon_tools
from paasta_tools.mesos_tools import get_running_tasks_from_active_frameworks
from paasta_tools.mesos_tools import get_mesos_slaves_grouped_by_attribute
from paasta_tools.mesos_tools import status_mesos_tasks_verbose
from paasta_tools.monitoring.replication_utils import match_backends_and_tasks, backend_is_up
from paasta_tools.smartstack_tools import DEFAULT_SYNAPSE_PORT
from paasta_tools.smartstack_tools import get_backends
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import format_table
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import _log
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import remove_ansi_escape_sequences
log = logging.getLogger('__main__')
logging.basicConfig()
def start_marathon_job(service, instance, app_id, normal_instance_count, client, cluster):
name = PaastaColors.cyan(compose_job_id(service, instance))
_log(
service=service,
line="EmergencyStart: scaling %s up to %d instances" % (name, normal_instance_count),
component='deploy',
level='event',
cluster=cluster,
instance=instance
)
client.scale_app(app_id, instances=normal_instance_count, force=True)
def stop_marathon_job(service, instance, app_id, client, cluster):
name = PaastaColors.cyan(compose_job_id(service, instance))
_log(
service=service,
line="EmergencyStop: Scaling %s down to 0 instances" % (name),
component='deploy',
level='event',
cluster=cluster,
instance=instance
)
client.scale_app(app_id, instances=0, force=True) # TODO do we want to capture the return val of any client calls?
def restart_marathon_job(service, instance, app_id, normal_instance_count, client, cluster):
stop_marathon_job(service, instance, app_id, client, cluster)
start_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
def get_bouncing_status(service, instance, client, job_config):
apps = marathon_tools.get_matching_appids(service, instance, client)
bounce_method = job_config.get_bounce_method()
app_count = len(apps)
if app_count == 0:
return PaastaColors.red("Stopped")
elif app_count == 1:
return PaastaColors.green("Running")
elif app_count > 1:
return PaastaColors.yellow("Bouncing (%s)" % bounce_method)
else:
return PaastaColors.red("Unknown (count: %s)" % app_count)
def status_desired_state(service, instance, client, job_config):
status = get_bouncing_status(service, instance, client, job_config)
desired_state = job_config.get_desired_state_human()
return "State: %s - Desired state: %s" % (status, desired_state)
def status_marathon_job(service, instance, app_id, normal_instance_count, client):
name = PaastaColors.cyan(compose_job_id(service, instance))
if marathon_tools.is_app_id_running(app_id, client):
app = client.get_app(app_id)
running_instances = app.tasks_running
if len(app.deployments) == 0:
deploy_status = PaastaColors.bold("Running")
else:
deploy_status = PaastaColors.yellow("Deploying")
if running_instances >= normal_instance_count:
status = PaastaColors.green("Healthy")
instance_count = PaastaColors.green("(%d/%d)" % (running_instances, normal_instance_count))
elif running_instances == 0:
            status = PaastaColors.red("Critical")
instance_count = PaastaColors.red("(%d/%d)" % (running_instances, normal_instance_count))
else:
status = PaastaColors.yellow("Warning")
instance_count = PaastaColors.yellow("(%d/%d)" % (running_instances, normal_instance_count))
return "Marathon: %s - up with %s instances. Status: %s." % (status, instance_count, deploy_status)
else:
red_not = PaastaColors.red("NOT")
status = PaastaColors.red("Critical")
return "Marathon: %s - %s (app %s) is %s running in Marathon." % (status, name, app_id, red_not)
def get_verbose_status_of_marathon_app(app):
"""Takes a given marathon app object and returns the verbose details
about the tasks, times, hosts, etc"""
output = []
create_datetime = datetime_from_utc_to_local(isodate.parse_datetime(app.version))
output.append(" Marathon app ID: %s" % PaastaColors.bold(app.id))
output.append(" App created: %s (%s)" % (str(create_datetime), humanize.naturaltime(create_datetime)))
output.append(" Tasks:")
rows = [("Mesos Task ID", "Host deployed to", "Deployed at what localtime")]
for task in app.tasks:
local_deployed_datetime = datetime_from_utc_to_local(task.staged_at)
if task.host is not None:
hostname = "%s:%s" % (task.host.split(".")[0], task.ports[0])
else:
hostname = "Unknown"
rows.append((
get_short_task_id(task.id),
hostname,
'%s (%s)' % (
local_deployed_datetime.strftime("%Y-%m-%dT%H:%M"),
humanize.naturaltime(local_deployed_datetime),
)
))
output.append('\n'.join([" %s" % line for line in format_table(rows)]))
if len(app.tasks) == 0:
output.append(" No tasks associated with this marathon app")
return app.tasks, "\n".join(output)
def status_marathon_job_verbose(service, instance, client):
    """Returns detailed information about the marathon apps for a service
and instance. Does not make assumptions about what the *exact*
appid is, but instead does a fuzzy match on any marathon apps
that match the given service.instance"""
all_tasks = []
all_output = []
# For verbose mode, we want to see *any* matching app. As it may
# not be the one that we think should be deployed. For example
# during a bounce we want to see the old and new ones.
for appid in marathon_tools.get_matching_appids(service, instance, client):
app = client.get_app(appid)
tasks, output = get_verbose_status_of_marathon_app(app)
all_tasks.extend(tasks)
all_output.append(output)
return all_tasks, "\n".join(all_output)
def haproxy_backend_report(normal_instance_count, up_backends):
"""Given that a service is in smartstack, this returns a human readable
report of the up backends"""
# TODO: Take into account a configurable threshold, PAASTA-1102
crit_threshold = 50
under_replicated, ratio = is_under_replicated(num_available=up_backends,
expected_count=normal_instance_count,
crit_threshold=crit_threshold)
if under_replicated:
status = PaastaColors.red("Critical")
count = PaastaColors.red("(%d/%d, %d%%)" % (up_backends, normal_instance_count, ratio))
else:
status = PaastaColors.green("Healthy")
count = PaastaColors.green("(%d/%d)" % (up_backends, normal_instance_count))
up_string = PaastaColors.bold('UP')
return "%s - in haproxy with %s total backends %s in this namespace." % (status, count, up_string)
def format_haproxy_backend_row(backend, is_correct_instance):
"""Pretty Prints the status of a given haproxy backend
Takes the fields described in the CSV format of haproxy:
http://www.haproxy.org/download/1.5/doc/configuration.txt
And tries to make a good guess about how to represent them in text
"""
backend_name = backend['svname']
backend_hostname = backend_name.split("_")[-1]
backend_port = backend_name.split("_")[0].split(":")[-1]
pretty_backend_name = "%s:%s" % (backend_hostname, backend_port)
if backend['status'] == "UP":
status = PaastaColors.default(backend['status'])
elif backend['status'] == 'DOWN' or backend['status'] == 'MAINT':
status = PaastaColors.red(backend['status'])
else:
status = PaastaColors.yellow(backend['status'])
lastcheck = "%s/%s in %sms" % (backend['check_status'], backend['check_code'], backend['check_duration'])
lastchange = humanize.naturaltime(datetime.timedelta(seconds=int(backend['lastchg'])))
row = (
' %s' % pretty_backend_name,
lastcheck,
lastchange,
status,
)
if is_correct_instance:
return row
else:
return tuple(PaastaColors.grey(remove_ansi_escape_sequences(col)) for col in row)
def status_smartstack_backends(service, instance, job_config, cluster, tasks, expected_count, soa_dir, verbose):
"""Returns detailed information about smartstack backends for a service
and instance.
    :returns: A newline-separated string of the smartstack backend status
"""
output = []
nerve_ns = marathon_tools.read_namespace_for_service_instance(service, instance, cluster)
service_instance = compose_job_id(service, nerve_ns)
if instance != nerve_ns:
ns_string = PaastaColors.bold(nerve_ns)
output.append("Smartstack: N/A - %s is announced in the %s namespace." % (instance, ns_string))
# If verbose mode is specified, then continue to show backends anyway, otherwise stop early
if not verbose:
return "\n".join(output)
service_namespace_config = marathon_tools.load_service_namespace_config(service, instance, soa_dir=soa_dir)
discover_location_type = service_namespace_config.get_discover()
monitoring_blacklist = job_config.get_monitoring_blacklist()
unique_attributes = get_mesos_slaves_grouped_by_attribute(
attribute=discover_location_type, blacklist=monitoring_blacklist)
if len(unique_attributes) == 0:
output.append("Smartstack: ERROR - %s is NOT in smartstack at all!" % service_instance)
else:
output.append("Smartstack:")
if verbose:
output.append(" Haproxy Service Name: %s" % service_instance)
output.append(" Backends:")
output.extend(pretty_print_smartstack_backends_for_locations(
service_instance,
tasks,
unique_attributes,
expected_count,
verbose
))
return "\n".join(output)
def pretty_print_smartstack_backends_for_locations(service_instance, tasks, locations, expected_count, verbose):
"""
Pretty prints the status of smartstack backends of a specified service and instance in the specified locations
"""
rows = [(" Name", "LastCheck", "LastChange", "Status")]
expected_count_per_location = int(expected_count / len(locations))
for location in sorted(locations):
hosts = locations[location]
# arbitrarily choose the first host with a given attribute to query for replication stats
synapse_host = hosts[0]
sorted_backends = sorted(get_backends(service_instance,
synapse_host=synapse_host,
synapse_port=DEFAULT_SYNAPSE_PORT),
key=lambda backend: backend['status'],
reverse=True) # Specify reverse so that backends in 'UP' are placed above 'MAINT'
matched_tasks = match_backends_and_tasks(sorted_backends, tasks)
running_count = sum(1 for backend, task in matched_tasks if backend and backend_is_up(backend))
rows.append(" %s - %s" % (location, haproxy_backend_report(expected_count_per_location, running_count)))
# If verbose mode is specified, show status of individual backends
if verbose:
for backend, task in matched_tasks:
if backend is not None:
rows.append(format_haproxy_backend_row(backend, task is not None))
return format_table(rows)
def get_short_task_id(task_id):
"""Return just the Marathon-generated UUID of a Mesos task id."""
return task_id.split(marathon_tools.MESOS_TASK_SPACER)[-1]
def status_mesos_tasks(service, instance, normal_instance_count):
job_id = marathon_tools.format_job_id(service, instance)
running_and_active_tasks = get_running_tasks_from_active_frameworks(job_id)
count = len(running_and_active_tasks)
if count >= normal_instance_count:
status = PaastaColors.green("Healthy")
count = PaastaColors.green("(%d/%d)" % (count, normal_instance_count))
elif count == 0:
status = PaastaColors.red("Critical")
count = PaastaColors.red("(%d/%d)" % (count, normal_instance_count))
else:
status = PaastaColors.yellow("Warning")
count = PaastaColors.yellow("(%d/%d)" % (count, normal_instance_count))
running_string = PaastaColors.bold('TASK_RUNNING')
return "Mesos: %s - %s tasks in the %s state." % (status, count, running_string)
def perform_command(command, service, instance, cluster, verbose, soa_dir, app_id=None):
"""Performs a start/stop/restart/status on an instance
:param command: String of start, stop, restart, or status
:param service: service name
:param instance: instance name, like "main" or "canary"
:param cluster: cluster name
:param verbose: bool if the output should be verbose or not
:returns: A unix-style return code
"""
marathon_config = marathon_tools.load_marathon_config()
job_config = marathon_tools.load_marathon_service_config(service, instance, cluster, soa_dir=soa_dir)
if not app_id:
try:
app_id = marathon_tools.create_complete_config(service, instance, marathon_config, soa_dir=soa_dir)['id']
except NoDockerImageError:
job_id = compose_job_id(service, instance)
print "Docker image for %s not in deployments.json. Exiting. Has Jenkins deployed it?" % job_id
return 1
normal_instance_count = job_config.get_instances()
normal_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(service, instance)
proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, soa_dir=soa_dir)
client = marathon_tools.get_marathon_client(marathon_config.get_url(), marathon_config.get_username(),
marathon_config.get_password())
if command == 'start':
start_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
elif command == 'stop':
stop_marathon_job(service, instance, app_id, client, cluster)
elif command == 'restart':
restart_marathon_job(service, instance, app_id, normal_instance_count, client, cluster)
elif command == 'status':
# Setting up transparent cache for http API calls
requests_cache.install_cache('paasta_serviceinit', backend='memory')
print status_desired_state(service, instance, client, job_config)
print status_marathon_job(service, instance, app_id, normal_instance_count, client)
tasks, out = status_marathon_job_verbose(service, instance, client)
if verbose:
print out
print status_mesos_tasks(service, instance, normal_instance_count)
if verbose:
print status_mesos_tasks_verbose(app_id, get_short_task_id)
if proxy_port is not None:
print status_smartstack_backends(
service=service,
instance=instance,
cluster=cluster,
job_config=job_config,
tasks=tasks,
expected_count=normal_smartstack_count,
soa_dir=soa_dir,
verbose=verbose,
)
else:
# The command parser shouldn't have let us get this far...
raise NotImplementedError("Command %s is not implemented!" % command)
return 0
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
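# Example invocation (assumption, values are illustrative):
#   perform_command('status', 'example_service', 'main', 'examplecluster',
#                   verbose=True, soa_dir='/nail/etc/services')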
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Dirk Chang and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from wechatpy.oauth import WeChatOAuth
from wechat.api import check_wechat_binding
def get_context(context):
context.no_cache = 1
url = "/desk#List/Tickets Ticket"
if frappe.form_dict.name:
url = url + "/" + frappe.form_dict.name
check_wechat_binding(redirect_url=url)
|
# """This module is still in an experimental stage and should not be assumed to be "reliable", or
# "useful", or anything else that might be expected of a normal module"""
# ##################################################
# # Import Own Assets
# ##################################################
# from hyperparameter_hunter.utils.general_utils import type_val
# from hyperparameter_hunter.utils.learning_utils import upsample
# from hyperparameter_hunter.settings import G
#
# ##################################################
# # Import Miscellaneous Assets
# ##################################################
# from collections import Counter
#
# ##################################################
# # Import Learning Assets
# ##################################################
# from sklearn.preprocessing import StandardScaler
# from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler
#
#
# class PreprocessingPipelineMixIn(object):
# def __init__(
# self,
# pipeline,
# preprocessing_params,
# features,
# target_column,
# train_input_data: object = None,
# train_target_data: object = None,
# validation_input_data: object = None,
# validation_target_data: object = None,
# holdout_input_data: object = None,
# holdout_target_data: object = None,
# test_input_data: object = None,
# fitting_guide=None,
# fail_gracefully=False,
# preprocessing_stage="infer",
# ):
# """
#
# Parameters
# ----------
# pipeline: List
# List of tuples of form: (<string id>, <callable function>), in which the id identifies
# the paired function transformations to be fitted on dfs specified by fitting_guide[i],
# then applied to all other same-type, non-null dfs
# preprocessing_params: Dict
# All the parameters necessary for the desired preprocessing functionality
# features: List
# List containing strings that specify the columns to be used as input
# target_column: String
# String naming the target column
# train_input_data: Pandas Dataframe
# ...
# train_target_data: Pandas Dataframe
# ...
# validation_input_data: Pandas Dataframe
# ...
# validation_target_data: Pandas Dataframe
# ...
# holdout_input_data: Pandas Dataframe
# ...
# holdout_target_data: Pandas Dataframe
# ...
# test_input_data: Pandas Dataframe
# ...
# fitting_guide: List of same length as pipeline containing tuples of strings, default=None
# If not None, specifies datasets used to fit each manipulation in pipeline. Those not
# included in the list (and of same type: input/target) will be transformed according to
# the fitted functions. Else, infer from preprocessing_stage
# fail_gracefully: Boolean, default=False
# If True, Exceptions thrown by preprocessing transformations will be logged and skipped,
# so processing can continue
# preprocessing_stage: String in ['pre_cv', 'intra_cv', 'infer'], default='infer'
# Denotes when preprocessing is occurring. If 'pre_cv', pipeline functions are fit on all
# available data. If 'intra_cv', pipeline functions are fit on train data and applied to
# all same-type, non-null data. Else, infer stage"""
# ##################################################
# # Core Attributes
# ##################################################
# self.pipeline = pipeline
# self.preprocessing_params = preprocessing_params
# self.features = features
# self.target_column = target_column
#
# ##################################################
# # Dataset Attributes
# ##################################################
# self.train_input_data = train_input_data.copy() if train_input_data is not None else None
# self.train_target_data = train_target_data.copy() if train_target_data is not None else None
# self.validation_input_data = (
# validation_input_data.copy() if validation_input_data is not None else None
# )
# self.validation_target_data = (
# validation_target_data.copy() if validation_target_data is not None else None
# )
# self.holdout_input_data = (
# holdout_input_data.copy() if holdout_input_data is not None else None
# )
# self.holdout_target_data = (
# holdout_target_data.copy() if holdout_target_data is not None else None
# )
# self.test_input_data = test_input_data.copy() if test_input_data is not None else None
#
# ##################################################
# # Miscellaneous Attributes
# ##################################################
# self.fitting_guide = fitting_guide
# self.fail_gracefully = fail_gracefully
#
# ##################################################
# # Preprocessing Stage and Dataset Type Attributes
# ##################################################
# self.preprocessing_stage = preprocessing_stage
# self.all_input_sets = self.get_non_null(
# ["{}_input_data".format(_) for _ in ("train", "validation", "holdout", "test")]
# )
# self.all_target_sets = self.get_non_null(
# ["{}_target_data".format(_) for _ in ("train", "validation", "holdout")]
# )
# self.fit_input_sets = None
# self.fit_target_sets = None
#
# ##################################################
# # Initialize Mix-Ins/Inherited Classes
# ##################################################
# pass
#
# ##################################################
# # Ensure Attributes are Properly Initialized
# ##################################################
# self.set_preprocessing_stage_and_sets()
#
# def get_non_null(self, dataset_names):
# return [_ for _ in dataset_names if self.__getattribute__(_) is not None]
#
# def set_preprocessing_stage_and_sets(self):
# """Ensures preprocessing_stage has been properly initialized before initializing
# fit_input_sets and fit_target_sets"""
# try:
# self.preprocessing_stage = self.initialize_preprocessing_stage()
# except Exception as _ex:
# raise (_ex)
# else:
# if self.preprocessing_stage == "pre_cv":
# self.fit_input_sets = self.all_input_sets
# self.fit_target_sets = self.all_target_sets
# # self.fit_target_sets = ['train_target_data', 'holdout_target_data']
# elif self.preprocessing_stage == "intra_cv":
# self.fit_input_sets = ["train_input_data"]
# self.fit_target_sets = ["train_target_data"]
# # self.fit_input_sets = ['train_input_data', 'validation_input_data', 'holdout_input_data', 'test_input_data']
# # self.fit_target_sets = ['train_target_data', 'validation_target_data', 'holdout_target_data']
#
# def initialize_preprocessing_stage(self):
# """Ensures preprocessing_stage can be set according to class attributes or method input"""
# _stages, _err = ["pre_cv", "intra_cv"], "Unknown error occurred."
# _i_strs = ["validation_input_data", "validation_target_data"]
# _i_sets = [getattr(self, _) for _ in _i_strs]
#
# if self.preprocessing_stage in _stages:
# return self.preprocessing_stage
# elif self.preprocessing_stage == "infer":
# if all([_ for _ in _i_sets]):
# return "intra_cv"
# elif any([_ for _ in _i_sets]):
# _err = "Inference failed. {} types must be same. Received: {}".format(
# _i_strs, [type(_) for _ in _i_sets]
# )
# else:
# return "pre_cv"
# else:
# _err = "preprocessing_stage must be in {}. Received type {}: {}".format(
# _stages, *type_val(self.preprocessing_stage)
# )
#
# if self.fail_gracefully is True:
# G.warn(_err)
# return "pre_cv"
# else:
# raise ValueError(_err)
#
# def build_pipeline(self):
# new_pipeline = (
# []
# ) # (<str id>, <callable transformation>, <sets to fit on>, <sets to transform>)
#
# if not isinstance(self.pipeline, list):
# raise TypeError(
# "Expected pipeline of type list. Received {}: {}".format(*type_val(self.pipeline))
# )
#
# for i, step in enumerate(self.pipeline):
# step_id, step_callable, step_fit_sets, step_transform_sets = None, None, None, None
#
# ##################################################
# # Pipeline is a list of strings
# ##################################################
# if isinstance(step, str):
# # element names a method in this class to use
# step_id, step_callable = step, getattr(self, step, default=None)
# if step_callable is None:
# raise AttributeError(
# "Expected pipeline value to name a method. Received {}: {}".format(
# *type_val(step)
# )
# )
# ##################################################
# # Pipeline is a list of tuple/list pairs
# ##################################################
# # TODO: Instead of forcing len() == 2, merge self.fitting_guide into self.pipeline.
# # TODO: Max valid length == 4 after adding fit_sets(subset of transform_sets), transform_sets
# # TODO: If len > 4, throw WARNING that extra values will be ignored and continue
# elif any([isinstance(step, _) for _ in (tuple, list)]) and len(step) == 2:
# # element is a tuple/list of length 2, where 2nd value is a callable or names method of transformation
# if isinstance(step[0], str):
# step_id = step[0]
# else:
# pass
#
# if callable(step[1]):
# pass
# # TODO: Dynamically create new method, whose name is heavily mangled and modified
# # TODO: Try to include original callable __name__ in new method's name. If unavailable use "i"
# # TODO: New method name should be something like: "__dynamic_pipeline_method_{}".format(step[1].__name__ or i)
# # TODO: Actual name that would be called would be mangled because of double underscore prefix
# # FLAG: If you want your method to have other arguments, place them in preprocessing_params dict, instead
# # FLAG: Then just use self.preprocessing_params[<your arg>] inside your callable
# # FLAG: In fact, declaring values that may change later on directly in your callable could be problematic
# # FLAG: If you include them in preprocessing_params, hyperparameters for experiment will be clear
# ##################################################
# # Pipeline type is invalid
# ##################################################
# else:
# raise TypeError(
# "Expected pipeline step to be: a str, or a tuple pair. Received {}: {}".format(
# *type_val(step)
# )
# )
#
# ##################################################
# # Additional Error Handling
# ##################################################
# if step_id is None:
# raise TypeError(
# "Expected str as first value in each pipeline tuple. Received {}: {}".format(
# *type_val(step[0])
# )
# )
#
# new_pipeline.append((step_id, step_callable, step_fit_sets, step_transform_sets))
#
# def custom_pipeline_method_builder(self, functionality, name=None):
# """...
#
# Parameters
# ----------
# functionality: Callable
# Performs all desired transformations/alterations/work for this pipeline step. This
# callable will not receive any input arguments, so don't expect any. Instead, it is
# implemented as a class method, so it has access to all class attributes and methods. To
# work properly, the class attributes: ['self.train_input_data', 'self.train_target_data',
# 'self.validation_input_data', 'self.validation_target_data', 'self.holdout_input_data',
# 'self.holdout_target_data', 'self.test_input_data'] are expected to be directly
# modified. See the "Notes"/"Examples" sections below for more
# name: String, or None, default=None
# Suffix for the name of the new custom method. See below "Notes" section for details on
# method name creation
#
# Returns
# -------
# name: str
# The name of the new method that was created
#
# Notes
# -----
# WARNING: Because the custom functionality is implemented as a class method, it is capable
# of modifying values that are not expected to change, or setting new attributes. Doing either
# of these is a bad idea. The only attributes that should be set are those listed in the above
# "Parameters" description for the "functionality" argument. Additionally, the only values
# that should be retrieved are the aforementioned "data" attributes, plus
# :attr:`preprocessing_params`
#
# METHOD ARGUMENTS: If the custom functionality requires some input argument that could be
# subject to change later (like a hyperparameter), it should be included in
# :attr:`preprocessing_params`. Then in the custom functionality, it can be retrieved with
# "self.preprocessing_params[<your_arg>]". See the "Examples" section below for details on how
# to do this. The two primary reasons for this behavior are as follows:
#
# 1) to get around having to make sense of methods' expected arguments and the arguments
# actually input to them, and
# 2) to include any necessary arguments in the experiment's hyperparameters.
#
# Examples
# --------
# >>> from hyperparameter_hunter.feature_engineering import PreprocessingPipelineMixIn
# >>> def my_function(self):
# >>> self.train_input_data = self.train_input_data.fillna(self.preprocessing_params['my_imputer'])
# Notice in "my_function", "self" is the only input, "self.train_input_data" is directly
# modified, and instead of passing "my_imputer" as an input, it is referenced in
# "self.preprocessing_params". Now, the class can use "my_function" below.
# >>> preprocessor = PreprocessingPipelineMixIn(
# >>> pipeline=[('my_function', my_function)],
# >>> preprocessing_params=dict(my_imputer=-1), features=[], target_column=''
# >>> )
# The "pipeline" is set to include "my_function", which, after its creation, will be able to
# retrieve "my_imputer" from "self.preprocessing_params". Note that this example just
# demonstrates custom method building. It won't work as-is, without any train_input_data,
# among other things. Now in a later experiment, null values can be imputed to -2 instead of
# -1, just by changing "preprocessing_params":
# >>> preprocessor = PreprocessingPipelineMixIn(
# >>> pipeline=[('my_function', my_function)],
# >>> preprocessing_params=dict(my_imputer=-2), features=[], target_column=''
# >>> )
# This makes it much easier to keep track of the actual hyperparameters being used in an
# experiment than having to scour obscure functions for some number that may or may not even
# be declared inside"""
# if not callable(functionality):
# raise TypeError(
# "Custom pipeline methods must be callable. Received type {}".format(
# type(functionality)
# )
# )
#
# # TODO: Set name (using "functionality.__name__") if name is None
#
# while hasattr(self, name):
# _name = name + "" # TODO: Make changes to "name" here
# # TODO: Do something to further modify name and check again
# G.warn(
# 'Encountered naming conflict in custom_pipeline_method_builder with "{}". Trying "{}"'.format(
# name, _name
# )
# )
# name = _name
#
# #################### Create New Custom Method ####################
# setattr(self, name, functionality)
#
# return name
#
# def data_imputation(self, which_sets=None):
# imputer = self.preprocessing_params.get("imputer", None)
# which_sets = which_sets if which_sets else self.fit_input_sets
#
# for data_key in which_sets:
# data = self.__getattribute__(data_key)
#
# if data is not None:
# if callable(imputer): # Apply Function to Impute Data
# # TODO: Send either "self" or all attributes in self as other input to "imputer"
# # TODO: Force callable "imputer" to have **kwargs, or check for the args it expects and send only those
# self.__setattr__(data_key, imputer(data))
# elif any(
# [isinstance(imputer, _) for _ in (int, float)]
# ): # Fill Null Data With Given Value
# self.__setattr__(data_key, data.fillna(imputer))
#
# G.log("Completed data_imputation preprocessing")
#
# def target_data_transformation(self, which_sets=None):
# transformation = self.preprocessing_params.get("target_transformation", None)
# which_sets = which_sets if which_sets else self.fit_target_sets
#
# for data_key in which_sets:
# data = self.__getattribute__(data_key)
#
# if callable(transformation) and data:
# # TODO: Send either "self" or all attributes in self as other input to "imputer"
# # TODO: Force callable "imputer" to have **kwargs, or check for the args it expects and send only those
# self.__setattr__(data_key, transformation(data))
#
# G.log("Completed target_data_transformation preprocessing")
#
# def data_scaling(self, which_sets=None):
# which_sets = which_sets if which_sets else self.fit_input_sets
#
# # TODO: Expand method to include other scaling types by sending string param or callable for apply_scale arg
# if self.preprocessing_params.get("apply_standard_scale", False) is True:
# scaler = StandardScaler()
#
# # TODO: Modify fitting process to use 'which_sets' and 'self.fit_input_sets' like 'data_imputation' method
# scaler.fit(self.train_input_data[self.features].values)
#
# if "train_input_data" in self.all_input_sets:
# self.train_input_data[self.features] = scaler.transform(
# self.train_input_data[self.features].values
# )
# if "holdout_input_data" in self.all_input_sets:
# self.holdout_input_data[self.features] = scaler.transform(
# self.holdout_input_data[self.features].values
# )
# if "test_input_data" in self.all_input_sets:
# self.test_input_data[self.features] = scaler.transform(
# self.test_input_data[self.features].values
# )
#
# G.log(
# 'Completed data_scaling preprocessing. preprocessing_params["apply_standard_scale"]={}'.format(
# self.preprocessing_params.get("apply_standard_scale", False)
# )
# )
#
#
# class PreCVPreprocessingPipeline(PreprocessingPipelineMixIn):
# def __init__(
# self,
# features,
# target_column,
# train_input_data,
# train_target_data,
# holdout_input_data,
# holdout_target_data,
# test_input_data=None,
# ):
# PreprocessingPipelineMixIn.__init__(
# self,
# preprocessing_stage="pre_cv",
# pipeline=None,
# fitting_guide=None,
# preprocessing_params=None,
# features=features,
# target_column=target_column,
# train_input_data=train_input_data,
# train_target_data=train_target_data,
# validation_input_data=None,
# validation_target_data=None,
# holdout_input_data=holdout_input_data,
# holdout_target_data=holdout_target_data,
# test_input_data=test_input_data,
# fail_gracefully=False,
# )
#
# # FLAG: WARNING: Method of same name in "CrossValidationWrapper" class
# # FLAG: WARNING: Method of same name in "CrossValidationWrapper" class
# def pre_cv_preprocessing(self):
# # FLAG: WARNING: Method of same name in "CrossValidationWrapper" class
# # FLAG: WARNING: Method of same name in "CrossValidationWrapper" class
# #################### Feature Selection ####################
# pass
#
# #################### Impute Missing Values in Data ####################
# pass
#
#
# class IntraCVPreprocessingPipeline(PreprocessingPipelineMixIn):
# def __init__(
# self,
# features,
# target_column,
# train_input_data,
# train_target_data,
# validation_input_data,
# validation_target_data,
# holdout_input_data=None,
# holdout_target_data=None,
# test_input_data=None,
# ):
# PreprocessingPipelineMixIn.__init__(
# self,
# preprocessing_stage="intra_cv",
# pipeline=None,
# fitting_guide=None,
# preprocessing_params=None,
# features=features,
# target_column=target_column,
# train_input_data=train_input_data,
# train_target_data=train_target_data,
# validation_input_data=validation_input_data,
# validation_target_data=validation_target_data,
# holdout_input_data=holdout_input_data,
# holdout_target_data=holdout_target_data,
# test_input_data=test_input_data,
# fail_gracefully=False,
# )
#
#
# class Sampler:
# def __init__(self, parameters, input_data, target_data):
# self.input_data = input_data
# self.target_data = target_data
# self.parameters = parameters
#
# def execute_pipeline(self):
# default_element = dict(
# method="", target_feature="target", target_value=-1.0, parameters=dict()
# )
# if len(self.parameters) > 0:
# self.report_status()
#
# for element in self.parameters:
# default_element.update(element)
#
# self.advance_pipeline(default_element)
#
# def advance_pipeline(self, element):
#         # FLAG: imblearn functions return an ndarray when given a pandas Series - needs further investigation
#
# if element["method"] == "smote":
# self.input_data, self.target_data = SMOTE(**element["parameters"]).fit_sample(
# self.input_data, self.target_data
# )
# elif element["method"] == "adasyn":
# self.input_data, self.target_data = ADASYN(**element["parameters"]).fit_sample(
# self.input_data, self.target_data
# )
# elif element["method"] == "RandomOverSampler":
# self.input_data, self.target_data = RandomOverSampler(
# **element["parameters"]
# ).fit_sample(self.input_data, self.target_data)
#
#         # FLAG: imblearn functions return an ndarray when given a pandas Series - needs further investigation
#
# elif element["method"] == "upsample":
# self.input_data, self.target_data = upsample(
# self.input_data,
# self.target_data,
# element["target_feature"],
# element["target_value"],
# **element["parameters"]
# )
#
# self.report_status(element["method"])
#
# def report_status(self, method=None):
# if method is None:
# print("Target Label Counts... {}".format(sorted(Counter(self.target_data).items())))
# else:
# print(
# "After Performing... {}... Target Label Counts... {}".format(
# method, sorted(Counter(self.target_data).items())
# )
# )
#
#
# # def _execute():
# # """EXPERIMENTAL"""
# # test_parameters = [
# # dict(
# # target_feature='target',
# # target_value=1.0,
# # method='smote'
# # ),
# # dict(
# # target_feature='target',
# # method='upsample',
# # target_value=1.0,
# # parameters=dict(
# # n_times=3
# # )
# # ),
# # # dict(
# # # target_feature='target',
# # # target_value=1.0,
# # # method='smote'
# # # ),
# # # dict(
# # # target_feature='target',
# # # target_value=1.0,
# # # method='adasyn'
# # # )
# # ]
# #
# # # train_data = pd.read_csv('./data/porto_seguro_train.csv')
# # # train_input = train_data.drop(['id', 'target'], axis=1)
# # # train_target = train_data['target']
# #
# # test_sampler = Sampler(test_parameters, train_input, train_target)
# # # test_sampler.report_status()
# # # test_sampler.advance_pipeline()
# # test_sampler.execute_pipeline()
# #
# # print('hold')
# #
# #
# # if __name__ == '__main__':
# # _execute()
|
from django.core.paginator import Paginator as BasePaginator
class Paginator(BasePaginator):
on_each_side = 2
current_page = 1
@property
def page_elements(self):
window = self.on_each_side * 2
result = {
'first': None,
'slider': None,
'last': None
}
if self.num_pages < (self.on_each_side * 2) + 6:
result['first'] = list(range(1, self.num_pages + 1))
elif self._current_page <= window:
result = {
'first': list(range(1, window + 2)),
'slider': None,
'last': list(range(self.num_pages, self.num_pages + 1))
}
elif self._current_page > (self.num_pages - window):
result = {
'first': list(range(1, 3)),
'slider': None,
'last': list(range(self.num_pages - window, self.num_pages + 1))
}
elif self.num_pages > 1:
result = {
'first': list(range(1, 3)),
'slider': list(range(
self._current_page - self.on_each_side,
self._current_page + self.on_each_side + 1
)),
'last': list(range(self.num_pages, self.num_pages + 1))
}
return filter(None, [
result['first'],
'...' if isinstance(result['slider'], list) else None,
result['slider'],
'...' if isinstance(result['last'], list) else None,
result['last']
])
@property
def _current_page(self):
current_page = int(self.current_page)
return 1 if current_page < 1 else current_page
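# Hedged usage sketch (not part of the original module): Django's base
# Paginator takes an object list and a page size; current_page is set by the
# caller before reading page_elements. Numbers are illustrative only.
if __name__ == "__main__":
    paginator = Paginator(range(250), 10)  # 25 pages
    paginator.current_page = 12
    print(list(paginator.page_elements))
    # expected shape: [[1, 2], '...', [10, 11, 12, 13, 14], '...', [25]]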
|
from typing import *
from return_type import ReturnType
from collections import abc
v_int: ReturnType[Callable[..., int]] = 1
v_float: ReturnType[Callable[..., float]] = 1.1
v_str: ReturnType[Callable[..., str]] = "123"
v_bytes: ReturnType[Callable[..., bytes]] = b"2123"
v_hash: ReturnType[Callable[..., Hashable]] = 1
class AwaitableClass(Awaitable[Any]):
def __await__(self):
pass
v_awaitable: ReturnType[Callable[..., Awaitable[Any]]] = AwaitableClass()
async def cof() -> Any:
pass
v_coroutine: ReturnType[Callable[..., Coroutine]] = cof()
# TODO add test for types below
# AsyncIterable,
# AsyncIterator,
# Iterable,
# Iterator,
# Reversible,
# Sized,
# Container,
# Collection,
# Callable,
# AbstractSet,
# MutableSet,
# # NOTE: Mapping is only covariant in the value type.
# Mapping,
# MutableMapping,
# Sequence,
# MutableSequence,
# ByteString,
# # Tuple accepts variable number of parameters.
# Tuple,
# List,
# Deque,
# Set,
# FrozenSet,
# MappingView,
# KeysView,
# ItemsView,
# ValuesView,
# ContextManager,
# AsyncContextManager,
# Dict,
# DefaultDict,
# OrderedDict,
# Counter,
# ChainMap,
# Generator,
# AsyncGenerator,
# TypedDict,
|
import logging
from eth_utils import (
ValidationError,
)
from eth.db.diff import (
DBDiff,
DBDiffTracker,
DiffMissingError,
)
from eth.db.backends.base import BaseDB
class BatchDB(BaseDB):
"""
    A wrapper around a basic DB object that stores uncommitted changes in a local cache,
    represented as a dictionary of database keys and values.
    The class is usable as a context manager: the changes either all succeed or all fail.
    Upon exiting the context, it writes all of the key-value pairs from the cache into
    the underlying database. If any error occurs before the commit phase,
    no changes are applied at all.
"""
logger = logging.getLogger("eth.db.BatchDB")
wrapped_db: BaseDB = None
_track_diff: DBDiffTracker = None
def __init__(self, wrapped_db: BaseDB, read_through_deletes: bool = False) -> None:
self.wrapped_db = wrapped_db
self._track_diff = DBDiffTracker()
self._read_through_deletes = read_through_deletes
def __enter__(self) -> 'BatchDB':
return self
def __exit__(self, exc_type: None, exc_value: None, traceback: None) -> None:
# commit all the changes from local cache to underlying db
if exc_type is None:
self.commit()
else:
self.clear()
self.logger.exception("Unexpected error occurred during batch update")
def clear(self) -> None:
self._track_diff = DBDiffTracker()
def commit(self, apply_deletes: bool = True) -> None:
self.commit_to(self.wrapped_db, apply_deletes)
def commit_to(self, target_db: BaseDB, apply_deletes: bool = True) -> None:
if apply_deletes and self._read_through_deletes:
raise ValidationError("BatchDB should never apply deletes when reading through deletes")
diff = self.diff()
diff.apply_to(target_db, apply_deletes)
self.clear()
def _exists(self, key: bytes) -> bool:
try:
self[key]
except KeyError:
return False
else:
return True
def __getitem__(self, key: bytes) -> bytes:
try:
value = self._track_diff[key]
except DiffMissingError as missing:
if missing.is_deleted and not self._read_through_deletes:
raise KeyError(key)
else:
return self.wrapped_db[key]
else:
return value
def __setitem__(self, key: bytes, value: bytes) -> None:
self._track_diff[key] = value
def __delitem__(self, key: bytes) -> None:
if key not in self:
raise KeyError(key)
del self._track_diff[key]
def diff(self) -> DBDiff:
return self._track_diff.diff()
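# Hedged usage sketch (not part of the original module). MemoryDB is assumed
# to be available from eth.db.backends.memory in the same code base; keys and
# values below are illustrative only.
if __name__ == "__main__":
    from eth.db.backends.memory import MemoryDB
    backing = MemoryDB()
    with BatchDB(backing) as batch:
        batch[b"greeting"] = b"hello"      # staged in the diff tracker only
        assert b"greeting" not in backing  # nothing written yet
    # On clean exit the batch commits its diff to the wrapped database.
    assert backing[b"greeting"] == b"hello"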
|
#
# PySNMP MIB module ASCEND-MIBUDS3NET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBUDS3NET-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:12:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, TimeTicks, IpAddress, ModuleIdentity, Bits, Integer32, Unsigned32, Counter64, ObjectIdentity, NotificationType, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "TimeTicks", "IpAddress", "ModuleIdentity", "Bits", "Integer32", "Unsigned32", "Counter64", "ObjectIdentity", "NotificationType", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
pass
mibuds3NetworkProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 5))
mibuds3NetworkProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 5, 1), )
if mibBuilder.loadTexts: mibuds3NetworkProfileTable.setStatus('mandatory')
mibuds3NetworkProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1), ).setIndexNames((0, "ASCEND-MIBUDS3NET-MIB", "uds3NetworkProfile-Shelf-o"), (0, "ASCEND-MIBUDS3NET-MIB", "uds3NetworkProfile-Slot-o"), (0, "ASCEND-MIBUDS3NET-MIB", "uds3NetworkProfile-Item-o"))
if mibBuilder.loadTexts: mibuds3NetworkProfileEntry.setStatus('mandatory')
uds3NetworkProfile_Shelf_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 1), Integer32()).setLabel("uds3NetworkProfile-Shelf-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: uds3NetworkProfile_Shelf_o.setStatus('mandatory')
uds3NetworkProfile_Slot_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 2), Integer32()).setLabel("uds3NetworkProfile-Slot-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: uds3NetworkProfile_Slot_o.setStatus('mandatory')
uds3NetworkProfile_Item_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 3), Integer32()).setLabel("uds3NetworkProfile-Item-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: uds3NetworkProfile_Item_o.setStatus('mandatory')
uds3NetworkProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 4), DisplayString()).setLabel("uds3NetworkProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_Name.setStatus('mandatory')
uds3NetworkProfile_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("anyShelf", 1), ("shelf1", 2), ("shelf2", 3), ("shelf3", 4), ("shelf4", 5), ("shelf5", 6), ("shelf6", 7), ("shelf7", 8), ("shelf8", 9), ("shelf9", 10)))).setLabel("uds3NetworkProfile-PhysicalAddress-Shelf").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_PhysicalAddress_Shelf.setStatus('mandatory')
uds3NetworkProfile_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot8", 9), ("slot9", 10), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17), ("slot17", 18), ("slot18", 19), ("slot19", 20), ("slot20", 21), ("slot21", 22), ("slot22", 23), ("slot23", 24), ("slot24", 25), ("slot25", 26), ("slot26", 27), ("slot27", 28), ("slot28", 29), ("slot29", 30), ("slot30", 31), ("slot31", 32), ("slot32", 33), ("slot33", 34), ("slot34", 35), ("slot35", 36), ("slot36", 37), ("slot37", 38), ("slot38", 39), ("slot39", 40), ("slot40", 41), ("aLim", 55), ("bLim", 56), ("cLim", 57), ("dLim", 58), ("leftController", 49), ("rightController", 50), ("controller", 42), ("firstControlModule", 53), ("secondControlModule", 54), ("trunkModule1", 45), ("trunkModule2", 46), ("controlModule", 51), ("slotPrimary", 59)))).setLabel("uds3NetworkProfile-PhysicalAddress-Slot").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_PhysicalAddress_Slot.setStatus('mandatory')
uds3NetworkProfile_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 7), Integer32()).setLabel("uds3NetworkProfile-PhysicalAddress-ItemNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_PhysicalAddress_ItemNumber.setStatus('mandatory')
uds3NetworkProfile_Enabled = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("uds3NetworkProfile-Enabled").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_Enabled.setStatus('mandatory')
uds3NetworkProfile_ProfileNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 9), Integer32()).setLabel("uds3NetworkProfile-ProfileNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_ProfileNumber.setStatus('mandatory')
uds3NetworkProfile_LineConfig_TrunkGroup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 10), Integer32()).setLabel("uds3NetworkProfile-LineConfig-TrunkGroup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_TrunkGroup.setStatus('mandatory')
uds3NetworkProfile_LineConfig_NailedGroup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 11), Integer32()).setLabel("uds3NetworkProfile-LineConfig-NailedGroup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_NailedGroup.setStatus('mandatory')
uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_SlotNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 12), Integer32()).setLabel("uds3NetworkProfile-LineConfig-RoutePort-SlotNumber-SlotNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_SlotNumber.setStatus('mandatory')
uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_ShelfNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 13), Integer32()).setLabel("uds3NetworkProfile-LineConfig-RoutePort-SlotNumber-ShelfNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_ShelfNumber.setStatus('mandatory')
uds3NetworkProfile_LineConfig_RoutePort_RelativePortNumber_RelativePortNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 14), Integer32()).setLabel("uds3NetworkProfile-LineConfig-RoutePort-RelativePortNumber-RelativePortNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_RoutePort_RelativePortNumber_RelativePortNumber.setStatus('mandatory')
uds3NetworkProfile_LineConfig_Activation = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("static", 1), ("dsrActive", 2), ("dcdDsrActive", 3)))).setLabel("uds3NetworkProfile-LineConfig-Activation").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_Activation.setStatus('mandatory')
uds3NetworkProfile_LineConfig_LineType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("cBitParity", 1)))).setLabel("uds3NetworkProfile-LineConfig-LineType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_LineType.setStatus('mandatory')
uds3NetworkProfile_LineConfig_LineCoding = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("b3zs", 1)))).setLabel("uds3NetworkProfile-LineConfig-LineCoding").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_LineCoding.setStatus('mandatory')
uds3NetworkProfile_LineConfig_Loopback = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noLoopback", 1), ("facilityLoopback", 2), ("localLoopback", 3)))).setLabel("uds3NetworkProfile-LineConfig-Loopback").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_Loopback.setStatus('mandatory')
uds3NetworkProfile_LineConfig_ClockSource = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eligible", 1), ("notEligible", 2)))).setLabel("uds3NetworkProfile-LineConfig-ClockSource").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_ClockSource.setStatus('mandatory')
uds3NetworkProfile_LineConfig_ClockPriority = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4))).clone(namedValues=NamedValues(("highPriority", 2), ("middlePriority", 3), ("lowPriority", 4)))).setLabel("uds3NetworkProfile-LineConfig-ClockPriority").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_ClockPriority.setStatus('mandatory')
uds3NetworkProfile_LineConfig_StatusChangeTrapEnable = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("uds3NetworkProfile-LineConfig-StatusChangeTrapEnable").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_LineConfig_StatusChangeTrapEnable.setStatus('mandatory')
uds3NetworkProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 5, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("uds3NetworkProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: uds3NetworkProfile_Action_o.setStatus('mandatory')
mibBuilder.exportSymbols("ASCEND-MIBUDS3NET-MIB", uds3NetworkProfile_LineConfig_TrunkGroup=uds3NetworkProfile_LineConfig_TrunkGroup, uds3NetworkProfile_LineConfig_RoutePort_RelativePortNumber_RelativePortNumber=uds3NetworkProfile_LineConfig_RoutePort_RelativePortNumber_RelativePortNumber, uds3NetworkProfile_LineConfig_LineCoding=uds3NetworkProfile_LineConfig_LineCoding, uds3NetworkProfile_Shelf_o=uds3NetworkProfile_Shelf_o, uds3NetworkProfile_Action_o=uds3NetworkProfile_Action_o, uds3NetworkProfile_ProfileNumber=uds3NetworkProfile_ProfileNumber, mibuds3NetworkProfileTable=mibuds3NetworkProfileTable, uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_ShelfNumber=uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_ShelfNumber, uds3NetworkProfile_LineConfig_ClockSource=uds3NetworkProfile_LineConfig_ClockSource, uds3NetworkProfile_Name=uds3NetworkProfile_Name, uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_SlotNumber=uds3NetworkProfile_LineConfig_RoutePort_SlotNumber_SlotNumber, uds3NetworkProfile_Slot_o=uds3NetworkProfile_Slot_o, uds3NetworkProfile_PhysicalAddress_ItemNumber=uds3NetworkProfile_PhysicalAddress_ItemNumber, uds3NetworkProfile_LineConfig_NailedGroup=uds3NetworkProfile_LineConfig_NailedGroup, uds3NetworkProfile_LineConfig_Activation=uds3NetworkProfile_LineConfig_Activation, mibuds3NetworkProfileEntry=mibuds3NetworkProfileEntry, uds3NetworkProfile_Item_o=uds3NetworkProfile_Item_o, mibuds3NetworkProfile=mibuds3NetworkProfile, uds3NetworkProfile_PhysicalAddress_Shelf=uds3NetworkProfile_PhysicalAddress_Shelf, uds3NetworkProfile_LineConfig_ClockPriority=uds3NetworkProfile_LineConfig_ClockPriority, DisplayString=DisplayString, uds3NetworkProfile_LineConfig_StatusChangeTrapEnable=uds3NetworkProfile_LineConfig_StatusChangeTrapEnable, uds3NetworkProfile_LineConfig_LineType=uds3NetworkProfile_LineConfig_LineType, uds3NetworkProfile_LineConfig_Loopback=uds3NetworkProfile_LineConfig_Loopback, uds3NetworkProfile_Enabled=uds3NetworkProfile_Enabled, uds3NetworkProfile_PhysicalAddress_Slot=uds3NetworkProfile_PhysicalAddress_Slot)
|
# Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from flask import (Blueprint, jsonify, redirect, render_template, request,
url_for)
from flask_login import current_user, login_required
from lifemonitor.utils import NextRouteRegistry, OpenApiSpecs, bool_from_string
from .forms import AuthorizeClient
from .models import Token
from .services import server
from .utils import split_by_crlf
blueprint = Blueprint("oauth2_server", __name__,
template_folder='templates',
static_folder="static", static_url_path='/static/auth2')
logger = logging.getLogger(__name__)
@blueprint.route('/authorize', methods=['GET', 'POST'])
def authorize():
    # Login is required since we need to know the current resource owner.
    # When the current user is not authenticated, we save the requested route
    # and redirect to the login page.
if not current_user.is_authenticated:
NextRouteRegistry.save(route=request.url)
return redirect(url_for('auth.login'))
return _process_authorization()
@blueprint.route('/authorize/<name>', methods=['GET', 'POST'])
def authorize_provider(name):
# Login is required since we need to know the current resource owner.
    # This authorization request comes from a registry (identified by 'name')
# and registries act as identity providers. Thus, we handle the authentication
# by redirecting the user to the registry. This ensures the authorization
# will be granted by a user which has an identity on that registry.
authenticate_to_provider = False
if current_user.is_anonymous:
logger.debug("Current user is anonymous")
authenticate_to_provider = True
elif name not in current_user.oauth_identity:
logger.debug(f"Current user doesn't have an identity issued by the provider '{name}'")
authenticate_to_provider = True
elif Token.check_token_expiration(current_user.oauth_identity[name].token['expires_at']):
logger.debug(f"The current user has expired token issued by the provider '{name}'")
authenticate_to_provider = True
logger.debug(f"Authenticate to provider '{name}': {authenticate_to_provider}")
if authenticate_to_provider:
return redirect(url_for("oauth2provider.login", name=name,
next=url_for(".authorize_provider",
name=name, **request.args.to_dict())))
return _process_authorization()
def _process_authorization():
confirmed = None
if request.method == 'GET':
grant = server.validate_consent_request(end_user=current_user)
if not server.request_authorization(grant.client, current_user):
# granted by resource owner
return server.create_authorization_response(grant_user=current_user)
confirmed = bool_from_string(request.values.get('confirm', ''))
logger.debug("Confirm authorization [GET]: %r", confirmed)
if not confirmed:
return render_template(
'authorize.html',
grant=grant,
user=current_user,
scope_info=OpenApiSpecs.get_instance().all_scopes
)
elif request.method == 'POST':
form = AuthorizeClient()
logger.debug(form.confirm.data)
confirmed = form.confirm.data
logger.debug("Confirm authorization [POST]: %r", confirmed)
# handle client response
if confirmed:
logger.debug("Authorization confirmed")
# granted by resource owner
return server.create_authorization_response(grant_user=current_user)
# denied by resource owner
return server.create_authorization_response(grant_user=None)
@blueprint.route('/token', methods=['POST'])
def issue_token():
return server.create_token_response()
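# Illustrative token exchange (for orientation only): the URL prefix depends on how
# this blueprint is registered, and the field names below are the standard OAuth2
# authorization-code parameters rather than anything specific to this module.
#   POST <blueprint prefix>/token
#   Content-Type: application/x-www-form-urlencoded
#   grant_type=authorization_code&code=<code>&redirect_uri=<uri>&client_id=<id>&client_secret=<secret>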
# @blueprint.route('/create_client', methods=('GET', 'POST'))
@login_required
def create_client():
user = current_user
if not user:
return redirect('/login')
if request.method == 'GET':
return render_template('create_client.html')
form = request.form
client = server.create_client(user,
form["client_name"], form["client_uri"],
split_by_crlf(form["grant_type"]),
split_by_crlf(form["response_type"]),
form["scope"],
split_by_crlf(form["redirect_uri"]),
form["token_endpoint_auth_method"]
)
return jsonify({
"client_id": client.client_id,
"client_secret": client.client_secret
})
|
from aiohttp import web
import json
import docker
import subprocess
from docker import errors
DOCKER_SOCKET_PATH = '/var/run/docker.sock'
OUTPUT_UID = 1000
app = web.Application()
routes = web.RouteTableDef()
docker_client = docker.from_env()
def chown_output(uid, dir):
# spawn a busybox with output repo mounted to adjust the file permissions
docker_client.containers.run("busybox:latest",
volumes={dir: {'bind': '/usr/src/app/data'}},
command=["/bin/sh", "-c", f"chown -R {uid}:{uid} /usr/src/app/data && sleep 1"],
auto_remove=True
)
def pipeline_run(image, input_dir, output_dir, env, cmd, docker_socket):
print(f"Running image {image} with input_dir {input_dir} and output_dir {output_dir}")
_volumes = {output_dir: {'bind': '/usr/src/app/data'}}
# bind docker sock if tool config requires it
if docker_socket:
_volumes[DOCKER_SOCKET_PATH] = {'bind': DOCKER_SOCKET_PATH}
if input_dir is not None:
_volumes[input_dir] = {'bind': '/usr/src/app/input'}
# execute actual tool image with provided settings
try:
docker_client.containers.run(image,
volumes=_volumes,
network='host',
environment=env,
command=cmd,
auto_remove=True
)
except errors.ImageNotFound:
print(f"[Error] Container image {image} not found!")
return {"error": f"container image {image} not found!"}
print("Done")
print("Correcting ownership of pipeline step output ...")
chown_output(OUTPUT_UID, output_dir)
return {"image_used": image, "output_dir": output_dir}
@routes.post('/run')
async def init(request):
data = await request.json()
image = data['image']
input_dir = data['input_dir']
output_dir = data['output_dir']
env = data['env']
cmd = data['cmd']
docker_socket = data['docker_socket']
response = pipeline_run(image, input_dir, output_dir, env, cmd, docker_socket)
return web.json_response(response)
app.add_routes(routes)
if __name__ == '__main__':
web.run_app(app, port=8081)
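# Illustrative request body for POST /run on port 8081 (all values are hypothetical):
# {
#     "image": "my-tool:latest",     # tool container image to execute
#     "input_dir": "/data/in",       # host path mounted at /usr/src/app/input, or null
#     "output_dir": "/data/out",     # host path mounted at /usr/src/app/data
#     "env": {"DEBUG": "1"},         # environment variables passed to the container
#     "cmd": ["python", "run.py"],   # command override for the tool container
#     "docker_socket": false         # true to bind-mount /var/run/docker.sock into the tool
# }
# init() reads all six keys unconditionally, so each must be present in the JSON body.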
|
import abc
import asyncio
from typing import Optional
import aiohttp
from fastquotes.const import HEADERS, REQ_CODES_NUM_MAX
from fastquotes.utils import format_stock_codes
class AsyncQuote(metaclass=abc.ABCMeta):
def __init__(self) -> None:
pass
@property
@abc.abstractmethod
def base_url(self) -> str:
pass
@property
@abc.abstractmethod
def split_char(self) -> str:
pass
@abc.abstractmethod
def parse_out_tick_dict(self, msg: str) -> Optional[dict]:
pass
async def tick_dict(self, codes: list) -> dict:
format_codes = format_stock_codes(codes)
res = {}
tasks = []
async with aiohttp.ClientSession() as session:
async def small_price_dict(small_codes: list):
data_str = await self._fetch_data_str(session, small_codes)
data_list = data_str.strip().split("\n")
for item in data_list:
tick_dict = self.parse_out_tick_dict(item)
if tick_dict is None:
return
res[tick_dict["code"]] = tick_dict
codes_len = len(format_codes)
for i in range(0, codes_len, REQ_CODES_NUM_MAX):
if i + REQ_CODES_NUM_MAX >= codes_len:
small_codes = format_codes[i:]
else:
small_codes = format_codes[i : i + REQ_CODES_NUM_MAX]
tasks.append(small_price_dict(small_codes))
await asyncio.wait(tasks)
return res
async def price_dict(self, codes: list) -> dict:
tick_dict = await self.tick_dict(codes)
res_dict = {}
for code, tick in tick_dict.items():
if "current_price" in tick:
res_dict[code] = tick["current_price"]
return res_dict
async def pre_close_dict(self, codes: list) -> dict:
tick_dict = await self.tick_dict(codes)
res_dict = {}
for code, tick in tick_dict.items():
if "pre_close" in tick:
res_dict[code] = tick["pre_close"]
return res_dict
async def open_dict(self, codes: list) -> dict:
tick_dict = await self.tick_dict(codes)
res_dict = {}
for code, tick in tick_dict.items():
if "open" in tick:
res_dict[code] = tick["open"]
return res_dict
async def total_vol_dict(self, codes: list) -> dict:
tick_dict = await self.tick_dict(codes)
res_dict = {}
for code, tick in tick_dict.items():
if "total_vol" in tick:
res_dict[code] = tick["total_vol"]
return res_dict
async def _fetch_data_str(self, session, codes: list) -> str:
codes_str = ",".join(codes)
async with await session.get(
f"{self.base_url}{codes_str}", headers=HEADERS
) as response:
return await response.text()
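# Illustrative concrete subclass (a sketch, not part of fastquotes): the endpoint URL,
# the line format and the field order below are hypothetical, chosen only to show how
# base_url, split_char and parse_out_tick_dict fit together with tick_dict() above.
class DemoQuote(AsyncQuote):
    @property
    def base_url(self) -> str:
        return "https://quotes.example.com/list="  # hypothetical endpoint

    @property
    def split_char(self) -> str:
        return ","  # hypothetical field separator within one response line

    def parse_out_tick_dict(self, msg: str) -> Optional[dict]:
        # Hypothetical line format: "<code>,<current_price>,<pre_close>,<open>,<total_vol>"
        parts = msg.strip().split(self.split_char)
        if len(parts) < 5:
            return None
        return {
            "code": parts[0],
            "current_price": float(parts[1]),
            "pre_close": float(parts[2]),
            "open": float(parts[3]),
            "total_vol": float(parts[4]),
        }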
|
from __future__ import print_function, unicode_literals, division, absolute_import
import json
import os
import types
from queue import Queue
from typing import List, Dict, Any, Type, Set, Iterable
from BlockServer.core.config_list_manager import ConfigListManager
from BlockServer.core.macros import MACROS, PVPREFIX_MACRO
from server_common.utilities import print_and_log as _common_print_and_log, SEVERITY
from server_common.channel_access import ChannelAccess
def print_and_log(message: str, *args, **kwargs):
_common_print_and_log(f"ComponentSwitcher: {message}", *args, **kwargs)
class ComponentSwitcherConfigFileManager(object):
CONF_FILE_PATH = os.path.join(MACROS["$(ICPCONFIGROOT)"], "ComponentSwitcher", "component_switcher.json")
def read_config(self) -> List[Dict[str, Any]]:
"""
Reads a config file from JSON on disk and returns it as a python object
"""
if os.path.exists(self.CONF_FILE_PATH):
with open(self.CONF_FILE_PATH) as f:
return json.loads(f.read())
else:
print_and_log(f"component_switcher config file at {self.CONF_FILE_PATH} does not exist"
f" - assuming empty config", SEVERITY.MINOR)
return []
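# Illustrative component_switcher.json layout, inferred from the fields consumed in
# ComponentSwitcher.create_monitors() below; the PV and component names are hypothetical.
# [
#     {
#         "pv": "CS:MODE:SELECT",
#         "is_local": true,
#         "value_to_component_map": {
#             "MODE_A": "component_for_mode_a",
#             "MODE_B": "component_for_mode_b"
#         }
#     }
# ]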
class ComponentSwitcher(object):
def __init__(self,
config_list: ConfigListManager,
blockserver_write_queue: Queue,
reload_current_config_func: types.FunctionType,
file_manager: ComponentSwitcherConfigFileManager = None,
channel_access_class: Type[ChannelAccess] = None):
self._config_list = config_list
self._blockserver_write_queue = blockserver_write_queue
self._reload_current_config = reload_current_config_func
self._ca_class = channel_access_class if channel_access_class is not None else ChannelAccess
self._file_manager = file_manager if file_manager is not None else ComponentSwitcherConfigFileManager()
def all_components_dynamic(self, components: Iterable[str]) -> bool:
for comp in components:
try:
loaded_comp = self._config_list.load_config(comp, is_component=True)
if not loaded_comp.is_dynamic():
print_and_log(f"Component is not dynamic: {comp}")
return False
except Exception as e:
print_and_log(f"Error while checking whether component {comp} is dynamic: {e}")
return False
return True
def create_monitors(self) -> None:
"""
Starts monitoring the PVs specified in the configswitcher configuration file.
"""
for item in self._file_manager.read_config():
pv = item["pv"]
pv_is_local = item["is_local"]
value_to_component_map = item["value_to_component_map"]
if pv_is_local:
pv = MACROS[PVPREFIX_MACRO] + pv
if not self.all_components_dynamic(value_to_component_map.values()):
print_and_log(f"ERROR: not adding monitor to PV {pv} as some of the requested "
f"components are not marked as dynamic.")
continue
print_and_log("Adding monitor to PV {}".format(pv))
            # Bind pv and value_to_component_map as default arguments so that each
            # monitor callback keeps the values from its own loop iteration.
            def callback(val: Any, stat: int, sevr: int,
                         pv: str = pv,
                         value_to_component_map: Dict[str, str] = value_to_component_map):
"""
Callback function called when the monitored PV changes.
Args:
val: the value that this monitor returned
stat: the epics status of the monitored PV
sevr: the epics severity of the monitored PV
"""
val = str(val)
if stat != 0 or sevr != 0:
print_and_log(f"Got value '{val}' (stat={stat}, sevr={sevr}) for pv '{pv}', ignoring as it has "
f"non-zero STAT/SEVR")
return
if val not in value_to_component_map:
print_and_log(f"Got value '{val}' (stat={stat}, sevr={sevr}) for pv '{pv}', ignoring as value did "
f"not map to any component")
return
comps_to_remove = {v for k, v in value_to_component_map.items() if k != val}
comps_to_add = {value_to_component_map[val]}
print_and_log(f"Got value '{val}' (stat={stat}, sevr={sevr}) for pv '{pv}'. Editing configurations to "
f"remove components {comps_to_remove} and add components {comps_to_add}.")
# Put these actions onto the blockserver write queue so that we avoid any multithreading problems
# with concurrent edits from multiple sources in the blockserver. This also ensures we don't do any
# CA calls from within a monitor context, which would be invalid.
self._blockserver_write_queue.put(
(self._edit_all_configurations, (comps_to_remove, comps_to_add), "COMPONENT_SWITCHER_EDIT"))
self._ca_class.add_monitor(pv, callback)
def _edit_all_configurations(self, components_to_be_removed: Set[str], components_to_be_added: Set[str]) -> None:
"""
Edits all configurations by adding or removing the specified components.
Args:
components_to_be_removed: A set of component names which will be removed from all configurations if present
components_to_be_added: A set of component names which will be added to all configurations
"""
current_config_name = self._config_list.active_config_name
config_names = {meta["name"] for meta in self._config_list.get_configs()}
component_names = {meta["name"] for meta in self._config_list.get_components()}
if current_config_name not in config_names:
raise ValueError(f"current config {current_config_name} not in list of all configs {config_names}.")
if not components_to_be_removed.issubset(component_names):
raise ValueError(f"A component for removal did not exist. "
f"Remove {components_to_be_removed}, available {component_names}")
if not components_to_be_added.issubset(component_names):
raise ValueError(f"A component to be added did not exist. "
f"Add {components_to_be_added}, available {component_names}")
for config_name in config_names:
config_changed = False
config = self._config_list.load_config(config_name, is_component=False)
# Remove components first to avoid any conflicts
for component_name in components_to_be_removed:
if component_name in config.get_component_names():
print_and_log(f"Removing component {component_name} from {config_name}")
config.remove_comp(component_name)
config_changed = True
for component_name in components_to_be_added:
if component_name not in config.get_component_names():
print_and_log(f"Adding component {component_name} to {config_name}")
config.add_component(component_name)
config_changed = True
if config_changed:
print_and_log(f"Saving modified config {config_name}")
config.save_inactive()
self._config_list.update(config)
if config_name == current_config_name:
print_and_log(f"Reloading active modified config ({config_name})")
self._reload_current_config()
|
#!/usr/bin/env python3
import random
class Stack_3_2():
def __init__(self, init_size = 10):
self.stack_size = init_size
self.back_arr = [None] * init_size
self.head = 0
def push(self, num):
if self.head == 0:
minimum = num
else:
minimum = min(self.back_arr[self.head - 1][1], num)
self.back_arr[self.head] = [num, minimum]
self.head += 1
if self.head >= self.stack_size - 1:
self.stack_size *= 2
new_arr = [None] * self.stack_size
for i in range(0, len(self.back_arr)):
new_arr[i] = self.back_arr[i]
self.back_arr = new_arr
    def pop(self):
        # The top element lives at index head - 1; head points at the next free slot.
        self.head -= 1
        top = self.back_arr[self.head]
        self.back_arr[self.head] = None
        return top
def minimum(self):
return self.back_arr[self.head - 1][1]
# s = Stack_3_2()
# s.push(12)
# s.push(11)
# s.push(5)
# print(s.back_arr)
# s.push(3)
# print(s.back_arr)
# s.pop()
# s.push(2)
# s.push(5)
# s.push(11)
# print(s.back_arr)
# print(s.minimum())
# class Queue_3_4():
# def __init__(self, init_size = 10):
# self.
# def enqueue(self, num):
# def dequeue(self):
|
import re
from odoo import http
from odoo.http import request
from odoo.addons.website_event.controllers.main import WebsiteEventController
class WebsiteEventControllerExtended(WebsiteEventController):
@http.route()
def registration_confirm(self, event, **post):
"""Check that threre are no email duplicates.
There is a check on frontend, but that is easy to get around."""
registrations = self._process_registration_details(post)
emails = [r.get("email", "").strip() for r in registrations]
assert len(emails) == len(set(emails))
res = super(WebsiteEventControllerExtended, self).registration_confirm(
event, **post
)
if res.location:
# If super redirect (to /shop/checkout)
url = (
request.env["ir.config_parameter"].get_param(
"website_event_sale.redirection"
)
or res.location
)
return request.redirect(url)
else:
return res
def _process_registration_details(self, details):
""" Remove spaces in emails """
res = super(WebsiteEventControllerExtended, self)._process_registration_details(
details
)
for registration in res:
if registration.get("email"):
registration["email"] = registration.get("email").strip()
return res
@http.route(
["/website_event_attendee_fields/check_email"],
type="json",
auth="public",
methods=["POST"],
website=True,
)
def check_email(self, event_id, email):
partner = (
request.env["res.partner"].sudo().search([("email", "=", email)], limit=1)
)
if not partner:
def remove_spaces(s):
s = re.sub(r"^\s*", "", s)
s = re.sub(r"\s*$", "", s)
return s
email = remove_spaces(email)
partner = (
request.env["res.partner"]
.sudo()
.search(
[
"|",
"|",
("email", "=ilike", "% " + email),
("email", "=ilike", "% " + email + " %"),
("email", "=ilike", email + " %"),
],
limit=1,
)
)
            if not partner:
                return {}
            partner_email = remove_spaces(partner.email)
# It's a workaround in order to prevent duplicating partner accounts when buying a ticket
partner.write({"email": partner_email})
event = request.env["event.event"].sudo().browse(event_id)
error_msg = event.check_partner_for_new_ticket(partner.id)
if error_msg:
return {"email_not_allowed": error_msg}
known_fields = []
for f in event.attendee_field_ids:
if f.field_name == "email":
continue
if getattr(partner, f.field_name):
known_fields.append(f.field_name)
return {"known_fields": known_fields}
|
from ebs_deploy import out, get, parse_env_config, parse_option_settings
def add_arguments(parser):
"""
adds arguments for the rebuild command
"""
parser.add_argument('-e', '--environment', help='Environment name', required=True)
parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')
def execute(helper, config, args):
"""
Rebuilds an environment
"""
env_config = parse_env_config(config, args.environment)
helper.rebuild_environment(args.environment)
# wait
if not args.dont_wait:
        helper.wait_for_environments(args.environment, health='Green', status='Ready')
|
from cli import *
# Common functionality for network devices
def get_nic_info(obj):
info = [("Recorder", obj.recorder),
("MAC address", obj.mac_address),
("Link", obj.link)]
try:
bw = obj.tx_bandwidth
if bw == 0:
bw = "unlimited"
elif bw % 1000:
bw = "%d bit/s" % bw
else:
bw = bw // 1000
if bw % 1000:
bw = "%d kbit/s" % bw
else:
bw = "%d Mbit/s" % (bw // 1000)
info.append(("Transmit limit", bw))
except:
pass
return [(None, info)]
def get_nic_status(obj):
return []
# -------------------- connect --------------------
def connect_cmd(obj, auto, network_poly):
if auto:
print "The flag '-auto' is deprecated, and shouldn't be used."
# for now quiet if network doesn't exist when using auto
if network_poly[0] == str_t:
return
if network_poly[0] == str_t:
print "Argument is not an Ethernet link object."
SIM_command_has_problem()
return
try:
obj.link = network_poly[1]
except Exception, msg:
print "[%s] Connection failed" % obj.name
print msg
# ------------------- disconnect ------------------
def disconnect_cmd(obj):
try:
obj.link = None
except Exception, msg:
print "[%s] Disconnection failed (%s)" % (obj.name, msg)
# ------------- command registration --------------
def new_nic_commands(device_name):
new_command("connect", connect_cmd,
[arg(flag_t, "-auto"),
arg((obj_t("link", "ethernet-link"), str_t), ("link", "link-name"))],
alias = "",
type = ["Ethernet", "Networking"],
short = "connect to a simulated Ethernet link",
see_also = ['<' + device_name + '>.disconnect'],
namespace = device_name,
doc = """
Connect the device to a simulated Ethernet link.
The flag '-auto' is deprecated and shouldn't be used.
""", filename="/mp/simics-3.0/src/extensions/apps-python/nic_common.py", linenumber="59")
new_command("disconnect", disconnect_cmd,
[],
alias = "",
type = ["Ethernet", "Networking"],
short = "disconnect from simulated link",
see_also = ['<' + device_name + '>.connect'],
namespace = device_name,
doc = """
Disconnect the device from a simulated Ethernet link.
""", filename="/mp/simics-3.0/src/extensions/apps-python/nic_common.py", linenumber="72")
|
from starlette.testclient import TestClient
import firestorefastapi.gunicorn_config as gunicorn_config
def test_gunicorn_config(client: TestClient) -> None:
assert gunicorn_config.worker_class == "uvicorn.workers.UvicornWorker"
|
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
# Configuration:
LANGUAGE = "language"
LANGUAGES = [
"English",
"Danish",
"German",
"Spanish",
"French",
"Italian",
"Norwegian",
"Romanian",
"Swedish",
"Dutch",
"Slovak"
]
SIDEPANEL_TITLE = "sidepanel_title"
SIDEPANEL_ICON = "sidepanel_icon"
ENABLE_SIDEPANEL = "enable_sidepanel"
THEME = "theme"
PRIMARY_COLOR = "primary_color"
THEME_OPTIONS = [
"Auto Mode (Dark/Light)",
"Dark Mode",
"Light Mode",
"Auto Mode (Black/White)",
"Black Mode",
"White Mode",
"HA selected theme"
]
CUSTOMIZE_PATH = "customize_path"
@config_entries.HANDLERS.register("dwains_dashboard")
class DwainsDashboardConfigFlow(config_entries.ConfigFlow):
async def async_step_user(self, user_input=None):
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
return self.async_create_entry(title="", data={})
@staticmethod
@callback
def async_get_options_flow(config_entry):
return DwainsDashboardEditFlow(config_entry)
class DwainsDashboardEditFlow(config_entries.OptionsFlow):
def __init__(self, config_entry):
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
schema = {
vol.Optional(LANGUAGE, default=self.config_entry.options.get("language", "English")): vol.In(LANGUAGES),
vol.Optional(SIDEPANEL_TITLE, default=self.config_entry.options.get("sidepanel_title", "Dwains Dashboard")): str,
vol.Optional(SIDEPANEL_ICON, default=self.config_entry.options.get("sidepanel_icon", "mdi:alpha-d-box")): str,
vol.Optional(ENABLE_SIDEPANEL, default=self.config_entry.options.get("enable_sidepanel", True)): bool,
vol.Optional(THEME, default=self.config_entry.options.get("theme", "Auto Mode (Dark/Light)")): vol.In(THEME_OPTIONS),
vol.Optional(PRIMARY_COLOR, default=self.config_entry.options.get("primary_color", "#299ec2")): str,
vol.Optional(CUSTOMIZE_PATH, default=self.config_entry.options.get("customize_path", "customize.yaml")): str,
}
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(schema)
)
|
#!/usr/bin/env python
# Copyright (c) 2012, Andres Blanco and Matias Eissler
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by the authors.
# 4. Neither the name of the authors nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Command Line tool for testing or something...
import sys
import phy
import dot11
import libpcap
import helpers
import applayer
# PCAP
DEVICE = "en0" # iOS devices only
PROMISC = 1
SNAPLEN = 65535
TIMEOUT_MS = 100
BPF_FILTER = "ether host 88:88:88:88:88:88"
# Header Sizes
WLC_PHY_HEADER_SIZE = 36
ETHERNET_HEADER_SIZE = 14
# Global things :S
debug = False
networks = {}
def processPackets(packet):
if debug:
print "pcap length: %d" % packet.getLength()
if packet.getLength() < WLC_PHY_HEADER_SIZE + ETHERNET_HEADER_SIZE:
return None
phy_hdr_begin = ETHERNET_HEADER_SIZE
phy_hdr_end = phy_hdr_begin + WLC_PHY_HEADER_SIZE
phy_header = packet.getData()[phy_hdr_begin:phy_hdr_end]
phy_hdr = phy.Bcm4329PhyHeader(phy_header)
if debug:
phy_channel = phy_hdr.getChannel()
print "phy length: %d" % phy_hdr.getFrameSize()
print "phy channel: %d %04X" % (phy_channel, phy_channel)
print "phy rssi: %d" % (phy_hdr.getRssi())
print "phy valid FCS: %d" % (phy_hdr.hasValidFCS())
if not phy_hdr.hasValidFCS():
print "Invalid FCS!"
# return None
fc = dot11.FrameControl(packet.getData()[phy_hdr_end:])
fc_protocol = fc.getProtocol()
fc_type = fc.getType()
fc_subtype = fc.getSubtype()
fc_to_ds = fc.getToDs()
fc_from_ds = fc.getFromDs()
fc_protected = fc.getProtectedFrame()
if debug:
print "proto: %d" % fc_protocol
print "type: %d - subtype: %d" % (fc_type, fc_subtype)
print "toDS: %r - fromDS: %r" % (fc_to_ds, fc_from_ds)
print "protectedFrame: %r" % fc_protected
if fc_type == 0 and fc_subtype == 8: # Type Management Subtype Beacon
try:
beacon_frame = dot11.Beacon(packet.getData()[phy_hdr_end:])
bssid = beacon_frame.getBssid()
if debug:
print "Beacon"
print "Duration: %d" % beacon_frame.getDuration()
print "Destination: %s" % beacon_frame.getDestination()
print "Source: %s" % beacon_frame.getSource()
print "BSSID: %s" % bssid
print "Fragment: %s" % beacon_frame.getFragment()
print "Sequence: %s" % beacon_frame.getSequence()
print "Information Elements"
for item in beacon_frame.getRawInformationElements().items():
print item
for item in beacon_frame.getInformationElements().items():
print item[0]
print item[1]
if not (bssid in networks):
nt = applayer.Network(beacon_frame)
networks[bssid] = nt
ssid = nt.getSsid()
ch = nt.getChannel()
security = nt.getSecurity()
vendor = nt.getVendor()
print "%s - %s - %d - %s - %s" % (bssid,
ssid,
ch,
security,
vendor)
# except dot11.InvalidInformationElement:
# pass
except Exception, e:
print "Exception: %s" % e.__class__
print "phy valid FCS: %d" % phy_hdr.hasValidFCS()
print "phy raw data"
print phy_header.encode('hex')
print "802.11 raw data"
print packet.getData()[phy_hdr_end:].encode('hex')
raise Exception
elif fc_type == 0 and fc_subtype == 4: # Type Management Subtype Probe Req
probe_req = dot11.ProbeRequest(packet.getData()[phy_hdr_end:])
print "-" * 40
print probe_req.getSource()
print probe_req._ies
print "-" * 40
elif fc_type == 2: # Type data
data_frame = dot11.DataFrame(packet.getData()[phy_hdr_end:])
bssid = data_frame.getBssid()
station_address = data_frame.getSourceAddress()
if helpers.is_mac_address_multicast(station_address):
return
if bssid in networks:
nt = networks[bssid]
stations = nt.getStations()
station_address = data_frame.getSourceAddress()
station = applayer.Station(station_address)
if not (station_address in stations):
nt.addStation(station)
print "Station %s connected to %s %s" % (station_address,
nt.getBssid(),
nt.getSsid())
# Show not encrypted frames
# if not fc_protected:
# print repr(packet.getData()[phy_hdr_end:])
def forever():
try:
handle = libpcap.pcap_open_live(DEVICE, SNAPLEN, PROMISC, TIMEOUT_MS)
if handle:
bpf = libpcap.bpf_program()
libpcap.pcap_compile(handle, BPF_FILTER, bpf)
libpcap.pcap_setfilter(handle, bpf)
while(1):
pkt_hdr, pkt_data = libpcap.pcap_next(handle)
packet = libpcap.Packet(pkt_hdr, pkt_data)
processPackets(packet)
except KeyboardInterrupt:
print "Trap Ctrl+C."
print "Exiting..."
sys.exit(0)
if __name__ == "__main__":
if "debug" in sys.argv:
debug = True
forever()
|
import abc
from collections import defaultdict
import pandas as pd
from library.pandas_jqgrid import DataFrameJqGrid
from ontology.models import OntologyTerm, OntologySnake
class AbstractOntologyGenesGrid(DataFrameJqGrid, abc.ABC):
@abc.abstractmethod
def _get_ontology_terms_ids(self):
pass
def get_dataframe(self):
        # This uses the same method as the gene filter (special_case_gene_symbols_for_hpo_and_omim), though with
        # individual calls per term so that it matches what the gene filter does
hpo_qs, omim_qs = OntologyTerm.split_hpo_and_omim(self._get_ontology_terms_ids())
gene_terms_set = defaultdict(lambda: defaultdict(set))
for hpo in hpo_qs:
for gene in OntologySnake.gene_symbols_for_terms([hpo]):
gene_terms_set[gene.symbol]["hpo"].add(str(hpo))
for omim in omim_qs:
for gene in OntologySnake.gene_symbols_for_terms([omim]):
gene_terms_set[gene.symbol]["omim"].add(str(omim))
gene_dict = {k: {t: ", ".join(sorted(term_set)) for t, term_set in v.items()} for k, v in gene_terms_set.items()}
df = pd.DataFrame.from_dict(gene_dict, orient='index')
return df.sort_index()
|
from __future__ import absolute_import
from .connection import Connection
__version__ = '0.3.5'
|
import numpy as np
class MultiArmedBandit(object):
"""
Multi-armed single Bandit
Args
k: number of arms
"""
def __init__(self, k):
self.k = k
self.action_values = np.zeros(k)
self.optimal = None
def reset(self):
self.action_values = np.zeros(self.k)
self.optimal = None
def pull(self, action):
return 0, True # returns reward and True if action is optimal.
class ContextualBandit(MultiArmedBandit):
"""
Args
k: number of arms
d: dimension of state vector given action
"""
def __init__(self, k, d): # d: dimension of state
super().__init__(k)
self.d = d
self.states = np.zeros((self.k, self.d))
def reset(self):
self.action_values = np.zeros(self.k)
self.optimal = 0
self.states = np.zeros((self.k, self.d))
class MultiBandits(object):
def __init__(self):
self.bandits = []
self.bandit = None
self.cursor = 0
self.k = 0
self.reset()
def reset(self):
self.bandits = []
self.bandit = None
self.cursor = 0
self.k = 0
def add_bandit(self, bandit):
self.bandits.append(bandit)
self.k = bandit.k
def get_bandit(self):
self.bandit = self.bandits[self.cursor]
self.k = self.bandit.k
self.cursor += 1
if self.cursor == len(self.bandits):
self.cursor = 0
def pull(self, action):
return self.bandit.pull(action)
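# Minimal usage sketch (not part of the original module). The bandits defined above are
# stubs whose pull() always returns reward 0, so this only demonstrates the plumbing.
if __name__ == "__main__":
    multi = MultiBandits()
    multi.add_bandit(MultiArmedBandit(k=5))
    multi.add_bandit(ContextualBandit(k=5, d=3))

    multi.get_bandit()              # select the first bandit and advance the cursor
    reward, optimal = multi.pull(action=2)
    print(reward, optimal)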
|
# pytube: a lightweight, dependency-free library and command-line utility for downloading YouTube videos
# The library must be installed with 'pip' before it can be used.
# Install command: pip install pytube
from pytube import YouTube
# Song: Mushoku Tensei ~Jobless Reincarnation~ OST :: "Faraway Lullaby"
# Video title: TVアニメ『無職転生』第19話ノンクレジットOPムービー/OPテーマ:「遠くの子守の唄」大原ゆい子
url = 'https://www.youtube.com/watch?v=d79iZcngOGQ'
# Create a YouTube object.
yt = YouTube(url)
# Get the streams property.
streams = yt.streams
# Keep only the progressive mp4 streams.
streams = streams.filter(file_extension='mp4', progressive=True)
# Sort the filtered streams by resolution, highest first.
streams = streams.order_by('resolution')
streams = streams.desc()
# Take the first (highest-resolution) mp4 stream.
mp4 = streams.first()
# Download the selected mp4 stream.
mp4.download()
|
import unittest
from unittest import TestCase
from cloudtts import AudioFormat
from cloudtts import CloudTTSError
from cloudtts import Gender
from cloudtts import VoiceConfig
from cloudtts import WatsonClient
from cloudtts import WatsonCredential
class TestWatsonClient(TestCase):
def setUp(self):
self.c = WatsonClient()
def test_init(self):
self.assertIsInstance(self.c, WatsonClient)
def test_make_params_with_nothing(self):
params = self.c._make_params(None, None)
self.assertIsInstance(params, dict)
self.assertIn('accept', params)
self.assertEqual(params['accept'], 'audio/mp3')
self.assertIn('voice', params)
self.assertEqual(params['voice'], 'en-US_AllisonVoice')
def test_make_params_with_voice_config_only(self):
for l, g in WatsonClient.LANG_GENDER_DICT:
vc = VoiceConfig(language=l, gender=g)
params = self.c._make_params(vc, None)
voice = WatsonClient.LANG_GENDER_DICT[(l, g)]
self.assertEqual(params['voice'], voice)
def test_make_params_with_detail_only(self):
detail = {'accept': 'audio/ogg;codecs=opus',
'voice': 'en-US_MichaelVoice'}
params = self.c._make_params(None, detail)
for k in detail:
self.assertEqual(detail[k], params[k])
def test_make_params_with_voice_config_and_detail(self):
detail = {'accept': 'audio/ogg;codecs=opus;rate=192000',
'voice': 'en-US_MichaelVoice'}
vc = VoiceConfig(audio_format=AudioFormat.mp3, gender=Gender.female)
params = self.c._make_params(vc, detail)
# detail overwrites values
self.assertNotEqual(params['accept'], 'mp3')
self.assertEqual(params['accept'], detail['accept'])
self.assertNotEqual(params['voice'], 'en-US_AllisonVoice')
self.assertEqual(params['voice'], detail['voice'])
def test_auth_before_tts(self):
txt = 'Hello world'
self.assertRaises(CloudTTSError, lambda: self.c.tts(txt))
def test_error_without_data(self):
cred = WatsonCredential(username='xxxx', password='yyyy',
url='https://example.com')
self.c.auth(cred)
self.assertRaises(ValueError, lambda: self.c.tts(text=''))
def test_acceptable_text_length(self):
cred = WatsonCredential(username='xxxx', password='yyyy',
url='https://example.com')
self.c.auth(cred)
text = 'a' * (WatsonClient.MAX_TEXT_BYTES)
self.assertRaises(TypeError,
lambda: self.c.tts(
text=text,
voice_config=True,
))
def test_error_with_too_long_text(self):
cred = WatsonCredential(username='xxxx', password='yyyy',
url='https://example.com')
self.c.auth(cred)
text = 'a' * (WatsonClient.MAX_TEXT_BYTES+1)
# CloudTTSError is raised with too long text
self.assertRaises(CloudTTSError, lambda: self.c.tts(text=text))
def test_invalid_credential(self):
self.c.auth({
'username': 'username',
'password': 'password',
'url': 'https://stream.watsonplatform.net/text-to-speech/api'
})
txt = 'Hello world'
self.assertRaises(TypeError, lambda: self.c.tts(txt))
def test_is_valid_voice(self):
for voice in WatsonClient.AVAILABLE_VOICES:
self.assertTrue(self.c._is_valid_voice({'voice': voice}))
self.assertFalse(self.c._is_valid_voice(
{'voice': 'ja-JP_MitsuhiroVoice'}))
def test_is_valid_accept(self):
for codec in WatsonClient.AVAILABLE_ACCEPTS['require_rate']:
d = {'accept': codec}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate='.format(codec)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, 'abc')}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MIN_RATE-1)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MIN_RATE)}
self.assertTrue(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MAX_RATE)}
self.assertTrue(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MAX_RATE+1)}
self.assertFalse(self.c._is_valid_accept(d))
for codec in WatsonClient.AVAILABLE_ACCEPTS['allow_rate']:
d = {'accept': codec}
self.assertTrue(self.c._is_valid_accept(d))
d = {'accept': '{};rate='.format(codec)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, 'abc')}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MIN_RATE-1)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MIN_RATE)}
self.assertTrue(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MAX_RATE)}
self.assertTrue(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MAX_RATE+1)}
self.assertFalse(self.c._is_valid_accept(d))
for codec in WatsonClient.AVAILABLE_ACCEPTS['disallow_rate']:
d = {'accept': codec}
self.assertTrue(self.c._is_valid_accept(d))
d = {'accept': '{};rate='.format(codec)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, 'abc')}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MIN_RATE-1)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MIN_RATE)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MAX_RATE)}
self.assertFalse(self.c._is_valid_accept(d))
d = {'accept': '{};rate={}'.format(codec, WatsonClient.MAX_RATE+1)}
self.assertFalse(self.c._is_valid_accept(d))
class TestWatsonCredential(TestCase):
pass
if __name__ == '__main__':
unittest.main()
|
import asyncio
from yandere_parser import parse_yandere
async def test():
# pixiv without character tag
await parse_yandere(
"https://yande.re/post/show/889591")
# no source with character tag
await parse_yandere("https://yande.re/post/show/829310")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
|
# create files for chart-02X
# WORKING/chart-NN.makefile
# WORKING/chart-NN.data
# WORKING/chart-NN.txt-SPECIFIC.txt
# import built-ins and libraries
import sys
import pdb
import cPickle as pickle
import os.path
# import my stuff
from Bunch import Bunch
from directory import directory
from Logger import Logger
def print_help():
print 'python chart-NN.py SUFFIX [SPECIFIC]'
print 'where SUFFIX in {"makefile", "data", "txt"}'
    print 'and SPECIFIC in {"mean-root-mean-squared-errors",'
    print '                 "median-root-median-squared-errors",'
    print '                 "mean-fraction-wi10"}'
def make_control(specs, argv):
# return a Bunch
def make_base_name(argv):
name = argv[0].split('.')
return name[0]
def make_data_name(argv):
base_name = make_base_name(argv)
return directory('working') + base_name + '.data'
def make_makefile_name(argv):
base_name = make_base_name(argv)
return base_name + '.makefile'
def make_log_name(argv):
base_name = make_base_name(argv)
specific = ('-' + argv[2]) if len(argv) > 2 else ''
return base_name + '-' + argv[1] + specific + '.log'
def make_txt_name(argv):
base_name = make_base_name(argv)
return directory('working') + base_name + '.txt'
def make_path_out_output(argv):
suffix = argv[1]
if suffix == 'data':
return make_data_name(argv)
elif suffix == 'makefile':
return make_makefile_name(argv)
elif suffix == 'txt':
return make_txt_name(argv)
else:
print_help()
raise RuntimeError('bad SUFFIX: ' + suffix)
def make_paths():
result = Bunch(dir_cells=directory('cells'),
dir_working=directory('working'),
out_log=directory('log') + make_log_name(argv),
out_output=make_path_out_output(argv))
return result
if not(2 <= len(argv) <= 3):
print_help()
print 'argv', argv
raise RuntimeError('bad invocation')
if argv[1] == 'txt':
# set format for the error entries in the table
table_entry_format = \
'8.4f' if specs.metric == 'mean-wi10' else '8d'
else:
table_entry_format = None
r = Bunch(base_name=make_base_name(argv),
specs=specs,
path=make_paths(),
table_entry_format=table_entry_format,
testing=False,
debugging=False)
return r
class Report(object):
def __init__(self, lines, table_entry_format):
self.lines = lines
self.format_header = '{:>9s}' + (' {:>8s}' * 8)
self.format_detail = '{:9d}' + ((' {:%s}' % table_entry_format) * 8)
self.format_legend = '{:80s}'
def header(self, c0, c1, c2, c3, c4, c5, c6, c7, c8):
print c0, c1, c2, c3, c4, c5, c6, c7, c8
s = self.format_header.format(c0, c1, c2, c3, c4, c5, c6, c7, c8)
self.lines.append(s)
def detail(self, ndays, *clist):
# replace large column values with all 9's
print ndays, clist
large_value = 99999999
capped = [x if x <= large_value else large_value
for x in clist]
s = self.format_detail.format(ndays,
capped[0],
capped[1],
capped[2],
capped[3],
capped[4],
capped[5],
capped[6],
capped[7])
# s = self.format_detail.format(ndays, c1, c2, c3, c4, c5, c6, c7, c8)
self.lines.append(s)
def legend(self, txt):
print 'legend', txt
s = self.format_legend.format(txt)
self.lines.append(s)
def create_txt(control):
    '''Build the lines of chart NN.txt and write them to the output file.
    '''
def append_description(lines):
'''Append header lines'''
lines.append(control.specs.title)
lines.append('From 10-fold Cross Validation')
lines.append(' ')
lines.append('Model: ' + control.specs.model)
lines.append('Time period: ' + control.specs.year)
lines.append(' ')
def read_data():
'''Return correct data dict built by create_data() function.'''
path = control.path.dir_working + control.base_name + '.data'
f = open(path, 'rb')
data = pickle.load(f)
f.close()
return data
def append_header(t):
t.header('response:',
'price', 'price', 'price', 'price',
'logprice', 'logprice', 'logprice', 'logprice')
t.header('features:',
control.specs.feature_sets[0],
control.specs.feature_sets[1],
control.specs.feature_sets[2],
control.specs.feature_sets[3],
control.specs.feature_sets[0],
control.specs.feature_sets[1],
control.specs.feature_sets[2],
control.specs.feature_sets[3])
t.header('ndays', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ')
def append_detail_line(report, data, ndays):
def v(response, features):
'''Return int or 0, the value in the report.
'''
# features := predictor
# shortened, so save one column in the printout
key = (response, features, ndays)
if key in data:
value = data[key]
if control.specs.metric == 'mean-wi10':
return value
elif control.specs.metric == 'mean-mean':
return int(value)
elif control.specs.metric == 'median-median':
return int(value)
else:
raise RuntimeError('unknown metric: ' +
control.specs.metric)
else:
print 'no data for', key
return 0
report.detail(int(ndays),
v('price', control.specs.feature_sets[0]),
v('price', control.specs.feature_sets[1]),
v('price', control.specs.feature_sets[2]),
v('price', control.specs.feature_sets[3]),
v('logprice', control.specs.feature_sets[0]),
v('logprice', control.specs.feature_sets[1]),
v('logprice', control.specs.feature_sets[2]),
v('logprice', control.specs.feature_sets[3]))
def append_detail_lines(report, values):
'''Append body lines to report using values.'''
# one line for each training period
for ndays in control.specs.training_periods:
append_detail_line(report, values, ndays)
feature_set_desc = \
dict(act='features derived from accessor, census, and taxroll data',
             actlog='like act, but size features in log domain',
ct='features derived from census and taxroll data',
ctlog='like ct, but size features in log domain',
t='features derived from taxroll data',
tlog='like t, but size features in log domain'
)
def append_legend_lines(report):
# print legend describing features sets actually used
def r(s):
report.legend(s)
r(' ')
r('features set definitions')
for feature_set in control.specs.feature_sets:
r(feature_set + ': ' + feature_set_desc[feature_set])
r(' ')
def write_lines(lines):
f = open(control.path.out_output, 'w')
for line in lines:
f.write(line)
f.write('\n')
f.close()
lines = []
append_description(lines)
report = Report(lines, control.table_entry_format)
append_header(report)
data = read_data()
append_detail_lines(report, data)
append_legend_lines(report)
write_lines(lines)
def create_data(control):
    '''Write data file (in pickle format) to working directory.
    The data is a dict
      key = (response, feature_set, training_period)
      value = scalar generalization-error summary for that cell, computed with the
              metric selected in the specs (one of mean-mean, median-median, mean-wi10)
    '''
def make_file_path(response, predictor, training_days, control):
'''Return string containing file name.
'''
cell_file_name = '%s-%s-%s-%s-%s.cvcell' % (control.specs.model,
response,
predictor,
control.specs.year,
training_days)
return control.path.dir_cells + cell_file_name
def get_cv_result(file_path):
'''Return CvResult instance.'''
f = open(file_path, 'rb')
cv_result = pickle.load(f)
f.close()
return cv_result
# create table containing results from each cross validation
def cv_result_summary(cv_result):
if control.specs.metric == 'median-median':
maybe_value = cv_result.median_of_root_median_squared_errors()
elif control.specs.metric == 'mean-wi10':
maybe_value = cv_result.mean_of_fraction_wi10()
elif control.specs.metric == 'mean-mean':
maybe_value = cv_result.mean_of_root_mean_squared_errors()
else:
print control.specs
raise RuntimeError('unknown metric: ' + control.specs.metric)
return maybe_value.value if maybe_value.has_value else None
data = {}
for response in control.specs.responses:
for feature_set in control.specs.feature_sets:
for training_period in control.specs.training_periods:
file_path = make_file_path(response,
feature_set,
training_period,
control)
key = (response, feature_set, training_period)
if os.path.isfile(file_path):
cv_result = get_cv_result(file_path)
data[key] = cv_result_summary(cv_result)
else:
print 'no file for', response, feature_set, training_period
raise RuntimeError('missing file: ' + file_path)
# write the data (so that its in the log)
for k, v in data.iteritems():
print k, v
path = control.path.out_output
f = open(path, 'wb')
pickle.dump(data, f)
f.close()
def create_makefile(control):
'''Write makefile to source directory.'''
def make_file_names():
'''Return list of cell names.'''
file_names = []
for response in control.specs.responses:
for feature_set in control.specs.feature_sets:
for training_period in control.specs.training_periods:
cell_name = '%s-%s-%s-%s-%s' % (control.specs.model,
response,
feature_set,
control.specs.year,
training_period)
file_name = '%s%s.cvcell' % (control.path.dir_cells,
cell_name)
file_names.append(file_name)
if control.testing and len(file_names) > 0:
return file_names
return file_names
def make_lines():
'''Produce lines for makefile.
chart-02-cells = <cell1> <cell2> ...
chart-02.txt: chart-02.data chart-02.py
$(PYTHON) chart-02.py txt
chart-02.data: $(chart-02-cells) chart-02.py
$(PYTHON) chart-02.py data
#chart-02.makefile: chart02.py
# $(PYTHON) chart-02.py makefile
'''
lines = []
lines.append('# makefile generated by python %s.py makefile' %
control.base_name)
lines.append('%s-cells = %s' %
(control.base_name, ' '.join(make_file_names())))
path = control.path.dir_working + control.base_name
lines.append('%s.txt: %s.data %s.py' %
(path,
path,
control.base_name))
lines.append('\t$(PYTHON) %s.py txt' % control.base_name)
lines.append('%s.data: $(%s-cells) %s.py' %
(path,
control.base_name,
control.base_name))
lines.append('\t$(PYTHON) %s.py data' % control.base_name)
return lines
lines = make_lines()
if True:
# write first few columns of each line
print 'makefile'
for line in lines:
print line[:80]
f = open(control.path.out_output, 'w')
for line in lines:
f.write(line)
f.write('\n')
f.close()
def chart(specs, argv):
'''create files for charts 02-X
ARGS
specs: a Bunch of specifications
argv: the value of sys.argv from the caller (a main program)
'''
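    # Typical invocations from a small driver script (the file name chart-02.py
    # is illustrative; see make_lines() for the makefile this module generates):
    #   python chart-02.py makefile   # write the makefile
    #   python chart-02.py data       # build the pickled summary (create_data)
    #   python chart-02.py txt        # render the text report (create_txt)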
if False:
pdb.set_trace()
control = make_control(specs, argv)
sys.stdout = Logger(logfile_path=control.path.out_log)
print control
suffix = argv[1]
if suffix == 'makefile':
create_makefile(control)
elif suffix == 'data':
create_data(control)
elif suffix == 'txt':
create_txt(control)
else:
print_help()
raise RuntimeError('bad command SUFFIX')
# clean up
print control
if control.testing:
print 'DISCARD OUTPUT: TESTING'
print 'done'
|
from django.contrib import admin
from ._admin.activity import *
from ._admin.assistance import *
from ._admin.dwelling import *
from ._admin.income import *
from ._admin.person import *
# Register your models here.
admin.site.site_header = 'Administración de JSM'
|
#!/usr/bin/env python
# Copyright 2019 ARC Centre of Excellence for Climate Extremes
# author: Paola Petrelli <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clef.esdoc import *
from esdoc_fixtures import *
#import pytest
def test_esdoc_urls():
#dids=[]
assert True
def test_get_model_doc():
assert True
def test_get_doc():
base = 'https://api.es-doc.org/2/document/search-name?client=ESDOC-VIEWER-DEMO&encoding=html&'
assert get_doc('model', 'MIROC6', 'CMIP6') == ( base +
'project=CMIP6&name=MIROC6&type=CIM.2.SCIENCE.MODEL')
assert get_doc('experiment', 'historical') == ( base +
'project=CMIP6&name=historical&type=cim.2.designing.NumericalExperiment')
assert get_doc('mip', 'FAFMIP') == ( base +
'project=CMIP6&name=FAFMIP&type=cim.2.designing.Project')
def test_get_wdcc():
did='CMIP6.CMIP.MRI.MRI-ESM2-0.historical.none.r1i1p1f1'
url, json6 = get_wdcc(did)
assert url == 'https://cera-www.dkrz.de/WDCC/ui/cerasearch/cerarest/exportcmip6?input=CMIP6.CMIP.MRI.MRI-ESM2-0'
assert json6['identifier']['id'] == '10.22033/ESGF/CMIP6.621'
did='cmip5.output1.MIROC.MIROC5.historical.mon.atmos.Amon.r1i1p1.v20111028'
url, json5 = get_wdcc(did)
assert url == ("https://cera-www.dkrz.de/WDCC/ui/cerasearch/solr/select?" +
"rows=1&wt=json&q=entry_name_s:cmip5*output1*MIROC*MIROC5")
assert json5['response']['docs'][0]['entry_name_s'] == "cmip5 output1 MIROC MIROC5"
did='cordex.something.or.other'
assert get_wdcc(did) == (None, None)
def test_errata():
assert ( errata('hdl:21.14100/e4193a02-6405-49b6-8ad3-65def741a4dd') ==
["b6302400-3620-c8f1-999b-d192c0349084","2f6b5963-f87e-b2df-a5b0-2f12b6b68d32"])
assert errata('hdl:21.14100/7d16d79b-77c8-446c-9039-36c6803752f2') is None
def test_retrieve_error(test_error):
assert retrieve_error('ce889690-1ef3-6f46-9152-ccb27fc42490') == test_error
|
#!/usr/bin/env python
'''
Plots profiles for hydro-ship test case
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import numpy as np
import netCDF4
#import datetime
# import math
# from pylab import *
from optparse import OptionParser
import matplotlib.pyplot as plt
from matplotlib import cm
# from matplotlib.contour import QuadContourSet
# import time
import random
secInYr = 3600.0 * 24.0 * 365.0 # Note: this may be slightly wrong for some calendar types!
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", help="file to visualize", metavar="FILE")
parser.add_option("-t", "--time", dest="time", help="time step to visualize (0 based)", metavar="TIME")
parser.add_option("-s", "--save", action="store_true", dest="saveimages", help="include this flag to save plots as files")
parser.add_option("-n", "--nodisp", action="store_true", dest="hidefigs", help="include this flag to not display plots (usually used with -s)")
parser.add_option("-3", dest="A3", action="store_true", help="plot GLADS results for experiment 3")
parser.add_option("-5", dest="A5", action="store_true", help="plot GLADS results for experiment 5")
options, args = parser.parse_args()
if not options.filename:
print("No filename provided. Using output.nc.")
options.filename = "output.nc"
if not options.time:
print("No time provided. Using time -1.")
time_slice = -1
else:
time_slice = int(options.time)
if options.A3 and options.A5:
sys.exit("Only one of -3 and -5 can be specified.")
f = netCDF4.Dataset(options.filename,'r')
nCells = len(f.dimensions['nCells'])
nEdges = len(f.dimensions['nEdges'])
nTime = len(f.dimensions['Time'])
#xtime = f.variables['xtime'][:]
xCell = f.variables['xCell'][:]
yCell = f.variables['yCell'][:]
yVertex = f.variables['yVertex'][:]
xEdge = f.variables['xEdge'][:]
#yEdge = f.variables['yEdge'][:]
h = f.variables['waterThickness'][time_slice,:]
u = f.variables['waterVelocityCellX'][time_slice,:]
N = f.variables['effectivePressure'][time_slice,:]
days = f.variables['daysSinceStart'][:]
#basalmelt = f.variables['basalMeltInput'][time_slice,:]
#surfmelt = f.variables['externalWaterInput'][time_slice,:]
xtime= f.variables['xtime']
areaCell = f.variables['areaCell'][:]
q = u*h
#print "attempting to get input data from landice_grid.nc!"
#fin = netCDF4.Dataset('landice_grid.nc','r')
#H = fin.variables['thickness'][0,:]
print("Using time level {}, which is xtime = {}".format(time_slice, ''.join( xtime[time_slice,:])))
# Find center row - currently files are set up to have central row at y=0
unique_ys=np.unique(yCell[:])
centerY=unique_ys[len(unique_ys)//2]
print("number of ys={}, center y index={}, center Y value={}".format(len(unique_ys), len(unique_ys)//2, centerY))
ind = np.nonzero(yCell[:] == centerY)
x = xCell[ind]/1000.0
# calculate mean,min,max for all x values for needed variables
allx=np.unique(xCell[:]) # list of all unique x values
middley = np.unique(yCell[:])[1:-1] # list of all unique y values, excluding the values on the north and south edges
N_mean = np.zeros(allx.shape)
N_min = np.zeros(allx.shape)
N_max = np.zeros(allx.shape)
q_mean = np.zeros(allx.shape)
q_min = np.zeros(allx.shape)
q_max = np.zeros(allx.shape)
for i in range(len(allx)):
N_mean[i] = N[ xCell == allx[i] ].mean()
N_min[i] = N[ xCell == allx[i] ].min()
N_max[i] = N[ xCell == allx[i] ].max()
# Note: the second logical condition excludes the north and south rows.
# They have unrealistic values after interpolation to cell centers
# because of the no flow lateral b.c.
q_mean[i] = q[ np.logical_and(xCell == allx[i], np.in1d(yCell, middley) ) ].mean()
q_min[i] = q[ np.logical_and(xCell == allx[i], np.in1d(yCell, middley) ) ].min()
q_max[i] = q[ np.logical_and(xCell == allx[i], np.in1d(yCell, middley) ) ].max()
print("start plotting.")
############################
# SHMIP Werder plot with optional comparison to GLADS
############################
fig = plt.figure(1, facecolor='w')
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)
if options.A3:
#x,N_mean,N_min,N_max,q_mean,q_min,q_max,Q_max
data = np.array([
[1250,3.6938e+05,8918,8.3148e+05,0.00057512,0.00054189,0.00060403,0.0005687],
[3750,1.1954e+06,8.6108e+05,1.5055e+06,0.00055975,0.00051241,0.00061171,1.385e-05],
[6250,1.7328e+06,1.5285e+06,1.8977e+06,0.00054136,0.00049619,0.00058063,4.908e-05],
[8750,1.9756e+06,1.9151e+06,2.0184e+06,0.00052717,0.00051715,0.00054088,6.225e-05],
[11250,2.0292e+06,2.0191e+06,2.0342e+06,0.00051337,0.00050593,0.00052117,6.208e-05],
[13750,2.0343e+06,2.0337e+06,2.0349e+06,0.00049898,0.00049205,0.00050688,5.7544e-05],
[16250,2.0333e+06,2.0328e+06,2.034e+06,0.00048492,0.00047704,0.00049275,5.0174e-05],
[18750,2.0334e+06,2.0328e+06,2.0341e+06,0.00047067,0.00046278,0.00047807,4.3253e-05],
[21250,2.0356e+06,2.0342e+06,2.0376e+06,0.00045614,0.00044857,0.00046326,3.7132e-05],
[23750,2.0401e+06,2.0376e+06,2.0431e+06,0.00044145,0.00043417,0.00044871,3.3286e-05],
[26250,2.0466e+06,2.0432e+06,2.0505e+06,0.000427,0.00041967,0.00043462,2.8756e-05],
[28750,2.055e+06,2.0508e+06,2.0597e+06,0.00041219,0.00040515,0.00041988,2.4631e-05],
[31250,2.0648e+06,2.0598e+06,2.0705e+06,0.00039822,0.0003912,0.00040547,2.2199e-05],
[33750,2.0769e+06,2.0707e+06,2.083e+06,0.00038385,0.00037639,0.00039104,1.9196e-05],
[36250,2.0898e+06,2.0832e+06,2.0968e+06,0.0003691,0.00036202,0.00037614,1.6603e-05],
[38750,2.1048e+06,2.0973e+06,2.1128e+06,0.00035445,0.00034747,0.00036198,1.4817e-05],
[41250,2.1219e+06,2.1131e+06,2.1303e+06,0.00034021,0.00033275,0.00034758,1.263e-05],
[43750,2.1398e+06,2.1305e+06,2.1489e+06,0.00032575,0.00031846,0.00033287,1.0957e-05],
[46250,2.1591e+06,2.1499e+06,2.1697e+06,0.00031096,0.0003043,0.00031857,9.5511e-06],
[48750,2.1805e+06,2.1704e+06,2.1925e+06,0.00029685,0.00028949,0.00030424,8.2628e-06],
[51250,2.2057e+06,2.193e+06,2.2175e+06,0.00028231,0.00027498,0.00028954,7.0915e-06],
[53750,2.2314e+06,2.2186e+06,2.2442e+06,0.0002678,0.0002608,0.00027504,5.953e-06],
[56250,2.259e+06,2.2444e+06,2.2727e+06,0.00025315,0.00024601,0.00026062,4.9826e-06],
[58750,2.2887e+06,2.2735e+06,2.3033e+06,0.00023867,0.00023181,0.00024572,4.185e-06],
[61250,2.3212e+06,2.3056e+06,2.338e+06,0.0002241,0.00021699,0.00023169,3.5131e-06],
[63750,2.3571e+06,2.3385e+06,2.3749e+06,0.00020952,0.00020255,0.00021702,2.9005e-06],
[66250,2.3949e+06,2.3759e+06,2.415e+06,0.0001953,0.00018817,0.00020254,2.3651e-06],
[68750,2.438e+06,2.4168e+06,2.4592e+06,0.00018099,0.00017366,0.00018823,1.8838e-06],
[71250,2.4837e+06,2.4621e+06,2.5086e+06,0.00016647,0.00015922,0.00017381,1.4542e-06],
[73750,2.5348e+06,2.51e+06,2.562e+06,0.0001516,0.00014474,0.0001594,1.1529e-06],
[76250,2.5908e+06,2.5622e+06,2.6215e+06,0.0001375,0.00013076,0.00014454,8.6251e-07],
[78750,2.6556e+06,2.6219e+06,2.6876e+06,0.00012298,0.0001159,0.00013037,6.2556e-07],
[81250,2.7256e+06,2.6895e+06,2.7603e+06,0.00010867,0.0001013,0.00011553,4.1966e-07],
[83750,2.8049e+06,2.7631e+06,2.8462e+06,9.4164e-05,8.6859e-05,0.00010129,2.9364e-07],
[86250,2.8947e+06,2.8512e+06,2.9432e+06,7.9323e-05,7.2377e-05,8.7046e-05,1.7608e-07],
[88750,2.9973e+06,2.9436e+06,3.0541e+06,6.4644e-05,5.7607e-05,7.2224e-05,9.9334e-08],
[91250,3.1202e+06,3.0561e+06,3.184e+06,5.0362e-05,4.3453e-05,5.765e-05,4.8309e-08],
[93750,3.2618e+06,3.1878e+06,3.3358e+06,3.6105e-05,2.8436e-05,4.3565e-05,1.5298e-08],
[96250,3.4222e+06,3.3441e+06,3.5114e+06,2.2029e-05,1.4291e-05,2.9408e-05,3.7029e-09],
[98750,3.6054e+06,3.5227e+06,3.6783e+06,7.1355e-06,1.5613e-06,1.5243e-05,2.3962e-10],])
if options.A5:
#x,N_mean,N_min,N_max,q_mean,q_min,q_max,Q_max
data = np.array([
[1250,3.619e+05,8918,7.9712e+05,0.0010021,0.00017955,0.001585,39.744],
[3750,1.0909e+06,8.1947e+05,1.3215e+06,0.0013428,0.0012158,0.0015387,38.601],
[6250,1.4278e+06,1.3165e+06,1.5309e+06,0.0012151,0.00097281,0.0014001,38.361],
[8750,1.53e+06,1.4328e+06,1.6546e+06,0.001147,0.00089554,0.0013547,37.825],
[11250,1.5416e+06,1.429e+06,1.6859e+06,0.0011332,0.0008849,0.0013364,36.779],
[13750,1.5158e+06,1.4051e+06,1.6878e+06,0.0011489,0.00091201,0.0013329,35.373],
[16250,1.4954e+06,1.384e+06,1.6386e+06,0.0011667,0.00093384,0.0013389,33.595],
[18750,1.4606e+06,1.3643e+06,1.5949e+06,0.0011937,0.00094267,0.0013531,31.617],
[21250,1.4328e+06,1.3424e+06,1.5689e+06,0.0012208,0.001004,0.0013729,29.621],
[23750,1.4017e+06,1.3192e+06,1.5327e+06,0.0012504,0.0010469,0.0014016,27.568],
[26250,1.3721e+06,1.2872e+06,1.5025e+06,0.0012757,0.0010689,0.0014265,25.957],
[28750,1.351e+06,1.2621e+06,1.4725e+06,0.0013092,0.0011136,0.0014584,23.621],
[31250,1.3192e+06,1.2396e+06,1.4348e+06,0.0013414,0.0011444,0.0014804,21.843],
[33750,1.2893e+06,1.2179e+06,1.406e+06,0.0013761,0.0011585,0.0014989,20.081],
[36250,1.2651e+06,1.2029e+06,1.3636e+06,0.0014143,0.0012063,0.0015224,17.839],
[38750,1.232e+06,1.1845e+06,1.3467e+06,0.00148,0.0012409,0.0017218,15.268],
[41250,1.2015e+06,1.1576e+06,1.2575e+06,0.0014802,0.0013333,0.0015753,13.513],
[43750,1.1874e+06,1.1379e+06,1.2476e+06,0.0014928,0.0013516,0.0016143,12.013],
[46250,1.169e+06,1.1104e+06,1.2367e+06,0.0015339,0.0013754,0.001671,10.481],
[48750,1.1426e+06,1.0807e+06,1.2262e+06,0.0016014,0.0014019,0.0017374,8.3152],
[51250,1.1058e+06,1.0392e+06,1.1898e+06,0.0016687,0.001452,0.0018008,6.1913],
[53750,1.0677e+06,1.0072e+06,1.1592e+06,0.0017757,0.0015356,0.0020891,4.4044],
[56250,1.014e+06,9.6914e+05,1.1225e+06,0.001886,0.0016167,0.0022956,2.2276],
[58750,9.7705e+05,9.617e+05,1.035e+06,0.0018537,0.0017904,0.0021797,0.0036676],
[61250,9.748e+05,9.63e+05,9.8723e+05,0.0017411,0.0016804,0.0018283,0.0015423],
[63750,9.9444e+05,9.7833e+05,1.0079e+06,0.0016276,0.0015713,0.001692,0.00091574],
[66250,1.0255e+06,1.0078e+06,1.0438e+06,0.0015172,0.0014609,0.0015759,0.00062522],
[68750,1.0656e+06,1.0448e+06,1.0858e+06,0.0014061,0.001349,0.0014638,0.00042488],
[71250,1.11e+06,1.0888e+06,1.1342e+06,0.0012935,0.0012371,0.0013503,0.00028731],
[73750,1.1597e+06,1.1356e+06,1.186e+06,0.001178,0.0011249,0.0012386,0.00021198],
[76250,1.2134e+06,1.186e+06,1.2423e+06,0.0010685,0.0010165,0.0011229,0.00015782],
[78750,1.274e+06,1.2427e+06,1.3035e+06,0.00095566,0.00090092,0.0010127,0.00011363],
[81250,1.3379e+06,1.3052e+06,1.3691e+06,0.00084453,0.00078734,0.00089811,7.546e-05],
[83750,1.4086e+06,1.3716e+06,1.445e+06,0.00073179,0.00067526,0.00078665,5.2317e-05],
[86250,1.4873e+06,1.4494e+06,1.5294e+06,0.00061648,0.00056304,0.00067593,3.1047e-05],
[88750,1.5761e+06,1.5297e+06,1.6251e+06,0.00050237,0.0004486,0.0005615,1.7476e-05],
[91250,1.6824e+06,1.6269e+06,1.7379e+06,0.0003914,0.00033855,0.00044755,8.5425e-06],
[93750,1.8065e+06,1.7412e+06,1.8724e+06,0.00028062,0.00022185,0.00033801,2.7783e-06],
[96250,1.9517e+06,1.88e+06,2.0349e+06,0.0001711,0.00011134,0.00022772,7.0808e-07],
[98750,2.126e+06,2.0454e+06,2.1977e+06,5.5348e-05,1.2068e-05,0.00011773,5.022e-08], ])
if options.A3 or options.A5:
G_x = data[:,0]
G_Nmean = data[:,1]
G_Nmin = data[:,2]
G_Nmax = data[:,3]
G_qmean = data[:,4]
G_qmin = data[:,5]
G_qmax = data[:,6]
G_Qmax = data[:,7]
# plot GLADS data
lw = 3 # lineweight to use
ax1.plot(G_x/1000.0, G_Nmin/1.0e6, 'g--', linewidth=lw)
ax1.plot(G_x/1000.0, G_Nmean/1.0e6, 'g-', linewidth=lw, label='GLADS mean/range')
ax1.plot(G_x/1000.0, G_Nmax/1.0e6, 'g--', linewidth=lw)
ax2.plot(G_x/1000.0, G_qmin, 'g--', linewidth=lw)
ax2.plot(G_x/1000.0, G_qmean, 'g-', linewidth=lw, label='GLADS mean/range')
ax2.plot(G_x/1000.0, G_qmax, 'g--', linewidth=lw)
ax3.plot(G_x/1000.0, G_Qmax, 'g--', linewidth=lw, label='GLADS max')
# panel 1: effective pressure
plt.sca(ax1)
#plt.plot(x, N[ind] / 1.0e6, '.-g') # this just plots the centerline profile
plt.plot(allx/1000.0, N_mean / 1.0e6, '-b', label='MPAS mean/range')
plt.plot(allx/1000.0, N_min / 1.0e6, '--b')
plt.plot(allx/1000.0, N_max / 1.0e6, '--b')
plt.xlabel('X-position (km)')
plt.ylabel('effective pressure (MPa)')
plt.xlim( (0, 100.0) )
plt.grid(True)
plt.legend(loc='best')
# panel 2: sheet flux
plt.sca(ax2)
#plt.plot(x, np.absolute(h[ind] * u[ind]), '.-g') # this plots centerline profile
plt.plot(allx/1000.0, np.absolute(q_mean), '-b', label='MPAS mean/range')
plt.plot(allx/1000.0, np.absolute(q_min), '--b')
plt.plot(allx/1000.0, np.absolute(q_max), '--b')
plt.xlabel('X-position (km)')
plt.ylabel('sheet water flux (m^2/s)')
plt.grid(True)
plt.legend(loc='best')
# panel 3: channel flux
plt.sca(ax3)
try:
channelDischarge = f.variables['channelDischarge'][time_slice,:]
allxEdge=np.unique(xEdge[:])
allxEdge2=100000.0 - (allxEdge - 100000.0)
Q_max = np.zeros(allxEdge.shape)
Q_sum = np.zeros(allxEdge.shape)
for i in range(len(allxEdge)):
Q_max[i] = np.absolute(channelDischarge[ xEdge == allxEdge[i] ]).max()
Q_sum[i] = np.absolute(channelDischarge[ xEdge == allxEdge[i] ]).sum()
plt.plot(allxEdge/1000.0, np.absolute(Q_max), 'bo', label='MPAS max')
plt.plot(allxEdge/1000.0, np.absolute(Q_sum), 'bx', label='MPAS sum')
except:
print("Skipping plotting of channel output.")
plt.xlabel('X-position (km)')
plt.ylabel('channel water flux (m^3/s)')
plt.grid(True)
plt.legend(loc='best')
############################
# plot how close to SS we are
############################
fig = plt.figure(2, facecolor='w')
# thickness over time
ax1 = fig.add_subplot(331)
# plot n random cells
n=50 # set number of random cells. More is more expensive
#for i in ind: # this version plots cells along the centerline only
for i in random.sample(range(nCells), min(nCells, n)):
plt.plot(days/365.0, f.variables['waterThickness'][:,i])
plt.plot(days[:]/365.0, f.variables['waterThickness'][:,:].max(axis=1), linewidth=2) #max
plt.xlabel('Years since start')
plt.ylabel('water thickness (m)')
plt.grid(True)
ax = fig.add_subplot(334)
# max change in thickness
delH = (f.variables['waterThickness'][-1,:] - f.variables['waterThickness'][-2,:]) / ((days[-1] - days[-2])/365.0)
plt.plot(f.variables['waterThickness'][-1,:], delH, '.')
plt.ylabel('dh/dt (m/yr)')
plt.xlabel('water thickness (m)')
plt.title('Rate of change on final time step')
plt.grid(True)
ax = fig.add_subplot(337)
# max change in thickness
plt.plot(f.variables['waterThickness'][-1,:], delH/f.variables['waterThickness'][-1,:]*100, '.')
plt.ylabel('dh/dt (%)')
plt.xlabel('water thickness (m)')
plt.title('Rate of change on final time step. Goal=0.1%?')
plt.grid(True)
# ----
# Effective pressure over time
ax = fig.add_subplot(332, sharex=ax1)
# plot n random cells
#for i in ind: # this version plots cells along the centerline only
for i in random.sample(range(nCells), min(nCells, n)):
plt.plot(days/365.0, f.variables['effectivePressure'][:,i]/1.0e6)
plt.plot(days[:]/365.0, f.variables['effectivePressure'][:,:].max(axis=1)/1.0e6, linewidth=2) #max
plt.xlabel('Years since start')
plt.ylabel('effective pressure (MPa)')
plt.grid(True)
ax = fig.add_subplot(335)
# max change in N
delN = (f.variables['effectivePressure'][-1,:] - f.variables['effectivePressure'][-2,:]) / ((days[-1] - days[-2])/365.0)
plt.plot(f.variables['effectivePressure'][-1,:], delN, '.')
plt.ylabel('dN/dt (Pa/yr)')
plt.xlabel('N (Pa)')
plt.title('Rate of change on final time step')
plt.grid(True)
ax = fig.add_subplot(338)
# max change in N
plt.plot(f.variables['effectivePressure'][-1,:], delN/f.variables['effectivePressure'][-1,:]*100.0, '.')
plt.ylabel('dN/dt (%)')
plt.xlabel('N (Pa)')
plt.title('Rate of change on final time step. Goal=0.1%?')
plt.grid(True)
# ---
# Channel area over time
ax = fig.add_subplot(333, sharex=ax1)
# plot n largest channel edges
largestChannels = np.argpartition(f.variables['channelArea'][-1,:], -n)[-n:] # get indices to the n largest channels
for i in largestChannels:
plt.plot(days/365.0, f.variables['channelArea'][:,i])
plt.plot(days[:]/365.0, f.variables['channelArea'][:,:].max(axis=1), linewidth=2) #max
plt.xlabel('Years since start')
plt.ylabel('Channel area (m^2)')
plt.grid(True)
ax = fig.add_subplot(336)
# max change in channel area
delS = (f.variables['channelArea'][-1,:] - f.variables['channelArea'][-2,:]) / ((days[-1] - days[-2])/365.0)
plt.plot(f.variables['channelArea'][-1,:], delS, '.')
plt.ylabel('dS/dt (m^2/yr)')
plt.xlabel('channel area (m^2)')
plt.title('Rate of change on final time step')
plt.grid(True)
ax = fig.add_subplot(339)
# max change in channel area, %
plt.plot(f.variables['channelArea'][-1,:], delS/f.variables['channelArea'][-1,:]*100.0, '.')
plt.ylabel('dS/dt (%)')
plt.xlabel('S (m^2)')
plt.title('Rate of change on final time step. Goal=0.1%?')
plt.grid(True)
############################
# plot time steps for various CFL conditions
############################
try:
dtA=f.variables['deltatSGHadvec'][:]
dtD=f.variables['deltatSGHdiffu'][:]
dtP=f.variables['deltatSGHpressure'][:]
fig = plt.figure(3, facecolor='w')
plt.plot(days/365.0, dtA, label='A')
plt.plot(days/365.0, dtD, label='D')
plt.plot(days/365.0, dtP, label='P')
try:
dtAc=f.variables['deltatSGHadvecChannel'][:]
dtDc=f.variables['deltatSGHdiffuChannel'][:]
plt.plot(days/365.0, dtAc, '--', label='Ac')
plt.plot(days/365.0, dtDc, '--', label='Dc')
except:
print("Skipping plot of channel maximum time steps. Channel may be disabled or an error occurred.")
plt.legend()
plt.xlabel('Time (yr)')
plt.ylabel('Allowable time step (s)')
except:
print("Skipping plot of maximum time steps due to missing information or error.")
print("plotting complete")
plt.draw()
if options.saveimages:
print("Saving figures to files.")
plt.savefig('GL-position.png')
if options.hidefigs:
print("Plot display disabled with -n argument.")
else:
plt.show()
|
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import math
import numpy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.cifar as models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig, closefig
import dataset_utils
model_names = ("vgg16","vgg16_bn")
parser = argparse.ArgumentParser(description='PyTorch cifar10 / svhn / catsdogs Gambler training')
parser.add_argument('-d', '--dataset', default='cifar10', type=str, choices=['cifar10', 'svhn', 'catsdogs'])
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
# Training
parser.add_argument('-t', '--train', dest='evaluate', action='store_false',
help='train the model. When evaluate is true, training is ignored and trained models are loaded.')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=200, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[25,50,75,100,125,150,175,200,225,250,275],
help='Multiply learning rate by gamma at the scheduled epochs (default: 25,50,75,100,125,150,175,200,225,250,275)')
parser.add_argument('--gamma', type=float, default=0.5, help='LR is multiplied by gamma on schedule (default: 0.5)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-o', '--rewards', dest='rewards', type=float, nargs='+', default=[2.2],
                    metavar='o', help='The reward o for a correct prediction; abstention has a reward of 1. All provided values are stored in a list and a separate run is made for each.')
parser.add_argument('--pretrain', type=int, default=0,
                    help='Number of pretraining epochs using the cross entropy loss, so that learning can always get started. Note that it defaults to 100 when dataset==cifar10 and reward<6.1, which reproduces the results in the paper.')
parser.add_argument('--coverage', type=float, nargs='+',default=[100.,99.,98.,97.,95.,90.,85.,80.,75.,70.,60.,50.,40.,30.,20.,10.],
help='the expected coverages used to evaluated the accuracies after abstention')
# Save
parser.add_argument('-s', '--save', default='save', type=str, metavar='PATH',
help='path to save checkpoint (default: save)')
#parser.add_argument('--resume', default='', type=str, metavar='PATH',
# help='path to load the saved model (default: none)')
# Architecture
parser.add_argument('--arch', '-a', metavar='ARCH', default='vgg16_bn',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: vgg16_bn) Please edit the code to train with other architectures')
# Miscs
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate trained models on validation set, following the paths defined by "save", "arch" and "rewards"')
#Device options
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# set the abstention definitions
expected_coverage = args.coverage
reward_list = args.rewards
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
num_classes=10 # this is modified later in main() when defining the specific datasets
def main():
# make path for the current archtecture & reward
if not resume_path and not os.path.isdir(save_path):
mkdir_p(save_path)
# Dataset
print('==> Preparing dataset %s' % args.dataset)
global num_classes
if args.dataset == 'cifar10':
dataset = datasets.CIFAR10
num_classes = 10
input_size = 32
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
trainset = dataset(root='./data', train=True, download=True, transform=transform_train)
testset = dataset(root='./data', train=False, download=True, transform=transform_test)
elif args.dataset == 'svhn':
dataset = datasets.SVHN
num_classes = 10
input_size = 32
transform_train = transforms.Compose([
transforms.RandomCrop(32,padding=4),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = dataset(root='./data/svhn', train=True, download=True, transform=transform_train)
testset = dataset(root='./data/svhn', train=False, download=True, transform=transform_test)
elif args.dataset == 'catsdogs':
num_classes = 2
input_size = 64
transform_train = transforms.Compose([
transforms.RandomCrop(64, padding=6),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# use the "ImageFolder" datasets
        assert os.path.exists("./data/cats_dogs/train") and os.path.exists("./data/cats_dogs/test"), "Please download the 'cats vs dogs' dataset and place it under 'data/cats_dogs/train' and 'data/cats_dogs/test'"
trainset = datasets.ImageFolder('./data/cats_dogs/train')
testset = datasets.ImageFolder('./data/cats_dogs/test')
# resizing the images to 64 and center crop them, so that they become 64x64 squares
trainset = dataset_utils.resized_dataset(trainset, transform_train, resize=64)
testset = dataset_utils.resized_dataset(testset, transform_test, resize=64)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
# End of Dataset
# Model
print("==> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](num_classes=num_classes+1, input_size = input_size)
if use_cuda: model = torch.nn.DataParallel(model.cuda())
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    # Always define the cross-entropy criterion so it can be passed to train()/test()
    # even when no pretraining is requested; it is only used during the pretraining
    # epochs, after which the gambler's loss is computed explicitly in train() and test().
    criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=state['lr'], momentum=args.momentum, weight_decay=args.weight_decay)
title = args.dataset + '-' + args.arch + ' o={:.2f}'.format(reward)
logger = Logger(os.path.join(save_path, 'log.txt'), title=title)
logger.set_names(['Epoch', 'Learning Rate', 'Train Loss', 'Test Loss', 'Train Err.', 'Test Err.'])
# if only for evaluation, the training part will not be executed
if args.evaluate:
print('\nEvaluation only')
assert os.path.isfile(resume_path), 'no model exists at "{}"'.format(resume_path)
model = torch.load(resume_path)
if use_cuda: model = model.cuda()
test(testloader, model, criterion, 0, use_cuda, evaluation = True)
return
# train
for epoch in range(0, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\n'+save_path)
print('Epoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
# save the model
filepath = os.path.join(save_path, "{:d}".format(epoch+1) + ".pth")
torch.save(model, filepath)
# delete the last saved model if exist
last_path = os.path.join(save_path, "{:d}".format(epoch) + ".pth")
if os.path.isfile(last_path): os.remove(last_path)
# append logger file
logger.append([epoch+1, state['lr'], train_loss, test_loss, 100-train_acc, 100-test_acc])
filepath = os.path.join(save_path, "{:d}".format(args.epochs) + ".pth")
torch.save(model, filepath)
last_path = os.path.join(save_path, "{:d}".format(args.epochs-1) + ".pth")
if os.path.isfile(last_path): os.remove(last_path)
logger.plot(['Train Loss', 'Test Loss'])
savefig(os.path.join(save_path, 'logLoss.eps'))
closefig()
logger.plot(['Train Err.', 'Test Err.'])
savefig(os.path.join(save_path, 'logErr.eps'))
closefig()
logger.close()
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
bar = Bar('Processing', max=len(trainloader))
for batch_idx, (inputs, targets) in enumerate(trainloader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
# compute output
outputs = model(inputs)
if epoch >= args.pretrain:
outputs = F.softmax(outputs, dim=1)
outputs, reservation = outputs[:,:-1], outputs[:,-1]
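            # Gambler's loss, as implemented just below: the extra (num_classes+1)-th
            # softmax output is treated as the "reservation" (abstention) mass, and
            # the loss maximizes the expected log return
            #     log( p_true_class + reservation / o ),
            # where o (= reward) is the payoff for a correct prediction.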
gain = torch.gather(outputs, dim=1, index=targets.unsqueeze(1)).squeeze()
doubling_rate = (gain.add(reservation.div(reward))).log()
loss = -doubling_rate.mean()
else:
loss = criterion(outputs[:,:-1], targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(trainloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def test(testloader, model, criterion, epoch, use_cuda, evaluation = False):
global best_acc
# whether to evaluate uncertainty, or confidence
if evaluation:
evaluate(testloader, model, use_cuda)
return
# switch to test mode
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
end = time.time()
bar = Bar('Processing', max=len(testloader))
abstention_results = []
for batch_idx, (inputs, targets) in enumerate(testloader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
# compute output
with torch.no_grad():
outputs = model(inputs)
if epoch >= args.pretrain:
outputs = F.softmax(outputs, dim=1)
outputs, reservation = outputs[:,:-1], outputs[:,-1]
predictions = outputs.data.max(1)[1]
# analyze the accuracy at different abstention level
abstention_results.extend(zip(list( reservation.cpu() ),list( predictions.eq(targets.data).cpu() )))
# calculate loss
gain = torch.gather(outputs, dim=1, index=targets.unsqueeze(1)).squeeze()
doubling_rate = (gain.add(reservation.div(reward))).log()
loss = -doubling_rate.mean()
else:
loss = criterion(outputs[:,:-1], targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
top5.update(prec5.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(testloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
if epoch >= args.pretrain:
# sort the abstention results according to their reservations, from high to low
abstention_results.sort(key = lambda x: x[0])
# get the "correct or not" list for the sorted results
sorted_correct = list(map(lambda x: int(x[1]), abstention_results))
size = len(sorted_correct)
        print('accuracy at coverage ', end='')
for coverage in expected_coverage:
covered_correct = sorted_correct[:round(size/100*coverage)]
print('{:.0f}: {:.3f}, '.format(coverage, sum(covered_correct)/len(covered_correct)*100.), end='')
print('')
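        # Example of the printed line (numbers are hypothetical):
        #   accuracy at coverage 100: 93.2, 99: 94.1, 98: 94.8, ...
        # i.e. the accuracy over the x% of test samples with the lowest
        # reservation (only the most confident predictions are kept).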
return (losses.avg, top1.avg)
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
# this function is used to evaluate the accuracy on validation set and test set per coverage
def evaluate(testloader, model, use_cuda):
model.eval()
abortion_results = [[],[]]
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets
output = model(inputs)
output = F.softmax(output,dim=1)
output, reservation = output[:,:-1], (output[:,-1]).cpu()
values, predictions = output.data.max(1)
predictions = predictions.cpu()
abortion_results[0].extend(list( reservation.numpy() ))
abortion_results[1].extend(list( predictions.eq(targets.data).numpy() ))
    def shuffle_list(lst, seed=10):
        # a fixed seed gives both lists the same permutation, keeping the
        # (reservation, correctness) pairs aligned
        random.seed(seed)
        random.shuffle(lst)
shuffle_list(abortion_results[0]); shuffle_list(abortion_results[1])
abortion, correct = torch.tensor(abortion_results[0]), torch.tensor(abortion_results[1])
# use 2000 data points as the validation set (randomly shuffled)
abortion_valid, abortion = abortion[:2000], abortion[2000:]
correct_valid, correct = correct[:2000], correct[2000:]
results_valid=[]; results=[]
    # In the paper we used the bisection method to find the threshold for rejecting
    # data points that achieves the desired coverage. We later noticed this was not
    # necessary: it is simpler to sort the data points by their abortion rate, so
    # the code below does that instead.
#bisection_method(abortion_valid, correct_valid, results_valid)
#bisection_method(abortion, correct, results)
#
sort_by_abortion_to_find_results(abortion_valid, correct_valid, results_valid)
sort_by_abortion_to_find_results(abortion, correct, results)
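    # Each entry appended by sort_by_abortion_to_find_results() is a
    # (coverage_fraction, accuracy) pair, e.g. (0.90, 0.971) would mean 97.1%
    # accuracy on the 90% most confident predictions (hypothetical numbers).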
    with open(os.path.join(os.path.dirname(resume_path), 'coverage VS err.csv'), 'w') as f:
        for idx in range(len(results)):
            f.write('valid{:.0f},{:.2f},{:.3f}\n'.format(expected_coverage[idx], results_valid[idx][0]*100., (1-results_valid[idx][1])*100))
            f.write('test{:.0f},{:.2f},{:.3f}\n'.format(expected_coverage[idx], results[idx][0]*100., (1-results[idx][1])*100))
return
def bisection_method(abortion, correct, results):
upper = 1.
while True:
mask_up = abortion <= upper
passed_up = torch.sum(mask_up.long()).item()
if passed_up/len(correct)*100.<expected_coverage[0]: upper *= 2.
else: break
test_thres = 1.
for coverage in expected_coverage:
mask = abortion <= test_thres
passed = torch.sum(mask.long()).item()
# bisection method start
lower = 0.
while math.fabs(passed/len(correct)*100.-coverage) > 0.3:
if passed/len(correct)*100.>coverage:
upper = min(test_thres,upper)
test_thres=(test_thres+lower)/2
elif passed/len(correct)*100.<coverage:
lower = max(test_thres,lower)
test_thres=(test_thres+upper)/2
mask = abortion <= test_thres
passed = torch.sum(mask.long()).item()
# bisection method end
masked_correct = correct[mask]
correct_data = torch.sum(masked_correct.long()).item()
passed_acc = correct_data/passed
results.append((passed/len(correct), passed_acc))
#print('coverage {:.0f} done'.format(coverage))
# this function is simpler and can replace the "bisection_method"
def sort_by_abortion_to_find_results(abortion, correct, results):
abortion, correct = list(abortion), list(correct)
correct = [corr for _, corr in sorted(zip(abortion, correct), key=lambda pair: pair[0])]
for coverage in expected_coverage:
passed = round(coverage/100.*len(correct))
passed_correct = correct[:passed]
passed_acc = sum([int(corr) for corr in passed_correct])/passed
results.append((passed/len(correct), passed_acc))
# this function is used to organize all data and write into one file
def save_data():
save=open('{}_{}.csv'.format(args.dataset,args.arch),'w')
save.write('0,100val.,100test,99v,99t,98v,98t,97v,97t,95v,95t,90v,90t,85v,85t,80v,80t,75v,75t,70v,70t,60v,60t,50v,50t,40v,40t,30v,30t,20v,20t,10v,10t\n')
for reward in reward_list:
save_path = base_path+'o={:.2f}'.format(reward)
if os.path.isfile(os.path.join(save_path, 'coverage VS err.csv')):
f = open(os.path.join(save_path, 'coverage VS err.csv') ,'r')
else: print('no file exists at {}'.format(os.path.join(save_path, 'coverage VS err.csv'))); continue
content = f.read()
lines = content.split('\n')
save.write('o={:.2f},'.format(reward))
for idx in range(len(expected_coverage)):
save.write('{},'.format(lines[2*idx].split(',')[2]))
save.write('{},'.format(lines[2*idx+1].split(',')[2]))
save.write('\n')
f.close()
if __name__ == '__main__':
base_path = os.path.join(args.save, args.dataset, args.arch)+'_gambling_'
baseLR = state['lr']
base_pretrain = args.pretrain
resume_path = ""
for i in range(len(reward_list)):
state['lr'] = baseLR
reward = reward_list[i]
save_path = base_path+'o={:.2f}'.format(reward)
if args.evaluate: resume_path= os.path.join(save_path,'{:d}.pth'.format(args.epochs))
args.pretrain = base_pretrain
# default the pretraining epochs to 100 to reproduce the results in the paper
if args.pretrain==0 and reward <6.1 and args.dataset=='cifar10': args.pretrain=100
main()
if args.evaluate: save_data()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @author Mikael Wikström
# https://github.com/leakim/GameOfLifeKata
#
import unittest
from resources import GameOfLife, move
from game_of_life import *
class game_of_life_tests(unittest.TestCase):
# TODO: add more tests here
def test_next(self):
self.assertEqual( iter_next(GameOfLife.BLOCK_0),
GameOfLife.BLOCK_1)
self.assertEqual( iter_next(GameOfLife.THREE_0),
GameOfLife.THREE_1)
self.assertEqual( iter_next(GameOfLife.GLIDER_0),
GameOfLife.GLIDER_1)
def main():
unittest.main()
if __name__ == "__main__":
main()
|
import numpy as np
import bokeh.plotting as bp
bp.output_file("bokeh2.html")
x = np.linspace(0, 2 * np.pi, 1024)
y = np.cos(x)
fig = bp.figure(title="simple line example", x_axis_label="x", y_axis_label="y")
fig.line(x, y, legend="cos(x)", color="red", line_width=2)
bp.show(fig)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import asyncio
import binascii
import json
import logging
import shutil
import sys
import threading
import re
import os
import time
from collections import OrderedDict
from typing import Optional
import coloredlogs
from aioconsole import ainput, get_standard_streams
from ph4_walkingpad.cmd_helper import Ph4Cmd
from ph4_walkingpad.pad import Scanner, WalkingPad, WalkingPadCurStatus, WalkingPadLastStatus, Controller
from ph4_walkingpad.profile import Profile, calories_walk2_minute, calories_rmrcb_minute
from ph4_walkingpad.analysis import StatsAnalysis
from ph4_walkingpad.upload import upload_record, login as svc_login
logger = logging.getLogger(__name__)
coloredlogs.CHROOT_FILES = []
coloredlogs.install(level=logging.INFO)
class WalkingPadControl(Ph4Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = None
self.args_src = None
self.ctler = None # type: Optional[Controller]
self.profile = None
self.analysis = None # type: Optional[StatsAnalysis]
self.loaded_margins = []
self.streams = None
self.worker_thread = None
self.stats_thread = None
self.stats_loop = None
self.stats_task = None
self.stats_collecting = False
self.asked_status = False
self.asked_status_beep = False
self.last_speed = 0
self.last_speed_change_rec = None # type: Optional[WalkingPadCurStatus]
self.last_time_steps = (0, 0)
self.cur_cal = 0
self.cur_cal_net = 0
self.calorie_acc = []
self.calorie_acc_net = []
async def disconnect(self):
logger.debug("Disconnecting coroutine")
if self.ctler:
await self.ctler.disconnect()
async def connect(self, address):
if self.args.no_bt:
return
self.ctler = Controller(address=address, do_read_chars=False)
self.ctler.log_messages_info = self.args.cmd
self.ctler.handler_cur_status = self.on_status
self.ctler.handler_last_status = self.on_last_record
await self.ctler.run()
await asyncio.sleep(1.5) # needs to sleep a bit
await self.ctler.ask_profile()
await asyncio.sleep(1.5)
await self.ask_beep()
await asyncio.sleep(1.0)
async def work(self):
self.worker_loop = asyncio.new_event_loop()
self.worker_thread = threading.Thread(
target=self.looper, args=(self.worker_loop,)
)
self.worker_thread.daemon = True
self.worker_thread.start()
address = await self.scan_address()
if self.args.scan:
return
await self.connect(address)
# await asyncio.wait_for(self.connect(address), None, loop=self.worker_loop)
if self.args.stats:
self.start_stats_fetching()
res = None
if not self.args.cmd:
sys.argv = [self.args_src[0]]
res = await self.entry()
sys.argv = self.args_src
if self.args.stats:
try:
while self.cmd_running:
await asyncio.sleep(1)
except KeyboardInterrupt as e:
print("Terminating")
self.stats_collecting = False
if not self.args.no_bt:
await asyncio.sleep(1)
logger.info('Terminating')
return res
async def scan_address(self):
if self.args.no_bt:
return
address = self.args.address
if address and Scanner.is_darwin():
logger.warning('Specifying address does not work on OSX 12+. '
'If connection cannot be made, omit -a parameter')
if address:
return address
if not address or self.args.scan:
scanner = Scanner()
await scanner.scan(timeout=self.args.scan_timeout)
if scanner.walking_belt_candidates:
candidates = scanner.walking_belt_candidates
logger.info("WalkingPad candidates: %s" % (candidates,))
if self.args.scan:
return None
if self.args.address_filter:
candidates = [x for x in candidates if str(x.address).startswith(self.args.address_filter)]
return candidates[0] if candidates else None
return None
def init_stats_fetcher(self):
self.stats_loop = asyncio.new_event_loop()
self.stats_thread = threading.Thread(
target=self.looper, args=(self.stats_loop,)
)
self.stats_thread.daemon = True
self.stats_thread.start()
def start_stats_fetching(self):
if self.args.no_bt:
return
if self.stats_thread is None:
self.init_stats_fetcher()
logger.info("Starting stats fetching")
self.stats_collecting = True
self.submit_coro(self.stats_fetcher(), self.stats_loop)
async def stats_fetcher(self):
while self.stats_collecting:
try:
# await asyncio.wait_for(self.ctler.ask_stats(), None, loop=self.worker_loop)
await self.ctler.ask_stats()
await asyncio.sleep(max(500, self.args.stats or 0)/1000.0)
except Exception as e:
logger.info("Error in ask stats: %s" % (e,))
async def entry(self):
aux = " (bluetooth disabled)" if self.args.no_bt else ""
self.intro = (
"-" * self.get_term_width()
+ "\n WalkingPad controller" + aux + "\n"
+ "\n"
+ "-" * self.get_term_width()
)
# if self.args.no_bt:
# self.cmdloop()
# else:
await self.acmdloop()
def on_status(self, sender, status: WalkingPadCurStatus):
# Calories computation with respect to the last segment of the same speed.
# TODO: refactor to analysis file
if self.last_time_steps[0] > status.time \
or self.last_time_steps[1] > status.steps:
logger.debug('Resetting calorie measurements')
self.last_time_steps = (status.time, status.steps)
self.last_speed_change_rec = None
self.last_speed = 0
self.cur_cal = 0
self.calorie_acc = []
self.calorie_acc_net = []
if not self.last_speed_change_rec:
self.compute_initial_cal(status)
el_time, el_dist = 0, 0
if self.profile:
el_time = status.time - self.last_speed_change_rec.time
el_dist = status.dist - self.last_speed_change_rec.dist
ccal, ccal_net, ccal_sum, ccal_net_sum = None, None, None, None
if el_time > 0 and el_dist > 0:
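            # status.time appears to be in seconds, so el_time/60 gives minutes for
            # the per-minute calorie helpers; the pad reports speed in tenths of
            # km/h, hence the /10. below. The "net" value subtracts the resting
            # metabolic burn for the same interval.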
ccal = (el_time/60) * calories_walk2_minute(self.last_speed_change_rec.speed/10., self.profile.weight, 0.00)
ccal_net = ccal - (el_time/60) * calories_rmrcb_minute(self.profile.weight, self.profile.height,
self.profile.age, self.profile.male)
ccal_sum = sum(self.calorie_acc) + ccal
ccal_net_sum = sum(self.calorie_acc_net) + ccal_net
self.cur_cal = ccal
self.cur_cal_net = ccal_net
# on speed change accumulate calories and move to a new speed
# with a new status.
if self.last_speed_change_rec.speed != status.speed:
if self.cur_cal:
self.calorie_acc.append(self.cur_cal)
if self.cur_cal_net:
self.calorie_acc_net.append(self.cur_cal_net)
self.cur_cal = 0
self.cur_cal_net = 0
self.last_speed_change_rec = status
ccal_str = ''
if ccal:
ccal_str = ', cal: %6.2f, net: %6.2f, total: %6.2f, total net: %6.2f' \
% (ccal, ccal_net, ccal_sum, ccal_net_sum)
if self.asked_status:
self.asked_status = False
print(str(status) + ccal_str)
elif self.asked_status_beep:
self.asked_status_beep = False
print(str(status) + ccal_str)
if not self.args.json_file:
return
js = OrderedDict()
js["time"] = status.time
js["dist"] = status.dist
js["steps"] = status.steps
js["speed"] = status.speed
js["app_speed"] = status.app_speed
js["belt_state"] = status.belt_state
js["controller_button"] = status.controller_button
js["manual_mode"] = status.manual_mode
js["raw"] = binascii.hexlify(status.raw).decode('utf8')
js["rec_time"] = status.rtime
js["pid"] = self.profile.pid if self.profile else None
js["ccal"] = round(ccal*1000)/1000 if ccal else None
js["ccal_net"] = round(ccal_net*1000)/1000 if ccal_net else None
js["ccal_sum"] = round(ccal_sum*1000)/1000 if ccal_sum else None
js["ccal_net_sum"] = round(ccal_net_sum*1000)/1000 if ccal_net_sum else None
with open(self.args.json_file, 'a+') as fh:
json.dump(js, fh)
fh.write("\n")
def on_last_record(self, sender, status: WalkingPadLastStatus):
print(status)
def load_profile(self):
if not self.args.profile:
return
with open(self.args.profile, 'r') as fh:
dt = json.load(fh)
self.profile = Profile.from_data(dt)
def save_profile(self):
if not self.args.profile or not self.profile:
return
tmp_fname = self.args.profile + '.tmp'
bak_fname = self.args.profile + '.backup'
with open(tmp_fname, 'w+') as fh:
json.dump(self.profile.dump(), fh, indent=2)
if not os.path.exists(bak_fname):
shutil.copy(self.args.profile, bak_fname)
os.rename(tmp_fname, self.args.profile)
def login(self):
if not self.args.profile or not self.profile:
raise ValueError('Could not login, no profile')
res = svc_login(self.profile.email, password=self.profile.password, password_md5=self.profile.password_md5)
tok = res[0]
if not tok:
raise ValueError('Could not login')
self.profile.token = tok
self.save_profile()
return res
def load_stats(self):
"""Compute last unfinished walk from the stats file (segments of the same speed)"""
if not self.args.json_file:
return
self.analysis = StatsAnalysis(profile=self.profile, stats_file=self.args.json_file)
accs = self.analysis.load_last_stats(5)
self.loaded_margins = self.analysis.loaded_margins
self.calorie_acc = accs[0]
self.calorie_acc_net = accs[1]
if accs[0] or accs[1]:
self.poutput('Calories burned so far this walk: %7.2f kcal, %7.2f kcal net'
% (sum(self.calorie_acc), sum(self.calorie_acc_net)))
def compute_initial_cal(self, status: WalkingPadCurStatus):
self.last_speed_change_rec = status # default
mgs = self.loaded_margins
if not mgs or not mgs[0] or not mgs[0][0] \
or status.time < mgs[0][0]['time'] \
or status.dist < mgs[0][0]['dist'] \
or status.rtime < mgs[0][0]['rec_time'] \
or status.steps < mgs[0][0]['steps']:
return
nmg = mgs[0][0]
time_to_rtime = abs((status.time - nmg['time']) - (status.rtime - nmg['rec_time']))
# Last statistics from the file is probably too old, do not count it to the current walk.
if time_to_rtime > 5*60:
return
# Last speed change. Calories for block will be counted from this onward.
self.last_speed_change_rec = WalkingPadCurStatus()
self.last_speed_change_rec.speed = status.speed
self.last_speed_change_rec.dist = nmg['dist']
self.last_speed_change_rec.time = nmg['time']
self.last_speed_change_rec.rtime = nmg['rec_time']
self.last_speed_change_rec.steps = nmg['steps']
# if '_segment_time' in nmg:
# self.last_speed_change_rec.dist -= mgs[0]['_segment_dist']
# self.last_speed_change_rec.time -= mgs[0]['_segment_time']
# self.last_speed_change_rec.rtime -= mgs[0]['_segment_rtime']
# self.last_speed_change_rec.steps -= mgs[0]['_segment_steps']
async def main(self):
logger.debug('App started')
parser = self.argparser()
self.args_src = sys.argv
self.args = parser.parse_args(args=self.args_src[1:])
if self.args.debug:
coloredlogs.install(level=logging.DEBUG)
elif self.args.info or self.args.scan:
coloredlogs.install(level=logging.INFO)
else:
coloredlogs.install(level=logging.WARNING)
self.load_profile()
try:
self.load_stats()
except Exception as e:
logger.debug("Stats loading failed: %s" % (e,))
try:
await self.work()
except Exception as e:
logger.error('Exception in the main entry point: %s' % (e,), exc_info=e)
finally:
await self.disconnect()
def argparser(self):
parser = argparse.ArgumentParser(description='ph4 WalkingPad controller')
parser.add_argument('-d', '--debug', dest='debug', action='store_const', const=True,
help='enables debug mode')
parser.add_argument('--no-bt', dest='no_bt', action='store_const', const=True,
help='Do not use Bluetooth, no belt interaction enabled')
parser.add_argument('--info', dest='info', action='store_const', const=True,
help='enables info logging mode')
parser.add_argument('-s', '--scan', dest='scan', action='store_const', const=True,
help='Scan all BLE and exit')
parser.add_argument('--cmd', dest='cmd', action='store_const', const=True,
help='Non-interactive mode')
parser.add_argument('--stats', dest='stats', type=int, default=None,
help='Enable periodic stats collecting, interval in ms')
parser.add_argument('-j', '--json-file', dest='json_file',
help='Write stats to a JSON file')
parser.add_argument('-p', '--profile', dest='profile',
help='Profile JSON file')
parser.add_argument('-a', '--address', dest='address',
                            help='Walking pad address (if none, the scanner is used). On OSX 12+ you have to scan instead, so do not use this option there')
parser.add_argument('--filter', dest='address_filter',
help='Walking pad address filter, if scanning and multiple devices are found')
parser.add_argument('--scan-timeout', dest='scan_timeout', type=float, default=3.0,
help='Scan timeout in seconds, double')
return parser
async def stop_belt(self, to_standby=False):
await self.ctler.stop_belt()
if to_standby:
await asyncio.sleep(1.5)
await self.ctler.switch_mode(WalkingPad.MODE_STANDBY)
async def start_belt(self, manual=True):
if manual:
await self.ctler.switch_mode(WalkingPad.MODE_MANUAL)
await asyncio.sleep(1.5)
await self.ctler.start_belt()
else:
await self.ctler.switch_mode(WalkingPad.MODE_AUTOMAT)
await asyncio.sleep(1.5)
await self.ctler.start_belt()
async def switch_mode(self, mode):
if mode == 'manual':
await self.ctler.switch_mode(WalkingPad.MODE_MANUAL)
elif mode == 'auto':
await self.ctler.switch_mode(WalkingPad.MODE_AUTOMAT)
elif mode == 'standby':
await self.ctler.switch_mode(WalkingPad.MODE_STANDBY)
else:
print('Unknown mode: %s. Supported: manual, auto, standby' % (mode,))
async def ask_beep(self):
self.asked_status_beep = True
await self.ctler.cmd_162_3_7()
async def ask_status(self):
self.asked_status = True
await self.ctler.ask_stats()
async def upload_record(self, line):
if not self.profile or not self.profile.did or not self.profile.token:
self.poutput("Profile is not properly loaded (token, did)")
return
mt_int = re.match(r'^(\d+)$', line.strip())
# re_float = r'[+-]?(?:[0-9]+(?:[.][0-9]*)?|[.][0-9]+)'
# mt_data = re.match(r'^(?:(%s)\s*m)\s+(?:(\d+)\s*s)\s+(?:(%s)\s*m)\s+(?:(%s)\s*m)\s+(?:(%s)\s*m)\s+$')
cal_acc, timex, dur, dist, steps = 0, 0, 0, 0, 0
if mt_int:
idx = int(line)
mm = [x for x in self.loaded_margins[idx] if '_segment_dist' in x and x['_segment_dist'] > 0]
oldest = min(mm, key=lambda x: x['rec_time'])
            newest = max(mm, key=lambda x: x['rec_time'])
cal_acc = 0
for r in mm:
el_time = r['_segment_rtime']
ccal = (el_time / 60) * calories_walk2_minute(r['speed'] / 10., self.profile.weight, 0.00)
ccal_net = ccal - (el_time / 60) * calories_rmrcb_minute(self.profile.weight, self.profile.height,
self.profile.age, self.profile.male)
cal_acc += ccal_net
timex = int(oldest['rec_time'])
dur, dist, steps = newest['time'], newest['dist'], newest['steps']
elif ',' in line:
p = [x.strip() for x in line.split(',')]
dist, dur, steps, timex, cal_acc = int(p[0]), int(p[1]), int(p[2]), int(p[3]), int(p[4])
else:
dist = int(await self.ask_prompt("Distance: "))
dur = int(await self.ask_prompt("Duration: "))
steps = int(await self.ask_prompt("Steps: "))
timex = int(await self.ask_prompt("Time: (0 for current)"))
cal_acc = int(await self.ask_prompt("Calories: "))
if timex == 0:
timex = int(time.time() - dur - 60)
if steps == 0:
self.poutput('No record to upload')
return
# cal, timex, dur, distance, step
self.poutput('Adding record: Duration=%5d, distance=%5d, steps=%5d, cal=%5d, time: %d'
% (dur, dist, steps, cal_acc, timex))
res = await self.ask_yn()
if not res:
return
self.poutput('Uploading...')
r = upload_record(self.profile.token, self.profile.did,
cal=int(cal_acc), timex=timex, dur=dur, distance=dist, step=steps)
r.raise_for_status()
self.poutput('Response: %s, data: %s' % (r, r.json()))
async def ask_prompt(self, prompt="", is_int=False):
ret_val = None
self.switch_reader(False)
self.remove_reader()
try:
while True:
await asyncio.sleep(0)
ret_val = await ainput(prompt, loop=self.loop)
if not is_int:
break
if is_int and re.match(r'^(\d+)$', ret_val):
break
except Exception as e:
logger.warning('Exception: %s' % (e,))
finally:
await asyncio.sleep(0)
self.switch_reader(True)
self.reset_reader()
return ret_val
async def ask_yn(self):
ret_val = None
self.switch_reader(False)
self.remove_reader()
if not self.streams:
self.streams = await get_standard_streams(use_stderr=False, loop=self.loop)
try:
while True:
await asyncio.sleep(0)
yn = await ainput("Do you confirm? (y/n): ", loop=self.loop, streams=self.streams)
yn2 = yn.lower().strip()
if yn2 in ['y', 'n']:
ret_val = yn2 == 'y'
break
except Exception as e:
logger.warning('Exception: %s' % (e,))
finally:
await asyncio.sleep(0)
self.remove_reader()
self.switch_reader(True)
self.reset_reader()
return ret_val
def do_quit(self, line):
"""Terminate the shell"""
        self.stats_collecting = False  # stop periodic stats collection on exit
self.cmd_running = False
print("Terminating, please wait...")
return super().do_quit(line)
def do_tasks(self, arg):
"""Prints current tasks"""
        for task in asyncio.all_tasks(loop=self.loop):  # Task.all_tasks() was removed in Python 3.9
print(task)
def do_ask_stats(self, line):
"""Asks for the latest status, does not print anything"""
self.submit_coro(self.ask_status())
def do_ask_beep(self, line):
"""Asks for the latest status, does not print anything"""
self.submit_coro(self.ask_beep())
def do_ask_last(self, line):
"""Asks for the latest record, does not print anything"""
self.submit_coro(self.ctler.ask_hist())
def do_speed(self, line):
"""Change speed of the running belt. Enter as speed * 10, e.g. 20 for 2.0 km/h"""
self.submit_coro(self.ctler.change_speed(int(line)))
def do_start(self, line):
"""Start the belt in the manual mode"""
self.submit_coro(self.start_belt(True))
def do_stop(self, line):
"""Stop the belt, switch to standby"""
self.submit_coro(self.stop_belt(True))
def do_switch_mode(self, line):
"""Switch mode of the belt"""
self.submit_coro(self.switch_mode(line.strip()))
def do_status(self, line):
"""Print the last received status"""
print(self.ctler.last_status)
def do_s(self, line):
"""Print the next received status"""
self.asked_status = True
def do_profile(self, line):
"""Prints currently loaded profile"""
print(self.profile)
def do_upload(self, line):
"""Uploads records to the app server. Format: dist, dur, steps, timex, cal_acc.
Alternatively, use upload <margin_index>"""
self.submit_coro(self.upload_record(line), loop=self.loop)
def do_login(self, line):
"""Login to the walkingpad service, refreshes JWT token for record upload (logs of the application)
Preferably, use `adb logcat | grep 'user='` when logging in with the Android app to capture JWT"""
try:
self.poutput('Logging in...')
r = self.login()
self.poutput('Logged in. Response: %s' % (r,))
except Exception as e:
logger.error('Could not login: %s' % (e,), exc_info=e)
def do_margins(self, line):
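        """Prints the loaded margins; pass a margin index to show only that one"""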
target = int(line) if line else None
for i, m in enumerate(self.loaded_margins):
if target is not None and i != target:
continue
print('='*80, 'Margin %2d, records: %3d' % (i, len(m)))
print(json.dumps(self.analysis.remove_records([m])[0], indent=2))
print('- ' * 40, 'Margin %2d, records: %3d' % (i, len(m)))
print('Num margins: %s' % (len(self.loaded_margins),))
do_q = do_quit
do_Q = do_quit
def main():
try:
loop = asyncio.get_running_loop()
    except RuntimeError:  # no event loop is running yet
loop = asyncio.new_event_loop()
loop.set_debug(True)
br = WalkingPadControl()
loop.run_until_complete(br.main())
# Alternatively
# asyncio.run(br.main())
if __name__ == '__main__':
main()
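
# Example invocations (a sketch; assumes this script is saved as e.g. walkingpad_ctl.py,
# the flag names come from the argparse setup above):
#   python walkingpad_ctl.py --scan                            # scan BLE devices and exit
#   python walkingpad_ctl.py -a AA:BB:CC:DD:EE:FF --stats 750 -j stats.json
#   python walkingpad_ctl.py -p profile.json --cmd             # non-interactive mode with a profile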
|
from django.contrib import admin
from django.urls import path,include
from .views import index,details, enrollment, announcements
app_name = 'my_project.courses'
urlpatterns = [
path('', index, name='index'),
# path('<int:id>/', details, name='details'),
path('<slug:slug>/', details, name='details'),
path('<slug:slug>/inscricao/', enrollment, name='enrollment'),
path('<slug:slug>/anuncios/', announcements, name='announcements'),
] |
from datetime import date
dados = dict()
dados['Name'] = str(input('Name: '))
ano = int(input('Year of birth: '))
dados['Age'] = date.today().year - ano
dados['CTPS'] = int(input('Work card (CTPS) number (0 if none): '))
if dados['CTPS'] != 0:
    dados['Hiring year'] = int(input('Year of hiring: '))
    dados['Salary'] = float(input('Salary: R$'))
    dados['Retirement age'] = dados['Hiring year'] + 35 - ano
print('-='*50)
for k, v in dados.items():
    print(f'{k} has the value {v}')
|
from fpdf import FPDF
class PDF(FPDF):
def footer(self):
# Position at 1.5 cm from bottom
self.set_y(-15)
# Arial italic 8
self.set_font('Arial', 'I', 8)
# Text color in gray
self.set_text_color(128)
# Page number
self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')
def save_pdf(medicines):
pdf = PDF()
# Add a page
pdf.add_page()
# setting style and size of font for the pdf
pdf.set_font("Arial", size=12)
pdf.cell(
200, 10,
txt="Generated Prscription",
ln=1, align='C'
)
for medic in medicines:
if ('Medicine Name' in medicines[medic]):
# create a cell
pdf.cell(
200, 10,
ln=1, align='C',
txt=medic
)
pdf.cell(
200, 10,
ln=2,
txt="Medicine Name: " + medicines[medic]["Medicine Name"],
)
if "Instruction" in medicines[medic]:
pdf.cell(
200, 10,
ln=2,
txt="Instructions: " + medicines[medic]["Instruction"]
)
else:
pdf.cell(
200, 10,
ln=2,
txt="Instructions*: No Instructions given"
)
# save the pdf with name .pdf
pdf.output("Prescription.pdf")
|
import argparse
import torchani
import torch
import timeit
import tqdm
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('filename',
help='Path to the xyz file.')
parser.add_argument('-d', '--device',
help='Device of modules and tensors',
default=('cuda' if torch.cuda.is_available() else 'cpu'))
parser.add_argument('--tqdm', dest='tqdm', action='store_true',
help='Whether to use tqdm to display progress')
parser = parser.parse_args()
# set up benchmark
device = torch.device(parser.device)
builtins = torchani.neurochem.Builtins()
nnp = torch.nn.Sequential(
builtins.aev_computer,
builtins.models[0],
builtins.energy_shifter
).to(device)
# load XYZ files
class XYZ:
def __init__(self, filename):
with open(filename, 'r') as f:
lines = f.readlines()
# parse lines
self.mols = []
atom_count = None
species = []
coordinates = []
state = 'ready'
for i in lines:
i = i.strip()
if state == 'ready':
atom_count = int(i)
state = 'comment'
elif state == 'comment':
state = 'atoms'
else:
s, x, y, z = i.split()
x, y, z = float(x), float(y), float(z)
species.append(s)
coordinates.append([x, y, z])
atom_count -= 1
if atom_count == 0:
state = 'ready'
species = builtins.consts.species_to_tensor(species) \
.to(device)
coordinates = torch.tensor(coordinates, device=device)
self.mols.append((species, coordinates))
coordinates = []
species = []
def __len__(self):
return len(self.mols)
def __getitem__(self, i):
return self.mols[i]
xyz = XYZ(parser.filename)
print(len(xyz), 'conformations')
print()
# test batch mode
print('[Batch mode]')
species, coordinates = torch.utils.data.dataloader.default_collate(list(xyz))
coordinates.requires_grad_(True)
start = timeit.default_timer()
energies = nnp((species, coordinates))[1]
mid = timeit.default_timer()
print('Energy time:', mid - start)
force = -torch.autograd.grad(energies.sum(), coordinates)[0]
print('Force time:', timeit.default_timer() - mid)
print()
# test single mode
print('[Single mode]')
start = timeit.default_timer()
if parser.tqdm:
xyz = tqdm.tqdm(xyz)
for species, coordinates in xyz:
species = species.unsqueeze(0)
coordinates = coordinates.unsqueeze(0).detach().requires_grad_(True)
energies = nnp((species, coordinates))[1]
force = -torch.autograd.grad(energies.sum(), coordinates)[0]
print('Time:', timeit.default_timer() - start)
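
# Example invocation (a sketch; assumes this benchmark is saved as benchmark.py):
#   python benchmark.py molecules.xyz -d cuda --tqdm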
|
#!/usr/bin/env python3
from p3lib.pconfig import ConfigManager
from ogsolar.libs.ads1115 import ADS1115ADC
from ogsolar.libs.gpio_control import GPIOControl
class ADCCalibration(object):
"""@brief Responsible for calibrating the voltage and current measurements."""
#Choose slowest ADS1115 conversion rate for highest accuracy.
ADC_SPS = 8
CAL_FILE = "ogsolar_adc_cal.cfg"
CODES_PER_VOLT_ATTR = "CODES_PER_VOLT"
NO_CURRENT_CODES_ATTR = "NO_CURRENT_CODES"
CODES_PER_AMP_ATTR = "CODES_PER_AMP"
DEFAULT_CAL_CONFIG = {
CODES_PER_VOLT_ATTR: 2209,
CODES_PER_AMP_ATTR: 785,
NO_CURRENT_CODES_ATTR: 0
}
def __init__(self, uio, options):
"""@brief Constructor."""
self._uio = uio
self._options = options
self._ads1115ADC = ADS1115ADC(ADS1115ADC.ADDR_PIN_LOW_SLAVE_ADDR, simulateHardware=self._options.sim_ads1115)
self._gpioControl = GPIOControl(self._uio, self._options)
self._calConfigManager = ConfigManager(self._uio, ADCCalibration.CAL_FILE, ADCCalibration.DEFAULT_CAL_CONFIG)
self._calConfigManager.load()
def calibrate(self):
"""@brief calibrate the voltage and current measurements."""
self._calibrateVoltage()
self._calibrateCurrent()
def _calibrateVoltage(self):
"""@brief Calibrate the voltage measurements."""
self._gpioControl.selectACMains(True)
self._gpioControl.spare(False)
self._gpioControl.setLoad1(GPIOControl.DC_POWER_OFF)
self._gpioControl.setLoad2(GPIOControl.DC_POWER_OFF)
voltage = ConfigManager.GetFloat(self._uio, "Enter the voltage measured")
#Set the ADC that measures the voltage
fsVoltage = 4.096
samplesPerSecond = ADCCalibration.ADC_SPS
self._ads1115ADC.setADC2(fsVoltage, samplesPerSecond)
codes = self._ads1115ADC.getADC2(singleEnded=True)
codesPerVolt = int(round(float(codes)/float(voltage)))
self._calConfigManager.addAttr(ADCCalibration.CODES_PER_VOLT_ATTR, codesPerVolt)
self._calConfigManager.store()
def _calibrateCurrent(self):
"""@brief Calibrate the current measurements."""
self._gpioControl.selectACMains(True)
self._gpioControl.spare(False)
self._gpioControl.setLoad1(GPIOControl.DC_POWER_OFF)
self._gpioControl.setLoad2(GPIOControl.DC_POWER_OFF)
#Set the ADC that measures the current
fsVoltage = 1.024
samplesPerSecond = ADCCalibration.ADC_SPS
self._ads1115ADC.setADC0(fsVoltage, samplesPerSecond)
noCurrentValue = self._ads1115ADC.getSignedValue(0, singleEnded=True, bitCount=16)
self._uio.debug("noCurrentValue = {}".format(noCurrentValue))
self._gpioControl.setLoad1(GPIOControl.DC_POWER_ON)
self._gpioControl.setLoad2(GPIOControl.DC_POWER_ON)
self._uio.info("Switched Load 1 and Load 2 ON.")
self._uio.info("Ensure you have a load current.")
amps = ConfigManager.GetFloat(self._uio, "Enter the amps measured")
if amps < 1:
raise Exception("The current measured must be at least 1 amp when calibrating the ADC for current measurement.")
withCurrentValue = self._ads1115ADC.getSignedValue(0, singleEnded=True, bitCount=16)
#ampCodes = self._ads1115ADC.getADC0(singleEnded=False)
self._uio.debug("No current ADC0 value = {}/0x{:x}".format(noCurrentValue, noCurrentValue))
self._uio.debug("ADC0 value = {}/0x{:x}".format(withCurrentValue, withCurrentValue))
deltaCodes = withCurrentValue-noCurrentValue
self._uio.debug("deltaCodes = {}".format(deltaCodes))
codesPerAmp = int(round(deltaCodes/amps))
self._uio.debug("codesPerAmp = {}".format(codesPerAmp))
self._gpioControl.setLoad1(GPIOControl.DC_POWER_OFF)
self._gpioControl.setLoad2(GPIOControl.DC_POWER_OFF)
self._calConfigManager.addAttr(ADCCalibration.NO_CURRENT_CODES_ATTR, noCurrentValue)
self._calConfigManager.addAttr(ADCCalibration.CODES_PER_AMP_ATTR, codesPerAmp)
self._calConfigManager.store()
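
# Typical use (a sketch; `uio` and `options` come from the surrounding ogsolar
# application, and `options` is assumed to carry the `sim_ads1115` flag used above):
#   cal = ADCCalibration(uio, options)
#   cal.calibrate()   # runs _calibrateVoltage() followed by _calibrateCurrent()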
|
from django.apps import AppConfig
class ProjectilpatternConfig(AppConfig):
name = 'projectilPattern'
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from flask import Flask, render_template, request
from google.auth.transport import requests
from google.cloud import ndb
import google.oauth2.id_token
firebase_request_adapter = requests.Request()
# [START gae_python39_datastore_store_and_fetch_user_times]
# [START gae_python3_datastore_store_and_fetch_user_times]
# [END gae_python3_datastore_store_and_fetch_user_times]
# [END gae_python39_datastore_store_and_fetch_user_times]
app = Flask(__name__)
class Visit(ndb.Model):
timestamp = ndb.DateTimeProperty()
# [START gae_python39_datastore_store_and_fetch_user_times]
# [START gae_python3_datastore_store_and_fetch_user_times]
def store_time(email, dt):
client = ndb.Client()
with client.context():
ancestor_key = ndb.Key("User", email)
visit = Visit(parent=ancestor_key,
timestamp=dt)
        visit.put()
def fetch_times(email, limit):
client = ndb.Client()
with client.context():
ancestor_key = ndb.Key('User', email)
query = Visit.query(ancestor=ancestor_key).order(-Visit.timestamp)
times = [f"{v.timestamp}" for v in query]
print(times)
return times
# [END gae_python3_datastore_store_and_fetch_user_times]
# [END gae_python39_datastore_store_and_fetch_user_times]
# [START gae_python39_datastore_render_user_times]
# [START gae_python3_datastore_render_user_times]
@app.route('/')
def root():
# Verify Firebase auth.
id_token = request.cookies.get("token")
error_message = None
claims = None
times = None
user_data = None
if id_token:
try:
# Verify the token against the Firebase Auth API. This example
# verifies the token on each page load. For improved performance,
# some applications may wish to cache results in an encrypted
# session store (see for instance
# http://flask.pocoo.org/docs/1.0/quickstart/#sessions).
claims = google.oauth2.id_token.verify_firebase_token(
id_token, firebase_request_adapter)
user_data = claims['firebase']['sign_in_attributes']
# print(user_data)
email = user_data['email']
store_time(email, datetime.datetime.now())
times = fetch_times(email, 10)
except ValueError as exc:
# This will be raised if the token is expired or any other
# verification checks fail.
error_message = str(exc)
return render_template(
'index.html',
user_data=user_data, error_message=error_message, times=times)
# [END gae_python3_datastore_render_user_times]
# [END gae_python39_datastore_render_user_times]
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
# Flask's development server will automatically serve static files in
# the "static" directory. See:
# http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
# App Engine itself will serve those files as configured in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
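
# Local run sketch (assuming this file is saved as main.py, with Flask, google-auth and
# google-cloud-ndb installed and Datastore credentials available, e.g. via
# GOOGLE_APPLICATION_CREDENTIALS):
#   python main.py   # serves on http://127.0.0.1:8080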
|