import socket
import sys
import threading
def scanTarget(target_ip, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(1)  # set the timeout before connecting so closed/filtered ports fail fast
    result = s.connect_ex((target_ip, port))
if result == 0:
print(f"port {port} is open.")
s.close()
def main():
if len(sys.argv) == 2:
target = socket.gethostbyname(sys.argv[1])
else:
        print(
            f"Invalid number of arguments: 1 argument required but {len(sys.argv)-1} given.")
print("Usage: python3 simple-port-scanner.py <ip>")
print("Example: python3 simple-port-scanner.py 192.168.0.1")
exit()
threads = []
try:
        for port in range(1, 1001):  # scan TCP ports 1-1000 (port 0 is reserved)
threads.append(threading.Thread(
target=scanTarget, args=(target, port)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
except KeyboardInterrupt:
print("Exiting...")
except socket.gaierror:
print("Host name could not be resolved.")
except socket.error:
print("Cannot connect to target.")
exit()
if __name__ == "__main__":
main()
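# Note: the scanner above spawns one thread per port, which is fine for 1000
# ports but does not scale to larger ranges. A bounded alternative (a sketch,
# not part of the original script) using only the standard library would be:
#
#   from concurrent.futures import ThreadPoolExecutor
#
#   with ThreadPoolExecutor(max_workers=100) as pool:
#       pool.map(lambda port: scanTarget(target, port), range(1, 1001))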
|
from .build_file_tree import build_file_tree # noqa
|
#
# PySNMP MIB module INTEL-L3LINK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INTEL-L3LINK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:54:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
mib2ext, = mibBuilder.importSymbols("INTEL-GEN-MIB", "mib2ext")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, ObjectIdentity, Counter32, Bits, ModuleIdentity, MibIdentifier, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, Gauge32, Integer32, Counter64, iso = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "ObjectIdentity", "Counter32", "Bits", "ModuleIdentity", "MibIdentifier", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "Gauge32", "Integer32", "Counter64", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
l3Link = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 12))
interface = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 12, 1))
l3lkInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1), )
if mibBuilder.loadTexts: l3lkInterfaceTable.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceTable.setDescription('')
l3lkInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1, 1), ).setIndexNames((0, "INTEL-L3LINK-MIB", "l3lkInterfaceIfIndex"))
if mibBuilder.loadTexts: l3lkInterfaceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceEntry.setDescription('')
l3lkInterfaceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l3lkInterfaceIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceIfIndex.setDescription('Reference to ifIndex')
l3lkInterfaceVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l3lkInterfaceVlanId.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceVlanId.setDescription('The id of the vlan this table is based on')
l3lkInterfaceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l3lkInterfaceStatus.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceStatus.setDescription('The status of the layer 3 interface which is the same as ifAdminStatus')
l3lkInterfaceCreateObj = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l3lkInterfaceCreateObj.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceCreateObj.setDescription('Create a non-existing table entry. If the entry already exists, genError is returned. Binary format: [status(1 byte),vlanId(2 bytes)]')
l3lkInterfaceDeleteObj = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 12, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("delete", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l3lkInterfaceDeleteObj.setStatus('mandatory')
if mibBuilder.loadTexts: l3lkInterfaceDeleteObj.setDescription('Delete an existing table entry')
mibBuilder.exportSymbols("INTEL-L3LINK-MIB", l3lkInterfaceTable=l3lkInterfaceTable, l3Link=l3Link, l3lkInterfaceEntry=l3lkInterfaceEntry, l3lkInterfaceVlanId=l3lkInterfaceVlanId, l3lkInterfaceStatus=l3lkInterfaceStatus, l3lkInterfaceDeleteObj=l3lkInterfaceDeleteObj, l3lkInterfaceIfIndex=l3lkInterfaceIfIndex, l3lkInterfaceCreateObj=l3lkInterfaceCreateObj, interface=interface)
|
"""Tests for DAgger."""
import glob
import os
import gym
import numpy as np
import pytest
from stable_baselines.common.policies import BasePolicy
import tensorflow as tf
from imitation.algorithms import dagger
from imitation.policies import serialize
from imitation.util import rollout, util
ENV_NAME = 'CartPole-v1'
EXPERT_POLICY_PATH = "tests/data/expert_models/cartpole_0/policies/final/"
def test_beta_schedule():
one_step_sched = dagger.linear_beta_schedule(1)
three_step_sched = dagger.linear_beta_schedule(3)
for i in range(10):
assert np.allclose(one_step_sched(i), 1 if i == 0 else 0)
assert np.allclose(three_step_sched(i), (3-i)/3 if i <= 2 else 0)
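# As the assertions above encode, linear_beta_schedule(N) yields
# beta_i = max(0, (N - i) / N): full expert control at round 0, decaying
# linearly to zero from round N onwards.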
def test_traj_collector(tmpdir):
env = gym.make(ENV_NAME)
robot_calls = 0
def get_random_act(obs):
nonlocal robot_calls
robot_calls += 1
return env.action_space.sample()
collector = dagger.InteractiveTrajectoryCollector(
env=env,
get_robot_act=get_random_act,
beta=0.5,
save_dir=tmpdir)
collector.reset()
zero_action = np.zeros((), dtype='int')
obs, rew, done, info = collector.step(zero_action)
assert rew != 0
assert not done
assert isinstance(info, dict)
# roll out ~5 episodes
for i in range(999):
_, _, done, _ = collector.step(zero_action)
if done:
collector.reset()
# there is a <10^(-12) probability this fails by chance; we should be calling
# robot with 50% prob each time
assert 388 <= robot_calls <= 612
file_paths = glob.glob(os.path.join(tmpdir, 'dagger-demo-*.npz'))
assert len(file_paths) >= 5
trajs = map(dagger._load_trajectory, file_paths)
nonzero_acts = sum(np.sum(traj.acts != 0) for traj in trajs)
assert nonzero_acts == 0
def make_trainer(tmpdir):
env = gym.make(ENV_NAME)
env.seed(42)
return dagger.DAggerTrainer(env, tmpdir, dagger.linear_beta_schedule(1),
optimiser_kwargs=dict(lr=1e-3))
def test_trainer_makes_progress(tmpdir, session):
venv = util.make_vec_env(ENV_NAME, 10)
trainer = make_trainer(tmpdir)
with pytest.raises(dagger.NeedsDemosException):
trainer.extend_and_update()
assert trainer.round_num == 0
pre_train_rew_mean = rollout.mean_return(
trainer.bc_trainer.policy, venv, sample_until=rollout.min_episodes(20),
deterministic_policy=True)
# checking that the initial policy is poor can be flaky; sometimes the
# randomly initialised policy performs very well, and it's not clear why
# assert pre_train_rew_mean < 100
with serialize.load_policy('ppo2', EXPERT_POLICY_PATH, venv) as expert_policy:
for i in range(5):
# roll out a few trajectories for dataset, then train for a few steps
collector = trainer.get_trajectory_collector()
for _ in range(10):
obs = collector.reset()
done = False
while not done:
(expert_action, ), _, _, _ = expert_policy.step(
obs[None], deterministic=True)
obs, _, done, _ = collector.step(expert_action)
trainer.extend_and_update(n_epochs=10)
# make sure we're doing better than a random policy would
post_train_rew_mean = rollout.mean_return(
trainer.bc_trainer.policy, venv, sample_until=rollout.min_episodes(20),
deterministic_policy=True)
assert post_train_rew_mean > 150, \
f'pre-train mean {pre_train_rew_mean}, post-train mean ' \
f'{post_train_rew_mean}'
def test_trainer_save_reload(tmpdir, session):
with tf.variable_scope('orig_trainer'):
trainer = make_trainer(tmpdir)
trainer.round_num = 3
trainer.save_trainer()
with tf.variable_scope('reload_trainer'):
new_trainer = trainer.reconstruct_trainer(tmpdir)
assert new_trainer.round_num == trainer.round_num
# old trainer and reloaded trainer should have same variable values
old_vars = trainer._sess.run(trainer._vars)
new_vars = trainer._sess.run(new_trainer._vars)
assert len(old_vars) == len(new_vars)
for v1, v2 in zip(old_vars, new_vars):
assert np.allclose(v1, v2)
# also those values should be different from a newly created trainer
with tf.variable_scope('third_trainer'):
third_trainer = make_trainer(tmpdir)
third_vars = trainer._sess.run(third_trainer._vars)
all_same = True
for v1, v3 in zip(old_vars, third_vars):
all_same = all_same and np.allclose(v1, v3)
assert not all_same
def test_policy_save_reload(tmpdir, session):
# just make sure the methods run; we already test them in test_bc.py
policy_path = os.path.join(tmpdir, 'policy.pkl')
trainer = make_trainer(tmpdir)
trainer.save_policy(policy_path)
pol = trainer.reconstruct_policy(policy_path)
assert isinstance(pol, BasePolicy)
|
from flask import Flask,jsonify,request,make_response
import json
import requests
import os
import sys
from pixivpy3 import *
# Credentials are read from the environment instead of being hard-coded.
username = os.environ.get("PIXIV_USERNAME")
password = os.environ.get("PIXIV_PASSWORD")
aapi = AppPixivAPI()
aapi.login(username, password)
papi = PixivAPI()
papi.login(username, password)
class mod:
@staticmethod
def add_two_dim_dict(dictionary,root,**value):
"""ๆฐๅขไฟฎๆนไบ็ถญๅญๅ
ธ\n
dictionary : ่ฆๆฐๅขไฟฎๆน็ๅญๅ
ธ\n
root : ๅญๅ
ธ็ๅญๆ น(ไธ็ถญๅญๅ
ธ็้ต)\n
value : ๅณๅ
ฅๅผ key=value ๅฏ่คๆธ๏ผkey็บ้ต value็บ้ตๅผ
"""
dictionary[root] = {}
for key in value.keys():
dictionary[root][key] = value[key]
return dictionary
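    # Example: mod.add_two_dim_dict({}, "user", name="amy", age=3)
    #   -> {"user": {"name": "amy", "age": 3}}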
@staticmethod
def datetransfer(date):
return date[:4]+'-'+date[4:6]+'-'+date[6:]
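    # Example: mod.datetransfer("20200131") -> "2020-01-31"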
class http:
@staticmethod
def status(message,status_code):
return make_response(jsonify(message), status_code)
class illustDetailNotFound(Exception):
def __init__(self,message):
self.message = message
def __str__(self):
return "illustDetailNotFound ," + self.message
class illustListNotFound(Exception):
def __init__(self,message):
self.message = message
def __str__(self):
return "illustListNotFound ," + self.message
class PIXIV():
"""
illustDetail(self,id)\n
\tๆฅ่ฉข id ็ไฝๅ่ฉณ็ดฐ่ณๆ๏ผid ็บๅฟ
้ ๅผ\n
illustList(userid,type='illust')\n
\tๆฅ่ฉข userid ็ไฝๅๅ่กจ่ฉณ็ดฐ่ณๆ๏ผuserid ็บๅฟ
้ ๅผ\n
illustSearch(word,offset=None)\n
\tๆๅฐ PIXIV ็ไฝๅไธญ้จๅๅ
ๅซ word ็ไฝๅๅ่กจ๏ผword ็บๅฟ
้ ๅผ\n\toffset ็บๅฏ้ธ
"""
def illustDetail(self,illustid):
"""
ๅๅพ id ็ไฝๅ่ฉณ็ดฐ่ณๆ\n
illustDetail(illustid)\n
ๅฆ่ฉฒ id ็ไฝๅๅทฒๅช้คๆไธๅญๅจ่ฉฒไฝๅ id ็ดๆฅๅๅณ็ฉบjson๏ผhttp status code = 200
"""
try:
illustdetail = aapi.illust_detail(illustid)
if 'error' in illustdetail:
raise illustDetailNotFound(f'illust {illustid} not found')
return http.status(illustdetail.illust, 200)
        except illustDetailNotFound:
            return http.status({}, 200)
        except Exception as e:
            return Internal_Server_Error(str(e))
def illustList(self,userid,type='illust'):
"""
ๅๅพ userid ็ไฝๅๅ่กจ่ฉณ็ดฐ่ณๆ\n
illustList(userid)\n
ๅฆ่ฉฒ userid ็กไฝๅๆไธๅญๅจ่ฉฒไฝฟ็จ่
็ดๆฅๅๅณ็ฉบjson๏ผhttp status code = 200
"""
try:
            illustlist = aapi.user_illusts(userid, type=type)
            if not illustlist['illusts']:
                raise illustListNotFound(f'no illust list found for user {userid}')
return http.status(illustlist, 200)
        except illustListNotFound:
            return http.status({}, 200)
        except Exception as e:
            return Internal_Server_Error(str(e))
def illustSearch(self,word):
"""
ๆๅฐ word ็ไฝๅๅ่กจ่ฉณ็ดฐ่ณๆ\n
illustSearch(word,offset=None)\n
word ็บๅฟ
้ ๆๅฐ้้ตๅญ๏ผoffset ็บๅฏ้ธ้ ่จญ0(None):้กฏ็คบ0~29็ญ 30:30~59็ญไพๆญค้กๆจ\n
"""
try:
illust = aapi.search_illust(word, search_target='partial_match_for_tags')
return http.status(illust, 200)
        except Exception as e:
            return Internal_Server_Error(str(e))
def hottag(self):
try:
tag = aapi.trending_tags_illust()
return http.status(tag, 200)
        except Exception as e:
            return Internal_Server_Error(str(e))
app = Flask(__name__)
class api:
version = 'v1'
#GET / response
@app.route(f"/{api.version}")
def home():
    return jsonify({'response': {'status': 200, 'message': 'Welcome to the Pixiv API', 'version': '1.0'}})
#GET /illust/detail data:{illust id:id,}
@app.route(f'/{api.version}/illust/detail/<int:id>')
def illust_detail(id):
pixiv = PIXIV()
return pixiv.illustDetail(id)
#GET /illust/list data:{user id:id,}
@app.route(f'/{api.version}/illust/list/<int:id>')
def illust_list(id):
pixiv = PIXIV()
return pixiv.illustList(id)
#GET /illust/search data:{keyword:word,}
@app.route(f'/{api.version}/illust/search')
def illust_search():
word = request.args.get('keyword')
pixiv = PIXIV()
return pixiv.illustSearch(word)
@app.route(f'/{api.version}/hottag')
def hot_tag():
pixiv = PIXIV()
return pixiv.hottag()
@app.errorhandler(404)
def Page_Not_Found(e):
    return http.status({'status_code': 404, 'message': str(e)}, 404)
@app.errorhandler(500)
def Internal_Server_Error(e):
    if e is None:
        return http.status({'status_code': 500, 'message': '500 Internal Server Error'}, 500)
    return http.status({'status_code': 500, 'message': str(e)}, 500)
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000)
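# Example requests once the server is running (ids and keywords illustrative):
#   curl http://localhost:5000/v1
#   curl http://localhost:5000/v1/illust/detail/<illust-id>
#   curl "http://localhost:5000/v1/illust/search?keyword=<word>"
#   curl http://localhost:5000/v1/hottag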
|
lista_capitais = []
lista_capitais.append('Brasรญlia')
lista_capitais.append('Buenos Aires')
lista_capitais.append('Pequim')
lista_capitais.append('Bogotรก')
print(lista_capitais)
lista_capitais.remove('Buenos Aires')
lista_capitais.pop(2)
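# The list is now ['Brasília', 'Pequim']: remove() deleted 'Buenos Aires' by
# value, and pop(2) removed 'Bogotá', which had shifted to index 2.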
|
"""
*Is-Close*
"""
import jax.numpy as jnp
from ._operator import GeometricProperty
__all__ = ["Count"]
class Count(
jnp.size,
GeometricProperty,
):
pass
|
from enum import IntEnum
from typing import Dict, List, Optional
from app.db.pydantic_objectid import PydanticObjectId
from bson import ObjectId
from pydantic import BaseModel, Field
class SlideStatus(IntEnum):
ERROR = 0
SUCCESS = 1
RUNNING = 2
class Slide(BaseModel):
name: str
slide_id: str
status: SlideStatus
metadata: Optional[Dict]
children: Optional[List[str]]
class DatabaseSlide(Slide):
id: Optional[PydanticObjectId] = Field(alias="_id")
class Config:
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
class DatabaseSlideNoMetadata(BaseModel):
id: Optional[PydanticObjectId] = Field(alias="_id")
name: str
slide_id: str
status: SlideStatus
class Config:
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
class SlideNoMetadata(BaseModel):
name: str
slide_id: str
status: SlideStatus
class CreateSlide(BaseModel):
name: str
slide_id: str
status: SlideStatus
metadata: Optional[Dict]
class UpdateSlide(BaseModel):
slide_id: Optional[str]
name: Optional[str]
status: Optional[SlideStatus]
metadata: Optional[Dict]
children: Optional[List[str]]
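# A minimal usage sketch (field values are illustrative):
#
#   slide = CreateSlide(name="scan-01", slide_id="abc123",
#                       status=SlideStatus.RUNNING)
#   patch = UpdateSlide(status=SlideStatus.SUCCESS)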
|
__author__ = 'dylanjf'
import os
import pickle
import logging
from scipy.optimize import nnls
import numpy as np
from re import sub
from sklearn.model_selection import GridSearchCV, KFold
logger = logging.getLogger(__name__)
N_TREES = 300
INITIAL_PARAMS = {
'LogisticRegression': {'C': 1, 'penalty': 'l2', 'class_weight': None},
'RandomForestClassifier': {
'n_estimators': N_TREES, 'n_jobs': 4,
'min_samples_leaf': 2, 'bootstrap': False,
'max_depth': 30, 'min_samples_split': 5, 'max_features': .1
},
'GradientBoostingClassifier': {
'n_estimators': N_TREES, 'learning_rate': .08, 'max_features': 4,
'min_samples_leaf': 2, 'min_samples_split': 5, 'max_depth': 7
}
}
PARAM_GRID = {
'LogisticRegression': {'C': [1, 2, 3, 5, 10, 100],
'penalty': ['l1', 'l2']},
'ElasticNet': {'alpha': [.1, 1.0],
'l1_ratio': [0, .05, .5, .95, 1.0]},
'GradientBoostingClassifier': {
'learning_rate': [.01, .03, .05, .08], 'max_depth': [3, 4, 7], 'max_features': [4, 8, 12]
}
}
class EnsembleGeneralization(object):
"""
Implement stacking to combine several models.
The base (stage 0) models can be either combined through
simple averaging (fastest), or combined using a stage 1 generalizer
(requires computing CV predictions on the train set).
See http://ijcai.org/Past%20Proceedings/IJCAI-97-VOL2/PDF/011.pdf:
"Stacked generalization: when does it work?", Ting and Witten, 1997
    Expects models to be a list of (model, feature_set) tuples.
    If for_model_select is true, use grid search on globally defined
    hyperparams to get the best feature combination.
    """
def __init__(self, models, score_func, stack, isContinuous=True,
for_model_select=False, pickle_root_dir=None):
self.models = models
self.for_model_select = for_model_select
self.stack = stack
self.score_func = score_func
self.generalizer = MLR()
self.isContinuous = isContinuous
self.pickle_root_dir = pickle_root_dir
def _combine_predictions(self, X_train, X_cv, y):
"""
helper function for CV loop.
assuming CV predictions are aligned for each model,
return both the mean of the 3 models (for if stack = False)
and the result of the Non Negative Least Squares.
"""
if self.isContinuous:
mean_preds = np.mean(X_cv, axis=1)
stack_preds = None
else:
mean_preds = np.mean(X_cv, axis=1)
mean_preds = [0 if x < .5 else 1 for x in mean_preds]
stack_preds = None
if self.stack:
self.generalizer.fit(X_train, y)
stack_preds = self.generalizer.predict(X_cv)
if not self.isContinuous:
stack_preds = np.add(np.sum(np.multiply(
self.generalizer.coef_, X_cv), axis=1), self.generalizer.intercept_)
stack_cutoff = np.add(np.sum(self.generalizer.coef_), self.generalizer.intercept_) / 2.0
stack_preds = [0 if x < stack_cutoff else 1 for x in np.asarray(stack_preds.reshape(-1))]
return mean_preds, stack_preds
def _get_model_cv_preds(self, model, X_train, y_train):
"""
helper function for CV loop
return CV model predictions
"""
        # random_state is dropped: it only has an effect when shuffle=True
        kfold = KFold(n_splits=5)
        stack_preds = []
        indexes_cv = []
        for stage0, stack in kfold.split(X_train):
model.fit(np.asarray(X_train[stage0]), np.asarray(y_train[stage0]).reshape(-1))
try:
stack_preds.extend(list(model.predict_proba(
X_train[stack])[:, 1]))
except AttributeError:
stack_preds.extend(list(model.predict(X_train[stack])))
indexes_cv.extend(list(stack))
        stack_preds = np.array(stack_preds)[np.argsort(indexes_cv)]
return stack_preds
def _get_model_preds(self, model, feature_set, X_train, X_predict, y_train):
"""
helper function for generalization
return un-weighted model predictions for a given model,
pickle model fit for speedup in prediction
"""
model.fit(np.asarray(X_train[:, :]), np.asarray(y_train))
if self.pickle_root_dir is not None:
with open(
os.path.join(
self.pickle_root_dir, 'models-fits-' + stringify(model, feature_set) + '.pkl'), 'wb') as f:
pickle.dump(model, f)
        if self.isContinuous:
            try:
                model_preds = model.predict_proba(X_predict)[:, 1]
            except AttributeError:
                model_preds = model.predict(X_predict)
        else:
            model_preds = model.predict(X_predict)
        return model_preds
def fit_predict(self, y, data, features, train=None, predict=None,
show_steps=True):
"""
Fit each model on the appropriate dataset, then return the average
of their individual predictions. If train is specified, use a subset
of the training set to train the models, then predict the outcome of
either the remaining samples or (if given) those specified in cv.
If train is omitted, train the models on the full training set, then
predict the outcome of the full test set.
Options:
------------------------------
- y: numpy array. The full vector of the ground truths.
- train: list. The indices of the elements to be used for training.
If None, take the entire training set.
- predict: list. The indices of the elements to be predicted.
- show_steps: boolean. Whether to compute metrics after each stage
of the computation.
"""
y_train = y[train] if train is not None else y
if train is not None and predict is None:
predict = [i for i in range(len(y)) if i not in train]
stage0_train = []
stage0_predict = []
for model, feature_set in self.models:
X_train, X_predict = get_dataset(data, features, feature_set, train=train, cv=predict)
model_preds = self._get_model_preds(
model, feature_set, X_train, X_predict, y_train)
stage0_predict.append(model_preds)
# if stacking, compute cross-validated predictions on the train set
if self.stack:
model_cv_preds = self._get_model_cv_preds(
model, X_train, y_train)
stage0_train.append(model_cv_preds)
# verbose mode: compute metrics after every model computation
if show_steps:
if train is not None:
mean_preds, stack_preds = self._combine_predictions(
np.array(stage0_train).T, np.array(stage0_predict).T, y_train)
model_score = self.score_func(y[predict], stage0_predict[-1])
mean_score = self.score_func(y[predict], mean_preds)
stack_score = self.score_func(y[predict], stack_preds) \
if self.stack else 0
print "Model:", stringify(model, feature_set), \
"Model Score:", model_score,\
"Mean Score:", mean_score,\
"Stack Score:", stack_score
else:
print("> used model %s:\n%s" % (stringify(
model, feature_set), model.get_params()))
mean_preds, stack_preds = self._combine_predictions(
np.array(stage0_train).T, np.array(stage0_predict).T,
y_train)
if self.for_model_select and self.stack:
selected_preds = np.array(stage0_predict).T
else:
if self.stack:
selected_preds = stack_preds
else:
selected_preds = mean_preds
return selected_preds
class MLR(object):
def __init__(self):
self.coef_ = 0
def fit(self, X, y):
        # Non-negative least squares, then normalize the weights to sum to one
        self.coef_ = nnls(X, y)[0]
        self.coef_ = self.coef_ / np.sum(self.coef_)
def predict(self, X):
        predictions = np.sum(self.coef_ * X, axis=1)
return predictions
def stringify(model, feature_set):
"""Given a model and a feature set, return a short string that will serve
as identifier for this combination.
Ex: (LogisticRegression(), "basic_s") -> "LR:basic_s"
"""
return "%s-%s" % (sub("[a-z]", '', model.__class__.__name__), feature_set)
def get_dataset(data, features, feature_set='basic', train=None, cv=None):
"""
Return the design matrices constructed with the specified feature set.
If train is specified, split the training set according to train and
the subsample's complement.
"""
try:
X_test = data[np.ix_(cv, features[feature_set])]
X = data[np.ix_(train, features[feature_set])]
except ValueError:
X_test = data[:, features[feature_set]]
X = data[:, features[feature_set]]
return X, X_test
def find_params(model, feature_set, features, y, data, subsample=None,
grid_search=True):
"""
Return parameter set for the model, either predefined
or found through grid search.
"""
model_name = model.__class__.__name__
params = INITIAL_PARAMS.get(model_name, {})
y = y if subsample is None else y[subsample]
if grid_search and model_name in PARAM_GRID:
print "Fitting params for :", model_name
X, _ = get_dataset(data, features, feature_set, subsample)
clf = GridSearchCV(model, PARAM_GRID[model_name], cv=5, n_jobs=4,
scoring="roc_auc")
clf.fit(X, y)
logger.info("found params (%s > %.4f): %s",
stringify(model, feature_set),
clf.best_score_, clf.best_params_)
print "found params (%s > %.4f): %s" % (stringify(model, feature_set),
clf.best_score_, clf.best_params_)
params.update(clf.best_params_)
return params
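# A minimal usage sketch (the model list, score function and index arrays are
# illustrative, not part of this module):
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.metrics import roc_auc_score
#
#   models = [(LogisticRegression(), 'basic')]
#   ensemble = EnsembleGeneralization(models, roc_auc_score, stack=True)
#   preds = ensemble.fit_predict(y, data, features, train=train_idx)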
|
# Read three integers and print the middle (median) value.
lst = []
for i in range(3):
    lst.append(int(input()))
lst.sort(reverse=True)
print(lst[1])
|
import os
import json
from .base import BaseIsolatedDataset
from ..data_readers import load_frames_from_video
class WLASLDataset(BaseIsolatedDataset):
"""
American Isolated Sign language dataset from the paper:
`Word-level Deep Sign Language Recognition from Video: A New Large-scale Dataset and Methods Comparison <https://arxiv.org/abs/1910.11006>`_
"""
lang_code = "ase"
def read_glosses(self):
with open(self.split_file, "r") as f:
self.content = json.load(f)
self.glosses = sorted([gloss_entry["gloss"] for gloss_entry in self.content])
def read_original_dataset(self):
for gloss_entry in self.content:
gloss, instances = gloss_entry["gloss"], gloss_entry["instances"]
gloss_cat = self.label_encoder.transform([gloss])[0]
for instance in instances:
if instance["split"] not in self.splits:
continue
video_id = instance["video_id"]
instance_entry = video_id, gloss_cat
self.data.append(instance_entry)
def read_video_data(self, index):
video_name, label, start_frame, end_frame = self.data[index]
video_path = os.path.join(self.root_dir, video_name + ".mp4")
imgs = load_frames_from_video(video_path, start_frame, end_frame)
return imgs, label, video_name
|
# Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
def migration_test(name, versions, **kwargs):
native.sh_test(
name = name,
srcs = ["//sandbox-migration:test.sh"],
deps = ["@bazel_tools//tools/bash/runfiles"],
data = [
"//sandbox-migration:sandbox-migration-runner",
"//sandbox-migration:migration-script.dar",
"//sandbox-migration:migration-model.dar",
] + ["@daml-sdk-{}//:daml".format(ver) for ver in versions],
args = versions,
**kwargs
)
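# Example BUILD usage (the version strings are illustrative):
#
#   migration_test(
#       name = "migration-test",
#       versions = ["1.0.0", "1.3.0"],
#   )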
|
import urllib.request
import json
from tsp import *
import math
import os
import numpy as np
inf = 0  # placeholder distance from a stop to itself (diagonal entries)
with open(os.path.abspath('key.txt'), 'r') as keyFile:
    key = keyFile.read().strip()
vector = []
with open(os.path.abspath('addr.txt'), 'r') as addrFile:
    vectorList = addrFile.read().splitlines()
for line in vectorList:
    # Join the words of each address with '+' so it can be embedded in a URL
    vector.append("+".join(line.split(" ")))
listLength = len(vector)
graph = [[0 for i in range(listLength)] for j in range(listLength)]
for i in range(len(vector)):
for j in range(len(vector)):
if i == j:
# graph[i].insert(j,inf)
graph[i][j] = inf
else:
origin = vector[i]
destination = vector[j]
url = ('https://maps.googleapis.com/maps/api/distancematrix/json'
+'?language=en-US&units=imperial'
+'&origins={}'
+'&destinations={}'
+ '&key={}').format(origin,destination,key)
response = urllib.request.urlopen(url)
response = json.loads(response.read())
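            # elements[0]['duration']['value'] is the travel time in seconds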
graph[i][j] = response['rows'][0]['elements'][0]['duration']['value']
distanceMatrix = np.array(graph, dtype=float)
distance = TSP_dynamic(listLength,distanceMatrix)
with open('optimal.txt', 'w') as oFile:
    oFile.write(" ".join(str(stop) for stop in distance))
# file = open('arr.txt','w')
# for i in range(len(vector)):
# for j in range(len(vector)):
# file.write(str(graph[i][j]) + " ")
# file.write("\n")
|
import unittest
try:
from rest_framework.test import APITestCase
except ImportError:
raise unittest.SkipTest("Skipping DRF tests as DRF is not installed.")
from rest_framework import status
from tests.test_app.models import (
NaturalKeyChild, ModelWithNaturalKey, ModelWithSingleUniqueField,
)
from natural_keys import NaturalKeySerializer
# Tests for natural key DRF integration
class NaturalKeyRestTestCase(APITestCase):
def test_naturalkey_rest_serializer(self):
# Serializer should include validator
serializer = NaturalKeySerializer.for_model(NaturalKeyChild)()
expect = """
Serializer():
parent = Serializer():
code = CharField(max_length=10)
group = CharField(max_length=10)
mode = CharField(max_length=10)
class Meta:
validators = [<NaturalKeyValidator(queryset=NaturalKeyChild.objects.all(), fields=('parent', 'mode'))>]""".replace(" ", "")[1:] # noqa
self.assertEqual(expect, str(serializer))
fields = serializer.get_fields()
self.assertTrue(fields['parent'].required)
self.assertTrue(fields['mode'].required)
self.assertTrue(fields['parent'].get_fields()['code'].required)
def test_naturalkey_rest_singleunique(self):
# Serializer should only have single top-level validator
serializer = NaturalKeySerializer.for_model(
ModelWithSingleUniqueField
)()
expect = """
Serializer():
code = CharField(max_length=10, validators=[])
class Meta:
validators = [<NaturalKeyValidator(queryset=ModelWithSingleUniqueField.objects.all(), fields=('code',))>]""".replace(" ", "")[1:] # noqa
self.assertEqual(expect, str(serializer))
fields = serializer.get_fields()
self.assertTrue(fields['code'].required)
def test_naturalkey_rest_post(self):
# Posting a compound natural key should work
form = {
'mode': 'mode3a',
'parent[code]': "code3",
'parent[group]': "group3",
}
response = self.client.post('/naturalkeychilds.json', form)
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.data
)
self.assertEqual(response.data['mode'], "mode3a")
self.assertEqual(response.data['parent']['code'], "code3")
self.assertEqual(response.data['parent']['group'], "group3")
# Posting a simple natural key should work
form = {
'code': 'code9',
}
response = self.client.post('/modelwithsingleuniquefield.json', form)
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.data
)
self.assertEqual(response.data['code'], "code9")
# Posting same nested natural key should reuse nested object
form = {
'mode': 'mode3b',
'parent[code]': "code3",
'parent[group]': "group3",
}
response = self.client.post('/naturalkeychilds.json', form)
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.data
)
self.assertEqual(
NaturalKeyChild.objects.get(mode='mode3a').parent.pk,
NaturalKeyChild.objects.get(mode='mode3b').parent.pk,
)
def test_naturalkey_rest_duplicate(self):
# Posting identical compound natural key should fail
form = {
'mode': 'mode3c',
'parent[code]': "code3",
'parent[group]': "group3",
}
response = self.client.post('/naturalkeychilds.json', form)
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.data
)
form = {
'mode': 'mode3c',
'parent[code]': "code3",
'parent[group]': "group3",
}
response = self.client.post('/naturalkeychilds.json', form)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data
)
self.assertEqual(
response.data, {
'non_field_errors': [
'The fields parent, mode must make a unique set.'
]
}
)
# Posting identical simple natural key should fail
form = {
'code': 'code8',
}
response = self.client.post('/modelwithsingleuniquefield.json', form)
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.data
)
form = {
'code': 'code8',
}
response = self.client.post('/modelwithsingleuniquefield.json', form)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data
)
self.assertEqual(
response.data, {
'code': [
'model with single unique field '
'with this code already exists.'
]
}
)
def test_naturalkey_rest_nested_post(self):
# Posting a regular model with a ref to natural key
form = {
'key[mode]': 'mode4',
'key[parent][code]': "code4",
'key[parent][group]': "group4",
'value': 5,
}
response = self.client.post('/modelwithnaturalkeys.json', form)
self.assertEqual(
response.status_code, status.HTTP_201_CREATED, response.data
)
self.assertEqual(response.data['key']['mode'], "mode4")
self.assertEqual(response.data['key']['parent']['code'], "code4")
self.assertEqual(response.data['key']['parent']['group'], "group4")
def test_naturalkey_rest_nested_put(self):
# Updating a regular model with a ref to natural key
instance = ModelWithNaturalKey.objects.create(
key=NaturalKeyChild.objects.find(
'code5', 'group5', 'mode5'
),
value=7,
)
self.assertEqual(instance.key.parent.code, 'code5')
# Updating with same natural key should reuse it
form = {
'key[mode]': 'mode5',
'key[parent][code]': "code5",
'key[parent][group]': "group5",
'value': 8,
}
self.assertEqual(
NaturalKeyChild.objects.count(),
1
)
# Updating with new natural key should create it
response = self.client.put(
'/modelwithnaturalkeys/%s.json' % instance.pk, form
)
form = {
'key[mode]': 'mode6',
'key[parent][code]': "code6",
'key[parent][group]': "group6",
'value': 9,
}
response = self.client.put(
'/modelwithnaturalkeys/%s.json' % instance.pk, form
)
self.assertEqual(
response.status_code, status.HTTP_200_OK, response.data
)
self.assertEqual(response.data['key']['mode'], "mode6")
self.assertEqual(response.data['key']['parent']['code'], "code6")
self.assertEqual(response.data['key']['parent']['group'], "group6")
self.assertEqual(
NaturalKeyChild.objects.count(),
2
)
def test_naturalkey_lookup(self):
# Support natural_key_slug as lookup_field setting
NaturalKeyChild.objects.find(
'code7', 'group7', 'mode7'
)
response = self.client.get(
'/naturalkeylookup/code7-group7-mode7.json',
)
self.assertEqual(
response.status_code, status.HTTP_200_OK, response.data
)
self.assertEqual(
response.data['id'], 'code7-group7-mode7'
)
def test_naturalkey_lookup_slug(self):
# Support separator in slug (but only for last part of key)
NaturalKeyChild.objects.find(
'code7', 'group7', 'mode7-alt'
)
response = self.client.get(
'/naturalkeylookup/code7-group7-mode7-alt.json',
)
self.assertEqual(
response.status_code, status.HTTP_200_OK, response.data
)
self.assertEqual(
response.data['id'], 'code7-group7-mode7-alt'
)
def test_invalid_slug_404(self):
response = self.client.get(
'/naturalkeylookup/not-valid.json',
)
self.assertEqual(
status.HTTP_404_NOT_FOUND, response.status_code,
)
|
import json
from google.appengine.ext import db
import jsonschema
class JSONProperty(db.Property):
"""Property for storing simple JSON objects backed by a schema."""
data_type = db.Blob
def __init__(self, schema=None, *args, **kwargs):
"""Constructor.
Args:
schema: a JSON Schema per draft 3 or 4 of json-schema.org
"""
self.schema = schema
super(JSONProperty, self).__init__(*args, **kwargs)
def validate(self, value):
"""Validate that the value is valid JSON that conforms to the self.schema.
Args:
value: JSON-serializable object
Returns:
value, unchanged
"""
        if self.schema:
            try:
                jsonschema.validate(value, self.schema)
            except jsonschema.ValidationError as e:
                raise db.BadValueError(str(e))
return value
def make_value_from_datastore(self, value):
"""Convert the datastore blob to a Python object."""
return json.loads(value)
def get_value_for_datastore(self, model_instance):
"""Convert the Python object value into a string for the datastore."""
value = self.__get__(model_instance, model_instance.__class__)
return json.dumps(value)
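# A minimal usage sketch (the model name and schema are illustrative):
#
#   class UserPrefs(db.Model):
#       prefs = JSONProperty(schema={'type': 'object'})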
|
import json
from argparse import ArgumentParser
import tweepy
parser = ArgumentParser()
parser.add_argument('friends_repo')
parser.add_argument('-a', '--auth-filepath', help='Path to auth file',
required=True)
args = parser.parse_args()
FRIENDS_IDS = args.friends_repo
with open(args.auth_filepath) as f:
AUTH = json.load(f)
auth = tweepy.OAuthHandler(AUTH['consumer_token'], AUTH['consumer_secret'])
auth.set_access_token(AUTH['access_token'], AUTH['access_token_secret'])
api = tweepy.API(auth)
with open(FRIENDS_IDS) as f:
friends = json.load(f)
iretrieve = 0
retrieved_user = False
for friend_id, friend in friends.items():
if friend is None:
try:
friends[friend_id] = api.get_user(friend_id)._json
retrieved_user = True
iretrieve += 1
        except tweepy.TweepError:
            print('API retrieval failed, probably hit the rate limit')
break
friends = {int(friend_id) : friends[friend_id] for friend_id in friends}
with open(FRIENDS_IDS, 'w') as f:
json.dump(friends, f, indent=2, sort_keys=True)
if not retrieved_user:
    print('No get_user API calls were made')
print('Retrieved {0} users'.format(iretrieve))
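# Note: user lookups are rate-limited by the Twitter API (commonly 900
# requests per 15-minute window), so the script saves whatever it fetched;
# re-running it fills in the profiles that are still None.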
|
import copy
import numpy as np
from . import base, mean, summing
class Cov(base.Bivariate):
"""Covariance.
Parameters
----------
ddof
Delta Degrees of Freedom.
Examples
--------
>>> from river import stats
>>> x = [-2.1, -1, 4.3]
>>> y = [ 3, 1.1, 0.12]
>>> cov = stats.Cov()
>>> for xi, yi in zip(x, y):
... print(cov.update(xi, yi).get())
0.0
-1.044999
-4.286
Notes
-----
The outcomes of the incremental and parallel updates are consistent with numpy's
batch processing when $\\text{ddof} \\le 1$.
References
----------
[^1]: [Wikipedia article on algorithms for calculating variance](https://www.wikiwand.com/en/Algorithms_for_calculating_variance#/Covariance)
[^2]: Schubert, E. and Gertz, M., 2018, July. Numerically stable parallel computation of
(co-) variance. In Proceedings of the 30th International Conference on Scientific and
Statistical Database Management (pp. 1-12).
"""
def __init__(self, ddof=1):
self.ddof = ddof
self.mean_x = mean.Mean()
self.mean_y = mean.Mean()
self.n = 0
self._C = 0
self.cov = 0
def update(self, x, y, w=1.0):
dx = x - self.mean_x.get()
self.mean_x.update(x, w)
self.mean_y.update(y, w)
self._C += w * dx * (y - self.mean_y.get())
self.cov = self._C / max(1, self.mean_x.n - self.ddof)
return self
def update_many(self, X: np.ndarray, Y: np.ndarray):
dx = X - self.mean_x.get()
self.mean_x.update_many(X)
self.mean_y.update_many(Y)
self._C += (dx * (Y - self.mean_y.get())).sum()
self.cov = self._C / max(1, self.mean_x.n - self.ddof)
return self
def get(self):
return self.cov
def __iadd__(self, other):
old_mean_x = self.mean_x.get()
old_mean_y = self.mean_y.get()
old_n = self.mean_x.n
# Update mean estimates
self.mean_x += other.mean_x
self.mean_y += other.mean_y
if self.mean_x.n <= self.ddof:
return self
# Scale factors
scale_a = old_n - self.ddof
scale_b = other.mean_x.n - other.ddof
# Scale the covariances
self.cov = scale_a * self.cov + scale_b * other.cov
# Apply correction factor
self.cov += (
(old_mean_x - other.mean_x.get())
* (old_mean_y - other.mean_y.get())
* ((old_n * other.mean_x.n) / self.mean_x.n)
)
# Reapply scale
self.cov /= self.mean_x.n - self.ddof
return self
def __add__(self, other):
result = copy.deepcopy(self)
result += other
return result
def __isub__(self, other):
if self.mean_x.n <= self.ddof:
return self
old_n = self.mean_x.n
# Update mean estimates
self.mean_x -= other.mean_x
self.mean_y -= other.mean_y
if self.mean_x.n <= self.ddof:
self.cov = 0
return self
# Scale factors
scale_x = old_n - self.ddof
scale_b = other.mean_x.n - other.ddof
# Scale the covariances
self.cov = scale_x * self.cov - scale_b * other.cov
# Apply correction
self.cov -= (
(self.mean_x.get() - other.mean_x.get())
* (self.mean_y.get() - other.mean_y.get())
* ((self.mean_x.n * other.mean_x.n) / old_n)
)
# Re-apply scale factor
self.cov /= self.mean_x.n - self.ddof
return self
def __sub__(self, other):
result = copy.deepcopy(self)
result -= other
return result
class RollingCov(base.Bivariate):
"""Rolling covariance.
Parameters
----------
window_size
Size of the window over which to compute the covariance.
ddof
Delta Degrees of Freedom.
Here is the derivation, where $C$ denotes the covariance and $d$ is the amount of degrees of
freedom:
$$C = \\frac{1}{n - d} \\sum_{i=1}^n (x_i - \\bar{x}) (y_i - \\bar{y})$$
    $$C = \\frac{1}{n - d} \\sum_{i=1}^n (x_i y_i - x_i \\bar{y} - \\bar{x} y_i + \\bar{x} \\bar{y})$$
$$C = \\frac{1}{n - d} (\\sum_{i=1}^n x_i y_i - \\bar{y} \\sum_{i=1}^n x_i - \\bar{x} \\sum_{i=1}^n y_i + \\sum_{i=1}^n \\bar{x}\\bar{y})$$
$$C = \\frac{1}{n - d} (\\sum_{i=1}^n x_i y_i - \\bar{y} n \\bar{x} - \\bar{x} n \\bar{y} + n \\bar{x}\\bar{y})$$
$$C = \\frac{1}{n - d} (\\sum_{i=1}^n x_i y_i - n \\bar{x} \\bar{y})$$
$$C = \\frac{1}{n - d} (\\sum_{i=1}^n x_i y_i - \\frac{\\sum_{i=1}^n x_i \\sum_{i=1}^n y_i}{n})$$
The derivation is straightforward and somewhat trivial, but is a nice example of reformulating
an equation so that it can be updated online. Note that we cannot apply this derivation to the
non-rolling version of covariance because that would result in sums that grow infinitely, which
can potentially cause numeric overflow.
Examples
--------
>>> from river import stats
>>> x = [-2.1, -1, 4.3, 1, -2.1, -1, 4.3]
>>> y = [ 3, 1.1, .12, 1, 3, 1.1, .12]
>>> rcov = stats.RollingCov(3)
>>> for xi, yi in zip(x, y):
... print(rcov.update(xi, yi).get())
0.0
-1.045
-4.286
-1.382
-4.589
-1.415
-4.286
"""
def __init__(self, window_size, ddof=1):
self.ddof = ddof
self.sx = summing.RollingSum(window_size)
self.sy = summing.RollingSum(window_size)
self.sxy = summing.RollingSum(window_size)
@property
def window_size(self):
return self.sxy.window_size
def update(self, x, y):
self.sx.update(x)
self.sy.update(y)
self.sxy.update(x * y)
return self
def get(self):
n = len(self.sx.window) # current window size
try:
return (self.sxy.get() - self.sx.get() * self.sy.get() / n) / max(
1, n - self.ddof
)
except ZeroDivisionError:
return None
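# Sanity check (illustrative): with the default ddof=1 the rolling value above
# matches numpy on the current window, e.g. np.cov(x[-3:], y[-3:])[0, 1] for
# window_size=3.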
|
from panda3d.core import *
from CCDIK.IKChain import IKChain
from CCDIK.IKActor import IKActor
from CCDIK.Utils import *
from WalkCycle import WalkCycle
from FootArc import FootArc
from CollisionTerrain import CollisionTerrain
from direct.actor.Actor import Actor
import math
import random
class RiggedChar():
def __init__( self, terrain ):
##################################
# Set up main body:
self.rootNode = render.attachNewNode("Torso")
geom = createAxes( 0.3 )
self.rootNode.attachNewNode( geom )
# How high the root node should currently be (Determined by the average position of all
# grounded feet):
self.rootHeight = 0.956756 # Distance between root node and the ground
#self.rootHeight = 0
self.rootNode.setPos( 0, 0, self.rootHeight )
self.curTargetHeight = self.rootHeight
##################################
# Set up body movement:
self.targetNode = render.attachNewNode( "WalkTarget" )
#geom = createAxes( 0.2 )
#self.targetNode.attachNewNode( geom )
self.walkSpeed = 0.5 # m/s
self.turnSpeed = 1
self.heightAdjustmentSpeed = self.walkSpeed
self.newRandomTarget()
##################################
# Set up legs:
self.model = loader.loadModel( "Meshes/person.bam" )
# Standard material:
m = Material()
m.setBaseColor((0.1, 0.5, 0.1, 1))
m.setAmbient((0.1,0.1,0.1,1))
m.setSpecular((0.1,0.7,0.1,1))
self.ikActor = IKActor( self.model )
self.ikActor.reparentTo( self.rootNode )
self.ikActor.actor.setMaterial(m)
#render.find("**/Body").setMaterial(m)
#render.find("**/Body").setShaderAuto()
# rootBone = actor.exposeJoint(None, "modelRoot", "Bone")
# rootBoneControl = actor.controlJoint(None, "modelRoot", "Bone")
# rootBoneControl.setHpr( 45, 45, 45 )
self.ikChainLegLeft = self.ikActor.createIKChain( ["Hip.L", "UpperLeg.L", "LowerLeg.L", "Foot.L"] )
#self.ikChainLegLeft.setStatic( "Hips" )
self.ikChainLegLeft.setHingeConstraint( "Hip.L", axis=LVector3f.unitZ(),
minAng=-math.pi*0.05, maxAng=math.pi*0.05 )
self.ikChainLegLeft.setHingeConstraint( "UpperLeg.L", axis=LVector3f.unitX(),
minAng=-math.pi*0.2, maxAng=math.pi*0.2 )
self.ikChainLegLeft.setHingeConstraint( "LowerLeg.L", axis=LVector3f.unitX(),
minAng=-math.pi*0.5, maxAng=-math.pi*0.05 )
self.ikChainLegLeft.setHingeConstraint( "Foot.L", axis=LVector3f.unitX(),
minAng=-math.pi*0.5, maxAng=math.pi*0.5 )
self.ikChainLegRight = self.ikActor.createIKChain( ["Hip.R", "UpperLeg.R", "LowerLeg.R", "Foot.R"] )
#self.ikChainLegRight.setStatic( "Hips" )
self.ikChainLegRight.setHingeConstraint( "Hip.R", axis=LVector3f.unitZ(),
minAng=-math.pi*0.05, maxAng=math.pi*0.05 )
self.ikChainLegRight.setHingeConstraint( "UpperLeg.R", axis=LVector3f.unitX(),
minAng=-math.pi*0.2, maxAng=math.pi*0.2 )
self.ikChainLegRight.setHingeConstraint( "LowerLeg.R", axis=LVector3f.unitX(),
minAng=-math.pi*0.5, maxAng=-math.pi*0.05 )
self.ikChainLegRight.setHingeConstraint( "Foot.R", axis=LVector3f.unitX(),
minAng=-math.pi*0.5, maxAng=math.pi*0.5 )
#self.ikChainLegLeft.updateIK()
#self.ikChainLegRight.updateIK()
#################################################
# Set up arm chains:
self.ikChainArmLeft = self.ikActor.createIKChain( ["Shoulder.L", "UpperArm.L", "LowerArm.L", "Hand.L"] )
self.ikChainArmLeft.setHingeConstraint( "Shoulder.L", axis=LVector3f.unitZ(),
minAng=math.pi*0.05, maxAng=math.pi*0.05 )
self.ikChainArmLeft.setHingeConstraint( "UpperArm.L", axis=LVector3f.unitY(),
minAng=-math.pi*0.5, maxAng=math.pi*0.5 )
self.ikChainArmLeft.setHingeConstraint( "LowerArm.L", axis=LVector3f.unitZ(),
minAng=-math.pi*0.5, maxAng=0 )
self.ikChainArmLeft.setHingeConstraint( "Hand.L", axis=LVector3f.unitX(),
minAng=-math.pi*0.3, maxAng=math.pi*0.3 )
self.ikChainArmRight = self.ikActor.createIKChain( ["Shoulder.R", "UpperArm.R", "LowerArm.R", "Hand.R"] )
self.ikChainArmRight.setHingeConstraint( "Shoulder.R", axis=LVector3f.unitZ(),
minAng=math.pi*0.05, maxAng=math.pi*0.05 )
self.ikChainArmRight.setHingeConstraint( "UpperArm.R", axis=LVector3f.unitY(),
minAng=-math.pi*0.5, maxAng=math.pi*0.5 )
self.ikChainArmRight.setHingeConstraint( "LowerArm.R", axis=LVector3f.unitZ(),
minAng=0, maxAng=math.pi*0.5 )
self.ikChainArmRight.setHingeConstraint( "Hand.R", axis=LVector3f.unitX(),
minAng=-math.pi*0.3, maxAng=math.pi*0.3 )
############################
self.ikChainLegLeft.debugDisplay( lineLength=0.1 )
self.ikChainLegRight.debugDisplay( lineLength=0.1 )
self.ikChainArmLeft.debugDisplay( lineLength=0.1 )
self.ikChainArmRight.debugDisplay( lineLength=0.1 )
#################################################
# Foot targets:
# Set up two targets that the foot should reach:
self.footTargetLeft = render.attachNewNode("FootTargetLeft")
self.footTargetRight = render.attachNewNode("FootTargetRight")
geom = createAxes( 0.15 )
self.footTargetLeft.attachNewNode( geom )
self.footTargetRight.attachNewNode( geom )
self.ikChainLegLeft.setTarget( self.footTargetLeft )
self.ikChainLegRight.setTarget( self.footTargetRight )
# xRay:
self.footTargetLeft.setBin("fixed", 0)
self.footTargetLeft.setDepthTest(False)
self.footTargetLeft.setDepthWrite(False)
self.footTargetRight.setBin("fixed", 0)
self.footTargetRight.setDepthTest(False)
self.footTargetRight.setDepthWrite(False)
        # Set up two nodes which stay (rigidly) in front of the body, on the floor.
# Whenever a leg needs to take a step, the target will be placed on this position:
self.plannedRotation = self.rootNode.attachNewNode( "PlannedRotationNode" )
self.plannedFootTargetLeft = self.plannedRotation.attachNewNode( "PlannedFootTargetLeft" )
self.plannedFootTargetRight = self.plannedRotation.attachNewNode( "PlannedFootTargetRight" )
# Get distance from root bone to foot bone. This is the length of the leg, i.e. it tells
# us how far the planned foot position should be away from the root:
footNode = self.ikActor.getControlNode( "Foot.L" )
footPos = footNode.getPos( self.rootNode )
footHeight = self.rootHeight + footPos.getZ()
self.footOutwards = abs(footPos.getX())
self.footHeightOffset = LVector3f(0,0,footHeight)
print("Foot height:", footHeight, self.rootHeight, footNode, footNode.getPos( self.rootNode ))
self.stepDist = 0.35 # Length of a step
self.plannedFootTargetLeft.setPos( -self.footOutwards, self.stepDist, 0 )
self.plannedFootTargetRight.setPos( self.footOutwards, self.stepDist, 0 )
self.plannedFootTargetLeft.attachNewNode( geom )
self.plannedFootTargetRight.attachNewNode( geom )
self.legMovementSpeed = self.walkSpeed*2
self.stepArcLeft = None
self.stepArcRight = None
self.walkCycle = WalkCycle( 2, 0.5 )
#self.noise = PerlinNoise2()
#self.noise.setScale( 0.1, 0.1 )
self.handBasePosLeft = self.rootNode.attachNewNode("HandTargetLeft")
self.handBasePosLeft.setPos( -0.3, 0, -0.3 )
self.handTargetLeft = self.handBasePosLeft.attachNewNode("HandTargetLeft")
self.handBasePosRight = self.rootNode.attachNewNode("HandTargetRight")
self.handBasePosRight.setPos( 0.3, 0, -0.3 )
self.handTargetRight = self.handBasePosRight.attachNewNode("HandTargetRight")
self.ikChainArmLeft.setTarget( self.handTargetLeft )
self.ikChainArmRight.setTarget( self.handTargetRight )
self.handTargetLeft.attachNewNode( geom )
self.handTargetRight.attachNewNode( geom )
###########################################
## Set up lights:
light = PointLight("PointLight")
light.setColorTemperature( 9000 )
#light.attenuation = (1, 0.5, 0.5)
light.attenuation = (0.75, 0, 0.05)
lightNode = render.attachNewNode( light )
lightNode.setPos( 0, 0, 3 )
render.setLight( lightNode )
#light.setShadowCaster(True, 1024, 1024, -2000 ) # low sort value to render early!
alight = AmbientLight('alight')
alight.setColor((0.2, 0.3, 0.2, 1))
alnp = render.attachNewNode(alight)
render.setLight(alnp)
#################################################
base.taskMgr.add( self.walk, "RiggedCharWalk")
base.accept( "+", self.speedUp )
base.accept( "-", self.slowDown )
##################################
# Set up collision:
self.terrain = terrain
self.collisionRay = CollisionRay()
self.collisionRay.direction = -LVector3f.unitZ() # Trace down
cn = CollisionNode( "RootRayNode" )
cn.addSolid( self.collisionRay )
self.rayNode = self.rootNode.attachNewNode( cn )
self.collisionTraverser = CollisionTraverser()
self.collisionQueue = CollisionHandlerQueue()
self.collisionTraverser.addCollider( self.rayNode, self.collisionQueue )
#self.collisionTraverser.traverse( self.cave.collisionFloorRoot )
##################################
# Control upper body:
self.torsoBone = self.ikActor.getControlNode( "LowerSpine" )
def speedUp( self ):
self.walkSpeed += 0.1
self.walkSpeed = min(self.walkSpeed, 0.5)
self.turnSpeed = self.walkSpeed*2
self.heightAdjustmentSpeed = self.walkSpeed
self.legMovementSpeed = 0.3 + self.walkSpeed*1.2
def slowDown( self ):
self.walkSpeed -= 0.1
self.walkSpeed = max(self.walkSpeed, 0)
self.turnSpeed = self.walkSpeed*2
self.heightAdjustmentSpeed = self.walkSpeed
self.legMovementSpeed = 0.3 + self.walkSpeed*1.2
def walk( self, task ):
#############################
# Update body:
prevPos = self.rootNode.getPos()
diff = self.targetNode.getPos( self.rootNode )
diff.z = 0
diffN = diff.normalized()
ang = LVector3f.unitY().angleRad( diffN )
axis = LVector3f.unitY().cross( diffN )
axis.normalize()
maxRot = self.turnSpeed*globalClock.getDt()
angClamped = 0
if axis.length() > 0.999:
# Limit angle:
angClamped = max( -maxRot, min( maxRot, ang ) )
q = Quat()
q.setFromAxisAngleRad( angClamped, axis )
qOld = self.rootNode.getQuat()
qNew = q*qOld
self.rootNode.setQuat( qNew )
if abs( ang ) < maxRot:
step = diffN*self.walkSpeed*globalClock.getDt()
if step.lengthSquared() > diff.lengthSquared():
self.newRandomTarget()
step = diff
step = self.rootNode.getQuat().xform( step )
self.rootNode.setPos( self.rootNode.getPos() + step )
#############################
# Calculate how far we've walked this frame:
curWalkDist = (prevPos - self.rootNode.getPos()).length()
curWalkSpeed = curWalkDist/globalClock.getDt()
update = curWalkDist*0.75
update += angClamped*0.5
self.walkCycle.updateTime( update )
#############################
# Rotate torso:
cycle = math.sin( self.walkCycle.cycleTime/self.walkCycle.cycleDuration*math.pi*2 )
self.torsoBone.setHpr( -4*cycle, -2, 0 )
#############################
# Move body up and down depending on foot placement:
#if not self.stepArcLeft and self.stepArcRight:
# self.curTargetHeight = self.footTargetLeft.getPos().getZ()
#elif not self.stepArcRight and self.stepArcLeft:
# self.curTargetHeight = self.footTargetRight.getPos().getZ()
#elif not self.stepArcLeft and not self.stepArcRight:
footPosL = self.footTargetLeft.getPos()
#if self.stepArcLeft:
# footPosL = self.stepArcLeft.endPos
footPosR = self.footTargetRight.getPos()
#if self.stepArcRight:
# footPosR = self.stepArcRight.endPos
self.curTargetHeight = 0.5*(footPosL.getZ() +\
footPosR.getZ()) + self.rootHeight - self.footHeightOffset.getZ()
curPos = self.rootNode.getPos()
heightAdjustment = self.curTargetHeight - curPos.getZ()
limit = self.heightAdjustmentSpeed * globalClock.getDt()
heightAdjustment = min( max( heightAdjustment, -limit), limit )
self.rootNode.setPos( curPos.getX(), curPos.getY(), curPos.getZ() + heightAdjustment )
#############################
# Update arms:
self.handTargetLeft.setPos( 0, -cycle*min(curWalkSpeed*0.16,0.3)+0.1, 0 )
self.handTargetRight.setPos( 0, cycle*min(curWalkSpeed*0.16,0.3)+0.1, 0 )
self.ikChainArmLeft.updateIK()
self.ikChainArmRight.updateIK()
#############################
# Update legs:
# TODO: Rotate plannedRotation
#q = Quat()
#q.setFromAxisAngleRad( self.turnSpeed, LVector3f.unitZ() )
#self.plannedRotation.setQuat( q )
# Move planned foot target further forward (longer steps) when character is
# walking faster:
curStepDist = 0.1
if curWalkSpeed > 0:
curStepDist = self.stepDist + self.walkSpeed*0.2
p = LVector3f( -self.footOutwards, curStepDist, 0 )
pw = render.getRelativePoint( self.plannedRotation, p )
#p.z = self.noise( pw.x, pw.y )*0.05 + 0.05
self.plannedFootTargetLeft.setPos( p )
p = LVector3f( self.footOutwards, curStepDist, 0 )
pw = render.getRelativePoint( self.plannedRotation, p )
#p.z = self.noise( pw.x, pw.y )*0.05 + 0.05
self.plannedFootTargetRight.setPos( p )
# Update the walkcycle to determine if a step needs to be taken:
#update = curWalkDist*0.1/globalClock.dt
if self.walkCycle.stepRequired[0]:
#self.footTargetLeft.setPos( self.plannedFootTargetLeft.getPos( render ) )
self.walkCycle.step( 0 ) # Tell walk cycle that step has been taken
#h = min( curWalkSpeed*0.2, 0.3)
h = 0.05
targetPos = self.findGroundPos( self.plannedFootTargetLeft.getPos( self.rootNode ) )
self.stepArcLeft = FootArc( self.footTargetLeft.getPos() - self.footHeightOffset,
render.getRelativePoint( self.rootNode, targetPos ), maxStepHeight=h )
if self.walkCycle.stepRequired[1]:
#self.footTargetRight.setPos( self.plannedFootTargetRight.getPos( render ) )
self.walkCycle.step( 1 ) # Tell walk cycle that step has been taken
#h = min( curWalkSpeed*0.2, 0.3)
h = 0.05
targetPos = self.findGroundPos( self.plannedFootTargetRight.getPos( self.rootNode ) )
self.stepArcRight = FootArc( self.footTargetRight.getPos() - self.footHeightOffset,
render.getRelativePoint( self.rootNode, targetPos ), maxStepHeight=h )
if self.stepArcLeft:
legMoveDist = self.legMovementSpeed*globalClock.dt
self.stepArcLeft.update( legMoveDist )
self.footTargetLeft.setPos( self.stepArcLeft.getPos() + self.footHeightOffset )
if self.stepArcLeft.done():
self.stepArcLeft = None
if self.stepArcRight:
legMoveDist = self.legMovementSpeed*globalClock.dt
self.stepArcRight.update( legMoveDist )
self.footTargetRight.setPos( self.stepArcRight.getPos() + self.footHeightOffset )
if self.stepArcRight.done():
self.stepArcRight = None
self.ikChainLegLeft.updateIK()
self.ikChainLegRight.updateIK()
##################################
## Let toes always face horizontally:
## Note: Not sure if this works correctly yet!
toeNode = self.ikActor.getControlNode( "Toes.L" )
hpr = toeNode.getHpr( self.rootNode )
toeNode.setHpr( self.rootNode, hpr.getX(), 0, hpr.getZ() )
toeNode = self.ikActor.getControlNode( "Toes.R" )
hpr = toeNode.getHpr( self.rootNode )
toeNode.setHpr( self.rootNode, hpr.getX(), 0, hpr.getZ() )
return task.cont
def findGroundPos( self, inputPos ):
self.rayNode.setPos( inputPos + LVector3f.unitZ()*2 )
self.collisionTraverser.traverse( self.terrain.root )
if len( self.collisionQueue.getEntries() ) > 0:
self.collisionQueue.sortEntries()
groundPos = self.collisionQueue.getEntry(0).getSurfacePoint( self.rootNode )
return groundPos
else:
return inputPos
def newRandomTarget( self ):
self.targetNode.setPos(
LVector3f( random.random()*9-4.5,
random.random()*9-4.5,
0 ) )
if __name__ == "__main__":
from direct.showbase.ShowBase import ShowBase
from CCDIK.CameraControl import CameraControl
class MyApp(ShowBase):
def __init__(self):
#####################################
## Set up scene
ShowBase.__init__(self)
base.disableMouse()
base.setFrameRateMeter(True)
wp = WindowProperties()
wp.setSize(1800, 960)
self.win.requestProperties(wp)
base.setBackgroundColor(0,0,0)
grid = createGrid( 20, 1 )
render.attachNewNode( grid )
axes = createAxes( 1000, bothways=True, thickness=3 )
render.attachNewNode( axes )
terrain = CollisionTerrain( 5, 0.25, render, height=1 )
#####################################
# Set up character
self.character = RiggedChar( terrain )
#self.character2 = RiggedChar( terrain )
#####################################
# Set up Camera and input:
#focusNode = render.attachNewNode( "CameraFocusNode" )
self.camControl = CameraControl( camera, self.mouseWatcherNode, speed = 0.02 )
self.camControl.focusPoint = LVector3f( 0, 0, 1 )
self.taskMgr.add( self.camControl.moveCamera, "MoveCameraTask")
self.accept( "wheel_down", self.camControl.wheelDown )
self.accept( "wheel_up", self.camControl.wheelUp )
#####################################
label("[WASD]: Move Camera", 1)
label("[Mouse Wheel]: Zoom Camera", 2)
label("[Middle Mouse]: Rotate Camera", 3)
label("[+]: Speed up", 5)
label("[-]: Slow down", 6)
app = MyApp()
app.run()
|
h = 6.6260755e-27 # planck's constant (ergs-s)
c = 2.99792458e10 # speed of light (cm/s)
k = 1.380658e-16 # boltzmann constant (ergs/K)
pi = 3.14159 # just pi
sb = 5.6704e-5 # stefan-boltzmann constant (ergs cm^-2 s^-1 K^-4)
m_e = 9.10938188e-28 # mass of electron (g)
m_p = 1.67262158e-24 # mass of proton (g)
a = 7.5657e-15 # radiation constant
e = 4.8032e-10 # electron charge in cgs
a0 = 0.53e-8 # bohr radius (cm)
rydberg = 2.1798741e-11 # rydberg constant in ergs
G = 6.67e-8 # Newton's gravitational constant
sigma_t = 6.6523e-24 # thomson cross-section in cm^2
sigma_tot = 0.0265400193567 # integrated line coefficient (cm^2 Hz)
ev_to_ergs = 1.60217646e-12
cm_to_angs = 1.0e8
angs_to_cm = 1.0e-8
days_to_sec = 86400.0
# astronomical constants
parsec = 3.08e18 # parsec in cm
kpc = 3.08e21
m_sun = 1.99e33
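# A quick sanity-check sketch using the constants above; the solar values
# below are illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    T_sun = 5772.0   # effective solar temperature (K)
    r_sun = 6.96e10  # solar radius (cm)
    L_sun = 4 * pi * r_sun**2 * sb * T_sun**4  # Stefan-Boltzmann law
    print("L_sun ~ %.2e erg/s" % L_sun)        # ~3.8e33 erg/s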
|
"""Parser for the command line arguments."""
import argparse
import datetime
import re
import shutil
import string
import sys
import textwrap
from typing import cast, List, Set, Tuple
from dateutil.parser import parse
from pygments.styles import get_all_styles
from .config import read_config_file, write_config_file, validate_config_file
ARGUMENTS = ("@", "+", "-@", "-+", "-a", "--all", "-b", "--blocked", "-c", "--config-file", "-d", "--due",
"-f", "--file", "-g", "--groupby", "-h", "--help", "-n", "--number", "-o", "--overdue",
"-p", "--priority", "-r", "--reference", "-s", "--style", "-u", "--open-urls", "-V", "--version")
REFERENCE_CHOICES = ("always", "never", "multiple")
GROUPBY_CHOICES = ("context", "duedate", "priority", "project", "source")
class NextActionArgumentParser(argparse.ArgumentParser):
"""Command-line argument parser for Next-action."""
def __init__(self, version: str = "?") -> None:
"""Initialize the parser."""
super().__init__(
usage=textwrap.fill("next-action [-h] [-V] [-c [<config.cfg>] | -w] [-f <todo.txt> ...] [-b] [-g "
"[<group>]] [-l] [-r <ref>] [-s [<style>]] [-a | -n <number>] [-d [<due date>] | -o] "
"[-p [<priority>]] [-u] [--] [<context|project> ...]",
width=shutil.get_terminal_size().columns - len("usage: ")),
description="Show the next action in your todo.txt. The next action is selected from the tasks in the "
"todo.txt file based on task properties such as priority, due date, and creation date. Limit "
"the tasks from which the next action is selected by specifying contexts the tasks must have "
"and/or projects the tasks must belong to.",
epilog="Use -- to separate options with optional arguments from contexts and projects, in order to handle "
"cases where a context or project is mistaken for an argument to an option.",
formatter_class=CapitalisedHelpFormatter)
self.__default_filenames = ["~/todo.txt"]
self.add_optional_arguments(version)
self.add_configuration_options()
self.add_input_options()
self.add_output_options()
self.add_number_options()
self.add_filter_arguments()
def add_optional_arguments(self, version: str) -> None:
"""Add the optional arguments to the parser."""
self._optionals.title = self._optionals.title.capitalize() if self._optionals.title else None
self.add_argument(
"-V", "--version", action="version", version=f"%(prog)s {version}")
def add_configuration_options(self) -> None:
"""Add the configuration options to the parser."""
config_group = self.add_argument_group("Configuration options")
config_group.add_argument(
"-c", "--config-file", metavar="<config.cfg>", type=str, default="~/.next-action.cfg", nargs="?",
help="filename of configuration file to read (default: %(default)s); omit filename to not read any "
"configuration file")
config_group.add_argument(
"-w", "--write-config-file", help="generate a sample configuration file and exit", action="store_true")
def add_input_options(self) -> None:
"""Add the input options to the parser."""
input_group = self.add_argument_group("Input options")
input_group.add_argument(
"-f", "--file", action="append", metavar="<todo.txt>", default=self.__default_filenames[:], type=str,
help="filename of todo.txt file to read; can be '-' to read from standard input; argument can be "
"repeated to read tasks from multiple todo.txt files (default: ~/todo.txt)")
def add_output_options(self) -> None:
"""Add the output/styling options to the parser."""
output_group = self.add_argument_group("Output options")
output_group.add_argument(
"-b", "--blocked", help="show the tasks blocked by the next action, if any (default: %(default)s)",
action="store_true")
output_group.add_argument(
"-g", "--groupby", choices=GROUPBY_CHOICES, default=None, nargs="?", metavar="<group>",
help=f"group the next actions; available groups: {', '.join(GROUPBY_CHOICES)} (default: %(default)s)")
output_group.add_argument(
"-l", "--line-number", action="store_true",
help="reference next actions with the line number in their todo.txt file (default: %(default)s)")
output_group.add_argument(
"-r", "--reference", choices=REFERENCE_CHOICES, default="multiple",
help="reference next actions with the name of their todo.txt file (default: when reading multiple "
"todo.txt files)")
styles = sorted(list(get_all_styles()))
output_group.add_argument(
"-s", "--style", metavar="<style>", choices=styles, default=None, nargs="?",
help=f"colorize the output; available styles: {', '.join(styles)} (default: %(default)s)")
output_group.add_argument(
"-u", "--open-urls", help="open the urls in the next actions, if any (default: %(default)s)",
action="store_true")
def add_number_options(self) -> None:
"""Add the number options to the parser."""
number_group = self.add_argument_group("Show multiple next actions")
number = number_group.add_mutually_exclusive_group()
number.add_argument(
"-a", "--all", default=1, action="store_const", dest="number", const=sys.maxsize,
help="show all next actions")
number.add_argument(
"-n", "--number", metavar="<number>", type=number_type, default=1,
help="number of next actions to show (default: %(default)s)")
def add_filter_arguments(self) -> None:
"""Add the filter arguments to the parser."""
filters = self.add_argument_group("Limit the tasks from which the next actions are selected")
# List contexts or projects in the current todo.txt file(s), for tab completion
filters.add_argument(
"--list-arguments", help=argparse.SUPPRESS)
date = filters.add_mutually_exclusive_group()
date.add_argument(
"-d", "--due", metavar="<due date>", type=date_type, nargs="?", const=datetime.date.max,
help="show only next actions with a due date; if a date is given, show only next actions due on or "
"before that date")
date.add_argument("-o", "--overdue", help="show only overdue next actions", action="store_true")
filters.add_argument(
"-p", "--priority", metavar="<priority>", choices=string.ascii_uppercase, nargs="?",
help="minimum priority (A-Z) of next actions to show (default: %(default)s)")
# Collect all context and project arguments in one list:
filters.add_argument(
"filters", metavar="<context|project>", help=argparse.SUPPRESS, nargs="*", type=filter_type)
filters.add_argument(
"contexts", metavar="@<context> ...", nargs="*", type=filter_type,
help="contexts the next action must have")
filters.add_argument(
"projects", metavar="+<project> ...", nargs="*", type=filter_type,
help="projects the next action must be part of; if repeated the next action must be part of at least one "
"of the projects")
filters.add_argument(
"excluded_contexts", metavar="-@<context> ...", nargs="*", type=filter_type,
help="contexts the next action must not have")
filters.add_argument(
"excluded_projects", metavar="-+<project> ...", nargs="*", type=filter_type,
help="projects the next action must not be part of")
def parse_args(self, args=None, namespace=None) -> argparse.Namespace: # type: ignore
"""Parse the command-line arguments."""
namespace, remaining_args = self.parse_known_args(args, namespace)
self.parse_remaining_args(remaining_args, namespace)
namespace.contexts = subset(namespace.filters, "@")
namespace.projects = subset(namespace.filters, "+")
namespace.excluded_contexts = subset(namespace.filters, "-@")
namespace.excluded_projects = subset(namespace.filters, "-+")
self.validate_arguments(namespace)
if getattr(namespace, "config_file", self.get_default("config_file")) is not None:
self.process_config_file(namespace)
self.fix_filenames(namespace)
if namespace.write_config_file:
write_config_file(namespace)
self.exit()
return namespace
def parse_remaining_args(self, args, namespace: argparse.Namespace) -> None:
"""Parse the remaining command-line arguments, i.e. the excluded contexts and projects."""
try:
namespace.filters.extend([filter_type(arg) for arg in args])
except argparse.ArgumentTypeError as reason:
self.error(str(reason))
def validate_arguments(self, namespace: argparse.Namespace) -> None:
"""Validate arguments."""
if any(value == "" for value in namespace.contexts | namespace.excluded_contexts):
self.error("argument <context|project>: context name missing")
if any(value == "" for value in namespace.projects | namespace.excluded_projects):
self.error("argument <context|project>: project name missing")
for value in namespace.contexts:
if value in namespace.excluded_contexts:
self.error(f"@{value} is both included and excluded")
for value in namespace.projects:
if value in namespace.excluded_projects:
self.error(f"+{value} is both included and excluded")
def process_config_file(self, namespace: argparse.Namespace) -> None:
"""Process the configuration file."""
config_filename = namespace.config_file
config = read_config_file(config_filename, self.get_default("config_file"), self.error)
if not config:
return
validate_config_file(config, config_filename, self.error)
self.insert_config(config, namespace)
def insert_config(self, config, namespace: argparse.Namespace) -> None:
"""Insert the configured parameters in the namespace, if no command line arguments are present."""
if self.arguments_not_specified("-f", "--file"):
filenames = config.get("file", [])
if isinstance(filenames, str):
filenames = [filenames]
getattr(namespace, "file").extend(filenames)
if self.arguments_not_specified("-n", "--number", "-a", "--all"):
number = sys.maxsize if config.get("all", False) else config.get("number", self.get_default("number"))
setattr(namespace, "number", number)
for argument in [("-b", "--blocked"), ("-g", "--groupby"), ("-l", "--line-number"), ("-p", "--priority"),
("-r", "--reference"), ("-s", "--style"), ("-u", "--open-urls")]:
if self.arguments_not_specified(*argument):
parameter = argument[1].lstrip("-").replace("-", "_")
value = config.get(parameter, self.get_default(parameter))
setattr(namespace, parameter, value)
self.insert_configured_filters(config, namespace)
@staticmethod
def insert_configured_filters(config, namespace: argparse.Namespace) -> None:
"""Insert the configured filters in the namespace, if no matching command line filters are present."""
filters = config.get("filters", [])
if isinstance(filters, str):
filters = re.split(r"\s", filters)
for configured_filter in filters:
if configured_filter.startswith("@") and configured_filter[len("@"):] not in namespace.excluded_contexts:
namespace.contexts.add(configured_filter[len("@"):])
if configured_filter.startswith("+") and configured_filter[len("+"):] not in namespace.excluded_projects:
namespace.projects.add(configured_filter[len("+"):])
if configured_filter.startswith("-@") and configured_filter[len("-@"):] not in namespace.contexts:
namespace.excluded_contexts.add(configured_filter[len("-@"):])
if configured_filter.startswith("-+") and configured_filter[len("-+"):] not in namespace.projects:
namespace.excluded_projects.add(configured_filter[len("-+"):])
@staticmethod
def arguments_not_specified(*arguments: str) -> bool:
"""Return whether any of the arguments was specified on the command line."""
return not any([command_line_arg.startswith(argument) for argument in arguments
for command_line_arg in sys.argv])
def fix_filenames(self, namespace: argparse.Namespace) -> None:
"""Fix the filenames."""
# Work around the issue that the "append" action doesn't overwrite defaults.
# See https://bugs.python.org/issue16399.
filenames = namespace.file[:]
default_filenames = self.__default_filenames
if default_filenames != filenames:
for default_filename in default_filenames:
filenames.remove(default_filename)
# Remove duplicate filenames while maintaining order.
namespace.file = list(dict.fromkeys(filenames))
class CapitalisedHelpFormatter(argparse.HelpFormatter):
"""Capitalise the usage string."""
def add_usage(self, usage, actions, groups, prefix=None):
"""Insert a capitalised usage string."""
return super().add_usage(usage, actions, groups, prefix or "Usage: ")
def filter_type(value: str) -> str:
"""Return the filter if it's valid, else raise an error."""
if value.startswith("@") or value.startswith("+") or value.startswith("-@") or value.startswith("-+"):
return value
raise argparse.ArgumentTypeError(f"unrecognized argument: {value}")
def date_type(value: str) -> datetime.date:
"""Return the date if it's valid, else raise an error."""
relative_days = dict(yesterday=-1, today=0, tomorrow=1)
if value.lower() in relative_days:
return datetime.date.today() + datetime.timedelta(days=relative_days[value.lower()])
try:
date_time, remaining_tokens = cast(Tuple, parse(value, fuzzy_with_tokens=True, ignoretz=True))
if not remaining_tokens:
return date_time.date()
except ValueError:
pass
raise argparse.ArgumentTypeError(f"invalid date: {value}")
def number_type(value: str) -> int:
"""Return the value if it's positive, else raise an error."""
try:
number = int(value)
if number > 0:
return number
except ValueError:
pass
raise argparse.ArgumentTypeError(f"invalid number: {value}")
def subset(filters: List[str], prefix: str) -> Set[str]:
"""Return a subset of the filters based on prefix."""
return set(f[len(prefix):] for f in filters if f.startswith(prefix))
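# For example (hypothetical calls): subset(["@home", "+read", "-@work"], "@")
# returns {"home"}, and subset(["@home", "+read", "-@work"], "-@") returns {"work"}.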
|
# coding: utf-8
import logging
from collections import OrderedDict
from django.db.models import Prefetch, Q
from django.utils.translation import ugettext_lazy as _
from future.utils import viewitems
logger = logging.getLogger(__name__)
class ColumnChoice(object):
def __init__(self, model, key, label, lookup, heading=None, lookup_kwargs={}):
self._model = model
self._key = '.'.join([model.__name__, key, ]) if model else key
self._label = label
self._lookup = lookup
self._heading = heading if heading is not None else label
self._lookup_kwargs = lookup_kwargs
@classmethod
def coerce(cls, instances):
lookup = {col.get_key(): col for col in instances}
return lambda key: lookup.get(key, None)
def convert_instance_from_measure(self, measure, default=None):
from main.models import Assay, Line, Measurement, Protocol, Study
try:
return {
Assay: measure.assay,
Line: measure.assay.line,
Measurement: measure,
Protocol: measure.assay.protocol,
Study: measure.assay.line.study,
}.get(self._model, default)
except AttributeError:
return default
def convert_instance_from_line(self, line, protocol, default=None):
from main.models import Line, Protocol, Study
try:
return {
Line: line,
Protocol: protocol or default,
Study: line.study,
None: line,
}.get(self._model, default)
except AttributeError:
return default
def get_field_choice(self):
return (self._key, self._label)
def get_heading(self):
return self._heading
def get_key(self):
return self._key
def get_value(self, instance, **kwargs):
try:
lookup_kwargs = {}
lookup_kwargs.update(**self._lookup_kwargs)
lookup_kwargs.update(**kwargs)
return self._lookup(instance, **lookup_kwargs)
except Exception as e:
logger.exception('Failed to get column value: %s', e)
return ''
class EmptyChoice(ColumnChoice):
""" Always inserts an empty value on lookup callback. """
def __init__(self):
super(EmptyChoice, self).__init__(str, '', '', lambda x: '')
class ExportSelection(object):
""" Object used for selecting objects for export. """
def __init__(self, user, exclude_disabled=True,
studyId=[], lineId=[], assayId=[], measureId=[]):
# cannot import these at top-level
from main import models
def Q_active(**kwargs):
""" Conditionally returns a QuerySet Q filter if exclude_disabled flag is set. """
if exclude_disabled:
return Q(**kwargs)
return Q()
# check studies linked to incoming IDs for permissions
matched_study = models.Study.objects.filter(
(Q(pk__in=studyId) & Q_active(active=True)) |
(Q(line__in=lineId) & Q_active(line__active=True)) |
(Q(line__assay__in=assayId) & Q_active(line__assay__active=True)) |
(Q(line__assay__measurement__in=measureId) &
Q_active(line__assay__measurement__active=True))
).distinct(
).prefetch_related(
'userpermission_set',
'grouppermission_set',
'everyonepermission_set',
)
self._allowed_study = [s for s in matched_study if s.user_can_read(user)]
# load all matching measurements
self._measures = models.Measurement.objects.filter(
# all measurements are from visible study
Q(assay__line__study__in=self._allowed_study),
# OR grouping finds measurements under one of passed-in parameters
Q(assay__line__study__in=studyId) |
(Q(assay__line__in=lineId) & Q_active(assay__line__active=True)) |
(Q(assay__in=assayId) & Q_active(assay__active=True)) |
(Q(pk__in=measureId) & Q_active(active=True)),
).order_by(
'assay__protocol_id'
).select_related(
'measurement_type',
'x_units',
'y_units',
'update_ref__mod_by',
'experimenter',
'assay__experimenter',
'assay__protocol',
'assay__line__contact',
'assay__line__experimenter',
'assay__line__study__contact',
)
# TODO: use Prefetch for measurement_type with django-model-utils
# type_queryset = models.MeasurementType.objects.select_subclasses(models.ProteinIdentifier)
# self._measures.prefetch_related(Prefetch('measurement_type', queryset=type_queryset))
self._assays = models.Assay.objects.filter(
Q(line__study__in=self._allowed_study),
(Q(line__in=lineId) & Q_active(line__active=True)) |
(Q(pk__in=assayId) & Q_active(active=True)) |
(Q(measurement__in=measureId) & Q_active(measurement__active=True)),
).distinct(
).select_related(
'protocol',
)
self._lines = models.Line.objects.filter(
Q(study__in=self._allowed_study),
Q(study__in=studyId) |
(Q(pk__in=lineId) & Q_active(active=True)) |
(Q(assay__in=assayId) & Q_active(assay__active=True)) |
(Q(assay__measurement__in=measureId) & Q_active(assay__measurement__active=True)),
).distinct(
).select_related(
'experimenter__userprofile', 'updated',
).prefetch_related(
Prefetch('strains', queryset=models.Strain.objects.order_by('id')),
Prefetch('carbon_source', queryset=models.CarbonSource.objects.order_by('id')),
)
@property
def studies(self):
""" List of studies allowed to be viewed in the selection. """
return self._allowed_study
@property
def study_columns(self):
from main.models import Study
return Study.export_columns(self.studies)
@property
def lines(self):
""" A queryset of lines included in the selection. """
return self._lines
@property
def line_columns(self):
from main.models import Line
return Line.export_columns(self.lines)
@property
def assays(self):
""" A queryset of assays included in the selection. """
return self._assays
@property
def assay_columns(self):
from main.models import Assay
return Assay.export_columns(self.assays)
@property
def measurements(self):
""" A queryset of measurements to include. """
# TODO: add in empty measurements for assays that have none
return self._measures
@property
def measurements_list(self):
if not hasattr(self, '_measures_list'):
self._measures_list = list(self._measures)
return self._measures_list
class ExportOption(object):
""" Object used for options on a table export. """
DATA_COLUMN_BY_LINE = 'dbyl'
DATA_COLUMN_BY_POINT = 'dbyp'
LINE_COLUMN_BY_DATA = 'lbyd'
LAYOUT_CHOICE = (
(DATA_COLUMN_BY_LINE, _('columns of metadata types, and rows of lines/assays')),
(DATA_COLUMN_BY_POINT, _('columns of metadata types, and rows of single points')),
(LINE_COLUMN_BY_DATA, _('columns of lines/assays, and rows of metadata types')),
)
COMMA_SEPARATED = ','
COMMA_SEPARATED_TOKEN = ','
TAB_SEPARATED = '\t'
TAB_SEPARATED_TOKEN = '\\t'
# need to choose value tokens that can be displayed as HTML
SEPARATOR_CHOICE = (
(COMMA_SEPARATED_TOKEN, _('Comma-separated (CSV)')),
(TAB_SEPARATED_TOKEN, _('Tab-separated')),
)
SEPARATOR_LOOKUP = {
COMMA_SEPARATED_TOKEN: COMMA_SEPARATED,
TAB_SEPARATED_TOKEN: TAB_SEPARATED,
}
ALL_DATA = 'all'
SUMMARY_DATA = 'summary'
NONE_DATA = 'none'
FORMAT_CHOICE = (
(ALL_DATA, _('All')),
(SUMMARY_DATA, _('Summarize')),
(NONE_DATA, _('None')),
)
def __init__(self, layout=DATA_COLUMN_BY_LINE, separator=COMMA_SEPARATED, data_format=ALL_DATA,
line_section=False, protocol_section=False, columns=[], blank_columns=[],
blank_mod=0):
self.layout = layout
self.separator = separator
self.data_format = data_format
self.line_section = line_section
self.protocol_section = protocol_section
self.columns = columns
self.blank_columns = blank_columns
self.blank_mod = blank_mod
@classmethod
def coerce_separator(cls, value):
return cls.SEPARATOR_LOOKUP.get(value, cls.COMMA_SEPARATED)
def value_str(value):
""" used to format value lists to a colon-delimited (unicode) string """
# cast to float to remove 0-padding
return ':'.join(map(str, map(float, value)))
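# e.g. value_str(['1', '2.50']) returns '1.0:2.5' (a hypothetical call; the
# float cast drops the zero-padding before the values are joined with colons)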
class CellQuote(object):
""" Object defining how to quote table cell values. """
def __init__(self, always_quote=False, separator_string=',', quote_string='"'):
""" Defines how to quote values.
:param always_quote: if True, always quote values, instead of conditionally quote
:param separator_string: sequence that separates cell values, requiring quotation
:param quote_string: sequence used to surround quoted values
"""
self.always_quote = always_quote
self.separator_string = separator_string
self.quote_string = quote_string
def quote(self, value):
""" Quotes a value based on object parameters. """
if self.always_quote or self.separator_string in value:
# wrap in quotes, replace any quote sequences with a doubled sequence
return '%(quote)s%(value)s%(quote)s' % {
'quote': self.quote_string,
'value': value.replace(self.quote_string, self.quote_string * 2),
}
return value
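# A brief sketch of CellQuote behavior on hypothetical values:
#   CellQuote(separator_string=',').quote('a,b')    -> '"a,b"'
#   CellQuote(separator_string=',').quote('ab')     -> 'ab'
#   CellQuote(always_quote=True).quote('say "hi"')  -> '"say ""hi"""'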
class TableExport(object):
""" Outputs tables for export of EDD objects. """
def __init__(self, selection, options, worklist=None):
self.selection = selection
self.options = options
self.worklist = worklist
self._x_values = {}
def output(self):
""" Builds the CSV of the table export output. """
        # store tables; a protocol PK keys the table for measurements under that
        # protocol, 'line' keys the line-only section (if enabled), and 'all'
        # keys the table that includes everything
tables = OrderedDict()
if self.options.line_section:
tables['line'] = OrderedDict()
tables['line']['header'] = self._output_line_header()
if not self.options.protocol_section:
tables['all'] = OrderedDict()
tables['all']['header'] = self._output_header()
self._do_export(tables)
return self._build_output(tables)
def _build_output(self, tables):
layout = self.options.layout
table_separator = '\n\n'
row_separator = '\n'
cell_separator = self.options.separator
cell_format = CellQuote(separator_string=cell_separator)
if layout == ExportOption.DATA_COLUMN_BY_POINT:
# data is already in correct orientation, join and return
return table_separator.join([
row_separator.join([
cell_separator.join(map(cell_format.quote, rrow))
for rkey, rrow in viewitems(ttable)
]) for tkey, ttable in viewitems(tables)
])
# both LINE_COLUMN_BY_DATA and DATA_COLUMN_BY_LINE are constructed similarly
# each table in LINE_COLUMN_BY_DATA is transposed
out = []
for tkey, table in viewitems(tables):
# sort x values by original numeric values
all_x = sorted(list(self._x_values.get(tkey, {}).items()), key=lambda a: a[1])
# generate header row
rows = [list(map(str, table['header'] + [x[0] for x in all_x]))]
# go through non-header rows; unsquash final column
for rkey, row in list(table.items())[1:]:
unsquash = self._output_unsquash(all_x, row[-1:][0])
rows.append(list(map(str, row[:-1] + unsquash)))
# do the transpose here if needed
if layout == ExportOption.LINE_COLUMN_BY_DATA:
rows = zip(*rows)
# join the cells
rows = [cell_separator.join(map(cell_format.quote, row)) for row in rows]
# join the rows
out.append(row_separator.join(rows))
return table_separator.join(out)
def _do_export(self, tables):
from main.models import Assay, Line, Measurement, MeasurementValue, Protocol, Study
# add data from each exported measurement; already sorted by protocol
value_qs = MeasurementValue.objects.select_related('updated').order_by('x')
measures = self.selection.measurements.prefetch_related(
Prefetch('measurementvalue_set', queryset=value_qs, to_attr='pf_values'),
Prefetch('assay__line__strains'),
Prefetch('assay__line__carbon_source'),
)
for measurement in measures:
assay = measurement.assay
protocol = assay.protocol
line = assay.line
if self.options.line_section:
line_only = [Line, Study, ]
other_only = [Assay, Measurement, Protocol, ]
# add row to line table w/ Study, Line columns only
if line.id not in tables['line']:
row = self._output_row_with_measure(measurement, models=line_only)
tables['line'][line.id] = row
# create row for protocol/all table w/ Protocol, Assay, Measurement columns only
row = self._output_row_with_measure(measurement, models=other_only)
else:
# create row for protocol/all table
row = self._output_row_with_measure(measurement)
table, table_key = self._init_tables_for_protocol(tables, protocol)
values = measurement.pf_values # prefetched above
if self.options.layout == ExportOption.DATA_COLUMN_BY_POINT:
for value in values:
arow = row[:]
arow.append(value_str(value.x))
arow.append(value_str(value.y))
table[value.id] = arow
else:
# keep track of all x values encountered in the table
xx = self._x_values[table_key] = self._x_values.get(table_key, {})
# do value_str to the float-casted version of x to eliminate 0-padding
xx.update({value_str(v.x): v.x for v in values})
squashed = {value_str(v.x): value_str(v.y) for v in values}
row.append(squashed)
table[measurement.id] = row
def _init_tables_for_protocol(self, tables, protocol):
if self.options.protocol_section:
if protocol.id not in tables:
tables[protocol.id] = OrderedDict()
header = []
if self.options.line_section:
header += self._output_measure_header()
else:
header += self._output_header()
tables[protocol.id]['header'] = header
table_key = protocol.id
else:
table_key = 'all'
table = tables[table_key]
return (table, table_key)
def _output_header(self, models=None):
row = []
for column in self.options.columns:
if models is None or column._model in models:
row.append(column.get_heading())
if self.options.layout == ExportOption.DATA_COLUMN_BY_POINT:
row.append('X')
row.append('Y')
return row
def _output_line_header(self):
from main.models import Line, Study
return self._output_header([Line, Study, ])
def _output_row_with_line(self, line, protocol, models=None, columns=None, **kwargs):
row = []
if columns is None:
columns = self.options.columns
for i, column in enumerate(columns):
if models is None or column._model in models:
instance = column.convert_instance_from_line(line, protocol)
row.append(column.get_value(instance, **kwargs))
return row
def _output_row_with_measure(self, measure, models=None):
row = []
for column in self.options.columns:
if models is None or column._model in models:
instance = column.convert_instance_from_measure(measure)
row.append(column.get_value(instance))
return row
def _output_measure_header(self):
from main.models import Assay, Measurement, Protocol
return self._output_header([Assay, Measurement, Protocol, ])
def _output_unsquash(self, all_x, squashed):
# all_x is list of 2-tuple from dict.items()
if isinstance(squashed, dict):
return [squashed.get(x[0], '') for x in all_x]
# expecting a list to be returned
return [squashed]
class WorklistExport(TableExport):
""" Outputs tables for line worklists. """
def __init__(self, selection, options, worklist=None):
super(WorklistExport, self).__init__(selection, options)
self.worklist = worklist
def output(self):
# store tables
tables = OrderedDict()
tables['all'] = OrderedDict()
tables['all']['header'] = self._output_header()
if self.worklist and self.worklist.protocol:
self._do_worklist(tables)
return self._build_output(tables)
def _do_worklist(self, tables):
# if export is a worklist, go off of lines instead of measurements
lines = self.selection.lines
protocol = self.worklist.protocol
table = tables['all']
# lines is a QuerySet of the lines to use in worklist creation
for i, line in enumerate(lines):
# build row with study/line info
row = self._output_row_with_line(line, protocol)
table[str(line.pk)] = row
# when modulus set, insert 'blank' row every modulus rows
if self.options.blank_mod and not (i + 1) % self.options.blank_mod:
blank = self._output_row_with_line(
None, protocol, columns=self.options.blank_columns
)
table['blank%s' % i] = blank
|
from cassandra.cluster import Cluster
from collections import Counter
from itertools import combinations
#import matplotlib.pyplot as plt
import csv
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
cluster = Cluster()
session = cluster.connect('wdm')
session.set_keyspace('wdm')
#query 3-popular state
rows = session.execute('SELECT state,city,month FROM popularstate')
pop_state = []
temp = []
for user_row in rows:
state = user_row.state
month = user_row.month
temp = [state, month]
pop_state.append(temp)
popState = Counter()
for sub in pop_state:
for comb in combinations(sub,2):
popState[comb] += 1
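# e.g. with hypothetical rows [['IL', 'May'], ['IL', 'May'], ['NY', 'May']],
# the loop above yields Counter({('IL', 'May'): 2, ('NY', 'May'): 1})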
print(popState.most_common())
#popular city
rows = session.execute('SELECT state,city,month FROM popularcity')
pop_state = []
pop_city = []
temp = []
for user_row in rows:
state = user_row.state
city = user_row.city
month = user_row.month
temp = [state, month]
pop_state.append(temp)
temp = [city, state, month]
pop_city.append(temp)
popCity = Counter()
for sub in pop_city:
for comb in combinations(sub,3):
popCity[comb] += 1
print(popCity.most_common())
#query 5-analysis of type of room over time
rows = session.execute('SELECT year,room_type FROM roomtrend')
types = []
rooms = ['Entire home', 'Private room', 'Shared room']
for row in rows:
#room = rooms[int(row.room_type)-1]
room = row.room_type
year = row.year
temp = [room, year]
types.append(temp)
popType = Counter()
for sub in types:
for comb in combinations(sub,2):
popType[comb] += 1
types_count = popType.most_common()
with open('data1.csv', 'w') as myfile:
wr = csv.writer(myfile)
wr.writerow(types_count)
#query 1-frequent words in each listing
rows = session.execute('SELECT listing_id,comments FROM reviews')
reviews = []
for row in rows:
listing = row.listing_id
comment = row.comments
temp = [listing, comment]
reviews.append(temp)
labels = ['id', 'review']
reviewall = np.array(reviews)
df = pd.DataFrame(reviewall, columns=labels)
df[['id']] = df[['id']].apply(pd.to_numeric)
df[['review']] = df[['review']].astype(str)
df1 = df.groupby('id', as_index=False).agg(lambda x: ','.join(x))
df1['w1'] = 'NA'
df1['w2'] = 'NA'
df1['w3'] = 'NA'
df1['w4'] = 'NA'
df1['w5'] = 'NA'
stop_words = set(stopwords.words('english'))
stop_words.add('I')
stop_words.add('The')
stop_words.add('\'s')
#stop_words.add('She')
for i in range(len(df1)):
    nstr = re.sub(r'[?|$|.|!|,]', r'', df1['review'][i])
    word_tokens = word_tokenize(nstr)
    filtered_sentence = [w for w in word_tokens if w not in stop_words]
fre = Counter(" ".join(filtered_sentence).split()).most_common(5)
l = len(fre)
    # fill w1..w5 with however many top words exist; this also avoids an
    # IndexError when nothing remains after stop-word filtering
    for k in range(min(l, 5)):
        df1['w%d' % (k + 1)][i] = fre[k][0]
|
# https://practice.geeksforgeeks.org/problems/min-cost-climbing-stairs/1#
# Approach: memoize the min cost of the last two steps and use it to compute the next step,
# then update the two stored costs and move on.
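# Hypothetical walk-through with cost = [10, 15, 20]:
#   i=0: min_cost = 10 + min(0, 0)   = 10 -> (mc1, mc2) = (0, 10)
#   i=1: min_cost = 15 + min(0, 10)  = 15 -> (mc1, mc2) = (10, 15)
#   i=2: min_cost = 20 + min(10, 15) = 30 -> (mc1, mc2) = (15, 30)
#   answer: min(15, 30) = 15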
class Solution:
def minCostClimbingStairs(self, cost, N):
mc1 = 0
mc2 = 0
for i in range(N):
min_cost = cost[i] + min(mc1, mc2)
mc1, mc2 = mc2, min_cost
return min(mc1,mc2)
if __name__ == '__main__':
    t = int(input())
    for _ in range(t):
        N = int(input())
        cost = list(map(int, input().split()))
        ob = Solution()
        print(ob.minCostClimbingStairs(cost, N)) |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2019 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Migrations for dataset."""
def migrate_dataset(data):
"""Migrate from old dataset formats."""
if data.get('@type') != 'dctypes:Dataset':
return data
data['creator'] = data.pop('authors', {})
for file_name, file_ in data.get('files', {}).items():
file_['creator'] = file_.pop('authors', {})
return data
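# A minimal sketch of the migration on hypothetical data:
#   >>> old = {'@type': 'dctypes:Dataset',
#   ...        'authors': {'name': 'Jane'},
#   ...        'files': {'a.txt': {'authors': {'name': 'Jane'}}}}
#   >>> new = migrate_dataset(old)
#   >>> new['creator']
#   {'name': 'Jane'}
#   >>> new['files']['a.txt']['creator']
#   {'name': 'Jane'}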
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The definition of the base classes Host and VM which
provide the necessary primitives to interact with a vm and
test a submission bundle."""
from __future__ import with_statement
# Use simplejson or Python 2.6 json, prefer simplejson.
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import time
import logging
import signal
import shlex
from threading import Thread
from subprocess import Popen, PIPE, STDOUT
from vmchecker.config import VirtualMachineConfig
_logger = logging.getLogger('vm_executor')
class Host(object):
def __init__(self):
pass
def executeCommand(self, cmd, path = None):
_logger.debug('Running command: %s' % cmd)
p = Popen([cmd], stdout=PIPE, stderr=STDOUT, shell = True, cwd = path)
output = p.stdout.read()
_logger.debug('Command output: %s' % output)
return output
def getVM(self, bundle_dir, sb_cfg):
vm = VM(self, bundle_dir, sb_cfg)
        return vm
def start_host_commands(self, jobs_path, host_commands):
"""Run a set of commands on the tester (host) machine"""
host_command_data = []
for host_command_out in host_commands:
out_file = 'run-km.vmr' # Default file name
if host_command_out.rfind('>') != -1:
out_file = host_command_out.split('>')[1].strip()
host_command = host_command_out.split('>')[0].strip()
if len(host_command) == 0:
continue
_logger.info('%%% -- starting host commands [' + host_command + '] >> ' + out_file)
outf = open(os.path.join(jobs_path, out_file), 'a', buffering = 0)
            try:
                proc = Popen([host_command], stdout=outf, cwd=jobs_path,
                             stderr=STDOUT, close_fds=True, shell=True, bufsize=0,
                             preexec_fn=os.setsid)
            except Exception:
                # proc would be unbound here; skip this command instead of crashing
                _logger.exception('HOSTPROC: opening process: ' + host_command)
                continue
            host_command_data.append((proc, outf))
if len(host_command_data) == 0:
return None
return host_command_data
def stop_host_commands(self, host_commands_data):
"""Stop previously run host commands"""
        if host_commands_data is None:
return
for host_command_data in host_commands_data:
(proc, outf) = host_command_data
_logger.info('%%% -- stopping host command writing to file [' + outf.name + ']')
            try:
                os.killpg(proc.pid, signal.SIGTERM)
                outf.close()
            except Exception:
                _logger.exception('HOSTPROC: while stopping host cmds')
_logger.info("%%% -- stopped host commands")
class VM(object):
host = None
path = None
username = None
password = None
IP = None
def __init__(self, host, bundle_dir, sb_cfg):
self.host = host
self.bundle_dir = bundle_dir
self.sb_cfg = sb_cfg
self.machinecfg = VirtualMachineConfig(sb_cfg, 'Machine')
self.error_fname = os.path.join(bundle_dir, 'vmchecker-stderr.vmr')
self.shell = self.machinecfg.guest_shell_path()
self.username = self.machinecfg.guest_user()
self.password = self.machinecfg.guest_pass()
def executeCommand(self, cmd):
# host.executeCommand(...)
pass
def executeNativeCommand(self, cmd):
# there is no default need for native commands
return self.executeCommand(cmd)
def hasStarted(self):
return False
def hasStopped(self):
return False
def start(self):
pass
def stop(self):
pass
def revert(self, number = None):
pass
def copyTo(self, targetDir, sourceDir, files):
pass
def copyFrom(self, targetDir, sourceDir, files):
pass
def run(self, shell, executable_file, timeout):
pass
def runTest(self, bundle_dir, machinecfg, test):
"""Return False if an exception is thrown or the tests timeout."""
try:
files_to_copy = test['input'] + test['script']
guest_dest_dir = machinecfg.guest_base_path()
self.copyTo(bundle_dir,guest_dest_dir,files_to_copy)
for script in test['script']:
shell = machinecfg.guest_shell_path()
dest_in_guest_shell = machinecfg.guest_home_in_shell()
script_in_guest_shell = dest_in_guest_shell + script
timedout = self.run(shell,script_in_guest_shell,test['timeout'])
self.copyFrom(guest_dest_dir,bundle_dir,test['output'])
if timedout:
return False
return True
except Exception as e:
_logger.exception('Exception thrown in runTest(): ' + type(e).__name__ + "\n" + ", ".join(e.args) + "\n" + e.__str__())
return False
def try_power_on_vm_and_login(self, revertSnapshot=None):
if revertSnapshot == True or \
(revertSnapshot == None and self.sb_cfg.get('Assignment', 'RevertToSnapshot')):
self.revert()
self.start()
return True
|
"""
tests.test_utils
~~~~~~~~~~~~~~~~
Test utility functions
:copyright: Copyright 2017 by ConsenSys France.
:license: BSD, see LICENSE for more details.
"""
import pytest
from cfg_loader.utils import parse_yaml, parse_yaml_file, add_prefix
def test_parse_yaml():
assert parse_yaml("""
section:
subsection:
- one
- two
""") is not None
def test_parse_yaml_file(config_path):
with pytest.raises(TypeError):
parse_yaml_file(None)
with pytest.raises(FileNotFoundError):
parse_yaml_file('unknown')
assert parse_yaml_file(config_path)
def test_add_prefix():
raw_dict = {
'key1': 'value1',
'key2': {
'key3': 'value3'
},
}
assert add_prefix(raw_dict, 'prefix_') == {
'prefix_key1': 'value1',
'prefix_key2': {
'key3': 'value3'
},
}
|
from re import match
from typing import Optional
from bs4 import BeautifulSoup
class Clubs:
def __init__(self, soup: BeautifulSoup, base_url: str) -> None:
self.soup = soup
self.base_url = base_url
def __call__(self) -> dict:
return {
"data": [
{
"name": i.select_one("a").get_text(strip=True),
"url": f"{self.base_url}{i.select_one('a').get('href')}",
"members": self.__members(i.select_one("small").get_text()),
}
for i in self.soup.find_all("div", {"class": "borderClass"})
]
}
def __members(self, string: str) -> Optional[int]:
regex = match(r"\d+", string)
return int(regex.group()) if regex else None
|
'''
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
Example:
Input: [0,1,0,3,12]
Output: [1,3,12,0,0]
Note:
You must do this in-place without making a copy of the array.
Minimize the total number of operations.
'''
def moveZeroes(nums):
cnt = nums.count(0)
nums.extend([0]*cnt)
print(nums)
j=0
for i in range(len(nums)-cnt):
i+=j
print(i, nums[i])
if nums[i] == 0:
nums.pop(i)
j-=1
print(nums)
moveZeroes([0,0,0,0,1,0,3,12])
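# A minimal alternative sketch (not part of the original solution above): the
# standard two-pointer technique also satisfies both notes, staying in-place
# while touching each element only once.
def moveZeroesTwoPointer(nums):
    last_nonzero = 0  # index where the next non-zero element belongs
    for i in range(len(nums)):
        if nums[i] != 0:
            nums[last_nonzero], nums[i] = nums[i], nums[last_nonzero]
            last_nonzero += 1

demo = [0, 1, 0, 3, 12]
moveZeroesTwoPointer(demo)
print(demo)  # [1, 3, 12, 0, 0]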
|
#
# oci-objectstorage-onsr-put-object-python version 1.0.
#
# Copyright (c) 2021 Oracle, Inc.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
import io
import os
import json
import sys
from fdk import response
import oci.object_storage
# Certs location
cert_file_path = "/etc/oci-pki/customer/customer-cert.pem"
# file to upload
file_to_upload = "cert_test"
file_to_upload_content = {"content":"This is test file"}
def handler(ctx, data: io.BytesIO=None):
try:
body = json.loads(data.getvalue())
bucketName = body["bucketName"]
except Exception:
error = """
Input a JSON object in the format: '{"bucketName": "<bucket name>",
"content": "<content>", "objectName": "<object name>"}'
"""
raise Exception(error)
signer = oci.auth.signers.get_resource_principals_signer()
client = oci.object_storage.ObjectStorageClient(config={}, signer=signer)
if os.path.exists(cert_file_path):
client.base_client.session.verify = cert_file_path
resp = put_object(client, bucketName, file_to_upload, file_to_upload_content)
return response.Response(
ctx,
response_data=json.dumps(resp),
headers={"Content-Type": "application/json"}
)
def put_object(client, bucketName, objectName, content):
namespace = client.get_namespace().data
output=""
try:
object = client.put_object(namespace, bucketName, objectName, json.dumps(content))
output = "Success: Put object '" + objectName + "' in bucket '" + bucketName + "'"
except Exception as e:
output = "Failed: " + str(e.message)
return { "state": output }
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@file : province_city_area_to_txt.py
@comments :
@time : 2022/01/24
@author : Jacob Zhou <[email protected]>
@ver : 1.0
'''
import requests
from bs4 import BeautifulSoup
import time
import json
class GetCitysToLocal(object):
def __init__(self):
        # set the data year
        self.year = 2021
        # generate the province/city/county data file
self.getSSQ()
@staticmethod
def get_response(url, attr):
response = requests.get(url)
        response.encoding = response.apparent_encoding  # convert the encoding
soup = BeautifulSoup(response.text, features="html.parser")
table = soup.find_all('tbody')[1].tbody.tbody.table
if attr:
trs = table.find_all('tr', attrs={'class': attr})
else:
trs = table.find_all('tr')
return trs
    # create a file
    # file_path: the file path
    # msg: the content to write
@staticmethod
def create_file(file_path, msg):
        with open(file_path, "a", encoding='utf-8') as f:
            json.dump(msg, f, ensure_ascii=False)
def getSSQ(self):
        # the data year
print('Get data of year - ' + str(self.year))
base_url = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/%s/' % self.year
trs = self.get_response(base_url, 'provincetr')
areas = []
        for tr in trs:  # loop over each row
            # loop over each province
i = 0
for td in tr:
if td.a is None:
continue
href_url = td.a.get('href')
province_name = td.a.get_text()
# province_code = str(href_url.split(".")[0])
province_url = base_url + href_url
i +=1
            # loop over each city
trs = self.get_response(province_url, None)
j = 0
for tr in trs[1:]:
city_code = tr.find_all('td')[0].string
city_code = city_code[0:3]
city_name = tr.find_all('td')[1].string
j +=1
                # loop over each district/county
city_url = base_url + tr.find_all('td')[1].a.get('href')
trs = self.get_response(city_url, None)
for tr in trs[1:]:
county_code = tr.find_all('td')[0].string
county_code = county_code[0:5]
county_name = tr.find_all('td')[1].string
county = {}
county["province"] = province_name
county["city"] = city_name
county["value"] = county_name
print(province_name + '-' + city_name + '-' + county_name)
areas.append(county)
                time.sleep(.5)  # wait between requests, so as not to hammer the server
        # print(str(areas))
        self.create_file('./area.json', areas)
if __name__ == '__main__':
GetCitysToLocal()
|
import os
import finder
from ml import predictor
from django.shortcuts import render
from django.views import View
from django.core.files.storage import FileSystemStorage
class Index(View):
submitted = False
def __init__(self):
self._template = 'index.html'
self._checkpoint_path = 'ml/model/checkpoint.pth'
self._predictor = predictor.Predictor(self._checkpoint_path)
self._classes = self._predictor.classes
def get(self, request):
return render(request, self._template)
def predict_image(self, request):
image_obj = request.FILES.get('filePath', None)
# if user did not upload an image, refresh
if image_obj is None:
return render(request, 'upload.html')
fs = FileSystemStorage()
# save the file and get the path
image_name = fs.get_available_name(image_obj.name)
image_path = fs.save(image_name, image_obj)
image_path = fs.url(image_path)
full_image_path = os.path.join(os.path.dirname(finder.__file__), 'static', 'media', image_name)
# get prediction
try:
pred_confs, pred_classes = self._predictor.predict(full_image_path, topk=5)
        except Exception:
context = {
                'errorMessage': 'There was an error processing your image. '
                                'Make sure it is not a corrupted image file '
                                'and that the image has no transparency layers.'
}
return render(request, 'error.html', context)
predicted_class = self._classes[pred_classes[0]]
# plot confidence scores
plot_image_name = fs.get_available_name('plot.png')
plot_image_path = os.path.join('media', plot_image_name)
full_plot_image_path = os.path.join(os.path.dirname(finder.__file__), 'static', plot_image_path)
self._predictor.plot_predictions(pred_confs, pred_classes, full_plot_image_path, topk=5)
submitted = True
# update upload.html with context
context={
'predictedLabel': predicted_class,
'imagePath': image_path,
'plotImagePath': f'/{plot_image_path}',
'submitted': submitted
}
return render(request,'upload.html',context)
def list_of_cars(self,request):
return render(request, 'listOfCars.html')
def error_404(request, exception):
return render(request, 'index.html', status=404) |
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class AverageMeterDict(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = {}
self.avg = {}
self.sum = None
self.count = 0
def update(self, in_dict, n=1):
self.val = in_dict
self.sum = in_dict if self.sum is None else dict([(key, val * n + self.sum[key]) for key, val in in_dict.items()])
self.count += n
self.avg = dict([(key, (val / self.count)) for key, val in self.sum.items()])
def __str__(self):
return " | ".join(f"{k}: {self.val[k]:.5f} ({self.avg[k]:.5f})" for k in self.val)
|
from random import randint
from colorama import Fore
from colorama import Back
from colorama import Style
import os
class game:
playGame = True
numberOfRounds = 0
    # Each challenge has its own text, a value to add to the score if completed,
    # and a value to subtract from the score if not completed.
#
# EPIC - no score decreasing upon not completing the challenge
# LEGENDARY - gives points and after that doubles total points (HARD)
# UNLUCKY - 0 points for completing, but 25 for not completing
challengesList = [
["[EPIC] (3 darts) Score more than 60 points.", 15, 0],
["[EPIC] (3 darts) Score less than 7 points - each dart MUST hit the target.", 15, 0],
["(3 darts) Score any number of points between 20 and 30, including those numbers" +
" - each dart MUST hit the target.", 10, 2],
["(1 dart) You must hit even number.", 10, 5],
["(1 dart) You must hit odd number.", 10, 5],
["(2 darts) Sum of two darts must be an even number - each dart MUST hit the target.", 8, 3],
["(2 darts) Sum of two darts must be an odd number - each dart MUST hit the target.", 8, 3],
["(5 darts) You must hit BULLSEYE !", 30, 4],
["[LEGENDARY] (1 dart) You must hit BULLSEYE ! (30 points, but then doubles total points)", 30, 0],
["[LEGENDARY] (2 darts) You must hit number 7 two times. (15 points, but then doubles total points)", 15, 0],
["(2 darts) You must hit one even and one odd number.", 15, 3],
["[UNLUCKY] (2 darts) You must hit two numbers in range 11-16. (BULLSEYE is not included)", 0, 25],
["(3 darts) You must hit two number neighbours. (e.g. 5 and 6, 18 and 17)", 12, 4]
]
@staticmethod
def printWelcome():
game.clearScreen()
print("Welcome to the Darts Challenger game!" +
"\nGame rules are following: " +
"\n\n 1) You will be given a challenge and your target is to complete it." +
"\n 2) If successful, you will be given points or even lose them if not!" +
"\n 3) Each challenge has it's own unique awarding and/or punishing system for their completion and/or failure." +
"\n 4) Player with most points, after playing selected number of rounds, is the winner!" +
"\n 5) Good luck, steady hands and have fun!")
@staticmethod
def clearScreen():
        os.system('cls' if os.name == 'nt' else 'clear')
@staticmethod
def generateChallengeId():
#seed(randint(0, 10))
value = randint(0, (len(game.challengesList)-1))
return value
@staticmethod
def playRound(gameRound):
for player in playerHelper.playerList:
game.clearScreen()
game.printScore()
print(f"\nRound: {gameRound} - {player[0]} playing")
indeksGenerated = game.generateChallengeId()
print(Fore.YELLOW)
print(f"\nChallenge: {game.challengesList[indeksGenerated][0]}\n\n")
print(Style.RESET_ALL)
if game.HeCompleted(player[0]):
player[1] += game.challengesList[indeksGenerated][1]
if "LEGENDARY" in game.challengesList[indeksGenerated][0]:
player[1] *= 2
else:
player[1] -= game.challengesList[indeksGenerated][2]
if player[1] < 0:
player[1] = 0
@staticmethod
def startGame():
for gameRound in range(1, game.numberOfRounds + 1):
game.playRound(gameRound)
@staticmethod
def selectRounds():
msg = input("\nNumber of rounds (1-16): ")
if msg.isnumeric():
if int(msg) >= 1 and int(msg) <= 16:
game.numberOfRounds = int(msg)
return
else:
print("Number of rounds must be in range 1-16.")
                game.selectRounds()
else:
print("Invalid input.")
            game.selectRounds()
@staticmethod
def printScore():
print(Back.BLACK)
header = "\n PLAYER".ljust(16) + "SCORE".ljust(7)
print(header)
for player in playerHelper.playerList:
print(f" {player[0].ljust(15)} {str(player[1]).ljust(5)}")
print(Style.RESET_ALL)
@staticmethod
def ResetScores():
for player in playerHelper.playerList:
player[1] = 0
@staticmethod
def playSomeMore():
game.clearScreen()
game.printScore()
print("\nDo you want to play again ?")
print("1) Yes")
print("2) No")
msg = input("\nYour choice: ")
if msg == "1":
game.playGame = True
game.ResetScores()
elif msg == "2":
game.playGame = False
else:
print("Invalid option.\n")
game.playSomeMore()
@staticmethod
def HeCompleted(playerName):
print(f"{playerName} completed challenge successfully ?")
print("1) Yes")
print("2) No")
msg = input("\nYour choice: ")
if msg == "1":
return True
elif msg == "2":
return False
else:
print("Invalid option.\n")
            return game.HeCompleted(playerName)
class playerHelper:
playerList = []
@staticmethod
def getPlayers():
msg = input("\nNumber of players (1-10): ")
if msg.isnumeric():
if int(msg) >= 1 and int(msg) <= 10:
playerHelper.getPlayerNames(msg)
return
else:
print("Number of players must be in range 1-10.")
playerHelper.getPlayers()
else:
print("Invalid input.")
playerHelper.getPlayers()
@staticmethod
def getPlayerNames(playerNum):
for i in range(1, int(playerNum)+1):
playerName = input(f"{i}. player name: ")
playerHelper.playerList.append([playerName, 0])
|
from lxml import etree
parser = etree.HTMLParser()
print(type(parser))
tree = etree.parse('test.html', parser)
root = tree.getroot()
result = etree.tostring(root, encoding='utf-8',
pretty_print=True, method="html")
print(str(result, 'utf-8'))
print(root.tag)
print('lang =', root.get('lang'))
print('charset =', root[0][0].get('charset'))
print('charset =', root[0][1].text)
|
'''
Read site soil moisture fields from FLUXNET-FULLSET '.csv' file and save it into '.nc' file
Todo: Transform into function; add Quality flag columns;
@aelkouk 11202020
'''
import numpy as np
import xarray as xr
import pandas as pd
import os
flx_fullset_csv = '../0_data/FLX_NL-Loo_FLUXNET2015_FULLSET_HH_1996-2014_1-4.csv'
df = pd.read_csv(flx_fullset_csv)
datevar = pd.to_datetime(df['TIMESTAMP_START'], format='%Y%m%d%H%M')
swc = np.full((3, datevar.size), -9999.)
for i in range(3):
swc_lv1 = df['SWC_F_MDS_{}'.format(i+1)].values
swc[i] = swc_lv1
ds = xr.Dataset(data_vars={'SWC':(('layer', 'time'), swc, {'long_name':'Soil water content',
'missing_value':-9999., 'units':'[%]'})},
coords={'layer':[1,2,3], 'time':datevar.values})
#{'long_name':'Soil layer index, 1 is shallowest'}
outnc = 'SWC_'+os.path.basename(flx_fullset_csv).replace('.csv', '.nc')
ds.to_netcdf(outnc)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 12 13:23:55 2021
@author: rayaabuahmad
"""
from code.preprocessing.preprocessor import Preprocessor
from code.util import COLUMN_TWEET, COLUMN_PUNCTUATION_INPUT
class HashtagMentionRemover(Preprocessor):
# constructor
def __init__(self):
# input column "tweet", new output column
super().__init__([COLUMN_TWEET], COLUMN_PUNCTUATION_INPUT)
# return a column that has the tweet without the text after hashtags and mentions
def _get_values(self, inputs):
column = []
prefixes = ['@','#']
for tweet in inputs[0]:
words = []
for word in tweet.split():
word = word.strip()
if word:
if word[0] not in prefixes:
words.append(word)
column.append(' '.join(words))
return column
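# A rough illustration of the filtering logic on a hypothetical tweet:
#   "check this out @user #cool stuff" -> "check this out stuff"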
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'interfaceTelecomunicacoes.ui'
#
# Created: Tue May 27 15:40:06 2014
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui, phonon
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(1010, 660)
MainWindow.setMinimumSize(QtCore.QSize(400, 400))
font = QtGui.QFont()
font.setWeight(50)
font.setItalic(False)
font.setUnderline(False)
font.setStrikeOut(False)
font.setBold(False)
MainWindow.setFont(font)
MainWindow.setCursor(QtCore.Qt.ArrowCursor)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../Icone.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
MainWindow.setDocumentMode(False)
MainWindow.setDockOptions(QtGui.QMainWindow.AllowTabbedDocks|QtGui.QMainWindow.AnimatedDocks)
MainWindow.setUnifiedTitleAndToolBarOnMac(False)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_3 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_3.setObjectName("gridLayout_3")
self.lineSearch = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineSearch.sizePolicy().hasHeightForWidth())
self.lineSearch.setSizePolicy(sizePolicy)
self.lineSearch.setMaximumSize(QtCore.QSize(250, 16777215))
self.lineSearch.setObjectName("lineSearch")
self.gridLayout_3.addWidget(self.lineSearch, 0, 0, 1, 1)
self.buttonPesquisar = QtGui.QPushButton(self.centralwidget)
self.buttonPesquisar.setMaximumSize(QtCore.QSize(180, 27))
self.buttonPesquisar.setObjectName("buttonPesquisar")
self.gridLayout_3.addWidget(self.buttonPesquisar, 0, 1, 1, 1)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_3.addWidget(self.line, 0, 2, 5, 1)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setTextFormat(QtCore.Qt.RichText)
self.label_2.setObjectName("label_2")
self.gridLayout_3.addWidget(self.label_2, 0, 3, 1, 1)
self.lcdNumber = QtGui.QLCDNumber(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(159, 158, 158))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.lcdNumber.setPalette(palette)
self.lcdNumber.setObjectName("lcdNumber")
self.gridLayout_3.addWidget(self.lcdNumber, 0, 4, 1, 1)
spacerItem = QtGui.QSpacerItem(420, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem, 0, 5, 1, 1)
self.listPossibles = QtGui.QListWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listPossibles.sizePolicy().hasHeightForWidth())
self.listPossibles.setSizePolicy(sizePolicy)
self.listPossibles.setMaximumSize(QtCore.QSize(415, 25))
self.listPossibles.setObjectName("listPossibles")
self.gridLayout_3.addWidget(self.listPossibles, 1, 0, 1, 2)
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setAcceptDrops(False)
self.tabWidget.setAutoFillBackground(False)
self.tabWidget.setObjectName("tabWidget")
self.tabAbordagem = QtGui.QWidget()
self.tabAbordagem.setObjectName("tabAbordagem")
self.gridLayout_2 = QtGui.QGridLayout(self.tabAbordagem)
self.gridLayout_2.setObjectName("gridLayout_2")
self.textAbordagem = QtGui.QTextBrowser(self.tabAbordagem)
self.textAbordagem.setFrameShape(QtGui.QFrame.StyledPanel)
self.textAbordagem.setFrameShadow(QtGui.QFrame.Plain)
self.textAbordagem.setOpenExternalLinks(True)
self.textAbordagem.setObjectName("textAbordagem")
self.gridLayout_2.addWidget(self.textAbordagem, 0, 0, 1, 1)
self.tabWidget.addTab(self.tabAbordagem, "")
self.tabVideoAula = QtGui.QWidget()
self.tabVideoAula.setObjectName("tabVideoAula")
self.gridLayout_5 = QtGui.QGridLayout(self.tabVideoAula)
self.gridLayout_5.setObjectName("gridLayout_5")
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.videoPlayer = phonon.Phonon.VideoPlayer(self.tabVideoAula)
self.videoPlayer.setObjectName("videoPlayer")
self.verticalLayout_4.addWidget(self.videoPlayer)
self.seekSlider = phonon.Phonon.SeekSlider(self.tabVideoAula)
self.seekSlider.setObjectName("seekSlider")
self.verticalLayout_4.addWidget(self.seekSlider)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtGui.QLabel(self.tabVideoAula)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3)
self.volumeSlider = phonon.Phonon.VolumeSlider(self.tabVideoAula)
self.volumeSlider.setObjectName("volumeSlider")
self.horizontalLayout_4.addWidget(self.volumeSlider)
self.buttonReproduzir = QtGui.QPushButton(self.tabVideoAula)
self.buttonReproduzir.setObjectName("buttonReproduzir")
self.horizontalLayout_4.addWidget(self.buttonReproduzir)
self.buttonPausar = QtGui.QPushButton(self.tabVideoAula)
self.buttonPausar.setObjectName("buttonPausar")
self.horizontalLayout_4.addWidget(self.buttonPausar)
self.verticalLayout_4.addLayout(self.horizontalLayout_4)
self.gridLayout_5.addLayout(self.verticalLayout_4, 0, 0, 1, 1)
self.tabWidget.addTab(self.tabVideoAula, "")
self.tabSimulacao = QtGui.QWidget()
self.tabSimulacao.setObjectName("tabSimulacao")
self.gridLayout_4 = QtGui.QGridLayout(self.tabSimulacao)
self.gridLayout_4.setObjectName("gridLayout_4")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.textExplicacao = QtGui.QTextBrowser(self.tabSimulacao)
self.textExplicacao.setObjectName("textExplicacao")
self.verticalLayout.addWidget(self.textExplicacao)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.buttonSimular = QtGui.QPushButton(self.tabSimulacao)
self.buttonSimular.setObjectName("buttonSimular")
self.horizontalLayout_2.addWidget(self.buttonSimular)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.gridLayout_4.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.tabWidget.addTab(self.tabSimulacao, "")
self.tabExercicios = QtGui.QWidget()
self.tabExercicios.setObjectName("tabExercicios")
self.gridLayout_7 = QtGui.QGridLayout(self.tabExercicios)
self.gridLayout_7.setObjectName("gridLayout_7")
self.textExercicio = QtGui.QTextBrowser(self.tabExercicios)
self.textExercicio.setFrameShape(QtGui.QFrame.StyledPanel)
self.textExercicio.setFrameShadow(QtGui.QFrame.Plain)
self.textExercicio.setOpenExternalLinks(True)
self.textExercicio.setObjectName("textExercicio")
self.gridLayout_7.addWidget(self.textExercicio, 0, 0, 1, 1)
self.tabWidget.addTab(self.tabExercicios, "")
self.tabReferencias = QtGui.QWidget()
self.tabReferencias.setObjectName("tabReferencias")
self.gridLayout_6 = QtGui.QGridLayout(self.tabReferencias)
self.gridLayout_6.setObjectName("gridLayout_6")
self.textReferencias = QtGui.QTextBrowser(self.tabReferencias)
self.textReferencias.setFrameShape(QtGui.QFrame.StyledPanel)
self.textReferencias.setFrameShadow(QtGui.QFrame.Plain)
self.textReferencias.setOpenExternalLinks(True)
self.textReferencias.setObjectName("textReferencias")
self.gridLayout_6.addWidget(self.textReferencias, 0, 0, 1, 1)
self.tabWidget.addTab(self.tabReferencias, "")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.formLayout = QtGui.QFormLayout(self.tab)
self.formLayout.setObjectName("formLayout")
self.radioNavegador = QtGui.QRadioButton(self.tab)
self.radioNavegador.setObjectName("radioNavegador")
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.radioNavegador)
self.radioAudacity = QtGui.QRadioButton(self.tab)
self.radioAudacity.setObjectName("radioAudacity")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.radioAudacity)
self.radioGedit = QtGui.QRadioButton(self.tab)
self.radioGedit.setObjectName("radioGedit")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.radioGedit)
self.radioGnu = QtGui.QRadioButton(self.tab)
self.radioGnu.setObjectName("radioGnu")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.radioGnu)
self.buttonAbrir = QtGui.QPushButton(self.tab)
self.buttonAbrir.setObjectName("buttonAbrir")
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.buttonAbrir)
self.tabWidget.addTab(self.tab, "")
self.gridLayout_3.addWidget(self.tabWidget, 1, 3, 4, 3)
self.checkEdicao = QtGui.QCheckBox(self.centralwidget)
self.checkEdicao.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkEdicao.setTristate(False)
self.checkEdicao.setObjectName("checkEdicao")
self.gridLayout_3.addWidget(self.checkEdicao, 3, 0, 1, 2)
self.widgetEditarArvore = QtGui.QFrame(self.centralwidget)
self.widgetEditarArvore.setFrameShape(QtGui.QFrame.StyledPanel)
self.widgetEditarArvore.setFrameShadow(QtGui.QFrame.Raised)
self.widgetEditarArvore.setObjectName("widgetEditarArvore")
self.gridLayout = QtGui.QGridLayout(self.widgetEditarArvore)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.labelEditarArvore = QtGui.QLabel(self.widgetEditarArvore)
self.labelEditarArvore.setMinimumSize(QtCore.QSize(170, 0))
self.labelEditarArvore.setMaximumSize(QtCore.QSize(175, 16777215))
self.labelEditarArvore.setObjectName("labelEditarArvore")
self.horizontalLayout_3.addWidget(self.labelEditarArvore)
self.lineAdicionar = QtGui.QLineEdit(self.widgetEditarArvore)
self.lineAdicionar.setEnabled(True)
self.lineAdicionar.setMinimumSize(QtCore.QSize(190, 0))
self.lineAdicionar.setMaximumSize(QtCore.QSize(370, 16777215))
self.lineAdicionar.setObjectName("lineAdicionar")
self.horizontalLayout_3.addWidget(self.lineAdicionar)
self.gridLayout.addLayout(self.horizontalLayout_3, 0, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.buttonAdicionar = QtGui.QPushButton(self.widgetEditarArvore)
self.buttonAdicionar.setEnabled(True)
self.buttonAdicionar.setMinimumSize(QtCore.QSize(110, 0))
self.buttonAdicionar.setObjectName("buttonAdicionar")
self.horizontalLayout.addWidget(self.buttonAdicionar)
self.buttonDeletar = QtGui.QPushButton(self.widgetEditarArvore)
self.buttonDeletar.setEnabled(True)
self.buttonDeletar.setMinimumSize(QtCore.QSize(110, 0))
self.buttonDeletar.setObjectName("buttonDeletar")
self.horizontalLayout.addWidget(self.buttonDeletar)
self.buttonSalvar = QtGui.QPushButton(self.widgetEditarArvore)
self.buttonSalvar.setEnabled(True)
self.buttonSalvar.setMinimumSize(QtCore.QSize(110, 0))
self.buttonSalvar.setMaximumSize(QtCore.QSize(145, 27))
self.buttonSalvar.setObjectName("buttonSalvar")
self.horizontalLayout.addWidget(self.buttonSalvar)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.gridLayout_3.addWidget(self.widgetEditarArvore, 4, 0, 1, 2)
self.treeAssuntos = QtGui.QTreeWidget(self.centralwidget)
self.treeAssuntos.setMaximumSize(QtCore.QSize(415, 16777215))
self.treeAssuntos.setAnimated(True)
self.treeAssuntos.setObjectName("treeAssuntos")
self.treeAssuntos.headerItem().setText(0, "1")
self.gridLayout_3.addWidget(self.treeAssuntos, 2, 0, 1, 2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1010, 23))
self.menubar.setObjectName("menubar")
self.menuSobre = QtGui.QMenu(self.menubar)
self.menuSobre.setObjectName("menuSobre")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionSobre = QtGui.QAction(MainWindow)
self.actionSobre.setObjectName("actionSobre")
self.actionSobre_Qt = QtGui.QAction(MainWindow)
self.actionSobre_Qt.setObjectName("actionSobre_Qt")
self.menuSobre.addAction(self.actionSobre)
self.menuSobre.addAction(self.actionSobre_Qt)
self.menubar.addAction(self.menuSobre.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "SATEC - Sistema de Aprendizagem de Telecomunicações", None, QtGui.QApplication.UnicodeUTF8))
self.buttonPesquisar.setText(QtGui.QApplication.translate("MainWindow", "&Pesquisar assunto", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("MainWindow", "<html><head/><body><p>Relógio:</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.textAbordagem.setHtml(QtGui.QApplication.translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\">Seja bem vindo ao Sistema de Aprendizagem.<br /></span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\"> Por favor utilize o manual para verificar como se utiliza o programa.<br /><br />Bom estudo !<br /><br /></span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabAbordagem), QtGui.QApplication.translate("MainWindow", "Abordagem", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Volume:", None, QtGui.QApplication.UnicodeUTF8))
self.buttonReproduzir.setText(QtGui.QApplication.translate("MainWindow", "Reproduzir", None, QtGui.QApplication.UnicodeUTF8))
self.buttonPausar.setText(QtGui.QApplication.translate("MainWindow", "Pausar", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabVideoAula), QtGui.QApplication.translate("MainWindow", "Vídeo-Aula", None, QtGui.QApplication.UnicodeUTF8))
self.textExplicacao.setHtml(QtGui.QApplication.translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\">Texto sobre explicaรงรฃo da simulaรงรฃo atual.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:9pt;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\"><br /></span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.buttonSimular.setText(QtGui.QApplication.translate("MainWindow", "&Simular", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabSimulacao), QtGui.QApplication.translate("MainWindow", "Simulação", None, QtGui.QApplication.UnicodeUTF8))
self.textExercicio.setHtml(QtGui.QApplication.translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\">Texto sobre os exercรญcios do assunto atual.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:9pt;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\"><br /></span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabExercicios), QtGui.QApplication.translate("MainWindow", "Exercícios", None, QtGui.QApplication.UnicodeUTF8))
self.textReferencias.setHtml(QtGui.QApplication.translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\">Texto sobre as referรชncias do assunto atual.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Ubuntu\'; font-size:9pt;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Ubuntu\'; font-size:9pt;\"><br /></span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabReferencias), QtGui.QApplication.translate("MainWindow", "Referências", None, QtGui.QApplication.UnicodeUTF8))
self.radioNavegador.setText(QtGui.QApplication.translate("MainWindow", "Navegador", None, QtGui.QApplication.UnicodeUTF8))
self.radioAudacity.setText(QtGui.QApplication.translate("MainWindow", "Audacity", None, QtGui.QApplication.UnicodeUTF8))
self.radioGedit.setText(QtGui.QApplication.translate("MainWindow", "Gedit", None, QtGui.QApplication.UnicodeUTF8))
self.radioGnu.setText(QtGui.QApplication.translate("MainWindow", "GNU Radio", None, QtGui.QApplication.UnicodeUTF8))
self.buttonAbrir.setText(QtGui.QApplication.translate("MainWindow", "Abrir", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("MainWindow", "Programas Externos", None, QtGui.QApplication.UnicodeUTF8))
        self.checkEdicao.setText(QtGui.QApplication.translate("MainWindow", "Modo de edição - Árvore de assuntos", None, QtGui.QApplication.UnicodeUTF8))
self.labelEditarArvore.setText(QtGui.QApplication.translate("MainWindow", "Assunto a ser adicionado:", None, QtGui.QApplication.UnicodeUTF8))
self.buttonAdicionar.setText(QtGui.QApplication.translate("MainWindow", "&Adicionar", None, QtGui.QApplication.UnicodeUTF8))
self.buttonDeletar.setText(QtGui.QApplication.translate("MainWindow", "&Deletar ", None, QtGui.QApplication.UnicodeUTF8))
        self.buttonSalvar.setText(QtGui.QApplication.translate("MainWindow", "&Salvar Árvore", None, QtGui.QApplication.UnicodeUTF8))
self.menuSobre.setTitle(QtGui.QApplication.translate("MainWindow", "Ajuda", None, QtGui.QApplication.UnicodeUTF8))
self.actionSobre.setText(QtGui.QApplication.translate("MainWindow", "Sobre", None, QtGui.QApplication.UnicodeUTF8))
self.actionSobre_Qt.setText(QtGui.QApplication.translate("MainWindow", "Sobre Qt", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import phonon
|
import collections


class Solution:
    # Longest consecutive sequence in O(n): range_length[n] holds the length of
    # the consecutive run containing n, and is kept accurate only at the run's
    # two endpoints, which suffices because new numbers can only extend a run
    # at its ends.
    def longestConsecutive(self, num):
        range_length = collections.defaultdict(int)
        max_length = 0
        for n in num:
            if range_length[n] != 0:
                continue  # n is already part of a recorded run
            range_length[n] = 1
            left_length = range_length[n - 1]   # run ending at n - 1, 0 if none
            right_length = range_length[n + 1]  # run starting at n + 1, 0 if none
            new_length = left_length + right_length + 1
            # Record the merged length at both endpoints of the combined run.
            range_length[n - left_length] = range_length[n + right_length] = new_length
            max_length = max(max_length, new_length)
        return max_length
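
# A minimal usage sketch (illustrative input, not part of the original file):
#     Solution().longestConsecutive([100, 4, 200, 1, 3, 2])  # -> 4 (the run 1, 2, 3, 4)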
|
"""
## Introduction
When subclasses grow and get developed separately, identical (or nearly identical) fields and methods appear.
Pull up field refactoring removes the repetitive field from subclasses and moves it to a superclass.
## Pre and Post Conditions
### Pre Conditions:
1. There should exist a corresponding child and parent in the project.
2. The field that should be pulled up must be valid.
3. The user must enter the package's name, class's name and the fields that need to be removed.
### Post Conditions:
1. The changed field's usages and callings will also change respectively.
2. There will be children and parents having their desired fields added or removed.
3. Check for multilevel inheritance.
"""
from refactorings.utils import utils_listener_fast, utils2
class PullUpFieldRefactoring:
def __init__(self, source_filenames: list,
package_name: str,
class_name: str,
field_name: str,
filename_mapping=lambda x: (x[:-5] if x.endswith(".java") else x) + ".java"):
"""The main function that does the process of pull up field refactoring.
Removes the repetitive fields from the subclasses, creates the superclass,
and moves the fields to the superclass.
Args:
source_filenames (list): A list of file names to be processed
package_name (str): The name of the package in which the refactoring has to be done (contains the classes/superclasses)
class_name (str): Name of the class that the field is pulled up from
field_name (str): Name of the field that has to be refactored
filename_mapping (str): Mapping the file's name to the correct format so that it can be processed
Returns:
No returns
"""
self.source_filenames = source_filenames
self.package_name = package_name
self.class_name = class_name
self.field_name = field_name
self.filename_mapping = filename_mapping
def do_refactor(self):
program = utils2.get_program(self.source_filenames, print_status=True)
# print(program.packages)
if self.package_name not in program.packages \
or self.class_name not in program.packages[self.package_name].classes \
or self.field_name not in program.packages[self.package_name].classes[self.class_name].fields:
return False
_class: utils_listener_fast.Class = program.packages[self.package_name].classes[self.class_name]
if _class.superclass_name is None:
return False
superclass_name = _class.superclass_name
superclass: utils_listener_fast.Class = program.packages[self.package_name].classes[superclass_name]
superclass_body_start = utils_listener_fast.TokensInfo(superclass.parser_context.classBody())
superclass_body_start.stop = superclass_body_start.start # Start and stop both point to the '{'
if self.field_name in superclass.fields:
return False
datatype = _class.fields[self.field_name].datatype
fields_to_remove = []
for pn in program.packages:
p: utils_listener_fast.Package = program.packages[pn]
for cn in p.classes:
c: utils_listener_fast.Class = p.classes[cn]
if ((c.superclass_name == superclass_name and c.file_info.has_imported_class(self.package_name,
superclass_name))
or (
self.package_name is not None and c.superclass_name == self.package_name + '.' + superclass_name)) \
and self.field_name in c.fields \
and c.fields[self.field_name].datatype == datatype:
fields_to_remove.append(c.fields[self.field_name])
if len(fields_to_remove) == 0:
return False
is_public = False
is_protected = True
for field in fields_to_remove:
field: utils_listener_fast.Field = field
is_public = is_public or "public" in field.modifiers
is_protected = is_protected and ("protected" in field.modifiers or "private" in field.modifiers)
rewriter = utils2.Rewriter(program, self.filename_mapping)
rewriter.insert_after(superclass_body_start, "\n " + (
"public " if is_public else (
"protected " if is_protected else "")) + datatype + " " + self.field_name + ";")
for field in fields_to_remove:
if len(field.neighbor_names) == 0:
rewriter.replace(field.get_tokens_info(), "")
# Have to remove the modifiers too, because of the new grammar.
for mod_ctx in field.modifiers_parser_contexts:
rewriter.replace(utils_listener_fast.TokensInfo(mod_ctx), "")
else:
i = field.index_in_variable_declarators
var_ctxs = field.all_variable_declarator_contexts
if i == 0:
to_remove = utils_listener_fast.TokensInfo(var_ctxs[i])
to_remove.stop = utils_listener_fast.TokensInfo(
var_ctxs[i + 1]).start - 1 # Include the ',' after it
rewriter.replace(to_remove, "")
else:
to_remove = utils_listener_fast.TokensInfo(var_ctxs[i])
to_remove.start = utils_listener_fast.TokensInfo(
var_ctxs[i - 1]).stop + 1 # Include the ',' before it
rewriter.replace(to_remove, "")
# Add initializer to class constructor if initializer exists in field declaration
if field.initializer is not None:
_class: utils_listener_fast.Class = program.packages[field.package_name].classes[field.class_name]
initializer_statement = (field.name
+ " = "
+ ("new " + field.datatype + " " if field.initializer.startswith('{') else "")
+ field.initializer
+ ";")
                has_constructor = False
for class_body_decl in _class.parser_context.classBody().getChildren():
if class_body_decl.getText() in ['{', '}']:
continue
member_decl = class_body_decl.memberDeclaration()
if member_decl is not None:
constructor = member_decl.constructorDeclaration()
if constructor is not None:
body = constructor.constructorBody # Start token = '{'
body_start = utils_listener_fast.TokensInfo(body)
body_start.stop = body_start.start # Start and stop both point to the '{'
rewriter.insert_after(body_start, "\n " + initializer_statement)
                            has_constructor = True
                if not has_constructor:
body = _class.parser_context.classBody()
body_start = utils_listener_fast.TokensInfo(body)
body_start.stop = body_start.start # Start and stop both point to the '{'
rewriter.insert_after(body_start,
"\n " + _class.modifiers[
0] + " " + _class.name + "() { " + initializer_statement + " }"
)
rewriter.apply()
        # Check for multilevel inheritance recursively, pulling the same field further up.
        if _class.superclass_name is not None:
            PullUpFieldRefactoring(self.source_filenames, self.package_name, _class.superclass_name, self.field_name).do_refactor()
return True
def test():
print("Testing pullup_field...")
filenames = [
"D:/archive/uni/CD/project/CodART/tests/pullup_field/test5.java",
"D:/archive/uni/CD/project/CodART/tests/pullup_field/test6.java",
# "../benchmark_projects/tests/pullup_field/test1.java",
# "../benchmark_projects/tests/pullup_field/test2.java",
# "../benchmark_projects/tests/pullup_field/test3.java",
# "../benchmark_projects/tests/pullup_field/test4.java"
]
if PullUpFieldRefactoring(filenames, "pullup_field_test5", "C", "id").do_refactor():
print("Success!")
else:
print("Cannot refactor.")
def test_ant():
"""
target_files = [
"tests/apache-ant/main/org/apache/tools/ant/types/ArchiveFileSet.java",
"tests/apache-ant/main/org/apache/tools/ant/types/TarFileSet.java",
"tests/apache-ant/main/org/apache/tools/ant/types/ZipFileSet.java"
]
"""
ant_dir = "/home/ali/Desktop/code/TestProject/"
def main(project_dir: str, package_name: str, children_class: str, field_name: str):
print("Pullup Field")
print("Success!" if PullUpFieldRefactoring(
utils2.get_filenames_in_dir(project_dir),
package_name,
children_class,
field_name
# lambda x: "tests/pullup_field_ant/" + x[len(ant_dir):]
).do_refactor() else "Cannot refactor.")
if __name__ == "__main__":
test()
|
import asyncio
import functools
import json
import time
from collections import Counter, defaultdict, deque
from fractions import Fraction
from typing import Deque, Dict, List, Optional, Tuple, Union
from quarkchain.cluster.filter import Filter
from quarkchain.cluster.miner import validate_seal
from quarkchain.cluster.neighbor import is_neighbor
from quarkchain.cluster.rpc import ShardStats, TransactionDetail
from quarkchain.cluster.shard_db_operator import ShardDbOperator
from quarkchain.core import (
Address,
Branch,
CrossShardTransactionDeposit,
CrossShardTransactionList,
Log,
MinorBlock,
MinorBlockHeader,
MinorBlockMeta,
RootBlock,
SerializedEvmTransaction,
TokenBalanceMap,
TransactionReceipt,
TypedTransaction,
XshardTxCursorInfo,
calculate_merkle_root,
mk_receipt_sha,
)
from quarkchain.diff import EthDifficultyCalculator
from quarkchain.evm import opcodes
from quarkchain.evm.messages import apply_transaction, validate_transaction
from quarkchain.evm.state import State as EvmState
from quarkchain.evm.transaction_queue import TransactionQueue
from quarkchain.evm.transactions import Transaction as EvmTransaction
from quarkchain.evm.utils import add_dict
from quarkchain.genesis import GenesisManager
from quarkchain.reward import ConstMinorBlockRewardCalcultor
from quarkchain.utils import Logger, check, time_ms
class GasPriceSuggestionOracle:
def __init__(
self, last_price: int, last_head: bytes, check_blocks: int, percentile: int
):
self.last_price = last_price
self.last_head = last_head
self.check_blocks = check_blocks
self.percentile = percentile
class XshardTxCursor:
# Cursor definitions (root_block_height, mblock_index, deposit_index)
# (x, 0, 0): EOF
# (x, 0, z), z > 0: Root-block coinbase tx (always exist)
# (x, y, z), y > 0: Minor-block x-shard tx (may not exist if not neighbor or no xshard)
#
# Note that: the cursor must be
# - EOF
# - A valid x-shard transaction deposit
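    #
    # An illustrative walk (hypothetical heights, assuming exactly one neighbor
    # minor block carries deposits):
    #   (5, 0, 1)            -> coinbase deposit of the root block at height 5
    #   (5, 1, 0), (5, 1, 1) -> x-shard deposits of that root block's 1st minor block
    #   (6, 0, 1)            -> coinbase of the next root block, until EOF (x, 0, 0)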
def __init__(self, shard_state, mblock_header, cursor_info):
self.shard_state = shard_state
self.db = shard_state.db
# Recover cursor
self.max_rblock_header = self.db.get_root_block_header_by_hash(
mblock_header.hash_prev_root_block
)
rblock_header = self.db.get_root_block_header_by_height(
mblock_header.hash_prev_root_block, cursor_info.root_block_height
)
self.mblock_index = cursor_info.minor_block_index
self.xshard_deposit_index = cursor_info.xshard_deposit_index
# Recover rblock and xtx_list if it is processing tx from peer-shard
self.xtx_list = None
if rblock_header is not None:
self.rblock = self.db.get_root_block_by_hash(rblock_header.get_hash())
if self.mblock_index != 0:
self.xtx_list = self.db.get_minor_block_xshard_tx_list(
self.rblock.minor_block_header_list[
self.mblock_index - 1
].get_hash()
).tx_list
else:
# EOF
self.rblock = None
def __get_current_tx(self):
if self.mblock_index == 0:
# 0 is reserved for EOF
check(self.xshard_deposit_index == 1 or self.xshard_deposit_index == 2)
# TODO: For single native token only
if self.xshard_deposit_index == 1:
coinbase_amount = 0
if self.shard_state.branch.is_in_branch(
self.rblock.header.coinbase_address.full_shard_key
):
coinbase_amount = self.rblock.header.coinbase_amount_map.balance_map.get(
self.shard_state.genesis_token_id, 0
)
# Perform x-shard from root chain coinbase
return CrossShardTransactionDeposit(
tx_hash=self.rblock.header.get_hash(),
from_address=self.rblock.header.coinbase_address,
to_address=self.rblock.header.coinbase_address,
value=coinbase_amount,
gas_price=0,
gas_token_id=self.shard_state.genesis_token_id,
transfer_token_id=self.shard_state.genesis_token_id,
)
return None
elif self.xshard_deposit_index < len(self.xtx_list):
return self.xtx_list[self.xshard_deposit_index]
else:
return None
def get_next_tx(self):
""" Return XshardDeposit if succeed else return None
"""
# Check if reach EOF
if self.rblock is None:
return None
self.xshard_deposit_index += 1
tx = self.__get_current_tx()
# Reach the EOF of the mblock or rblock x-shard txs
if tx is not None:
return tx
self.mblock_index += 1
self.xshard_deposit_index = 0
# Iterate minor blocks' cross-shard transactions
while self.mblock_index <= len(self.rblock.minor_block_header_list):
# If it is not neighbor, move to next minor block
mblock_header = self.rblock.minor_block_header_list[self.mblock_index - 1]
if (
not self.shard_state._is_neighbor(
mblock_header.branch, self.rblock.header.height
)
or mblock_header.branch == self.shard_state.branch
):
check(self.xshard_deposit_index == 0)
self.mblock_index += 1
continue
# Check if the neighbor has the permission to send tx to local shard
prev_root_header = self.db.get_root_block_header_by_hash(
mblock_header.hash_prev_root_block
)
if (
prev_root_header.height
<= self.shard_state.env.quark_chain_config.get_genesis_root_height(
self.shard_state.full_shard_id
)
):
check(self.xshard_deposit_index == 0)
check(
self.db.get_minor_block_xshard_tx_list(mblock_header.get_hash())
is None
)
self.mblock_index += 1
continue
self.xtx_list = self.db.get_minor_block_xshard_tx_list(
mblock_header.get_hash()
).tx_list
tx = self.__get_current_tx()
if tx is not None:
return tx
# Move to next minor block
check(self.xshard_deposit_index == 0)
self.mblock_index += 1
# Move to next root block
rblock_header = self.db.get_root_block_header_by_height(
self.max_rblock_header.get_hash(), self.rblock.header.height + 1
)
if rblock_header is None:
# EOF
self.rblock = None
self.mblock_index = 0
self.xshard_deposit_index = 0
return None
else:
# Root-block coinbase (always exist)
self.rblock = self.db.get_root_block_by_hash(rblock_header.get_hash())
self.mblock_index = 0
self.xshard_deposit_index = 1
return self.__get_current_tx()
def get_cursor_info(self):
root_block_height = (
self.rblock.header.height
if self.rblock is not None
else self.max_rblock_header.height + 1
)
return XshardTxCursorInfo(
root_block_height=root_block_height,
minor_block_index=self.mblock_index,
xshard_deposit_index=self.xshard_deposit_index,
)
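# A minimal usage sketch of the cursor (variable names here are assumptions,
# not taken from this module):
#
#     cursor = XshardTxCursor(shard_state, block.header, block.meta.xshard_tx_cursor_info)
#     deposit = cursor.get_next_tx()
#     while deposit is not None:
#         ...  # credit the deposit to the EVM state
#         deposit = cursor.get_next_tx()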
class ShardState:
""" State of a shard, which includes
- evm state
- minor blockchain
- root blockchain and cross-shard transactions
TODO: Support
- reshard by split
"""
def __init__(self, env, full_shard_id: int, db=None, diff_calc=None):
self.env = env
self.shard_config = env.quark_chain_config.shards[full_shard_id]
self.full_shard_id = full_shard_id
if not diff_calc:
cutoff = self.shard_config.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME
diff_factor = self.shard_config.DIFFICULTY_ADJUSTMENT_FACTOR
min_diff = self.shard_config.GENESIS.DIFFICULTY
check(cutoff > 0 and diff_factor > 0 and min_diff > 0)
diff_calc = EthDifficultyCalculator(
cutoff=cutoff, diff_factor=diff_factor, minimum_diff=min_diff
)
self.diff_calc = diff_calc
self.reward_calc = ConstMinorBlockRewardCalcultor(env)
self.raw_db = db if db is not None else env.db
self.branch = Branch(full_shard_id)
self.db = ShardDbOperator(self.raw_db, self.env, self.branch)
self.tx_queue = TransactionQueue() # queue of EvmTransaction
self.tx_dict = dict() # hash -> Transaction for explorer
self.initialized = False
self.header_tip = None # MinorBlockHeader
# TODO: make the oracle configurable
self.gas_price_suggestion_oracle = GasPriceSuggestionOracle(
last_price=0, last_head=b"", check_blocks=5, percentile=50
)
        # new blocks that passed PoW validation and should be made available to the whole network
self.new_block_pool = dict()
# header hash -> (height, [coinbase address]) during previous blocks (ascending)
self.coinbase_addr_cache = dict() # type: Dict[bytes, Tuple[int, Deque[bytes]]]
self.genesis_token_id = self.env.quark_chain_config.genesis_token
self.local_fee_rate = (
1 - self.env.quark_chain_config.reward_tax_rate
) # type: Fraction
def init_from_root_block(self, root_block):
""" Master will send its root chain tip when it connects to slaves.
        Shards will initialize their state based on the root block.
"""
check(
root_block.header.height
> self.env.quark_chain_config.get_genesis_root_height(self.full_shard_id)
)
check(not self.initialized)
self.initialized = True
Logger.info(
"[{}] Initializing shard state from root height {} hash {}".format(
self.branch.to_str(),
root_block.header.height,
root_block.header.get_hash().hex(),
)
)
confirmed_header_tip = self.db.get_last_confirmed_minor_block_header_at_root_block(
root_block.header.get_hash()
)
header_tip = confirmed_header_tip
if not header_tip:
# root chain has not confirmed any block on this shard
# get the genesis block from db
header_tip = self.db.get_minor_block_by_height(0).header
self.header_tip = header_tip
self.root_tip = root_block.header
header_tip_hash = header_tip.get_hash()
self.db.recover_state(self.root_tip, self.header_tip)
Logger.info(
"[{}] Done recovery from db. shard tip {} {}, root tip {} {}".format(
self.branch.to_str(),
self.header_tip.height,
header_tip_hash.hex(),
self.root_tip.height,
self.root_tip.get_hash().hex(),
)
)
self.meta_tip = self.db.get_minor_block_meta_by_hash(header_tip_hash)
self.confirmed_header_tip = confirmed_header_tip
self.evm_state = self.__create_evm_state(
self.meta_tip.hash_evm_state_root, header_hash=header_tip_hash
)
check(
self.db.get_minor_block_evm_root_hash_by_hash(header_tip_hash)
== self.meta_tip.hash_evm_state_root
)
self.__rewrite_block_index_to(
self.db.get_minor_block_by_hash(header_tip_hash), add_tx_back_to_queue=False
)
def __create_evm_state(
self, trie_root_hash: Optional[bytes], header_hash: Optional[bytes]
):
"""EVM state with given root hash and block hash AFTER which being evaluated."""
state = EvmState(
env=self.env.evm_env, db=self.raw_db, qkc_config=self.env.quark_chain_config
)
state.shard_config = self.shard_config
if trie_root_hash:
state.trie.root_hash = trie_root_hash
if self.shard_config.POSW_CONFIG.ENABLED and header_hash is not None:
state.sender_disallow_list = self._get_posw_coinbase_blockcnt(
header_hash
).keys()
return state
def init_genesis_state(self, root_block):
""" root_block should have the same height as configured in shard GENESIS.
If a genesis block has already been created (probably from another root block
with the same height), create and store the new genesis block from root_block
without modifying the in-memory state of this ShardState object.
Additionally returns the coinbase_amount_map from the genesis block
"""
height = self.env.quark_chain_config.get_genesis_root_height(self.full_shard_id)
check(root_block.header.height == height)
genesis_manager = GenesisManager(self.env.quark_chain_config)
genesis_block, coinbase_amount_map = genesis_manager.create_minor_block(
root_block,
self.full_shard_id,
self.__create_evm_state(trie_root_hash=None, header_hash=None),
)
self.db.put_minor_block(genesis_block, [])
self.db.put_root_block(root_block)
self.db.put_genesis_block(root_block.header.get_hash(), genesis_block)
if self.initialized:
# already initialized. just return the block without resetting the state.
return genesis_block, coinbase_amount_map
# block index should not be overwritten if there is already a genesis block
# this must happen after the above initialization check
self.db.put_minor_block_index(genesis_block)
self.root_tip = root_block.header
# Tips that are confirmed by root
self.confirmed_header_tip = None
# Tips that are unconfirmed by root
self.header_tip = genesis_block.header
self.meta_tip = genesis_block.meta
self.evm_state = self.__create_evm_state(
genesis_block.meta.hash_evm_state_root,
header_hash=genesis_block.header.get_hash(),
)
Logger.info(
"[{}] Initialized genensis state at root block {} {}, genesis block hash {}".format(
self.branch.to_str(),
self.root_tip.height,
self.root_tip.get_hash().hex(),
self.header_tip.get_hash().hex(),
)
)
self.initialized = True
return genesis_block, coinbase_amount_map
def __validate_tx(
self,
tx: TypedTransaction,
evm_state,
from_address=None,
gas=None,
xshard_gas_limit=None,
) -> EvmTransaction:
"""from_address will be set for execute_tx"""
evm_tx = tx.tx.to_evm_tx()
if from_address:
check(evm_tx.from_full_shard_key == from_address.full_shard_key)
nonce = evm_state.get_nonce(from_address.recipient)
# have to create a new evm_tx as nonce is immutable
evm_tx = EvmTransaction(
nonce,
evm_tx.gasprice,
gas if gas else evm_tx.startgas, # override gas if specified
evm_tx.to,
evm_tx.value,
evm_tx.data,
from_full_shard_key=evm_tx.from_full_shard_key,
to_full_shard_key=evm_tx.to_full_shard_key,
network_id=evm_tx.network_id,
gas_token_id=evm_tx.gas_token_id,
transfer_token_id=evm_tx.transfer_token_id,
)
evm_tx.sender = from_address.recipient
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
if evm_tx.network_id != self.env.quark_chain_config.NETWORK_ID:
raise RuntimeError(
"evm tx network id mismatch. expect {} but got {}".format(
self.env.quark_chain_config.NETWORK_ID, evm_tx.network_id
)
)
if not self.branch.is_in_branch(evm_tx.from_full_shard_key):
raise RuntimeError(
"evm tx from_full_shard_key ({}) not in this branch ({}).".format(
hex(evm_tx.from_full_shard_key),
hex(self.branch.get_full_shard_id()),
)
)
to_branch = Branch(evm_tx.to_full_shard_id)
initialized_full_shard_ids = self.env.quark_chain_config.get_initialized_full_shard_ids_before_root_height(
self.root_tip.height
)
if (
evm_tx.is_cross_shard
and to_branch.get_full_shard_id() not in initialized_full_shard_ids
):
raise RuntimeError(
"evm tx to_full_shard_id {} is not initialized yet. current root height {}".format(
evm_tx.to_full_shard_id, self.root_tip.height
)
)
if evm_tx.is_cross_shard and not self._is_neighbor(to_branch):
raise RuntimeError(
"evm tx to_full_shard_id {} is not a neighbor of from_full_shard_id {}".format(
evm_tx.to_full_shard_id, evm_tx.from_full_shard_id
)
)
xshard_gas_limit = self.get_xshard_gas_limit(xshard_gas_limit=xshard_gas_limit)
if evm_tx.is_cross_shard and evm_tx.startgas > xshard_gas_limit:
raise RuntimeError("xshard evm tx exceeds xshard gas limit")
# This will check signature, nonce, balance, gas limit
validate_transaction(evm_state, evm_tx)
return evm_tx
def get_gas_limit(self, gas_limit=None):
if gas_limit is None:
gas_limit = self.env.quark_chain_config.gas_limit
return gas_limit
def get_xshard_gas_limit(self, gas_limit=None, xshard_gas_limit=None):
if xshard_gas_limit is not None:
return xshard_gas_limit
return self.get_gas_limit(gas_limit=gas_limit) // 2
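    # Illustrative numbers (assumed, not from any shipped config): with
    # gas_limit == 12_000_000 and no explicit override, get_xshard_gas_limit()
    # returns 6_000_000, i.e. half of the block gas limit is reserved for
    # incoming cross-shard deposits.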
def get_gas_limit_all(self, gas_limit=None, xshard_gas_limit=None):
return (
self.get_gas_limit(gas_limit),
self.get_xshard_gas_limit(gas_limit, xshard_gas_limit),
)
def add_tx(self, tx: TypedTransaction, xshard_gas_limit=None):
""" Add a tx to the tx queue
xshard_gas_limit is used for testing, which discards the tx if
- tx is x-shard; and
- tx's startgas exceeds xshard_gas_limit
"""
if (
len(self.tx_queue)
> self.env.quark_chain_config.TRANSACTION_QUEUE_SIZE_LIMIT_PER_SHARD
):
# exceeding tx queue size limit
return False
tx_hash = tx.get_hash()
if self.db.contain_transaction_hash(tx_hash):
return False
if tx_hash in self.tx_dict:
return False
evm_state = self.evm_state.ephemeral_clone()
evm_state.gas_used = 0
try:
evm_tx = self.__validate_tx(
tx, evm_state, xshard_gas_limit=xshard_gas_limit
)
self.tx_queue.add_transaction(evm_tx)
self.tx_dict[tx_hash] = tx
return True
except Exception as e:
Logger.warning_every_sec("Failed to add transaction: {}".format(e), 1)
return False
def _get_evm_state_for_new_block(self, block, ephemeral=True):
root_hash = self.db.get_minor_block_evm_root_hash_by_hash(
block.header.hash_prev_minor_block
)
state = self.__create_evm_state(
root_hash, header_hash=block.header.hash_prev_minor_block
)
if ephemeral:
state = state.ephemeral_clone()
state.timestamp = block.header.create_time
state.gas_limit = block.header.evm_gas_limit
state.block_number = block.header.height
state.recent_uncles[
state.block_number
] = [] # TODO [x.hash for x in block.uncles]
# TODO: Create a account with shard info if the account is not created
# Right now the full_shard_key for coinbase actually comes from the first tx that got applied
state.block_coinbase = block.header.coinbase_address.recipient
state.block_difficulty = block.header.difficulty
state.block_reward = 0
state.prev_headers = [] # TODO: state.add_block_header(block.header)
return state
def __is_same_minor_chain(self, longer_block_header, shorter_block_header):
if shorter_block_header.height > longer_block_header.height:
return False
header = longer_block_header
for i in range(longer_block_header.height - shorter_block_header.height):
header = self.db.get_minor_block_header_by_hash(
header.hash_prev_minor_block
)
return header == shorter_block_header
def __is_same_root_chain(self, longer_block_header, shorter_block_header):
if shorter_block_header.height > longer_block_header.height:
return False
header = longer_block_header
for i in range(longer_block_header.height - shorter_block_header.height):
header = self.db.get_root_block_header_by_hash(header.hash_prev_block)
return header == shorter_block_header
def __validate_block(
self, block: MinorBlock, gas_limit=None, xshard_gas_limit=None
):
""" Validate a block before running evm transactions
"""
height = block.header.height
if height < 1:
raise ValueError("unexpected height")
if not self.db.contain_minor_block_by_hash(block.header.hash_prev_minor_block):
# TODO: May put the block back to queue
raise ValueError(
"[{}] prev block not found, block height {} prev hash {}".format(
self.branch.to_str(),
height,
block.header.hash_prev_minor_block.hex(),
)
)
prev_header = self.db.get_minor_block_header_by_hash(
block.header.hash_prev_minor_block
)
if height != prev_header.height + 1:
raise ValueError("height mismatch")
if block.header.branch != self.branch:
raise ValueError("branch mismatch")
if block.header.create_time <= prev_header.create_time:
raise ValueError(
"incorrect create time tip time {}, new block time {}".format(
                    prev_header.create_time, block.header.create_time
)
)
if block.header.hash_meta != block.meta.get_hash():
raise ValueError("hash of meta mismatch")
if (
len(block.header.extra_data)
> self.env.quark_chain_config.BLOCK_EXTRA_DATA_SIZE_LIMIT
):
raise ValueError("extra_data in block is too large")
if (
len(block.tracking_data)
> self.env.quark_chain_config.BLOCK_EXTRA_DATA_SIZE_LIMIT
):
raise ValueError("tracking_data in block is too large")
# Gas limit check
gas_limit, xshard_gas_limit = self.get_gas_limit_all(
gas_limit=gas_limit, xshard_gas_limit=xshard_gas_limit
)
if block.header.evm_gas_limit != gas_limit:
raise ValueError(
"incorrect gas limit, expected %d, actual %d"
% (gas_limit, block.header.evm_gas_limit)
)
if block.meta.evm_xshard_gas_limit >= block.header.evm_gas_limit:
raise ValueError(
"xshard_gas_limit %d should not exceed total gas_limit %d"
% (block.meta.evm_xshard_gas_limit, block.header.evm_gas_limit)
)
if block.meta.evm_xshard_gas_limit != xshard_gas_limit:
raise ValueError(
"incorrect xshard gas limit, expected %d, actual %d"
% (xshard_gas_limit, block.meta.evm_xshard_gas_limit)
)
# Make sure merkle tree is valid
merkle_hash = calculate_merkle_root(block.tx_list)
if merkle_hash != block.meta.hash_merkle_root:
raise ValueError("incorrect merkle root")
# Check the first transaction of the block
if not self.branch.is_in_branch(block.header.coinbase_address.full_shard_key):
raise ValueError("coinbase output address must be in the shard")
# Check difficulty
if not self.env.quark_chain_config.SKIP_MINOR_DIFFICULTY_CHECK:
diff = self.diff_calc.calculate_diff_with_parent(
prev_header, block.header.create_time
)
if diff != block.header.difficulty:
raise ValueError("incorrect difficulty")
if not self.branch.is_in_branch(block.header.coinbase_address.full_shard_key):
raise ValueError("coinbase output must be in local shard")
# Check whether the root header is in the root chain
root_block_header = self.db.get_root_block_header_by_hash(
block.header.hash_prev_root_block
)
if root_block_header is None:
raise ValueError("cannot find root block for the minor block")
if (
root_block_header.height
< self.db.get_root_block_header_by_hash(
prev_header.hash_prev_root_block
).height
):
raise ValueError("prev root block height must be non-decreasing")
prev_confirmed_minor_block = self.db.get_last_confirmed_minor_block_header_at_root_block(
block.header.hash_prev_root_block
)
if prev_confirmed_minor_block and not self.__is_same_minor_chain(
prev_header, prev_confirmed_minor_block
):
raise ValueError(
"prev root block's minor block is not in the same chain as the minor block"
)
if not self.__is_same_root_chain(
self.db.get_root_block_header_by_hash(block.header.hash_prev_root_block),
self.db.get_root_block_header_by_hash(prev_header.hash_prev_root_block),
):
raise ValueError("prev root blocks are not on the same chain")
# Check PoW / PoSW
self.validate_minor_block_seal(block)
def run_block(
self, block, evm_state=None, evm_tx_included=None, x_shard_receive_tx_list=None
):
if evm_tx_included is None:
evm_tx_included = []
if x_shard_receive_tx_list is None:
x_shard_receive_tx_list = []
if evm_state is None:
evm_state = self._get_evm_state_for_new_block(block, ephemeral=False)
root_block_header = self.db.get_root_block_header_by_hash(
block.header.hash_prev_root_block
)
prev_header = self.db.get_minor_block_header_by_hash(
block.header.hash_prev_minor_block
)
xtx_list, evm_state.xshard_tx_cursor_info = self.__run_cross_shard_tx_with_cursor(
evm_state=evm_state, mblock=block
)
x_shard_receive_tx_list.extend(xtx_list)
# Adjust inshard gas limit if xshard gas limit is not exhausted
if evm_state.gas_used < block.meta.evm_xshard_gas_limit:
evm_state.gas_limit -= block.meta.evm_xshard_gas_limit - evm_state.gas_used
for idx, tx in enumerate(block.tx_list):
try:
evm_tx = self.__validate_tx(
tx, evm_state, xshard_gas_limit=block.meta.evm_xshard_gas_limit
)
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
apply_transaction(evm_state, evm_tx, tx.get_hash())
evm_tx_included.append(evm_tx)
except Exception as e:
Logger.debug_exception()
Logger.debug(
"Failed to process Tx {}, idx {}, reason {}".format(
tx.get_hash().hex(), idx, e
)
)
raise e
# Pay miner
pure_coinbase_amount = self.get_coinbase_amount_map(block.header.height)
for k, v in pure_coinbase_amount.balance_map.items():
evm_state.delta_token_balance(evm_state.block_coinbase, k, v)
# Update actual root hash
evm_state.commit()
return evm_state
def __is_minor_block_linked_to_root_tip(self, m_block):
""" Determine whether a minor block is a descendant of a minor block confirmed by root tip
"""
if not self.confirmed_header_tip:
# genesis
return True
if m_block.header.height <= self.confirmed_header_tip.height:
return False
header = m_block.header
for i in range(m_block.header.height - self.confirmed_header_tip.height):
header = self.db.get_minor_block_header_by_hash(
header.hash_prev_minor_block
)
return header == self.confirmed_header_tip
def __rewrite_block_index_to(self, minor_block, add_tx_back_to_queue=True):
""" Find the common ancestor in the current chain and rewrite index till minor_block """
new_chain = []
old_chain = []
# minor_block height could be lower than the current tip
# we should revert all the blocks above minor_block height
height = minor_block.header.height + 1
while True:
orig_block = self.db.get_minor_block_by_height(height)
if not orig_block:
break
old_chain.append(orig_block)
height += 1
block = minor_block
# Find common ancestor and record the blocks that needs to be updated
while block.header.height >= 0:
orig_block = self.db.get_minor_block_by_height(block.header.height)
if orig_block and orig_block.header == block.header:
break
new_chain.append(block)
if orig_block:
old_chain.append(orig_block)
if block.header.height <= 0:
break
block = self.db.get_minor_block_by_hash(block.header.hash_prev_minor_block)
for block in old_chain:
self.db.remove_transaction_index_from_block(block)
self.db.remove_minor_block_index(block)
if add_tx_back_to_queue:
self.__add_transactions_from_block(block)
for block in new_chain:
self.db.put_transaction_index_from_block(block)
self.db.put_minor_block_index(block)
self.__remove_transactions_from_block(block)
def __add_transactions_from_block(self, block):
for tx in block.tx_list:
self.tx_dict[tx.get_hash()] = tx
self.tx_queue.add_transaction(tx.tx.to_evm_tx())
def __remove_transactions_from_block(self, block):
evm_tx_list = []
for tx in block.tx_list:
self.tx_dict.pop(tx.get_hash(), None)
evm_tx_list.append(tx.tx.to_evm_tx())
self.tx_queue = self.tx_queue.diff(evm_tx_list)
def add_block(
self, block, skip_if_too_old=True, gas_limit=None, xshard_gas_limit=None
):
""" Add a block to local db. Perform validate and update tip accordingly
gas_limit and xshard_gas_limit are used for testing only.
Returns None if block is already added.
Returns a list of CrossShardTransactionDeposit from block.
Additionally, returns a map of reward token balances for this block
Raises on any error.
"""
start_time = time.time()
start_ms = time_ms()
if skip_if_too_old:
if (
self.header_tip.height - block.header.height
> self.shard_config.max_stale_minor_block_height_diff
):
Logger.info(
"[{}] drop old block {} << {}".format(
self.branch.to_str(),
block.header.height,
self.header_tip.height,
)
)
raise ValueError(
"block is too old {} << {}".format(
block.header.height, self.header_tip.height
)
)
block_hash = block.header.get_hash()
if self.db.contain_minor_block_by_hash(block_hash):
return None, None
evm_tx_included = []
x_shard_receive_tx_list = []
# Throw exception if fail to run
self.__validate_block(
block, gas_limit=gas_limit, xshard_gas_limit=xshard_gas_limit
)
evm_state = self.run_block(
block,
evm_tx_included=evm_tx_included,
x_shard_receive_tx_list=x_shard_receive_tx_list,
)
# ------------------------ Validate ending result of the block --------------------
if evm_state.xshard_tx_cursor_info != block.meta.xshard_tx_cursor_info:
raise ValueError("Cross-shard transaction cursor info mismatches!")
if block.meta.hash_evm_state_root != evm_state.trie.root_hash:
raise ValueError(
"state root mismatch: header %s computed %s"
% (block.meta.hash_evm_state_root.hex(), evm_state.trie.root_hash.hex())
)
receipt_root = mk_receipt_sha(evm_state.receipts, evm_state.db)
if block.meta.hash_evm_receipt_root != receipt_root:
raise ValueError(
"receipt root mismatch: header {} computed {}".format(
block.meta.hash_evm_receipt_root.hex(), receipt_root.hex()
)
)
if evm_state.gas_used != block.meta.evm_gas_used:
raise ValueError(
"gas used mismatch: header %d computed %d"
% (block.meta.evm_gas_used, evm_state.gas_used)
)
if (
evm_state.xshard_receive_gas_used
!= block.meta.evm_cross_shard_receive_gas_used
):
raise ValueError(
"x-shard gas used mismatch: header %d computed %d"
% (
block.meta.evm_cross_shard_receive_gas_used,
evm_state.xshard_receive_gas_used,
)
)
coinbase_amount_map = self.get_coinbase_amount_map(block.header.height)
# add block reward
coinbase_amount_map.add(evm_state.block_fee_tokens)
if (
coinbase_amount_map.balance_map
!= block.header.coinbase_amount_map.balance_map
):
raise ValueError("coinbase reward incorrect")
if evm_state.bloom != block.header.bloom:
raise ValueError("bloom mismatch")
self.db.put_minor_block(block, x_shard_receive_tx_list)
# Update tip if a block is appended or a fork is longer (with the same ancestor confirmed by root block tip)
# or they are equal length but the root height confirmed by the block is longer
update_tip = False
if not self.__is_same_root_chain(
self.root_tip,
self.db.get_root_block_header_by_hash(block.header.hash_prev_root_block),
):
# Don't update tip if the block depends on a root block that is not root_tip or root_tip's ancestor
update_tip = False
elif block.header.hash_prev_minor_block == self.header_tip.get_hash():
update_tip = True
elif self.__is_minor_block_linked_to_root_tip(block):
if block.header.height > self.header_tip.height:
update_tip = True
elif block.header.height == self.header_tip.height:
update_tip = (
self.db.get_root_block_header_by_hash(
block.header.hash_prev_root_block
).height
> self.db.get_root_block_header_by_hash(
self.header_tip.hash_prev_root_block
).height
)
if update_tip:
self.__rewrite_block_index_to(block)
self.evm_state = evm_state
# Safe to update PoSW blacklist here
if self.shard_config.POSW_CONFIG.ENABLED:
disallow_list = self._get_posw_coinbase_blockcnt(block_hash).keys()
self.evm_state.sender_disallow_list = disallow_list
self.header_tip = block.header
self.meta_tip = block.meta
check(
self.__is_same_root_chain(
self.root_tip,
self.db.get_root_block_header_by_hash(
self.header_tip.hash_prev_root_block
),
)
)
Logger.debug(
"Add block took {} seconds for {} tx".format(
time.time() - start_time, len(block.tx_list)
)
)
tracking_data_str = block.tracking_data.decode("utf-8")
if tracking_data_str != "":
tracking_data = json.loads(tracking_data_str)
sample = {
"time": time_ms() // 1000,
"shard": str(block.header.branch.get_full_shard_id()),
"network": self.env.cluster_config.MONITORING.NETWORK_NAME,
"cluster": self.env.cluster_config.MONITORING.CLUSTER_ID,
"hash": block_hash.hex(),
"height": block.header.height,
"original_cluster": tracking_data["cluster"],
"inception": tracking_data["inception"],
"creation_latency_ms": tracking_data["creation_ms"],
"add_block_latency_ms": time_ms() - start_ms,
"mined": tracking_data.get("mined", 0),
"propagation_latency_ms": start_ms - tracking_data.get("mined", 0),
"num_tx": len(block.tx_list),
}
asyncio.ensure_future(
self.env.cluster_config.kafka_logger.log_kafka_sample_async(
self.env.cluster_config.MONITORING.PROPAGATION_TOPIC, sample
)
)
return evm_state.xshard_list, coinbase_amount_map
def get_coinbase_amount_map(self, height) -> TokenBalanceMap:
epoch = (
height
// self.env.quark_chain_config.shards[self.full_shard_id].EPOCH_INTERVAL
)
decay_numerator = (
self.env.quark_chain_config.block_reward_decay_factor.numerator ** epoch
)
decay_denominator = (
self.env.quark_chain_config.block_reward_decay_factor.denominator ** epoch
)
coinbase_amount = (
self.env.quark_chain_config.shards[self.full_shard_id].COINBASE_AMOUNT
* self.local_fee_rate.numerator
* decay_numerator
// self.local_fee_rate.denominator
// decay_denominator
)
# shard coinbase only in genesis_token
return TokenBalanceMap(
{self.env.quark_chain_config.genesis_token: coinbase_amount}
)
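    # A worked example with assumed config values (not taken from this file):
    # EPOCH_INTERVAL = 100, COINBASE_AMOUNT = 5, reward_tax_rate = 1/2 (so
    # local_fee_rate = 1/2), block_reward_decay_factor = 88/100. At height 150,
    # epoch = 150 // 100 = 1, hence coinbase = 5 * 1 * 88 // 2 // 100 = 2,
    # all of it denominated in the genesis token (integer division throughout).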
def get_tip(self) -> MinorBlock:
return self.db.get_minor_block_by_hash(self.header_tip.get_hash())
def finalize_and_add_block(self, block, gas_limit=None, xshard_gas_limit=None):
""" Finalize the block by filling post-tx data including tx fee collected
gas_limit and xshard_gas_limit is used to verify customized gas limits and they are for test purpose only
"""
evm_state = self.run_block(block)
coinbase_amount_map = self.get_coinbase_amount_map(block.header.height)
coinbase_amount_map.add(evm_state.block_fee_tokens)
block.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map)
self.add_block(block, gas_limit=gas_limit, xshard_gas_limit=xshard_gas_limit)
def get_token_balance(
self, recipient: bytes, token_id: int, height: Optional[int] = None
) -> int:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return 0
return evm_state.get_balance(recipient, token_id=token_id)
def get_balances(self, recipient: bytes, height: Optional[int] = None) -> dict:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return {}
return evm_state.get_balances(recipient)
def get_transaction_count(
self, recipient: bytes, height: Optional[int] = None
) -> int:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return 0
return evm_state.get_nonce(recipient)
def get_code(self, recipient: bytes, height: Optional[int] = None) -> bytes:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return b""
return evm_state.get_code(recipient)
def get_storage_at(
self, recipient: bytes, key: int, height: Optional[int] = None
) -> bytes:
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return b""
int_result = evm_state.get_storage_data(recipient, key) # type: int
return int_result.to_bytes(32, byteorder="big")
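    # e.g. a stored slot value of 1 comes back as b"\x00" * 31 + b"\x01",
    # the 32-byte big-endian encoding used above.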
def execute_tx(
self, tx: TypedTransaction, from_address, height: Optional[int] = None
) -> Optional[bytes]:
"""Execute the tx using a copy of state
"""
evm_state = self._get_evm_state_from_height(height)
if not evm_state:
return None
state = evm_state.ephemeral_clone()
state.gas_used = 0
# Use the maximum gas allowed if gas is 0
evm_tx = tx.tx.to_evm_tx()
gas = evm_tx.startgas if evm_tx.startgas else state.gas_limit
try:
evm_tx = self.__validate_tx(tx, state, from_address, gas)
success, output = apply_transaction(
state, evm_tx, tx_wrapper_hash=bytes(32)
)
return output if success else None
except Exception as e:
Logger.warning_every_sec("Failed to apply transaction: {}".format(e), 1)
return None
def get_next_block_difficulty(self, create_time=None):
if not create_time:
create_time = max(int(time.time()), self.header_tip.create_time + 1)
return self.diff_calc.calculate_diff_with_parent(self.header_tip, create_time)
def get_next_block_reward(self):
return self.reward_calc.get_block_reward(self)
def get_next_block_coinbase_amount(self):
# TODO: add block reward
# TODO: the current calculation is bogus and just serves as a placeholder.
coinbase = 0
for tx_wrapper in self.tx_queue.peek():
tx = tx_wrapper.tx
coinbase += tx.gasprice * tx.startgas
# TODO: add x-shard tx
return coinbase
def __get_all_unconfirmed_header_list(self) -> List[MinorBlockHeader]:
""" height in ascending order """
header_list = []
header = self.header_tip
start_height = (
self.confirmed_header_tip.height if self.confirmed_header_tip else -1
)
        for _ in range(header.height - start_height):
header_list.append(header)
header = self.db.get_minor_block_header_by_hash(
header.hash_prev_minor_block
)
check(header == self.confirmed_header_tip)
header_list.reverse()
return header_list
def get_unconfirmed_header_list(self) -> List[MinorBlockHeader]:
headers = self.__get_all_unconfirmed_header_list()
max_blocks = self.__get_max_blocks_in_one_root_block()
return headers[0:max_blocks]
def get_unconfirmed_headers_coinbase_amount(self) -> int:
""" only returns genesis token coinbase amount
TODO remove coinbase_amount_map from minor header, this is the ONLY place that requires it
"""
amount = 0
headers = self.get_unconfirmed_header_list()
for header in headers:
amount += header.coinbase_amount_map.balance_map.get(
self.env.quark_chain_config.genesis_token, 0
)
return amount
def __get_max_blocks_in_one_root_block(self) -> int:
return self.shard_config.max_blocks_per_shard_in_one_root_block
def __add_transactions_to_block(self, block: MinorBlock, evm_state: EvmState):
""" Fill up the block tx list with tx from the tx queue"""
        popped_txs = []
while evm_state.gas_used < evm_state.gas_limit:
evm_tx = self.tx_queue.pop_transaction(
max_gas=evm_state.gas_limit - evm_state.gas_used
)
if evm_tx is None: # tx_queue is exhausted
break
evm_tx.set_quark_chain_config(self.env.quark_chain_config)
to_branch = Branch(evm_tx.to_full_shard_id)
tx = TypedTransaction(SerializedEvmTransaction.from_evm_tx(evm_tx))
try:
apply_transaction(evm_state, evm_tx, tx.get_hash())
block.add_tx(tx)
                popped_txs.append(evm_tx)
except Exception as e:
Logger.warning_every_sec(
"Failed to include transaction: {}".format(e), 1
)
self.tx_dict.pop(tx.get_hash(), None)
# We don't want to drop the transactions if the mined block failed to be appended
        for evm_tx in popped_txs:
self.tx_queue.add_transaction(evm_tx)
def create_block_to_mine(
self,
create_time=None,
address=None,
gas_limit=None,
xshard_gas_limit=None,
include_tx=True,
):
""" Create a block to append and include TXs to maximize rewards
"""
start_time = time.time()
tracking_data = {
"inception": time_ms(),
"cluster": self.env.cluster_config.MONITORING.CLUSTER_ID,
}
if not create_time:
create_time = max(int(time.time()), self.header_tip.create_time + 1)
difficulty = self.get_next_block_difficulty(create_time)
prev_block = self.get_tip()
block = prev_block.create_block_to_append(
create_time=create_time, address=address, difficulty=difficulty
)
        # Set gas limits; since the gas limit is fixed between blocks, overriding
        # it here is for test purposes only.
gas_limit, xshard_gas_limit = self.get_gas_limit_all(
gas_limit, xshard_gas_limit
)
block.header.evm_gas_limit = gas_limit
block.meta.evm_xshard_gas_limit = xshard_gas_limit
evm_state = self._get_evm_state_for_new_block(block)
# Cross-shard receive must be handled before including tx from tx_queue
# This is part of consensus.
block.header.hash_prev_root_block = self.root_tip.get_hash()
xtx_list, evm_state.xshard_tx_cursor_info = self.__run_cross_shard_tx_with_cursor(
evm_state=evm_state, mblock=block
)
# Adjust inshard tx limit if xshard gas limit is not exhausted
if evm_state.gas_used < xshard_gas_limit:
evm_state.gas_limit -= xshard_gas_limit - evm_state.gas_used
if include_tx:
self.__add_transactions_to_block(block, evm_state)
# Pay miner
pure_coinbase_amount = self.get_coinbase_amount_map(block.header.height)
for k, v in pure_coinbase_amount.balance_map.items():
evm_state.delta_token_balance(evm_state.block_coinbase, k, v)
# Update actual root hash
evm_state.commit()
pure_coinbase_amount.add(evm_state.block_fee_tokens)
block.finalize(evm_state=evm_state, coinbase_amount_map=pure_coinbase_amount)
tracking_data["creation_ms"] = time_ms() - tracking_data["inception"]
block.tracking_data = json.dumps(tracking_data).encode("utf-8")
end_time = time.time()
Logger.debug(
"Create block to mine took {} seconds for {} tx".format(
end_time - start_time, len(block.tx_list)
)
)
return block
    def get_block_by_hash(self, h):
        """ Return a validated block, or None if no such block exists in the db
        """
return self.db.get_minor_block_by_hash(h)
def contain_block_by_hash(self, h):
return self.db.contain_minor_block_by_hash(h)
def get_pending_tx_size(self):
return self.transaction_pool.size()
#
# ============================ Cross-shard transaction handling =============================
#
    def add_cross_shard_tx_list_by_minor_block_hash(
        self, h, tx_list: CrossShardTransactionList
    ):
        """ Add a cross-shard tx list from a remote shard.
        The list should already be validated by the remote shard; still, checking it
        here helps diagnose bugs in the peer shard, namely that
        - the x-shard gas limit is not exceeded
        - the sender is a neighbor of the current shard under our routing rule
        """
self.db.put_minor_block_xshard_tx_list(h, tx_list)
def add_root_block(self, root_block: RootBlock):
""" Add a root block.
Make sure all cross shard tx lists of remote shards confirmed by the root block are in local db.
        Return True if the new block becomes the head, else False.
Raise ValueError on any failure.
"""
check(
root_block.header.height
> self.env.quark_chain_config.get_genesis_root_height(self.full_shard_id)
)
if not self.db.contain_root_block_by_hash(root_block.header.hash_prev_block):
raise ValueError("cannot find previous root block in pool")
shard_headers = []
for m_header in root_block.minor_block_header_list:
h = m_header.get_hash()
if m_header.branch == self.branch:
if not self.db.contain_minor_block_by_hash(h):
raise ValueError("cannot find minor block in local shard")
shard_headers.append(m_header)
continue
prev_root_header = self.db.get_root_block_header_by_hash(
m_header.hash_prev_root_block
)
# prev_root_header can be None when the shard is not created at root height 0
if (
not prev_root_header
or prev_root_header.height
== self.env.quark_chain_config.get_genesis_root_height(
self.full_shard_id
)
or not self._is_neighbor(m_header.branch, prev_root_header.height)
):
check(
not self.db.contain_remote_minor_block_hash(h),
"minor block {} {} from shard {} shouldn't have been broadcasted to shard {}".format(
m_header.height,
m_header.get_hash().hex(),
m_header.branch.get_full_shard_id(),
self.branch.get_full_shard_id(),
),
)
continue
check(
self.db.contain_remote_minor_block_hash(h),
"cannot find x_shard tx list for {}-{} {}".format(
m_header.branch.get_full_shard_id(), m_header.height, h.hex()
),
)
if len(shard_headers) > self.__get_max_blocks_in_one_root_block():
raise ValueError(
"too many minor blocks in the root block for shard {}".format(
self.branch.get_full_shard_id()
)
)
last_minor_header_in_prev_root_block = self.db.get_last_confirmed_minor_block_header_at_root_block(
root_block.header.hash_prev_block
)
if shard_headers:
# Master should assure this check will not fail
check(
shard_headers[0].height == 0
or shard_headers[0].hash_prev_minor_block
== last_minor_header_in_prev_root_block.get_hash()
)
shard_header = shard_headers[-1]
else:
shard_header = last_minor_header_in_prev_root_block
# shard_header can be None meaning the genesis shard block has not been confirmed by any root block
self.db.put_root_block(root_block, shard_header)
if shard_header:
check(
self.__is_same_root_chain(
root_block.header,
self.db.get_root_block_header_by_hash(
shard_header.hash_prev_root_block
),
)
)
# No change to root tip
if root_block.header.total_difficulty <= self.root_tip.total_difficulty:
check(
self.__is_same_root_chain(
self.root_tip,
self.db.get_root_block_header_by_hash(
self.header_tip.hash_prev_root_block
),
)
)
return False
# Switch to the root block with higher total diff
self.root_tip = root_block.header
self.confirmed_header_tip = shard_header
orig_header_tip = self.header_tip
if shard_header:
orig_block = self.db.get_minor_block_by_height(shard_header.height)
            # get_minor_block_by_height only returns blocks on the best chain,
            # so if shard_header is on a fork, orig_block will be None or a
            # different block at that height
if not orig_block or orig_block.header != shard_header:
# TODO: shard_header might not be the tip of the longest chain
# need to switch to the tip of the longest chain
self.header_tip = shard_header
        # the current header_tip might point to a root block on a fork relative to root_block;
        # scan back until we find a minor block pointing to the same root chain root_block is on.
        # The worst case is going all the way back to orig_block (shard_header).
while not self.__is_same_root_chain(
self.root_tip,
self.db.get_root_block_header_by_hash(self.header_tip.hash_prev_root_block),
):
if self.header_tip.height == 0:
# we are at genesis block now but the root block it points to is still on a fork from root_tip.
# we have to reset the genesis block based on the root chain identified by root_tip
genesis_root_header = self.root_tip
genesis_height = self.env.quark_chain_config.get_genesis_root_height(
self.full_shard_id
)
check(genesis_root_header.height >= genesis_height)
# first find the root block at genesis root height
while genesis_root_header.height != genesis_height:
genesis_root_header = self.db.get_root_block_header_by_hash(
genesis_root_header.hash_prev_block
)
check(genesis_root_header is not None)
# recover the genesis block
self.header_tip = self.db.get_genesis_block(
genesis_root_header.get_hash()
).header
check(self.header_tip is not None)
break
self.header_tip = self.db.get_minor_block_header_by_hash(
self.header_tip.hash_prev_minor_block
)
if self.header_tip != orig_header_tip:
header_tip_hash = self.header_tip.get_hash()
self.meta_tip = self.db.get_minor_block_meta_by_hash(header_tip_hash)
self.__rewrite_block_index_to(
self.db.get_minor_block_by_hash(header_tip_hash)
)
Logger.info(
"[{}] shard tip reset from {} to {} by root block {}".format(
self.branch.to_str(),
orig_header_tip.height,
self.header_tip.height,
root_block.header.height,
)
)
return True
def _is_neighbor(self, remote_branch: Branch, root_height=None):
root_height = self.root_tip.height if root_height is None else root_height
shard_size = len(
self.env.quark_chain_config.get_initialized_full_shard_ids_before_root_height(
root_height
)
)
return is_neighbor(self.branch, remote_branch, shard_size)
def __run_one_xshard_tx(self, evm_state, xshard_deposit_tx):
tx = xshard_deposit_tx
# TODO: Check if target address is a smart contract address or user address
evm_state.delta_token_balance(
tx.to_address.recipient, tx.transfer_token_id, tx.value
)
evm_state.gas_used = evm_state.gas_used + (
opcodes.GTXXSHARDCOST if tx.gas_price != 0 else 0
)
check(evm_state.gas_used <= evm_state.gas_limit)
xshard_fee = (
opcodes.GTXXSHARDCOST
* tx.gas_price
* self.local_fee_rate.numerator
// self.local_fee_rate.denominator
)
add_dict(evm_state.block_fee_tokens, {tx.gas_token_id: xshard_fee})
evm_state.delta_token_balance(
evm_state.block_coinbase, tx.gas_token_id, xshard_fee
)
def __run_cross_shard_tx_with_cursor(self, evm_state, mblock):
cursor_info = self.db.get_minor_block_meta_by_hash(
mblock.header.hash_prev_minor_block
).xshard_tx_cursor_info
cursor = XshardTxCursor(self, mblock.header, cursor_info)
tx_list = []
while True:
xshard_deposit_tx = cursor.get_next_tx()
if xshard_deposit_tx is None:
# EOF
break
tx_list.append(xshard_deposit_tx)
self.__run_one_xshard_tx(evm_state, xshard_deposit_tx)
# Impose soft-limit of xshard gas limit
if evm_state.gas_used >= mblock.meta.evm_xshard_gas_limit:
break
evm_state.xshard_receive_gas_used = evm_state.gas_used
return tx_list, cursor.get_cursor_info()
def contain_remote_minor_block_hash(self, h):
return self.db.contain_remote_minor_block_hash(h)
def get_transaction_by_hash(self, h):
""" Returns (block, index) where index is the position of tx in the block """
block, index = self.db.get_transaction_by_hash(h)
if block:
return block, index
if h in self.tx_dict:
block = MinorBlock(MinorBlockHeader(), MinorBlockMeta())
block.tx_list.append(self.tx_dict[h])
return block, 0
return None, None
def get_transaction_receipt(
self, h
) -> Optional[Tuple[MinorBlock, int, TransactionReceipt]]:
block, index = self.db.get_transaction_by_hash(h)
if not block:
return None
receipt = block.get_receipt(self.evm_state.db, index)
if receipt.contract_address != Address.create_empty_account(0):
address = receipt.contract_address
check(
address.full_shard_key
== self.evm_state.get_full_shard_key(address.recipient)
)
return block, index, receipt
def get_transaction_list_by_address(self, address, start, limit):
if not self.env.cluster_config.ENABLE_TRANSACTION_HISTORY:
return [], b""
if start == bytes(1): # get pending tx
tx_list = []
for orderable_tx in self.tx_queue.txs + self.tx_queue.aside:
tx = orderable_tx.tx
if Address(tx.sender, tx.from_full_shard_key) == address:
tx_list.append(
TransactionDetail(
TypedTransaction(
SerializedEvmTransaction.from_evm_tx(tx)
).get_hash(),
address,
Address(tx.to, tx.to_full_shard_key) if tx.to else None,
tx.value,
block_height=0,
timestamp=0,
success=False,
gas_token_id=tx.gas_token_id,
transfer_token_id=tx.transfer_token_id,
)
)
return tx_list, b""
return self.db.get_transactions_by_address(address, start, limit)
def get_shard_stats(self) -> ShardStats:
cutoff = self.header_tip.create_time - 60
block = self.db.get_minor_block_by_hash(self.header_tip.get_hash())
tx_count = 0
block_count = 0
stale_block_count = 0
last_block_time = 0
while block.header.height > 0 and block.header.create_time > cutoff:
tx_count += len(block.tx_list)
block_count += 1
stale_block_count += max(
0, (self.db.get_block_count_by_height(block.header.height) - 1)
)
block = self.db.get_minor_block_by_hash(block.header.hash_prev_minor_block)
if last_block_time == 0:
last_block_time = self.header_tip.create_time - block.header.create_time
check(stale_block_count >= 0)
return ShardStats(
branch=self.branch,
height=self.header_tip.height,
difficulty=self.header_tip.difficulty,
coinbase_address=self.header_tip.coinbase_address,
timestamp=self.header_tip.create_time,
tx_count60s=tx_count,
pending_tx_count=len(self.tx_queue),
total_tx_count=self.db.get_total_tx_count(self.header_tip.get_hash()),
block_count60s=block_count,
stale_block_count60s=stale_block_count,
last_block_time=last_block_time,
)
def get_logs(
self,
addresses: List[Address],
topics: List[Optional[Union[str, List[str]]]],
start_block: int,
end_block: int,
) -> Optional[List[Log]]:
if addresses and (
len(set(addr.full_shard_key for addr in addresses)) != 1
or self.env.quark_chain_config.get_full_shard_id_by_full_shard_key(
addresses[0].full_shard_key
)
!= self.full_shard_id
):
# should have the same full_shard_id for the given addresses
return None
log_filter = Filter(self.db, addresses, topics, start_block, end_block)
try:
logs = log_filter.run()
return logs
        except Exception:
Logger.error_exception()
return None
def estimate_gas(self, tx: TypedTransaction, from_address) -> Optional[int]:
"""Estimate a tx's gas usage by binary searching."""
evm_tx_start_gas = tx.tx.to_evm_tx().startgas
        # binary search, similar to the approach in go-ethereum
lo = 21000 - 1
hi = evm_tx_start_gas if evm_tx_start_gas > 21000 else self.evm_state.gas_limit
cap = hi
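        # lo starts just below 21000, the intrinsic gas of a plain value transfer, so
        # the search never probes an amount no tx can succeed with; hi is capped by
        # the tx's own start gas or, failing that, the block gas limit.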
def run_tx(gas):
try:
evm_state = self.evm_state.ephemeral_clone() # type: EvmState
evm_state.gas_used = 0
evm_tx = self.__validate_tx(tx, evm_state, from_address, gas=gas)
success, _ = apply_transaction(
evm_state, evm_tx, tx_wrapper_hash=bytes(32)
)
return success
except Exception:
return False
while lo + 1 < hi:
mid = (lo + hi) // 2
if run_tx(mid):
hi = mid
else:
lo = mid
if hi == cap and not run_tx(hi):
return None
return hi
def gas_price(self) -> Optional[int]:
curr_head = self.header_tip.get_hash()
if curr_head == self.gas_price_suggestion_oracle.last_head:
return self.gas_price_suggestion_oracle.last_price
curr_height = self.header_tip.height
start_height = curr_height - self.gas_price_suggestion_oracle.check_blocks + 1
if start_height < 3:
start_height = 3
prices = []
for i in range(start_height, curr_height + 1):
block = self.db.get_minor_block_by_height(i)
if not block:
Logger.error("Failed to get block {} to retrieve gas price".format(i))
continue
prices.extend(block.get_block_prices())
if not prices:
return None
prices.sort()
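        # Pick the configured percentile of recent prices; e.g. with 10 sorted prices
        # and percentile=50, the index is 9 * 50 // 100 = 4 (the 5th-lowest price).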
price = prices[
(len(prices) - 1) * self.gas_price_suggestion_oracle.percentile // 100
]
self.gas_price_suggestion_oracle.last_price = price
self.gas_price_suggestion_oracle.last_head = curr_head
return price
def validate_minor_block_seal(self, block: MinorBlock):
consensus_type = self.env.quark_chain_config.shards[
block.header.branch.get_full_shard_id()
].CONSENSUS_TYPE
if not self.shard_config.POSW_CONFIG.ENABLED:
validate_seal(block.header, consensus_type)
else:
diff = self.posw_diff_adjust(block)
validate_seal(block.header, consensus_type, adjusted_diff=diff)
def posw_diff_adjust(self, block: MinorBlock) -> int:
start_time = time.time()
header = block.header
diff = header.difficulty
coinbase_address = header.coinbase_address.recipient
# Evaluate stakes before the to-be-added block
evm_state = self._get_evm_state_for_new_block(block, ephemeral=True)
config = self.shard_config.POSW_CONFIG
stakes = evm_state.get_balance(
coinbase_address, self.env.quark_chain_config.genesis_token
)
block_threshold = stakes // config.TOTAL_STAKE_PER_BLOCK
block_threshold = min(config.WINDOW_SIZE, block_threshold)
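        # e.g. (hypothetical numbers) with TOTAL_STAKE_PER_BLOCK=100, stakes=550 and
        # WINDOW_SIZE=256, block_threshold is 5: the block gets the reduced PoSW
        # difficulty below as long as this miner produced fewer than 5 blocks in the
        # preceding window.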
        # The count function is inclusive, so fetch block counts up to the previous block.
        # Also fetch only window_size - 1 counts, because the new window should
        # count the current block as well.
block_cnt = self._get_posw_coinbase_blockcnt(
header.hash_prev_minor_block, length=config.WINDOW_SIZE - 1
)
cnt = block_cnt.get(coinbase_address, 0)
if cnt < block_threshold:
diff //= config.DIFF_DIVIDER
# TODO: remove it if verified not time consuming
passed_ms = (time.time() - start_time) * 1000
Logger.debug("Adjust PoSW diff took %s milliseconds" % passed_ms)
return diff
def _get_evm_state_from_height(self, height: Optional[int]) -> Optional[EvmState]:
if height is None or height == self.header_tip.height:
return self.evm_state
# note `_get_evm_state_for_new_block` actually fetches the state in the previous block
# so adding 1 is needed here to get the next block
block = self.db.get_minor_block_by_height(height + 1)
if not block:
Logger.error("Failed to get block at height {}".format(height))
return None
return self._get_evm_state_for_new_block(block)
def __get_coinbase_addresses_until_block(
self, header_hash: bytes, length: int
) -> List[bytes]:
"""Get coinbase addresses up until block of given hash within the window."""
curr_block = self.db.get_minor_block_by_hash(header_hash)
if not curr_block:
raise ValueError("curr block not found: hash {}".format(header_hash.hex()))
header = curr_block.header
height = header.height
prev_hash = header.hash_prev_minor_block
if prev_hash in self.coinbase_addr_cache: # mem cache hit
_, addrs = self.coinbase_addr_cache[prev_hash]
addrs = addrs.copy()
if len(addrs) == length:
addrs.popleft()
addrs.append(header.coinbase_address.recipient)
else: # miss, iterating DB
addrs = deque()
for _ in range(length):
addrs.appendleft(header.coinbase_address.recipient)
if header.height == 0:
break
header = self.db.get_minor_block_header_by_hash(
header.hash_prev_minor_block
)
check(header is not None, "mysteriously missing block")
self.coinbase_addr_cache[header_hash] = (height, addrs)
# in case cached too much, clean up
if len(self.coinbase_addr_cache) > 128: # size around 640KB if window size 256
self.coinbase_addr_cache = {
k: (h, addrs)
for k, (h, addrs) in self.coinbase_addr_cache.items()
if h > height - 16 # keep most recent ones
}
return list(addrs)
@functools.lru_cache(maxsize=16)
def _get_posw_coinbase_blockcnt(
self, header_hash: bytes, length: int = None
) -> Dict[bytes, int]:
""" PoSW needed function: get coinbase addresses up until the given block
hash (inclusive) along with block counts within the PoSW window.
Raise ValueError if anything goes wrong.
"""
if length is None:
length = self.shard_config.POSW_CONFIG.WINDOW_SIZE
coinbase_addrs = self.__get_coinbase_addresses_until_block(header_hash, length)
return Counter(coinbase_addrs)
|
#!/usr/bin/env python3
import fnmatch
import os
import re
import sys
def get_files():
# Allow running from root directory and tools directory
root_dir = ".."
if os.path.exists("addons"):
root_dir = "."
sqf_files = []
for root, _, files in os.walk(root_dir):
for file in fnmatch.filter(files, "*.sqf"):
sqf_files.append(os.path.join(root, file))
sqf_files.sort()
return sqf_files
def filter_files(filepaths):
filtered_files = []
# Return only files that have a docblock
for filepath in filepaths:
with open(filepath, 'r') as file_contents:
for line in file_contents:
contents = line.strip()
# A possible docblock starts
if contents.startswith('/*'):
# Find the `* Return Value:` comment
                    # Strip whitespace/newlines from the remaining lines
                    lines = [s.strip() for s in file_contents.readlines()]
                    # Skip files whose docblock lacks the `* Return Value:` comment
                    if '* Return Value:' not in lines:
                        break
                    return_value_comment_index = lines.index('* Return Value:')
return_value_index = return_value_comment_index + 1
# Drop the first two characters (e.g. `* `) so it returns the return type
return_value = lines[return_value_index][2:]
filtered_files.append([filepath, return_value])
break
return filtered_files
def get_last_line(filepath):
with open(filepath, 'r') as file_contents:
lines = file_contents.readlines()
last_line = lines[-1].strip()
# Handle multiple blank lines at the end of the file
if last_line == "":
i = -2
while lines[i].strip() == "":
i -= 1
return lines[i].strip()
return last_line
def check_last_character(filepath, return_value):
last_line = get_last_line(filepath)
last_line_character = last_line[-1]
# If return type is None and the last line has a semicolon OR the last thing is just the nil keyword OR last thing is a closing bracket
if return_value == 'None' and (last_line_character == ';' or last_line == 'nil' or last_line == '};'):
return True
elif return_value != 'None' and (last_line_character != ';' or last_line == '};'):
return True
else:
return False
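# Examples of endings this check accepts (hypothetical SQF last lines):
#   return type 'None'   -> 'doSomething;', 'nil' or '};'
#   return type 'Number' -> '_result' (an expression with no trailing ';')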
def get_expected_last_line(last_line, return_value):
last_line_character = last_line[-1]
if return_value == 'None':
# If last character is a letter or a number
if re.search(r'[A-Za-z0-9]', last_line_character):
return '{};'.format(last_line)
else:
return 'nil'
else:
if last_line_character == ';':
return last_line[:-1]
return 'Unknown'
def main():
print('Validating Return Types')
print('-----------------------')
bad_files = []
files = get_files()
filtered_files = filter_files(files)
for file_details in filtered_files:
filepath, return_value = file_details
status = check_last_character(filepath, return_value)
if not status:
bad_files.append(
[filepath, return_value, get_last_line(filepath)])
error_count = len(bad_files)
print('Found {} error(s)'.format(error_count))
for bad_file in bad_files:
filepath, return_value, last_line = bad_file
expected_last_line = get_expected_last_line(last_line, return_value)
print('\nERROR: In file {}'.format(filepath))
print('Incorrect return type, expected `{}`'.format(return_value))
print('Found line `{}`'.format(last_line))
print('Expected line `{}`'.format(expected_last_line))
if error_count:
print('\nReturn Validation FAILED')
else:
print('\nReturn Validation PASSED')
return error_count
if __name__ == "__main__":
sys.exit(main())
|
# -*- coding: utf-8 -*-
__license__ = \
"""Copyright 2019 West University of Timisoara
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def get_nadir_status(**kw):
angle = int(kw["OffNadirAngle"])
    if 0 <= angle <= 25:
        return "Nadir"
    elif 26 <= angle <= 40:
        return "OffNadir"
    elif 41 <= angle <= 55:
        return "VeryOffNadir"
else:
raise NotImplementedError("Nadir angle not supported!")
class FilterDatasetByNadir(object):
def __init__(self, nadir_type=None):
self._nadir_type = nadir_type
def __call__(self, dataset_id, match_components, dataset):
return self._nadir_type == match_components["nadir"]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import sys
import multiprocessing
if hasattr(sys, "frozen"):
exedir = os.path.dirname(sys.executable)
os.environ['PATH'] = exedir
paths = ['lib',
'lib/library.zip',
'lib/butterflow',
'lib/numpy/core',
'lib/ffmpeg',
'lib/misc']
for path in paths:
os.environ['PATH'] += os.pathsep + os.path.join(exedir, path)
from butterflow.cli import main
if __name__ == '__main__':
multiprocessing.freeze_support()
sys.exit(main())
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 27 17:34:07 2014
@author: ydzhao
"""
import scipy.io as sio
import numpy as np
def pro_mat2py(matdir):
    matdata = sio.loadmat(matdir)
    banlist = ['__header__', '__globals__', '__version__']
    for item in (set(matdata.keys()) - set(banlist)):
        if matdata[item].shape == (1, 1):
            matdata[item] = matdata[item].item()
        else:
            matdata[item] = np.matrix(matdata[item])
    return matdata
def pro_py2mat(matdata, matdir):
    # Convert every value to an ndarray so scipy can serialize it
    for item in matdata.keys():
        matdata[item] = np.array(matdata[item])
    sio.savemat(matdir, matdata)
return matdata
if __name__ == "__main__":
    matdir = '/home/ydzhao/lmitest.mat'
    matdata = pro_mat2py(matdir) |
"""
This program demonstrates backtracking.
Fix a character in the first position
and swap each of the remaining characters
with the first character.
For ABC, the first iteration forms
three strings: ABC, BAC, and CBA,
by swapping A with A, B, and C respectively.
Repeat step 1 for the rest of the characters,
e.g., fixing the second character B, and so on.
Then swap back to return to the previous position:
from ABC we form ABC again by fixing B,
backtrack to the previous position, and swap B
with C. Now we have ABC and ACB.
Repeat these steps for BAC and CBA to get all the
permutations.
"""
# Python code to demonstrate
# to find all permutation of
# a given string
# Function to swap two characters in a character array
def swap(ch, i, j):
    ch[i], ch[j] = ch[j], ch[i]
# Recursive function to generate all permutations of a String
def permutations(ch, curr_index=0):
if curr_index == len(ch) - 1:
        print(''.join(ch))
        return
for i in range(curr_index, len(ch)):
swap(ch, curr_index, i)
permutations(ch, curr_index + 1)
swap(ch, curr_index, i)
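# For example, permutations(list("ABC")) prints, in generation order:
# ABC, ACB, BAC, BCA, CBA, CAB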
if __name__ == '__main__':
    s = input('Enter string here: ').upper()
    print(f"Permutations of the string \"{s}\" are:")
    permutations(list(s)) |
from iconservice import *
class Delegation(TypedDict):
address: Address
value: int
class SampleSystemScoreInterCall(IconScoreBase):
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
self.use_interface = VarDB("use_interface", db, value_type=bool)
def on_install(self, use_interface: bool) -> None:
super().on_install()
self.use_interface.set(use_interface)
def on_update(self) -> None:
super().on_update()
def _get_kw_dict(self, ret_locals: dict):
del ret_locals["self"]
del ret_locals["use_interface"]
return ret_locals
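    # e.g. inside call_setStake(5) with use_interface False, locals() is
    # {"self": ..., "value": 5, "use_interface": False}, so this returns
    # {"value": 5} -- exactly the keyword dict the system SCORE call expects.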
@payable
@external
def call_setStake(self, value: int) -> None:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
test_interface.setStake(value)
else:
self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="setStake",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getStake(self, address: Address) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getStake(address)
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getStake",
kw_dict=self._get_kw_dict(locals()))
@external
def call_estimateUnstakeLockPeriod(self) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.estimateUnstakeLockPeriod()
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="estimateUnstakeLockPeriod",
kw_dict=self._get_kw_dict(locals()))
@external
def call_setDelegation(self, delegations: List[Delegation]):
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
test_interface.setDelegation(delegations)
else:
self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="setDelegation",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getDelegation(self, address: Address) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getDelegation(address)
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getDelegation",
kw_dict=self._get_kw_dict(locals()))
@payable
@external
def call_claimIScore(self):
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
test_interface.claimIScore()
else:
self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="claimIScore",
kw_dict=self._get_kw_dict(locals()))
@external
def call_queryIScore(self, address: Address) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.queryIScore(address)
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="queryIScore",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getIISSInfo(self) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getIISSInfo()
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getIISSInfo",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getPRep(self, address: Address) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getPRep(address)
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getPRep",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getPReps(self, startRanking: int, endRanking: int) -> list:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getPReps(startRanking, endRanking)
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getPReps",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getMainPReps(self) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getMainPReps()
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getMainPReps",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getSubPReps(self) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getSubPReps()
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getSubPReps",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getPRepTerm(self) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getPRepTerm()
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getPRepTerm",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getInactivePReps(self) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getInactivePReps()
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getInactivePReps",
kw_dict=self._get_kw_dict(locals()))
@external
def call_getScoreDepositInfo(self, address: Address) -> dict:
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
return test_interface.getScoreDepositInfo(address)
else:
return self.call(addr_to=SYSTEM_SCORE_ADDRESS,
func_name="getScoreDepositInfo",
kw_dict=self._get_kw_dict(locals()))
@payable
@external
def call_burn(self):
use_interface = self.use_interface.get()
if use_interface:
test_interface = self.create_interface_score(SYSTEM_SCORE_ADDRESS, InterfaceSystemScore)
test_interface.icx(self.msg.value).burn()
else:
self.call(
addr_to=SYSTEM_SCORE_ADDRESS,
func_name="burn",
kw_dict={},
amount=self.msg.value
)
|
# Copyright 2014 Nervana Systems Inc., 2016 Hugh Perkins All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ported to OpenCL from https://github.com/nervanasystems/neon.git file neon/backends/winograd_conv.py
import pyopencl as cl
def calcU(ctx):
print('calcU')
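    # Filter transform of Winograd F(4x4, 3x3), as in neon's winograd_conv.py:
    # each thread computes U = G g G^T for one 3x3 filter g, where G is the 6x3
    # transform matrix whose entries are built from 1/4, 1/6, 1/12 and 1/24.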
code = r"""
kernel void calcU(
global float* Out, global const float* In,
int RSK, int SK, int SK2, int K, int C1152, int C, int GK)
{
int tid = get_local_id(0);
//if(tid != 0) {
// return;
//}
int blkK = get_num_groups(0) - get_group_id(0) - 1;
int c = get_num_groups(1) - get_group_id(1) - 1;
int k = (blkK<<5) + tid;
// output before:
// [Co//32][Ci][xi][nu][Co % 32]
// output in new order:
// [xi][nu][Co//32][Ci][Co % 32]
int out_offset = blkK*C1152 + c*1152 + tid;
bool valid_k = k < K;
int f_r0s0 = c*RSK + k;
int f_r0s1 = f_r0s0 + K;
int f_r0s2 = f_r0s1 + K;
int f_r1s0 = f_r0s0 + SK;
int f_r1s1 = f_r0s1 + SK;
int f_r1s2 = f_r0s2 + SK;
int f_r2s0 = f_r0s0 + SK2;
int f_r2s1 = f_r0s1 + SK2;
int f_r2s2 = f_r0s2 + SK2;
float I[3][3];
I[0][0] = valid_k ? (In[f_r0s0]) : 0.0f;
I[0][1] = valid_k ? (In[f_r0s1]) : 0.0f;
I[0][2] = valid_k ? (In[f_r0s2]) : 0.0f;
I[1][0] = valid_k ? (In[f_r1s0]) : 0.0f;
I[1][1] = valid_k ? (In[f_r1s1]) : 0.0f;
I[1][2] = valid_k ? (In[f_r1s2]) : 0.0f;
I[2][0] = valid_k ? (In[f_r2s0]) : 0.0f;
I[2][1] = valid_k ? (In[f_r2s1]) : 0.0f;
I[2][2] = valid_k ? (In[f_r2s2]) : 0.0f;
float rcp4 = 1.0f/4.0f;
float rcp6 = 1.0f/6.0f;
float rcp12 = 1.0f/12.0f;
float rcp24 = 1.0f/24.0f;
float T[6][3];
#pragma unroll
for (int i = 0; i < 3; i++)
{
float t0 = rcp6 * I[2][i];
float t1 = fma(I[0][i], -rcp6, -t0);
float t2 = fma(I[0][i], rcp24, t0);
T[0][i] = rcp4 * I[0][i];
T[1][i] = fma(I[1][i], -rcp6, t1);
T[2][i] = fma(I[1][i], rcp6, t1);
T[3][i] = fma(I[1][i], rcp12, t2);
T[4][i] = fma(I[1][i], -rcp12, t2);
T[5][i] = I[2][i];
}
// output in new order:
// [xi][nu][Co//32][Ci][Co % 32]
// we can probably make these kernel parameters
int nu_stride = 32 * C * GK;
int xi_stride = nu_stride * 6;
//int nu_stride = 0;
//int xi_stride = 0;
out_offset = tid + // Co % 32
(c << 5) + // Ci
((blkK * C) << 5) // Co // 32
;
#pragma unroll
for (int i = 0; i < 6; i++)
{
float t0 = rcp6 * T[i][2];
float t1 = fma(T[i][0], -rcp6, -t0);
float t2 = fma(T[i][0], rcp24, t0);
// Out[out_offset + 32*(i*6 + 0)] = (rcp4 * T[i][0]);
// Out[out_offset + 32*(i*6 + 1)] = (fma(T[i][1], -rcp6, t1));
// Out[out_offset + 32*(i*6 + 2)] = (fma(T[i][1], rcp6, t1));
// Out[out_offset + 32*(i*6 + 3)] = (fma(T[i][1], rcp12, t2));
// Out[out_offset + 32*(i*6 + 4)] = (fma(T[i][1], -rcp12, t2));
// Out[out_offset + 32*(i*6 + 5)] = (T[i][2]);
// output in new order:
// [xi][nu][Co//32][Ci][Co % 32]
Out[out_offset + i * xi_stride + 0 * nu_stride] = (rcp4 * T[i][0]);
Out[out_offset + i * xi_stride + 1 * nu_stride] = (fma(T[i][1], -rcp6, t1));
Out[out_offset + i * xi_stride + 2 * nu_stride] = (fma(T[i][1], rcp6, t1));
Out[out_offset + i * xi_stride + 3 * nu_stride] = (fma(T[i][1], rcp12, t2));
Out[out_offset + i * xi_stride + 4 * nu_stride] = (fma(T[i][1], -rcp12, t2));
Out[out_offset + i * xi_stride + 5 * nu_stride] = (T[i][2]);
}
}
"""
with open('/tmp/out.cl', 'w') as f:
f.write(code)
module = cl.Program(ctx, code).build(options='') # -cl-mad-enable -cl-fast-relaxed-math -cl-no-signed-zeros
    return getattr(module, 'calcU')
def calcV(ctx):
print('calcV')
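    # Input (data) transform of Winograd F(4x4, 3x3): each thread gathers a 6x6
    # input tile (a 4x4 output tile plus halo) and computes V = B^T d B.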
code = r"""
static inline int div64(int value, int div_mul, int div_shift)
{
int result;
// if the divisor is a power of two the magic will be 1 and it's just a simple right shift
if (div_mul == 1)
result = value >> div_shift;
// Otherwise multiply by magic and right shift just the high bits
else
result = (value * div_mul) >> div_shift;
return result;
}
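// Example: for a power-of-two divisor such as 8, div_mul = 1 and div_shift = 3.
// For a divisor of 3, one pair that works for the small indices used here is
// div_mul = 21846 (~2^16/3) with div_shift = 16, e.g. 9 -> (9*21846)>>16 = 3.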
kernel void calcV(
global float* Out, global const float* In,
int Y, int X, int N, int pad_y, int pad_x,
int GXS, int GYS2, int GXS2, int magic_GXS2, int shift_GXS2, int magic_GXS, int shift_GXS,
int shlY, int shlX, int maskY, int shrY, int maskX, int shrX, int shlN, int maskN,
int YXN, int XN, int GYS_GXS_C_1152, int GXS_C_1152, int C_1152,
int GX, int GY_GX, int GN, int C)
{
int tid = get_local_id(0);
int blkN = get_num_groups(0) - get_group_id(0) - 1;
int blkYX = get_num_groups(1) - get_group_id(1) - 1;
int c = get_num_groups(2) - get_group_id(2) - 1;
// unpack y,x from get_group_id(0)
int gy2 = (blkYX * magic_GXS) >> shift_GXS;
int gx2 = blkYX - gy2*GXS;
// Implement a square wave block id remapping
// (for all but last row (if odd number of rows))
//int gy = gy2 << 1;
//int gx = gx2;
//if (gy2 != GYS2)
//{
// gy += (gx2 & 1) ^ ((gx2 & 2) >> 1);
// gx = gx2 >> 1;
//}
// Scan backwards on odd rows
//if (gy2 & 1)
// gx = GXS - gx - 1;
int gx = gx2;
int gy = gy2;
//int gygx = gy * tiles + gx;
// Super block YXN coordinates
int y0 = (gy << shlY) + (((tid & maskY) >> shrY) << 2) - pad_y;
int x0 = (gx << shlX) + (((tid & maskX) >> shrX) << 2) - pad_x;
int n = (blkN << shlN) + (tid & maskN);
bool valid = n < N;
bool xin[6], yin[6];
float I[6][6];
#pragma unroll
for (int i = 0; i < 6; i++)
{
xin[i] = x0 + i >= 0 && x0 + i < X && valid;
yin[i] = y0 + i >= 0 && y0 + i < Y;
}
int offset = c*YXN + y0*XN + x0*N + n;
#pragma unroll
for (int y = 0; y < 6; y++)
{
if (y) offset += XN;
#pragma unroll
for (int x = 0; x < 6; x++)
{
float val = 0;
if (yin[y] && xin[x])
val = *(In + offset + x*N);
I[y][x] = (val);
}
}
float T[6][6];
#pragma unroll
for (int i = 0; i < 6; i++)
{
float t0 = fma(I[2][i], -4.0f, I[4][i]);
float t1 = fma(I[1][i], -4.0f, I[3][i]);
float t2 = I[4][i] - I[2][i];
float t3 = I[3][i] - I[1][i];
float t4 = fma(I[2][i], -5.0f, I[4][i]);
float t5 = fma(I[3][i], -5.0f, I[5][i]);
T[0][i] = fma(I[0][i], 4.0f, t4);
T[1][i] = t0 + t1;
T[2][i] = t0 - t1;
T[3][i] = fma(t3, 2.0f, t2);
T[4][i] = fma(t3, -2.0f, t2);
T[5][i] = fma(I[1][i], 4.0f, t5);
}
// old layout:
// [tH, tW, N // 32, Ci, xi, nu, N % 32]
// new layout:
// [xi, nu, N // 32, tH, tW, Ci, N % 32]
// (note: since last dimension is 32, this is always going to be 128-byte aligned)
int out_offset = tid + // N % 32
(c << 5) + // ci
blkYX * (C << 5) + // th* tiles + tw (?)
// 0 *((2 - gy) * 3 + (2 - gx)) * (C << 5) + // th* tiles + tw (?)
blkN * GY_GX * (C << 5) // N // 32
;
// int out_offset = blkN*GYS_GXS_C_1152 + gy*GXS_C_1152 + gx*C_1152 + c*1152 + tid;
int nu_stride = GN * GY_GX * (C << 5);
int xi_stride = nu_stride * 6;
#pragma unroll
for (int i = 0; i < 6; i++)
{
float t0 = fma(T[i][2], -4.0f, T[i][4]);
float t1 = fma(T[i][1], -4.0f, T[i][3]);
float t2 = T[i][4] - T[i][2];
float t3 = T[i][3] - T[i][1];
float t4 = fma(T[i][2], -5.0f, T[i][4]);
float t5 = fma(T[i][3], -5.0f, T[i][5]);
Out[out_offset + i * xi_stride + 0 * nu_stride] = (fma(T[i][0], 4.0f, t4));
Out[out_offset + i * xi_stride + 1 * nu_stride] = (t0 + t1);
Out[out_offset + i * xi_stride + 2 * nu_stride] = (t0 - t1);
Out[out_offset + i * xi_stride + 3 * nu_stride] = (fma(t3, 2.0f, t2));
Out[out_offset + i * xi_stride + 4 * nu_stride] = (fma(t3, -2.0f, t2));
Out[out_offset + i * xi_stride + 5 * nu_stride] = (fma(T[i][1], 4.0f, t5));
//Out[out_offset + i * xi_stride + 0 * nu_stride] = 123.45f;
}
//Out[0] = get_num_groups(1);
//Out[get_group_id(1) + 1] = (float)gy;
//Out[get_group_id(1) + 10] = (float)gx;
//if(get_local_id(0) == 0) {
// Out[get_group_id(2)] = get_group_id(2);
//}
}
"""
module = cl.Program(ctx, code).build(options='') # -cl-mad-enable -cl-fast-relaxed-math -cl-no-signed-zeros
    return getattr(module, 'calcV')
def calcM_blocked_l2(ctx):
code = r"""
kernel void calcM_blocked_l2(global float *R, const global float *U, const global float *V,
int A, int B
) {
// just do really naive for now, improve later...
// assume block (32,1,1), which fills the warps, ignore shared memory for now
// incoming data is (A,B).T * (A) ie (B,A) * (A)
// result will be (B)
// B is always 32
// lets use 1 thread for each B value.
// first, we should pull down all the data
// no need for sync, because we are using (32,1,1) block, exactly matches warp
// then each thread calculates one output value
    // let's do the output value first, since it's easiest, then pull down the data
int b = get_local_id(0);
float sum = 0;
    int A_blocks = A >> 5; // assume A is a multiple of 32
for(int a_block = 0; a_block < A; a_block+= 32) {
#pragma unroll
for(int a_local = 0; a_local < 32; a_local++) {
int a = a_block + a_local;
// this will be really high latency. improve later
            sum += U[(a << 5) + b] * V[a]; // parenthesized: + binds tighter than << in C
}
}
R[b] = sum;
}
"""
module = cl.Program(ctx, code).build(options='') # -cl-mad-enable -cl-fast-relaxed-math -cl-no-signed-zeros
    return getattr(module, 'calcM_blocked_l2')
def calcM(ctx):
# grid: (GK, GN, th_tw)
# block: (32, 1, 1) # each thread used for different Ci value
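    # NOTE: the kernel body also reads get_local_id(1) (lanes 0..15 and +16), so the
    # launch presumably uses a (32, 16, 1) workgroup. Per (xi, nu) point, calcM is a
    # batched GEMM: M[n, co] = sum over ci of V[ci, n] * U[ci, co].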
code = r"""
void process_ci_block_too_complicated_do_simple_for_now(
global float *restrict M, global float *restrict U, global float *restrict V,
int Ci, int gci, int gk, int gn, int th_tw) {
// each workgroup handles:
// 32 values for Co
// some values for Ci (up to 512?)
// pull down in blocks of 32 * 4 = 128 floats at a time
// assume that only hav eup to Ci == 128 for now (add an extra loop later)
local float4 U4_[32 * 32];
local float4 V4_[32];
// int numVRounds = Ci >> (5+2); // +2 is because we are going to use float4s
int tid = get_local_id(0);
    int localCi = Ci - (gci << 7); // each gci block covers 128 Ci values
int V_offset = 0; // TODO
float4 V_value = V[V_offset + tid];
int U_offset = tid;
global float4 *restrict U4 = (global float4 *)U;
for(int i = 0; i < 8; i+= 1) {
// there are: 128 * 32 = 4096 floats
// or: 32 * 32 = 1024 float4's
// divided by 32 threads, 32 float4's per thread
// or 128 floats per thread
// each loop the 32 threads get 128 float4's, or 512 floats
// after 8 runs through the loop, it has fetchs 1024 float4's
int U_offset0 = U_offset + 0;
int U_offset1 = U_offset + 32;
int U_offset2 = U_offset + 64;
int U_offset3 = U_offset + 96;
        float4 b0 = U_offset0 < localCi ? U4[U_offset0] : 0.0f;
        float4 b1 = U_offset1 < localCi ? U4[U_offset1] : 0.0f;
        float4 b2 = U_offset2 < localCi ? U4[U_offset2] : 0.0f;
        float4 b3 = U_offset3 < localCi ? U4[U_offset3] : 0.0f;
U4_[U_offset0] = b0;
U4_[U_offset1] = b1;
U4_[U_offset2] = b2;
U4_[U_offset3] = b3;
U_offset += 128;
}
V4_[tid] = V_value;
// no need to sync, since workgroup is 32 threads, equals warpsize (whether this is a good
// idea, I'm not sure, but worth a shot...)
// now, all data should have been loaded
// each thread will sum across all values of ci, for one particular value of co
local float * restrict U_ = (local float * restrict)U4_;
local float * restrict V_ = (local float * restrict)V4_;
float sum = 0;
for(int ci = 0; ci < Ci; ci += 4) {
//float s0 = U_[(ci << 5) + tid] * V_
//s0 += s1;
//s2 += s3;
//sum = s0 + s2;
}
}
void process_ci_block(
global float *restrict M, global float *restrict U, global float *restrict V,
int Ci, int tiles, int GN, int GK, int b,
local float *U_, local float *V_) {
int tid1 = get_local_id(1);
int tid = get_local_id(0);
int xinu_U_stride = GK * Ci * 32;
int xinu_V_stride = GN * tiles * tiles * Ci * 32;
int Ci_blocks = (Ci + 31) >> 5;
int tiles_offset = b * Ci * 32;
for(int gn = 0; gn < GN; gn++) {
int gn32 = gn << 5;
for(int gk = 0; gk < GK; gk++) {
int gk32 = gk << 5;
for(int xi = 0; xi < 6; xi++) {
for(int nu=0; nu < 6; nu++) {
int xinu = xi * 6 + nu;
float sum0 = 0.0f;
float sum1 = 0.0f;
for(int ci_block = 0; ci_block < Ci_blocks; ci_block++) {
int ci_block_start = ci_block << 5;
int local_ci = tid;
int local_ci32 = local_ci << 5;
int global_ci = ci_block_start + tid;
int global_ci32 = global_ci << 5;
barrier(CLK_LOCAL_MEM_FENCE);
if(global_ci < Ci) {
{
int local_co = tid1;
U_[local_ci32 + local_co] = U[xinu * xinu_U_stride + gk * Ci * 32 + global_ci32 + local_co];
U_[local_ci32 + local_co + 16] = U[xinu * xinu_U_stride + gk * Ci * 32 + global_ci32 + local_co + 16];
}
{
int n = tid1;
V_[local_ci32 + n] = V[xinu * xinu_V_stride + gn * tiles * tiles * Ci * 32 + tiles_offset + global_ci32 + n];
V_[local_ci32 + n + 16] = V[xinu * xinu_V_stride + gn * tiles * tiles * Ci * 32 + tiles_offset + global_ci32 + n + 16];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
int local_co = tid;
{
int n = tid1;
#pragma unroll
for(int ci = 0; ci < 32; ci++) {
int global_ci = ci_block_start + ci;
int ci32 = ci << 5;
float value = global_ci < Ci ? U_[ci32 + local_co] * V_[ci32 + n] : 0.0f;
sum0 += value;
}
n = tid1 + 16;
#pragma unroll
for(int ci = 0; ci < 32; ci++) {
int global_ci = ci_block_start + ci;
int ci32 = ci << 5;
float value = global_ci < Ci ? U_[ci32 + local_co] * V_[ci32 + n] : 0.0f;
sum1 += value;
}
}
}
int local_co = tid;
{
int n = tid1;
int offset = (gn32 + n) * GK * 32 * tiles * tiles * 6 * 6 + // (n // 32) * 32 + (n % 32)
(gk32 + local_co) * tiles * tiles * 6 * 6 + // (co % 32)
b * 6 * 6 + // b
xinu // xinu
;
M[offset] = sum0;
n = tid1 + 16;
offset = (gn32 + n) * GK * 32 * tiles * tiles * 6 * 6 + // (n // 32) * 32 + (n % 32)
(gk32 + local_co) * tiles * tiles * 6 * 6 + // (co % 32)
b * 6 * 6 + // b
xinu // xinu
;
M[offset] = sum1;
}
}
}
}
}
}
// [n // 32][n % 32][co // 32][co % 32][th][tw][xi][nu]
kernel void calcM(global float *restrict M, const global float *restrict U, const global float *restrict V,
int Ci, int GCi, int tiles, int GN, int GK,
local float *U_, local float *V_
) {
int b = get_group_id(0);
int tid1 = get_local_id(1);
// if(tid1 == 0) { // experiment to see if this affects the time
process_ci_block(M, U, V, Ci, tiles, GN, GK, b, U_, V_);
//}
}
"""
module = cl.Program(ctx, code).build(options='') # -cl-mad-enable -cl-fast-relaxed-math -cl-no-signed-zeros
    return getattr(module, 'calcM')
def calcO(ctx):
# grid: (GK, GN, th_tw)
# block: (32, 1, 1) # each thread used for different Ci value
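    # Output (inverse) transform of Winograd F(4x4, 3x3): each thread reduces one
    # 6x6 accumulator tile M to a 4x4 output tile via O = A^T M A. Note the kernel
    # below actually runs on a linear 1-D grid over GID tiles.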
code = r"""
kernel void calcO(
global float *O, global float *M, int GID) {
// lets just do stupidly for now, improve later...
// assume block (32,1,1)
// for calcU, each thread does one entire tile (6x6) Let's do the same thing
// let's just have a linear grid for now, to keep it simple stupid, then improve it later
int gid = get_global_id(0);
if(gid >= GID) {
return;
}
    // let's assume this is ... well, it doesn't matter actually, we simply do the same operation for all
// so just grab a tile, and transform it...
int M_offset = gid * 6 * 6; // 6x6 tiles
float M_[6][6];
for(int i = 0; i < 6; i++) {
int i6 = i * 6;
#pragma unroll
for(int j = 0; j < 6; j++) {
M_[i][j] = M[M_offset + i6 + j];
}
}
float Otmp[4][6];
for(int i = 0; i < 6; i++) {
Otmp[0][i] = M_[0][i] + M_[1][i] + M_[2][i] + M_[3][i] + M_[4][i];
Otmp[1][i] = + M_[1][i] - M_[2][i] + 2.0f * M_[3][i] - 2.0f * M_[4][i];
Otmp[2][i] = + M_[1][i] + M_[2][i] + 4.0f * M_[3][i] + 4.0f * M_[4][i];
Otmp[3][i] = + M_[1][i] - M_[2][i] + 8.0f * M_[3][i] - 8.0f * M_[4][i] + M_[5][i];
}
global float *restrict O_ = O + gid * 4 * 4;
for(int i = 0; i < 4; i++) {
int i4 = (i << 2);
O_[i4 + 0] = Otmp[i][0] + Otmp[i][1] + Otmp[i][2] + Otmp[i][3] + Otmp[i][4];
O_[i4 + 1] = + Otmp[i][1] - Otmp[i][2] + 2.0f * Otmp[i][3] - 2.0f * Otmp[i][4];
O_[i4 + 2] = + Otmp[i][1] + Otmp[i][2] + 4.0f * Otmp[i][3] + 4.0f * Otmp[i][4];
O_[i4 + 3] = + Otmp[i][1] - Otmp[i][2] + 8.0f * Otmp[i][3] - 8.0f * Otmp[i][4] + Otmp[i][5];
}
}
"""
module = cl.Program(ctx, code).build(options='') # -cl-mad-enable -cl-fast-relaxed-math -cl-no-signed-zeros
    return getattr(module, 'calcO')
|
#!/usr/bin/env python
# coding: utf-8
# created by [email protected]
# Date: 2016/10/7
# Time: 12:29
import datetime
import networkx as nx
from django.db.models import Q
from friendnet.models import Link, GroupMember
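# NOTE: nx.connected_component_subgraphs (used below) was removed in networkx 2.4;
# on newer versions use (G.subgraph(c) for c in nx.connected_components(G)) instead.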
def global_core(G):
    for (s, t, d) in list(G.edges(data=True)):
# w = d['weight']
# if w < 2:
# # print s.member_name, t.member_name
# G.remove_edge(s, t)
# c += 1
s = GroupMember.objects.get(id=s)
t = GroupMember.objects.get(id=t)
links = Link.objects.filter((Q(source_member=s, target_member=t) | Q(source_member=t, target_member=s)), status=3, group__id=10001)
couple = {s.user: False, t.user: False}
for link in links:
if link.creator in couple:
# print couple, link.creator
couple[link.creator] = True
        if not all(couple.values()):
            G.remove_edge(s.id, t.id)
        else:
            if d['ks'] - d['weight'] == -2:
                print(s.id, t.id, d['ks'], d['weight'])
return G
# for g in nx.connected_component_subgraphs(G):
# if g.number_of_nodes() > 1:
# print g.number_of_nodes(), g.number_of_edges()
# print G.number_of_nodes(), G.number_of_edges()
def no_hub(G):
    for node in list(G.nodes()):
        if G.degree(node) > 50:
            # for n in G_all_confirmed.neighbors(node):
            #     G_center.add_edge(node, n)
            G.remove_node(node)
    for g in nx.connected_component_subgraphs(G):
        if g.number_of_nodes() != 1:
            print(g.number_of_nodes(), g.number_of_edges())
def only_hub(G):
    for s, t in list(G.edges()):
        if G.degree(s) < 50 and G.degree(t) < 50:
            # for n in G_all_confirmed.neighbors(node):
            #     G_center.add_edge(node, n)
            G.remove_edge(s, t)
    for g in nx.connected_component_subgraphs(G):
        if g.number_of_nodes() != 1:
            print(g.number_of_nodes(), g.number_of_edges())
def private_link(G):
new_links = Link.objects.filter(group__id=10001, created_time__gt=datetime.datetime(2016, 10, 27, 10, 0, 0))
G_new = build_graph_id(new_links)
for s, t in G_new.edges():
if G.has_edge(s, t):
G.remove_edge(s, t)
    print(G_new.number_of_edges(), G.number_of_edges())
return G
def build_graph_id(links):
G = nx.Graph()
for link in links:
s, t = link.source_member_id, link.target_member_id
if not G.has_edge(s, t):
G.add_edge(s, t, link=[link], weight=1)
else:
G[s][t]['weight'] += 1
G[s][t]['link'].append(link)
return G
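# e.g. two confirmed Link rows between members 1 and 2 yield a single edge with
# G[1][2]['weight'] == 2 and both Link objects stored under G[1][2]['link']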
def community_test(G, weight=8):
# new_links = Link.objects.filter(group__id=10001, created_time__gt=datetime.datetime(2016, 10, 27, 10, 0, 0))
#
# G_new = build_graph_id(new_links)
    for s, t, d in list(G.edges(data=True)):
        if G.has_edge(s, t):
            if d['ks'] < weight:
                G.remove_edge(s, t)
    for n in list(G.nodes()):
        if G.degree(n) == 0:
            G.remove_node(n)
    print(G.number_of_nodes(), G.number_of_edges())
return G
def wrong(G):
# s, t = 10041, 10069
# s, t = 10110, 10115
# s, t = 10265, 10046
# s, t = 10263, 10112
s, t = 10028, 10075
    print(len(list(G.neighbors(s))), len(list(G.neighbors(t))))
    G.remove_nodes_from(set(G.nodes()) - {t, s} - set(G.neighbors(s)) - set(G.neighbors(t)))
    for s1, t1, d in list(G.edges(data=True)):
        if d['ks'] < 12:
            G.remove_edge(s1, t1)
# if s1 == s or t1 == s or s1 == t or t1 == t:
# continue
# else:
# G.remove_edge(s1, t1)
G.add_edge(s, t, status=True, weight=1, id=100)
return G
|
from . import portfolio
'''
TODO:
- chart growth of 10k
- Fix problem of potentially getting older return series
(maybe just always construct a new return series?)
'''
class Chart:
def __init__(self, portfolio_obj):
if not isinstance(portfolio_obj, portfolio.Portfolio):
raise TypeError(f'{portfolio_obj} is not of type: Portfolio')
self._port = portfolio_obj
if self._port.returns_df is None:
self._port.return_series()
self.returns_df = self._port.returns_df
print(self.returns_df)
|
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from model_mommy import mommy
from rest_framework.exceptions import ErrorDetail
from rest_framework.test import APIClient
from rest_framework import serializers
from unittest.mock import patch, MagicMock
from s3_file_uploads.models import UploadedFile
from s3_file_uploads.fields import UploadedFilePrimaryKeyRelatedField
class UploadedFileTestSerialiser(serializers.Serializer):
test_field = UploadedFilePrimaryKeyRelatedField()
class BaseTestCase(TestCase):
EMAIL = '[email protected]'
PASSWORD = 'Reelsecure123'
def setUp(self):
super().setUp()
self.api_client = APIClient()
self.user = mommy.make(
User,
username=self.EMAIL,
)
self.user.set_password(self.PASSWORD)
self.user.save()
self.api_client.login(
username=self.EMAIL,
password=self.PASSWORD
)
class UploadedFileTestCase(BaseTestCase):
@patch('boto3.client')
def test_upload_file(self, boto_client_mock):
boto_client_mock.return_value.generate_presigned_url.return_value = "https://cat.com/b/a/"
boto_client_mock.return_value.generate_presigned_post.return_value = {
'url': "https://cat.com/a/b/"
}
response = self.api_client.post(reverse('s3_file_uploads:upload-file-create'))
self.assertEqual(response.status_code, 201)
self.assertEqual(UploadedFile.objects.count(), 1)
new_file = UploadedFile.objects.first()
self.assertEqual(new_file.file_key, '')
self.assertEqual(new_file.user, self.user)
self.assertEqual(response.data['id'], str(new_file.id))
self.assertEqual(response.data['upload_form']['url'], "https://cat.com/a/b/")
self.assertEqual(response.data['file'], "https://cat.com/b/a/")
self.assertEqual(new_file.get_view_url(), response.data['file'])
self.assertEqual(new_file.get_upload_form(), response.data['upload_form'])
@patch('boto3.client')
def test_complete_url_with_acl_data(self, boto_client_mock):
acl_type = {'acl': 'public-read'}
boto_client_mock.return_value.generate_presigned_url.return_value = "https://cat.com/b/a/"
boto_client_mock.return_value.generate_presigned_post.return_value = "https://cat.com/b/a/"
response = self.api_client.post(reverse('s3_file_uploads:upload-file-create'), data=acl_type)
new_file = UploadedFile.objects.first()
boto_client_mock.return_value.generate_presigned_post.assert_called_with(
'AWS_BUCKET_NAME',
str(new_file.id),
Conditions=[acl_type, ['content-length-range', 1, 10485760]],
ExpiresIn=300,
Fields={'acl': 'public-read'},
)
self.assertEqual(new_file.get_upload_form(), response.data['upload_form'])
@patch('boto3.client')
def test_complete_url_with_no_acl_data(self, boto_client_mock):
boto_client_mock.return_value.generate_presigned_url.return_value = "https://cat.com/b/a/"
boto_client_mock.return_value.generate_presigned_post.return_value = "https://cat.com/b/a/"
response = self.api_client.post(reverse('s3_file_uploads:upload-file-create'))
new_file = UploadedFile.objects.first()
boto_client_mock.return_value.generate_presigned_post.assert_called_with(
'AWS_BUCKET_NAME',
str(new_file.id),
Conditions=[{'acl': 'private'}, ['content-length-range', 1, 10485760]],
ExpiresIn=300,
Fields={'acl': 'private'},
)
self.assertEqual(new_file.get_upload_form(), response.data['upload_form'])
@patch('boto3.client')
def test_complete_url_with_invalid_acl_data(self, boto_client_mock):
acl_type = {'acl': 'invalid-acl-type'}
boto_client_mock.return_value.generate_presigned_url.return_value = "https://cat.com/b/a/"
boto_client_mock.return_value.generate_presigned_post.return_value = "https://cat.com/b/a/"
response = self.api_client.post(reverse('s3_file_uploads:upload-file-create'), data=acl_type)
self.assertEqual(
response.data,
{'acl': [ErrorDetail(string='"invalid-acl-type" is not a valid choice.', code='invalid_choice')]}
)
@patch('boto3.client')
def test_complete_url(self, boto_client_mock):
boto_client_mock.return_value.generate_presigned_url.return_value = "https://cat.com/b/a/"
boto_client_mock.return_value.generate_presigned_post.return_value = "https://cat.com/b/a/"
response = self.api_client.post(reverse('s3_file_uploads:upload-file-create'))
new_file = UploadedFile.objects.first()
self.assertEqual(new_file.file_upload_state, UploadedFile.UPLOAD_STATES.AWAIT_COMPLETE)
response = self.api_client.post(response.data['complete_url'])
self.assertEqual(response.status_code, 200)
new_file.refresh_from_db()
self.assertEqual(new_file.file_upload_state, UploadedFile.UPLOAD_STATES.COMPLETED)
class UploadedFileUploadCompleteViewTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.uploaded_file = mommy.make(
UploadedFile,
file_upload_state=UploadedFile.UPLOAD_STATES.AWAIT_COMPLETE
)
def test_completes(self):
self.api_client.post(reverse('s3_file_uploads:upload-file-complete', kwargs={
'file_id': str(self.uploaded_file.id)
}))
self.uploaded_file.refresh_from_db()
self.assertEqual(self.uploaded_file.file_upload_state, UploadedFile.UPLOAD_STATES.COMPLETED)
def test_cant_complete_in_wrong_state(self):
self.uploaded_file.file_upload_state = UploadedFile.UPLOAD_STATES.COMPLETED
self.uploaded_file.save()
response = self.api_client.post(reverse('s3_file_uploads:upload-file-complete', kwargs={
'file_id': str(self.uploaded_file.id)
}))
self.assertEqual(response.status_code, 404)
self.uploaded_file.file_upload_state = UploadedFile.UPLOAD_STATES.NEW
self.uploaded_file.save()
response = self.api_client.post(reverse('s3_file_uploads:upload-file-complete', kwargs={
'file_id': str(self.uploaded_file.id)
}))
self.assertEqual(response.status_code, 404)
class UploadedFileFetchViewTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.uploaded_file = mommy.make(
UploadedFile,
file_upload_state=UploadedFile.UPLOAD_STATES.COMPLETED,
file_key='file key',
filename='foo.pdf'
)
@patch('boto3.client')
def test_fetch_file(self, boto_client_mock):
boto_client_mock.return_value.generate_presigned_url.return_value = "https://cat.com/b/a/"
response = self.api_client.get(reverse('s3_file_uploads:upload-file-fetch', kwargs={
'file_id': str(self.uploaded_file.id)
}))
self.assertRedirects(response, "https://cat.com/b/a/", fetch_redirect_response=False)
class UploadedFilePrimaryKeyRelatedFieldTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.valid_user = mommy.make(User)
self.in_valid_user = mommy.make(User)
self.uploaded_file = mommy.make(
UploadedFile,
file_upload_state=UploadedFile.UPLOAD_STATES.COMPLETED,
file_key='file key',
filename='foo.pdf',
user=self.valid_user
)
def test_user_is_valid(self):
mock_request = MagicMock()
mock_request.user = self.valid_user
serializer = UploadedFileTestSerialiser(
data={'test_field': self.uploaded_file.id}, context={'request': mock_request}
)
self.assertTrue(serializer.is_valid())
def test_user_is_not_valid(self):
mock_request = MagicMock()
mock_request.user = self.in_valid_user
serializer = UploadedFileTestSerialiser(
data={'test_field': self.uploaded_file.id}, context={'request': mock_request}
)
self.assertFalse(serializer.is_valid())
|
import secrets
from fastapi import Depends, HTTPException, Security, status
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.security.api_key import APIKeyCookie, APIKeyHeader, APIKeyQuery
from starlette.status import HTTP_403_FORBIDDEN
import config
# API_KEY = "1234567asdfgh"
API_KEY = config.parameters.get("api_key")
API_KEY_NAME = "access-token"
api_key_query = APIKeyQuery(name=API_KEY_NAME, auto_error=False)
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
api_key_cookie = APIKeyCookie(name=API_KEY_NAME, auto_error=False)
basic_auth = HTTPBasic()
async def get_api_key(
api_key_query: str = Security(api_key_query),
api_key_header: str = Security(api_key_header),
api_key_cookie: str = Security(api_key_cookie),
):
if api_key_query == API_KEY:
return api_key_query
elif api_key_header == API_KEY:
return api_key_header
elif api_key_cookie == API_KEY:
return api_key_cookie
else:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Could not validate credentials"
)
def verify_credentials(credentials: HTTPBasicCredentials = Depends(basic_auth)):
correct_username = secrets.compare_digest(credentials.username, config.USER_NAME)
correct_password = secrets.compare_digest(credentials.password, config.PASSWORD)
if not (correct_username and correct_password):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect email or password",
headers={"WWW-Authenticate": "Basic"},
)
return True
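# Usage sketch (illustrative only; `app` is a hypothetical FastAPI instance):
#   app = FastAPI()
#
#   @app.get("/items")
#   async def read_items(api_key: str = Depends(get_api_key)):
#       return {"api_key": api_key}
#
#   @app.get("/admin", dependencies=[Depends(verify_credentials)])
#   async def admin():
#       return {"ok": True}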
|
"""
I/O for VTU.
<https://vtk.org/Wiki/VTK_XML_Formats>
<https://vtk.org/wp-content/uploads/2015/04/file-formats.pdf>
"""
import base64
import re
import sys
import zlib
import numpy as np
from ..__about__ import __version__
from .._common import info, join_strings, raw_from_cell_data, replace_space, warn
from .._exceptions import CorruptionError, ReadError
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
from .._vtk_common import meshio_to_vtk_order, meshio_to_vtk_type, vtk_cells_from_data
# Paraview 5.8.1's built-in Python doesn't have lzma.
try:
import lzma
except ModuleNotFoundError:
lzma = None
def num_bytes_to_num_base64_chars(num_bytes):
# Rounding up in integer division works by double negation since Python
# always rounds down.
return -(-num_bytes // 3) * 4
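# Example: 5 payload bytes encode to -(-5 // 3) * 4 = 8 base64 characters
# (two 3-byte groups, the second one padded).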
def _polyhedron_cells_from_data(offsets, faces, faceoffsets, cell_data_raw):
# In general the number of faces will vary between cells, and the
# number of nodes vary between faces for each cell. The information
# will be stored as a List (one item per cell) of lists (one item
# per face of the cell) of np-arrays of node indices.
cells = {}
cell_data = {}
# The data format for face-cells is:
# num_faces_cell_0,
# num_nodes_face_0, node_ind_0, node_ind_1, ..
# num_nodes_face_1, node_ind_0, node_ind_1, ..
# ...
# num_faces_cell_1,
# ...
# See https://vtk.org/Wiki/VTK/Polyhedron_Support for more.
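    # Worked example (illustrative): a single tetrahedron with nodes 0-3 is
    # stored as
    #   4,              <- 4 faces
    #   3, 0, 1, 2,     <- face 0 has 3 nodes: 0, 1, 2
    #   3, 0, 1, 3,
    #   3, 0, 2, 3,
    #   3, 1, 2, 3
    # i.e. 17 items in total, so its faceoffsets entry is 17.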
    # The faceoffsets array gives the end of the face description for each
    # cell. Switch faceoffsets to give start points, not end points.
faceoffsets = np.append([0], faceoffsets[:-1])
# Double loop over cells then faces.
# This will be slow, but seems necessary to cover all cases
for cell_start in faceoffsets:
num_faces_this_cell = faces[cell_start]
faces_this_cell = []
next_face = cell_start + 1
for _ in range(num_faces_this_cell):
num_nodes_this_face = faces[next_face]
faces_this_cell.append(
np.array(
faces[next_face + 1 : (next_face + num_nodes_this_face + 1)],
dtype=int,
)
)
# Increase by number of nodes just read, plus the item giving
# number of nodes per face
next_face += num_nodes_this_face + 1
# Done with this cell
# Find number of nodes for this cell
num_nodes_this_cell = np.unique(np.hstack([v for v in faces_this_cell])).size
key = f"polyhedron{num_nodes_this_cell}"
if key not in cells.keys():
cells[key] = []
cells[key].append(faces_this_cell)
# The cells will be assigned to blocks according to their number of nodes.
# This is potentially a reordering, compared to the ordering in faces.
# Cell data must be reorganized accordingly.
# Start of the cell-node relations
start_cn = np.hstack((0, offsets))
size = np.diff(start_cn)
# Loop over all cell sizes, find all cells with this size, and store
# cell data.
for sz in np.unique(size):
# Cells with this number of nodes.
items = np.where(size == sz)[0]
# Store cell data for this set of cells
for name, d in cell_data_raw.items():
if name not in cell_data:
cell_data[name] = []
cell_data[name].append(d[items])
return cells, cell_data
def _organize_cells(point_offsets, cells, cell_data_raw):
if len(point_offsets) != len(cells):
raise ReadError("Inconsistent data!")
out_cells = []
# IMPLEMENTATION NOTE: The treatment of polyhedral cells is quite a bit different
# from the other cells; moreover, there are some strong (?) assumptions on such
# cells. The processing of such cells is therefore moved to a dedicated function for
# the time being, while all other cell types are treated by the same function.
# There are still similarities between processing of polyhedral and the rest, so it
# may be possible to unify the implementations at a later stage.
# Check if polyhedral cells are present.
polyhedral_mesh = False
for c in cells:
if np.any(c["types"] == 42): # vtk type 42 is polyhedral
polyhedral_mesh = True
break
if polyhedral_mesh:
# The current implementation assumes a single set of cells, and cannot mix
# polyhedral cells with other cell types. It may be possible to do away with
# these limitations, but for the moment, this is what is available.
if len(cells) > 1:
raise ValueError("Implementation assumes single set of cells")
if np.any(cells[0]["types"] != 42):
raise ValueError("Cannot handle combinations of polyhedra with other cells")
# Polyhedra are specified by their faces and faceoffsets; see the function
# _polyhedron_cells_from_data for more information.
faces = cells[0]["faces"]
faceoffsets = cells[0]["faceoffsets"]
cls, cell_data = _polyhedron_cells_from_data(
cells[0]["offsets"], faces, faceoffsets, cell_data_raw[0]
)
# Organize polyhedra in cell blocks according to the number of nodes per cell.
for tp, c in cls.items():
out_cells.append(CellBlock(tp, c))
else:
for offset, cls, cdr in zip(point_offsets, cells, cell_data_raw):
cls, cell_data = vtk_cells_from_data(
cls["connectivity"].ravel(),
cls["offsets"].ravel(),
cls["types"].ravel(),
cdr,
)
for c in cls:
out_cells.append(CellBlock(c.type, c.data + offset))
return out_cells, cell_data
def get_grid(root):
grid = None
appended_data = None
for c in root:
if c.tag == "UnstructuredGrid":
if grid is not None:
raise ReadError("More than one UnstructuredGrid found.")
grid = c
else:
if c.tag != "AppendedData":
raise ReadError(f"Unknown main tag '{c.tag}'.")
if appended_data is not None:
raise ReadError("More than one AppendedData section found.")
if c.attrib["encoding"] != "base64":
raise ReadError("")
appended_data = c.text.strip()
# The appended data always begins with a (meaningless) underscore.
if appended_data[0] != "_":
raise ReadError()
appended_data = appended_data[1:]
if grid is None:
raise ReadError("No UnstructuredGrid found.")
return grid, appended_data
def _parse_raw_binary(filename):
from xml.etree import ElementTree as ET
with open(filename, "rb") as f:
raw = f.read()
try:
res = re.search(re.compile(b'<AppendedData[^>]+(?:">)'), raw)
assert res is not None
i_start = res.end()
i_stop = raw.find(b"</AppendedData>")
except Exception:
raise ReadError()
header = raw[:i_start].decode()
footer = raw[i_stop:].decode()
data = raw[i_start:i_stop].split(b"_", 1)[1].rsplit(b"\n", 1)[0]
root = ET.fromstring(header + footer)
dtype = vtu_to_numpy_type[root.get("header_type", "UInt32")]
if "byte_order" in root.attrib:
dtype = dtype.newbyteorder(
"<" if root.get("byte_order") == "LittleEndian" else ">"
)
appended_data_tag = root.find("AppendedData")
assert appended_data_tag is not None
appended_data_tag.set("encoding", "base64")
compressor = root.get("compressor")
if compressor is None:
arrays = ""
i = 0
while i < len(data):
# The following find() runs into issues if offset is padded with spaces, see
# <https://github.com/nschloe/meshio/issues/1135>. It works in ParaView.
# Unfortunately, Python's built-in XML tree can't handle regexes, see
# <https://stackoverflow.com/a/38810731/353337>.
da_tag = root.find(f".//DataArray[@offset='{i}']")
if da_tag is None:
raise RuntimeError(f"Could not find .//DataArray[@offset='{i}']")
da_tag.set("offset", str(len(arrays)))
block_size = int(np.frombuffer(data[i : i + dtype.itemsize], dtype)[0])
arrays += base64.b64encode(
data[i : i + block_size + dtype.itemsize]
).decode()
i += block_size + dtype.itemsize
else:
c = {"vtkLZMADataCompressor": lzma, "vtkZLibDataCompressor": zlib}[compressor]
root.attrib.pop("compressor")
# raise ReadError("Compressed raw binary VTU files not supported.")
arrays = ""
i = 0
while i < len(data):
da_tag = root.find(f".//DataArray[@offset='{i}']")
assert da_tag is not None
da_tag.set("offset", str(len(arrays)))
num_blocks = int(np.frombuffer(data[i : i + dtype.itemsize], dtype)[0])
num_header_items = 3 + num_blocks
num_header_bytes = num_header_items * dtype.itemsize
header = np.frombuffer(data[i : i + num_header_bytes], dtype)
block_data = b""
j = 0
for k in range(num_blocks):
block_size = int(header[k + 3])
block_data += c.decompress(
data[
i + j + num_header_bytes : i + j + block_size + num_header_bytes
]
)
j += block_size
block_size = np.array([len(block_data)]).astype(dtype).tobytes()
arrays += base64.b64encode(block_size + block_data).decode()
i += j + num_header_bytes
appended_data_tag.text = "_" + arrays
return root
vtu_to_numpy_type = {
"Float32": np.dtype(np.float32),
"Float64": np.dtype(np.float64),
"Int8": np.dtype(np.int8),
"Int16": np.dtype(np.int16),
"Int32": np.dtype(np.int32),
"Int64": np.dtype(np.int64),
"UInt8": np.dtype(np.uint8),
"UInt16": np.dtype(np.uint16),
"UInt32": np.dtype(np.uint32),
"UInt64": np.dtype(np.uint64),
}
numpy_to_vtu_type = {v: k for k, v in vtu_to_numpy_type.items()}
class VtuReader:
"""Helper class for reading VTU files. Some properties are global to the file (e.g.,
byte_order), and instead of passing around these parameters, make them properties of
this class.
"""
def __init__(self, filename): # noqa: C901
from xml.etree import ElementTree as ET
parser = ET.XMLParser()
try:
tree = ET.parse(str(filename), parser)
root = tree.getroot()
except ET.ParseError:
root = _parse_raw_binary(str(filename))
if root.tag != "VTKFile":
raise ReadError(f"Expected tag 'VTKFile', found {root.tag}")
if root.attrib["type"] != "UnstructuredGrid":
tpe = root.attrib["type"]
raise ReadError(f"Expected type UnstructuredGrid, found {tpe}")
if "version" in root.attrib:
version = root.attrib["version"]
if version not in ["0.1", "1.0"]:
raise ReadError(f"Unknown VTU file version '{version}'.")
# fix empty NumberOfComponents attributes as produced by Firedrake
for da_tag in root.findall(".//DataArray[@NumberOfComponents='']"):
da_tag.attrib.pop("NumberOfComponents")
if "compressor" in root.attrib:
assert root.attrib["compressor"] in [
"vtkLZMADataCompressor",
"vtkZLibDataCompressor",
]
self.compression = root.attrib["compressor"]
else:
self.compression = None
self.header_type = (
root.attrib["header_type"] if "header_type" in root.attrib else "UInt32"
)
try:
self.byte_order = root.attrib["byte_order"]
if self.byte_order not in ["LittleEndian", "BigEndian"]:
raise ReadError(f"Unknown byte order '{self.byte_order}'.")
except KeyError:
self.byte_order = None
grid, self.appended_data = get_grid(root)
pieces = []
field_data = {}
for c in grid:
if c.tag == "Piece":
pieces.append(c)
elif c.tag == "FieldData":
# TODO test field data
for data_array in c:
field_data[data_array.attrib["Name"]] = self.read_data(data_array)
else:
raise ReadError(f"Unknown grid subtag '{c.tag}'.")
if not pieces:
raise ReadError("No Piece found.")
points = []
cells = []
point_data = []
cell_data_raw = []
for piece in pieces:
piece_cells = {}
piece_point_data = {}
piece_cell_data_raw = {}
num_points = int(piece.attrib["NumberOfPoints"])
num_cells = int(piece.attrib["NumberOfCells"])
for child in piece:
if child.tag == "Points":
data_arrays = list(child)
if len(data_arrays) != 1:
raise ReadError()
data_array = data_arrays[0]
if data_array.tag != "DataArray":
raise ReadError()
pts = self.read_data(data_array)
num_components = int(data_array.attrib["NumberOfComponents"])
points.append(pts.reshape(num_points, num_components))
elif child.tag == "Cells":
for data_array in child:
if data_array.tag != "DataArray":
raise ReadError()
piece_cells[data_array.attrib["Name"]] = self.read_data(
data_array
)
if len(piece_cells["offsets"]) != num_cells:
raise ReadError()
if len(piece_cells["types"]) != num_cells:
raise ReadError()
cells.append(piece_cells)
elif child.tag == "PointData":
for c in child:
if c.tag != "DataArray":
raise ReadError()
try:
piece_point_data[c.attrib["Name"]] = self.read_data(c)
except CorruptionError as e:
warn(e.args[0] + " Skipping.")
point_data.append(piece_point_data)
elif child.tag == "CellData":
for c in child:
if c.tag != "DataArray":
raise ReadError()
piece_cell_data_raw[c.attrib["Name"]] = self.read_data(c)
cell_data_raw.append(piece_cell_data_raw)
else:
raise ReadError(f"Unknown tag '{child.tag}'.")
if not cell_data_raw:
cell_data_raw = [{}] * len(cells)
if len(cell_data_raw) != len(cells):
raise ReadError()
point_offsets = np.cumsum([0] + [pts.shape[0] for pts in points][:-1])
# Now merge across pieces
if not points:
raise ReadError()
self.points = np.concatenate(points)
if point_data:
self.point_data = {
key: np.concatenate([pd[key] for pd in point_data])
for key in point_data[0]
}
else:
self.point_data = None
self.cells, self.cell_data = _organize_cells(
point_offsets, cells, cell_data_raw
)
self.field_data = field_data
def read_uncompressed_binary(self, data, dtype):
byte_string = base64.b64decode(data)
# the first item is the total_num_bytes, given in header_dtype
header_dtype = vtu_to_numpy_type[self.header_type]
if self.byte_order is not None:
header_dtype = header_dtype.newbyteorder(
"<" if self.byte_order == "LittleEndian" else ">"
)
num_header_bytes = np.dtype(header_dtype).itemsize
total_num_bytes = np.frombuffer(byte_string[:num_header_bytes], header_dtype)[0]
# Check if block size was decoded separately
# (so decoding stopped after block size due to padding)
if len(byte_string) == num_header_bytes:
header_len = len(base64.b64encode(byte_string))
byte_string = base64.b64decode(data[header_len:])
else:
byte_string = byte_string[num_header_bytes:]
# Read the block data; multiple blocks possible here?
if self.byte_order is not None:
dtype = dtype.newbyteorder(
"<" if self.byte_order == "LittleEndian" else ">"
)
return np.frombuffer(byte_string[:total_num_bytes], dtype=dtype)
def read_compressed_binary(self, data, dtype):
# first read the block size; it determines the size of the header
header_dtype = vtu_to_numpy_type[self.header_type]
if self.byte_order is not None:
header_dtype = header_dtype.newbyteorder(
"<" if self.byte_order == "LittleEndian" else ">"
)
num_bytes_per_item = np.dtype(header_dtype).itemsize
num_chars = num_bytes_to_num_base64_chars(num_bytes_per_item)
byte_string = base64.b64decode(data[:num_chars])[:num_bytes_per_item]
num_blocks = np.frombuffer(byte_string, header_dtype)[0]
# read the entire header
num_header_items = 3 + int(num_blocks)
num_header_bytes = num_bytes_per_item * num_header_items
num_header_chars = num_bytes_to_num_base64_chars(num_header_bytes)
byte_string = base64.b64decode(data[:num_header_chars])
header = np.frombuffer(byte_string, header_dtype)
# num_blocks = header[0]
# max_uncompressed_block_size = header[1]
# last_compressed_block_size = header[2]
block_sizes = header[3:]
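        # Example (illustrative): 70000 uncompressed bytes split into
        # 32768-byte blocks give header = [3, 32768, 4464, c0, c1, c2],
        # where c0..c2 are the compressed block sizes (cf. the writer's
        # text_writer_compressed below).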
# Read the block data
byte_array = base64.b64decode(data[num_header_chars:])
if self.byte_order is not None:
dtype = dtype.newbyteorder(
"<" if self.byte_order == "LittleEndian" else ">"
)
byte_offsets = np.empty(block_sizes.shape[0] + 1, dtype=block_sizes.dtype)
byte_offsets[0] = 0
np.cumsum(block_sizes, out=byte_offsets[1:])
assert self.compression is not None
c = {"vtkLZMADataCompressor": lzma, "vtkZLibDataCompressor": zlib}[
self.compression
]
# process the compressed data
block_data = np.concatenate(
[
np.frombuffer(
c.decompress(byte_array[byte_offsets[k] : byte_offsets[k + 1]]),
dtype=dtype,
)
for k in range(num_blocks)
]
)
return block_data
def read_data(self, c):
fmt = c.attrib["format"] if "format" in c.attrib else "ascii"
data_type = c.attrib["type"]
try:
dtype = vtu_to_numpy_type[data_type]
except KeyError:
raise ReadError(f"Illegal data type '{data_type}'.")
if fmt == "ascii":
# ascii
if c.text.strip() == "":
# https://github.com/numpy/numpy/issues/18435
data = np.empty((0,), dtype=dtype)
else:
data = np.fromstring(c.text, dtype=dtype, sep=" ")
elif fmt == "binary":
reader = (
self.read_uncompressed_binary
if self.compression is None
else self.read_compressed_binary
)
data = reader(c.text.strip(), dtype)
elif fmt == "appended":
offset = int(c.attrib["offset"])
reader = (
self.read_uncompressed_binary
if self.compression is None
else self.read_compressed_binary
)
assert self.appended_data is not None
data = reader(self.appended_data[offset:], dtype)
else:
raise ReadError(f"Unknown data format '{fmt}'.")
if "NumberOfComponents" in c.attrib:
nc = int(c.attrib["NumberOfComponents"])
try:
data = data.reshape(-1, nc)
except ValueError:
name = c.attrib["Name"]
raise CorruptionError(
"VTU file corrupt. "
+ f"The size of the data array '{name}' is {data.size} "
+ f"which doesn't fit the number of components {nc}."
)
return data
def read(filename):
reader = VtuReader(filename)
return Mesh(
reader.points,
reader.cells,
point_data=reader.point_data,
cell_data=reader.cell_data,
field_data=reader.field_data,
)
def _chunk_it(array, n):
k = 0
while k * n < len(array):
yield array[k * n : (k + 1) * n]
k += 1
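# e.g. list(_chunk_it(b"abcdefg", 3)) == [b"abc", b"def", b"g"]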
def write(filename, mesh, binary=True, compression="zlib", header_type=None):
    # Writing XML with an etree requires first transforming the (potentially large)
    # arrays into strings, which are much larger in memory still. This makes this writer
    # very memory hungry. See <https://stackoverflow.com/q/59272477/353337>.
from .._cxml import etree as ET
# Check if the mesh contains polyhedral cells, this will require special treatment
# in certain places.
is_polyhedron_grid = False
for c in mesh.cells:
if c.type.startswith("polyhedron"):
is_polyhedron_grid = True
break
# The current implementation cannot mix polyhedral cells with other cell types.
# To write such meshes, represent all cells as polyhedra.
if is_polyhedron_grid:
for c in mesh.cells:
if c.type[:10] != "polyhedron":
raise ValueError(
"VTU export cannot mix polyhedral cells with other cell types"
)
if not binary:
warn("VTU ASCII files are only meant for debugging.")
if mesh.points.shape[1] == 2:
warn(
"VTU requires 3D points, but 2D points given. "
"Appending 0 third component."
)
points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
else:
points = mesh.points
if mesh.point_sets:
info(
"VTU format cannot write point_sets. Converting them to point_data...",
highlight=False,
)
key, _ = join_strings(list(mesh.point_sets.keys()))
key, _ = replace_space(key)
mesh.point_sets_to_data(key)
if mesh.cell_sets:
info(
"VTU format cannot write cell_sets. Converting them to cell_data...",
highlight=False,
)
key, _ = join_strings(list(mesh.cell_sets.keys()))
key, _ = replace_space(key)
mesh.cell_sets_to_data(key)
vtk_file = ET.Element(
"VTKFile",
type="UnstructuredGrid",
version="0.1",
# Use the native endianness. Not strictly necessary, but this simplifies things
# a bit.
byte_order=("LittleEndian" if sys.byteorder == "little" else "BigEndian"),
)
if header_type is None:
header_type = "UInt32"
else:
vtk_file.set("header_type", header_type)
assert header_type is not None
if binary and compression:
# TODO lz4 <https://vtk.org/doc/nightly/html/classvtkDataCompressor.html>
compressions = {
"lzma": "vtkLZMADataCompressor",
"zlib": "vtkZLibDataCompressor",
}
assert compression in compressions
vtk_file.set("compressor", compressions[compression])
# swap the data to match the system byteorder
# Don't use byteswap to make sure that the dtype is changed; see
# <https://github.com/numpy/numpy/issues/10372>.
points = points.astype(points.dtype.newbyteorder("="), copy=False)
for k, cell_block in enumerate(mesh.cells):
cell_type = cell_block.type
data = cell_block.data
# Treatment of polyhedra is different from other types
if is_polyhedron_grid:
new_cell_info = []
for cell_info in data:
new_face_info = []
for face_info in cell_info:
face_info = np.asarray(face_info)
new_face_info.append(
face_info.astype(face_info.dtype.newbyteorder("="), copy=False)
)
new_cell_info.append(new_face_info)
mesh.cells[k] = CellBlock(cell_type, new_cell_info)
else:
mesh.cells[k] = CellBlock(
cell_type, data.astype(data.dtype.newbyteorder("="), copy=False)
)
for key, data in mesh.point_data.items():
mesh.point_data[key] = data.astype(data.dtype.newbyteorder("="), copy=False)
for data in mesh.cell_data.values():
for k, dat in enumerate(data):
data[k] = dat.astype(dat.dtype.newbyteorder("="), copy=False)
for key, data in mesh.field_data.items():
mesh.field_data[key] = data.astype(data.dtype.newbyteorder("="), copy=False)
def numpy_to_xml_array(parent, name, data):
vtu_type = numpy_to_vtu_type[data.dtype]
fmt = "{:.11e}" if vtu_type.startswith("Float") else "{:d}"
da = ET.SubElement(parent, "DataArray", type=vtu_type, Name=name)
if len(data.shape) == 2:
da.set("NumberOfComponents", f"{data.shape[1]}")
def text_writer_compressed(f):
max_block_size = 32768
data_bytes = data.tobytes()
# round up
num_blocks = -int(-len(data_bytes) // max_block_size)
last_block_size = len(data_bytes) - (num_blocks - 1) * max_block_size
# It's too bad that we have to keep all blocks in memory. This is
# necessary because the header, written first, needs to know the
# lengths of all blocks. Also, the blocks are encoded _after_ having
# been concatenated.
c = {"lzma": lzma, "zlib": zlib}[compression]
compressed_blocks = [
# This compress is the slowest part of the writer
c.compress(block)
for block in _chunk_it(data_bytes, max_block_size)
]
# collect header
header = np.array(
[num_blocks, max_block_size, last_block_size]
+ [len(b) for b in compressed_blocks],
dtype=vtu_to_numpy_type[header_type],
)
f.write(base64.b64encode(header.tobytes()).decode())
f.write(base64.b64encode(b"".join(compressed_blocks)).decode())
def text_writer_uncompressed(f):
data_bytes = data.tobytes()
# collect header
header = np.array(len(data_bytes), dtype=vtu_to_numpy_type[header_type])
f.write(base64.b64encode(header.tobytes() + data_bytes).decode())
def text_writer_ascii(f):
# This write() loop is the bottleneck for the write. Alternatives:
# savetxt is super slow:
# np.savetxt(f, data.reshape(-1), fmt=fmt)
# joining and writing is a bit faster, but consumes huge amounts of
# memory:
# f.write("\n".join(map(fmt.format, data.reshape(-1))))
for item in data.reshape(-1):
f.write((fmt + "\n").format(item))
if binary:
da.set("format", "binary")
da.text_writer = (
text_writer_compressed if compression else text_writer_uncompressed
)
else:
da.set("format", "ascii")
da.text_writer = text_writer_ascii
def _polyhedron_face_cells(face_cells):
        # Define the faces of each cell in the format specified for VTU Polyhedron
        # cells. These are defined in Mesh.polyhedron_faces, as block data. The block
        # consists of a nested list (the outer list represents cells, the inner list
        # the faces of each cell), where the items of the inner list are the nodes of
        # specific faces.
#
# The output format is specified at https://vtk.org/Wiki/VTK/Polyhedron_Support
# Initialize array for size of data per cell.
data_size_per_cell = np.zeros(len(face_cells), dtype=int)
# The data itself is of unknown size, and cannot be initialized
data = []
for ci, cell in enumerate(face_cells):
# Number of faces for this cell
data.append(len(cell))
for face in cell:
# Number of nodes for this face
data.append(face.size)
# The nodes themselves
data += face.tolist()
data_size_per_cell[ci] = len(data)
# The returned data corresponds to the faces and faceoffsets fields in the
# vtu polyhedron data format
return data, data_size_per_cell.tolist()
comment = ET.Comment(f"This file was created by meshio v{__version__}")
vtk_file.insert(1, comment)
grid = ET.SubElement(vtk_file, "UnstructuredGrid")
total_num_cells = sum(len(c.data) for c in mesh.cells)
piece = ET.SubElement(
grid,
"Piece",
NumberOfPoints=f"{len(points)}",
NumberOfCells=f"{total_num_cells}",
)
# points
if points is not None:
pts = ET.SubElement(piece, "Points")
numpy_to_xml_array(pts, "Points", points)
if mesh.cells is not None and len(mesh.cells) > 0:
cls = ET.SubElement(piece, "Cells")
faces = None
faceoffsets = None
if is_polyhedron_grid:
            # The VTK polyhedron format requires both cell-node connectivity and a
            # definition of faces. The cell-node relation must be recovered from the
            # cell-face-nodes currently in CellBlocks.
# NOTE: If polyhedral cells are implemented for more mesh types, this code
# block may be useful for those as well.
con = []
num_nodes_per_cell = []
for block in mesh.cells:
for cell in block.data:
nodes_this_cell = []
for face in cell:
nodes_this_cell += face.tolist()
unique_nodes = np.unique(nodes_this_cell).tolist()
con += unique_nodes
num_nodes_per_cell.append(len(unique_nodes))
connectivity = np.array(con)
# offsets = np.hstack(([0], np.cumsum(num_nodes_per_cell)[:-1]))
offsets = np.cumsum(num_nodes_per_cell)
# Initialize data structures for polyhedral cells
faces = []
faceoffsets = []
else:
# create connectivity, offset, type arrays
connectivity = []
for v in mesh.cells:
d = v.data
new_order = meshio_to_vtk_order(v.type)
if new_order is not None:
d = d[:, new_order]
connectivity.append(d.flatten())
connectivity = np.concatenate(connectivity)
# offset (points to the first element of the next cell)
offsets = [
v.data.shape[1]
* np.arange(1, v.data.shape[0] + 1, dtype=connectivity.dtype)
for v in mesh.cells
]
for k in range(1, len(offsets)):
offsets[k] += offsets[k - 1][-1]
offsets = np.concatenate(offsets)
# types
types_array = []
for cell_block in mesh.cells:
key = cell_block.type
# some adaptions for polyhedron
if key.startswith("polyhedron"):
# Get face-cell relation on the vtu format. See comments in helper
# function for more information of how to specify this.
faces_loc, faceoffsets_loc = _polyhedron_face_cells(cell_block.data)
# Adjust offsets to global numbering
assert faceoffsets is not None
if len(faceoffsets) > 0:
faceoffsets_loc = [fi + faceoffsets[-1] for fi in faceoffsets_loc]
assert faces is not None
faces += faces_loc
faceoffsets += faceoffsets_loc
key = "polyhedron"
types_array.append(np.full(len(cell_block), meshio_to_vtk_type[key]))
types = np.concatenate(
types_array
# [np.full(len(v), meshio_to_vtk_type[k]) for k, v in mesh.cells]
)
numpy_to_xml_array(cls, "connectivity", connectivity)
numpy_to_xml_array(cls, "offsets", offsets)
numpy_to_xml_array(cls, "types", types)
if is_polyhedron_grid:
# Also store face-node relation
numpy_to_xml_array(cls, "faces", np.array(faces, dtype=int))
numpy_to_xml_array(cls, "faceoffsets", np.array(faceoffsets, dtype=int))
if mesh.point_data:
pd = ET.SubElement(piece, "PointData")
for name, data in mesh.point_data.items():
numpy_to_xml_array(pd, name, data)
if mesh.cell_data:
cd = ET.SubElement(piece, "CellData")
for name, data in raw_from_cell_data(mesh.cell_data).items():
numpy_to_xml_array(cd, name, data)
# write_xml(filename, vtk_file, pretty_xml)
tree = ET.ElementTree(vtk_file)
tree.write(filename)
register_format("vtu", [".vtu"], read, {"vtu": write})
|
from io import BytesIO
from pyrogram import Client, filters
from pyrogram.types import Message
from config import prefix
from consts import http
from localization import use_chat_lang
from utils import commands
@Client.on_message(filters.command("print", prefix))
@use_chat_lang()
async def prints(c: Client, m: Message, strings):
if len(m.command) == 1:
return await m.reply_text(
strings("print_usage"), reply_to_message_id=m.message_id
)
sent = await m.reply_text(strings("taking_screenshot"))
text = m.text.split(maxsplit=1)[1]
r = await http.get("https://webshot.amanoteam.com/print", params=dict(q=text))
bio = BytesIO(r.read())
bio.name = "screenshot.png"
await m.reply_photo(bio)
await sent.delete()
commands.add_command("print", "tools")
|
from gym_221bbakerstreet.environments.baker_street import BakerStreetEnvironment
|
'''Write a program with a function called maior() that receives any number of
integer parameters.
The program must analyse the values and report which one is the largest.'''
# I didn't solve this one!
from time import sleep
def maior(*núm):
    cont = maior = 0
    print('-' * 30)
    print('\nAnalysing the given values... ')
    for valor in núm:
        print(f'{valor} ', end='')
        sleep(0.3)
        if cont == 0:
            maior = valor
        else:
            if valor > maior:
                maior = valor
        cont += 1
    print(f'\n{cont} values were given in total.')
    print(f'The largest value given was {maior}.')
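# Note: Python's built-in max() already does this, e.g. max(2, 9, 4) == 9;
# writing the loop explicitly is the point of the exercise.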
# main program
maior(2,9,4,5,7,1)
maior(4,7,0)
maior(1, 2)
maior(6)
maior() |
import argparse
from utils import int_tuple
def get_evaluation_parser():
parser = get_training_parser()
parser.add_argument("--dset_type", default="test", type=str)
parser.add_argument("--noise_mix_type", default="global")
parser.add_argument('--metrics', type=str, default='accuracy', choices=['accuracy', 'collision', 'qualitative'], help='evaluate metrics')
return parser
def get_training_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="./log/", help="Directory containing logging file")
parser.add_argument("--model_dir", default="", help="Directory containing logging file")
# dataset
parser.add_argument("--dataset_name", default="synthetic", type=str)
parser.add_argument("--delim", default="\t")
parser.add_argument("--obs_len", default=8, type=int)
parser.add_argument("--fut_len", default=12, type=int)
parser.add_argument("--skip", default=1, type=int)
parser.add_argument("--n_coordinates", type=int, default=2, help="Number of coordinates")
parser.add_argument("--filter_envs", type=str, default="", help="Filter only certain environments (i.e 0.1-0.3-0.5)")
parser.add_argument("--filter_envs_pretrain", type=str, default="", help="Say which env were used during pretraining (for contrastive loss) (i.e 0.1-0.3-0.5)")
parser.add_argument('--reduce', default=0, type=int)
parser.add_argument('--reduceall', default=0, type=int)
    parser.add_argument('--testonly', default=0, type=int, help='Only test model. 0 -> training, 1 -> testing, 2 -> testing with k, 3 -> testing with refinement') # 0 is normal train, 1 is test, 2 is test with k, 3 is ttr
# randomness
parser.add_argument("--seed", type=int, default=72, help="Random seed")
parser.add_argument("--noise_dim", default=(16,), type=int_tuple)
parser.add_argument("--noise_type", default="gaussian")
parser.add_argument("--original_seed", type=int, default=1, help="Seed of original training")
# architecture (STGAT)
parser.add_argument("--traj_lstm_hidden_size", default=32, type=int)
parser.add_argument("--heads", type=str, default="4,1", help="Heads in each layer, splitted with comma")
parser.add_argument("--hidden-units", type=str, default="16", help="Hidden units in each hidden layer, splitted with comma")
parser.add_argument("--graph_network_out_dims", type=int, default=32, help="dims of every node after through GAT module")
parser.add_argument("--graph_lstm_hidden_size", default=32, type=int)
parser.add_argument("--dropout", type=float, default=0, help="Dropout rate (1 - keep probability)")
parser.add_argument("--alpha", type=float, default=0.2, help="Alpha for the leaky_relu")
parser.add_argument('--teachingratio', default=0, type=float)
# architecture (Style)
parser.add_argument('--stylefs', type=str, default='all', choices=['all', 'traj', 'graph'])
parser.add_argument("--relsocial", action='store_false') # default value true
parser.add_argument('--contrastive', default=0, type=float)
parser.add_argument("--aggrstyle", default='minpol-mean', type=str)
parser.add_argument("--classification", default=3, type=int)
# full pipeline arguments
    parser.add_argument('--styleinteg', default='adain', type=str, help='Integrator type ("concat", "adain", "adainnew")')
    parser.add_argument('--newstyleinteg', default='', type=str, help='Used when loading a pretrained model whose styleinteg you \
        want to change. Set --styleinteg to the value stored in the checkpoint you want to load \
        (to avoid state_dict problems), then put the new styleinteg value in this parameter.')
# computation
parser.add_argument("--loader_num_workers", default=2, type=int)
parser.add_argument("--gpu_num", default="1", type=str)
# training
parser.add_argument("--best_k", default=1, type=int)
parser.add_argument("--batch_size", default='', type=str)
parser.add_argument("--batch_method", default='het', type=str,
help='Use Homogeneous (hom), Heterogeneous (het) or alternated homogeneous (alt) batches during training')
parser.add_argument("--shuffle", default=True, type=bool)
# spurious feature
parser.add_argument("--add_confidence", default=False, type=bool)
parser.add_argument("--domain_shifts", default='0', type=str, help='domain_shifts per environment: hotel,univ,zara1,zara2,eth')
# method
parser.add_argument("--counter", default=False, type=bool, help='counterfactual analysis')
parser.add_argument("--start-epoch", default=1, type=int, metavar="N", help="manual epoch number (useful on restarts)")
parser.add_argument("--use_gpu", default=1, type=int)
# general training
parser.add_argument("--finetune", default="", type=str)
parser.add_argument("--num_epochs", default='5-5-10', type=lambda x: int_tuple(x, '-')) # '150-100-150',
parser.add_argument("--resume", default="", type=str, metavar="PATH", help="path to latest checkpoint (default: none)")
parser.add_argument("--batch_hetero", default=True, type=bool, help='Use Homogeneous/Heterogeneous batches during training')
parser.add_argument("--tfdir", default='runs', type=str)
# arguments for training style encoder
parser.add_argument("--fut", default=True, type=bool, help='Use future or not to train style encoder')
parser.add_argument("--absolut", default=True, type=bool)
parser.add_argument('--backclassencoder', default=False, type=bool)
# learning rates
parser.add_argument("--lrclass", default=1e-2, type=float, help="initial learning rate for style classifier optimizer")
parser.add_argument("--lrstgat", default=1e-3, type=float, help="initial learning rate for stgat optimizer")
parser.add_argument("--lrstyle", default=5e-4, type=float, help="initial learning rate for style encoder optimizer")
parser.add_argument('--lrinteg', default=0.01, type=float, help="initial learning rate for the integrator optimizer")
# other parameters to test after
parser.add_argument('--addloss', default=0, type=float)
parser.add_argument('--ttr', default=0, type=int, help="Number of steps of refinement during test time")
parser.add_argument('--ttrlr', default=0, type=float, help="initial learning rate for the refinement optimizer")
parser.add_argument('--wrongstyle', default=False, type=bool, help="True if we refine with the accurate style, False if we want to perturb the style with a false one")
parser.add_argument('--styleconsistency', default=0, type=float, help="Adding a loss of style prediction to the training")
# method
parser.add_argument("--irm", default=0.0, type=float, help='IRM parameter (lambda)')
parser.add_argument("--vrex", default=0.0, type=float, help='v-REx parameter (beta)')
parser.add_argument("--complexdecoder", default=True, type=bool, help='')
parser.add_argument("--unbiased", default=True, type=bool, help='')
return parser
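# Usage sketch (illustrative): build the parser and parse a command line, e.g.
#   args = get_training_parser().parse_args(["--dataset_name", "eth", "--seed", "42"])
#   print(args.dataset_name, args.seed)  # -> eth 42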
|
from gym.envs.registration import register
# Mujoco
# ----------------------------------------
# - randomised reward functions
register(
'AntDir-v0',
entry_point='environments.wrappers:mujoco_wrapper',
kwargs={'entry_point': 'environments.mujoco.ant_dir:AntDirEnv',
'max_episode_steps': 200},
max_episode_steps=200
)
register(
'AntDir2D-v0',
entry_point='environments.wrappers:mujoco_wrapper',
kwargs={'entry_point': 'environments.mujoco.ant_dir:AntDir2DEnv',
'max_episode_steps': 200},
max_episode_steps=200,
)
register(
'AntGoal-v0',
entry_point='environments.wrappers:mujoco_wrapper',
kwargs={'entry_point': 'environments.mujoco.ant_goal:AntGoalEnv',
'max_episode_steps': 200},
max_episode_steps=200
)
register(
'HalfCheetahDir-v0',
entry_point='environments.wrappers:mujoco_wrapper',
kwargs={'entry_point': 'environments.mujoco.half_cheetah_dir:HalfCheetahDirEnv',
'max_episode_steps': 200},
max_episode_steps=200
)
register(
'HalfCheetahVel-v0',
entry_point='environments.wrappers:mujoco_wrapper',
kwargs={'entry_point': 'environments.mujoco.half_cheetah_vel:HalfCheetahVelEnv',
'max_episode_steps': 200},
max_episode_steps=200
)
register(
'HumanoidDir-v0',
entry_point='environments.wrappers:mujoco_wrapper',
kwargs={'entry_point': 'environments.mujoco.humanoid_dir:HumanoidDirEnv',
'max_episode_steps': 200},
max_episode_steps=200
)
# - randomised dynamics
register(
id='Walker2DRandParams-v0',
entry_point='environments.mujoco.rand_param_envs.walker2d_rand_params:Walker2DRandParamsEnv',
max_episode_steps=200
)
register(
id='HopperRandParams-v0',
entry_point='environments.mujoco.rand_param_envs.hopper_rand_params:HopperRandParamsEnv',
max_episode_steps=200
)
# # 2D Navigation
# # ----------------------------------------
#
register(
'PointEnv-v0',
entry_point='environments.navigation.point_robot:PointEnv',
kwargs={'goal_radius': 0.2,
'max_episode_steps': 100,
'goal_sampler': 'semi-circle'
},
max_episode_steps=100,
)
register(
'SparsePointEnv-v0',
entry_point='environments.navigation.point_robot:SparsePointEnv',
kwargs={'goal_radius': 0.2,
'max_episode_steps': 100,
'goal_sampler': 'semi-circle'
},
max_episode_steps=100,
)
#
# # GridWorld
# # ----------------------------------------
register(
'GridNavi-v0',
entry_point='environments.navigation.gridworld:GridNavi',
kwargs={'num_cells': 5, 'num_steps': 15},
)
# # Alchemy
# # ----------------------------------------
#
register(
'Alchemy-v0',
entry_point='environments.alchemy.alchemy:AlchemyEnv',
kwargs={'num_trials': 10, 'num_stones_per_trial': 1, 'num_potions_per_trial': 12, 'max_steps_per_trial': 20, 'fixed': False},
max_episode_steps=20
)
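# Usage sketch (illustrative): once this module is imported, the environments
# above can be instantiated by id, e.g.
#   import gym
#   env = gym.make('GridNavi-v0')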
|
import os
import sys
import time
import datetime
import logging
import shutil
import mumaxc as mc
import subprocess as sp
log = logging.getLogger(__name__)
class MumaxRunner:
"""Base class for running mumax3.
Don't use this directly. Use get_mumax_runner() to pick a subclass
of this class.
"""
def call(self, argstr, need_stderr=False):
now = datetime.datetime.now()
timestamp = '{}/{:02d}/{:02d} {:02d}:{:02d}'.format(now.year,
now.month,
now.day,
now.hour,
now.minute)
print('{}: Running mumax3 ({}) ... '.format(timestamp, argstr), end='')
tic = time.time()
res = self._call(argstr=argstr, need_stderr=need_stderr)
self._kill()
toc = time.time()
seconds = '({:0.1f} s)'.format(toc - tic)
print(seconds)
        if res.returncode != 0:
if sys.platform != 'win32':
# Only on Linux and MacOS - on Windows we do not get
# stderr and stdout.
stderr = res.stderr.decode('utf-8', 'replace')
stdout = res.stdout.decode('utf-8', 'replace')
cmdstr = ' '.join(res.args)
print('mumax error:')
print('\tcommand: {}'.format(cmdstr))
print('\tstdout: {}'.format(stdout))
print('\tstderr: {}'.format(stderr))
print('\n')
raise RuntimeError('Error in mumax run.')
return res
def _call(self, argstr, need_stderr=False):
# This method should be implemented in subclass.
raise NotImplementedError
def _kill(self, targets=('all',)):
# This method should be implemented in subclass.
raise NotImplementedError
def version(self):
pass
def platform(self):
pass
class ExeMumaxRunner(MumaxRunner):
"""Using mumax executable on $PATH.
"""
def __init__(self, mumax_exe='mumax3'):
self.mumax_exe = mumax_exe
def _call(self, argstr, need_stderr=False):
cmd = [self.mumax_exe, argstr]
return sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
def _kill(self, targets=['all']):
pass
#sp.run([self.oommf_exe, "killoommf"] + targets)
class OptirunMumaxRunner(MumaxRunner):
"""Using mumax executable on $PATH.
"""
def __init__(self, mumax_exe='mumax3'):
self.mumax_exe = mumax_exe
def _call(self, argstr, need_stderr=False):
cmd = ['optirun', self.mumax_exe, argstr]
return sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
def _kill(self, targets=['all']):
pass
#sp.run([self.oommf_exe, "killoommf"] + targets)
def get_mumax_runner(use_cache=True, mumax_exe='mumax3'):
"""Find the best available way to run Mumax.
Returns an MumaxRunner object, or raises EnvironmentError if no suitable
method is found.
Parameters
----------
use_cache : bool
The first call to this function will determine the best way to run OOMMF
and cache it. Normally, subsequent calls will return the OOMMFRunner
object from the cache. Setting this parameter to False will cause it to
check for available methods again.
envvar : str
Name of the environment variable containing the path to oommf.tcl
oommf_exe : str
The name or path of the executable oommf command
docker_exe : str
The name or path of the docker command
"""
optirun_exe = shutil.which('optirun')
mumax_exe = shutil.which(mumax_exe)
if optirun_exe:
return OptirunMumaxRunner(mumax_exe)
else:
if mumax_exe:
return ExeMumaxRunner(mumax_exe)
else:
raise EnvironmentError('mumax3 cannot be found.')
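# Usage sketch (illustrative; 'script.mx3' is a hypothetical mumax3 input file):
#   runner = get_mumax_runner()
#   runner.call('script.mx3')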
def status():
"""Run a macrospin example for 1 ps through oommfc and print the OOMMF
status.
"""
pass
#try:
# system = oc.examples.macrospin()
# td = oc.TimeDriver()
# td.drive(system, t=1e-12, n=1, overwrite=True)
# print('OOMMF found and running.')
# shutil.rmtree('example-macrospin')
# return 0
#except (EnvironmentError, RuntimeError):
# print("Cannot find OOMMF.")
# return 1
def overhead():
"""Run a macrospin example for 1 ps through oommfc and directly and
return the difference in run times.
Returns
-------
overhead : float
The time difference (overhead) between running OOMMF though
oommfc and directly
"""
pass
# Running OOMMF through oommfc.
#system = oc.examples.macrospin()
#td = oc.TimeDriver()
#oommfc_start = time.time()
#td.drive(system, t=1e-12, n=1, overwrite=True)
#oommfc_stop = time.time()
#oommfc_time = oommfc_stop - oommfc_start
# Running OOMMF directly.
#oommf_runner = get_oommf_runner()
#mifpath = os.path.realpath(os.path.join('example-macrospin', 'drive-0',
# 'example-macrospin.mif'))
#oommf_start = time.time()
#oommf_runner.call(mifpath)
#oommf_stop = time.time()
#oommf_time = oommf_stop - oommf_start
#shutil.rmtree('example-macrospin')
#return oommfc_time - oommf_time
|
import numpy as np
from keras.utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib
import data_process
#Load sign language dataset
data, label = data_process.load_data()
data=data_process.normalize_data(data)
#data processing
x_train, x_test, y_train, y_test = train_test_split(data, label, test_size = 0.3)
num_classes = 15
#y_train=to_categorical(y_train,num_classes)
#y_test=to_categorical(y_test,num_classes)
#print(x_train.shape)
#print(y_train.shape)
print(type(y_test))
#print(y_test)
#Autoencoder
model = Sequential()
model.add(Dense(3, name='representation', input_shape=(20,)))
model.add(Activation('relu'))
model.add(Dense(20))
model.add(Activation('relu'))
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['mse'])
print(model.summary())
epochs = 200
validation_split = 0.2
history = model.fit(data, data, batch_size=128,
epochs=epochs, validation_split=validation_split)
def predict_representation(model, data, layer_name='representation'):
## We form a new model. Instead of doing \psi\phi(x), we only take \phi(x)
## To do so, we use the layer name
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer(layer_name).output)
representation = intermediate_layer_model.predict(data)
representation = representation.reshape(representation.shape[0], -1)
return representation
representation = predict_representation(model, x_train)
def plot_representation_label(representation, labels, plot3d=1):
    ## Function used to plot the representation vectors and assign different
    ## colors to the different classes
    # Create the figure; use a 3d projection when requested, otherwise 2d axes.
    # (Creating the axes only once avoids stacking a 3d axes on top of a 2d one.)
    if plot3d:
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection='3d')
    else:
        fig, ax = plt.subplots(figsize=(10, 6))
# Check number of labels to separate by colors
#n_labels = labels.max() + 1
#n_labels=n_labels.astype('int32')
n_labels=10
# Color map, and give different colors to every label
cm = plt.get_cmap('gist_rainbow')
ax.set_prop_cycle(color=[cm(1. * i / (n_labels)) for i in range(n_labels)])
# Loop is to plot different color for each label
for l in range(n_labels):
# Only select indices for corresponding label
index = labels == l
        ind = index.reshape(len(index))
print(ind.shape)
if plot3d:
ax.scatter(representation[ind, 0], representation[ind, 1],
representation[ind, 2], label=str(l))
else:
ax.scatter(representation[ind, 0], representation[ind, 1], label=str(l))
ax.legend()
plt.title('Features in the representation space with corresponding label')
plt.show()
return fig, ax
plot_representation_label(representation, y_train)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from .views import index as home
urlpatterns = [
url(r'^$', home, name='index'),
    url(r'^store/', include('store.urls')),  # no '$' when include()ing, so sub-URLs can match
url(r'^admin/', include(admin.site.urls)),
]
# URL patterns served only in development (DEBUG=True)
from django.conf.urls.static import static
from django.conf import settings
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'^media/(?P<path>.*)',
'serve',
{'document_root': settings.MEDIA_ROOT}), )
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 5 14:52:14 2016
@author: shibom
"""
import sys, os, time
import PyQt4.QtGui as Qt
import PyQt4.QtCore as QC
import subprocess as sub
import plot_utils as psx
import plot_cluster as pc
from sphase import Building
import run_shelx
def writer(filename, symm, sites, resolution, emins, ntries):
ofh = open('run_cluster','w')
ofh.write("#!/bin/bash \n")
ofh.write('\n\n')
ofh.write('''\
python shelx_batch.py --hklfile %s --symm %s\
--sites %s --resolution %s --emins %s --ntries %s\
''' %(filename,symm,sites,resolution,emins,ntries))
ofh.close()
sub.call(['chmod +x run_cluster'],shell=True)
def mail_write(email_id, output):
fh = open("tmp_mail.sh", 'w')
fh.write("#!/bin/bash\n")
fh.write('\n\n')
w = pc.shelxd_plotter(output)
w.create_list_frame()
w.plot_cc()
fh.write('sleep 2\n')
fh.write('cd %s\n' %output)
fh.write('echo "All jobs finished, check output" | mail -s "shelx jobs status" -a "CCplot.pdf" %s\n' %email_id)
fh.write('sleep 2\n')
fh.close()
sub.call(['chmod +x tmp_mail.sh'], shell=True)
class Tabs(Qt.QTabWidget):
def __init__(self):
Qt.QTabWidget.__init__(self)
self.tab1 = Scale_Merge()
self.tab2 = Substructure()
self.tab3 = Model_build()
self.addTab(self.tab1, "scale_merge")
self.addTab(self.tab2, "Substructure")
self.addTab(self.tab3, "Model_build")
self.setWindowTitle("Native-SAD ui")
class Scale_Merge(Qt.QMainWindow):
def __init__(self):
Qt.QMainWindow.__init__(self)
self.layout = Qt.QVBoxLayout()
self.layout.addWidget(MainLayout(adding_files()))
self.layout.addWidget(MainLayout(scale_merge()))
self.layout.addWidget(MainLayout(quick_anom()))
self.centralWidget = Qt.QWidget()
self.centralWidget.setLayout(self.layout)
self.setCentralWidget(self.centralWidget)
class Substructure(Qt.QMainWindow):
def __init__(self):
Qt.QMainWindow.__init__(self)
self.layout = Qt.QVBoxLayout()
self.layout.addWidget(MainLayout(shelx_ui()))
self.layout.addWidget(MainLayout(plotting_ui()))
self.centralWidget = Qt.QWidget()
self.centralWidget.setLayout(self.layout)
self.setCentralWidget(self.centralWidget)
class Model_build(Qt.QMainWindow):
def __init__(self):
Qt.QMainWindow.__init__(self)
self.layout = Qt.QVBoxLayout()
self.layout.addWidget(MainLayout(model_ui()))
self.centralWidget = Qt.QWidget()
self.centralWidget.setLayout(self.layout)
self.setCentralWidget(self.centralWidget)
class MainLayout(Qt.QMainWindow):
def __init__(self, classname):
Qt.QMainWindow.__init__(self)
self.scrollLayout = Qt.QFormLayout()
self.scrollwidget = Qt.QWidget()
self.scrollwidget.setLayout(self.scrollLayout)
self.scrollArea = Qt.QScrollArea()
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setWidget(self.scrollwidget)
# main layout
self.mainLayout = Qt.QVBoxLayout()
# add all main to the main vLayout
self.mainLayout.addWidget(self.scrollArea)
self.add_widget(classname)
#self.scrollLayout.addRow(plotting_ui())
# central widget
self.centralWidget = Qt.QWidget()
self.centralWidget.setLayout(self.mainLayout)
# set central widget
self.setCentralWidget(self.centralWidget)
def add_widget(self, classname):
        self.splitter = Qt.QSplitter()  # provides a nice border around each section
self.splitter.addWidget(classname)
self.scrollLayout.addRow(self.splitter)
    def closing(self):
        # was a @classmethod whose first argument was the class itself, so
        # close() was called on the class object; close the instance instead
        self.close()
        print "App is closing"
class scale_merge(Qt.QWidget):
    fwid_lists = []  # class-level list of all file widgets, so their file names can be collected later when running xscale
def __init__(self):
Qt.QWidget.__init__(self)
self.layout = Qt.QHBoxLayout()
self.morefile_btn = widgets.createButton("Add more", self.add_filewidget)
self.morefile_btn.setToolTip("Click to add more HKL files for merging")
self.xscale_btn = widgets.createButton("xscale", self.merging)
self.xscale_btn.setToolTip("Run xscale")
self.plot_stat = widgets.createButton("xscale_plot", self.xscale_plot)
self.plot_stat.setToolTip("plot I/sig, SigAno, Rfactors")
self.layout.addWidget(self.morefile_btn)
self.layout.addWidget(self.xscale_btn)
self.layout.addWidget(self.plot_stat)
self.setLayout(self.layout)
def add_filewidget(self):
fwid = files_widget("Unmerged HKL")
adding_files.File_layer.addWidget(fwid)
self.__class__.fwid_lists.append(fwid)
def prep_xscale(self):
fh = open("XSCALE.INP",'w')
fh.write("OUTPUT_FILE=XSCALE.ahkl\n")
fh.write("FRIEDEL'S_LAW=FALSE\n")
fh.write("SAVE_CORRECTION_IMAGES=FALSE\n")
fh.write('\n\n')
try:
for dataname in self.__class__.fwid_lists:
if dataname.fname != None:
fh.write("INPUT_FILE= %s\n" %dataname.fname)
else:
print "no further input files for XSCALE.INP\n"
except IndexError:
pass
fh.close()
def merging(self):
if self.__class__.fwid_lists[0].fname != None:
self.prep_xscale()
sub.call(["xscale_par &"], shell=True)
else:
print "nothing to merge, at least one hkl file needed"
def xscale_plot(self):
try:
self.ap = Qt.QDialog()
self.ap.ui = psx.Main('XSCALE.LP', 'xscale')
self.ap.ui.show()
except OSError:
print "XSCALE.LP File not found, run xscale again\n"
class adding_files(Qt.QWidget):
    File_layer = Qt.QVBoxLayout()  # class-level layout; it is updated as more data files are added
def __init__(self):
Qt.QWidget.__init__(self)
self.setWindowTitle("Add your HKL files")
self.unmerged_data = files_widget("Unmerged HKL")
self.__class__.File_layer.addWidget(self.unmerged_data) #updating the private layer
scale_merge.fwid_lists.append(self.unmerged_data) #book-keep the file_widgets via a private variable list for scale_merge
self.setLayout(self.__class__.File_layer)
class quick_anom(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
layout = Qt.QVBoxLayout()
self.get_file = files_widget("Reflection File")
self.get_file.setToolTip("Reflection file must be SHELX supported format")
self.sg = widgets("Space-Group")
self.sg.setToolTip("Please use name, not space-group number.")
self.xc_xcp = two_buttons("SHELXC", "SHELXC_plot")
self.xc_xcp.Button1.clicked.connect(lambda: self.shelxc_run())
self.xc_xcp.Button2.clicked.connect(lambda: self.shelxc_plot())
layout.addWidget(self.get_file)
layout.addWidget(self.sg)
layout.addWidget(self.xc_xcp)
self.setLayout(layout)
def shelxc_run(self):
inp = open("quicky.inp", 'w')
if self.get_file.fname != None:
inp.write("SAD "+self.get_file.fname+ '\n')
cell = sub.check_output(["grep !UNIT_CELL_CONSTANTS "+self.get_file.fname + " | awk '{print $2,$3,$4,$5,$6,$7}'" ], shell=True)
inp.write("CELL "+ cell + '\n')
else:
print "provide filename\n"
if self.sg.value != None:
inp.write("SPAG "+ self.sg.value + '\n')
inp.write("MAXM 1000 \n")
else:
print "provide space group name, not number\n"
inp.close()
sub.call(["shelxc quick < quicky.inp" +' | tee quick.log'], shell=True)
def shelxc_plot(self):
try:
self.ap = Qt.QDialog()
self.ap.ui = psx.Main('quick.log', 'shelxc')
self.ap.ui.show()
except NameError:
pass
class shelx_ui(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
self.setWindowTitle("SHELX-GUI")
# shelx_ui class should have layouts in vertical mode..
layout = Qt.QVBoxLayout()
#create instance for file loading..
self.get_file = files_widget("Reflection-File")
self.get_file.setToolTip("Reflection file must be SHELX supported format")
#connect to enter key to make loaded file effective..
self.get_file.file_box.returnPressed.connect(lambda: self.show_cell())
#create Instance for output directory..
self.outdir = dir_widget("Output-dir")
self.outdir.setToolTip("A 'shelx_grid' directory with all files will be created under this output directory")
#add file and output directory widgets vertically to the layout..
layout.addWidget(self.get_file)
layout.addWidget(self.outdir)
#create Instances of widgets class for each different parameter..
self.cell = widgets("Unit-cell")
self.cell.setToolTip("cell value will pop up automatically, don't need to do anything")
self.sg = widgets("Space-Group")
self.sg.setToolTip("Please use name, not space-group number.")
self.resol = widgets("Resolution")
self.resol.setToolTip("provide values as comma separated, same for sites, emins")
self.sites = widgets("# Sites")
self.emins = widgets("Emins-values")
self.tries = widgets("# trials")
self.tries.setToolTip("please provide at least two numbers as comma separated")
self.emails = widgets("email_id")
self.emails.setToolTip("if email-id is provided, job status will be reported once finished. ")
#create submit-cancel buttons using Instance of two_buttons class..
self.sub_abort = two_buttons("Submit", "Cancel")
#add job submission functionality to "submit" and "cancel" buttons..
self.sub_abort.Button1.clicked.connect(lambda: self.job_run())
self.sub_abort.Button2.clicked.connect(lambda: self.closing())
#add all other widgets to the vertical layout..
layout.addWidget(self.cell)
layout.addWidget(self.sg)
layout.addWidget(self.resol)
layout.addWidget(self.sites)
layout.addWidget(self.emins)
layout.addWidget(self.tries)
layout.addWidget(self.emails)
layout.addWidget(self.sub_abort)
self.setLayout(layout)
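# Auto-fill the unit-cell box by grepping !UNIT_CELL_CONSTANTS out of the
# XDS-style header of the loaded reflection file.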
def show_cell(self):
if self.get_file.fname is None:
msgbox = Qt.QMessageBox()
msgbox.setText("Error: Load a reflection file first")
msgbox.exec_()
elif len(self.get_file.fname) > 72:
msgbox = Qt.QMessageBox()
msgbox.setText("Warning: shelx hates filenames with >72 characters!")
msgbox.exec_()
else:
cell_value = sub.check_output(["grep !UNIT_CELL_CONSTANTS "+self.get_file.fname + " | awk '{print $2,$3,$4,$5,$6,$7}'" ], shell=True)
self.cell.textbox.setText(cell_value.strip())
def job_run(self):
#create an output directory for shelx jobs..
if self.outdir.dir is None:
self.path = "./shelx_grid"
else:
self.path = self.outdir.dir + "/shelx_grid";
try:
os.mkdir(self.path, 0755)
except OSError: #directory already exists; reuse it and overwrite its contents
print "shelx_grid folder exists, overwriting"
pass
print("we will dump all results in %s directory path" %self.path)
os.chdir( self.path )
if len(self.emins.value) > 0 and len(self.tries.value) > 0:
opt_args = {'emins':self.emins.value, 'ntries':self.tries.value}
else:
opt_args = {}
grid = run_shelx.running(self.get_file.fname, self.sg.value, self.resol.value, self.sites.value, **opt_args)
grid.run_shelxc()
grid.prep_shelxd()
'''
job_cnt = 0
try:
for site in self.sites.value:
for res in self.resol.value:
for emin in self.emins.value:
for val in self.tries.value:
writer(self.get_file.fname,self.sg.value,site,res,emin,val)
sub.call(['sbatch -P day -J shelx run_cluster'], shell=True)
job_cnt += 1;
except TypeError:
pass
print "%d Jobs have been submitted" %job_cnt
'''
self.keep_session()
if self.emails.value is not None:
self.tracker()
def tracker(self):
mail_write(self.emails.value, self.path)
sub.call(['sbatch -d singleton -J shelx -t 1:00 tmp_mail.sh'], shell=True)
'''
job_status = 2;
user = sub.check_output(['who am i'], shell=True)
while job_status > 1:
job_no = sub.check_output(['squeue -u '+user +' | wc -l'], shell=True)
job_status = int(job_no)
time.sleep(1800)
if job_status == 1:
send_email(self.emails.value)
'''
def keep_session(self):
fh = open("session.param", 'w')
fh.write("Reflection filename= %s\n" %self.get_file.fname)
fh.write("Output directory= %s\n" %self.outdir.dir)
fh.write("Space group= %s \n" %self.sg.value)
fh.write("Sites= %s \n" %self.sites.textbox.text())
fh.write("Resolution= %s\n" %self.resol.textbox.text())
fh.write("Emins= %s\n" %self.emins.textbox.text())
fh.write("#trials= %s\n" %self.tries.textbox.text())
fh.write("Email-id= %s\n" %self.emails.value)
fh.write('\n\n')
fh.close()
def closing(self):
username = sub.check_output(["whoami"], shell=True).strip() #drop the trailing newline from whoami
sub.call(["scancel -u "+username], shell=True)
print "Job submission is cancelled.."
class Ui_Dialog(Qt.QDialog):
def __init__(self, dbConnection):
Qt.QDialog.__init__(self)
global c
c = dbConnection
class plotting_ui(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
self.setWindowTitle("Plot-me")
layout = Qt.QVBoxLayout()
self.datafile = files_widget("Input file")
self.datafile.setToolTip("Browse filename .log or .res based on graphics")
layout.addWidget(self.datafile)
self.datafolder = dir_widget("folder")
self.datafolder.setToolTip("Browse folder name for CC cluster plot")
layout.addWidget(self.datafolder)
self.graphics = dropmenu("graphics")
self.graphics.setToolTip("mention the plot type and hit <enter>")
self.rows = widgets("Rows")
self.cols = widgets("Columns")
self.plot_clear_btn = two_buttons("Plot", "Clear")
self.plot_clear_btn.Button1.clicked.connect(self.call_plotter)
self.plot_clear_btn.Button2.clicked.connect(self.cleaner)
layout.addWidget(self.graphics)
layout.addWidget(self.rows)
layout.addWidget(self.cols)
layout.addWidget(self.plot_clear_btn)
self.setLayout(layout)
def call_plotter(self):
if self.graphics.value == 'cluster':
try:
self.ap = Qt.QDialog()
self.ap.ui = psx.Main(self.datafolder.dir, self.graphics.value, self.rows.value, self.cols.value)
self.ap.ui.show()
except NameError:
pass
if self.graphics.value == 'CCall_CCweak':
try:
self.ap = Qt.QDialog()
self.ap.ui = psx.Main(self.datafile.fname, self.graphics.value)
self.ap.ui.show()
except NameError:
pass
if self.graphics.value == 'shelxc':
try:
self.ap = Qt.QDialog()
self.ap.ui = psx.Main(self.datafile.fname, self.graphics.value)
self.ap.ui.show()
except NameError:
pass
if self.graphics.value == 'site_occupancy':
try:
self.ap = Qt.QDialog()
self.ap.ui = psx.Main(self.datafile.fname, self.graphics.value)
self.ap.ui.show()
except NameError:
pass
def cleaner(self):
self.datafile.file_box.setText(" ")
self.graphics.textbox.setText(" ")
self.rows.textbox.setText(" ")
self.cols.textbox.setText(" ")
class model_ui(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
layout = Qt.QVBoxLayout()
self.hklfile = files_widget("HKLfile")
self.hklfile.setToolTip("XDS_ASCII/XSCALE file mandatory")
layout.addWidget(self.hklfile)
self.seq = files_widget("seqin")
self.seq.setToolTip("Sequence better if provided")
layout.addWidget(self.seq)
self.natdata = files_widget("Native")
self.natdata.setToolTip("if native data provided, SIRAS will be tried")
layout.addWidget(self.natdata)
self.substr = files_widget("HA-pdb")
self.substr.setToolTip("optionally, substructure can be provided")
layout.addWidget(self.substr)
self.opts = multi_widgets("high-res", "Nres", "HA-type", "# sites")
self.opts.st1.setToolTip("high resolution cutoff, better if given; Defaults:0.0 A")
self.opts.st2.setToolTip("# residue only mention if seq is not given")
self.opts.arg[0].setToolTip("Heavy atom type, default is S atom")
self.opts.arg[1].setToolTip("Optionally # sites, default: guess from seq or Nres")
layout.addWidget(self.opts)
self.job_btns = two_buttons("crank", "Sharp")
self.job_btns.Button1.clicked.connect(self.run_crank)
self.job_btns.Button2.clicked.connect(self.run_sharp)
layout.addWidget(self.job_btns)
self.setLayout(layout)
def prep_params(self):
keywords = {}
if self.hklfile.fname is None or not os.path.isfile(self.hklfile.fname):
msgbox = Qt.QMessageBox()
msgbox.setText("Error:No file given or path is wrong!")
msgbox.exec_()
return None
if self.seq.fname != None:
keywords['seq_file'] = self.seq.fname
elif self.opts.st2.value != None and self.seq.fname is None:
keywords['nres'] = self.opts.st2.value
else:
msgbox = Qt.QMessageBox()
msgbox.setText("Error:Need at least seq file or # residues to run the program")
msgbox.exec_()
if self.substr.fname != None:
keywords['substr'] = self.substr.fname
if self.natdata.fname != None: #native data enables the SIRAS path checked below
keywords['native'] = self.natdata.fname
if self.opts.arg[0].value != None:
keywords['atomtype'] = self.opts.arg[0].value
if self.opts.st1.value != None:
keywords['hres'] = self.opts.st1.value
if self.opts.arg[1].value != None:
keywords['sites'] = self.opts.arg[1].value
print keywords
Structure = Building(self.hklfile.fname, **keywords)
Structure.prep_mtz()
Structure.matt_calc()
Structure.get_wvl()
if 'native' in keywords.keys():
Structure.create_natmtz()
return Structure
def run_crank(self):
model = self.prep_params()
if model is not None:
model.crank_()
def run_sharp(self):
model = self.prep_params()
if model is not None:
model.sharp_()
class multi_widgets(Qt.QWidget):
def __init__(self, str1, str2, *args):
Qt.QWidget.__init__(self)
layout = Qt.QHBoxLayout()
self.st1 = widgets(str1)
self.st2 = widgets(str2)
self.arg = [];
layout.addWidget(self.st1)
layout.addWidget(self.st2)
if len(args) > 0:
for arg in args:
tmp = widgets(arg)
self.arg.append(tmp)
layout.addWidget(tmp)
self.setLayout(layout)
class widgets(Qt.QWidget):
def __init__(self, strings):
Qt.QWidget.__init__(self)
self.label = strings
self.value = None
layout = Qt.QHBoxLayout()
layout.addWidget(widgets.createLabel(self.label))
self.textbox = widgets.createBox()
self.textbox.returnPressed.connect(lambda: self.getter())
layout.addWidget(self.textbox)
self.setLayout(layout)
@classmethod
def createLabel(widgets, string1):
lab = Qt.QLabel(string1)
font = lab.font()
font.setBold(True)
font.setPointSize(10)
lab.setFont(font)
return lab
@classmethod
def createBox(widgets):
box = Qt.QLineEdit()
box.resize(100,20)
return box
@classmethod
def createButton(widgets, btn_name, func, *args):
button = Qt.QPushButton(btn_name)
button.connect(button, QC.SIGNAL("clicked()"), func)
return button
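# Parse the text box: eval() turns "1,2,3" into a tuple of numbers, while a
# plain word falls back to a string via the NameError branch.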
def getter(self):
try:
self.value = eval(str(self.textbox.text()))
except NameError:
self.value = str(self.textbox.text())
except SyntaxError:
pass
print self.value
class dropmenu(Qt.QWidget):
def __init__(self, string1):
Qt.QWidget.__init__(self)
self.label = string1;
self.value = None
layout = Qt.QHBoxLayout()
layout.addWidget(widgets.createLabel(self.label))
self.comb = Qt.QComboBox(self)
self.comb.addItems([' ','CCall_CCweak', 'cluster', 'shelxc', 'site_occupancy'])
self.comb.activated[str].connect(self.get_text_combo)
layout.addWidget(self.comb)
self.setLayout(layout)
def get_text_combo(self):
try:
self.value = unicode(self.comb.currentText())
except:
pass
print self.value
class files_widget(Qt.QWidget):
def __init__(self, string1):
Qt.QWidget.__init__(self)
self.fname = None;
self.layout = Qt.QHBoxLayout()
self.layout.addWidget(widgets.createLabel(string1))
self.file_box = widgets.createBox()
self.brw_btn = widgets.createButton("Browse", self.Loader)
self.rm_btn = widgets.createButton("Remove", self.cleaner)
self.layout.addWidget(self.file_box)
self.layout.addWidget(self.brw_btn)
self.layout.addWidget(self.rm_btn)
self.setLayout(self.layout)
def Loader(self):
self.file_box.setFocus()
filename = Qt.QFileDialog.getOpenFileName(self, 'Open File', '~/')
self.file_box.setText(filename)
try:
self.fname = unicode(self.file_box.text())
print "filename received %s" %self.fname
'''
if len(self.fname) > 72:
msgbox = Qt.QMessageBox()
msgbox.setText("Warning: shelx hates filename with >72 character!")
msgbox.exec_()
'''
except:
pass
def cleaner(self):
self.file_box.setText("") #use an empty string; setText(None) is unreliable across PyQt versions
self.fname = None
class dir_widget(Qt.QWidget):
def __init__(self,dirname):
Qt.QWidget.__init__(self)
self.dir = None;
layout = Qt.QHBoxLayout()
layout.addWidget(widgets.createLabel(dirname))
self.dir_box = widgets.createBox()
self.brw_btn = widgets.createButton("Browse", self.Load_dir)
self.rm_btn = widgets.createButton("Remove", self.cleaner)
layout.addWidget(self.dir_box)
layout.addWidget(self.brw_btn)
layout.addWidget(self.rm_btn)
self.setLayout(layout)
def Load_dir(self):
self.dir_box.setFocus()
directory = Qt.QFileDialog.getExistingDirectory(self)
self.dir_box.setText(directory)
try:
self.dir = unicode(self.dir_box.text())
print "output directory name: %s" %self.dir
except:
pass
def cleaner(self):
self.dir_box.setText("") #use an empty string; setText(None) is unreliable across PyQt versions
self.dir = None
class two_buttons(Qt.QWidget):
def __init__(self, butn1, butn2, *args):
Qt.QWidget.__init__(self)
layout = Qt.QHBoxLayout()
self.Button1 = Qt.QPushButton(butn1)
self.Button2 = Qt.QPushButton(butn2)
layout.addWidget(self.Button1)
layout.addWidget(self.Button2)
self.setLayout(layout)
if len(args) > 0:
for arg in args:
self.btn = Qt.QPushButton(arg)
layout.addWidget(self.btn)
self.setLayout(layout)
def main():
app = Qt.QApplication(sys.argv)
#w = MainLayout()
w = Tabs()
w.show()
return app.exec_()
if __name__ == '__main__':
main()
|
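# Evaluate the multi-binary domain discriminator on the four Office-Home
# domains (product, real_world, art, clipart) and cache scores/labels to disk.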
from data import *
from utilities import *
from networks import *
import numpy as np
# Explicit imports for names used below (torch, nn, Variable, tl); some of
# these may already be re-exported by the wildcard imports above.
import torch
import torch.nn as nn
from torch.autograd import Variable
import tensorlayer as tl
num_known_classes = 65 #25
num_all_classes = 65
def skip(data, label, is_train):
return False
batch_size = 32
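# Per-sample preprocessing: one-hot encode the label, crop to 224x224
# (random crop when training), transpose to CHW and scale to [0, 1].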
def transform(data, label, is_train):
label = one_hot(num_all_classes,label)
data = tl.prepro.crop(data, 224, 224, is_random=is_train)
data = np.transpose(data, [2, 0, 1])
data = np.asarray(data, np.float32) / 255.0
return data, label
ds = FileListDataset('/mnt/datasets/office-home/product_0-64_val.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
product = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
ds = FileListDataset('/mnt/datasets/office-home/real_world_0-64_test.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
real_world = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
ds = FileListDataset('/mnt/datasets/office-home/art_0-64_test.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
art = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
ds = FileListDataset('/mnt/datasets/office-home/clipart_0-64_test.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
clipart = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
setGPU('0')
discriminator_p = Discriminator(n = 25).cuda() # multi-binary classifier
discriminator_p.load_state_dict(torch.load('discriminator_p_office-home.pkl'))
feature_extractor = ResNetFc(model_name='resnet50')
cls = CLS(feature_extractor.output_num(), num_known_classes+1, bottle_neck_dim=256)
net = nn.Sequential(feature_extractor, cls).cuda()
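# Run one dataset through the feature extractor and discriminator,
# collecting per-sample scores and their one-hot labels.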
def get_score(dataset):
ss = []
ll = []
for (i, (im, label)) in enumerate(dataset.generator()):
im = Variable(torch.from_numpy(im)).cuda()
f, __, __, __ = net.forward(im)
p = discriminator_p.forward(f).cpu().detach().numpy()
ss.append(p)
ll.append(label)
return np.vstack(ss), np.vstack(ll)
score_pr, label_pr = get_score(product)
score_rw, label_rw = get_score(real_world)
score_ar, label_ar = get_score(art)
score_cl, label_cl = get_score(clipart)
filename = "scores_office-home"
np.savez_compressed(filename,
product_score=score_pr, product_label=label_pr,
real_world_score=score_rw, real_world_label=label_rw,
art_score=score_ar, art_label=label_ar,
clipart_score=score_cl, clipart_label=label_cl)
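# The arrays can be reloaded later with np.load(filename + ".npz").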
|
# Generated by Django 3.1.8 on 2021-04-19 12:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True, verbose_name='name')),
('slug', models.SlugField(max_length=100, unique=True, verbose_name='slug')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TaggedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.IntegerField(db_index=True, verbose_name='object ID')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tags_taggeditem_tagged_items', to='contenttypes.contenttype', verbose_name='content type')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tags_taggeditem_items', to='Tags.tag')),
],
options={
'abstract': False,
},
),
]
|
import json
import requests
import urllib3
from base.casadecambio import CasaDeCambio
from base.origintype import OriginType
from base.cotizacion import Cotizacion
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class Interfisa(CasaDeCambio):
__id = "interfisa"
name = "Interfisa Banco"
originType = OriginType.JSON
#header = ""
#data = ""
sucursales = [
{"id": "web", "name": "Cotizaciรณn de la Web", "loc": ""},
]
def getSucursales(self):
return {
self.__id : { "sucursales": self.sucursales }
}
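# Fetch the bank's JSON rate feed and pull the buy/sell values for
# "DOLARES AMERICANOS"; returns a zeroed Cotizacion on failure.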
def getCotizacionWeb(self):
compra = 0
venta = 0
cambio = Cotizacion(compra, venta)
try:
jsonResult = requests.get(
"https://seguro.interfisa.com.py/rest/cotizaciones", timeout=10
).json()
cotizaciones = jsonResult["operacionResponse"]["cotizaciones"]["monedaCot"]
for coti in cotizaciones:
for k, v in coti.items():
if v == "DOLARES AMERICANOS": # estamos en el dict de Dolares
compra = coti["compra"]
venta = coti["venta"]
cambio = Cotizacion(compra, venta)
except requests.ConnectionError as e:
#TODO: add proper logging
print("Connection error: ")
print(e)
except Exception:
#TODO: be more specific about the exception types caught here
print("Another error")
return cambio
def getCotizaciones(self):
return { self.__id : {self.sucursales[0]['id'] : self.getCotizacionWeb().getValuesDict()} }
def test(self):
ib = Interfisa()
#sucursales
suc = ib.getSucursales()
print(json.dumps(suc, indent=4))
#cotizaciones
coti = ib.getCotizaciones()
#print(coti)
print(json.dumps(coti, indent=4))
|
import os, sys
import math
import numpy as np
import skimage.io
import cv2
from PIL import Image
# Root directory of the project
ROOT_DIR = os.path.abspath("../..")
if ROOT_DIR not in sys.path:
sys.path.insert(0, ROOT_DIR)
from instance_segmentation.object_config import Config
import utils
class ObjectsConfig(Config):
NAME = "prime_sense"
# NAME = "seg_ADE20K"
MODE = 'RGBD'
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 640
# IMAGES_PER_GPU = 2
# LEARNING_RATE = 0.02
# Image mean (RGBD)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9, 1220.7])
class ObjectsDataset(utils.Dataset):
def load(self, dataset_dir, skip=19):
# Add classes
self.add_class("prime_sense", 1, "object")
count = 0
# Add images
for i, (root, dirs, files) in enumerate(os.walk(dataset_dir)):
root_split = root.split('/')
if root_split[-1] == 'image': # and subset in root_split:
for j, file in enumerate(files):
if j % (skip + 1) == 0:
parentRoot = '/'.join(root.split('/')[:-1])
depth_path = os.path.join(parentRoot, 'depth', file)
# only add if corresponding mask exists
path = os.path.join(root, file)
if os.path.isfile(depth_path):
if (os.stat(path).st_size):
im = Image.open(path)
width, height = im.size
self.add_image(
"prime_sense",
image_id=i,
path=path,
depth_path=depth_path,
width=width,
height=height)
count += 1
else:
print('Warning: No depth or mask found for ' + path)
print('added {} images'.format(count))
def load_image(self, image_id, depth=True):
"""Load the specified image and return a [H,W,3+1] Numpy array.
"""
# Load image & depth
image = super(ObjectsDataset, self).load_image(image_id)
if depth:
depth = skimage.io.imread(self.image_info[image_id]['depth_path'])
rgbd = np.dstack((image, depth))
return rgbd
else:
return image
if __name__ == '__main__':
dataset = ObjectsDataset()
dataset.load('/home/orestisz/data/ADE20K_2016_07_26') #load() takes the dataset dir plus an optional integer skip
masks, class_ids = dataset.load_mask(0)
|
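# Parse "name=value" lines from a .cfg file into a dict, keeping selected keys
# (lossFunction, optimizer, device, *Path*) as strings, casting dropOut to
# float and everything else to int.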
def cfgParser(path):
cfgDict = dict()
with open(path, 'r') as f:
for line in f:
data = line.strip('\n')
if data == '' or data[0] == '#':
continue
name, value = data.split('=')
if name == 'lossFunction' or name == 'optimizer' or name == 'device':
cfgDict[name] = value
elif name == 'dropOut':
cfgDict[name] = float(value)
elif name.find('Path') != -1:
cfgDict[name] = value
else:
cfgDict[name] = int(value)
return cfgDict
def main():
cfgParser('../cfg/FASTSPEECH2_TRAIN.cfg')
if __name__ == '__main__':
main()
|
import json
from dataclasses import dataclass
from pathlib import Path
import web3
from web3.contract import Contract
from beamer.typing import Address, BlockNumber, ChainId
@dataclass
class ContractInfo:
address: Address
deployment_block: BlockNumber
abi: list
def make_contracts(w3: web3.Web3, contracts_info: dict[str, ContractInfo]) -> dict[str, Contract]:
return {
name: w3.eth.contract(info.address, abi=info.abi) for name, info in contracts_info.items()
}
def load_contract_abi(deployment_dir: Path, contract_name: str) -> list:
with deployment_dir.joinpath(f"{contract_name}.json").open("rt") as f:
data = json.load(f)
return data["abi"]
DeploymentInfo = dict[ChainId, dict[str, ContractInfo]]
def load_deployment_info(deployment_dir: Path) -> DeploymentInfo:
abis = {}
deployment_info = {}
with deployment_dir.joinpath("deployment.json").open("rt") as f:
deployment = json.load(f)
for chain_id, deployed_contracts in deployment["L2"].items():
infos = {}
for name, deployment_data in deployed_contracts.items():
if name not in abis:
abis[name] = load_contract_abi(deployment_dir, name)
abi = abis[name]
infos[name] = ContractInfo(
address=deployment_data["address"],
deployment_block=deployment_data["deployment_block"],
abi=abi,
)
deployment_info[ChainId(int(chain_id))] = infos
return deployment_info
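# Example wiring (hypothetical endpoint, deployment dir and chain id):
# w3 = web3.Web3(web3.HTTPProvider("http://localhost:8545"))
# info = load_deployment_info(Path("deployments"))
# contracts = make_contracts(w3, info[ChainId(5)])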
|
import os
import json
def loadJson(relative_file_location):
with open(relative_file_location) as json_data:
return json.load(json_data)
def addOneToString(stringInt):
return str(int(stringInt) + 1)
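# Read tab-separated "<id>\t<name>" label lines and return a dict keyed by
# the id shifted up by one (stringified).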
def loadStuffTextLabels(relative_file_location):
file = open(relative_file_location, 'r')
stuffLabels = {}
for line in file:
id, name = line.strip().split('\t')
stuffLabels[addOneToString(id)] = name
return stuffLabels
def makeResultsDirectory(dir_name):
stringify_dir=str(dir_name)
# Make directory to store results if it doesn't exist
if not os.path.exists(stringify_dir):
print ("Folder doesn't exist, making now... : {}".format(stringify_dir))
os.makedirs(stringify_dir)
|
import os
import pytz
from .models import Bot, BotflowExecution, SmtpAccount
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save
from smtplib import SMTP, SMTP_SSL
from email.message import EmailMessage
from os.path import basename
from datetime import datetime
@receiver(pre_save, sender=BotflowExecution)
def botflow_execution_update_progress(sender, instance, **kwargs):
if instance.time_end != None:
instance.custom_progress = 100
@receiver(post_save, sender=BotflowExecution)
def botflow_execution_bot_status(sender, instance, **kwargs):
if instance.status == "Running":
try:
bot = Bot.objects.filter(computer_name__iexact=instance.computer_name, user_name__iexact=instance.user_name)[0]
except Exception:
return
if bot.status != "Running":
bot.status = "Running"
bot.save_without_historical_record()
elif instance.status != "Pending":
try:
bot = Bot.objects.filter(computer_name__iexact=instance.computer_name, user_name__iexact=instance.user_name)[0]
except Exception:
return
if instance.status == "Completed":
if bot.status != "Active":
bot.status = "Active"
bot.save_without_historical_record()
else:
latest_execution = BotflowExecution.objects.filter(status="Running", computer_name__iexact=os.environ['COMPUTERNAME'], user_name__iexact=os.environ['USERNAME']).order_by('-time_start')
if len(latest_execution) > 0:
latest_execution = latest_execution[0]
difference = datetime.now(pytz.timezone('UTC')) - latest_execution.time_start
difference = difference.seconds / 60
if difference < latest_execution.timeout_minutes:
return
if bot.status != "Unknown":
bot.status = "Unknown"
bot.save_without_historical_record()
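# E-mail the configured recipients about queue/start/complete/error events
# via the active SMTP account, falling back to SMTP_SSL if the plain/STARTTLS
# attempt fails.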
@receiver(post_save, sender=BotflowExecution)
def botflow_execution_notification(sender, instance, **kwargs):
try:
smtp_account = SmtpAccount.objects.filter(activated=True)[0]
except Exception:
return
if instance.status == "Pending":
if "@" in instance.queued_notification:
email_subject = f"[{instance.pk}] Botflow queued: '{basename(instance.botflow)}'"
email_to = [
email for email in instance.queued_notification.split(",")
]
else:
return
elif instance.status == "Running":
if "@" in instance.started_notification:
email_subject = f"[{instance.pk}] Botflow started: '{basename(instance.botflow)}'"
email_to = [
email for email in instance.started_notification.split(",")
]
else:
return
elif instance.status == "Completed":
if "@" in instance.completed_notification:
email_subject = f"[{instance.pk}] Botflow completed: '{basename(instance.botflow)}'"
email_to = [
email for email in instance.completed_notification.split(",")
]
else:
return
elif "ERROR" in instance.status.upper():
if "@" in instance.error_notification:
email_subject = f"[{instance.pk}] Botflow failed: '{basename(instance.botflow)}'"
email_to = [
email for email in instance.error_notification.split(",")
]
else:
return
else:
if "@" in instance.error_notification:
email_subject = f"[{instance.pk}] UNKNOWN STATUS"
email_to = [
email for email in instance.error_notification.split(",")
]
else:
return
try:
email_to = [str(email).lower().strip()
for email in email_to if "@" in email]
email_from = str(smtp_account.email).lower().strip()
msg = EmailMessage()
msg['Subject'] = email_subject
msg['From'] = email_from
msg['To'] = ", ".join(email_to)
msg.set_content(f"Application: {instance.app}\nBotflow: {instance.botflow}\nTrigger: {instance.trigger}\n\nComputer Name: {instance.computer_name}\nUsername: {instance.user_name}\n\nStatus: {instance.status}\n\nTime Start: {instance.time_start}\nTime End: {instance.time_end}")
with SMTP(smtp_account.server, smtp_account.port) as server:
if smtp_account.tls:
server.starttls()
if smtp_account.password != "":
server.login(email_from, smtp_account.password)
server.send_message(msg)
if smtp_account.status != "Active":
smtp_account.status = "Active"
smtp_account.save_without_historical_record()
except Exception:
if smtp_account.tls:
try:
msg = EmailMessage()
msg['Subject'] = email_subject
msg['From'] = email_from
msg['To'] = ", ".join(email_to)
msg.set_content(
f"Application: {instance.app}\nBotflow: {instance.botflow}\nTrigger: {instance.trigger}\n\nComputer Name: {instance.computer_name}\nUsername: {instance.user_name}\n\nStatus: {instance.status}\n\nTime Start: {instance.time_start}\nTime End: {instance.time_end}"
)
with SMTP_SSL(smtp_account.server, smtp_account.port) as server:
if smtp_account.password != "":
server.login(email_from, smtp_account.password)
server.send_message(msg)
if smtp_account.status != "Active":
smtp_account.status = "Active"
smtp_account.save_without_historical_record()
except Exception:
if smtp_account.status != "ERROR":
smtp_account.status = "ERROR"
smtp_account.save_without_historical_record()
else:
if smtp_account.status != "ERROR":
smtp_account.status = "ERROR"
smtp_account.save_without_historical_record()
|
import matlab.engine # Must import matlab.engine first
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class BackboneNet(nn.Module):
def __init__(self, in_features, class_num, dropout_rate, cls_branch_num,
base_layer_params, cls_layer_params, att_layer_params):
'''
layer_params format:
[[kernel_num_1, kernel_size_1], [kernel_num_2, kernel_size_2], ...]
'''
super(BackboneNet, self).__init__()
assert (dropout_rate > 0)
self.cls_branch_num = cls_branch_num
self.att_layer_params = att_layer_params
self.dropout = nn.Dropout2d(
p=dropout_rate) # Drop whole feature channels at once for the untrimmed-video features
base_module_list = self._get_module_list(in_features, base_layer_params,
'base')
self.base = nn.Sequential(OrderedDict(base_module_list))
cls_module_lists = []
for branch_idx in range(cls_branch_num):
cls_module_lists.append(
self._get_module_list(base_layer_params[-1][0],
cls_layer_params,
'cls_b{}'.format(branch_idx)))
self.cls_bottoms = nn.ModuleList(
[nn.Sequential(OrderedDict(i)) for i in cls_module_lists])
self.cls_heads = nn.ModuleList([
nn.Linear(cls_layer_params[-1][0], class_num)
for i in range(cls_branch_num)
])
if self.att_layer_params:
att_module_list = self._get_module_list(base_layer_params[-1][0],
att_layer_params, 'att')
self.att_bottom = nn.Sequential(OrderedDict(att_module_list))
self.att_head = nn.Linear(att_layer_params[-1][0], 1)
else:
self.gap = nn.AdaptiveMaxPool1d(1)
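# Build a named [Conv1d -> ReLU] stack from layer_params; 'naming' prefixes
# each module key so the branches stay distinguishable in the state_dict.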
def _get_module_list(self, in_features, layer_params, naming):
module_list = []
for layer_idx in range(len(layer_params)):
if layer_idx == 0:
in_chl = in_features
else:
in_chl = layer_params[layer_idx - 1][0]
out_chl = layer_params[layer_idx][0]
kernel_size = layer_params[layer_idx][1]
conv_pad = kernel_size // 2
module_list.append(('{}_conv_{}'.format(naming, layer_idx),
nn.Conv1d(in_chl,
out_chl,
kernel_size,
padding=conv_pad)))
module_list.append(('{}_relu_{}'.format(naming,
layer_idx), nn.ReLU()))
return module_list
def forward(self, x): # In: B x F x T
x_drop = self.dropout(x.unsqueeze(3)).squeeze(3)
base_feature = self.base(x_drop)
cls_features = []
branch_scores = []
for branch_idx in range(self.cls_branch_num):
cls_feature = self.cls_bottoms[branch_idx](base_feature)
cls_score = self.cls_heads[branch_idx](cls_feature.transpose(1, 2))
cls_features.append(cls_feature)
branch_scores.append(cls_score)
avg_score = torch.stack(branch_scores).mean(0)
if self.att_layer_params:
att_feature = self.att_bottom(base_feature)
att_weight = self.att_head(att_feature.transpose(1, 2))
att_weight = F.softmax(att_weight, dim=1)
global_score = (avg_score * att_weight).sum(1)
else:
att_feature = None
att_weight = None
global_score = self.gap(avg_score.transpose(1, 2)).squeeze(2)
# for debug and future work
feature_dict = {
'base_feature': base_feature,
'cls_features': cls_features,
#'att_feature': att_feature,
}
return avg_score, att_weight, global_score, branch_scores, feature_dict
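# Example instantiation (hypothetical sizes, for illustration only):
# net = BackboneNet(in_features=2048, class_num=20, dropout_rate=0.5,
#                   cls_branch_num=2, base_layer_params=[[512, 3]],
#                   cls_layer_params=[[256, 1]], att_layer_params=[[256, 1]])
# avg, att, glob, branches, feats = net(torch.rand(4, 2048, 100))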
|
# -*- coding: utf-8 -*-
"""
EnigmaLight Plugin by Speedy1985, 2014
https://github.com/speedy1985
Parts of the code are from other plugins:
all credits to the coders :-)
EnigmaLight is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
EnigmaLight is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
"""
#===============================================================================
# IMPORT
#===============================================================================
import sys, time, socket, os, threading, commands
from threading import Thread
from Components.Console import Console
from Screens import Standby
from Components.ConfigList import ConfigListScreen
from Components.config import config, configfile, getConfigListEntry, ConfigFloat, ConfigSubsection, ConfigEnableDisable, ConfigSelection, ConfigSlider, ConfigDirectory, ConfigOnOff, ConfigNothing, ConfigInteger, ConfigYesNo
from EL_Socket import EL_Socket
from EL_HttpServer import HttpdStart, HttpdStop
from __common__ import EnigmaLight_log as log, rgbToHex, showMessage, showError
from __init__ import getCrashFilePath, _ # _ is translation
elightconf_notfound = _("File /etc/enigmalight.conf not found.")
class Controller(threading.Thread):
def __init__ (self):
log("",self)
threading.Thread.__init__(self)
self.sockClass = EL_Socket()
self.session = None
self.global_session = None
self.checkedForUpdates = False
self.currentScreen = None
self.mainScreen = None
self.clearStatusInfo = None
self.checkConsole = None
self.startConsole = None
self.processID = None
self.callback = None
self.callbackArgs = None
self.isStandby = False
self.processRunning = False
self.lightsEnabled = False
self.current_mode = None
self.current_resolution = "0x0"
self.current_fps = 0
self.current_cpu = 0
self.current_state = "??"
self.connectedAddres = None
self.status_text = ""
self.serverstate = False
self.thread_running = True
#Run this thread as daemon
self.setDaemon(True)
#Standby notifier
config.misc.standbyCounter.addNotifier(self.enterStandby, initial_call = False)
def setScreen(self, screenInstance):
self.currentScreen = screenInstance
def setMainScreen(self, value):
self.mainScreen = value
def run(self):
log("",self,"ControlThread: Running...")
checkLoop = 4 # check every 2 sec for cpu usage
loopCount = 0
while(self.thread_running):
#Check connection
self.sockClass.checkConnection()
#if not connected then connect with it.
if self.sockClass.connectedWithEnigmalight():
self.readInfo() #get runninginfo
else:
self.lightsEnabled = False
self.current_fps = "0"
self.current_resolution = "0x0"
self.current_mode = "Off"
self.current_cpu = "0"
#when checkloop is 2 then getcpu
if(loopCount == checkLoop):
#self.getCpu()
loopCount = 0
else:
loopCount += 1
#check mode
if config.plugins.enigmalight.network_onoff.value:
ext = _("Daemon %s:%s") % (str(config.plugins.enigmalight.address.getText()),str(config.plugins.enigmalight.port.getText()))
elif config.plugins.enigmalight.type.value == "WifiLight":
ext = _("Local [Wifilight] %s:%s") % (str(config.plugins.enigmalight.wifilight_ip.getText()),str(config.plugins.enigmalight.wifilight_port.getText()))
else:
ext = _("Local")
if(self.current_mode == "0" and not self.serverstate):
mode = _("[Server] Idle")
self.lightsEnabled = False
elif(self.current_mode == "0" and self.serverstate):
mode = _("[Server] Client connected (%s)") % self.connectedAddres
self.lightsEnabled = False
elif(self.current_mode == "1"):
mode = _("[Moodlamp] %s | Static color") % ext
self.lightsEnabled = True
elif(self.current_mode == "2"):
mode = _("[Dynamic] %s | %s") % (ext,self.current_resolution)
self.lightsEnabled = True
elif(self.current_mode == "3"):
mode = _("[Moodlamp] %s | RGBtest") % ext
self.lightsEnabled = True
elif(self.current_mode == "4"):
mode = _("[Moodlamp] %s | ColorFader") % ext
self.lightsEnabled = True
elif(self.current_mode == "5"):
mode = _("[Moodlamp] %s | Rainbow") % ext
self.lightsEnabled = True
else:
mode = "Off"
self.lightsEnabled = False
if self.currentScreen != None and self.mainScreen != None:
self.currentScreen.handleFromThread(self.currentScreen.showButtons)
#Set StatusBar text
if not self.lightsEnabled and not self.sockClass.connected:
status = _("Not Running")
mode = "Off"
elif self.lightsEnabled and self.sockClass.connected:
status = _("LightsOn")
elif not self.lightsEnabled and self.sockClass.connected:
status = _("LightsOff")
else:
status = _("Unknown")
#Statusbar
if self.currentScreen != None:
stContent = _("Status: %s | Current mode: %s | FPS: %s") % (status,mode,self.current_fps)
try:
#self.currentScreen.handleFromThread("setStatusBarTxt",stContent)
self.currentScreen.handleFromThread(self.currentScreen.setStatusBarTxt,stContent)
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
if config.plugins.enigmalight.message_error_onoff.value:
showError(self.session, (format_exc()), "E")
except:
pass
#clear info
if self.clearStatusInfo != None and self.clearStatusInfo == loopCount:
try:
#self.currentScreen.handleFromThread("setStatusBarInfo","")
self.currentScreen.handleFromThread(self.currentScreen.setStatusBarInfo,"")
#clear info
self.clearStatusInfo = None
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
if config.plugins.enigmalight.message_error_onoff.value:
showError(self.session, (format_exc()), "E")
except:
pass
time.sleep(0.5)
log("ControlThread: exit...")
self.thread_running = False
def setStatusBarInfo(self,info):
log("",self)
try:
if self.currentScreen != None:
self.currentScreen.handleFromThread(self.currentScreen.setStatusBarInfo,_("Info: ["+ info +"]"))
pass
#clear info
self.clearStatusInfo = 2
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
if config.plugins.enigmalight.message_error_onoff.value:
showError(self.session, (format_exc()), "E")
except:
pass
#===============================================================================
# Read info from enigmalight.info
#===============================================================================
def readInfo(self):
#log("",self)
try:
self.serverstate = self.sockClass.getServerState()
self.current_mode = str(self.sockClass.getMode())
self.current_fps = str(self.sockClass.getFPS())
self.current_resolution = str(self.sockClass.getRes())
if self.current_mode == "0" and self.serverstate:
self.connectedAddres = self.sockClass.getConnectedAddress()
else:
self.connectedAddres = None
except:
self.current_fps = "0"
self.current_resolution = "0x0"
self.current_mode = "Off"
self.current_cpu = "0"
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
except:
pass
#===============================================================================
# Get CPU Usage of EnigmaLight
#===============================================================================
def getCpu(self):
#log("",self)
try:
cpu = commands.getstatusoutput('top -n1 | grep "enigmalight" | awk \'{print $7}\'')
cpu_split = str(cpu).split("'")
cpu = cpu_split[1][:3]#remove new line and other stuff
cpu = cpu.replace("\\","")
#print ("[EnigmaLight] Cpu usage [" + cpu + "]")
self.current_cpu = cpu
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
if config.plugins.enigmalight.message_error_onoff.value:
showError(self.session, (format_exc()), "E")
except:
pass
#===============================================================================
# Set Session
#===============================================================================
def setSession(self, session = None):
if session == None:
self.session = self.global_session
else:
self.session = session
def setGlobalSession(self, session):
self.global_session = session
#===============================================================================
# Call function on Standby
#===============================================================================
def enterStandby(self, configElement):
log("",self)
if not self.isStandby and self.lightsEnabled:
Standby.inStandby.onClose.append(self.leaveStandby)
self.isStandby = True
log("",self,"ControlThread: enterStandby..")
self.Control("grabber","sleep")
#===============================================================================
# Call function on wakeup from standby
#===============================================================================
def leaveStandby(self):
log("",self)
if self.isStandby is True:
self.isStandby = False
log("ControlThread: leaveStandby..",self)
self.Control("grabber","wakeup")
#===============================================================================
# Control functions (start ... stop)
#===============================================================================
def Control(self, command, value, startcommand = "enigmalight -m 0 -f", callback = None):
log("",self,"Control: c:%s v:%s" %(command, value))
#Set ConfigFile
s_command = startcommand + " -c " + str(config.plugins.enigmalight.configfilepath.value)
#Don't use config file for client -> host
if config.plugins.enigmalight.network_onoff.value:
host = str(config.plugins.enigmalight.address.getText())
port = str(config.plugins.enigmalight.port.getText())
s_command = "enigmalight -s " + host + ":" + port
if value == "configtest":
s_command = startcommand + " -c /tmp/enigmalight.conf.new"
control = { 'command': command, 'value': value, 'startcommand': s_command}
if config.plugins.enigmalight.network_onoff.value == True:
#self.getPid(control,None,self.checkIfRunningFinisched) #only network mode
self.checkIfRunningFinisched(control,None)
elif os.path.isfile(str(config.plugins.enigmalight.configfilepath.value)) is True:
# getpid and execute command self.checkIfRunning -> DoControl
self.checkIfRunningFinisched(control,None)
else:
showMessage(self.session, elightconf_notfound, "W")
self.setStatusBarInfo(_("Configfile not found!"))
def checkIfRunningFinisched(self, control, callback = None):
log("",self)
log("",self,"control[command] = " + str(control['command']))
log("",self,"control[value] = " + str(control['value']))
log("",self,"control[startcommand] = " + str(control['startcommand']))
log("",self,"callback = " + str(callback))
pid = self.sockClass.getCommand("get pidnr")
log("",self,"pid = " + str(pid))
checkResult = False
try:
if pid is None:
#Check if enigmalight is running;
#if it's not running, start it.
if control['value'] != "stop" and control['value'] != "sleep" and control['value'] != "network":
log("",self,"[/usr/bin/enigmalight] not running, Starting..")
checkResult = True
self.startConsole = Console()
self.startConsole.ePopen(str(control['startcommand']), self.DoControl, [checkResult, control, callback])
#If network mode is used
elif control['command'] == "network":
#connect client with host
log("",self,"network")
else:
#Enigmalight is already running
log("",self,"[/usr/bin/enigmalight] already running with pid " + str(pid))
self.DoControl(None, None, [checkResult, control, callback])
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
except:
pass
def showResult(self, result):
s = showMessage(self.session,_("Error while starting EnigmaLight:\n\n%s") % result,"E",15)
def DoControl(self, result, retval, extra_args = None):
log("",self)
(checkResult, control, callback) = extra_args
ret = 0
error = False
commandResult = str(result)
if checkResult:
#sleep one second before the next step.
time.sleep(1)
if commandResult.find("ERROR:") != -1:
self.showResult(str(result))
error = True
if error is False:
try:
if control['value'] == "stop":
self.setStatusBarInfo(_("Stop lights.."))
if config.plugins.enigmalight.server.value is True and config.plugins.enigmalight.network_onoff.value == False:
#Switch to server
log("",self,"config.plugins.enigmalight.server.value is true, Start server")
data = "set mode 0\n"
ret = self.sockClass.setCommand(data)
else:
#kill enigmalight
data = "set mode stop\n"
self.sockClass.setCommand(data)
if config.plugins.enigmalight.message_onoff.getValue():
showMessage(self.session,_("Control: Lights disabled."),"I")
elif control['value'] == "dynamic":
self.setStatusBarInfo(_("Start lights.."))
ret = self.controlMode()
if config.plugins.enigmalight.message_onoff.getValue():
showMessage(self.session,_("Control: Lights enabled."),"I")
elif control['value'] == "configtest":
self.setStatusBarInfo(_("Change mode"))
data = "set mode 3\n"
ret = self.sockClass.setCommand(data) #3 test
if config.plugins.enigmalight.message_onoff.getValue():
showMessage(self.session,_("Control: Lights enabled, mode[test]"),"I")
elif control['value'] == "server":
self.setStatusBarInfo(_("Change mode"))
data = "set mode 0\n"
ret = self.sockClass.setCommand(data)
elif control['value'] == "moodlamp":
self.setStatusBarInfo(_("Change mode"))
ret = self.writeMoodlamp()
if config.plugins.enigmalight.message_onoff.getValue():
showMessage(self.session,_("Control: Lights enabled, mode[%s]") %(str(config.plugins.enigmalight.moodlamp_mode.getText())),"I")
elif control['value'] == "sleep":
if config.plugins.enigmalight.standbymode.value == str(1):
#Start Moodlamp
ret = self.writeMoodlamp()
elif config.plugins.enigmalight.standbymode.value == str(0):
if config.plugins.enigmalight.server.value is True and config.plugins.enigmalight.network_onoff.value == False:
#Switch to server
log("",self,"config.plugins.enigmalight.server.value is true, Start server")
data = "set mode 0\n"
ret = self.sockClass.setCommand(data)
else:
#disable lights
data = "set mode stop\n"
ret = self.sockClass.setCommand(data)
elif control['value'] == "wakeup":
ret = self.controlMode()
if self.currentScreen != None and self.mainScreen != None:
self.currentScreen.handleFromThread(self.currentScreen.showButtons)
#Send all values
if ret == 1:
if control['value'] == "dynamic" or control['value'] == "restart" or control['value'] == "wakeup":
self.sendAll(True)
if control['value'] != "stop" and control['value'] != "sleep":
self.writeSettings()
return ret
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
if config.plugins.enigmalight.message_error_onoff.value:
showError(self.session, (format_exc()), "E")
except:
pass
def controlMode(self):
log("",self)
if config.plugins.enigmalight.mode.value == str(1):
data = "set mode " + str(config.plugins.enigmalight.moodlamp_mode.getValue()) +"\n"
ret = self.sockClass.setCommand(data)
elif config.plugins.enigmalight.mode.value == str(2):
data = "set mode 2\n"
ret = self.sockClass.setCommand(data)
return ret
#===============================================================================
# Switch from network to normal or normal to network
#===============================================================================
def switchtoNetwork(self):
log("",self)
host = str(config.plugins.enigmalight.address.getText())
port = str(config.plugins.enigmalight.port.getText())
self.Control("start","dynamic","enigmalight -f -s " + host + ":" + port)
#===============================================================================
# Functions to forcekill enigmalight with pidnr
#===============================================================================
def killEnigmalight(self, args = None, callback = None, command = None):
log("",self)
self.setStatusBarInfo(_("killEnigmalight.."))
data = "set mode stop\n"
self.sockClass.setCommand(data)
if callback != None:
self.callback = callback #save callback for later
if args != None:
self.callbackArgs = args
#self.getPid(None,None,self.killEnimgalightNow) #getpid and kill enigmalight
self.killEnigmalightNow(None,None)
def killEnigmalightNow(self, values = None, callback = None):
log("",self)
try:
self.checkConsole = Console()
command = "killall -9 enigmalight"
#set callback from saved callback
if callback == None and self.callback != None:
callback = self.callback
self.callback = None
self.checkConsole.ePopen(command, self.killEnigmalightFinisched, [values, callback])
except:
from traceback import format_exc
log("Error:",format_exc() )
try:
open(getCrashFilePath(),"w").write(format_exc())
except:
pass
def killEnigmalightFinisched(self, result, retval, extra_args = None):
log("",self)
(values, callback) = extra_args
log("",self,"values " + str(values))
log("",self,"result " + str(result))
log("",self,"retval " + str(retval))
log("",self,"callback " + str(callback))
time.sleep(1)
if len(str(result)) == 0:
self.setStatusBarInfo(_("Enigmalight killed."))
if config.plugins.enigmalight.message_onoff.getValue():
showMessage(self.session,_("Enigmalight killed."),"I")
else:
self.setStatusBarInfo(_("Enigmalight not killed!"))
if config.plugins.enigmalight.message_onoff.getValue():
showMessage(self.session,_("Enigmalight not killed\nresult: %s") % str(result),"I")
if callback != None:
if self.callbackArgs != None:
callback(self.callbackArgs) # now do callback from saved callback
else:
callback()
#===============================================================================
#
#===============================================================================
def Toggle(self, **kwargs):
log("",self)
if self.lightsEnabled:
self.Control("mode","stop")
else:
self.Control("start","dynamic")
def StartServer(self):
HttpdStart(self.global_session, self) #self gives the instances of this controller
def StopServer(self):
HttpdStop(self.global_session)
#===============================================================================
#
#===============================================================================
def writeMoodlamp(self):
log("",self)
self.setStatusBarInfo("writeMoodlamp")
# Moodlamp, set threshold to 0
data ="set threshold 0\n"
if config.plugins.enigmalight.moodlamp_mode.getValue() == str(1):
###############
# Static color
###############
color = self.getColor()
data +="set mode 1\n"
data +="set static_color "+ str(color) +"\n"
elif config.plugins.enigmalight.moodlamp_mode.getValue() == str(3):
###############
# Rgb test
###############
data +="set mode 3\n"
elif config.plugins.enigmalight.moodlamp_mode.getValue() == str(4):
###############
# Color fader
###############
#set brightness
data +="set mode 4\n"
data +="set moodlamp_brightness "+ str(config.plugins.enigmalight.moodlamp_fader_brightness.getValue()) +"\n"
elif config.plugins.enigmalight.moodlamp_mode.getValue() == str(5):
###############
# Rainbow
###############
#set brightness
data +="set mode 5\n"
self.sockClass.setCommand(data)
return 1
#===============================================================================
#
#===============================================================================
def writeDynamic(self):
log("",self)
self.setStatusBarInfo(_("writeDynamic"))
data ="set threshold " + str(config.plugins.enigmalight.threshold.getValue()) + "\n"
data +="set mode 2\n"
self.sockClass.setCommand(data)
#===============================================================================
#
#===============================================================================
def writeServer(self):
log("",self)
self.setStatusBarInfo(_("writeServer"))
data ="set threshold 0\n"
data +="set mode 0\n"
self.sockClass.setCommand(data)
#===============================================================================
#
#===============================================================================
def writeAdjust(self):
log("",self)
self.setStatusBarInfo(_("writeAdjust"))
data =""
#only send it to local client
if config.plugins.enigmalight.network_onoff.value == False:
if config.plugins.enigmalight.use_live_adjust.getValue() == "true":
data +="set adjust "+ str(str(config.plugins.enigmalight.adjustr.getValue()) + " " + str(config.plugins.enigmalight.adjustg.getValue()) + " " + str(config.plugins.enigmalight.adjustb.getValue()) + " " + str(config.plugins.enigmalight.use_live_adjust.getValue())) + "\n"
else:
data +="set adjust "+ str(0) + " " + str(0) + " " + str(0) + " false " + "\n"
return data
#===============================================================================
# Change the current selected value
#===============================================================================
def changeValue(self, currentoption):
log("",self)
color = None
value = str(currentoption.getValue())
text = str(currentoption.getText())
self.setStatusBarInfo(_("changeValue"))
try:
if self.lightsEnabled == True:
if currentoption == config.plugins.enigmalight.mode:
if text == _("Moodlamp"):
if self.current_mode == "2" or self.current_mode != None:
self.writeMoodlamp()
elif text == _("Dynamic") and self.current_mode != "2":
self.writeDynamic()
elif currentoption == config.plugins.enigmalight.moodlamp_mode: #Change mode only when mode is set to moodlamp
if self.current_mode != "2" and self.current_mode != None:
self.writeMoodlamp()
#elif currentoption == config.plugins.enigmalight.presets: #send all setting
# self.sendAll()
elif currentoption == config.plugins.enigmalight.moodlamp_static_color_r or currentoption == config.plugins.enigmalight.moodlamp_static_color_g or currentoption == config.plugins.enigmalight.moodlamp_static_color_b or currentoption == config.plugins.enigmalight.moodlamp_fader_brightness or currentoption == config.plugins.enigmalight.adjustr or currentoption == config.plugins.enigmalight.adjustg or currentoption == config.plugins.enigmalight.adjustb or currentoption == config.plugins.enigmalight.use_live_adjust:
data = self.writeAdjust()
#data +="set color_sequence " + str(config.plugins.enigmalight.color_sequence.getValue()) + "\n"
if self.current_mode != "2" and self.current_mode != None:
color = self.getColor()
data +="set static_color " + str(color) + "\n"
data +="set moodlamp_brightness" + str(config.plugins.enigmalight.moodlamp_fader_brightness.getValue()) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.saturation:
data ="set saturation "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.saturationmin:
data ="set saturationmin "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.saturationmax:
data ="set saturationmax "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.value:
data ="set value "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.valuemin:
data ="set valuemin "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.valuemax:
data ="set valuemax "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.speed:
data ="set speed "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.delay:
data ="set delay "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.autospeed:
data ="set autospeed "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.interval:
data ="set interval "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.gamma:
data ="set gamma "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.m_3dmode:
data ="set 3dmode "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.interpolation:
data ="set saturation "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.blackbar_h:
data ="set blackbar_h "+ str(value) + "\n"
data +="set blackbar_f "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.blackbar_v:
data ="set blackbar_v "+ str(value) + "\n"
data +="set blackbar_f "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.blackbar_f:
data ="set blackbar_f "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.threshold:
data ="set threshold "+ str(value) + "\n"
self.sockClass.setCommand(data)
elif currentoption == config.plugins.enigmalight.cluster:
data ="set cluster "+ str(value) + "\n"
self.sockClass.setCommand(data)
except:
from traceback import format_exc
log("",self,"Error: "+format_exc())
try:
open(getCrashFilePath(),"w").write(format_exc())
if config.plugins.enigmalight.message_error_onoff.value:
showError(self.session, (format_exc()), "E")
except:
pass
def writeSettings(self):
log("",self)
self.setStatusBarInfo(_("Write settings."))
##################################
# Write adjust settings
##################################
data = self.writeAdjust()
##################################
# Write some values
##################################
#data +="set color_sequence "+ str(config.plugins.enigmalight.color_sequence.getValue()) + "\n"
data +="set saturation "+ str(config.plugins.enigmalight.saturation.getValue()) + "\n"
data +="set saturationmin "+ str(config.plugins.enigmalight.saturationmin.getValue()) + "\n"
data +="set saturationmax "+ str(config.plugins.enigmalight.saturationmax.getValue()) + "\n"
data +="set value "+ str(config.plugins.enigmalight.value.getValue()) + "\n"
data +="set valuemin "+ str(config.plugins.enigmalight.valuemin.getValue()) + "\n"
data +="set valuemax "+ str(config.plugins.enigmalight.valuemax.getValue()) + "\n"
data +="set speed "+ str(config.plugins.enigmalight.speed.getValue()) + "\n"
data +="set autospeed "+ str(config.plugins.enigmalight.autospeed.getValue()) + "\n"
data +="set gamma "+ str(config.plugins.enigmalight.gamma.getValue()) + "\n"
#data +="set interpolation "+ str(config.plugins.enigmalight.interpolation.getValue()) + "\n"
data +="set threshold "+ str(config.plugins.enigmalight.threshold.getValue()) + "\n"
data +="set interval "+ str(config.plugins.enigmalight.interval.getValue()) + "\n"
data +="set blackbar_h "+ str(config.plugins.enigmalight.blackbar_h.getValue()) + "\n"
data +="set blackbar_v "+ str(config.plugins.enigmalight.blackbar_v.getValue()) + "\n"
data +="set blackbar_f "+ str(config.plugins.enigmalight.blackbar_f.getValue()) + "\n"
data +="set 3dmode "+ str(config.plugins.enigmalight.m_3dmode.getValue()) + "\n"
data +="set cluster "+ str(config.plugins.enigmalight.cluster.getValue()) + "\n"
data +="set delay "+ str(config.plugins.enigmalight.delay.getValue()) + "\n"
self.sockClass.setCommand(data)
#===============================================================================
# Send all values
#===============================================================================
def sendAll(self,sendValues=False):
log("",self)
if sendValues == True or (self.current_mode != "99" and self.current_mode != "off"): #only send values if grabber or moodlamp is running
if self.sockClass.ping():
self.setStatusBarInfo(_("Set all values.."))
##################################
# Set mode, color etc...
##################################
if self.lightsEnabled and self.current_mode != "0":
if config.plugins.enigmalight.mode.getValue() == str(1):
###############
# Moodlamp
###############
self.writeMoodlamp()
elif config.plugins.enigmalight.mode.getValue() == str(2):
###############
# Dynamic
###############
self.writeDynamic()
#===============================================================================
#
#===============================================================================
def checkMode(self):
log("",self)
return str(config.plugins.enigmalight.mode.value)
#===============================================================================
#
#===============================================================================
def getColor(self):
log("",self)
color = rgbToHex(config.plugins.enigmalight.moodlamp_static_color_r.getValue(),config.plugins.enigmalight.moodlamp_static_color_g.getValue(),config.plugins.enigmalight.moodlamp_static_color_b.getValue())
return color
#===============================================================================
#
#
#
# Webremote handling, get values and change values
#
#
#
#===============================================================================
def handleWebRemote(self, option, value):
data = None
#
# Color tuning
#
if option == "brightness":
data ="set value "+ str(value) + "\n"
config.plugins.enigmalight.value.setValue(value)
config.plugins.enigmalight.value.save()
elif option == "brightnessmin":
data ="set valuemin "+ str(value) + "\n"
config.plugins.enigmalight.valuemin.setValue(value)
config.plugins.enigmalight.valuemin.save()
elif option == "brightnessmax":
data ="set valuemax "+ str(value) + "\n"
config.plugins.enigmalight.valuemax.setValue(value)
config.plugins.enigmalight.valuemax.save()
elif option == "saturation":
data ="set saturation "+ str(value) + "\n"
config.plugins.enigmalight.saturation.setValue(value)
config.plugins.enigmalight.saturation.save()
elif option == "saturationmax":
data ="set saturationmax "+ str(value) + "\n"
config.plugins.enigmalight.saturationmax.setValue(value)
config.plugins.enigmalight.saturationmax.save()
elif option == "saturationmin":
data ="set saturationmin "+ str(value) + "\n"
config.plugins.enigmalight.saturationmin.setValue(value)
config.plugins.enigmalight.saturationmin.save()
elif option == "speed":
data ="set speed "+ str(value) + "\n"
config.plugins.enigmalight.speed.setValue(value)
config.plugins.enigmalight.speed.save()
elif option == "gamma":
data ="set gamma "+ str(value) + "\n"
config.plugins.enigmalight.gamma.setValue(value)
config.plugins.enigmalight.gamma.save()
if data is not None:
self.sockClass.setCommand(data)
def getOptionValue(self, option):
ret = ""
#
# Control / Mode
#
if option == "lights_onoff":
if self.lightsEnabled:
ret = "on"
else:
ret = "off"
elif option == "mode":
ret = str(self.current_mode)
#
# Color tuning
#
elif option == "saturation":
ret = str(config.plugins.enigmalight.saturation.getValue())
elif option == "saturationmin":
ret = str(config.plugins.enigmalight.saturationmin.getValue())
elif option == "saturationmax":
ret = str(config.plugins.enigmalight.saturationmax.getValue())
elif option == "brightness":
ret = str(config.plugins.enigmalight.value.getValue())
elif option == "brightnessmin":
ret = str(config.plugins.enigmalight.valuemin.getValue())
elif option == "brightnessmax":
ret = str(config.plugins.enigmalight.valuemax.getValue())
elif option == "speed":
ret = str(config.plugins.enigmalight.speed.getValue())
elif option == "gamma":
ret = str(config.plugins.enigmalight.gamma.getValue())
else:
ret = _("Unknown option")
return ret
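# Usage sketch (illustrative values): handleWebRemote("brightness", 120) forwards
# "set value 120\n" to the running enigmalight daemon and persists the new setting,
# while getOptionValue("brightness") returns the currently configured value as a string.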
|
import time
from sqlalchemy import func
from app import db
from app import work_log
from app.main.dbmodel.logdb import t_host_cpu
class MonitorTask(object):
"""docstring for MonitorTask"""
def __init__(self):
super(MonitorTask, self).__init__()
def HostUptimeData(self, data):
ip = data.get("ip")
# current_time = time.strftime('%Y-%m-%d %H:%M:%S')
old_time = time.strftime("%Y-%m-%d_%X", time.localtime(time.time() - 3600))
try:
rows = (
db.session.query(
func.date_format(t_host_cpu.ctime, "%H:%i:%s").label("ctime"),
t_host_cpu.ld_1,
t_host_cpu.ld_2,
t_host_cpu.ld_3,
)
.filter(t_host_cpu.ip == ip, t_host_cpu.ctime > old_time)
.all()
)
except Exception as e:
work_log.error('select db HostUptimeData error')
work_log.error(str(e))
return {"recode": 9, "redata": str(e)}
x_name = []
ld_1 = []
ld_5 = []
ld_15 = []
for i in rows:
x_name.append(str(i.ctime))
ld_1.append(format(i.ld_1, ".2f"))
ld_5.append(format(i.ld_2, ".2f"))
ld_15.append(format(i.ld_3, ".2f"))
value = {
"x_name": x_name,
"y_data": {"ld_1": ld_1, "ld_5": ld_5, "ld_15": ld_15},
}
return {"recode": 0, "redata": value}
def HostCpuData(self, data):
ip = data.get("ip")
# current_time = time.strftime('%Y-%m-%d %H:%M:%S')
old_time = time.strftime("%Y-%m-%d_%X", time.localtime(time.time() - 3600))
try:
rows = (
db.session.query(
func.date_format(t_host_cpu.ctime, "%H:%i:%s").label("ctime"),
t_host_cpu.cpu,
)
.filter(t_host_cpu.ip == ip, t_host_cpu.ctime > old_time)
.all()
)
except Exception as e:
work_log.error('select db HostCpuData error')
work_log.error(str(e))
return {"recode": 9, "redata": str(e)}
x_name = []
cpu = []
work_log.debug(str(rows))
for i in rows:
x_name.append(str(i.ctime))
cpu.append(format(i.cpu, ".2f"))
value = {"x_name": x_name, "y_data": {"cpu": cpu}}
return {"recode": 0, "redata": value}
def run(self, data):
types = data.get("types")
unit = data.get("unit")
if not types == "host":
return {"recode": 1, "redata": "format error"}
if unit == "uptime":
return self.HostUptimeData(data)
if unit == "cpu":
return self.HostCpuData(data)
return {"recode": 1, "redata": "format error"}
|
#!/usr/bin/env python3
from settings import *
class Rom:
@staticmethod
def atualizar():
os.chdir(home + "/"+ rom_custom)
os.system("repo sync -c -f --force-sync -j32 --no-tags --no-clone-bundle --force-broken")
@staticmethod
def limpar():
os.chdir(home + rom_custom + outdir)
pegunta = int(input("1- Full clean\n2- Partial clean\n"))
if pegunta == 1:
os.system("make clean")
tipo = "Completo"
else:
os.system("rm -rf " + home + outdir + "combinedroot")
os.system("rm -rf " + home + outdir + "data")
os.system("rm -rf " + home + outdir + "recovery")
os.system("rm -rf " + home + outdir + "root")
os.system("rm -rf " + home + outdir + "system")
os.system("rm -rf " + home + outdir + "utilities")
os.system("rm -rf " + home + outdir + "boot")
os.system("rm -rf " + home + outdir + "combined")
os.system("rm -rf " + home + outdir + "kernel")
os.system("rm -rf " + home + outdir + "ramdisk")
os.system("rm -rf " + home + outdir + "system")
os.system("rm -rf " + home + outdir + "obj/ETC/system_build_prop_intermediates")
os.system("rm -rf " + home + outdir + "ota_temp/RECOVERY/RAMDISK")
os.system("rm -rf " + home + outdir + "$outdir/*.*")
tipo = "Parcial"
print("Foi limpo com sucesso do tipo " + tipo)
|
# Type examples
#my_cat = "garfield is 2"
#print(my_cat)
#print(type(my_cat))
#print(type(12))
#print(type(3.14))
"""
Thinking of a grocery list, it might be:
- eggs
- milk
- bread
- bacon
- cheese
"""
# my_blank_list = []
# basic_list = ["item1", "item2"]
groceries = ["eggs", 3.14, "milk", "bread", "bacon", "cheese"]
# index 0 1 2 3 4 5
print(groceries)
print(type(groceries))
print(groceries[3])
print("I need to get " + groceries[0])
print(str(groceries[1]) + " is something I need to get")
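# Indexing also works from the end of the list: -1 is the last item.
print(groceries[-1])
# Slicing returns a new list containing a subset of the items (here indexes 1 and 2).
print(groceries[1:3])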
|
import os
import tarfile
import tempfile
from copy import copy
from pkg_resources import safe_version, to_filename
from pdm.builders.base import Builder
from pdm.context import context
def normalize_file_permissions(st_mode):
"""
Normalizes the permission bits in the st_mode field from stat to 644/755
Popular VCSs only track whether a file is executable or not. The exact
permissions can vary on systems with different umasks. Normalising
to 644 (non executable) or 755 (executable) makes builds more reproducible.
"""
# Set 644 permissions, leaving higher bits of st_mode unchanged
new_mode = (st_mode | 0o644) & ~0o133
if st_mode & 0o100:
new_mode |= 0o111 # Executable: 644 -> 755
return new_mode
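# Example (illustrative): a regular file stored as 0o100664 is normalised to
# 0o100644, while 0o100775 (an executable bit set) becomes 0o100755.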
def clean_tarinfo(tar_info, mtime=None):
"""
Clean metadata from a TarInfo object to make it more reproducible.
- Set uid & gid to 0
- Set uname and gname to ""
- Normalise permissions to 644 or 755
- Set mtime if not None
"""
ti = copy(tar_info)
ti.uid = 0
ti.gid = 0
ti.uname = ""
ti.gname = ""
ti.mode = normalize_file_permissions(ti.mode)
if mtime is not None:
ti.mtime = mtime
return ti
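# Usage sketch: clean_tarinfo can also be passed as the "filter" argument of TarFile.add,
# e.g. tar.add(path, filter=clean_tarinfo), so every member is normalised on the way in.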
class SdistBuilder(Builder):
"""This build should be performed for PDM project only."""
def build(self, build_dir: str, **kwargs):
if not os.path.exists(build_dir):
os.makedirs(build_dir, exist_ok=True)
context.io.echo("- Building {}...".format(context.io.cyan("sdist")))
version = to_filename(safe_version(self.meta.version))
target = os.path.join(
build_dir, "{}-{}.tar.gz".format(self.meta.project_name, version)
)
tar = tarfile.open(target, mode="w:gz", format=tarfile.PAX_FORMAT)
try:
tar_dir = "{}-{}".format(self.meta.project_name, version)
files_to_add = self.find_files_to_add(True)
for relpath in files_to_add:
tar.add(
relpath,
arcname=os.path.join(tar_dir, str(relpath)),
recursive=False,
)
context.io.echo(f" - Adding: {relpath}", verbosity=context.io.DETAIL)
fd, temp_name = tempfile.mkstemp(prefix="pkg-info")
pkg_info = self.format_pkginfo(False).encode("utf-8")
with open(fd, "wb") as f:
f.write(pkg_info)
tar.add(
temp_name, arcname=os.path.join(tar_dir, "PKG-INFO"), recursive=False
)
context.io.echo(" - Adding: PKG-INFO", verbosity=context.io.DETAIL)
finally:
tar.close()
context.io.echo("- Built {}".format(context.io.cyan(os.path.basename(target))))
return target
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg import eigh_tridiagonal
class Multitaper(object):
'''
A class to perform the spectral density estimation of an input signal based on the multitaper method
'''
def __init__(self, nn, nw=3.5, kk=7):
'''
Initialize the object.
nn: taper length
nw: half of resolution bandwidth
kk: number of tapers, in principle less than 2*nw
'''
self.kk = kk
self.nw = nw
self.nn = nn
self.dpss = self._regen_dpss()
def __str__(self):
return "No. of tapers: {}, taper length: {}, half res. BW: {}".format(self.kk, self.nn, self.nw)
def _regen_dpss(self):
'''
The generated tapers in 2D array [[taper_0], [taper_1], ...] are ordered decreasingly by their respective eigenvalues.
'''
ww = self.nw / self.nn
diag_main = ((self.nn - 1) / 2 - np.arange(self.nn))**2 * np.cos(2 * np.pi * ww)
diag_off = np.arange(1, self.nn) * np.arange(self.nn - 1, 0, -1) / 2
vecs = eigh_tridiagonal(diag_main, diag_off, select='i', select_range=(self.nn - self.kk, self.nn - 1))[1]
# polarity follows Slepian convention
return (vecs * np.where(vecs[0, :] > 0, 1, -1)).T[::-1]
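# Sanity-check sketch: the rows of self.dpss are orthonormal eigenvectors, so for the
# default kk=7, Multitaper(128).dpss @ Multitaper(128).dpss.T should be close to np.eye(7).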
def estimate(self, signal, axis=-1):
'''
Estimate the power spectral density of the input signal.
signal: n-dimensional array of real or complex values
axis: axis along which to apply the Slepian windows. Default is the last one.
'''
# conversion to positive-only index
axis_p = (axis + signal.ndim) % signal.ndim
sig_exp_shape = list(signal.shape[:axis]) + [1] + list(signal.shape[axis:])
tap_exp_shape = [1] * axis_p + list(self.dpss.shape) + [1] * (signal.ndim-1-axis_p)
signal_tapered = signal.reshape(sig_exp_shape) * self.dpss.reshape(tap_exp_shape)
return np.fft.fftshift(np.mean(np.absolute(np.fft.fft(signal_tapered, axis=axis_p+1))**2, axis=axis_p), axes=axis_p)
# ------------------------
if __name__ == '__main__':
mymtm = Multitaper(2048)
sig = np.vectorize(complex)(np.random.rand(2048), np.random.rand(2048))
print(mymtm.estimate(sig))
mymtm = Multitaper(256)
sig = np.reshape(sig, (8, 256))
print(mymtm.estimate(sig))
|
import musekafka
def test_version_exists():
"""musekafka.__version__ is set."""
assert musekafka.__version__ is not None
|
import abc
import copy
import inspect
import json
import os
import six
from fireworks.core.firework import FireTaskBase
from fireworks.core.firework import FWAction
from fireworks.core.firework import Workflow
from fireworks.core.firework import Firework
from fireworks.core.launchpad import LaunchPad
from fireworks.utilities.fw_utilities import explicit_serialize
from fireworks.utilities.fw_serializers import serialize_fw
from monty.json import MontyDecoder #, MontyEncoder
from monty.json import MSONable
from monty.serialization import loadfn
from monty.subprocess import Command
from abiflows.core.mastermind_abc import ControlProcedure, ControlledItemType
from abiflows.core.mastermind_abc import ControllerNote
from abiflows.core.mastermind_abc import Cleaner
from abiflows.fireworks.utils.fw_utils import set_short_single_core_to_spec, get_short_single_core_spec
RESTART_FROM_SCRATCH = ControllerNote.RESTART_FROM_SCRATCH
RESET_RESTART = ControllerNote.RESET_RESTART
SIMPLE_RESTART = ControllerNote.SIMPLE_RESTART
class SRCTaskMixin(object):
src_type = ''
@serialize_fw
def to_dict(self):
d = {}
for arg in inspect.getargspec(self.__init__).args:
if arg != "self":
val = self.__getattribute__(arg)
if hasattr(val, "as_dict"):
val = val.as_dict()
elif isinstance(val, (tuple, list)):
val = [v.as_dict() if hasattr(v, "as_dict") else v for v in val]
d[arg] = val
return d
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
kwargs = {k: dec.process_decoded(v) for k, v in d.items()
if k in inspect.getargspec(cls.__init__).args}
return cls(**kwargs)
def setup_directories(self, fw_spec, create_dirs=False):
if self.src_type == 'setup':
self.src_root_dir = fw_spec.get('_launch_dir', os.getcwd())
elif 'src_directories' in fw_spec:
self.src_root_dir = fw_spec['src_directories']['src_root_dir']
# elif self.src_type in ['run', 'control']:
# self.src_root_dir = os.path.split(os.path.abspath(fw_spec['_launch_dir']))[0]
else:
raise ValueError('Cannot setup directories for "src_type" = "{}"'.format(self.src_type))
self.setup_dir = os.path.join(self.src_root_dir, 'setup')
self.run_dir = os.path.join(self.src_root_dir, 'run')
self.control_dir = os.path.join(self.src_root_dir, 'control')
# if 'src_directories' in fw_spec:
# if (self.src_root_dir != fw_spec['src_directories']['src_root_dir'] or
# self.setup_dir != fw_spec['src_directories']['setup_dir'] or
# self.run_dir != fw_spec['src_directories']['run_dir'] or
# self.control_dir != fw_spec['src_directories']['control_dir']):
# raise ValueError('src_directories in fw_spec do not match actual SRC directories ...')
if create_dirs:
os.makedirs(self.setup_dir)
os.makedirs(self.run_dir)
os.makedirs(self.control_dir)
@property
def src_directories(self):
return {'src_root_dir': self.src_root_dir,
'setup_dir': self.setup_dir,
'run_dir': self.run_dir,
'control_dir': self.control_dir
}
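# Directory layout sketch: every SRC trio works in <src_root_dir>/setup,
# <src_root_dir>/run and <src_root_dir>/control, as built in setup_directories.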
@explicit_serialize
class SetupTask(SRCTaskMixin, FireTaskBase):
src_type = 'setup'
task_type = 'unknown'
RUN_PARAMETERS = ['_queueadapter', 'mpi_ncpus', 'qtk_queueadapter']
def __init__(self, deps=None, restart_info=None, task_type=None):
# TODO: if at some point this is not enough (as for the ddk, or for the structure, or for anything else),
# we could think of an object ?
# deps are transformed to be a list or a dict of lists
if isinstance(deps, dict):
deps = dict(deps)
for k, v in deps.items():
if not isinstance(v, (list, tuple)):
deps[k] = [v]
elif deps and not isinstance(deps, (list, tuple)):
deps = [deps]
self.deps = deps
self.restart_info = restart_info
if task_type is not None:
self.task_type = task_type
def set_restart_info(self, restart_info=None):
self.restart_info = restart_info
def run_task(self, fw_spec):
# Set up and create the directory tree of the Setup/Run/Control trio,
self.setup_directories(fw_spec=fw_spec, create_dirs=True)
# Forward directory information to run and control fireworks #HACK in _setup_run_and_control_dirs_and_fworker
self._setup_run_and_control_dirs_and_fworker(fw_spec=fw_spec)
# Move to the setup directory
os.chdir(self.setup_dir)
# Make the file transfers from another worker if needed
self.file_transfers(fw_spec=fw_spec)
# Get back information from the previous runs
self.fetch_previous_info(fw_spec=fw_spec)
# Setup the parameters for the run (number of cpus, time, memory, openmp, ...)
params = list(self.RUN_PARAMETERS)
if 'src_modified_objects' in fw_spec:
for target, modified_object in fw_spec['src_modified_objects'].items():
params.remove(target)
run_parameters = self._setup_run_parameters(fw_spec=fw_spec, parameters=params)
# Prepare run (make links to output files from previous tasks, write input files, create the directory
# tree of the program, ...)
self.prepare_run(fw_spec=fw_spec)
# Update the spec of the Run Firework with the directory tree, the run_parameters obtained from
# setup_run_parameters and the modified_objects transferred directly from the Control Firework
update_spec = {'src_directories': self.src_directories}
update_spec.update(run_parameters)
if 'src_modified_objects' in fw_spec:
update_spec.update(fw_spec['src_modified_objects'])
if 'previous_fws' in fw_spec:
update_spec['previous_fws'] = fw_spec['previous_fws']
return FWAction(update_spec=update_spec)
def _setup_run_parameters(self, fw_spec, parameters):
qadapter_spec, qtk_queueadapter = get_short_single_core_spec(return_qtk=True)
params = {'_queueadapter': qadapter_spec, 'mpi_ncpus': 1, 'qtk_queueadapter': qtk_queueadapter}
setup_params = self.setup_run_parameters(fw_spec=fw_spec)
params.update(setup_params)
if 'initial_parameters' in fw_spec and fw_spec['SRC_task_index'].index == 1:
qtk_queueadapter = params['qtk_queueadapter']
initial_parameters = fw_spec['initial_parameters']
if 'run_timelimit' in initial_parameters:
qtk_queueadapter.set_timelimit(timelimit=initial_parameters['run_timelimit'])
if 'run_mem_per_proc' in initial_parameters:
qtk_queueadapter.set_mem_per_proc(mem_mb=initial_parameters['run_mem_per_proc'])
if 'run_mpi_ncpus' in initial_parameters:
qtk_queueadapter.set_mpi_procs(mpi_procs=initial_parameters['run_mpi_ncpus'])
qadapter_spec = qtk_queueadapter.get_subs_dict()
params.update({'qtk_queueadapter': qtk_queueadapter, '_queueadapter': qadapter_spec})
return {param: params[param] for param in parameters}
def setup_run_parameters(self, fw_spec):
return {}
def file_transfers(self, fw_spec):
pass
def fetch_previous_info(self, fw_spec):
pass
def prepare_run(self, fw_spec):
pass
def _setup_run_and_control_dirs_and_fworker(self, fw_spec):
"""
This method is used to update the spec of the run and control fireworks with the src_directories as well as
set the _launch_dir of the run and control fireworks to be the run_dir and control_dir respectively.
WARNING: This is a bit hackish! Do not change this unless you know exactly what you are doing!
:param fw_spec: Firework's spec
"""
# Get the launchpad
if '_add_launchpad_and_fw_id' in fw_spec:
lp = self.launchpad
setup_fw_id = self.fw_id
else:
try:
fw_dict = loadfn('FW.json')
except IOError:
try:
fw_dict = loadfn('FW.yaml')
except IOError:
raise RuntimeError("Launchpad/fw_id not present in spec and No FW.json nor FW.yaml file present: "
"impossible to determine fw_id")
lp = LaunchPad.auto_load()
setup_fw_id = fw_dict['fw_id']
if '_add_fworker' in fw_spec:
fworker = self.fworker
else:
raise ValueError('Should have access to the fworker in SetupTask ...')
this_lzy_wf = lp.get_wf_by_fw_id_lzyfw(setup_fw_id)
# Check that SetupTask and RunTask have only one child firework
child_fw_ids = this_lzy_wf.links[setup_fw_id]
if len(child_fw_ids) != 1:
raise ValueError('SetupTask\'s Firework should have exactly one child firework')
run_fw_id = child_fw_ids[0]
child_run_fw_ids = this_lzy_wf.links[run_fw_id]
if len(child_run_fw_ids) != 1:
raise ValueError('RunTask\'s Firework should have exactly one child firework')
control_fw_id = child_run_fw_ids[0]
spec_update = {'_launch_dir': self.run_dir,
'src_directories': self.src_directories,
'_fworker': fworker.name}
lp.update_spec(fw_ids=[run_fw_id],
spec_document=spec_update)
spec_update['_launch_dir'] = self.control_dir
lp.update_spec(fw_ids=[control_fw_id],
spec_document=spec_update)
def additional_task_info(self):
return {}
class RunTask(SRCTaskMixin, FireTaskBase):
src_type = 'run'
task_type = 'unknown'
def __init__(self, control_procedure, task_type=None):
self.set_control_procedure(control_procedure=control_procedure)
if task_type is not None:
self.task_type = task_type
def set_control_procedure(self, control_procedure):
self.control_procedure = control_procedure
#TODO: check something here with the monitors ?
def run_task(self, fw_spec):
self.setup_directories(fw_spec=fw_spec, create_dirs=False)
launch_dir = os.getcwd()
# Move to the run directory
os.chdir(self.run_dir)
with open(os.path.join(self.run_dir, 'fw_info.txt'), 'a') as f:
f.write('FW launch_directory :\n{}'.format(launch_dir))
# The Run and Control tasks have to run on the same worker
#TODO: do something here with the monitoring controllers ...
# should stop the RunTask but the correction should be applied in control !
self.config(fw_spec=fw_spec)
self.run(fw_spec=fw_spec)
update_spec = self.postrun(fw_spec=fw_spec)
if update_spec is None:
update_spec = {}
if 'previous_fws' in fw_spec:
update_spec['previous_fws'] = fw_spec['previous_fws']
#TODO: the directory is passed thanks to _pass_job_info. Should we pass anything else ?
return FWAction(stored_data=None, exit=False, update_spec=update_spec, mod_spec=None,
additions=None, detours=None,
defuse_children=False)
def config(self, fw_spec):
pass
@abc.abstractmethod
def run(self, fw_spec):
pass
def postrun(self, fw_spec):
pass
def additional_task_info(self):
return {}
@explicit_serialize
class ScriptRunTask(RunTask):
task_type = 'script'
def __init__(self, script_str, control_procedure):
RunTask.__init__(self, control_procedure=control_procedure)
self.script_str = script_str
def run(self, fw_spec):
cmds_strs = self.script_str.split(';')
with open('script_run.log', 'w') as f:
for cmd_str in cmds_strs:
cmd = Command(cmd_str)
cmd = cmd.run()
if cmd.retcode != 0:
raise ValueError('Command "{}" returned exit code {:d}'.format(cmd_str, cmd.retcode))
if cmd.output is not None:
print(cmd.output)
f.write('{}\n'.format(str(cmd)))
@serialize_fw
def to_dict(self):
return {'script_str': self.script_str,
'control_procedure': self.control_procedure.as_dict()}
@classmethod
def from_dict(cls, d):
control_procedure = ControlProcedure.from_dict(d['control_procedure'])
return cls(script_str=d['script_str'], control_procedure=control_procedure)
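# Usage sketch (hypothetical control procedure): ScriptRunTask(script_str='echo start; echo done',
# control_procedure=my_control_procedure) runs each ';'-separated command through monty's Command
# wrapper and logs them to script_run.log.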
@explicit_serialize
class ControlTask(SRCTaskMixin, FireTaskBase):
src_type = 'control'
def __init__(self, control_procedure, manager=None, max_restarts=10, src_cleaning=None):
self.control_procedure = control_procedure
self.manager = manager
self.max_restarts = max_restarts
self.src_cleaning = src_cleaning
def run_task(self, fw_spec):
self.setup_directories(fw_spec=fw_spec, create_dirs=False)
launch_dir = os.getcwd()
# Move to the control directory
os.chdir(self.control_dir)
with open(os.path.join(self.control_dir, 'fw_info.txt'), 'a') as f:
f.write('FW launch_directory :\n{}'.format(launch_dir))
# Get the task index
task_index = SRCTaskIndex.from_any(fw_spec['SRC_task_index'])
# Get the setup and run fireworks
setup_and_run_fws = self.get_setup_and_run_fw(fw_spec=fw_spec)
self.setup_fw = setup_and_run_fws['setup_fw']
self.run_fw = setup_and_run_fws['run_fw']
# Specify the type of the task that is controlled:
# - aborted : the task has been aborted due to a monitoring controller during the Run Task, the FW state
# is COMPLETED
# - completed : the task has completed, the FW state is COMPLETED
# - failed : the task has failed, the FW state is FIZZLED
if self.run_fw.state == 'COMPLETED':
if 'src_run_task_aborted' in fw_spec:
self.control_procedure.set_controlled_item_type(ControlledItemType.task_aborted())
else:
self.control_procedure.set_controlled_item_type(ControlledItemType.task_completed())
elif self.run_fw.state == 'FIZZLED':
self.control_procedure.set_controlled_item_type(ControlledItemType.task_failed())
else:
raise RuntimeError('The state of the Run Firework is "{}" '
'while it should be COMPLETED or FIZZLED'.format(self.run_fw.state))
# Get the keyword_arguments to be passed to the process method of the control_procedure
#TODO: how to do that kind of thing automatically ??
# each key should have : how to get it from the run_fw/(setup_fw)
# how to force/apply it to the next SRC (e.g. how do we say that to setup)
# Actually, the object can come from : the setup_fw or from the run_fw (from the setup_spec, from the run_spec,
# from the setup_task or the run_task (or in general one of the tasks ...
# even multiple tasks is not yet supported ... should it be ? or should we stay with only one task always ?)
# If it is modified, it should update the corresponding bit (setup_spec and/or run_spec and/or
# setup_task and/or run_task)
initial_objects_info = self.get_initial_objects_info(setup_fw=self.setup_fw, run_fw=self.run_fw,
src_directories=self.src_directories)
qerr_filepath = os.path.join(self.run_fw.launches[-1].launch_dir, 'queue.qerr')
qout_filepath = os.path.join(self.run_fw.launches[-1].launch_dir, 'queue.qout')
initial_objects_info.update({'queue_adapter': {'object': self.run_fw.spec['qtk_queueadapter'],
'updates': [{'target': 'fw_spec',
'key': 'qtk_queueadapter'},
{'target': 'fw_spec',
'key': '_queueadapter',
'mod': 'get_subs_dict'}]},
'qerr_filepath': {'object': qerr_filepath},
'qout_filepath': {'object': qout_filepath}})
initial_objects = {name: obj_info['object'] for name, obj_info in initial_objects_info.items()}
control_report = self.control_procedure.process(**initial_objects)
if control_report.unrecoverable:
with open(os.path.join(self.control_dir, 'control_report.json'), 'w') as f:
json.dump(control_report.as_dict(), f)
#TODO: apply the cleaning here
if self.src_cleaning is not None:
pass
raise ValueError('Errors are unrecoverable. Control report written in "control_report.json"')
# If everything is ok, update the spec of the children
if control_report.finalized:
stored_data = {'control_report': control_report, 'finalized': True}
update_spec = {}
mod_spec = []
run_task = self.run_fw.tasks[-1]
setup_task = self.setup_fw.tasks[-1]
task_type = run_task.task_type
#TODO: should we also add here the cluster in which the calculation was performed so that if the next
# SRC trio starts on another cluster, it should fetch the needed files from the run_dir of this cluster
task_info = {'dir': self.run_dir}
task_info.update(run_task.additional_task_info())
task_info.update(setup_task.additional_task_info())
mod_spec.append({'_push': {'previous_fws->'+task_type: task_info}})
if self.src_cleaning is not None:
pass
return FWAction(stored_data=stored_data, exit=False, update_spec=update_spec, mod_spec=mod_spec,
additions=None, detours=None, defuse_children=False)
# Check the maximum number of restarts
if task_index.index == self.max_restarts:
# TODO: decide when to apply cleaning here ?
if self.src_cleaning is not None:
pass
raise ValueError('Maximum number of restarts ({:d}) reached'.format(self.max_restarts))
# Increase the task_index
task_index.increase_index()
# Apply the actions on the objects to get the modified objects (to be passed to SetupTask)
# modified_objects = {}
# for target, action in control_report.actions.items():
# # Special case right now for the queue adapter ...
# if target == 'queue_adapter':
# qtk_qadapter = initial_objects[target]
# action.apply(qtk_qadapter)
# modified_objects['qtk_queueadapter'] = qtk_qadapter
# modified_objects['_queueadapter'] = qtk_qadapter.get_subs_dict()
# else:
# modified_objects[target] = action.apply(initial_objects[target])
# New spec
# remove "_tasks" which is present in spec for recent fireworks versions. Remove it here to avoid
# problems with deepcopy.
new_spec = dict(self.run_fw.spec)
new_spec.pop("_tasks", None)
new_spec = copy.deepcopy(new_spec)
# New tasks
setup_task = self.setup_fw.tasks[-1]
run_task = self.run_fw.tasks[-1]
control_task = self
if 'src_modified_objects' in fw_spec:
modified_objects = fw_spec['src_modified_objects']
else:
modified_objects = {}
setup_spec_update = {}
run_spec_update = {}
for target, action in control_report.actions.items():
target_object = initial_objects[target]
action.apply(target_object)
if target not in initial_objects_info:
raise ValueError('Object "{}" to be modified was not in the initial_objects'.format(target))
if 'updates' not in initial_objects_info[target]:
raise ValueError('Update information not present for object "{}"'.format(target))
for update in initial_objects_info[target]['updates']:
if update['target'] == 'fw_spec':
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
new_spec[update['key']] = mod
modified_objects[update['key']] = mod
else:
new_spec[update['key']] = target_object
modified_objects[update['key']] = target_object
elif update['target'] == 'setup_fw_spec':
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
setup_spec_update[update['key']] = mod
else:
setup_spec_update[update['key']] = target_object
elif update['target'] == 'run_fw_spec':
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
run_spec_update[update['key']] = mod
else:
run_spec_update[update['key']] = target_object
elif update['target'] in ['setup_task', 'run_task']:
task = setup_task if update['target'] == 'setup_task' else run_task
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
setattr(task, update['attribute'], mod)
else:
setattr(task, update['attribute'], target_object)
elif 'setup_task' in update['target']:
sp = update['target'].split('.')
if len(sp) != 2:
raise ValueError('target is "{}" but should contain exactly one "."'.format(update['target']))
if sp[0] != 'setup_task':
raise ValueError('target does not start with "setup_task" ...')
task = setup_task
task_attribute = getattr(task, sp[1])
if 'attribute' in update:
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
setattr(task_attribute, update['attribute'], mod)
else:
setattr(task_attribute, update['attribute'], target_object)
elif 'setter' in update:
setter = getattr(task_attribute, update['setter'])
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
setter(mod)
else:
setter(target_object)
else:
raise ValueError('Only changes to fw_spec, setup_fw_spec, run_fw_spec, setup_task and run_task '
'are allowed right now ...')
# Set the restart_info
setup_task.set_restart_info(control_report.restart_info)
# Pass the modified objects to the next SetupTask
new_spec['src_modified_objects'] = modified_objects
new_spec.pop('_launch_dir')
new_spec.pop('src_directories')
new_spec['previous_src'] = {'src_directories': self.src_directories}
if 'all_src_directories' in new_spec:
new_spec['all_src_directories'].append({'src_directories': self.src_directories})
else:
new_spec['all_src_directories'] = [{'src_directories': self.src_directories}]
# if '_queueadapter' in modified_objects:
# new_spec['_queueadapter'] = modified_objects['_queueadapter']
#TODO: what to do here ? Right now this should work, just transfer information from the run_fw to the
# next SRC group
if 'previous_fws' in fw_spec:
new_spec['previous_fws'] = fw_spec['previous_fws']
# Create the new SRC trio
# TODO: check initialization info, deps, ... previous_fws, ... src_previous_fws ? ...
new_SRC_fws = createSRCFireworks(setup_task=setup_task, run_task=run_task, control_task=control_task,
spec=new_spec, initialization_info=None, task_index=task_index,
run_spec_update=run_spec_update, setup_spec_update=setup_spec_update)
wf = Workflow(fireworks=new_SRC_fws['fws'], links_dict=new_SRC_fws['links_dict'])
return FWAction(stored_data={'control_report': control_report}, detours=[wf])
def get_setup_and_run_fw(self, fw_spec):
# Get the launchpad
if '_add_launchpad_and_fw_id' in fw_spec:
lp = self.launchpad
control_fw_id = self.fw_id
else:
try:
fw_dict = loadfn('FW.json')
except IOError:
try:
fw_dict = loadfn('FW.yaml')
except IOError:
raise RuntimeError("Launchpad/fw_id not present in spec and No FW.json nor FW.yaml file present: "
"impossible to determine fw_id")
lp = LaunchPad.auto_load()
control_fw_id = fw_dict['fw_id']
# Check that this ControlTask has only one parent firework
this_lzy_wf = lp.get_wf_by_fw_id_lzyfw(control_fw_id)
parents_fw_ids = this_lzy_wf.links.parent_links[control_fw_id]
if len(parents_fw_ids) != 1:
raise ValueError('ControlTask\'s Firework should have exactly one parent firework')
run_fw_id = parents_fw_ids[0]
# Get the Run Firework and its state
run_fw = lp.get_fw_by_id(fw_id=run_fw_id)
run_is_fizzled = '_fizzled_parents' in fw_spec
if run_is_fizzled and not run_fw.state == 'FIZZLED':
raise ValueError('ControlTask has "_fizzled_parents" key but parent Run firework is not fizzled ...')
run_is_completed = run_fw.state == 'COMPLETED'
if run_is_completed and run_is_fizzled:
raise ValueError('Run firework is FIZZLED and COMPLETED ...')
if (not run_is_completed) and (not run_is_fizzled):
raise ValueError('Run firework is neither FIZZLED nor COMPLETED ...')
# Get the Setup Firework
setup_job_info = run_fw.spec['_job_info'][-1]
setup_fw_id = setup_job_info['fw_id']
setup_fw = lp.get_fw_by_id(fw_id=setup_fw_id)
return {'setup_fw': setup_fw, 'run_fw': run_fw}
def get_initial_objects_info(self, setup_fw, run_fw, src_directories):
return {}
@classmethod
def from_controllers(cls, controllers, max_restarts=10):
cp = ControlProcedure(controllers=controllers)
return cls(control_procedure=cp, max_restarts=max_restarts)
@serialize_fw
def to_dict(self):
return {'control_procedure': self.control_procedure.as_dict(),
'manager': self.manager.as_dict() if self.manager is not None else None,
'max_restarts': self.max_restarts,
'src_cleaning': self.src_cleaning.as_dict() if self.src_cleaning is not None else None}
@classmethod
def from_dict(cls, d):
control_procedure = ControlProcedure.from_dict(d['control_procedure'])
dec = MontyDecoder()
if d['manager'] is None:
manager = None
else:
manager = dec.process_decoded(d['manager'])
if 'src_cleaning' in d:
src_cleaning = SRCCleaning.from_dict(d['src_cleaning']) if d['src_cleaning'] is not None else None
else:
src_cleaning = None
return cls(control_procedure=control_procedure, manager=manager, max_restarts=d['max_restarts'],
src_cleaning=src_cleaning)
class SRCCleanerOptions(MSONable):
WHEN_TO_CLEAN = ['EACH_STEP', 'LAST_STEP', 'EACH_STEP_EXCEPT_LAST']
CURRENT_SRC_STATES_ALLOWED = ['RECOVERABLE', 'UNRECOVERABLE', 'MAXRESTARTS', 'FINALIZED']
def __init__(self, when_to_clean, current_src_states_allowed, which_src_steps_to_clean):
self.when_to_clean = when_to_clean
self.current_src_states_allowed = current_src_states_allowed
self.which_src_steps_to_clean = which_src_steps_to_clean
@classmethod
def clean_all(cls):
return cls(when_to_clean='EACH_STEP', current_src_states_allowed=cls.CURRENT_SRC_STATES_ALLOWED,
which_src_steps_to_clean='all')
@classmethod
def clean_all_except_last(cls):
return cls(when_to_clean='EACH_STEP', current_src_states_allowed=cls.CURRENT_SRC_STATES_ALLOWED,
which_src_steps_to_clean='all_before_this_one')
@property
def when_to_clean(self):
return self._when_to_clean
@when_to_clean.setter
def when_to_clean(self, when_to_clean):
if when_to_clean not in self.WHEN_TO_CLEAN:
raise ValueError('Argument "when_to_clean" is "{}" while it should be one of the following : '
'{}'.format(when_to_clean,
', '.join(self.WHEN_TO_CLEAN)))
self._when_to_clean = when_to_clean
@property
def current_src_states_allowed(self):
return self._current_src_states_allowed
@current_src_states_allowed.setter
def current_src_states_allowed(self, current_src_states_allowed):
for current_src_state_allowed in current_src_states_allowed:
if current_src_state_allowed not in self.CURRENT_SRC_STATES_ALLOWED:
raise ValueError('One of the items in "current_src_states_allowed" is "{}" while it should be one of '
'the following : {}'.format(current_src_state_allowed,
', '.join(self.CURRENT_SRC_STATES_ALLOWED)))
self._current_src_states_allowed = current_src_states_allowed
@property
def which_src_steps_to_clean(self):
return self._which_src_steps_to_clean
@which_src_steps_to_clean.setter
def which_src_steps_to_clean(self, which_src_steps_to_clean):
if which_src_steps_to_clean in ['all', 'this_one', 'all_before_this_one', 'all_before_the_previous_one',
'the_one_before_this_one', 'the_one_before_the_previous_one']:
self._which_src_steps_to_clean = which_src_steps_to_clean
self._which_src_steps_to_clean_pattern = which_src_steps_to_clean
elif which_src_steps_to_clean[:7] == 'single_':
sp = which_src_steps_to_clean.split('_')
if len(sp) != 2:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "single_" but does not '
'contain exactly one underscore ... '
'Impossible to identify the step to clean.'.format(which_src_steps_to_clean))
try:
istep = int(sp[1])
except ValueError:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "single_" but the '
'remaining part is not an integer ... '
'Impossible to identify the step to clean.'.format(which_src_steps_to_clean))
if istep < 1:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "single_" but the '
'remaining part is an integer < 1 ... '
'Impossible to identify the step to clean.'.format(which_src_steps_to_clean))
self._which_src_steps_to_clean = which_src_steps_to_clean
self._which_src_steps_to_clean_pattern = 'single_N'
elif (len(which_src_steps_to_clean) > 29 and which_src_steps_to_clean[:15] == 'all_before_the_' and
which_src_steps_to_clean[-14:] == '_previous_ones'):
sp = which_src_steps_to_clean.split('_')
if len(sp) != 6:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "all_before_the_", '
'ends with "_previous_ones" but does not have exactly 5 underscores ... Impossible to '
'identify the steps to clean.'.format(which_src_steps_to_clean))
try:
istep = int(sp[3])
except ValueError:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "all_before_the_", '
'ends with "_previous_ones" but the remaining part is not an integer ... '
'Impossible to identify the steps to clean.'.format(which_src_steps_to_clean))
if istep < 2:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "all_before_the_", '
'ends with "_previous_ones" but the remaining part is an integer less than 2 ... '
'Impossible to identify the steps to clean.'.format(which_src_steps_to_clean))
self._which_src_steps_to_clean = which_src_steps_to_clean
self._which_src_steps_to_clean_pattern = 'all_before_the_N_previous_ones'
elif (len(which_src_steps_to_clean) > 33 and which_src_steps_to_clean[:19] == 'the_one_before_the_' and
which_src_steps_to_clean[-14:] == '_previous_ones'):
sp = which_src_steps_to_clean.split('_')
if len(sp) != 7:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "the_one_before_the_", '
'ends with "_previous_ones" but does not have exactly 6 underscores ... Impossible to '
'identify the steps to clean.'.format(which_src_steps_to_clean))
try:
istep = int(sp[4])
except ValueError:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "the_one_before_the_", '
'ends with "_previous_ones" but the remaining part is not an integer ... '
'Impossible to identify the steps to clean.'.format(which_src_steps_to_clean))
if istep < 2:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". It starts with "the_one_before_the_", '
'ends with "_previous_ones" but the remaining part is an integer less than 2 ... '
'Impossible to identify the steps to clean.'.format(which_src_steps_to_clean))
self._which_src_steps_to_clean = which_src_steps_to_clean
self._which_src_steps_to_clean_pattern = 'the_one_before_the_N_previous_ones'
#TODO: implement "the_M_before_the_N_previous_ones" if needed ...
else:
raise ValueError('Argument "which_src_steps_to_clean" is "{}". This is not allowed. See documentation for '
'the allowed options.'.format(which_src_steps_to_clean))
def steps_to_clean(self, this_step_index, this_step_state):
if this_step_state not in self.current_src_states_allowed:
return []
if self._which_src_steps_to_clean_pattern == 'all':
return list(range(1, this_step_index+1))
elif self._which_src_steps_to_clean_pattern == 'this_one':
return [this_step_index]
elif self._which_src_steps_to_clean_pattern == 'the_one_before_this_one':
if this_step_index == 1:
return []
return [this_step_index-1]
elif self._which_src_steps_to_clean_pattern == 'the_one_before_the_previous_one':
if this_step_index <= 2:
return []
return [this_step_index-2]
elif self._which_src_steps_to_clean_pattern == 'the_one_before_the_N_previous_ones':
iprev = int(self.which_src_steps_to_clean.split('_')[4])
istep = this_step_index-iprev-1
if istep < 1:
return []
return [istep]
elif self._which_src_steps_to_clean_pattern == 'all_before_this_one':
return list(range(1, this_step_index))
elif self._which_src_steps_to_clean_pattern == 'all_before_the_previous_one':
return list(range(1, this_step_index-1))
elif self._which_src_steps_to_clean_pattern == 'all_before_the_N_previous_ones':
iprev = int(self.which_src_steps_to_clean.split('_')[3])
return list(range(1, this_step_index-iprev))
elif self._which_src_steps_to_clean_pattern == 'single_N':
istep = int(self.which_src_steps_to_clean.split('_')[1])
if istep > this_step_index:
return []
return [istep]
raise ValueError('Should not reach this point in "steps_to_clean" of "SRCCleanerOptions"')
def as_dict(self):
return {'@class': self.__class__.__name__,
'@module': self.__class__.__module__,
'when_to_clean': self.when_to_clean,
'current_src_states_allowed': self.current_src_states_allowed,
'which_src_steps_to_clean': self.which_src_steps_to_clean}
@classmethod
def from_dict(cls, d):
return cls(when_to_clean=d['when_to_clean'],
current_src_states_allowed=d['current_src_states_allowed'],
which_src_steps_to_clean=d['which_src_steps_to_clean'])
class SRCCleaner(MSONable):
# RECURRENCE_TYPES = ['END', 'STEP', 'STEP_EXCEPT_END']
# STATE_TYPES = {'END': ['FINALIZED', 'UNRECOVERABLE', 'MAXRESTARTS'],
# 'STEP': ['RECOVERABLE', 'UNRECOVERABLE', 'MAXRESTARTS', 'FINALIZED'],
# 'STEP_EXCEPT_END': ['RECOVERABLE']}
# CLEANING_TYPES = ['ALL', 'ALL_EXCEPT_LAST', 'LAST']
SRC_TYPES = ['setup', 'run', 'control', 'src_root']
def __init__(self, cleaners=None, src_type='run', cleaner_options=SRCCleanerOptions.clean_all()):
if cleaners is None:
self.cleaners = []
else:
self.cleaners = cleaners
self.src_type = src_type
self.cleaner_options = cleaner_options
@property
def cleaners(self):
return self._cleaners
@cleaners.setter
def cleaners(self, cleaners):
if cleaners is None:
self._cleaners = []
elif isinstance(cleaners, list):
for cl in cleaners:
if not isinstance(cl, Cleaner):
raise ValueError('One of the items in cleaners is not a Cleaner instance but is an instance '
'of {}'.format(cl.__class__.__name__))
self._cleaners = cleaners
else:
raise ValueError('The variable "cleaners" should be either None or a list of Cleaner objects')
@property
def src_type(self):
return self._src_type
@src_type.setter
def src_type(self, src_type):
if src_type not in self.SRC_TYPES:
raise ValueError('Argument "src_type" should be one of the following : '
'{}'.format(', '.join(self.SRC_TYPES)))
self._src_type = src_type
def src_dir_to_clean(self, src_directories):
return src_directories['{}_dir'.format(self.src_type)]
def check_recurrence(self, src_task_index, state):
# Cleaning is only triggered for SRC states allowed by the cleaner options
return state in self.cleaner_options.current_src_states_allowed
def clean(self, last_src_directories, previous_src_dirs, src_task_index, state):
# The steps to clean are obtained from the cleaner options; the current step corresponds to
# last_src_directories, while earlier steps are looked up in previous_src_dirs.
steps = self.cleaner_options.steps_to_clean(this_step_index=src_task_index.index, this_step_state=state)
dirs_to_clean = []
for istep in steps:
if istep == src_task_index.index:
dirs_to_clean.append(self.src_dir_to_clean(last_src_directories))
else:
dirs_to_clean.append(self.src_dir_to_clean(previous_src_dirs[istep-1]))
for dir_to_clean in dirs_to_clean:
for cleaner in self.cleaners:
cleaner.clean(root_directory=dir_to_clean)
def as_dict(self):
return {'@class': self.__class__.__name__,
'@module': self.__class__.__module__,
'cleaners': [c.as_dict() for c in self.cleaners],
'src_type': self.src_type,
'cleaner_options': self.cleaner_options.as_dict()}
@classmethod
def from_dict(cls, d):
return cls(cleaners=[Cleaner.from_dict(d_c) for d_c in d['cleaners']],
src_type=d['src_type'],
cleaner_options=SRCCleanerOptions.from_dict(d['cleaner_options']))
class SRCCleaning(MSONable):
def __init__(self, src_cleaners=None):
if src_cleaners is None:
self.src_cleaners = []
else:
self.src_cleaners = src_cleaners
def clean(self, src_directories, previous_src_dirs, state):
pass
def as_dict(self):
return {'@class': self.__class__.__name__,
'@module': self.__class__.__module__,
'src_cleaners': [src_c.as_dict() for src_c in self.src_cleaners]}
@classmethod
def from_dict(cls, d):
return cls(src_cleaners=[SRCCleaner.from_dict(d_src_c) for d_src_c in d['src_cleaners']])
def createSRCFireworks(setup_task, run_task, control_task, spec=None, initialization_info=None,
task_index=None, setup_spec_update=None, run_spec_update=None):
# Make a full copy of the spec
if spec is None:
spec = {}
if initialization_info is None:
initialization_info = {}
spec = copy.deepcopy(spec)
spec['_add_launchpad_and_fw_id'] = True
spec['_add_fworker'] = True
# Initialize the SRC task_index
if task_index is not None:
src_task_index = SRCTaskIndex.from_any(task_index)
else:
# src_task_index = SRCTaskIndex.from_any('unknown-task')
src_task_index = SRCTaskIndex.from_task(run_task)
spec['SRC_task_index'] = src_task_index
# SetupTask
setup_spec = copy.deepcopy(spec)
# Remove any initial queue_adapter_update from the spec
setup_spec.pop('queue_adapter_update', None)
setup_spec = set_short_single_core_to_spec(setup_spec)
setup_spec['_preserve_fworker'] = True
setup_spec['_pass_job_info'] = True
setup_spec['initialization_info'] = initialization_info
setup_spec.update({} if setup_spec_update is None else setup_spec_update)
setup_fw = Firework(setup_task, spec=setup_spec, name=src_task_index.setup_str)
# RunTask
run_spec = copy.deepcopy(spec)
run_spec['SRC_task_index'] = src_task_index
run_spec['_preserve_fworker'] = True
run_spec['_pass_job_info'] = True
run_spec.update({} if run_spec_update is None else run_spec_update)
run_fw = Firework(run_task, spec=run_spec, name=src_task_index.run_str)
# ControlTask
control_spec = copy.deepcopy(spec)
control_spec = set_short_single_core_to_spec(control_spec)
control_spec['SRC_task_index'] = src_task_index
control_spec['_allow_fizzled_parents'] = True
control_fw = Firework(control_task, spec=control_spec, name=src_task_index.control_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
run_fw.fw_id: [control_fw.fw_id]}
return {'setup_fw': setup_fw, 'run_fw': run_fw, 'control_fw': control_fw, 'links_dict': links_dict,
'fws': [setup_fw, run_fw, control_fw]}
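# Usage sketch (hypothetical tasks): src_fws = createSRCFireworks(setup_task=my_setup,
# run_task=my_run, control_task=my_control)
# wf = Workflow(fireworks=src_fws['fws'], links_dict=src_fws['links_dict'])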
class SRCTaskIndex(MSONable):
ALLOWED_CHARS = ['-']
def __init__(self, task_type, index=1):
self.set_task_type(task_type=task_type)
self.index = index
def set_task_type(self, task_type):
prefix_test_string = str(task_type)
for allowed_char in self.ALLOWED_CHARS:
prefix_test_string = prefix_test_string.replace(allowed_char, "")
if not prefix_test_string.isalnum():
ac_str = ', '.join(['"{}"'.format(ac) for ac in self.ALLOWED_CHARS])
raise ValueError('task_type should only contain letters '
'and the following characters : {}'.format(ac_str))
self.task_type = task_type
@property
def index(self):
return self._index
@index.setter
def index(self, index):
if isinstance(index, int):
self._index = index
elif isinstance(index, str):
try:
myindex = int(index)
self._index = myindex
except Exception:
raise ValueError('Index in SRCTaskIndex should be an integer or a string '
'that can be cast into an integer')
else:
raise ValueError('Index in SRCTaskIndex should be an integer or a string '
'that can be cast into an integer')
def increase_index(self):
self.index += 1
def __add__(self, other):
if not isinstance(other, int):
raise ValueError('Can only add an integer to the index of a SRCTaskIndex')
self.index += other
return self
def __str__(self):
return '_'.join([self.task_type, str(self.index)])
@property
def setup_str(self):
return '_'.join(['setup', self.__str__()])
@property
def run_str(self):
return '_'.join(['run', self.__str__()])
@property
def control_str(self):
return '_'.join(['control', self.__str__()])
@classmethod
def from_string(cls, SRC_task_index_string):
sp = SRC_task_index_string.split('_')
# if len(sp) not in [2, 3]:
# raise ValueError('SRC_task_index_string should contain 1 or 2 underscores ("_") '
# 'while it contains {:d}'.format(len(sp)-1))
if len(sp) == 1:
return cls(task_type=sp[0])
if any([len(part) == 0 for part in sp]):
raise ValueError('SRC_task_index_string has an empty part when separated by underscores ...')
if len(sp) == 2:
return cls(task_type=sp[0], index=sp[1])
elif len(sp) == 3:
if sp[0] not in ['setup', 'run', 'control']:
raise ValueError('SRC_task_index_string should start with "setup", "run" or "control" when 3 parts are '
'identified')
return cls(task_type=sp[1], index=sp[2])
raise ValueError('SRC_task_index_string contains more than 2 underscores ("_") : '
'impossible to identify the task type and index')
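# Examples (illustrative): from_string('abinit') -> task_type 'abinit', index 1;
# from_string('abinit_3') -> task_type 'abinit', index 3;
# from_string('run_abinit_3') -> task_type 'abinit', index 3.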
@classmethod
def from_any(cls, SRC_task_index):
if isinstance(SRC_task_index, six.string_types):
return cls.from_string(SRC_task_index)
elif isinstance(SRC_task_index, SRCTaskIndex):
return cls(task_type=SRC_task_index.task_type, index=SRC_task_index.index)
else:
raise ValueError('SRC_task_index should be an instance of "str" or "SRCTaskIndex" '
'in "from_any" class method')
@classmethod
def from_task(cls, task):
return cls(task_type=task.task_type)
@classmethod
def from_dict(cls, d):
return cls(task_type=d['task_type'], index=d['index'])
def as_dict(self):
return {'@class': self.__class__.__name__,
'@module': self.__class__.__module__,
'task_type': self.task_type,
'index': self.index}
def get_queue_adapter_update(qtk_queueadapter, corrections, qa_params=None):
if qa_params is None:
qa_params = ['timelimit', 'mem_per_proc', 'master_mem_overhead']
queue_adapter_update = {}
for qa_param in qa_params:
if qa_param == 'timelimit':
queue_adapter_update[qa_param] = qtk_queueadapter.timelimit
elif qa_param == 'mem_per_proc':
queue_adapter_update[qa_param] = qtk_queueadapter.mem_per_proc
elif qa_param == 'master_mem_overhead':
queue_adapter_update[qa_param] = qtk_queueadapter.master_mem_overhead
else:
raise ValueError('Wrong queue adapter parameter for update')
for correction in corrections:
for action in correction['actions']:
if action['object']['key'] == 'qtk_queueadapter':
qa_update = action['action']['_set']
queue_adapter_update.update(qa_update)
return queue_adapter_update
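# Sketch of the expected "corrections" structure (hypothetical values):
# corrections = [{'actions': [{'object': {'key': 'qtk_queueadapter'},
# 'action': {'_set': {'timelimit': 7200}}}]}]
# get_queue_adapter_update(qadapter, corrections) then returns the requested qa_params
# overridden by the '_set' entries found in the corrections.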
################
# Exceptions
################
class SRCError(Exception):
pass
class SetupError(SRCError):
pass
class RunError(SRCError):
pass
class ControlError(SRCError):
pass
################
# Timings
################
class FWTime(MSONable):
def __init__(self, fw_name, fw_id, ncpus, fwtime_secs, clustertime_secs=None):
self.fw_name = fw_name
self.fw_id = fw_id
self.ncpus = ncpus
self.fwtime_secs = fwtime_secs
self.clustertime_secs = clustertime_secs
@property
def time_per_cpu(self):
if self.clustertime_secs is not None:
return self.clustertime_secs
return self.fwtime_secs
@property
def total_time(self):
return self.ncpus*self.time_per_cpu
def as_dict(self):
dd = dict(fw_name=self.fw_name, fw_id=self.fw_id,
ncpus=self.ncpus, fwtime_secs=self.fwtime_secs,
clustertime_secs=self.clustertime_secs)
return dd
@classmethod
def from_dict(cls, d):
return cls(fw_name=d['fw_name'], fw_id=d['fw_id'],
ncpus=d['ncpus'], fwtime_secs=d['fwtime_secs'], clustertime_secs=d['clustertime_secs'])
@classmethod
def from_fw_id(cls, fw_id, lpad=None):
if lpad is None:
lpad = LaunchPad.auto_load()
fw_dict = lpad.get_fw_dict_by_id(fw_id=fw_id)
name = fw_dict['name']
# TODO: find a way to know the number of cpus here ? Or should we always assume it is 1 ?
ncpus = 1
fwtime_secs = 0.0
# TODO: get the runtime from the cluster (taking the reservation_id and things like that ?)
clustertime_secs = None
return cls(fw_name=name, fw_id=fw_id,
ncpus=ncpus, fwtime_secs=fwtime_secs, clustertime_secs=clustertime_secs)
class SRCFWTime(FWTime):
def __init__(self, fw_name, fw_id, ncpus, fwtime_secs, clustertime_secs=None,
src_type=None, task_type=None, task_index=None):
super().__init__(fw_name=fw_name, fw_id=fw_id, ncpus=ncpus,
fwtime_secs=fwtime_secs, clustertime_secs=clustertime_secs)
self.src_type = src_type
self.task_type = task_type
self.task_index = task_index
def as_dict(self):
dd = super().as_dict()
dd.update(dict(src_type=self.src_type, task_type=self.task_type,
task_index=self.task_index.as_dict() if self.task_index is not None else None))
return dd
@classmethod
def from_dict(cls, d):
return cls(fw_name=d['fw_name'], fw_id=d['fw_id'],
src_type=d['src_type'], task_type=d['task_type'],
task_index=SRCTaskIndex.from_any(d['task_index']),
ncpus=d['ncpus'], fwtime_secs=d['fwtime_secs'], clustertime_secs=d['clustertime_secs'])
@classmethod
def from_fw_id(cls, fw_id, lpad=None):
if lpad is None:
lpad = LaunchPad.auto_load()
fw_dict = lpad.get_fw_dict_by_id(fw_id=fw_id)
name = fw_dict['name']
# TODO: find a way to know the number of cpus here ? Or should we always assume it is 1 ?
ncpus = 1
fwtime_secs = 0.0
# TODO: get the runtime from the cluster (taking the reservation_id and things like that ?)
clustertime_secs = None
# The SRC information is recovered from the firework name, which is assumed to follow the
# "<src_type>_<task_type>_<index>" convention used by SRCTaskIndex.
task_index = SRCTaskIndex.from_string(name)
return cls(fw_name=name, fw_id=fw_id, ncpus=ncpus, fwtime_secs=fwtime_secs,
clustertime_secs=clustertime_secs, src_type=name.split('_')[0],
task_type=task_index.task_type, task_index=task_index)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module estimates the X-rays and extreme-ultraviolet luminosity of a star.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
import astropy.units as u
__all__ = ["x_rays_luminosity"]
# Calculates the X-rays luminosity
def x_rays_luminosity(b_v_color, age, age_uncertainty, n_sample=10000):
"""
Parameters
----------
b_v_color
age
age_uncertainty
n_sample
Returns
-------
"""
# First let's convert the unit of age to year and create a random sample
age = age.to(u.yr).value
age_sigma = age_uncertainty.to(u.yr).value
ages = np.random.normal(loc=age, scale=age_sigma, size=n_sample)
# Hard-coding the several cases of Jackson+2012
params = {'case': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'b_v_0': [0.290, 0.450, 0.565, 0.675, 0.790, 0.935, 1.275],
'b_v_1': [0.450, 0.565, 0.675, 0.790, 0.935, 1.275, 1.410],
'saturation_log_age': [7.87, 8.35, 7.84, 8.03, 7.90, 8.28, 8.21],
'sat_log_age_unc': [0.10, 0.05, 0.06, 0.06, 0.05, 0.07, 0.04],
'saturation_lum_x': [-4.28, -4.24, -3.67, -3.71, -3.36, -3.35,
-3.14],
'sat_lum_x_unc': [0.05, 0.02, 0.01, 0.05, 0.02, 0.01, 0.02],
'alpha': [1.22, 1.24, 1.13, 1.28, 1.40, 1.09, 1.18],
'alpha_unc': [0.30, 0.19, 0.13, 0.17, 0.11, 0.28, 0.31]
}
# First we identify the case
cases = params['case']
ind = None
for i in range(len(cases)):
if params['b_v_0'][i] < b_v_color < params['b_v_1'][i]:
ind = i
break
else:
pass
# The saturated case
if np.log10(age) <= params['saturation_log_age'][ind]:
lumx_lumbol_mu = 10 ** params['saturation_lum_x'][ind]
lumx_lumbol_sigma = 10 ** params['sat_lum_x_unc'][ind]
lumx_lumbol_sample = np.random.normal(loc=lumx_lumbol_mu,
scale=lumx_lumbol_sigma,
size=n_sample)
else:
alpha = params['alpha'][ind]
alpha_sigma = params['alpha_unc'][ind]
alpha_sample = np.random.normal(loc=alpha, scale=alpha_sigma,
size=n_sample)
sat_age = 10 ** params['saturation_log_age'][ind]
sat_age_sigma = 10 ** params['sat_log_age_unc'][ind]
sat_age_sample = np.random.normal(loc=sat_age, scale=sat_age_sigma,
size=n_sample)
sat_lumx_lumbol = 10 ** params['saturation_lum_x'][ind]
sat_lumx_lumbol_sigma = 10 ** params['sat_lum_x_unc'][ind]
sat_lumx_lumbol_sample = np.random.normal(loc=sat_lumx_lumbol,
scale=sat_lumx_lumbol_sigma,
size=n_sample)
lumx_lumbol_sample = sat_lumx_lumbol_sample * (ages / sat_age_sample) \
** (-alpha_sample)
# Finally calculate lumx_lumbol
percentiles = np.percentile(lumx_lumbol_sample, [16, 50, 84])
q = np.diff(percentiles)
lumx_lumbol = percentiles[1]
lumx_lumbol_sigma_up = q[1]
lumx_lumbol_sigma_low = q[0]
return lumx_lumbol, lumx_lumbol_sigma_up, lumx_lumbol_sigma_low
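# Minimal usage sketch (illustrative values):
# lx_lbol, sigma_up, sigma_low = x_rays_luminosity(0.65, 4.6E9 * u.yr, 5.0E8 * u.yr)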
|
import svgwrite as sw
# Require the python package svgwrite
# Input sequence here for graphic generation
sequence = "TTLTNLTTLESIK"
output_filename = "TTLTNLTTLESIK.svg"
def draw_aa(group, canvas, aa, x_pos, y_pos, font_size=10, font_style="normal", font_family="Courier"):
# Function for drawing amino acid letter
aa_group = group.add(canvas.g(fill="black", font_style=font_style, font_family=font_family))
aa_group.add(canvas.text(aa, insert=(x_pos, y_pos), font_size=font_size))
def draw_divider(group, canvas, points, fill="none", stroke="black"):
# Function for drawing transition divider.
group.add(canvas.polyline(points, fill=fill, stroke=stroke))
def draw_transition_label(group, canvas, transition, number, pos_x, pos_y, font_size=6, subscript_font_size=3,
font_style="normal", font_family="Helvetica"):
# Function for drawing transition label
label_group = group.add(canvas.g(fill="black",
font_style=font_style,
font_size=font_size, font_family=font_family))
transition = label_group.add(canvas.text(transition, insert=(pos_x, pos_y)))
transition.add(canvas.tspan(number, baseline_shift="sub", font_size=subscript_font_size))
def draw_peptide_transition(group, canvas, sequence, pos_x, pos_y, distance_between_aa=12):
# Function for drawing the sequence
sequence_len = len(sequence)
for i in range(sequence_len):
# Iterate through each amino acid and draw out
draw_aa(group, canvas, sequence[i], pos_x + i * distance_between_aa, pos_y)
if i != sequence_len - 1:
draw_divider(group, canvas, [(pos_x + i * distance_between_aa + distance_between_aa + 6, pos_y - 8),
(pos_x + i * distance_between_aa + distance_between_aa - 3, pos_y - 8),
(pos_x + i * distance_between_aa + distance_between_aa - 3, pos_y - 3),
(pos_x + i * distance_between_aa + distance_between_aa - 3, pos_y + 2),
(pos_x + i * distance_between_aa, pos_y + 2)])
draw_transition_label(group, canvas, "b", i + 1, pos_x + i * distance_between_aa, pos_y + 8)
draw_transition_label(group, canvas, "y", i + 1, (sequence_len - i) * distance_between_aa, pos_y - 11)
if __name__ == "__main__":
    # Create the drawing canvas and the svg file.
canvas = sw.Drawing(output_filename, size=(200, 100))
# Create the initial graphic object group for holding all the svg elements of the graph.
peptide_group = canvas.add(canvas.g(id="peptide-transition"))
# Draw the graph
draw_peptide_transition(peptide_group, canvas, sequence, 10, 20)
canvas.save()
|
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
from django.urls import reverse
class Neighbourhood(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('neighbourhood', kwargs={'pk':self.pk})
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile_pics')
email = models.CharField(max_length=30)
    neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, null=True)
def __str__(self):
return f'{self.user.username} Profile'
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        # Downscale oversized profile pictures in place after saving.
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
@test
def check_random_ustr_01(request):
for i in range(100):
s = Random.ustr()
print(s)
assert len(s) == 36
@test
def check_random_ustr_02(request):
for i in range(1000):
s = Random.ustr(prefix="abc")
print(s)
assert len(s) == 40
@test
def check_random_ustr_03(request):
for i in range(1000):
s = Random.ustr(prefix="abc", delim="*")
assert len(s) == 40
s = Random.ustr(prefix="abc", delim="***")
assert len(s) == 42
s = Random.ustr(delim="***")
assert len(s) == 36 # delim is ignored if prefix is not defined.
@test
def check_random_ustr_04(request):
for i in range(1000):
s = Random.ustr(minlen=17) # less than half of base string
print(s)
assert len(s) >= 17
assert len(s) <= 36
s = Random.ustr(minlen=18) # == half of base string
assert len(s) >= 18
assert len(s) <= 36
s = Random.ustr(minlen=19) # > half of base string
assert len(s) >= 19
assert len(s) <= 38
s = Random.ustr(minlen=19, prefix="abc", delim="*") # less than half of base string
assert len(s) >= 19
assert len(s) <= 40
s = Random.ustr(minlen=20, prefix="abc", delim="*") # == half of base string
assert len(s) >= 20
assert len(s) <= 40
s = Random.ustr(minlen=21, prefix="abc", delim="*") # > half of base string
assert len(s) >= 21
assert len(s) <= 42
s = Random.ustr(minlen=71) # less than twice of base string
print(s)
assert len(s) >= 71
assert len(s) <= 142
s = Random.ustr(minlen=72) # == twice of base string
assert len(s) >= 72
assert len(s) <= 144
s = Random.ustr(minlen=73) # > twice of base string
assert len(s) >= 73
assert len(s) <= 146
s = Random.ustr(minlen=79, prefix="abc", delim="*") # less than twice of base string
assert len(s) >= 79
assert len(s) <= 79 * 2
s = Random.ustr(minlen=80, prefix="abc", delim="*") # == twice of base string
assert len(s) >= 80
assert len(s) <= 80 * 2
s = Random.ustr(minlen=81, prefix="abc", delim="*") # > twice of base string
assert len(s) >= 81
assert len(s) <= 81 * 2
@test
def check_random_ustr_05(request):
for i in range(1000):
s = Random.ustr(maxlen=35) # < base string
print(s)
assert len(s) <= 35
s = Random.ustr(maxlen=36) # = base string
assert len(s) <= 36
s = Random.ustr(maxlen=37) # > base string
assert len(s) <= 37
s = Random.ustr(prefix="abc", delim="*", maxlen=39) # < base string
assert len(s) <= 39
s = Random.ustr(prefix="abc", delim="*", maxlen=40) # = base string
assert len(s) <= 40
s = Random.ustr(prefix="abc", delim="*", maxlen=41) # > base string
assert len(s) <= 41
@test
def check_random_ustr_06(request):
for i in range(1000):
s = Random.ustr(prefix="abc", delim="*", minlen=60, maxlen=85)
print(s)
assert len(s) >= 60
assert len(s) <=85
@test(xfail=True)
def check_random_ustr_07_error(request):
s = Random.ustr(prefix="abc", delim="*", minlen=200, maxlen=100)
@test
def check_random_ustr_08(request):
s = Random.ustr(prefix="abc", delim="*", minlen=24)
@test
def check_random_ustr_09(request):
s = Random.ustr(prefix="abc", delim="*", minlen=24) # less than base string. reset to base string length
@test(xfail=True)
def check_random_ustr_10_error(request):
s = Random.ustr(prefix="abc", delim="*", maxlen=24, strict=True) # exception
@test
def check_random_fstr_01(request):
s = Random.fixed_length_str(length=40)
print(s)
assert len(s) == 40
@test
def check_random_class_locale_property_basic(request):
print(Random.locale)
print(Random.locale.en)
@test
def check_random_class_locale_property_supported(request):
print(Random.locale.supported)
@test
def check_random_class_locale_property_objreuse(request):
id1 = id(Random.locale.en)
id2 = id(Random.locale.en)
assert id1 == id2
@test
def check_random_class_local_first_name(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.first_name(locale=locale)))
@test
def check_random_class_local_last_name(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.last_name(locale=locale)))
@test
def check_random_class_local_name(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.name(locale=locale)))
@test
def check_random_class_local_phone(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.phone(locale=locale)))
@test
def check_random_class_local_email(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.email(locale=locale)))
@test
def check_random_class_local_street_name(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.street_name(locale=locale)))
@test
def check_random_class_local_street_number(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.street_number(locale=locale)))
@test
def check_random_class_local_house_number(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.house_number(locale=locale)))
@test
def check_random_class_local_postal_code(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.postal_code(locale=locale)))
@test
def check_random_class_local_city(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.city(locale=locale)))
@test
def check_random_class_local_country(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.country(locale=locale)))
@test
def check_random_class_local_sentence(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.sentence(locale=locale)))
@test
def check_random_class_local_alphabet(request):
for lname in Random.locale.supported:
locale = getattr(Random.locale, lname)
print("{}: {}".format(lname, Random.alphabet(locale=locale)))
@test
def check_random_class_person_1(request):
print(Random.person())
@test
def check_random_class_address_1(request):
print(Random.address())
@test
def check_random_class_email_1(request):
print(Random.email())
@test
def check_random_class_email_2(request):
print(Random.email(name="test"))
@test
def check_random_class_email_3(request):
print(Random.email(domain="test.com"))
@test
def check_random_class_email_4(request):
print(Random.email(name="test", domain="test.com"))
@test
def check_random_class_street_number_1(request):
print(Random.street_number())
@test
def check_random_class_street_number_2(request):
print(Random.street_number(prefix="St No."))
@test
def check_random_class_house_number_1(request):
print(Random.house_number())
@test
def check_random_class_house_number_2(request):
print(Random.house_number(prefix="H.No."))
@test
def check_random_class_alphabet_1(request):
print(Random.alphabet())
@test
def check_random_class_alphabet_2(request):
print(Random.alphabet(lower_case=True))
@test
def check_random_class_file_1(request):
from arjuna.engine.data.generator.file import File
for name in vars(File):
if not name.startswith("__"):
print(getattr(Random, name)())
@test
def check_random_class_color_1(request):
print(Random.color())
print(Random.rgb_color())
print(Random.hex_color()) |
#
# @lc app=leetcode.cn id=69 lang=python3
#
# [69] Sqrt(x)
#
# @lc code=start
class Solution:
    def mySqrt(self, x: int) -> int:
        # The original file only contained a placeholder; this is a standard
        # binary-search sketch for the integer square root.
        lo, hi = 0, x
        while lo < hi:
            mid = (lo + hi + 1) // 2
            if mid * mid <= x:
                lo = mid
            else:
                hi = mid - 1
        return lo
# @lc code=end |
# -*- encoding: utf-8 -*-
"""
=================================================
@path : pointnet.pytorch -> tools.py
@IDE : PyCharm
@Author : zYx.Tom, [email protected]
@Date : 2021-12-23 15:11
@Version: v0.1
@License: (C)Copyright 2020-2021, zYx.Tom
@Reference:
@Desc :
==================================================
"""
import logging
from datetime import datetime
# The default level is WARNING: only messages at WARNING and above are emitted.
# Use basicConfig() to set the log level and related options.
logging.basicConfig(
level=logging.DEBUG,
filename='./log/demo.log',
filemode='w',
format="%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S"
)
def show_subtitle(message):
    # Print a subtitle for the running module.
    print('-' * 15, '>' + message + '<', '-' * 15)
def show_title(message):
    # Print a title for the running module.
    print()
    print('=' * 15, '>' + message + '<', '=' * 15)
def log_title(message):
message = '=' * 15 + '>' + message + '<' + '=' * 15
logging.info(message)
def log_subtitle(message):
message = '-' * 15 + '>' + message + '<' + '-' * 15
logging.info(message)
def log_debug(message):
logging.debug(message)
def log_info(message):
logging.info(message)
def beep_end():
    # Beep when the run finishes (Windows only).
    import winsound
    winsound.Beep(600, 500)
def main(name):
    print(f'Hi, {name}', datetime.now())
    show_title("title")
    show_subtitle("subtitle")
if __name__ == "__main__":
__author__ = 'zYx.Tom'
main(__author__)
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Menu
from .icons import icon_cabinet_list
menu_cabinets = Menu(
icon_class=icon_cabinet_list, label=_('Cabinets'), name='cabinets menu'
)
|
"""Visualization helpers."""
from contextlib import contextmanager
import copy
import os.path as op
import numpy as np
from scipy import linalg
from mne import read_proj, read_events, pick_types
from mne.utils import verbose
from mne.viz.utils import tight_layout, plt_show
from ._sss import compute_good_coils
from ._paths import get_raw_fnames
def _viz_raw_ssp_events(p, subj, ridx):
"""Plot filtered cleaned raw trace with ExG events"""
from ._ssp import _raw_LRFCP
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
raw_names = get_raw_fnames(p, subj, 'sss', False, False, ridx)
pre_list = [r for ri, r in enumerate(raw_names)
if ri in p.get_projs_from]
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
projs = read_proj(all_proj)
colors = dict()
ev = np.zeros((0, 3), int)
for n, c, cid in zip(['ecg', 'blink'], ['r', 'b'], [999, 998]):
fname = op.join(pca_dir, 'preproc_%s-eve.fif' % n)
if op.isfile(fname):
ev = np.concatenate((ev, read_events(fname)))
colors[cid] = c
ev = ev[np.argsort(ev[:, 0], axis=0)]
raw = _raw_LRFCP(pre_list, p.proj_sfreq, None, None, p.n_jobs_fir,
p.n_jobs_resample, projs, None, p.disp_files,
method='fir', filter_length=p.filter_length,
force_bads=False, l_trans=p.hp_trans, h_trans=p.lp_trans)
raw.plot(events=ev, event_color=colors)
def clean_brain(brain_img):
"""Remove borders of a brain image and make transparent."""
bg = (brain_img == brain_img[0, 0]).all(-1)
brain_img = brain_img[(~bg).any(axis=-1)]
brain_img = brain_img[:, (~bg).any(axis=0)]
alpha = 255 * np.ones(brain_img.shape[:-1], np.uint8)
x, y = np.where((brain_img == 255).all(-1))
alpha[x, y] = 0
return np.concatenate((brain_img, alpha[..., np.newaxis]), -1)
def plot_colorbar(lims, ticks=None, ticklabels=None, figsize=(1, 2),
labelsize='small', ticklabelsize='x-small', ax=None,
label='', tickrotation=0., orientation='vertical',
end_labels=None, colormap='mne', transparent=False,
diverging=None):
import matplotlib.pyplot as plt
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
from mne.viz._3d import _limits_to_control_points
with plt.rc_context({'axes.labelsize': labelsize,
'xtick.labelsize': ticklabelsize,
'ytick.labelsize': ticklabelsize}):
if diverging is None:
diverging = (colormap == 'mne') # simple heuristic here
if diverging:
use_lims = dict(kind='value', pos_lims=lims)
else:
use_lims = dict(kind='value', lims=lims)
cmap, scale_pts, diverging, _, none_ticks = _limits_to_control_points(
use_lims, 0, colormap, transparent, linearize=True)
vmin, vmax = scale_pts[0], scale_pts[-1]
if ticks is None:
ticks = none_ticks
del colormap, lims, use_lims
adjust = (ax is None)
if ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
else:
fig = ax.figure
norm = Normalize(vmin=vmin, vmax=vmax)
if ticklabels is None:
ticklabels = ticks
assert len(ticks) == len(ticklabels)
cbar = ColorbarBase(ax, cmap, norm=norm, ticks=ticks, label=label,
orientation=orientation)
for key in ('left', 'top',
'bottom' if orientation == 'vertical' else 'right'):
ax.spines[key].set_visible(False)
cbar.set_ticklabels(ticklabels)
cbar.patch.set(facecolor='0.5', edgecolor='0.5')
if orientation == 'horizontal':
plt.setp(ax.xaxis.get_majorticklabels(), rotation=tickrotation)
else:
plt.setp(ax.yaxis.get_majorticklabels(), rotation=tickrotation)
cbar.outline.set_visible(False)
lims = np.array(list(ax.get_xlim()) + list(ax.get_ylim()))
if end_labels is not None:
if orientation == 'horizontal':
delta = np.diff(lims[:2]) * np.array([-0.05, 0.05])
xs = np.array(lims[:2]) + delta
has = ['right', 'left']
ys = [lims[2:].mean()] * 2
vas = ['center', 'center']
else:
xs = [lims[:2].mean()] * 2
has = ['center'] * 2
delta = np.diff(lims[2:]) * np.array([-0.05, 0.05])
ys = lims[2:] + delta
vas = ['top', 'bottom']
for x, y, l, ha, va in zip(xs, ys, end_labels, has, vas):
ax.text(x, y, l, ha=ha, va=va, fontsize=ticklabelsize)
if adjust:
fig.subplots_adjust(0.01, 0.05, 0.2, 0.95)
return fig
def plot_reconstruction(evoked, origin=(0., 0., 0.04)):
"""Plot the reconstructed data for Evoked
Currently only works for MEG data.
Parameters
----------
evoked : instance of Evoked
The evoked data.
origin : array-like, shape (3,)
The head origin to use.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from mne.forward._field_interpolation import _map_meg_channels
import matplotlib.pyplot as plt
evoked = evoked.copy().pick_types(meg=True, exclude='bads')
info_to = copy.deepcopy(evoked.info)
info_to['projs'] = []
    mapping = _map_meg_channels(
        evoked.info, info_to, mode='accurate', origin=origin)
fig, axs = plt.subplots(3, 2, squeeze=False)
titles = dict(grad='Gradiometers (fT/cm)', mag='Magnetometers (fT)')
for mi, meg in enumerate(('grad', 'mag')):
picks = pick_types(evoked.info, meg=meg)
kwargs = dict(ylim=dict(grad=[-250, 250], mag=[-600, 600]),
spatial_colors=True, picks=picks)
evoked.plot(axes=axs[0, mi], proj=False,
titles=dict(grad='Proj off', mag=''), **kwargs)
evoked_remap = evoked.copy().apply_proj()
evoked_remap.info['projs'] = []
evoked_remap.plot(axes=axs[1, mi],
titles=dict(grad='Proj on', mag=''), **kwargs)
        evoked_remap.data = np.dot(mapping, evoked_remap.data)
evoked_remap.plot(axes=axs[2, mi],
titles=dict(grad='Recon', mag=''), **kwargs)
axs[0, mi].set_title(titles[meg])
for ii in range(3):
if ii in (0, 1):
axs[ii, mi].set_xlabel('')
if ii in (1, 2):
axs[ii, mi].set_title('')
for ii in range(3):
axs[ii, 1].set_ylabel('')
axs[0, 0].set_ylabel('Original')
axs[1, 0].set_ylabel('Projection')
axs[2, 0].set_ylabel('Reconstruction')
fig.tight_layout()
return fig
def plot_chpi_snr_raw(raw, win_length, n_harmonics=None, show=True,
verbose=True):
"""Compute and plot cHPI SNR from raw data
Parameters
----------
    raw : instance of Raw
        The raw data containing cHPI information.
    win_length : float
        Length of window to use for SNR estimates (seconds). A longer window
        will naturally include more low frequency power, resulting in lower
        SNR.
n_harmonics : int or None
Number of line frequency harmonics to include in the model. If None,
use all harmonics up to the MEG analog lowpass corner.
    show : bool
        Show figure if True.
    verbose : bool
        If True, print progress information.
Returns
-------
fig : instance of matplotlib.figure.Figure
cHPI SNR as function of time, residual variance.
Notes
-----
A general linear model including cHPI and line frequencies is fit into
each data window. The cHPI power obtained from the model is then divided
by the residual variance (variance of signal unexplained by the model) to
obtain the SNR.
The SNR may decrease either due to decrease of cHPI amplitudes (e.g.
head moving away from the helmet), or due to increase in the residual
variance. In case of broadband interference that overlaps with the cHPI
frequencies, the resulting decreased SNR accurately reflects the true
situation. However, increased narrowband interference outside the cHPI
and line frequencies would also cause an increase in the residual variance,
even though it wouldn't necessarily affect estimation of the cHPI
amplitudes. Thus, this method is intended for a rough overview of cHPI
signal quality. A more accurate picture of cHPI quality (at an increased
computational cost) can be obtained by examining the goodness-of-fit of
the cHPI coil fits.
"""
import matplotlib.pyplot as plt
try:
from mne.chpi import get_chpi_info
except ImportError:
from mne.chpi import _get_hpi_info as get_chpi_info
# plotting parameters
legend_fontsize = 6
title_fontsize = 10
tick_fontsize = 10
label_fontsize = 10
# get some info from fiff
sfreq = raw.info['sfreq']
linefreq = raw.info['line_freq']
if n_harmonics is not None:
linefreqs = (np.arange(n_harmonics + 1) + 1) * linefreq
else:
linefreqs = np.arange(linefreq, raw.info['lowpass'], linefreq)
buflen = int(win_length * sfreq)
if buflen <= 0:
raise ValueError('Window length should be >0')
cfreqs = get_chpi_info(raw.info, verbose=False)[0]
if verbose:
print('Nominal cHPI frequencies: %s Hz' % cfreqs)
print('Sampling frequency: %s Hz' % sfreq)
print('Using line freqs: %s Hz' % linefreqs)
print('Using buffers of %s samples = %s seconds\n'
% (buflen, buflen / sfreq))
pick_meg = pick_types(raw.info, meg=True, exclude=[])
pick_mag = pick_types(raw.info, meg='mag', exclude=[])
pick_grad = pick_types(raw.info, meg='grad', exclude=[])
nchan = len(pick_meg)
# grad and mag indices into an array that already has meg channels only
pick_mag_ = np.in1d(pick_meg, pick_mag).nonzero()[0]
pick_grad_ = np.in1d(pick_meg, pick_grad).nonzero()[0]
# create general linear model for the data
t = np.arange(buflen) / float(sfreq)
model = np.empty((len(t), 2 + 2 * (len(linefreqs) + len(cfreqs))))
model[:, 0] = t
model[:, 1] = np.ones(t.shape)
# add sine and cosine term for each freq
allfreqs = np.concatenate([linefreqs, cfreqs])
model[:, 2::2] = np.cos(2 * np.pi * t[:, np.newaxis] * allfreqs)
model[:, 3::2] = np.sin(2 * np.pi * t[:, np.newaxis] * allfreqs)
inv_model = linalg.pinv(model)
# drop last buffer to avoid overrun
bufs = np.arange(0, raw.n_times, buflen)[:-1]
tvec = bufs / sfreq
snr_avg_grad = np.zeros([len(cfreqs), len(bufs)])
hpi_pow_grad = np.zeros([len(cfreqs), len(bufs)])
snr_avg_mag = np.zeros([len(cfreqs), len(bufs)])
resid_vars = np.zeros([nchan, len(bufs)])
for ind, buf0 in enumerate(bufs):
if verbose:
print('Buffer %s/%s' % (ind + 1, len(bufs)))
megbuf = raw[pick_meg, buf0:buf0 + buflen][0].T
coeffs = np.dot(inv_model, megbuf)
coeffs_hpi = coeffs[2 + 2 * len(linefreqs):]
resid_vars[:, ind] = np.var(megbuf - np.dot(model, coeffs), 0)
# get total power by combining sine and cosine terms
# sinusoidal of amplitude A has power of A**2/2
hpi_pow = (coeffs_hpi[0::2, :] ** 2 + coeffs_hpi[1::2, :] ** 2) / 2
hpi_pow_grad[:, ind] = hpi_pow[:, pick_grad_].mean(1)
# divide average HPI power by average variance
snr_avg_grad[:, ind] = hpi_pow_grad[:, ind] / \
resid_vars[pick_grad_, ind].mean()
snr_avg_mag[:, ind] = hpi_pow[:, pick_mag_].mean(1) / \
resid_vars[pick_mag_, ind].mean()
cfreqs_legend = ['%s Hz' % fre for fre in cfreqs]
fig, axs = plt.subplots(4, 1, sharex=True)
# SNR plots for gradiometers and magnetometers
ax = axs[0]
lines1 = ax.plot(tvec, 10 * np.log10(snr_avg_grad.T))
lines1_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_grad, axis=0)),
lw=2, ls=':', color='k')
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='SNR (dB)')
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Mean cHPI power / mean residual variance, gradiometers',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
ax = axs[1]
lines2 = ax.plot(tvec, 10 * np.log10(snr_avg_mag.T))
lines2_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_mag, axis=0)),
lw=2, ls=':', color='k')
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='SNR (dB)')
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Mean cHPI power / mean residual variance, magnetometers',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
ax = axs[2]
lines3 = ax.plot(tvec, hpi_pow_grad.T)
lines3_med = ax.plot(tvec, np.median(hpi_pow_grad, axis=0),
lw=2, ls=':', color='k')
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='Power (T/m)$^2$')
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Mean cHPI power, gradiometers',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
# residual (unexplained) variance as function of time
ax = axs[3]
cls = plt.get_cmap('plasma')(np.linspace(0., 0.7, len(pick_meg)))
ax.set_prop_cycle(color=cls)
ax.semilogy(tvec, resid_vars[pick_grad_, :].T, alpha=.4)
ax.set_xlim([tvec.min(), tvec.max()])
ax.set(ylabel='Var. (T/m)$^2$', xlabel='Time (s)')
ax.xaxis.label.set_fontsize(label_fontsize)
ax.yaxis.label.set_fontsize(label_fontsize)
ax.set_title('Residual (unexplained) variance, all gradiometer channels',
fontsize=title_fontsize)
ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
tight_layout(pad=.5, w_pad=.1, h_pad=.2) # from mne.viz
# tight_layout will screw these up
ax = axs[0]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# order curve legends according to mean of data
sind = np.argsort(snr_avg_grad.mean(axis=1))[::-1]
handles = [lines1[i] for i in sind]
handles.append(lines1_med[0])
labels = [cfreqs_legend[i] for i in sind]
labels.append('Median')
leg_kwargs = dict(
prop={'size': legend_fontsize}, bbox_to_anchor=(1.02, 0.5, ),
loc='center left', borderpad=1, handlelength=1,
)
ax.legend(handles, labels, **leg_kwargs)
ax = axs[1]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
sind = np.argsort(snr_avg_mag.mean(axis=1))[::-1]
handles = [lines2[i] for i in sind]
handles.append(lines2_med[0])
labels = [cfreqs_legend[i] for i in sind]
labels.append('Median')
ax.legend(handles, labels, **leg_kwargs)
ax = axs[2]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
sind = np.argsort(hpi_pow_grad.mean(axis=1))[::-1]
handles = [lines3[i] for i in sind]
handles.append(lines3_med[0])
labels = [cfreqs_legend[i] for i in sind]
labels.append('Median')
ax.legend(handles, labels, **leg_kwargs)
ax = axs[3]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if show:
plt.show()
return fig
@verbose
def plot_good_coils(raw, t_step=1., t_window=0.2, dist_limit=0.005,
show=True, verbose=None):
"""Plot the good coil count as a function of time."""
import matplotlib.pyplot as plt
if isinstance(raw, dict): # fit_data calculated and stored to disk
t = raw['fit_t']
counts = raw['counts']
n_coils = raw['n_coils']
else:
t, counts, n_coils = compute_good_coils(raw, t_step, t_window,
dist_limit)
del t_step, t_window, dist_limit
fig, ax = plt.subplots(figsize=(8, 2))
ax.step(t, counts, zorder=4, color='k', clip_on=False)
ax.set(xlim=t[[0, -1]], ylim=[0, n_coils], xlabel='Time (sec)',
ylabel='Good coils')
ax.set(yticks=np.arange(n_coils + 1))
for comp, n, color in ((np.greater_equal, 5, '#2ca02c'),
(np.equal, 4, '#98df8a'),
(np.equal, 3, (1, 1, 0)),
(np.less_equal, 2, (1, 0, 0))):
mask = comp(counts, n)
mask[:-1] |= comp(counts[1:], n)
ax.fill_between(t, 0, n_coils, where=mask,
color=color, edgecolor='none', linewidth=0, zorder=1)
ax.grid(True)
fig.tight_layout()
plt_show(show)
return fig
@contextmanager
def mlab_offscreen(offscreen=True):
"""Use mlab in offscreen mode."""
import mne
if mne.viz.get_3d_backend() == 'mayavi':
from mayavi import mlab
old_offscreen = mlab.options.offscreen
mlab.options.offscreen = offscreen
try:
yield
finally:
mlab.options.offscreen = old_offscreen
else:
# XXX eventually something should go here for PyVista
yield
def discretize_cmap(colormap, lims, transparent=True):
"""Discretize a colormap."""
lims = np.array(lims, int)
assert lims.shape == (2,)
from matplotlib import colors, pyplot as plt
n_pts = lims[1] - lims[0] + 1
assert n_pts > 0
if n_pts == 1:
vals = np.ones(256)
else:
vals = np.round(np.linspace(-0.5, n_pts - 0.5, 256)) / (n_pts - 1)
colormap = plt.get_cmap(colormap)(vals)
if transparent:
colormap[:, 3] = np.clip((vals + 0.5 / n_pts) * 2, 0, 1)
colormap[0, 3] = 0.
colormap = colors.ListedColormap(colormap)
use_lims = [lims[0] - 0.5, (lims[0] + lims[1]) / 2., lims[1] + 0.5]
return colormap, use_lims
def trim_bg(img, color=None):
"""Trim background rows/cols from an image-like object."""
if color is None:
color = img[0, 0]
img = img[:, (img != color).any(0).any(-1)]
img = img[(img != color).any(1).any(-1)]
return img
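if __name__ == '__main__':
    # Tiny self-check for trim_bg (illustrative only): the white border around
    # a single black pixel is trimmed away, leaving a 1 x 1 image.
    _img = np.full((3, 3, 3), 255, np.uint8)
    _img[1, 1] = 0
    assert trim_bg(_img).shape == (1, 1, 3)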
|
from snovault import upgrade_step
@upgrade_step('publication', '1', '2')
def publication_1_2(value, system):
if 'identifiers' in value:
for i in value['identifiers']:
path = i.split(':')
if path[0] == 'doi':
value['doi'] = path[1]
elif path[0] == 'PMID':
value['pmid'] = path[1]
del value['identifiers']
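# A minimal usage sketch. The sample identifiers are invented for illustration,
# and it assumes @upgrade_step returns the function unchanged so it can be
# called directly.
if __name__ == '__main__':
    value = {'identifiers': ['doi:10.1000/xyz123', 'PMID:12345678']}
    publication_1_2(value, None)  # `system` is not used by this step
    assert value == {'doi': '10.1000/xyz123', 'pmid': '12345678'}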
|
n = int(input("Welcher Wert fรผr n?"))
j = 0
i = 1
zeilenbreite = n + (n-1)
while j < (n*2-1):
if j <= n-1: #dach wird hier gemacht
zeile = i+(i-1)
zeilenseite = int((zeilenbreite - zeile)/2)
print (zeilenseite*" ",end='')
print (zeile*"0", end='')
print (zeilenseite*" ")
i = i + 1
elif j == n*2-2: #"Boden" wird hier gemacht
print(zeilenbreite*"0")
else: #Mitte wird hier gemacht
print ("0", end='')
print ((zeilenbreite-2)*" ", end='')
print ("0")
j = j+1
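# For example, n = 3 prints this "house" shape (roof, hollow middle, floor):
#
#   0
#  000
# 00000
# 0   0
# 00000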
|
import os.path
import numpy as np
import numpy.testing as npt
import pytest
from base import _AdapterTester
from scmdata import ScmRun
from openscm_runner import run
from openscm_runner.adapters import FAIR
from openscm_runner.utils import calculate_quantiles
class TestFairAdapter(_AdapterTester):
@pytest.mark.parametrize("nworkers", (1, 4))
def test_run(
self,
test_scenarios,
monkeypatch,
nworkers,
test_data_dir,
update_expected_values,
):
expected_output_file = os.path.join(
test_data_dir,
"expected-integration-output",
"expected_fair1X_test_run_output.json",
)
monkeypatch.setenv("FAIR_WORKER_NUMBER", "{}".format(nworkers))
res = run(
climate_models_cfgs={
"FaIR": [
{},
{"q": np.array([0.3, 0.45]), "r0": 30.0, "lambda_global": 0.9},
{"q": np.array([0.35, 0.4]), "r0": 25.0, "lambda_global": 1.1},
],
},
scenarios=test_scenarios.filter(scenario=["ssp126", "ssp245", "ssp370"]),
output_variables=(
"Surface Air Temperature Change",
"Atmospheric Concentrations|CO2",
"Heat Content",
"Heat Uptake",
"Effective Radiative Forcing",
"Effective Radiative Forcing|Aerosols",
"Effective Radiative Forcing|CO2",
"CO2 Air to Land Flux", # should be ignored
),
out_config=None,
)
assert isinstance(res, ScmRun)
assert res["run_id"].min() == 0
assert res["run_id"].max() == 8
assert res.get_unique_meta(
"climate_model", no_duplicates=True
) == "FaIRv{}".format(FAIR.get_version())
assert set(res.get_unique_meta("variable")) == set(
[
"Surface Air Temperature Change",
"Atmospheric Concentrations|CO2",
"Heat Content",
"Heat Uptake",
"Effective Radiative Forcing",
"Effective Radiative Forcing|Aerosols",
"Effective Radiative Forcing|CO2",
]
)
        # check we can also calculate quantiles
assert "run_id" in res.meta
quantiles = calculate_quantiles(res, [0, 0.05, 0.17, 0.5, 0.83, 0.95, 1])
assert "run_id" not in quantiles.meta
# TODO CHECK: heat content is not zero in the first year in FaIR?
self._check_heat_content_heat_uptake_consistency(res)
self._check_output(res, expected_output_file, update_expected_values)
def test_variable_naming(self, test_scenarios):
missing_from_fair = (
"Effective Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC AFOLU",
"Effective Radiative Forcing|Aerosols|Direct Effect|BC|MAGICC Fossil and Industrial",
"Effective Radiative Forcing|Aerosols|Direct Effect|OC|MAGICC AFOLU",
"Effective Radiative Forcing|Aerosols|Direct Effect|OC|MAGICC Fossil and Industrial",
"Effective Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC AFOLU",
"Effective Radiative Forcing|Aerosols|Direct Effect|SOx|MAGICC Fossil and Industrial",
"Net Atmosphere to Ocean Flux|CO2",
"Net Atmosphere to Land Flux|CO2",
)
common_variables = [
c for c in self._common_variables if c not in missing_from_fair
]
res = run(
climate_models_cfgs={"FaIR": ({"startyear": 1750},)},
scenarios=test_scenarios.filter(scenario="ssp126"),
output_variables=common_variables,
)
missing_vars = set(common_variables) - set(res["variable"])
if missing_vars:
raise AssertionError(missing_vars)
def test_fair_ocean_factors(test_scenarios):
res_default_factors = run(
climate_models_cfgs={"FaIR": [{}]},
scenarios=test_scenarios.filter(scenario=["ssp585"]),
output_variables=(
"Surface Air Ocean Blended Temperature Change",
"Heat Uptake|Ocean",
"Heat Content|Ocean",
),
)
res_custom_factors = run(
climate_models_cfgs={
"FaIR": [
{
"gmst_factor": np.linspace(0.90, 1.00, 351), # test with array
"ohu_factor": 0.93,
}
]
},
scenarios=test_scenarios.filter(scenario=["ssp585"]),
output_variables=(
"Surface Air Ocean Blended Temperature Change",
"Heat Uptake|Ocean",
"Heat Content|Ocean",
),
)
assert (
res_default_factors.filter(
variable="Surface Air Ocean Blended Temperature Change",
region="World",
year=2100,
scenario="ssp585",
).values
!= res_custom_factors.filter(
variable="Surface Air Ocean Blended Temperature Change",
region="World",
year=2100,
scenario="ssp585",
).values
)
def test_startyear(test_scenarios, test_scenarios_2600):
# we can't run different start years in the same ensemble as output files will differ in shape.
# There is a separate test to ensure this does raise an error.
res_1850 = run(
climate_models_cfgs={"FaIR": [{"startyear": 1850}]},
scenarios=test_scenarios.filter(scenario=["ssp245"]),
output_variables=("Surface Air Temperature Change",),
out_config=None,
)
res_1750 = run(
climate_models_cfgs={"FaIR": [{"startyear": 1750}]},
scenarios=test_scenarios.filter(scenario=["ssp245"]),
output_variables=("Surface Air Temperature Change",),
out_config=None,
)
res_default = run(
climate_models_cfgs={"FaIR": [{}]},
scenarios=test_scenarios.filter(scenario=["ssp245"]),
output_variables=("Surface Air Temperature Change",),
out_config=None,
)
gsat2100_start1850 = res_1850.filter(
variable="Surface Air Temperature Change", region="World", year=2100,
).values
gsat2100_start1750 = res_1750.filter(
variable="Surface Air Temperature Change", region="World", year=2100,
).values
gsat2100_startdefault = res_default.filter(
variable="Surface Air Temperature Change", region="World", year=2100,
).values
assert gsat2100_start1850 != gsat2100_start1750
assert gsat2100_start1750 == gsat2100_startdefault
with pytest.raises(ValueError):
run(
climate_models_cfgs={"FaIR": [{"startyear": 1650}]},
scenarios=test_scenarios.filter(scenario=["ssp245"]),
output_variables=("Surface Air Temperature Change",),
out_config=None,
)
with pytest.raises(ValueError):
run(
climate_models_cfgs={"FaIR": [{}]},
scenarios=test_scenarios_2600.filter(scenario=["ssp245"]),
output_variables=("Surface Air Temperature Change",),
out_config=None,
)
with pytest.raises(ValueError):
run(
climate_models_cfgs={"FaIR": [{"startyear": 1750}, {"startyear": 1850}]},
scenarios=test_scenarios.filter(scenario=["ssp245"]),
output_variables=("Surface Air Temperature Change",),
out_config=None,
)
def test_forcing_categories(test_scenarios):
forcing_categories = [
"Effective Radiative Forcing|CO2",
"Effective Radiative Forcing|CH4",
"Effective Radiative Forcing|N2O",
"Effective Radiative Forcing|CF4",
"Effective Radiative Forcing|C2F6",
"Effective Radiative Forcing|C6F14",
"Effective Radiative Forcing|HFC23",
"Effective Radiative Forcing|HFC32",
"Effective Radiative Forcing|HFC125",
"Effective Radiative Forcing|HFC134a",
"Effective Radiative Forcing|HFC143a",
"Effective Radiative Forcing|HFC227ea",
"Effective Radiative Forcing|HFC245fa",
"Effective Radiative Forcing|HFC4310mee",
"Effective Radiative Forcing|SF6",
"Effective Radiative Forcing|CFC11",
"Effective Radiative Forcing|CFC12",
"Effective Radiative Forcing|CFC113",
"Effective Radiative Forcing|CFC114",
"Effective Radiative Forcing|CFC115",
"Effective Radiative Forcing|CCl4",
"Effective Radiative Forcing|CH3CCl3",
"Effective Radiative Forcing|HCFC22",
"Effective Radiative Forcing|HCFC141b",
"Effective Radiative Forcing|HCFC142b",
"Effective Radiative Forcing|Halon1211",
"Effective Radiative Forcing|Halon1202",
"Effective Radiative Forcing|Halon1301",
"Effective Radiative Forcing|Halon2402",
"Effective Radiative Forcing|CH3Br",
"Effective Radiative Forcing|CH3Cl",
"Effective Radiative Forcing|Tropospheric Ozone",
"Effective Radiative Forcing|Stratospheric Ozone",
"Effective Radiative Forcing|CH4 Oxidation Stratospheric H2O",
"Effective Radiative Forcing|Contrails",
"Effective Radiative Forcing|Aerosols|Direct Effect|SOx",
"Effective Radiative Forcing|Aerosols|Direct Effect|Secondary Organic Aerosol",
"Effective Radiative Forcing|Aerosols|Direct Effect|Nitrate",
"Effective Radiative Forcing|Aerosols|Direct Effect|BC",
"Effective Radiative Forcing|Aerosols|Direct Effect|OC",
"Effective Radiative Forcing|Aerosols|Indirect Effect",
"Effective Radiative Forcing|Black Carbon on Snow",
"Effective Radiative Forcing|Land-use Change",
"Effective Radiative Forcing|Volcanic",
"Effective Radiative Forcing|Solar",
"Effective Radiative Forcing",
"Effective Radiative Forcing|Anthropogenic",
"Effective Radiative Forcing|Greenhouse Gases",
"Effective Radiative Forcing|Kyoto Gases",
"Effective Radiative Forcing|CO2, CH4 and N2O",
"Effective Radiative Forcing|F-Gases",
"Effective Radiative Forcing|Montreal Protocol Halogen Gases",
"Effective Radiative Forcing|Aerosols|Direct Effect",
"Effective Radiative Forcing|Aerosols",
"Effective Radiative Forcing|Ozone",
"Effective Radiative Forcing",
]
res = run(
climate_models_cfgs={"FaIR": [{}]},
scenarios=test_scenarios.filter(scenario=["ssp245"]),
output_variables=tuple(forcing_categories),
out_config=None,
)
# storing results in a dict makes this a bit more compact
forcing = {}
for variable in forcing_categories:
forcing[variable] = res.filter(variable=variable, region="World").values
npt.assert_allclose(
forcing["Effective Radiative Forcing|CO2"]
+ forcing["Effective Radiative Forcing|CH4"]
+ forcing["Effective Radiative Forcing|N2O"]
+ forcing["Effective Radiative Forcing|CF4"]
+ forcing["Effective Radiative Forcing|C2F6"]
+ forcing["Effective Radiative Forcing|C6F14"]
+ forcing["Effective Radiative Forcing|HFC23"]
+ forcing["Effective Radiative Forcing|HFC32"]
+ forcing["Effective Radiative Forcing|HFC125"]
+ forcing["Effective Radiative Forcing|HFC134a"]
+ forcing["Effective Radiative Forcing|HFC143a"]
+ forcing["Effective Radiative Forcing|HFC227ea"]
+ forcing["Effective Radiative Forcing|HFC245fa"]
+ forcing["Effective Radiative Forcing|HFC4310mee"]
+ forcing["Effective Radiative Forcing|SF6"]
+ forcing["Effective Radiative Forcing|CFC11"]
+ forcing["Effective Radiative Forcing|CFC12"]
+ forcing["Effective Radiative Forcing|CFC113"]
+ forcing["Effective Radiative Forcing|CFC114"]
+ forcing["Effective Radiative Forcing|CFC115"]
+ forcing["Effective Radiative Forcing|CCl4"]
+ forcing["Effective Radiative Forcing|CH3CCl3"]
+ forcing["Effective Radiative Forcing|HCFC22"]
+ forcing["Effective Radiative Forcing|HCFC141b"]
+ forcing["Effective Radiative Forcing|HCFC142b"]
+ forcing["Effective Radiative Forcing|Halon1211"]
+ forcing["Effective Radiative Forcing|Halon1202"]
+ forcing["Effective Radiative Forcing|Halon1301"]
+ forcing["Effective Radiative Forcing|Halon2402"]
+ forcing["Effective Radiative Forcing|CH3Br"]
+ forcing["Effective Radiative Forcing|CH3Cl"],
forcing[
"Effective Radiative Forcing|Greenhouse Gases"
], # should this be "well mixed" greenhouse gases?
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|CO2"]
+ forcing["Effective Radiative Forcing|CH4"]
+ forcing["Effective Radiative Forcing|N2O"]
+ forcing["Effective Radiative Forcing|CF4"]
+ forcing["Effective Radiative Forcing|C2F6"]
+ forcing["Effective Radiative Forcing|C6F14"]
+ forcing["Effective Radiative Forcing|HFC23"]
+ forcing["Effective Radiative Forcing|HFC32"]
+ forcing["Effective Radiative Forcing|HFC125"]
+ forcing["Effective Radiative Forcing|HFC134a"]
+ forcing["Effective Radiative Forcing|HFC143a"]
+ forcing["Effective Radiative Forcing|HFC227ea"]
+ forcing["Effective Radiative Forcing|HFC245fa"]
+ forcing["Effective Radiative Forcing|HFC4310mee"]
+ forcing["Effective Radiative Forcing|SF6"],
forcing["Effective Radiative Forcing|Kyoto Gases"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|CO2"]
+ forcing["Effective Radiative Forcing|CH4"]
+ forcing["Effective Radiative Forcing|N2O"],
forcing["Effective Radiative Forcing|CO2, CH4 and N2O"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|CF4"]
+ forcing["Effective Radiative Forcing|C2F6"]
+ forcing["Effective Radiative Forcing|C6F14"]
+ forcing["Effective Radiative Forcing|HFC23"]
+ forcing["Effective Radiative Forcing|HFC32"]
+ forcing["Effective Radiative Forcing|HFC125"]
+ forcing["Effective Radiative Forcing|HFC134a"]
+ forcing["Effective Radiative Forcing|HFC143a"]
+ forcing["Effective Radiative Forcing|HFC227ea"]
+ forcing["Effective Radiative Forcing|HFC245fa"]
+ forcing["Effective Radiative Forcing|HFC4310mee"]
+ forcing["Effective Radiative Forcing|SF6"],
forcing["Effective Radiative Forcing|F-Gases"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|CFC11"]
+ forcing["Effective Radiative Forcing|CFC12"]
+ forcing["Effective Radiative Forcing|CFC113"]
+ forcing["Effective Radiative Forcing|CFC114"]
+ forcing["Effective Radiative Forcing|CFC115"]
+ forcing["Effective Radiative Forcing|CCl4"]
+ forcing["Effective Radiative Forcing|CH3CCl3"]
+ forcing["Effective Radiative Forcing|HCFC22"]
+ forcing["Effective Radiative Forcing|HCFC141b"]
+ forcing["Effective Radiative Forcing|HCFC142b"]
+ forcing["Effective Radiative Forcing|Halon1211"]
+ forcing["Effective Radiative Forcing|Halon1202"]
+ forcing["Effective Radiative Forcing|Halon1301"]
+ forcing["Effective Radiative Forcing|Halon2402"]
+ forcing["Effective Radiative Forcing|CH3Br"]
+ forcing["Effective Radiative Forcing|CH3Cl"],
forcing["Effective Radiative Forcing|Montreal Protocol Halogen Gases"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|Tropospheric Ozone"]
+ forcing["Effective Radiative Forcing|Stratospheric Ozone"],
forcing["Effective Radiative Forcing|Ozone"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|Aerosols|Direct Effect|SOx"]
+ forcing[
"Effective Radiative Forcing|Aerosols|Direct Effect|Secondary Organic Aerosol"
]
+ forcing["Effective Radiative Forcing|Aerosols|Direct Effect|Nitrate"]
+ forcing["Effective Radiative Forcing|Aerosols|Direct Effect|BC"]
+ forcing["Effective Radiative Forcing|Aerosols|Direct Effect|OC"],
forcing["Effective Radiative Forcing|Aerosols|Direct Effect"],
)
    # If up to here is fine, then we only need to check previously defined aggregates against "super-aggregates"
npt.assert_allclose(
forcing["Effective Radiative Forcing|Aerosols|Direct Effect"]
+ forcing["Effective Radiative Forcing|Aerosols|Indirect Effect"],
forcing["Effective Radiative Forcing|Aerosols"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|Greenhouse Gases"]
+ forcing["Effective Radiative Forcing|Ozone"]
+ forcing["Effective Radiative Forcing|CH4 Oxidation Stratospheric H2O"]
+ forcing["Effective Radiative Forcing|Contrails"]
+ forcing["Effective Radiative Forcing|Aerosols"]
+ forcing["Effective Radiative Forcing|Black Carbon on Snow"]
+ forcing["Effective Radiative Forcing|Land-use Change"],
forcing["Effective Radiative Forcing|Anthropogenic"],
)
npt.assert_allclose(
forcing["Effective Radiative Forcing|Anthropogenic"]
+ forcing["Effective Radiative Forcing|Volcanic"]
+ forcing["Effective Radiative Forcing|Solar"],
forcing["Effective Radiative Forcing"],
)
|
import inspect
import wrapt
from typing import NamedTuple, List
from functools import reduce
class FunctionReference(NamedTuple):
name: str
line: int
source: str
short_purpose: List[str] = []
references: List[str] = []
class Biblio(dict):
track_references: bool = False
def __str__(self):
def add_record(out, record):
index = 1
out += f"Referenced in: {record.name}"
out += f"\nSource file: {record.source}"
out += f"\nLine: {record.line}\n"
for short, ref in zip(record.short_purpose, record.references):
out += f"\t[{index}] {short} - {ref}\n"
index += 1
out += "\n"
return out
return reduce(add_record, self.values(), "")
@property
def references(self):
"""Return a list of unique references."""
output = []
for record in self.values():
output = output + [ref for ref in record.references if ref not in output]
return output
def tracking(self, enabled=True):
"""Enable the tracking of references."""
self.track_references = enabled
BIBLIOGRAPHY: Biblio = Biblio()
def add_reference(*, short_purpose: str, reference: str):
"""Decorator to link a reference to a function or method.
    Acts as a marker in code where particular algorithms/data/... originate.
General execution of code silently passes these markers, but remembers how and where
they were called. Which markers were passed in a particular program run
can be recalled with print_references().
Arguments:
short_purpose: Identify the thing being referenced (string)
reference: The reference itself, in any sensible format.
"""
@wrapt.decorator(enabled=lambda: BIBLIOGRAPHY.track_references)
def wrapper(wrapped, instance, args, kwargs):
source = inspect.getsourcefile(wrapped)
line = inspect.getsourcelines(wrapped)[1]
identifier = f"{source}:{line}"
if (
identifier in BIBLIOGRAPHY
and reference in BIBLIOGRAPHY[identifier].references
):
return wrapped(*args, **kwargs)
        if identifier not in BIBLIOGRAPHY:
            # Pass fresh lists explicitly; the NamedTuple defaults are
            # class-level objects that would otherwise be shared by every record.
            BIBLIOGRAPHY[identifier] = FunctionReference(
                wrapped.__name__, line, source, [], []
            )
BIBLIOGRAPHY[identifier].short_purpose.append(short_purpose)
BIBLIOGRAPHY[identifier].references.append(reference)
return wrapped(*args, **kwargs)
return wrapper
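# A minimal usage sketch (the function and reference below are invented for
# illustration; everything else is defined in this module).
if __name__ == "__main__":
    BIBLIOGRAPHY.tracking(True)  # tracking must be enabled for markers to record

    @add_reference(short_purpose="Example algorithm", reference="Doe et al. (2020)")
    def example():
        return 42

    example()
    print(BIBLIOGRAPHY)  # lists where the reference was used and why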
|
import json
from datetime import datetime
class CRMSystemError(Exception):
def __init__(self, errorId, errorMessage, *args, **kwargs):
super().__init__(errorMessage, *args, **kwargs)
self.errorId = errorId
self.errorMessage = errorMessage
def __str__(self):
message = "{} - {} - {}".format(datetime.now(),
self.errorId, self.errorMessage)
return message
def toJson(self):
return json.dumps(self.__dict__)
def __repr__(self):
return self.toJson()
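# A minimal usage sketch (error id and message are invented for illustration).
if __name__ == "__main__":
    try:
        raise CRMSystemError(404, "Customer record not found")
    except CRMSystemError as err:
        print(err)           # "<timestamp> - 404 - Customer record not found"
        print(err.toJson())  # {"errorId": 404, "errorMessage": "Customer record not found"}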
|
# -*- coding: utf-8 -*-
import cv2, os
import numpy as np
from scipy.io import loadmat
import matplotlib.pyplot as plt
class stereoCameral(object):
def __init__(self):
stereoParameters = loadmat("./internal_reference/stereoParameters.mat")
self.cam_matrix_left = stereoParameters["stereoParameters"]["K1"][0][0] # IntrinsicMatrix
self.distortion_l = stereoParameters["stereoParameters"]["D1"][0][0] # distortion
self.cam_matrix_right = stereoParameters["stereoParameters"]["K2"][0][0]
self.distortion_r = stereoParameters["stereoParameters"]["D2"][0][0]
self.size = stereoParameters["stereoParameters"]["size"][0][0] # image size
self.R = stereoParameters["stereoParameters"]["rot"][0][0]
for i in range(3):
for j in range(3):
if self.R[i,j]<0.9:
self.R[i,j]=-self.R[i,j]
self.T = stereoParameters["stereoParameters"]["trans"][0][0].T
#self.T[0] = -self.T[0]
# Preprocessing
def preprocess(img1, img2):
    # Color image -> grayscale
    if img1.ndim == 3:
        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)  # OpenCV loads images with BGR channel order
    if img2.ndim == 3:
        img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Histogram equalization
    img1 = cv2.equalizeHist(img1)
    img2 = cv2.equalizeHist(img2)
    return img1, img2
# Remove lens distortion
def undistortion(image, camera_matrix, dist_coeff):
undistortion_image = cv2.undistort(image, camera_matrix, dist_coeff)
return undistortion_image
# Get the mapping transforms for undistortion and stereo rectification, plus the reprojection matrix
# @param: config is a class that stores the stereo calibration parameters: config = stereoconfig.stereoCamera()
def getRectifyTransform(height, width, config):
    """
    Epipolar rectification (row alignment of the image pair).
    """
    # Read the intrinsic and extrinsic parameters
left_K = config.cam_matrix_left
right_K = config.cam_matrix_right
left_distortion = config.distortion_l
right_distortion = config.distortion_r
R = config.R
T = config.T
    # Compute the rectification transforms
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
left_K, left_distortion, right_K, right_distortion,
(width, height), R, T, alpha=0)
map1x, map1y = cv2.initUndistortRectifyMap(
left_K, left_distortion, R1, P1, (width, height), cv2.CV_32FC1)
map2x, map2y = cv2.initUndistortRectifyMap(
right_K, right_distortion, R2, P2, (width, height), cv2.CV_32FC1)
return map1x, map1y, map2x, map2y, Q
# Apply undistortion and stereo rectification
def rectifyImage(image1, image2, map1x, map1y, map2x, map2y):
rectifyed_img1 = cv2.remap(image1, map1x, map1y, cv2.INTER_AREA)
rectifyed_img2 = cv2.remap(image2, map2x, map2y, cv2.INTER_AREA)
return rectifyed_img1, rectifyed_img2
# Check the stereo rectification by drawing horizontal lines
def draw_line(image1, image2):
    # Build the output image
height = max(image1.shape[0], image2.shape[0])
width = image1.shape[1] + image2.shape[1]
output = np.zeros((height, width, 3), dtype=np.uint8)
output[0:image1.shape[0], 0:image1.shape[1]] = image1
output[0:image2.shape[0], image1.shape[1]:] = image2
    # Draw equally spaced parallel lines
    line_interval = 50  # line spacing: 50 px
for k in range(height // line_interval):
cv2.line(output, (0, line_interval * (k + 1)), (2 * width, line_interval * (k + 1)), (0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
return output
# Disparity computation
def stereoMatchSGBM(left_image, right_image, down_scale=False):
    # SGBM matching parameter setup
if left_image.ndim == 2:
img_channels = 1
else:
img_channels = 3
blockSize = 3
paraml = {'minDisparity': 0,
'numDisparities': 128,
'blockSize': blockSize,
'P1': 8 * img_channels * blockSize ** 2,
'P2': 32 * img_channels * blockSize ** 2,
'disp12MaxDiff': 1,
'preFilterCap': 63,
'uniquenessRatio': 15,
'speckleWindowSize': 100,
'speckleRange': 1,
'mode': cv2.STEREO_SGBM_MODE_SGBM_3WAY
}
    # Create the SGBM matchers
    left_matcher = cv2.StereoSGBM_create(**paraml)
    paramr = paraml.copy()  # copy so the left-matcher parameters are not mutated
    paramr['minDisparity'] = -paraml['numDisparities']
    right_matcher = cv2.StereoSGBM_create(**paramr)
    # Compute the disparity maps
    size = (left_image.shape[1], left_image.shape[0])
    if not down_scale:
disparity_left = left_matcher.compute(left_image, right_image)
disparity_right = right_matcher.compute(right_image, left_image)
else:
left_image_down = cv2.pyrDown(left_image)
right_image_down = cv2.pyrDown(right_image)
factor = left_image.shape[1] / left_image_down.shape[1]
disparity_left_half = left_matcher.compute(left_image_down,
right_image_down)
disparity_right_half = right_matcher.compute(right_image_down,
left_image_down)
disparity_left = cv2.resize(disparity_left_half,
size, interpolation=cv2.INTER_AREA)
disparity_right = cv2.resize(disparity_right_half,
size, interpolation=cv2.INTER_AREA)
disparity_left = factor * disparity_left
disparity_right = factor * disparity_right
    # True disparity (the disparity SGBM returns is multiplied by 16)
trueDisp_left = disparity_left.astype(np.float32) / 16.
trueDisp_right = disparity_right.astype(np.float32) / 16.
return trueDisp_left, trueDisp_right
# Convert an h x w x 3 array into an N x 3 array
def hw3ToN3(points):
height, width = points.shape[0:2]
points_1 = points[:, :, 0].reshape(height * width, 1)
points_2 = points[:, :, 1].reshape(height * width, 1)
points_3 = points[:, :, 2].reshape(height * width, 1)
points_ = np.hstack((points_1, points_2, points_3))
return points_
# Convert depth and color into a point cloud
def DepthColor2Cloud(points_3d, colors):
rows, cols = points_3d.shape[0:2]
size = rows * cols
points_ = hw3ToN3(points_3d)
colors_ = hw3ToN3(colors).astype(np.int64)
    # Color channels
blue = colors_[:, 0].reshape(size, 1)
green = colors_[:, 1].reshape(size, 1)
red = colors_[:, 2].reshape(size, 1)
rgb = np.left_shift(blue,0)+np.left_shift(green,8)+np.left_shift(red,16)
    # Stack coordinates and packed color into the point-cloud array
pointcloud = np.hstack((points_, rgb)).astype(np.float32)
    # Remove points that are out of range
X = pointcloud[:, 0]
Y = pointcloud[:, 1]
Z = pointcloud[:, 2]
remove_idx1 = np.where(Z <= 0)
remove_idx2 = np.where(Z > 15000)
remove_idx3 = np.where(X > 10000)
remove_idx4 = np.where(X < -10000)
remove_idx5 = np.where(Y > 10000)
remove_idx6 = np.where(Y < -10000)
remove_idx = np.hstack((
remove_idx1[0], remove_idx2[0],
remove_idx3[0], remove_idx4[0], remove_idx5[0], remove_idx6[0]))
pointcloud_1 = np.delete(pointcloud, remove_idx, 0)
return pointcloud_1
if __name__ == '__main__':
    # Read the left/right image pair
cali_folder_left = 'D:/cxn_project/Strain-gauges-recognition/cali_img/left/'
cali_folder_right = 'D:/cxn_project/Strain-gauges-recognition/cali_img/right/'
    iml = cv2.imread(
        'D:/cxn_project/Strain-gauges-recognition/cali_img/left/l17.bmp')  # left image
    imr = cv2.imread(
        'D:/cxn_project/Strain-gauges-recognition/cali_img/right/r17.bmp')  # right image
    # iml = cv2.imread(
    #     'D:/Program Files/Polyspace/R2019a/toolbox/vision/visiondata/calibration/stereo/left/left06.png')  # left image
    # imr = cv2.imread(
    #     'D:/Program Files/Polyspace/R2019a/toolbox/vision/visiondata/calibration/stereo/right/right06.png')
height, width = iml.shape[0:2]
    # Read the camera's intrinsic and extrinsic parameters
    config = stereoCameral()
    iml = undistortion(iml, config.cam_matrix_left, config.distortion_l)
    imr = undistortion(imr, config.cam_matrix_right, config.distortion_r)
    # Stereo rectification: get the undistortion/rectification maps and the
    # reprojection matrix Q used to turn pixel coordinates into 3D coordinates
    map1x, map1y, map2x, map2y, Q = getRectifyTransform(height, width, config)
iml_rectified, imr_rectified = rectifyImage(
iml, imr, map1x, map1y, map2x, map2y)
print(Q)
    # Draw equally spaced parallel lines to check the rectification result
line = draw_line(iml_rectified, imr_rectified)
plt.imshow(line, cmap ='gray')
    # Stereo matching
    # iml_, imr_ = preprocess(iml, imr)  # preprocessing; usually reduces the effect of uneven illumination, but is optional
    displ, dispr = stereoMatchSGBM(iml_rectified, imr_rectified, True)
plt.figure()
plt.imshow(displ, cmap ='gray')
    # Compute the 3D coordinates of each pixel (in the left camera frame); the third dimension holds x, y, z
points_3d = cv2.reprojectImageTo3D(displ, Q)
    # Build the point cloud in Point_XYZRGBA format
pointcloud = DepthColor2Cloud(points_3d, iml)
|
import CatalogItem
from toontown.toonbase import ToontownGlobals
from toontown.fishing import FishGlobals
from direct.actor import Actor
from toontown.toonbase import TTLocalizer
from direct.interval.IntervalGlobal import *
class CatalogTankItem(CatalogItem.CatalogItem):
sequenceNumber = 0
def makeNewItem(self, maxTank):
self.maxTank = maxTank
CatalogItem.CatalogItem.makeNewItem(self)
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
return avatar.getMaxFishTank() >= self.maxTank or self in avatar.onOrder or self in avatar.mailboxContents
def saveHistory(self):
return 1
def getTypeName(self):
return TTLocalizer.TankTypeName
def getName(self):
return TTLocalizer.FishTank % TTLocalizer.FishTankNameDict[self.maxTank]
def recordPurchase(self, avatar, optional):
if self.maxTank < 0 or self.maxTank > FishGlobals.MaxTank:
return ToontownGlobals.P_InvalidIndex
if self.maxTank <= avatar.getMaxFishTank():
return ToontownGlobals.P_ItemUnneeded
avatar.b_setMaxFishTank(self.maxTank)
return ToontownGlobals.P_ItemOnOrder
def isGift(self):
return 0
def getDeliveryTime(self):
return 1
def getPicture(self, avatar):
gui = loader.loadModel('phase_4/models/gui/fishingGui')
bucket = gui.find('**/bucket')
bucket.setScale(2.7)
bucket.setPos(-3.15, 0, 3.2)
frame = self.makeFrame()
bucket.reparentTo(frame)
gui.removeNode()
return (frame, None)
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogAcceptTank
elif retcode == ToontownGlobals.P_ItemUnneeded:
return TTLocalizer.CatalogAcceptTankUnneeded
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def output(self, store = -1):
return 'CatalogTankItem(%s%s)' % (self.maxTank, self.formatOptionalData(store))
def compareTo(self, other):
return self.maxTank - other.maxTank
def getHashContents(self):
return self.maxTank
def getBasePrice(self):
return FishGlobals.TankPriceDict[self.maxTank]
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.maxTank = di.getUint8()
def encodeDatagram(self, dg, store):
CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
dg.addUint8(self.maxTank)
def nextAvailableTank(avatar, duplicateItems):
    tank = avatar.getMaxFishTank()
    if tank not in FishGlobals.NextTank:
        return None
    return CatalogTankItem(FishGlobals.NextTank[tank])
def getAllTanks():
    tanks = []
    for old, new in FishGlobals.NextTank.iteritems():
        tanks.append(CatalogTankItem(new))
    return tanks
|
def dfs(grafo, origen, v, padre, distancia, peso_min_actual):
    # Backtracking: extend the current simple path; whenever an edge closes a
    # cycle back to `origen` after visiting every vertex, update the best tour.
    for w in grafo.adyacentes(v):
        if w == origen and padre[v] != origen:
            peso_camino = distancia[v] + grafo.peso_arista(v, origen)
            if len(distancia) == len(grafo) and peso_camino < peso_min_actual[0]:
                peso_min_actual[0] = peso_camino
            continue  # keep exploring the remaining neighbours of v
        if w in distancia:
            continue
        distancia[w] = distancia[v] + grafo.peso_arista(v, w)
        padre[w] = v
        dfs(grafo, origen, w, padre, distancia, peso_min_actual)
        distancia.pop(w)
        padre.pop(w)
def viajante(grafo):
    """Brute-force travelling salesman: return the weight of the cheapest
    Hamiltonian cycle, or float('inf') if the graph has none."""
    peso_min_actual = [float('inf')]
    for v in grafo:
        dfs(grafo, v, v, {v: None}, {v: 0}, peso_min_actual)
    return peso_min_actual[0]
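# A minimal usage sketch. The graph interface (adyacentes, peso_arista,
# __iter__, __len__) is inferred from the calls above; this tiny stub is
# invented for illustration only.
if __name__ == "__main__":
    class _Grafo:
        def __init__(self, aristas):
            self._ady = {}
            for u, v, peso in aristas:
                self._ady.setdefault(u, {})[v] = peso
                self._ady.setdefault(v, {})[u] = peso

        def __iter__(self):
            return iter(self._ady)

        def __len__(self):
            return len(self._ady)

        def adyacentes(self, v):
            return self._ady[v]

        def peso_arista(self, u, v):
            return self._ady[u][v]

    g = _Grafo([("a", "b", 1), ("b", "c", 2), ("c", "a", 3)])
    print(viajante(g))  # 6: the only tour is a-b-c-a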
|
from crawler.exportExcel import export_product_excel
from crawler.stringgetter import get_page_string
from bs4 import BeautifulSoup
import time
depth = [('445726', '445626', '๊ฐ์์ง ์ฌ๋ฃ', '๊ฑด์์ฌ๋ฃ', '๊ฑด์์ฌ๋ฃ', '๊ฑด์์ฌ๋ฃ'),
         ('445727', '445627', '๊ฐ์์ง ์ฌ๋ฃ', '์ํํธ์ฌ๋ฃ', '์ํํธ์ฌ๋ฃ', '์ํํธ์ฌ๋ฃ'),
         ('445728', '445628', '๊ฐ์์ง ์ฌ๋ฃ', '์ต์์ฌ๋ฃ', '์ต์์ฌ๋ฃ', '์ต์์ฌ๋ฃ'),
         ('445731', '445631', '๊ฐ์์ง ์ฌ๋ฃ', '๋ถ์ ', '๋ถ์ ', '๋ถ์ '),
         ('119018', '121434', '๊ฐ์์ง ๊ฐ์', '์บ/ํ์ฐ์น', '์บ', '์บ'),
         ('119019', '121435', '๊ฐ์์ง ๊ฐ์', '์บ/ํ์ฐ์น', 'ํ์ฐ์น', 'ํ์ฐ์น'),
         ('385001', '384901', '๊ฐ์์ง ๊ฐ์', '๋ดํ๊ป', '๋ดํ๊ป', '๋ดํ๊ป'),
         ('385002', '384902', '๊ฐ์์ง ๊ฐ์', '๊ฑด์กฐ๊ฐ์/์กํฌ', '๊ฑด์กฐ๊ฐ์/์กํฌ', '๊ฑด์กฐ๊ฐ์/์กํฌ'),
         ('385003', '384903', '๊ฐ์์ง ๊ฐ์', '๋๊ฒฐ๊ฑด์กฐ๊ฐ์', '๋๊ฒฐ๊ฑด์กฐ๊ฐ์', '๋๊ฒฐ๊ฑด์กฐ๊ฐ์'),
         ('118974', '121427', '๊ฐ์์ง ๊ฐ์', '๋น์คํท/์๋ฆฌ์ผ/์ฟ ํค', '๋น์คํท/์๋ฆฌ์ผ/์ฟ ํค', '๋น์คํท/์๋ฆฌ์ผ/์ฟ ํค'),
         ('225063', '224963', '๊ฐ์์ง ๊ฐ์', '์บ๋/์ ค๋ฆฌ', '์บ๋/์ ค๋ฆฌ', '์บ๋/์ ค๋ฆฌ'),
         ('385004', '384904', '๊ฐ์์ง ๊ฐ์', '์ ธํค/ํธ๋ฆฟ', '์ ธํค/ํธ๋ฆฟ', '์ ธํค/ํธ๋ฆฟ'),
         ('118980', '121447', '๊ฐ์์ง ๊ฐ์', '์๋ฃ', '์๋ฃ', '์๋ฃ'),
         ('486320', '486220', '๊ฐ์์ง ์์์ ', '์์์ ', '์ขํฉ์์์ ', '์ขํฉ์์์ '),
         ('486321', '486221', '๊ฐ์์ง ์์์ ', '์์์ ', '์นผ์/๊ด์ ์์์ ', '์นผ์/๊ด์ ์์์ '),
         ('486330', '486230', '๊ฐ์์ง ์์์ ', '๊ฑด๊ฐ๋ณด์กฐ์ ', '๊ตฌ์ถฉ์ ', '๊ตฌ์ถฉ์ '),
         ('119254', '121569', '๊ฐ์์ง ์ฉํ', 'ํ์ฐ์ค/์ธํ๋ฆฌ', 'ํ์ฐ์ค', '์ฟ ์ํ์ฐ์ค'),
         ('119255', '121570', '๊ฐ์์ง ์ฉํ', 'ํ์ฐ์ค/์ธํ๋ฆฌ', 'ํ์ฐ์ค', 'ํ๋ํ์ฐ์ค'),
         ('119248', '121572', '๊ฐ์์ง ์ฉํ', 'ํ์ฐ์ค/์ธํ๋ฆฌ', '์ฟ ์/๋ฐฉ์', '์ฟ ์/๋ฐฉ์'),
         ('119251', '121575', '๊ฐ์์ง ์ฉํ', 'ํ์ฐ์ค/์ธํ๋ฆฌ', '๋ด์/์ด๋ถ', '๋ด์/์ด๋ถ'),
         ('419203', '419103', '๊ฐ์์ง ์ฉํ', 'ํ์ฐ์ค/์ธํ๋ฆฌ', '์ธํ๋ฆฌ', '์ธํ๋ฆฌ'),
         ('373677', '373577', '๊ฐ์์ง ์ฉํ', '๊ธ์๊ธฐ/๊ธ์๊ธฐ', '์ํ', '์ํ'),
         ('119234', '121546', '๊ฐ์์ง ์ฉํ', '๊ธ์๊ธฐ/๊ธ์๊ธฐ', '์๊ธฐ', '์คํ์ธ๋ฆฌ์ค ์๊ธฐ'),
         ('119233', '121545', '๊ฐ์์ง ์ฉํ', '๊ธ์๊ธฐ/๊ธ์๊ธฐ', '์๊ธฐ', 'ํ๋ผ์คํฑ ์๊ธฐ'),
         ('119212', '121554', '๊ฐ์์ง ์ฉํ', '๊ธ์๊ธฐ/๊ธ์๊ธฐ', '๋ฌผ๋ณ/๊ธ์๊ธฐ/๊ธ์๊ธฐ', '๋ฌผ๋ณ/๊ธ์๊ธฐ/๊ธ์๊ธฐ'),
         ('119211', '121553', '๊ฐ์์ง ์ฉํ', '๊ธ์๊ธฐ/๊ธ์๊ธฐ', '์ ๋ณ', '์ ๋ณ'),
         ('373633', '373533', '๊ฐ์์ง ์ฉํ', '์๋ฅ/ํจ์', '์ฌ์ธ์', '์ฌ์ธ์'),
         ('373636', '373536', '๊ฐ์์ง ์ฉํ', '์๋ฅ/ํจ์', '์ํผ์ค/๋๋ ์ค', '์ํผ์ค/๋๋ ์ค'),
         ('373639', '373539', '๊ฐ์์ง ์ฉํ', '์๋ฅ/ํจ์', '์ฝ์คํฌ ์๋ฅ', '์ฝ์คํฌ ์๋ฅ'),
         ('119385', '121658', '๊ฐ์์ง ์ฉํ', '์๋ฅ/ํจ์', 'ํจ์์์ธ์๋ฆฌ/๊ธฐํ', '๋ฐ๋ค๋/์ค์นดํ'),
         ('119093', '121474', '๊ฐ์์ง ์ฉํ', '๋ฐฐ๋ณ์ฉํ', '๋ฐฐ๋ณํ', '๋ฐฐ๋ณํ'),
         ('119092', '121473', '๊ฐ์์ง ์ฉํ', '๋ฐฐ๋ณ์ฉํ', '๋ฐฐ๋ณํจ๋', '๋ฐฐ๋ณํจ๋'),
         ('119096', '121477', '๊ฐ์์ง ์ฉํ', '๋ฐฐ๋ณ์ฉํ', 'ํ์ทจ์ /์๋', 'ํ์ทจ์ /์๋'),
         ('119097', '121478', '๊ฐ์์ง ์ฉํ', '๋ฐฐ๋ณ์ฉํ', '๋ฌผํฐ์', '๋ฌผํฐ์'),
         ('385009', '384909', '๊ฐ์์ง ์ฉํ', '๋ฐฐ๋ณ์ฉํ', 'ํญ๋ฌธ์ธ์ ์ ', 'ํญ๋ฌธ์ธ์ ์ '),
         ('119181', '121483', '๊ฐ์์ง ์ฉํ', '๋ฏธ์ฉ/๋ชฉ์', '์ดํธ/๋ฆฐ์ค', '์ดํธ'),
         ('119182', '121484', '๊ฐ์์ง ์ฉํ', '๋ฏธ์ฉ/๋ชฉ์', '์ดํธ/๋ฆฐ์ค', '๋ฆฐ์ค/์ปจ๋์๋'),
         ('373644', '373544', '๊ฐ์์ง ์ฉํ', '๋ฏธ์ฉ/๋ชฉ์', '๋น๋', '๋น๋'),
         ('119310', '121610', '๊ฐ์์ง ์ฉํ', '์ฅ๋๊ฐ/ํ๋ จ์ฉํ', '๋ณผ', '๋ณผ'),
         ('120246', '121615', '๊ฐ์์ง ์ฉํ', '์ฅ๋๊ฐ/ํ๋ จ์ฉํ', '๋ธ์ฆ์ํฌ/IQ ์ฅ๋๊ฐ', '๋ธ์ฆ์ํฌ/IQ ์ฅ๋๊ฐ'),
         ('119311', '121611', '๊ฐ์์ง ์ฉํ', '์ฅ๋๊ฐ/ํ๋ จ์ฉํ', '๋กํ/์น์ค์ฅ๋๊ฐ', '๋กํ/์น์ค์ฅ๋๊ฐ'),
         ('119313', '121613', '๊ฐ์์ง ์ฉํ', '์ฅ๋๊ฐ/ํ๋ จ์ฉํ', '๋ผํ์ค/๊ณ ๋ฌด์ฅ๋๊ฐ', '๋ผํ์ค/๊ณ ๋ฌด์ฅ๋๊ฐ'),
         ('119314', '121614', '๊ฐ์์ง ์ฉํ', '์ฅ๋๊ฐ/ํ๋ จ์ฉํ', '๋ด์ ์ฅ๋๊ฐ', '๋ด์ ์ฅ๋๊ฐ'),
         ('119315', '121616', '๊ฐ์์ง ์ฉํ', '์ฅ๋๊ฐ/ํ๋ จ์ฉํ', '๊ธฐํ์ฅ๋๊ฐ', '๊ธฐํ์ฅ๋๊ฐ'),
         ('419390', '419290', '๊ฐ์์ง ์ฉํ', '์ด๋์ฅ/์ธ์ถ์ฉํ', '์ค/ํ๋ค์ค', '๊ฐ์ด์ค/ํ๋ค์ค'),
         ('419391', '419291', '๊ฐ์์ง ์ฉํ', '์ด๋์ฅ/์ธ์ถ์ฉํ', '์ค,/ํ๋ค์ค', '๋ชฉ์ค'),
         ('419392', '419292', '๊ฐ์์ง ์ฉํ', '์ด๋์ฅ/์ธ์ถ์ฉํ', '์ค,/ํ๋ค์ค', '๋ฆฌ๋์ค'),
         ('419393', '419293', '๊ฐ์์ง ์ฉํ', '์ด๋์ฅ/์ธ์ถ์ฉํ', '์ค,/ํ๋ค์ค', '์๋์ค'),
         ('445860', '445760', '๊ณ ์์ด ์ฌ๋ฃ', '๊ฑด์์ฌ๋ฃ', '๊ฑด์์ฌ๋ฃ', '๊ฑด์์ฌ๋ฃ'),
         ('119522', '121721', '๊ณ ์์ด ๊ฐ์', '์บ', '์บ', '์บ'),
         ('119499', '121722', '๊ณ ์์ด ๊ฐ์', 'ํ์ฐ์น', 'ํ์ฐ์น', 'ํ์ฐ์น'),
         ('119502', '121725', '๊ณ ์์ด ๊ฐ์', '์ ธํค/์คํฑ', '์ ธํค/์คํฑ', '์ ธํค/์คํฑ'),
         ('119505', '121728', '๊ณ ์์ด ๊ฐ์', '์บฃ๋ข/์บฃ๊ธ๋ผ์ค', '์บฃ๋ข/์บฃ๊ธ๋ผ์ค', '์บฃ๋ข/์บฃ๊ธ๋ผ์ค'),
         ('119574', '121760', '๊ณ ์์ด ์ฉํ', '๋ชจ๋/ํ์ฅ์', '๋ชจ๋', '๋ฒคํ ๋์ดํธ'),
         ('119577', '121763', '๊ณ ์์ด ์ฉํ', '๋ชจ๋/ํ์ฅ์', '๋ชจ๋', '๋๋ถ')]
url = "https://www.coupang.com/np/categories/{}?sorter=saleCountDesc&component={}"
def web_crawler_loop():
    """Visits every category page listed in `depth` and aggregates products."""
    result = []
    for tu in depth:
        page_string = get_page_string(url.format(tu[0], tu[1]))
        result.extend(get_products(page_string, tu[-4:]))
        time.sleep(1)  # be polite: pause between page fetches
    return result
def get_products(string, category):
    bs_obj = BeautifulSoup(string, "html.parser")
    ul = bs_obj.find("ul", {"id": "productList"})  # extract the product list section
    lis = ul.findAll("li", {"class": "baby-product renew-badge"})  # extract each item
    products = []
    rank = 1
    for item in lis:
        # name
        div_name = item.find("div", {"class": "name"})
        name = div_name.getText()
        # image
        dt_image = item.find("dt", {"class": "image"})
        image = dt_image.find("img").get('src')
        # price (drop thousands separators)
        price = item.find("strong", {"class": "price-value"}).getText().replace(",", "")
        products.append({"순위": rank,
                         "제품명": name.strip(),
                         "제품이미지": "https:" + image,
                         "가격": price,
                         "출처": '쿠팡',
                         "구분1": category[0],
                         "구분2": category[1],
                         "구분3": category[2],
                         "구분4": category[3]})
        rank = rank + 1
    print('[쿠팡 - {} - {} - {} - {}] size = {}'
          .format(category[0], category[1], category[2], category[3], len(products)))
    return products
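
# Offline sketch of the markup get_products() expects, useful for a dry run.
# The id/class names mirror the selectors above; the product itself is
# fabricated for illustration:
_sample_html = '''
<ul id="productList">
  <li class="baby-product renew-badge">
    <dt class="image"><img src="//image.example.com/item.jpg"></dt>
    <div class="name"> Sample Dog Food 2kg </div>
    <strong class="price-value">12,900</strong>
  </li>
</ul>
'''
# get_products(_sample_html, ('a', 'b', 'c', 'd'))  # -> one product dict, rank 1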
if __name__ == '__main__':
    file_path = '../../../doc/coupang.xlsx'
    export_product_excel(web_crawler_loop(), file_path)
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
# Accept a single name or a list of names on the node's first input.
if not isinstance(IN[0], list):
    names = [IN[0]]
else:
    names = IN[0]
names = UnwrapElement(names)
# Collect all area schemes; the Revit API class is AreaScheme (singular).
areaSchemes = FilteredElementCollector(doc)
areaSchemes = areaSchemes.OfClass(AreaScheme)
outList = []
for name in names:
    for scheme in areaSchemes:
        if scheme.Name == name:
            outList.append(scheme)
OUT = outList
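
# Hedged usage note: this script is meant for a Dynamo Python Script node, so
# IN[0] is its first input port. Wiring in e.g. ["Gross Building"] (a scheme
# name fabricated for illustration) makes OUT the matching AreaScheme elements.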
|
class C:
    """
    Examples
    --------
    >>> c = C()
    >>> c.setx(4)
    >>> print(c.getx())
    4
    >>> c.delx()
    """

    def __init__(self):
        self._x = None

    def getx(self):
        return self._x

    def setx(self, value):
        if value < 0:
            raise ValueError
        self._x = value

    def delx(self):
        del self._x

    x = property(getx, setx, delx, "I'm the 'x' property.")


class D:
    """
    Examples
    --------
    >>> d = D()
    >>> d.x = 4
    >>> d.x = -4
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "<stdin>", line 14, in x
    ValueError
    >>> print(d.x)
    4
    >>> del d.x
    """

    def __init__(self):
        self._x = None

    @property
    def x(self):
        """I'm the 'x' property.

        Returns:
            x: x
        """
        return self._x

    @x.setter
    def x(self, value):
        if value < 0:
            raise ValueError
        self._x = value

    @x.deleter
    def x(self):
        del self._x  # a deleter's return value is ignored, so none is given


if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
import sys
from os.path import abspath, dirname

# Make the repository root importable from the test suite.
root_dir = dirname(dirname(abspath(__file__)))
sys.path.append(root_dir)

pytest_plugins = []  # no shared fixture plugins registered yet
|
# -*- coding: utf-8 -*-
from sys import stdout
if __name__ == '__main__':
    n = int(input())  # number of rows in each half
    # upper half: rows of 2x-1 stars for x = n..1, indented inward
    for x in range(n, 0, -1):
        for y in range(n - x):
            stdout.write(" ")
        for z in range(2 * x - 1):
            stdout.write("*")
        print()
    # lower half: rows of 2x+1 stars for x = 1..n-1, widening back out
    for x in range(1, n):
        for y in range(n - x - 1):
            stdout.write(" ")
        for z in range(2 * x + 1):
            stdout.write("*")
        print()
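
# Worked example: for n = 3 the two loops print an hourglass shape:
#
# *****
#  ***
#   *
#  ***
# *****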
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib import constants
USE_DEFAULT_SUBNETPOOL = 'use_default_subnetpool'
ALIAS = 'default-subnetpools'
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Default Subnetpools'
API_PREFIX = ''
DESCRIPTION = 'Provides ability to mark and use a subnetpool as the default.'
UPDATED_TIMESTAMP = '2016-02-18T18:00:00-00:00'
RESOURCE_NAME = subnet_def.RESOURCE_NAME
COLLECTION_NAME = subnet_def.COLLECTION_NAME
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        USE_DEFAULT_SUBNETPOOL: {'allow_post': True,
                                 'allow_put': False,
                                 'default': False,
                                 'convert_to': converters.convert_to_boolean,
                                 'is_visible': False},
    },
}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = [constants.SUBNET_ALLOCATION_EXT_ALIAS]
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
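
# Hedged illustration of how the attribute surfaces in the v2.0 API (request
# body fabricated for this sketch): a subnet create may set the flag instead
# of passing an explicit subnetpool_id, e.g.
#   POST /v2.0/subnets
#   {"subnet": {"network_id": "<uuid>", "ip_version": 4,
#               "use_default_subnetpool": true}}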
|