/Finance-Ultron-1.0.8.1.tar.gz/Finance-Ultron-1.0.8.1/ultron/ump/ump/main_deg.py
"""Example ump (main-referee) module for trend-fit angle features."""
from ultron.ump.trade.ml_feature import FeatureDeg, FeatureDegExtend
from ultron.ump.ump.base import ump_main_make_xy, BuyUmpMixin
from ultron.ump.ump.main_base import MainBase
from ultron.ump.model.principles import Principles
class DegFiter(Principles):
"""
    Internal class; it only needs to implement make_xy, decorated with ump_main_make_xy.
    In __init__ the referee's fiter is built via:
    self.fiter_cls = self.get_fiter_class()
    self.fiter = self.fiter_cls(orders_pd=self.orders_pd, **kwarg)
    which constructs the fiter and, most importantly, self.fiter.df, a pd.DataFrame of features.
"""
@ump_main_make_xy
def make_xy(self, **kwarg):
"""
        make_xy is wrapped a second time by the ump_main_make_xy decorator.
        Here we only need to select the required feature columns from self.order_has_ret with filter(), forming a df.
        :param kwarg: keyword arguments consumed by the ump_main_make_xy decorator
        kwargs['orders_pd'] is a required keyword argument: the trade training-set data, a pd.DataFrame
        kwargs['scaler'] is an optional keyword argument: whether the feature matrix returned by make_xy is standardized
        :return: deg_df, the feature columns selected from self.order_has_ret with filter()
        After make_xy returns deg_df, the ump_main_make_xy decorator converts it to a matrix and builds x, y, etc.
eg: deg_df
result buy_deg_ang42 buy_deg_ang252 buy_deg_ang60 \
2014-09-24 0 3.378 3.458 3.458
2014-10-24 0 0.191 2.889 2.809
2014-10-29 1 -2.026 16.689 -0.761
2014-10-29 1 -3.427 -11.956 -8.296
2014-10-29 1 -2.915 39.469 -6.043
2014-10-29 1 -2.026 16.689 -0.761
2014-11-03 1 0.103 39.202 -4.614
2014-11-11 1 8.341 -9.450 0.730
2014-11-12 0 3.963 6.595 -7.524
2014-11-26 0 14.052 6.061 7.566
... ... ... ... ...
2016-03-14 1 4.002 -10.559 -7.992
2016-03-14 0 0.129 -6.649 -10.880
2016-03-30 0 13.121 -8.461 4.498
2016-04-04 1 4.409 -33.097 -6.281
2016-04-13 0 6.603 -31.459 0.191
2016-04-14 0 4.611 18.428 3.134
2016-04-15 0 4.238 -13.247 4.693
2016-04-15 0 4.238 -13.247 4.693
2016-04-29 1 1.445 16.266 4.615
2016-04-29 1 1.445 16.266 4.615
buy_deg_ang21
2014-09-24 1.818
2014-10-24 -1.089
2014-10-29 1.980
2014-10-29 6.507
2014-10-29 7.046
2014-10-29 1.980
2014-11-03 10.125
2014-11-11 12.397
2014-11-12 6.671
2014-11-26 12.494
... ...
2016-03-14 9.324
2016-03-14 5.201
2016-03-30 4.070
2016-04-04 5.618
2016-04-13 4.457
2016-04-14 0.733
2016-04-15 1.162
2016-04-15 1.162
2016-04-29 -1.115
2016-04-29 -1.115
"""
# regex='result|buy_deg_ang42|buy_deg_ang252|buy_deg_ang60|buy_deg_ang21'
regex = 'result|{}'.format('|'.join(
FeatureDeg().get_feature_ump_keys(ump_cls=MainDeg)))
# noinspection PyUnresolvedReferences
deg_df = self.order_has_ret.filter(regex=regex)
return deg_df
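# --- Illustrative sketch (editor addition, not part of the original package) ---
# make_xy only has to select feature columns with DataFrame.filter(regex=...);
# the ump_main_make_xy decorator then converts the selected frame into x/y
# matrices.  The tiny frame below is hypothetical and mirrors the docstring
# example above (requires pandas).
def _example_filter_deg_columns():
    import pandas as pd
    df = pd.DataFrame({
        'result': [0, 1],
        'buy_deg_ang42': [3.378, 0.191],
        'unrelated_col': [1.0, 2.0],
    })
    # Keep only 'result' and the deg feature column, as make_xy does above.
    return df.filter(regex='result|buy_deg_ang42')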
class MainDeg(MainBase, BuyUmpMixin):
    """Main-referee trend-fit angle feature class; subclass of MainBase, mixes in BuyUmpMixin, acts as a buy ump class"""
def get_predict_col(self):
"""
        Main-referee trend-fit angle feature keys: ['buy_deg_ang42', 'buy_deg_ang252', 'buy_deg_ang60', 'buy_deg_ang21']
        :return: ['buy_deg_ang42', 'buy_deg_ang252', 'buy_deg_ang60', 'buy_deg_ang21']
"""
return FeatureDeg().get_feature_ump_keys(ump_cls=MainDeg)
def get_fiter_class(self):
"""
        The fiter class returned for the main-referee trend-fit angle features: DegFiter
        :return: DegFiter
"""
return DegFiter
@classmethod
def class_unique_id(cls):
"""
        Unique keyword name of this concrete ump class, class method: returns 'deg_main'
        Mainly intended for external users who register custom umps; the user must guarantee that class_unique_id is unique, no internal check is performed
        See the extend_ump_block method in UltronUmpManager for concrete usage
"""
return 'deg_main'
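# --- Illustrative usage sketch (editor addition, not part of the original package) ---
# Rough outline inferred from the DegFiter docstring above; the constructor
# keyword is an assumption and is not confirmed by this module:
#
#   orders_pd = ...                           # pd.DataFrame of historical trades
#   main_deg = MainDeg(orders_pd=orders_pd)   # assumed keyword, see DegFiter.__doc__
#   main_deg.get_predict_col()                # -> ['buy_deg_ang42', 'buy_deg_ang252', ...]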
class ExtendFeatureFiter(Principles):
    """make_xy for the angle main-referee extension class"""
@ump_main_make_xy
def make_xy(self, **kwarg):
        # Use get_feature_ump_keys and just pass the current class; it returns the matching feature columns depending on whether this is a buy ump or a sell ump
col = FeatureDegExtend().get_feature_ump_keys(ump_cls=MainDegExtend)
regex = 'result|{}'.format('|'.join(col))
extend_deg_df = self.order_has_ret.filter(regex=regex)
return extend_deg_df
class MainDegExtend(MainBase, BuyUmpMixin):
    """Main referee that decides trades from a new perspective; UltronUmpMainBase (MainBase) subclass, mixes in BuyUmpMixin, acts as a buy ump class"""
def get_predict_col(self):
        # Use get_feature_ump_keys and just pass the current class; it returns the matching feature columns depending on whether this is a buy ump or a sell ump
col = FeatureDegExtend().get_feature_ump_keys(ump_cls=MainDegExtend)
return col
def get_fiter_class(self):
return ExtendFeatureFiter
@classmethod
def class_unique_id(cls):
        return 'extend_main_deg'
/AndroidN-0.0.9.tar.gz/AndroidN-0.0.9/N/N.py
import ctypes
import os
import sys
import threading
from ctypes import *
dll_dir = os.path.dirname(os.path.abspath(__file__))
os.add_dll_directory(dll_dir)
N_dll = CDLL("N.dll")
Run_ID = 0
lock_ID = threading.Lock()  # create a lock object
# Global variable that keeps a reference to the callback function (so it is not garbage-collected)
global callback_func
def GetRun_ID():
    """Get a run ID."""
global Run_ID
    with lock_ID:  # use the lock to keep Run_ID thread-safe
Run_ID = Run_ID + 1
if Run_ID > 200:
Run_ID = 1
return Run_ID
def on_callback(Msg=None):
    """Callback function invoked by the DLL."""
print("callback:", Msg.decode("gbk"))
return 42
def callback_Add():
    # Build a global callback function
    # Convert the Python function into a C-callable function pointer
callback_type = WINFUNCTYPE(c_int, c_char_p)
global callback_func
callback_func = callback_type(on_callback)
    # Integer representation of the function pointer
callback_int = cast(callback_func, c_void_p).value
return callback_int
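# --- Illustrative sketch (editor addition, portable variant) ---
# callback_Add() above is plain ctypes machinery: wrap a Python function in a
# function-pointer type, then cast it to c_void_p to get an integer address a
# DLL can store and call later.  CFUNCTYPE is used here so the sketch also runs
# outside Windows; the real module uses WINFUNCTYPE (stdcall).  Note that the
# wrapped object must stay referenced (as the global callback_func does above)
# or it is garbage collected while the DLL still holds its address.
def _example_callback_pointer():
    from ctypes import CFUNCTYPE, c_char_p, c_int, c_void_p, cast

    @CFUNCTYPE(c_int, c_char_p)
    def demo_cb(msg):
        return len(msg or b"")

    return cast(demo_cb, c_void_p).value  # integer address a C caller could use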
def initialize(callback=True):
    """Initialize (optionally registering the callback)."""
callback_int = callback_Add() if callback else 0
    # register the callback function
N_initialize = N_dll.N_initialize
r = N_initialize(callback_int)
return string_at(r).decode("gbk")
def login(ID, Uin, Password, Guid=None):
    """Regular (password) login."""
Guid = Guid or ""
N_Login = N_dll.N_Login
N_Login.argtypes = [c_int, c_char_p, c_char_p, c_char_p]
result = string_at(
N_Login(ID, Uin.encode("gbk"), Password.encode("gbk"), Guid.encode("gbk"))
)
return result.decode("gbk")
def login_tailless(ID, TokenA):
    """Tailless-mode login (log in with an existing TokenA)."""
N_login_tailless = N_dll.N_login_tailless
r = N_login_tailless(c_int(ID), c_char_p(TokenA.encode("gbk")))
return string_at(r).decode("gbk")
def login_Submit_slider(ID, Ticket):
    """Submit the slider-captcha ticket."""
N_login_Submit_slider = N_dll.N_login_Submit_slider
print(N_login_Submit_slider)
r = N_login_Submit_slider(ID, c_char_p(Ticket.encode("gbk")))
return string_at(r).decode("gbk")
def login_Send_verification_to_the_phone(ID):
    """Send a verification code to the phone."""
N_login_Send_verification_to_the_phone = (
N_dll.N_login_Send_verification_to_the_phone
)
r = N_login_Send_verification_to_the_phone(ID)
return string_at(r).decode("gbk")
def login_Submit_verificationcode(ID, code):
    """Submit the device-lock verification code."""
N_login_Submit_verificationcode = N_dll.N_login_Submit_verificationcode
r = N_login_Submit_verificationcode(ID, c_char_p(code.encode("gbk")))
return string_at(r).decode("gbk")
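# --- Illustrative login-flow sketch (editor addition) ---
# Rough order of calls using only the wrappers defined in this module.  The
# exact contents of the returned strings (success flags, slider tickets, ...)
# depend on N.dll and are not documented here, so the branching below is a guess.
#
#   print(initialize())                        # load the DLL and register the callback
#   run_id = GetRun_ID()                       # allocate a session slot (1..200)
#   result = login(run_id, "10001", "password")
#   if "ticket" in result:                     # hypothetical slider-captcha check
#       result = login_Submit_slider(run_id, input("ticket: "))
#   elif "sms" in result:                      # hypothetical device-lock check
#       login_Send_verification_to_the_phone(run_id)
#       result = login_Submit_verificationcode(run_id, input("code: "))
#   token_a = GetTokenA(run_id)                # reusable later via login_tailless()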
def Scan_code_authorization(ID, k, TokenA):
    """QR-code scan authorization."""
N_Scan_code_authorization = N_dll.N_Scan_code_authorization
r = N_Scan_code_authorization(
ID, c_char_p(k.encode("gbk")), c_char_p(TokenA.encode("gbk"))
)
return string_at(r).decode("gbk")
def Scan_code_authorization_new(ID, k, TokenA, _Type):
    """QR-code scan authorization
    Type=0 scan
    Type=1 allow the authorization
"""
N_Scan_code_authorization_new = N_dll.N_Scan_code_authorization_new
r = N_Scan_code_authorization_new(
ID, c_char_p(k.encode("gbk")), c_char_p(TokenA.encode("gbk")), c_int(_Type)
)
return string_at(r).decode("gbk")
def Scan_code_assist(ID, str_url):
    """QR-code scan - assisted verification."""
N_Scan_code_assist = N_dll.N_Scan_code_assist
r = N_Scan_code_assist(ID, c_char_p(str_url.encode("gbk")))
return string_at(r).decode("gbk")
def Refresh_token(ID):
"""
    Refresh the token; on success the new decrypted login packet is returned, and the new TokenA can also be fetched via GetTokenA
"""
N_login_Refresh_token = N_dll.N_login_Refresh_token
r = N_login_Refresh_token(ID)
return string_at(r).decode("gbk")
def GetTokenA(ID):
    """Get the TokenA for the current run ID."""
N_GetTokenA = N_dll.N_GetTokenA
r = N_GetTokenA(ID)
return string_at(r).decode("gbk")
def Group_Get_condition(ID, Group):
    """Get group conditions."""
N_Group_Get_condition = N_dll.N_Group_Get_condition
r = N_Group_Get_condition(ID, c_int64(Group))
return string_at(r).decode("gbk")
def N_subscribe_unfollow(ID, Target):
"""
    Unfollow a subscription account
    2720152058 QQ Team
    1770946116 Security Center
"""
N_subscribe_unfollow = N_dll.N_subscribe_unfollow
r = N_subscribe_unfollow(ID, c_int64(Target))
return string_at(r).decode("gbk")
def AS_Get_login_infor(ID, type_):
"""
    Account security - get login info
    1 online devices 2 historical devices 3 online and historical, not distinguished
"""
N_AS_Get_login_infor = N_dll.N_AS_Get_login_infor
r = N_AS_Get_login_infor(ID, c_int(type_))
return string_at(r).decode("gbk")
def AS_Del_login_Infor(ID, target):
"""
    Account security - delete device info
    target is the j7 field from the device info obtained above
"""
N_AS_Del_login_Infor = N_dll.N_AS_Del_login_Infor
r = N_AS_Del_login_Infor(ID, c_char_p(target))
return string_at(r).decode("gbk")
def auth_get_list(ID, num):
    """Get the authorization list."""
N_auth_get_list = N_dll.N_auth_get_list
r = N_auth_get_list(ID, c_int(num))
return string_at(r).decode("gbk")
def Get_Phone(ID):
    """Get the bound phone number."""
N_Get_Phone = N_dll.N_Get_Phone
r = N_Get_Phone(ID)
return string_at(r).decode("gbk")
def TCP_Send(ID, data, wait, ssoseq):
    """Send data over TCP."""
    N_TCP_Send = N_dll.N_TCP_Send
    # NOTE: the argument wrapping below is a best guess; the original source
    # passed all three values into a single c_char_p(...) call, which raises a TypeError.
    r = N_TCP_Send(ID, c_char_p(data), c_int(wait), c_int(ssoseq))
return string_at(r).decode("gbk")
def Get_version():
    """Get the version number."""
r = N_dll.Get_Version_infor()
return string_at(r).decode("gbk")
# initialize by default on import
print(initialize())
/Montreal-Forced-Aligner-3.0.0a3.tar.gz/Montreal-Forced-Aligner-3.0.0a3/montreal_forced_aligner/corpus/acoustic_corpus.py
from __future__ import annotations
import logging
import os
import sys
import threading
import time
import typing
from abc import ABCMeta
from multiprocessing.pool import ThreadPool
from pathlib import Path
from queue import Empty, Queue
from typing import List, Optional
import sqlalchemy
from kalpy.data import KaldiMapping
from kalpy.feat.cmvn import CmvnComputer
from kalpy.feat.data import FeatureArchive
from kalpy.utils import kalpy_logger
from tqdm.rich import tqdm
from montreal_forced_aligner import config
from montreal_forced_aligner.abc import MfaWorker
from montreal_forced_aligner.corpus.base import CorpusMixin
from montreal_forced_aligner.corpus.classes import FileData
from montreal_forced_aligner.corpus.features import (
CalcFmllrArguments,
CalcFmllrFunction,
ComputeVadFunction,
FeatureConfigMixin,
FinalFeatureArguments,
FinalFeatureFunction,
MfccArguments,
MfccFunction,
VadArguments,
)
from montreal_forced_aligner.corpus.helper import find_exts
from montreal_forced_aligner.corpus.multiprocessing import (
AcousticDirectoryParser,
CorpusProcessWorker,
)
from montreal_forced_aligner.data import DatabaseImportData, PhoneType, WorkflowType
from montreal_forced_aligner.db import (
Corpus,
CorpusWorkflow,
File,
Phone,
PhoneInterval,
SoundFile,
Speaker,
TextFile,
Utterance,
bulk_update,
)
from montreal_forced_aligner.dictionary.mixins import DictionaryMixin
from montreal_forced_aligner.dictionary.multispeaker import MultispeakerDictionaryMixin
from montreal_forced_aligner.exceptions import (
FeatureGenerationError,
SoundFileError,
TextGridParseError,
TextParseError,
)
from montreal_forced_aligner.helper import load_scp, mfa_open
from montreal_forced_aligner.textgrid import parse_aligned_textgrid
from montreal_forced_aligner.utils import Counter, run_kaldi_function
__all__ = [
"AcousticCorpusMixin",
"AcousticCorpus",
"AcousticCorpusWithPronunciations",
"AcousticCorpusPronunciationMixin",
]
logger = logging.getLogger("mfa")
class AcousticCorpusMixin(CorpusMixin, FeatureConfigMixin, metaclass=ABCMeta):
"""
Mixin class for acoustic corpora
Parameters
----------
audio_directory: str
Extra directory to look for audio files
See Also
--------
:class:`~montreal_forced_aligner.corpus.base.CorpusMixin`
For corpus parsing parameters
:class:`~montreal_forced_aligner.corpus.features.FeatureConfigMixin`
For feature generation parameters
Attributes
----------
sound_file_errors: list[str]
List of sound files with errors in loading
stopped: :class:`~threading.Event`
Stop check for loading the corpus
"""
def __init__(self, audio_directory: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.audio_directory = audio_directory
self.sound_file_errors = []
self.stopped = threading.Event()
self.features_generated = False
self.transcription_done = False
self.alignment_evaluation_done = False
def has_alignments(self, workflow_id: typing.Optional[int] = None) -> bool:
with self.session() as session:
if workflow_id is None:
check = session.query(PhoneInterval).limit(1).first() is not None
else:
if isinstance(workflow_id, int):
check = (
session.query(CorpusWorkflow.alignments_collected)
.filter(CorpusWorkflow.id == workflow_id)
.scalar()
)
else:
check = (
session.query(CorpusWorkflow.alignments_collected)
.filter(CorpusWorkflow.workflow_type == workflow_id)
.scalar()
)
return check
def has_ivectors(self) -> bool:
with self.session() as session:
check = (
session.query(Corpus)
.filter(Corpus.ivectors_calculated == True) # noqa
.limit(1)
.first()
is not None
)
return check
def has_xvectors(self) -> bool:
with self.session() as session:
check = (
session.query(Corpus)
.filter(Corpus.xvectors_loaded == True) # noqa
.limit(1)
.first()
is not None
)
return check
def has_any_ivectors(self) -> bool:
with self.session() as session:
check = (
session.query(Corpus)
.filter(
sqlalchemy.or_(
Corpus.ivectors_calculated == True, Corpus.xvectors_loaded == True # noqa
)
)
.limit(1)
.first()
is not None
)
return check
@property
def no_transcription_files(self) -> List[str]:
"""List of sound files without text files"""
with self.session() as session:
files = session.query(SoundFile.sound_file_path).filter(
~sqlalchemy.exists().where(SoundFile.file_id == TextFile.file_id)
)
return [x[0] for x in files]
@property
def transcriptions_without_wavs(self) -> List[str]:
"""List of text files without sound files"""
with self.session() as session:
files = session.query(TextFile.text_file_path).filter(
~sqlalchemy.exists().where(SoundFile.file_id == TextFile.file_id)
)
return [x[0] for x in files]
def inspect_database(self) -> None:
"""Check if a database file exists and create the necessary metadata"""
self.initialize_database()
with self.session() as session:
corpus = session.query(Corpus).first()
if corpus:
self.imported = corpus.imported
self.features_generated = corpus.features_generated
self.text_normalized = corpus.text_normalized
else:
session.add(
Corpus(
name=self.data_source_identifier,
path=self.corpus_directory,
data_directory=self.corpus_output_directory,
)
)
session.commit()
def load_reference_alignments(self, reference_directory: Path) -> None:
"""
Load reference alignments to use in alignment evaluation from a directory
Parameters
----------
reference_directory: :class:`~pathlib.Path`
Directory containing reference alignments
"""
self.create_new_current_workflow(WorkflowType.reference)
workflow = self.current_workflow
if workflow.alignments_collected:
logger.info("Reference alignments already loaded!")
return
logger.info("Loading reference files...")
indices = []
jobs = []
reference_intervals = []
with tqdm(total=self.num_files, disable=config.QUIET) as pbar, self.session() as session:
phone_mapping = {}
max_id = 0
interval_id = session.query(sqlalchemy.func.max(PhoneInterval.id)).scalar()
if not interval_id:
interval_id = 0
interval_id += 1
for p, p_id in session.query(Phone.phone, Phone.id):
phone_mapping[p] = p_id
if p_id > max_id:
max_id = p_id
new_phones = []
for root, _, files in os.walk(reference_directory, followlinks=True):
root_speaker = os.path.basename(root)
for f in files:
if f.endswith(".TextGrid"):
file_name = f.replace(".TextGrid", "")
file_id = session.query(File.id).filter_by(name=file_name).scalar()
if not file_id:
continue
if config.USE_MP:
indices.append(file_id)
jobs.append((os.path.join(root, f), root_speaker))
else:
intervals = parse_aligned_textgrid(os.path.join(root, f), root_speaker)
utterances = (
session.query(
Utterance.id, Speaker.name, Utterance.begin, Utterance.end
)
.join(Utterance.speaker)
.filter(Utterance.file_id == file_id)
.order_by(Utterance.begin)
)
for u_id, speaker_name, begin, end in utterances:
if speaker_name not in intervals:
continue
while intervals[speaker_name]:
interval = intervals[speaker_name].pop(0)
dur = interval.end - interval.begin
mid_point = interval.begin + (dur / 2)
if begin <= mid_point <= end:
if interval.label not in phone_mapping:
max_id += 1
phone_mapping[interval.label] = max_id
new_phones.append(
{
"id": max_id,
"mapping_id": max_id - 1,
"phone": interval.label,
"kaldi_label": interval.label,
"phone_type": PhoneType.extra,
}
)
reference_intervals.append(
{
"id": interval_id,
"begin": interval.begin,
"end": interval.end,
"phone_id": phone_mapping[interval.label],
"utterance_id": u_id,
"workflow_id": workflow.id,
}
)
interval_id += 1
if mid_point > end:
intervals[speaker_name].insert(0, interval)
break
pbar.update(1)
if config.USE_MP:
with ThreadPool(config.NUM_JOBS) as pool:
gen = pool.starmap(parse_aligned_textgrid, jobs)
for i, intervals in enumerate(gen):
pbar.update(1)
file_id = indices[i]
utterances = (
session.query(
Utterance.id, Speaker.name, Utterance.begin, Utterance.end
)
.join(Utterance.speaker)
.filter(Utterance.file_id == file_id)
.order_by(Utterance.begin)
)
for u_id, speaker_name, begin, end in utterances:
if speaker_name not in intervals:
continue
while intervals[speaker_name]:
interval = intervals[speaker_name].pop(0)
dur = interval.end - interval.begin
mid_point = interval.begin + (dur / 2)
if begin <= mid_point <= end:
if interval.label not in phone_mapping:
max_id += 1
phone_mapping[interval.label] = max_id
new_phones.append(
{
"id": max_id,
"mapping_id": max_id - 1,
"phone": interval.label,
"kaldi_label": interval.label,
"phone_type": PhoneType.extra,
}
)
reference_intervals.append(
{
"id": interval_id,
"begin": interval.begin,
"end": interval.end,
"phone_id": phone_mapping[interval.label],
"utterance_id": u_id,
"workflow_id": workflow.id,
}
)
interval_id += 1
if mid_point > end:
intervals[speaker_name].insert(0, interval)
break
if new_phones:
session.execute(sqlalchemy.insert(Phone.__table__), new_phones)
session.commit()
session.execute(sqlalchemy.insert(PhoneInterval.__table__), reference_intervals)
session.query(CorpusWorkflow).filter(CorpusWorkflow.id == workflow.id).update(
{CorpusWorkflow.done: True, CorpusWorkflow.alignments_collected: True}
)
session.commit()
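    # --- Illustrative note (editor addition) ---
    # Both branches above attach a reference phone interval to an utterance by
    # checking whether the interval's midpoint falls inside the utterance span;
    # stripped of the database bookkeeping, the rule is simply:
    #
    #   mid_point = interval.begin + (interval.end - interval.begin) / 2
    #   belongs = utterance_begin <= mid_point <= utterance_end
    #
    # Intervals whose midpoint lies past the utterance end are pushed back and
    # retried against the next utterance of the same speaker.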
def load_corpus(self) -> None:
"""
Load the corpus
"""
self.initialize_database()
self._load_corpus()
self._create_dummy_dictionary()
self.initialize_jobs()
self.normalize_text()
self.generate_features()
def reset_features(self):
with self.session() as session:
session.execute(
sqlalchemy.update(Corpus).values(
ivectors_calculated=False,
plda_calculated=False,
xvectors_loaded=False,
features_generated=False,
)
)
session.execute(
sqlalchemy.update(Utterance).values(
ivector=None, features=None, xvector=None, plda_vector=None
)
)
session.execute(
sqlalchemy.update(Speaker).values(
cmvn=None, fmllr=None, ivector=None, xvector=None, plda_vector=None
)
)
session.commit()
paths = [
self.output_directory.joinpath("cmvn.ark"),
self.output_directory.joinpath("cmvn.scp"),
self.output_directory.joinpath("feats.scp"),
self.output_directory.joinpath("ivectors.scp"),
]
for path in paths:
path.unlink(missing_ok=True)
for j in self.jobs:
paths = [
j.construct_path(self.split_directory, "cmvn", "scp"),
j.construct_path(self.split_directory, "ivectors", "scp"),
j.construct_path(self.split_directory, "ivectors", "ark"),
]
for path in paths:
path.unlink(missing_ok=True)
for d_id in j.dictionary_ids:
paths = [
j.construct_path(self.split_directory, "trans", "scp", d_id),
j.construct_path(self.split_directory, "trans", "ark", d_id),
j.construct_path(self.split_directory, "cmvn", "scp", d_id),
j.construct_path(self.split_directory, "feats", "scp", d_id),
j.construct_path(self.split_directory, "feats", "ark", d_id),
j.construct_path(self.split_directory, "final_features", "scp", d_id),
j.construct_path(self.split_directory, "final_features", "ark", d_id),
]
for path in paths:
path.unlink(missing_ok=True)
def generate_final_features(self) -> None:
"""
Generate features for the corpus
"""
logger.info("Generating final features...")
time_begin = time.time()
log_directory = self.split_directory.joinpath("log")
os.makedirs(log_directory, exist_ok=True)
arguments = self.final_feature_arguments()
with tqdm(total=self.num_utterances, disable=config.QUIET) as pbar:
for _ in run_kaldi_function(FinalFeatureFunction, arguments, pbar.update):
pass
with self.session() as session:
update_mapping = {}
session.query(Utterance).update({"ignored": True})
session.commit()
for j in self.jobs:
with mfa_open(j.feats_scp_path, "r") as f:
for line in f:
line = line.strip()
if line == "":
continue
f = line.split(maxsplit=1)
utt_id = int(f[0].split("-")[-1])
feats = f[1]
update_mapping[utt_id] = {
"id": utt_id,
"features": feats,
"ignored": False,
}
bulk_update(session, Utterance, list(update_mapping.values()))
session.commit()
non_ignored_check = (
session.query(Utterance).filter(Utterance.ignored == False).first() # noqa
)
if non_ignored_check is None:
raise FeatureGenerationError(
"No utterances had features, please check the logs for errors."
)
ignored_utterances = (
session.query(
SoundFile.sound_file_path,
Speaker.name,
Utterance.begin,
Utterance.end,
Utterance.text,
)
.join(Utterance.speaker)
.join(Utterance.file)
.join(File.sound_file)
.filter(Utterance.ignored == True) # noqa
)
ignored_count = 0
for sound_file_path, speaker_name, begin, end, text in ignored_utterances:
logger.debug(f" - Ignored File: {sound_file_path}")
logger.debug(f" - Speaker: {speaker_name}")
logger.debug(f" - Begin: {begin}")
logger.debug(f" - End: {end}")
logger.debug(f" - Text: {text}")
ignored_count += 1
if ignored_count:
logger.warning(
f"There were {ignored_count} utterances ignored due to an issue in feature generation, see the log file for full "
"details or run `mfa validate` on the corpus."
)
logger.debug(f"Generating final features took {time.time() - time_begin:.3f} seconds")
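    # --- Illustrative note (editor addition) ---
    # Each line of a Kaldi feats.scp maps an utterance key to the location of
    # its feature matrix inside an archive.  The parsing loop above splits the
    # line once and recovers the integer utterance id from the tail of the key
    # (MFA keys look like "<speaker_id>-<utterance_id>"), e.g. for the
    # hypothetical line:
    #
    #   line = "1-42 /tmp/mfa/split1/final_features.1.ark:12345"
    #   key, feats = line.split(maxsplit=1)
    #   utt_id = int(key.split("-")[-1])    # -> 42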
def generate_features(self) -> None:
"""
Generate features for the corpus
"""
with self.session() as session:
final_features_check = session.query(Corpus).first().features_generated
if final_features_check:
self.features_generated = True
logger.info("Features already generated.")
return
feature_check = (
session.query(Utterance).filter(Utterance.features != None).first() # noqa
is not None
)
if self.feature_type == "mfcc" and not feature_check:
self.mfcc()
self.combine_feats()
if self.uses_cmvn:
logger.info("Calculating CMVN...")
self.calc_cmvn()
if self.uses_voiced:
self.compute_vad()
self.generate_final_features()
self._write_feats()
self.features_generated = True
with self.session() as session:
session.query(Corpus).update({"features_generated": True})
session.commit()
self.create_corpus_split()
def create_corpus_split(self) -> None:
"""Create the split directory for the corpus"""
with self.session() as session:
c = session.query(Corpus).first()
c.current_subset = 0
session.commit()
logger.info("Creating corpus split...")
super().create_corpus_split()
def compute_vad_arguments(self) -> List[VadArguments]:
"""
Generate Job arguments for :class:`~montreal_forced_aligner.corpus.features.ComputeVadFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.corpus.features.VadArguments`]
Arguments for processing
"""
return [
VadArguments(
j.id,
getattr(self, "session", ""),
self.split_directory.joinpath("log", f"compute_vad.{j.id}.log"),
self.vad_options,
)
for j in self.jobs
]
def calc_fmllr_arguments(self, iteration: Optional[int] = None) -> List[CalcFmllrArguments]:
"""
Generate Job arguments for :class:`~montreal_forced_aligner.corpus.features.CalcFmllrFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.corpus.features.CalcFmllrArguments`]
Arguments for processing
"""
base_log = "calc_fmllr"
if iteration is not None:
base_log += f".{iteration}"
arguments = []
thread_lock = threading.Lock()
for j in self.jobs:
feat_strings = {}
for d_id in j.dictionary_ids:
feat_strings[d_id] = j.construct_feature_proc_string(
self.working_directory,
d_id,
self.feature_options["uses_splices"],
self.feature_options["splice_left_context"],
self.feature_options["splice_right_context"],
self.feature_options["uses_speaker_adaptation"],
)
arguments.append(
CalcFmllrArguments(
j.id,
getattr(self, "session", ""),
self.working_log_directory.joinpath(f"{base_log}.{j.id}.log"),
self.working_directory,
self.alignment_model_path,
self.model_path,
self.fmllr_options,
thread_lock,
)
)
return arguments
def mfcc_arguments(self) -> List[MfccArguments]:
"""
Generate Job arguments for :class:`~montreal_forced_aligner.corpus.features.MfccFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.corpus.features.MfccArguments`]
Arguments for processing
"""
return [
MfccArguments(
j.id,
self.session,
self.split_directory.joinpath("log", f"make_mfcc.{j.id}.log"),
self.split_directory,
self.mfcc_computer,
self.pitch_computer,
)
for j in self.jobs
]
def final_feature_arguments(self) -> List[FinalFeatureArguments]:
"""
Generate Job arguments for :class:`~montreal_forced_aligner.corpus.features.MfccFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.corpus.features.MfccArguments`]
Arguments for processing
"""
return [
FinalFeatureArguments(
j.id,
self.session,
self.split_directory.joinpath("log", f"generate_final_features.{j.id}.log"),
self.split_directory,
self.uses_cmvn,
getattr(self, "sliding_cmvn", False),
self.uses_voiced,
getattr(self, "subsample", None),
)
for j in self.jobs
]
def mfcc(self) -> None:
"""
Multiprocessing function that converts sound files into MFCCs.
See :kaldi_docs:`feat` for an overview on feature generation in Kaldi.
See Also
--------
:class:`~montreal_forced_aligner.corpus.features.MfccFunction`
Multiprocessing helper function for each job
:meth:`.AcousticCorpusMixin.mfcc_arguments`
Job method for generating arguments for helper function
:kaldi_steps:`make_mfcc`
Reference Kaldi script
"""
logger.info("Generating MFCCs...")
begin = time.time()
log_directory = self.split_directory.joinpath("log")
os.makedirs(log_directory, exist_ok=True)
arguments = self.mfcc_arguments()
with tqdm(total=self.num_utterances, disable=config.QUIET) as pbar:
for _ in run_kaldi_function(MfccFunction, arguments, pbar.update):
pass
logger.debug(f"Generating MFCCs took {time.time() - begin:.3f} seconds")
def calc_cmvn(self) -> None:
"""
Calculate CMVN statistics for speakers
See Also
--------
:kaldi_src:`compute-cmvn-stats`
Relevant Kaldi binary
"""
self._write_spk2utt()
spk2utt_path = self.corpus_output_directory.joinpath("spk2utt.scp")
feats_scp_path = self.corpus_output_directory.joinpath("feats.scp")
cmvn_ark_path = self.corpus_output_directory.joinpath("cmvn.ark")
log_path = self.features_log_directory.joinpath("cmvn.log")
with kalpy_logger("kalpy.cmvn", log_path) as cmvn_logger:
cmvn_logger.info(f"Reading features from: {feats_scp_path}")
cmvn_logger.info(f"Reading spk2utt from: {spk2utt_path}")
spk2utt = KaldiMapping(list_mapping=True)
spk2utt.load(spk2utt_path)
mfcc_archive = FeatureArchive(feats_scp_path)
computer = CmvnComputer()
computer.export_cmvn(cmvn_ark_path, mfcc_archive, spk2utt, write_scp=True)
mfcc_archive.close()
update_mapping = []
cmvn_scp = self.corpus_output_directory.joinpath("cmvn.scp")
with self.session() as session:
for s, cmvn in load_scp(cmvn_scp).items():
if isinstance(cmvn, list):
cmvn = " ".join(cmvn)
update_mapping.append({"id": int(s), "cmvn": cmvn})
bulk_update(session, Speaker, update_mapping)
session.commit()
for j in self.jobs:
query = (
session.query(Speaker.id, Speaker.cmvn)
.join(Speaker.utterances)
.filter(Speaker.cmvn != None, Utterance.job_id == j.id) # noqa
.distinct()
)
with mfa_open(j.construct_path(self.split_directory, "cmvn", "scp"), "w") as f:
for s_id, cmvn in sorted(query, key=lambda x: str(x)):
f.write(f"{s_id} {cmvn}\n")
def calc_fmllr(self, iteration: Optional[int] = None) -> None:
"""
Multiprocessing function that computes speaker adaptation transforms via
feature-space Maximum Likelihood Linear Regression (fMLLR).
See Also
--------
:class:`~montreal_forced_aligner.corpus.features.CalcFmllrFunction`
Multiprocessing helper function for each job
:meth:`.AcousticCorpusMixin.calc_fmllr_arguments`
Job method for generating arguments for the helper function
:kaldi_steps:`align_fmllr`
Reference Kaldi script
:kaldi_steps:`train_sat`
Reference Kaldi script
"""
begin = time.time()
logger.info("Calculating fMLLR for speaker adaptation...")
arguments = self.calc_fmllr_arguments(iteration=iteration)
with tqdm(total=self.num_speakers, disable=config.QUIET) as pbar:
for _ in run_kaldi_function(CalcFmllrFunction, arguments, pbar.update):
pass
self.uses_speaker_adaptation = True
update_mapping = []
if not config.SINGLE_SPEAKER:
for j in self.jobs:
for d_id in j.dictionary_ids:
scp_p = j.construct_path(self.split_directory, "trans", "scp", d_id)
with mfa_open(scp_p) as f:
for line in f:
line = line.strip()
speaker, ark = line.split(maxsplit=1)
speaker = int(speaker)
update_mapping.append({"id": speaker, "fmllr": ark})
with self.session() as session:
bulk_update(session, Speaker, update_mapping)
session.commit()
logger.debug(f"Fmllr calculation took {time.time() - begin:.3f} seconds")
def compute_vad(self) -> None:
"""
Compute Voice Activity Detection features over the corpus
See Also
--------
:class:`~montreal_forced_aligner.corpus.features.ComputeVadFunction`
Multiprocessing helper function for each job
:meth:`.AcousticCorpusMixin.compute_vad_arguments`
Job method for generating arguments for helper function
"""
with self.session() as session:
c = session.query(Corpus).first()
if c.vad_calculated:
logger.info("VAD already computed, skipping!")
return
begin = time.time()
logger.info("Computing VAD...")
arguments = self.compute_vad_arguments()
with tqdm(total=self.num_utterances, disable=config.QUIET) as pbar:
for _ in run_kaldi_function(ComputeVadFunction, arguments, pbar.update):
pass
vad_lines = []
utterance_mapping = []
for j in self.jobs:
vad_scp_path = j.construct_path(self.split_directory, "vad", "scp")
with mfa_open(vad_scp_path) as inf:
for line in inf:
vad_lines.append(line)
utt_id, ark = line.strip().split(maxsplit=1)
utt_id = int(utt_id.split("-")[-1])
utterance_mapping.append({"id": utt_id, "vad_ark": ark})
with self.session() as session:
bulk_update(session, Utterance, utterance_mapping)
session.query(Corpus).update({Corpus.vad_calculated: True})
session.commit()
with mfa_open(self.corpus_output_directory.joinpath("vad.scp"), "w") as outf:
for line in sorted(vad_lines, key=lambda x: x.split(maxsplit=1)[0]):
outf.write(line)
logger.debug(f"VAD computation took {time.time() - begin:.3f} seconds")
def combine_feats(self) -> None:
"""
Combine feature generation results and store relevant information
"""
lines = []
for j in self.jobs:
with mfa_open(j.feats_scp_path) as f:
for line in f:
lines.append(line)
with open(self.corpus_output_directory.joinpath("feats.scp"), "w", encoding="utf8") as f:
for line in sorted(lines, key=lambda x: x.split(maxsplit=1)[0]):
f.write(line)
def _write_feats(self) -> None:
"""Write feats scp file for Kaldi"""
with self.session() as session, open(
self.corpus_output_directory.joinpath("feats.scp"), "w", encoding="utf8"
) as f:
utterances = (
session.query(Utterance.kaldi_id, Utterance.features)
.filter_by(ignored=False)
.order_by(Utterance.kaldi_id)
)
for u_id, features in utterances:
f.write(f"{u_id} {features}\n")
def get_feat_dim(self) -> int:
"""
Calculate the feature dimension for the corpus
Returns
-------
int
Dimension of feature vectors
"""
job = self.jobs[0]
dict_id = None
if job.dictionary_ids:
dict_id = self.jobs[0].dictionary_ids[0]
feature_archive = job.construct_feature_archive(self.working_directory, dict_id)
feat_dim = None
for _, feats in feature_archive:
feat_dim = feats.NumCols()
break
return feat_dim
def _load_corpus_from_source_mp(self) -> None:
"""
Load a corpus using multiprocessing
"""
begin_time = time.process_time()
job_queue = Queue()
return_queue = Queue()
finished_adding = threading.Event()
stopped = threading.Event()
file_counts = Counter()
error_dict = {}
procs = []
parser = AcousticDirectoryParser(
self.corpus_directory,
job_queue,
self.audio_directory,
stopped,
finished_adding,
file_counts,
)
parser.start()
for i in range(config.NUM_JOBS):
p = CorpusProcessWorker(
i,
job_queue,
return_queue,
stopped,
finished_adding,
self.speaker_characters,
self.sample_frequency,
)
procs.append(p)
p.start()
last_poll = time.time() - 30
try:
with self.session() as session, tqdm(total=100, disable=config.QUIET) as pbar:
import_data = DatabaseImportData()
while True:
try:
file = return_queue.get(timeout=1)
if isinstance(file, tuple):
error_type = file[0]
error = file[1]
if error_type == "error":
error_dict[error_type] = error
else:
if error_type not in error_dict:
error_dict[error_type] = []
error_dict[error_type].append(error)
continue
if self.stopped.is_set():
continue
except Empty:
for proc in procs:
if not proc.finished_processing.is_set():
break
else:
break
continue
if time.time() - last_poll > 5:
pbar.total = file_counts.value()
last_poll = time.time()
pbar.update(1)
import_data.add_objects(self.generate_import_objects(file))
logger.debug(f"Processing queue: {time.process_time() - begin_time}")
if "error" in error_dict:
session.rollback()
raise error_dict["error"]
self._finalize_load(session, import_data)
for k in ["sound_file_errors", "decode_error_files", "textgrid_read_errors"]:
if hasattr(self, k):
if k in error_dict:
logger.info(
"There were some issues with files in the corpus. "
"Please look at the log file or run the validator for more information."
)
logger.debug(f"{k} showed {len(error_dict[k])} errors:")
if k in {"textgrid_read_errors", "sound_file_errors"}:
getattr(self, k).extend(error_dict[k])
for e in error_dict[k]:
logger.debug(f"{e.file_name}: {e.error}")
else:
logger.debug(", ".join(error_dict[k]))
setattr(self, k, error_dict[k])
except Exception as e:
if isinstance(e, KeyboardInterrupt):
logger.info(
"Detected ctrl-c, please wait a moment while we clean everything up..."
)
self.stopped.set_sigint_source()
self.stopped.set()
finished_adding.set()
while True:
try:
_ = job_queue.get(timeout=1)
if self.stopped.is_set():
continue
except Empty:
for proc in procs:
if not proc.finished_processing.is_set():
break
else:
break
try:
_ = return_queue.get(timeout=1)
_ = job_queue.get(timeout=1)
if self.stopped.is_set():
continue
except Empty:
for proc in procs:
if not proc.finished_processing.is_set():
break
else:
break
raise
finally:
parser.join()
for p in procs:
p.join()
if self.stopped.is_set():
logger.info(f"Stopped parsing early ({time.process_time() - begin_time} seconds)")
if self.stopped.source():
sys.exit(0)
else:
logger.debug(
f"Parsed corpus directory with {config.NUM_JOBS} jobs in {time.process_time() - begin_time} seconds"
)
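    # --- Illustrative note (editor addition) ---
    # The method above is a standard producer/consumer setup: one parser thread
    # fills job_queue, CorpusProcessWorker threads push parsed files (or error
    # tuples) onto return_queue, and the main thread drains return_queue until
    # every worker has set its finished_processing event.  The drain loop,
    # stripped of the MFA-specific bookkeeping, is roughly:
    #
    #   while True:
    #       try:
    #           item = return_queue.get(timeout=1)
    #       except Empty:
    #           if all(p.finished_processing.is_set() for p in procs):
    #               break
    #           continue
    #       handle(item)   # hypothetical per-file handler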
def _load_corpus_from_source(self) -> None:
"""
Load a corpus without using multiprocessing
"""
begin_time = time.time()
all_sound_files = {}
use_audio_directory = False
if self.audio_directory and os.path.exists(self.audio_directory):
use_audio_directory = True
for root, _, files in os.walk(self.audio_directory, followlinks=True):
if self.stopped.is_set():
return
exts = find_exts(files)
exts.wav_files = {k: os.path.join(root, v) for k, v in exts.wav_files.items()}
exts.other_audio_files = {
k: os.path.join(root, v) for k, v in exts.other_audio_files.items()
}
all_sound_files.update(exts.other_audio_files)
all_sound_files.update(exts.wav_files)
logger.debug(f"Walking through {self.corpus_directory}...")
with self.session() as session:
import_data = DatabaseImportData()
for root, _, files in os.walk(self.corpus_directory, followlinks=True):
exts = find_exts(files)
relative_path = (
root.replace(str(self.corpus_directory), "").lstrip("/").lstrip("\\")
)
if self.stopped.is_set():
return
if not use_audio_directory:
all_sound_files = {}
wav_files = {k: os.path.join(root, v) for k, v in exts.wav_files.items()}
other_audio_files = {
k: os.path.join(root, v) for k, v in exts.other_audio_files.items()
}
all_sound_files.update(other_audio_files)
all_sound_files.update(wav_files)
for file_name in exts.identifiers:
wav_path = None
transcription_path = None
if file_name in all_sound_files:
wav_path = all_sound_files[file_name]
if file_name in exts.lab_files:
lab_name = exts.lab_files[file_name]
transcription_path = os.path.join(root, lab_name)
elif file_name in exts.textgrid_files:
tg_name = exts.textgrid_files[file_name]
transcription_path = os.path.join(root, tg_name)
if wav_path is None: # Not a file for MFA
continue
try:
file = FileData.parse_file(
file_name,
wav_path,
transcription_path,
relative_path,
self.speaker_characters,
self.sample_frequency,
)
import_data.add_objects(self.generate_import_objects(file))
except TextParseError as e:
self.decode_error_files.append(e)
except TextGridParseError as e:
self.textgrid_read_errors.append(e)
except SoundFileError as e:
self.sound_file_errors.append(e)
self._finalize_load(session, import_data)
if self.decode_error_files or self.textgrid_read_errors:
logger.info(
"There were some issues with files in the corpus. "
"Please look at the log file or run the validator for more information."
)
if self.decode_error_files:
logger.debug(
f"There were {len(self.decode_error_files)} errors decoding text files:"
)
logger.debug(", ".join(self.decode_error_files))
if self.textgrid_read_errors:
                logger.debug(
                    f"There were {len(self.textgrid_read_errors)} errors reading TextGrid files:"
)
for e in self.textgrid_read_errors:
logger.debug(f"{e.file_name}: {e.error}")
logger.debug(f"Parsed corpus directory in {time.time() - begin_time:.3f} seconds")
class AcousticCorpusPronunciationMixin(
AcousticCorpusMixin, MultispeakerDictionaryMixin, metaclass=ABCMeta
):
"""
Mixin for acoustic corpora with Pronunciation dictionaries
See Also
--------
:class:`~montreal_forced_aligner.corpus.acoustic_corpus.AcousticCorpusMixin`
For corpus parsing parameters
:class:`~montreal_forced_aligner.dictionary.multispeaker.MultispeakerDictionaryMixin`
For dictionary parsing parameters
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def load_corpus(self) -> None:
"""
Load the corpus
"""
all_begin = time.time()
if self.dictionary_model is not None:
logger.debug(f"Using {self.phone_set_type}")
self.dictionary_setup()
logger.debug(f"Loaded dictionary in {time.time() - all_begin:.3f} seconds")
begin = time.time()
self._load_corpus()
logger.debug(f"Loaded corpus in {time.time() - begin:.3f} seconds")
begin = time.time()
self.initialize_jobs()
logger.debug(f"Initialized jobs in {time.time() - begin:.3f} seconds")
self.normalize_text()
begin = time.time()
self.write_lexicon_information()
logger.debug(f"Wrote lexicon information in {time.time() - begin:.3f} seconds")
begin = time.time()
self.generate_features()
logger.debug(f"Generated features in {time.time() - begin:.3f} seconds")
logger.debug(f"Setting up corpus took {time.time() - all_begin:.3f} seconds")
class AcousticCorpus(AcousticCorpusMixin, DictionaryMixin, MfaWorker):
"""
Standalone class for working with acoustic corpora and pronunciation dictionaries
Most functionality in MFA will use the :class:`~montreal_forced_aligner.corpus.acoustic_corpus.AcousticCorpusPronunciationMixin` class instead of this class.
See Also
--------
:class:`~montreal_forced_aligner.corpus.acoustic_corpus.AcousticCorpusPronunciationMixin`
For dictionary and corpus parsing parameters
:class:`~montreal_forced_aligner.abc.MfaWorker`
For MFA processing parameters
:class:`~montreal_forced_aligner.abc.TemporaryDirectoryMixin`
For temporary directory parameters
"""
def __init__(self, **kwargs):
super(AcousticCorpus, self).__init__(**kwargs)
@property
def identifier(self) -> str:
"""Identifier for the corpus"""
return self.data_source_identifier
@property
def output_directory(self) -> Path:
"""Root temporary directory to store corpus and dictionary files"""
return config.TEMPORARY_DIRECTORY.joinpath(self.identifier)
@property
def working_directory(self) -> Path:
"""Working directory to save temporary corpus and dictionary files"""
return self.corpus_output_directory
class AcousticCorpusWithPronunciations(AcousticCorpusPronunciationMixin, MfaWorker):
"""
Standalone class for parsing an acoustic corpus with a pronunciation dictionary
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@property
def identifier(self) -> str:
"""Identifier for the corpus"""
return self.data_source_identifier
@property
def output_directory(self) -> Path:
"""Root temporary directory to store corpus and dictionary files"""
return config.TEMPORARY_DIRECTORY.joinpath(self.identifier)
@property
def working_directory(self) -> Path:
"""Working directory to save temporary corpus and dictionary files"""
        return self.output_directory
/DJModels-0.0.6-py3-none-any.whl/djmodels/db/models/sql/subqueries.py
from djmodels.core.exceptions import FieldError
from djmodels.db import connections
from djmodels.db.models.query_utils import Q
from djmodels.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from djmodels.db.models.sql.query import Query
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""A DELETE SQL query."""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.alias_map = {table: self.alias_map[table]}
self.where = where
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
def delete_batch(self, pk_list, using):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
# number of objects deleted
num_deleted = 0
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
return num_deleted
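    # --- Illustrative note (editor addition) ---
    # delete_batch() slices the primary keys into chunks so each DELETE stays
    # below backend limits on query parameters.  The slicing pattern on its own
    # (GET_ITERATOR_CHUNK_SIZE is 100):
    #
    #   pk_list = list(range(250))
    #   batches = [pk_list[i:i + 100] for i in range(0, len(pk_list), 100)]
    #   # -> batches of 100, 100 and 50 ids, one DELETE ... WHERE pk IN (...) each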
def delete_qs(self, query, using):
"""
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying query.query.where to self.query; for
        complex queries, a subquery is used.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = tuple([t for t in innerq.alias_map if innerq.alias_refcount[t]])
if not innerq_used_tables or innerq_used_tables == tuple(self.alias_map):
# There is only the base table in use in the query.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return 0
return self.delete_batch(values, using)
else:
innerq.clear_select_clause()
innerq.select = [
pk.get_col(self.get_initial_alias())
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
"""An UPDATE SQL query."""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Run on initialization and at the end of chaining. Any attributes that
would normally be set in __init__() should go here instead.
"""
self.values = []
self.related_ids = None
self.related_updates = {}
def clone(self):
obj = super().clone()
obj.related_updates = self.related_updates.copy()
return obj
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in values.items():
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model is not self.get_meta().concrete_model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Append a sequence of (field, model, value) triples to the internal list
that will be used to generate the UPDATE query. Might be more usefully
called add_update_targets() to hint at the extra information here.
"""
for field, model, val in values_seq:
if hasattr(val, 'resolve_expression'):
# Resolve expressions here so that annotations are no longer needed
val = val.resolve_expression(self, allow_joins=False, for_save=True)
self.values.append((field, model, val))
def add_related_update(self, model, field, value):
"""
Add (name, value) to an update query for an ancestor model.
        Updates are coalesced so that only one update query per ancestor is run.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Return a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in self.related_updates.items():
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
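# --- Illustrative sketch (editor addition) ---
# add_related_update()/get_related_updates() above coalesce values per ancestor
# model with dict.setdefault, so a multi-table update issues one query per
# parent model.  The grouping idiom in isolation:
def _example_coalesce_related_updates():
    updates = {}
    for model, field, value in [("parent", "name", "a"), ("parent", "rank", 2)]:
        updates.setdefault(model, []).append((field, None, value))
    return updates  # {'parent': [('name', None, 'a'), ('rank', None, 2)]}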
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, ignore_conflicts=False, **kwargs):
super().__init__(*args, **kwargs)
self.fields = []
self.objs = []
self.ignore_conflicts = ignore_conflicts
def insert_values(self, fields, objs, raw=False):
self.fields = fields
self.objs = objs
self.raw = raw
class AggregateQuery(Query):
"""
Take another query as a parameter to the FROM clause and only select the
elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
query.subquery = True
        self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/base/FSWALKER.py
from JumpScale import j
import struct
try:
from REGEXTOOL import *
except:
pass
from .FS import *
class FSWalkerStats():
def __init__(self):
self.start=j.base.time.getTimeEpoch()
self.stop=0
self.sizeUncompressed={}
self.sizeCompressed={}
self.nr={}
self.duplicate={}
for i in ["D","F","L"]:
self.registerType(i)
self.sizeUncompressedTotal=0
self.sizeCompressedTotal=0
self.nrTotal=0
self.duplicateTotal=0
def registerType(self,ttype):
if not self.sizeUncompressed.has_key(ttype):
self.sizeUncompressed[ttype]=0
if not self.sizeCompressed.has_key(ttype):
self.sizeCompressed[ttype]=0
if not self.nr.has_key(ttype):
self.nr[ttype]=0
if not self.duplicate.has_key(ttype):
self.duplicate[ttype]=0
def callstop(self):
self.stop=j.base.time.getTimeEpoch()
self._getTotals()
def _getTotals(self):
sizeUncompressed=0
for key in self.sizeUncompressed.keys():
sizeUncompressed+=self.sizeUncompressed[key]
self.sizeUncompressedTotal=sizeUncompressed
sizeCompressed=0
for key in self.sizeCompressed.keys():
sizeCompressed+=self.sizeCompressed[key]
self.sizeCompressedTotal=sizeCompressed
nr=0
for key in self.nr.keys():
nr+=self.nr[key]
self.nrTotal=nr
duplicate=0
for key in self.duplicate.keys():
duplicate+=self.duplicate[key]
self.duplicateTotal=duplicate
def add2stat(self,ttype="F",sizeUncompressed=0,sizeCompressed=0,duplicate=False):
self.sizeUncompressed[ttype]+=sizeUncompressed
self.sizeCompressed[ttype]+=sizeCompressed
self.nr[ttype]+=1
if duplicate:
self.duplicate[ttype]+=1
def __repr__(self):
self.callstop()
duration=self.stop-self.start
        out="nrsecs:%s\n"%duration
        out+="nrfiles:%s\n"%self.nrTotal
out+="nrfilesDuplicate:%s\n"%self.duplicateTotal
sizeUncompressedTotal=(float(self.sizeUncompressedTotal)/1024/1024)
out+="size uncompressed:%s\n"%sizeUncompressedTotal
sizeCompressedTotal=(float(self.sizeCompressedTotal)/1024/1024)
out+="size compressed:%s\n"%sizeCompressedTotal
        out+="uncompressed send per sec in MB/sec: %s\n"% round(sizeUncompressedTotal/duration,2)
        out+="compressed send per sec in MB/sec: %s\n"% round(sizeCompressedTotal/duration,2)
        return out
__str__=__repr__
class LocalFS():
def abspath(self,path):
return os.path.abspath(path)
def isFile(self,path,followlinks=True):
return j.system.fs.isFile(path,followlinks)
def isDir(self,path,followlinks=True):
return j.system.fs.isDir(path,followlinks)
def isLink(self,path,junction=True):
return j.system.fs.isLink(path,junction)
def stat(self,path):
return os.stat(path)
def lstat(self,path):
return os.lstat(path)
def list(self,path):
return j.base.fs.list(path)
class FSWalker():
def __init__(self,filesystemobject=None):
self.stats=None
self.statsStart()
self.statsNr=0
self.statsSize=0
self.lastPath=""
if filesystemobject==None:
self.fs=LocalFS()
else:
self.fs=filesystemobject()
def log(self,msg):
print(msg)
def statsStart(self):
self.stats=FSWalkerStats()
def statsPrint(self):
print("lastpath:%s"%self.lastPath)
try:
print(str(self.stats))
except:
print('None')
def statsAdd(self,path="",ttype="F",sizeUncompressed=0,sizeCompressed=0,duplicate=False):
self.stats.add2stat(ttype=ttype,sizeUncompressed=sizeUncompressed,sizeCompressed=sizeCompressed,duplicate=duplicate)
self.statsNr+=1
self.statsSize+=sizeUncompressed
self.lastPath=path
if self.statsNr>2000 or self.statsSize>100000000:
self.statsPrint()
self.statsNr=0
self.statsSize=0
def _findhelper(self,arg,path):
arg.append(path)
def find(self,root, includeFolders=False,includeLinks=False, pathRegexIncludes={},pathRegexExcludes={},followlinks=False,\
childrenRegexExcludes=[".*/log/.*","/dev/.*","/proc/.*"],mdserverclient=None):
"""
@return {files:[],dirs:[],links:[],...$othertypes}
"""
result={}
result["F"]=[]
result["D"]=[]
result["L"]=[]
def processfile(path,stat,arg):
result["F"].append([path,stat])
def processdir(path,stat,arg):
result["D"].append([path,stat])
def processlink(src,dest,stat,arg):
result["L"].append([src,dest,stat])
def processother(path,stat,type,arg):
if result.has_key(type):
result[type]=[]
result[type].append([path,stat])
callbackFunctions={}
callbackFunctions["F"]=processfile
callbackFunctions["D"]=processdir
callbackFunctions["L"]=processlink
callbackFunctions["O"]=processother #type O is a generic callback which matches all not specified (will not match F,D,L)
callbackMatchFunctions=self.getCallBackMatchFunctions(pathRegexIncludes,pathRegexExcludes,includeFolders=includeFolders,includeLinks=includeLinks)
root = os.path.abspath(root)
self.walk(root,callbackFunctions,arg={},callbackMatchFunctions=callbackMatchFunctions,childrenRegexExcludes=childrenRegexExcludes,\
pathRegexIncludes=pathRegexIncludes,pathRegexExcludes=pathRegexExcludes,mdserverclient=mdserverclient)
return result
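    # --- Illustrative usage sketch (editor addition) ---
    # find() returns a dict keyed by entry type ("F" files, "D" dirs, "L" links),
    # each value being a list of [path, packed_stat] entries.  A hypothetical
    # call that collects only Python files while skipping log directories:
    #
    #   walker = j.base.fswalker.get()
    #   result = walker.find("/opt/project",
    #                        pathRegexIncludes={"F": [".*\\.py$"]},
    #                        pathRegexExcludes={"F": [".*/log/.*"]})
    #   python_files = [path for path, stat in result["F"]]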
def getCallBackMatchFunctions(self,pathRegexIncludes={},pathRegexExcludes={},includeFolders=True,includeLinks=True):
C="""
if pathRegexIncludes.has_key("$type") and not pathRegexExcludes.has_key("$type"):
def matchobj$type(path,arg,pathRegexIncludes,pathRegexExcludes):
return REGEXTOOL.matchPath(path,pathRegexIncludes["$type"],[])
elif not pathRegexIncludes.has_key("$type") and pathRegexExcludes.has_key("$type"):
def matchobj$type(path,arg,pathRegexIncludes,pathRegexExcludes):
return REGEXTOOL.matchPath(path,[],pathRegexExcludes["$type"])
elif pathRegexIncludes.has_key("$type") and pathRegexExcludes.has_key("$type"):
def matchobj$type(path,arg,pathRegexIncludes,pathRegexExcludes):
return REGEXTOOL.matchPath(path,pathRegexIncludes["$type"],pathRegexExcludes["$type"])
else:
matchobj$type=None
"""
for ttype in ["F","D","L"]:
C2=C.replace("$type",ttype)
exec(C2)
callbackMatchFunctions={}
if matchobjF!=None and (pathRegexIncludes.has_key("F") or pathRegexExcludes.has_key("F")):
callbackMatchFunctions["F"]=matchobjF
if includeFolders:
if matchobjD!=None and (pathRegexIncludes.has_key("D") or pathRegexExcludes.has_key("D")):
callbackMatchFunctions["D"]=matchobjD
if includeLinks:
if matchobjL!=None and (pathRegexIncludes.has_key("L") or pathRegexExcludes.has_key("L")):
callbackMatchFunctions["L"]=matchobjL
if pathRegexIncludes.has_key("O") or pathRegexExcludes.has_key("O"):
callbackMatchFunctions["O"]=matchobjO
return callbackMatchFunctions
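    # --- Illustrative note (editor addition) ---
    # The exec()-generated matchobjF/D/L functions above are just per-type
    # closures over the include/exclude regex lists.  An equivalent without the
    # string template would look roughly like:
    #
    #   def _make_matcher(ttype, includes, excludes):
    #       if ttype not in includes and ttype not in excludes:
    #           return None
    #       def match(path, arg, pathRegexIncludes, pathRegexExcludes):
    #           return REGEXTOOL.matchPath(path, includes.get(ttype, []),
    #                                      excludes.get(ttype, []))
    #       return match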
def walk(self,root,callbackFunctions={},arg=None,callbackMatchFunctions={},followlinks=False,\
childrenRegexExcludes=[".*/log/.*","/dev/.*","/proc/.*"],pathRegexIncludes={},pathRegexExcludes={},mdserverclient=None):
'''
Walk through filesystem and execute a method per file and dirname if the match function selected the item
Walk through all files and folders and other objects starting at root,
recursive by default, calling a given callback with a provided argument and file
path for every file & dir we could find.
To match the function use the callbackMatchFunctions which are separate for all types of objects (Dir=D, File=F, Link=L)
when it returns True the path will be further processed
Examples
========
        >>> def my_print(path,stat,arg):
        ...     print arg+path
        ...
        >>> def match(path,arg,pathRegexIncludes,pathRegexExcludes):
        ...     return True  # process this object, i.e. call my_print on the file
        ...
        >>> self.walk('/foo', {'F': my_print}, arg="Test: ", callbackMatchFunctions={'F': match})
        Test: /foo/file1
        Test: /foo/file2
        Test: /foo/file3
        Test: /foo/bar/file4
@param root: Filesystem root to crawl (string)
'''
#We want to work with full paths, even if a non-absolute path is provided
root = os.path.abspath(root)
if not self.fs.isDir(root):
raise ValueError('Root path for walk should be a folder')
# print "ROOT OF WALKER:%s"%root
if mdserverclient==None:
self._walkFunctional(root,callbackFunctions, arg,callbackMatchFunctions,followlinks,\
childrenRegexExcludes=childrenRegexExcludes,pathRegexIncludes=pathRegexIncludes,pathRegexExcludes=pathRegexExcludes)
else:
self._walkFunctionalMDS(root,callbackFunctions, arg,callbackMatchFunctions,followlinks,\
childrenRegexExcludes=childrenRegexExcludes,pathRegexIncludes=pathRegexIncludes,pathRegexExcludes=pathRegexExcludes)
def _walkFunctional(self,root,callbackFunctions={},arg=None,callbackMatchFunctions={},followlinks=False,\
childrenRegexExcludes=[],pathRegexIncludes={},pathRegexExcludes={}):
paths=self.fs.list(root)
for path2 in paths:
self.log("walker path:%s"% path2)
if self.fs.isFile(path2):
ttype="F"
elif self.fs.isLink(path2):
ttype="L"
elif self.fs.isDir(path2,followlinks):
ttype="D"
else:
raise RuntimeError("Can only detect files, dirs, links")
if not callbackMatchFunctions.has_key(ttype) or (callbackMatchFunctions.has_key(ttype) and callbackMatchFunctions[ttype](path2,arg,pathRegexIncludes,pathRegexExcludes)):
self.log("walker filepath:%s"% path2)
self.statsAdd(path=path2,ttype=ttype,sizeUncompressed=0,sizeCompressed=0,duplicate=False)
if callbackFunctions.has_key(ttype):
if ttype in "DF":
stat=self.fs.stat(path2)
statb=struct.pack("<IHHII",stat.st_mode,stat.st_gid,stat.st_uid,stat.st_size,stat.st_mtime)
callbackFunctions[ttype](path=path2,stat=statb,arg=arg)
else:
stat=self.fs.lstat(path2)
statb=struct.pack("<IHHII",stat.st_mode,stat.st_gid,stat.st_uid,stat.st_size,stat.st_mtime)
callbackFunctions[ttype](src=path2,dest=os.path.realpath(path2),arg=arg,stat=statb)
if ttype=="D":
if REGEXTOOL.matchPath(path2,pathRegexIncludes.get(ttype,[]) ,childrenRegexExcludes):
self._walkFunctional(path2,callbackFunctions, arg,callbackMatchFunctions,followlinks,\
childrenRegexExcludes=childrenRegexExcludes,pathRegexIncludes=pathRegexIncludes,pathRegexExcludes=pathRegexExcludes)
class FSWalkerFactory():
def get(self,filesystemobject=None):
return FSWalker(filesystemobject=filesystemobject)
j.base.fswalker=FSWalkerFactory()
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/npmlog/log.js
'use strict'
var Progress = require('are-we-there-yet')
var Gauge = require('gauge')
var EE = require('events').EventEmitter
var log = exports = module.exports = new EE()
var util = require('util')
var setBlocking = require('set-blocking')
var consoleControl = require('console-control-strings')
setBlocking(true)
var stream = process.stderr
Object.defineProperty(log, 'stream', {
set: function (newStream) {
stream = newStream
if (this.gauge) {
this.gauge.setWriteTo(stream, stream)
}
},
get: function () {
return stream
},
})
// by default, decide based on tty-ness.
var colorEnabled
log.useColor = function () {
return colorEnabled != null ? colorEnabled : stream.isTTY
}
log.enableColor = function () {
colorEnabled = true
this.gauge.setTheme({hasColor: colorEnabled, hasUnicode: unicodeEnabled})
}
log.disableColor = function () {
colorEnabled = false
this.gauge.setTheme({hasColor: colorEnabled, hasUnicode: unicodeEnabled})
}
// default level
log.level = 'info'
log.gauge = new Gauge(stream, {
enabled: false, // no progress bars unless asked
theme: {hasColor: log.useColor()},
template: [
{type: 'progressbar', length: 20},
{type: 'activityIndicator', kerning: 1, length: 1},
{type: 'section', default: ''},
':',
{type: 'logline', kerning: 1, default: ''},
],
})
log.tracker = new Progress.TrackerGroup()
// we track this separately as we may need to temporarily disable the
// display of the status bar for our own loggy purposes.
log.progressEnabled = log.gauge.isEnabled()
var unicodeEnabled
log.enableUnicode = function () {
unicodeEnabled = true
this.gauge.setTheme({hasColor: this.useColor(), hasUnicode: unicodeEnabled})
}
log.disableUnicode = function () {
unicodeEnabled = false
this.gauge.setTheme({hasColor: this.useColor(), hasUnicode: unicodeEnabled})
}
log.setGaugeThemeset = function (themes) {
this.gauge.setThemeset(themes)
}
log.setGaugeTemplate = function (template) {
this.gauge.setTemplate(template)
}
log.enableProgress = function () {
if (this.progressEnabled) {
return
}
this.progressEnabled = true
this.tracker.on('change', this.showProgress)
if (this._paused) {
return
}
this.gauge.enable()
}
log.disableProgress = function () {
if (!this.progressEnabled) {
return
}
this.progressEnabled = false
this.tracker.removeListener('change', this.showProgress)
this.gauge.disable()
}
var trackerConstructors = ['newGroup', 'newItem', 'newStream']
var mixinLog = function (tracker) {
// mixin the public methods from log into the tracker
  // (except: conflicts and ones we handle specially)
Object.keys(log).forEach(function (P) {
if (P[0] === '_') {
return
}
if (trackerConstructors.filter(function (C) {
return C === P
}).length) {
return
}
if (tracker[P]) {
return
}
if (typeof log[P] !== 'function') {
return
}
var func = log[P]
tracker[P] = function () {
return func.apply(log, arguments)
}
})
// if the new tracker is a group, make sure any subtrackers get
// mixed in too
if (tracker instanceof Progress.TrackerGroup) {
trackerConstructors.forEach(function (C) {
var func = tracker[C]
tracker[C] = function () {
return mixinLog(func.apply(tracker, arguments))
}
})
}
return tracker
}
// Add tracker constructors to the top level log object
trackerConstructors.forEach(function (C) {
log[C] = function () {
return mixinLog(this.tracker[C].apply(this.tracker, arguments))
}
})
log.clearProgress = function (cb) {
if (!this.progressEnabled) {
return cb && process.nextTick(cb)
}
this.gauge.hide(cb)
}
log.showProgress = function (name, completed) {
if (!this.progressEnabled) {
return
}
var values = {}
if (name) {
values.section = name
}
var last = log.record[log.record.length - 1]
if (last) {
values.subsection = last.prefix
var disp = log.disp[last.level] || last.level
var logline = this._format(disp, log.style[last.level])
if (last.prefix) {
logline += ' ' + this._format(last.prefix, this.prefixStyle)
}
logline += ' ' + last.message.split(/\r?\n/)[0]
values.logline = logline
}
values.completed = completed || this.tracker.completed()
this.gauge.show(values)
}.bind(log) // bind for use in tracker's on-change listener
// temporarily stop emitting, but don't drop
log.pause = function () {
this._paused = true
if (this.progressEnabled) {
this.gauge.disable()
}
}
log.resume = function () {
if (!this._paused) {
return
}
this._paused = false
var b = this._buffer
this._buffer = []
b.forEach(function (m) {
this.emitLog(m)
}, this)
if (this.progressEnabled) {
this.gauge.enable()
}
}
log._buffer = []
var id = 0
log.record = []
log.maxRecordSize = 10000
log.log = function (lvl, prefix, message) {
var l = this.levels[lvl]
if (l === undefined) {
return this.emit('error', new Error(util.format(
'Undefined log level: %j', lvl)))
}
var a = new Array(arguments.length - 2)
var stack = null
for (var i = 2; i < arguments.length; i++) {
var arg = a[i - 2] = arguments[i]
// resolve stack traces to a plain string.
if (typeof arg === 'object' && arg instanceof Error && arg.stack) {
Object.defineProperty(arg, 'stack', {
value: stack = arg.stack + '',
enumerable: true,
writable: true,
})
}
}
if (stack) {
a.unshift(stack + '\n')
}
message = util.format.apply(util, a)
var m = {
id: id++,
level: lvl,
prefix: String(prefix || ''),
message: message,
messageRaw: a,
}
this.emit('log', m)
this.emit('log.' + lvl, m)
if (m.prefix) {
this.emit(m.prefix, m)
}
this.record.push(m)
var mrs = this.maxRecordSize
var n = this.record.length - mrs
if (n > mrs / 10) {
var newSize = Math.floor(mrs * 0.9)
this.record = this.record.slice(-1 * newSize)
}
this.emitLog(m)
}.bind(log)
log.emitLog = function (m) {
if (this._paused) {
this._buffer.push(m)
return
}
if (this.progressEnabled) {
this.gauge.pulse(m.prefix)
}
var l = this.levels[m.level]
if (l === undefined) {
return
}
if (l < this.levels[this.level]) {
return
}
if (l > 0 && !isFinite(l)) {
return
}
// If 'disp' is null or undefined, use the lvl as a default
// Allows: '', 0 as valid disp
var disp = log.disp[m.level] != null ? log.disp[m.level] : m.level
this.clearProgress()
m.message.split(/\r?\n/).forEach(function (line) {
if (this.heading) {
this.write(this.heading, this.headingStyle)
this.write(' ')
}
this.write(disp, log.style[m.level])
var p = m.prefix || ''
if (p) {
this.write(' ')
}
this.write(p, this.prefixStyle)
this.write(' ' + line + '\n')
}, this)
this.showProgress()
}
log._format = function (msg, style) {
if (!stream) {
return
}
var output = ''
if (this.useColor()) {
style = style || {}
var settings = []
if (style.fg) {
settings.push(style.fg)
}
if (style.bg) {
settings.push('bg' + style.bg[0].toUpperCase() + style.bg.slice(1))
}
if (style.bold) {
settings.push('bold')
}
if (style.underline) {
settings.push('underline')
}
if (style.inverse) {
settings.push('inverse')
}
if (settings.length) {
output += consoleControl.color(settings)
}
if (style.beep) {
output += consoleControl.beep()
}
}
output += msg
if (this.useColor()) {
output += consoleControl.color('reset')
}
return output
}
log.write = function (msg, style) {
if (!stream) {
return
}
stream.write(this._format(msg, style))
}
log.addLevel = function (lvl, n, style, disp) {
// If 'disp' is null or undefined, use the lvl as a default
if (disp == null) {
disp = lvl
}
this.levels[lvl] = n
this.style[lvl] = style
if (!this[lvl]) {
this[lvl] = function () {
var a = new Array(arguments.length + 1)
a[0] = lvl
for (var i = 0; i < arguments.length; i++) {
a[i + 1] = arguments[i]
}
return this.log.apply(this, a)
}.bind(this)
}
this.disp[lvl] = disp
}
log.prefixStyle = { fg: 'magenta' }
log.headingStyle = { fg: 'white', bg: 'black' }
log.style = {}
log.levels = {}
log.disp = {}
log.addLevel('silly', -Infinity, { inverse: true }, 'sill')
log.addLevel('verbose', 1000, { fg: 'blue', bg: 'black' }, 'verb')
log.addLevel('info', 2000, { fg: 'green' })
log.addLevel('timing', 2500, { fg: 'green', bg: 'black' })
log.addLevel('http', 3000, { fg: 'green', bg: 'black' })
log.addLevel('notice', 3500, { fg: 'blue', bg: 'black' })
log.addLevel('warn', 4000, { fg: 'black', bg: 'yellow' }, 'WARN')
log.addLevel('error', 5000, { fg: 'red', bg: 'black' }, 'ERR!')
log.addLevel('silent', Infinity)
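// Illustrative usage sketch (added example): consumers set a threshold level and
// call the generated level methods; extra levels can be registered the same way
// the built-in ones are added above.
//   log.level = 'verbose'
//   log.info('prefix', 'hello %s', 'world')
//   log.addLevel('success', 3001, { fg: 'green', bold: true }, 'OK')
//   log.success('build', 'done')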
// allow 'error' prefix
log.on('error', function () {}) | PypiClean |
/3d-converter-0.9.0.tar.gz/3d-converter-0.9.0/models_converter/formats/scw/chunks/node.py | from models_converter.utilities.math import Vector3
from models_converter.utilities.math import Quaternion
from . import Chunk
from ...universal.node import Node
class NODE(Chunk):
def __init__(self, header):
super().__init__(header)
self.chunk_name = 'NODE'
self.nodes = []
def parse(self, buffer: bytes):
super().parse(buffer)
nodes_count = self.readUShort()
for node_index in range(nodes_count):
node = Node(
name=self.readString(),
parent=self.readString()
)
instances_count = self.readUShort()
for x in range(instances_count):
instance = Node.Instance(
instance_type=self.readChars(4),
name=self.readString()
)
if instance.get_type() in ('GEOM', 'CONT'):
materials_count = self.readUShort()
for bind in range(materials_count):
symbol = self.readString()
target = self.readString()
instance.add_bind(symbol, target)
elif instance.get_type() == 'CAME':
instance.set_target(self.readString())
node.add_instance(instance)
frames_count = self.readUShort()
if frames_count > 0:
rotation = Quaternion()
position = Vector3()
scale = Vector3()
node.frames_settings = self.readUByte()
for frame_index in range(frames_count):
frame = Node.Frame(self.readUShort())
if node.frames_settings & 1 or frame_index == 0: # Rotation
rotation.x = self.readNShort()
rotation.y = self.readNShort()
rotation.z = self.readNShort()
rotation.w = self.readNShort()
if node.frames_settings & 2 or frame_index == 0: # Position X
position.x = self.readFloat()
if node.frames_settings & 4 or frame_index == 0: # Position Y
position.y = self.readFloat()
if node.frames_settings & 8 or frame_index == 0: # Position Z
position.z = self.readFloat()
if node.frames_settings & 16 or frame_index == 0: # Scale X
scale.x = self.readFloat()
if node.frames_settings & 32 or frame_index == 0: # Scale Y
scale.y = self.readFloat()
if node.frames_settings & 64 or frame_index == 0: # Scale Z
scale.z = self.readFloat()
frame.set_rotation(rotation.clone())
frame.set_position(position.clone())
frame.set_scale(scale.clone())
node.add_frame(frame)
self.nodes.append(node)
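    # Illustrative note (added example): `frames_settings` is the bitmask read
    # above -- bit 1 = rotation, bits 2/4/8 = position X/Y/Z, bits 16/32/64 =
    # scale X/Y/Z. A value of 7 (0b0000111) therefore stores rotation plus X and
    # Y positions on every frame, while the remaining components are only stored
    # for frame 0.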
def encode(self):
super().encode()
self.writeUShort(len(self.nodes))
for node in self.nodes:
self.writeString(node.get_name())
self.writeString(node.get_parent())
self.writeUShort(len(node.get_instances()))
for instance in node.get_instances():
self.writeChar(instance.get_type())
self.writeString(instance.get_name())
self.writeUShort(len(instance.get_binds()))
for bind in instance.get_binds():
self.writeString(bind.get_symbol())
self.writeString(bind.get_target())
self._encode_frames(node.get_frames(), node.frames_settings)
self.length = len(self.buffer)
def _encode_frames(self, frames, frames_settings):
self.writeUShort(len(frames))
if len(frames) > 0:
self.writeUByte(frames_settings)
for frame in frames:
self.writeUShort(frame.get_id())
                if frames_settings & 1 or frames.index(frame) == 0: # Rotation
                    rotation = frame.get_rotation()
                    self.writeNShort(rotation.x)
                    self.writeNShort(rotation.y)
                    self.writeNShort(rotation.z)
                    self.writeNShort(rotation.w)
                if frames_settings & 2 or frames.index(frame) == 0: # Position X
                    self.writeFloat(frame.get_position().x)
                if frames_settings & 4 or frames.index(frame) == 0: # Position Y
                    self.writeFloat(frame.get_position().y)
                if frames_settings & 8 or frames.index(frame) == 0: # Position Z
                    self.writeFloat(frame.get_position().z)
                if frames_settings & 16 or frames.index(frame) == 0: # Scale X
                    self.writeFloat(frame.get_scale().x)
                if frames_settings & 32 or frames.index(frame) == 0: # Scale Y
                    self.writeFloat(frame.get_scale().y)
                if frames_settings & 64 or frames.index(frame) == 0: # Scale Z
                    self.writeFloat(frame.get_scale().z) | PypiClean
/MacroPy-1.0.3.zip/MacroPy-1.0.3/macropy/core/cleanup.py | from ast import *
from macropy.core.util import register
from macros import filters
from walkers import Walker
@register(filters)
def fix_ctx(tree, **kw):
return ast_ctx_fixer.recurse(tree, Load())
@Walker
def ast_ctx_fixer(tree, ctx, stop, set_ctx, **kw):
"""Fix any missing `ctx` attributes within an AST; allows you to build
your ASTs without caring about that stuff and just filling it in later."""
if "ctx" in type(tree)._fields and (not hasattr(tree, "ctx") or tree.ctx is None):
tree.ctx = ctx
if type(tree) is arguments:
for arg in tree.args:
ast_ctx_fixer.recurse(arg, Param())
for default in tree.defaults:
ast_ctx_fixer.recurse(default, Load())
stop()
return tree
if type(tree) is AugAssign:
ast_ctx_fixer.recurse(tree.target, AugStore())
ast_ctx_fixer.recurse(tree.value, AugLoad())
stop()
return tree
if type(tree) is Attribute:
set_ctx(Load())
return tree
if type(tree) is Assign:
for target in tree.targets:
ast_ctx_fixer.recurse(target, Store())
ast_ctx_fixer.recurse(tree.value, Load())
stop()
return tree
if type(tree) is Delete:
for target in tree.targets:
ast_ctx_fixer.recurse(target, Del())
stop()
return tree
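# Illustrative sketch (added example, not part of the original module): applying
# the fixer to a hand-built AST. The tree below is constructed without `ctx`
# attributes; after recursing (the same call used in `fix_ctx` above) they are
# filled in and the tree can be compiled.
def _example_ast_ctx_fixer():  # hypothetical helper name
    tree = Expression(body=Name(id="x"))          # note: no ctx supplied
    fixed = ast_ctx_fixer.recurse(tree, Load())   # ctx attributes filled in
    fix_missing_locations(fixed)
    return eval(compile(fixed, "<example>", "eval"), {"x": 42})  # -> 42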
@register(filters)
def fill_line_numbers(tree, lineno, col_offset, **kw):
"""Fill in line numbers somewhat more cleverly than the
ast.fix_missing_locations method, which doesn't take into account the
fact that line numbers are monotonically increasing down lists of AST
nodes."""
if type(tree) is list:
for sub in tree:
if isinstance(sub, AST) \
and hasattr(sub, "lineno") \
and hasattr(sub, "col_offset") \
and (sub.lineno, sub.col_offset) > (lineno, col_offset):
lineno = sub.lineno
col_offset = sub.col_offset
fill_line_numbers(sub, lineno, col_offset)
elif isinstance(tree, AST):
if not (hasattr(tree, "lineno") and hasattr(tree, "col_offset")):
tree.lineno = lineno
tree.col_offset = col_offset
for name, sub in iter_fields(tree):
fill_line_numbers(sub, tree.lineno, tree.col_offset)
return tree | PypiClean |
/Flask-Administration-0.1.42.tar.gz/Flask-Administration-0.1.42/flask_administration/static/jsmin/administration.js | (function(){var a,b,c,d,e,f,g,h,i,j=Object.prototype.hasOwnProperty,k=function(a,b){function d(){this.constructor=a}for(var c in b)j.call(b,c)&&(a[c]=b[c]);return d.prototype=b.prototype,a.prototype=new d,a.__super__=b.prototype,a},l=function(a,b){return function(){return a.apply(b,arguments)}};a=jQuery,b="http://127.0.0.1:5000/admin",h={},f={},i={},g={},d={templates:{},get:function(a,b){var c;a.replace("#",""),c=this.templates[a],c?b(c):this.fetch(a+".html",b);if(_(this.name).endsWith("html"))return this.fetch(a,b)},fetch:function(b,c){var d=this;return a.ajax("/admin/static/views/"+b,{type:"GET",dataType:"html",error:function(b,c,d){return a("body").append("AJAX Error: #{textStatus}")},success:function(b,e,f){return d.template=_.template(a(b).html()),c(d.template)}})}},c=_.extend({},Backbone.Events),e=function(){function a(a){this.timezone="UTC",this.nowLocal=new Date,this.nowUTC=new Date(this.nowLocal.getUTCFullYear(),this.nowLocal.getUTCMonth(),this.nowLocal.getUTCDate(),this.nowLocal.getUTCHours(),this.nowLocal.getUTCMinutes(),this.nowLocal.getUTCSeconds()),this.tz(this.timezone)}return a.prototype.current_tz_offset=function(){var a,b;return a=new Date,b=a.getTimezoneOffset()/60},a.prototype.tz=function(a){switch(a){case"PST":this.offset=-8;break;case"MST":this.offset=-7;break;case"CST":this.offset=-6;break;case"EST":this.offset=-5}return this},a.prototype.nowString=function(){var a,b,c,d,e,f;return this.timezone==="UTC"?d=this.nowUTC:(e=this.nowUTC.getTime()+36e5*this.offset,d=new Date(e)),a=d.getHours(),c=d.getMinutes(),f=d.getSeconds(),b=a<12?"AM":"PM",a>12&&(a-=12),a===0&&(a=12),c<10&&(c="0"+c),f<10&&(f="0"+f),""+a+":"+c+":"+f+" "+b},a}(),h.Gauge=function(a){function b(){b.__super__.constructor.apply(this,arguments)}return k(b,a),b}(Backbone.Model),h.Dashboard=function(a){function b(){b.__super__.constructor.apply(this,arguments)}return k(b,a),b}(Backbone.Model),f.Gauges=function(a){function b(){b.__super__.constructor.apply(this,arguments)}return k(b,a),b.prototype.model=h.Gauge,b.prototype.initialize=function(a){return this.url=a.url},b}(Backbone.Collection),f.Dashboards=function(a){function b(){b.__super__.constructor.apply(this,arguments)}return k(b,a),b.prototype.model=h.Dashboard,b.prototype.initialize=function(a){return this.url=a.url},b}(Backbone.Collection),i.GaugeView=function(b){function c(){c.__super__.constructor.apply(this,arguments)}return k(c,b),c.prototype.tagName="div",c.prototype.template=_.template(a("#gauge-template").html()),c.prototype.initialize=function(a){return this.nid=a.nid,this.parent=a.parent,_.bindAll(this,"render"),this},c.prototype.render=function(){var b=this;return d.get("gauge-template",function(c){return b.parent.collections.gauges.fetch({success:function(){var d;return d=b.parent.collections.gauges.get(b.nid).attributes.data,b.$el.html(a(c({id:b.nid,data:d}))),b.$el.draggable({snap:"#main"})}})}),this},c}(Backbone.View),i.TimeView=function(b){function d(){this.update=l(this.update,this),d.__super__.constructor.apply(this,arguments)}return k(d,b),d.prototype.template=_.template(a("#gauge-timeline-template").html()),d.prototype.timezoneInteger=0,d.prototype.initialize=function(b){var d=this;return 
this.nid=b.nid,this.parent=b.parent,this.timeElement=a("#time-"+this.nid),this.parent.collections.gauges.fetch({success:function(){d.tz=d.parent.collections.gauges.get(d.nid).get("timezone");switch(d.tz){case"EST":d.timezoneInteger=-5;break;case"PST":d.timezoneInteger=-8}return c.on("tick:rtc",d.update)}}),this},d.prototype.update=function(){var b;return b=new e({timezone:this.tz}),a("#time-"+this.nid).text(b.nowString())},d}(i.GaugeView),i.TimelineView=function(a){function b(){b.__super__.constructor.apply(this,arguments)}return k(b,a),b}(i.GaugeView),i.Dashboard=function(b){function d(){this.incrementTick=l(this.incrementTick,this),this.startRTC=l(this.startRTC,this),d.__super__.constructor.apply(this,arguments)}return k(d,b),d.prototype.ticks=0,d.prototype.views=[],d.prototype.collections={dashboards:new f.Dashboards({url:"/admin/dashboard/load"}),gauges:new f.Gauges({url:"/admin/dashboard/gauge/"})},d.prototype.initialize=function(b){var c=this;this.el=a(b.el),_.bindAll(this,"render");try{function(){return c.Jugs=new Juggernaut,c.hasJugs=!0}}catch(d){function(){return console.log(d),c.hasJugs=!1}}return this.startTimerOrChannel(b),this},d.prototype.startTimerOrChannel=function(a){return this.hasJugs?(this.startRTC(),this.openChannel(a.channel)):this.incrementTick()},d.prototype.openChannel=function(a){return this.Jugs.subscribe(a,function(a){return c.trigger(chanelName,a)})},d.prototype.startRTC=function(){return this._interval=window.setTimeout(this.startRTC,1e3),c.trigger("tick:rtc"),this},d.prototype.incrementTick=function(){return this._interval=window.setTimeout(this.incrementTick,500),this.ticks%2===0&&c.trigger("tick:rtc"),this.ticks++,this},d.prototype.render=function(){var b=this;return a("#js-loading").remove(),this.views=[],this.el.empty(),this.collections.dashboards.map(function(a){var c,d,e,f,g,h;return e=a.get("gauge"),f=a.get("id"),h=i[e.type],c="#gauge-"+f,g=new h({parent:b,nid:f,id:c}),d=g.render().el,b.el.append(d),b.views.push(g)}),this},d.prototype.preRender=function(){var a=this;return this.collections.dashboards.fetch({success:function(){return a.render()}})},d}(Backbone.View),a(function(){var b;return b=new i.Dashboard({el:a("#main")}),b.preRender()})}).call(this),define("administration",function(){}) | PypiClean |
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/properties/DirtyFlaggedProperty.py |
class DirtyFlaggingProperty(property):
"""
This property, if used in combination with an object inheriting from 'DirtyObjectMixin'
will flag the object as dirty if the value of the attribute is set
"""
def __init__(self, propname, checkType = None, *args, **kwargs):
"""
        Constructor for this property
        @param propname : the name of the property
        @param checkType : the j.basetype type class that can be used for type validation of the value
"""
self._propname = propname
self._checkType = checkType
def __get__(self, obj, objtype=None):
"""
Return the value of the attribute
"""
return getattr(obj, '_%s' % self._propname)
def __set__(self, obj, value):
"""
Sets the value of the attribute.
        If a checkType class is supplied, its check() method will be executed to validate the supplied value.
"""
if value == getattr(obj, '_%s' % self._propname):
return
if value is not None and self._checkType and callable(self._checkType.check) and not self._checkType.check(value):
raise TypeError("The type of the value for '%s' is invalid."%self._propname)
d = getattr(obj, '_dirtied', set())
if not self._propname in d:
d.add(self._propname)
obj._dirtied = d
setattr(obj, '_%s' % self._propname, value)
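# Illustrative usage sketch (added example; the Machine class below is
# hypothetical and assumes a new-style model class). Declaring
# DirtyFlaggingProperty descriptors on a class that mixes in DirtyObjectMixin
# (defined below) makes every property assignment record the property name:
#
#   class Machine(DirtyObjectMixin, object):
#       name = DirtyFlaggingProperty('name')
#       def __init__(self):
#           self._name = None
#
#   m = Machine()
#   m.name = 'node1'
#   m.isDirty               # -> True
#   m.dirtyProperties       # -> set(['name'])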
class DirtyObjectMixin:
"""
    Mixin class that adds 2 attributes to a class, containing data about changes to its properties:
    isDirty : returns True if the set of dirtyProperties contains items
    dirtyProperties : contains the set of updated properties
"""
def _get_dirty_properties(self):
'''Return all dirty properties in this instance
@returns: Dirty property names
@rtype: set
'''
dirty = getattr(self, '_dirtied', None)
        if dirty is None:  # explicit None check: an existing (but empty) set must not be replaced
dirty = set()
self._dirtied = dirty
return dirty
isDirty = property(fget=lambda s: len(s.dirtyProperties) > 0)
dirtyProperties = property(fget=_get_dirty_properties) | PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/matrices/expressions/slice.py | from ...core import Expr, Integer, Tuple
from ...functions import floor
from ...logic import true
from .matexpr import MatrixExpr
def normalize(i, parentsize):
if isinstance(i, slice):
i = (i.start, i.stop, i.step)
if not isinstance(i, (tuple, list, Tuple)):
if (i < Integer(0)) == true:
i += parentsize
i = (i, i + 1, 1)
i = list(i)
if len(i) == 2:
i.append(1)
start, stop, step = i
start = start or 0
if stop is None:
stop = parentsize
if (start < Integer(0)) == true:
start += parentsize
if (stop < Integer(0)) == true:
stop += parentsize
step = step or 1
if ((stop - start) * step < Integer(1)) == true:
raise IndexError()
return start, stop, step
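# Illustrative note (added example): for a parent dimension of 10, `normalize`
# yields (start, stop, step) triples such as
#     normalize(slice(None), 10)     -> (0, 10, 1)
#     normalize(slice(2, None), 10)  -> (2, 10, 1)
#     normalize(-1, 10)              -> (9, 10, 1)   # a single index i becomes (i, i + 1, 1)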
class MatrixSlice(MatrixExpr):
"""A MatrixSlice of a Matrix Expression
Examples
========
>>> M = ImmutableMatrix(4, 4, range(16))
>>> print(M)
Matrix([
[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> B = MatrixSlice(M, (0, 2), (2, 4))
>>> print(ImmutableMatrix(B))
Matrix([
[2, 3],
[6, 7]])
"""
parent = property(lambda self: self.args[0])
rowslice = property(lambda self: self.args[1])
colslice = property(lambda self: self.args[2])
def __new__(cls, parent, rowslice, colslice):
rowslice = normalize(rowslice, parent.shape[0])
colslice = normalize(colslice, parent.shape[1])
if true in (0 > rowslice[0], parent.shape[0] < rowslice[1],
0 > colslice[0], parent.shape[1] < colslice[1]):
raise IndexError()
if isinstance(parent, MatrixSlice):
return mat_slice_of_slice(parent, rowslice, colslice)
return Expr.__new__(cls, parent, Tuple(*rowslice), Tuple(*colslice))
@property
def shape(self):
rows = self.rowslice[1] - self.rowslice[0]
rows = rows if self.rowslice[2] == 1 else floor(rows/self.rowslice[2])
cols = self.colslice[1] - self.colslice[0]
cols = cols if self.colslice[2] == 1 else floor(cols/self.colslice[2])
return rows, cols
def _entry(self, i, j):
return self.parent._entry(i*self.rowslice[2] + self.rowslice[0],
j*self.colslice[2] + self.colslice[0])
@property
def on_diag(self):
return self.rowslice == self.colslice
def slice_of_slice(s, t):
start1, stop1, step1 = s
start2, stop2, step2 = t
start = start1 + start2*step1
step = step1 * step2
stop = start1 + step1*stop2
assert stop <= stop1
return start, stop, step
def mat_slice_of_slice(parent, rowslice, colslice):
"""Collapse nested matrix slices
>>> X = MatrixSymbol('X', 10, 10)
>>> X[:, 1:5][5:8, :]
X[5:8, 1:5]
>>> X[1:9:2, 2:6][1:3, 2]
X[3:7:2, 4]
"""
row = slice_of_slice(parent.rowslice, rowslice)
col = slice_of_slice(parent.colslice, colslice)
return MatrixSlice(parent.parent, row, col) | PypiClean |
/EasyAIScaffolding-1.0.0.tar.gz/EasyAIScaffolding-1.0.0/EasyAI/TextClassifier/textcnn/textcnn_classifier.py | import argparse
import json
import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from loguru import logger
from sklearn import metrics
from sklearn.model_selection import train_test_split
from EasyAI.TextClassifier.base_classifier import ClassifierABC, load_data
from EasyAI.TextClassifier.data_helper import set_seed, build_vocab, load_vocab
from EasyAI.common.time_utils import get_time_spend
from .data import build_dataset, build_iterator
from .models import TextCNNModel
pwd_path = os.path.abspath(os.path.dirname(__file__))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TextCNNClassifier(ClassifierABC):
def __init__(
self,
model_dir,
filter_sizes=(2, 3, 4),
num_filters=256,
dropout_rate=0.5,
batch_size=64,
max_seq_length=128,
embed_size=200,
max_vocab_size=10000,
unk_token="[UNK]",
pad_token="[PAD]",
tokenizer=None,
):
"""
Init the TextCNNClassifier
        @param model_dir: directory where the trained model is saved
        @param filter_sizes: convolution kernel sizes
        @param num_filters: number of convolution kernels (channels)
@param dropout_rate:
@param batch_size:
@param max_seq_length:
@param embed_size:
@param max_vocab_size:
@param unk_token:
@param pad_token:
        @param tokenizer: tokenizer; defaults to character-level splitting
"""
self.model_dir = model_dir
self.is_trained = False
self.model = None
logger.debug(f"device: {device}")
self.filter_sizes = filter_sizes
self.num_filters = num_filters
self.dropout_rate = dropout_rate
self.batch_size = batch_size
self.max_seq_length = max_seq_length
self.embed_size = embed_size
self.max_vocab_size = max_vocab_size
self.unk_token = unk_token
self.pad_token = pad_token
self.tokenizer = (
tokenizer if tokenizer else lambda x: [y for y in x]
) # char-level
def __str__(self):
return f"TextCNNClassifier instance ({self.model})"
def train(
self,
data_list_or_path,
header=None,
names=("labels", "text"),
delimiter="\t",
test_size=0.1,
num_epochs=20,
learning_rate=1e-3,
require_improvement=1000,
evaluate_during_training_steps=100,
):
"""
Train model with data_list_or_path and save model to model_dir
@param data_list_or_path:
@param model_dir:
@param header:
@param names:
@param delimiter:
@param test_size:
        @param num_epochs: number of training epochs
        @param learning_rate: learning rate
        @param require_improvement: stop training early if there is no improvement for more than this many batches (e.g. 1000)
        @param evaluate_during_training_steps: evaluate the model every this many steps
@return:
"""
logger.debug("train model...")
SEED = 1
set_seed(SEED)
# load data
X, y, data_df = load_data(
data_list_or_path,
header=header,
names=names,
delimiter=delimiter,
is_train=True,
)
model_dir = self.model_dir
if model_dir:
os.makedirs(model_dir, exist_ok=True)
word_vocab_path = os.path.join(model_dir, "word_vocab.json")
label_vocab_path = os.path.join(model_dir, "label_vocab.json")
save_model_path = os.path.join(model_dir, "model.pth")
dataset, self.word_id_map, self.label_id_map = build_dataset(
self.tokenizer,
X,
y,
word_vocab_path,
label_vocab_path,
max_vocab_size=self.max_vocab_size,
max_seq_length=self.max_seq_length,
unk_token=self.unk_token,
pad_token=self.pad_token,
)
train_data, dev_data = train_test_split(
dataset, test_size=test_size, random_state=SEED
)
logger.debug(
f"train_data size: {len(train_data)}, dev_data size: {len(dev_data)}"
)
logger.debug(
f"train_data sample:\n{train_data[:3]}\ndev_data sample:\n{dev_data[:3]}"
)
train_iter = build_iterator(train_data, device, self.batch_size)
dev_iter = build_iterator(dev_data, device, self.batch_size)
# create model
vocab_size = len(self.word_id_map)
num_classes = len(self.label_id_map)
logger.debug(f"vocab_size:{vocab_size}", "num_classes:", num_classes)
self.model = TextCNNModel(
vocab_size,
num_classes,
embed_size=self.embed_size,
filter_sizes=self.filter_sizes,
num_filters=self.num_filters,
dropout_rate=self.dropout_rate,
)
self.model.to(device)
# init_network(self.model)
logger.info(self.model.parameters)
# train model
history = self.train_model_from_data_iterator(
save_model_path,
train_iter,
dev_iter,
num_epochs,
learning_rate,
require_improvement,
evaluate_during_training_steps,
)
self.is_trained = True
logger.debug("train model done")
return history
def train_model_from_data_iterator(
self,
save_model_path,
train_iter,
dev_iter,
num_epochs=10,
learning_rate=1e-3,
require_improvement=1000,
evaluate_during_training_steps=100,
):
history = []
# train
start_time = time.time()
optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
        total_batch = 0  # number of batches processed so far
        dev_best_loss = 1e10
        last_improve = 0  # batch index of the last dev-loss improvement
        flag = False  # whether training has stalled for too long without improvement
for epoch in range(num_epochs):
logger.debug("Epoch [{}/{}]".format(epoch + 1, num_epochs))
for i, (trains, labels) in enumerate(train_iter):
self.model.train()
outputs = self.model(trains)
loss = F.cross_entropy(outputs, labels)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if total_batch % evaluate_during_training_steps == 0:
                    # report performance on the training and dev sets
y_true = labels.cpu()
y_pred = torch.max(outputs, 1)[1].cpu()
train_acc = metrics.accuracy_score(y_true, y_pred)
if dev_iter is not None:
dev_acc, dev_loss = self.evaluate(dev_iter)
if dev_loss < dev_best_loss:
dev_best_loss = dev_loss
torch.save(self.model.state_dict(), save_model_path)
logger.debug(f"Saved model: {save_model_path}")
improve = "*"
last_improve = total_batch
else:
improve = ""
time_dif = get_time_spend(start_time)
msg = "Iter:{0:>6},Train Loss:{1:>5.2},Train Acc:{2:>6.2%},Val Loss:{3:>5.2},Val Acc:{4:>6.2%},Time:{5} {6}".format(
total_batch,
loss.item(),
train_acc,
dev_loss,
dev_acc,
time_dif,
improve,
)
else:
time_dif = get_time_spend(start_time)
msg = "Iter:{0:>6},Train Loss:{1:>5.2},Train Acc:{2:>6.2%},Time:{3}".format(
total_batch, loss.item(), train_acc, time_dif
)
logger.debug(msg)
history.append(msg)
self.model.train()
total_batch += 1
if total_batch - last_improve > require_improvement:
                    # dev loss has not improved for more than `require_improvement` batches; stop training
logger.debug("No optimization for a long time, auto-stopping...")
flag = True
break
if flag:
break
return history
def predict(self, sentences: list):
"""
Predict labels and label probability for sentences.
@param sentences: list, input text list, eg: [text1, text2, ...]
@return: predict_label, predict_prob
"""
if not self.is_trained:
raise ValueError("model not trained.")
self.model.eval()
def load_dataset(X, max_seq_length=128):
contents = []
for content in X:
words_line = []
token = self.tokenizer(content)
seq_len = len(token)
if max_seq_length:
if len(token) < max_seq_length:
token.extend([self.pad_token] * (max_seq_length - len(token)))
else:
token = token[:max_seq_length]
seq_len = max_seq_length
# word to id
for word in token:
words_line.append(
self.word_id_map.get(word, self.word_id_map.get(self.unk_token))
)
contents.append((words_line, 0, seq_len))
return contents
data = load_dataset(sentences, self.max_seq_length)
data_iter = build_iterator(data, device, self.batch_size)
# predict prob
predict_all = np.array([], dtype=int)
proba_all = np.array([], dtype=float)
with torch.no_grad():
for texts, _ in data_iter:
outputs = self.model(texts)
logit = F.softmax(outputs, dim=1).detach().cpu().numpy()
pred = np.argmax(logit, axis=1)
proba = np.max(logit, axis=1)
predict_all = np.append(predict_all, pred)
proba_all = np.append(proba_all, proba)
id_label_map = {v: k for k, v in self.label_id_map.items()}
predict_labels = [id_label_map.get(i) for i in predict_all]
predict_probs = proba_all.tolist()
return predict_labels, predict_probs
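    # Illustrative usage sketch (added example; the file paths below are
    # hypothetical). Typical flow: train on a labels/text TSV, then predict:
    #   clf = TextCNNClassifier(model_dir="outputs/textcnn")
    #   clf.train("train.tsv", names=("labels", "text"), num_epochs=10)
    #   labels, probs = clf.predict(["great product", "terrible service"])
    #   acc = clf.evaluate_model("test.tsv")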
def evaluate_model(
self, data_list_or_path, header=None, names=("labels", "text"), delimiter="\t"
):
X_test, y_test, df = load_data(
data_list_or_path, header=header, names=names, delimiter=delimiter
)
self.load_model()
data, word_id_map, label_id_map = build_dataset(
self.tokenizer,
X_test,
y_test,
self.word_vocab_path,
self.label_vocab_path,
max_vocab_size=self.max_vocab_size,
max_seq_length=self.max_seq_length,
unk_token=self.unk_token,
pad_token=self.pad_token,
)
data_iter = build_iterator(data, device, self.batch_size)
return self.evaluate(data_iter)[0]
def evaluate(self, data_iter):
"""
Evaluate model.
@param data_iter:
@return: accuracy score, loss
"""
if not self.model:
raise ValueError("model not trained.")
self.model.eval()
loss_total = 0.0
predict_all = np.array([], dtype=int)
labels_all = np.array([], dtype=int)
with torch.no_grad():
for texts, labels in data_iter:
outputs = self.model(texts)
loss = F.cross_entropy(outputs, labels)
loss_total += loss
labels = labels.cpu().numpy()
predic = torch.max(outputs, 1)[1].cpu().numpy()
labels_all = np.append(labels_all, labels)
predict_all = np.append(predict_all, predic)
logger.debug(f"evaluate, last batch, y_true: {labels}, y_pred: {predic}")
acc = metrics.accuracy_score(labels_all, predict_all)
return acc, loss_total / len(data_iter)
def load_model(self):
"""
Load model from model_dir
@return:
"""
model_path = os.path.join(self.model_dir, "model.pth")
if os.path.exists(model_path):
self.word_vocab_path = os.path.join(self.model_dir, "word_vocab.json")
self.label_vocab_path = os.path.join(self.model_dir, "label_vocab.json")
self.word_id_map = load_vocab(self.word_vocab_path)
self.label_id_map = load_vocab(self.label_vocab_path)
vocab_size = len(self.word_id_map)
num_classes = len(self.label_id_map)
self.model = TextCNNModel(
vocab_size,
num_classes,
embed_size=self.embed_size,
filter_sizes=self.filter_sizes,
num_filters=self.num_filters,
dropout_rate=self.dropout_rate,
)
self.model.load_state_dict(torch.load(model_path, map_location=device))
self.model.to(device)
self.is_trained = True
else:
logger.error(f"{model_path} not exists.")
self.is_trained = False
return self.is_trained | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/tiny_mce/themes/advanced/js/charmap.js | tinyMCEPopup.requireLangPack();
var charmap = [
  ['&nbsp;', '&#160;', true, 'no-break space'],
  ['&amp;', '&#38;', true, 'ampersand'],
  ['&quot;', '&#34;', true, 'quotation mark'],
  // finance
  ['&cent;', '&#162;', true, 'cent sign'],
  ['&euro;', '&#8364;', true, 'euro sign'],
  ['&pound;', '&#163;', true, 'pound sign'],
  ['&yen;', '&#165;', true, 'yen sign'],
  // signs
  ['&copy;', '&#169;', true, 'copyright sign'],
  ['&reg;', '&#174;', true, 'registered sign'],
  ['&trade;', '&#8482;', true, 'trade mark sign'],
  ['&permil;', '&#8240;', true, 'per mille sign'],
  ['&micro;', '&#181;', true, 'micro sign'],
  ['&middot;', '&#183;', true, 'middle dot'],
  ['&bull;', '&#8226;', true, 'bullet'],
  ['&hellip;', '&#8230;', true, 'three dot leader'],
  ['&prime;', '&#8242;', true, 'minutes / feet'],
  ['&Prime;', '&#8243;', true, 'seconds / inches'],
  ['&sect;', '&#167;', true, 'section sign'],
  ['&para;', '&#182;', true, 'paragraph sign'],
  ['&szlig;', '&#223;', true, 'sharp s / ess-zed'],
  // quotations
  ['&lsaquo;', '&#8249;', true, 'single left-pointing angle quotation mark'],
  ['&rsaquo;', '&#8250;', true, 'single right-pointing angle quotation mark'],
  ['&laquo;', '&#171;', true, 'left pointing guillemet'],
  ['&raquo;', '&#187;', true, 'right pointing guillemet'],
  ['&lsquo;', '&#8216;', true, 'left single quotation mark'],
  ['&rsquo;', '&#8217;', true, 'right single quotation mark'],
  ['&ldquo;', '&#8220;', true, 'left double quotation mark'],
  ['&rdquo;', '&#8221;', true, 'right double quotation mark'],
  ['&sbquo;', '&#8218;', true, 'single low-9 quotation mark'],
  ['&bdquo;', '&#8222;', true, 'double low-9 quotation mark'],
  ['&lt;', '&#60;', true, 'less-than sign'],
  ['&gt;', '&#62;', true, 'greater-than sign'],
  ['&le;', '&#8804;', true, 'less-than or equal to'],
  ['&ge;', '&#8805;', true, 'greater-than or equal to'],
  ['&ndash;', '&#8211;', true, 'en dash'],
  ['&mdash;', '&#8212;', true, 'em dash'],
  ['&macr;', '&#175;', true, 'macron'],
  ['&oline;', '&#8254;', true, 'overline'],
  ['&curren;', '&#164;', true, 'currency sign'],
  ['&brvbar;', '&#166;', true, 'broken bar'],
  ['&uml;', '&#168;', true, 'diaeresis'],
  ['&iexcl;', '&#161;', true, 'inverted exclamation mark'],
  ['&iquest;', '&#191;', true, 'turned question mark'],
  ['&circ;', '&#710;', true, 'circumflex accent'],
  ['&tilde;', '&#732;', true, 'small tilde'],
  ['&deg;', '&#176;', true, 'degree sign'],
  ['&minus;', '&#8722;', true, 'minus sign'],
  ['&plusmn;', '&#177;', true, 'plus-minus sign'],
  ['&divide;', '&#247;', true, 'division sign'],
  ['&frasl;', '&#8260;', true, 'fraction slash'],
  ['&times;', '&#215;', true, 'multiplication sign'],
  ['&sup1;', '&#185;', true, 'superscript one'],
  ['&sup2;', '&#178;', true, 'superscript two'],
  ['&sup3;', '&#179;', true, 'superscript three'],
  ['&frac14;', '&#188;', true, 'fraction one quarter'],
  ['&frac12;', '&#189;', true, 'fraction one half'],
  ['&frac34;', '&#190;', true, 'fraction three quarters'],
  // math / logical
  ['&fnof;', '&#402;', true, 'function / florin'],
  ['&int;', '&#8747;', true, 'integral'],
  ['&sum;', '&#8721;', true, 'n-ary sumation'],
  ['&infin;', '&#8734;', true, 'infinity'],
  ['&radic;', '&#8730;', true, 'square root'],
  ['&sim;', '&#8764;', false,'similar to'],
  ['&cong;', '&#8773;', false,'approximately equal to'],
  ['&asymp;', '&#8776;', true, 'almost equal to'],
  ['&ne;', '&#8800;', true, 'not equal to'],
  ['&equiv;', '&#8801;', true, 'identical to'],
  ['&isin;', '&#8712;', false,'element of'],
  ['&notin;', '&#8713;', false,'not an element of'],
  ['&ni;', '&#8715;', false,'contains as member'],
  ['&prod;', '&#8719;', true, 'n-ary product'],
  ['&and;', '&#8743;', false,'logical and'],
  ['&or;', '&#8744;', false,'logical or'],
  ['&not;', '&#172;', true, 'not sign'],
  ['&cap;', '&#8745;', true, 'intersection'],
  ['&cup;', '&#8746;', false,'union'],
  ['&part;', '&#8706;', true, 'partial differential'],
  ['&forall;', '&#8704;', false,'for all'],
  ['&exist;', '&#8707;', false,'there exists'],
  ['&empty;', '&#8709;', false,'diameter'],
  ['&nabla;', '&#8711;', false,'backward difference'],
  ['&lowast;', '&#8727;', false,'asterisk operator'],
  ['&prop;', '&#8733;', false,'proportional to'],
  ['&ang;', '&#8736;', false,'angle'],
  // undefined
  ['&acute;', '&#180;', true, 'acute accent'],
  ['&cedil;', '&#184;', true, 'cedilla'],
  ['&ordf;', '&#170;', true, 'feminine ordinal indicator'],
  ['&ordm;', '&#186;', true, 'masculine ordinal indicator'],
  ['&dagger;', '&#8224;', true, 'dagger'],
  ['&Dagger;', '&#8225;', true, 'double dagger'],
  // alphabetical special chars
  ['&Agrave;', '&#192;', true, 'A - grave'],
  ['&Aacute;', '&#193;', true, 'A - acute'],
  ['&Acirc;', '&#194;', true, 'A - circumflex'],
  ['&Atilde;', '&#195;', true, 'A - tilde'],
  ['&Auml;', '&#196;', true, 'A - diaeresis'],
  ['&Aring;', '&#197;', true, 'A - ring above'],
  ['&AElig;', '&#198;', true, 'ligature AE'],
  ['&Ccedil;', '&#199;', true, 'C - cedilla'],
  ['&Egrave;', '&#200;', true, 'E - grave'],
  ['&Eacute;', '&#201;', true, 'E - acute'],
  ['&Ecirc;', '&#202;', true, 'E - circumflex'],
  ['&Euml;', '&#203;', true, 'E - diaeresis'],
  ['&Igrave;', '&#204;', true, 'I - grave'],
  ['&Iacute;', '&#205;', true, 'I - acute'],
  ['&Icirc;', '&#206;', true, 'I - circumflex'],
  ['&Iuml;', '&#207;', true, 'I - diaeresis'],
  ['&ETH;', '&#208;', true, 'ETH'],
  ['&Ntilde;', '&#209;', true, 'N - tilde'],
  ['&Ograve;', '&#210;', true, 'O - grave'],
  ['&Oacute;', '&#211;', true, 'O - acute'],
  ['&Ocirc;', '&#212;', true, 'O - circumflex'],
  ['&Otilde;', '&#213;', true, 'O - tilde'],
  ['&Ouml;', '&#214;', true, 'O - diaeresis'],
  ['&Oslash;', '&#216;', true, 'O - slash'],
  ['&OElig;', '&#338;', true, 'ligature OE'],
  ['&Scaron;', '&#352;', true, 'S - caron'],
  ['&Ugrave;', '&#217;', true, 'U - grave'],
  ['&Uacute;', '&#218;', true, 'U - acute'],
  ['&Ucirc;', '&#219;', true, 'U - circumflex'],
  ['&Uuml;', '&#220;', true, 'U - diaeresis'],
  ['&Yacute;', '&#221;', true, 'Y - acute'],
  ['&Yuml;', '&#376;', true, 'Y - diaeresis'],
  ['&THORN;', '&#222;', true, 'THORN'],
  ['&agrave;', '&#224;', true, 'a - grave'],
  ['&aacute;', '&#225;', true, 'a - acute'],
  ['&acirc;', '&#226;', true, 'a - circumflex'],
  ['&atilde;', '&#227;', true, 'a - tilde'],
  ['&auml;', '&#228;', true, 'a - diaeresis'],
  ['&aring;', '&#229;', true, 'a - ring above'],
  ['&aelig;', '&#230;', true, 'ligature ae'],
  ['&ccedil;', '&#231;', true, 'c - cedilla'],
  ['&egrave;', '&#232;', true, 'e - grave'],
  ['&eacute;', '&#233;', true, 'e - acute'],
  ['&ecirc;', '&#234;', true, 'e - circumflex'],
  ['&euml;', '&#235;', true, 'e - diaeresis'],
  ['&igrave;', '&#236;', true, 'i - grave'],
  ['&iacute;', '&#237;', true, 'i - acute'],
  ['&icirc;', '&#238;', true, 'i - circumflex'],
  ['&iuml;', '&#239;', true, 'i - diaeresis'],
  ['&eth;', '&#240;', true, 'eth'],
  ['&ntilde;', '&#241;', true, 'n - tilde'],
  ['&ograve;', '&#242;', true, 'o - grave'],
  ['&oacute;', '&#243;', true, 'o - acute'],
  ['&ocirc;', '&#244;', true, 'o - circumflex'],
  ['&otilde;', '&#245;', true, 'o - tilde'],
  ['&ouml;', '&#246;', true, 'o - diaeresis'],
  ['&oslash;', '&#248;', true, 'o slash'],
  ['&oelig;', '&#339;', true, 'ligature oe'],
  ['&scaron;', '&#353;', true, 's - caron'],
  ['&ugrave;', '&#249;', true, 'u - grave'],
  ['&uacute;', '&#250;', true, 'u - acute'],
  ['&ucirc;', '&#251;', true, 'u - circumflex'],
  ['&uuml;', '&#252;', true, 'u - diaeresis'],
  ['&yacute;', '&#253;', true, 'y - acute'],
  ['&thorn;', '&#254;', true, 'thorn'],
  ['&yuml;', '&#255;', true, 'y - diaeresis'],
  ['&Alpha;', '&#913;', true, 'Alpha'],
  ['&Beta;', '&#914;', true, 'Beta'],
  ['&Gamma;', '&#915;', true, 'Gamma'],
  ['&Delta;', '&#916;', true, 'Delta'],
  ['&Epsilon;', '&#917;', true, 'Epsilon'],
  ['&Zeta;', '&#918;', true, 'Zeta'],
  ['&Eta;', '&#919;', true, 'Eta'],
  ['&Theta;', '&#920;', true, 'Theta'],
  ['&Iota;', '&#921;', true, 'Iota'],
  ['&Kappa;', '&#922;', true, 'Kappa'],
  ['&Lambda;', '&#923;', true, 'Lambda'],
  ['&Mu;', '&#924;', true, 'Mu'],
  ['&Nu;', '&#925;', true, 'Nu'],
  ['&Xi;', '&#926;', true, 'Xi'],
  ['&Omicron;', '&#927;', true, 'Omicron'],
  ['&Pi;', '&#928;', true, 'Pi'],
  ['&Rho;', '&#929;', true, 'Rho'],
  ['&Sigma;', '&#931;', true, 'Sigma'],
  ['&Tau;', '&#932;', true, 'Tau'],
  ['&Upsilon;', '&#933;', true, 'Upsilon'],
  ['&Phi;', '&#934;', true, 'Phi'],
  ['&Chi;', '&#935;', true, 'Chi'],
  ['&Psi;', '&#936;', true, 'Psi'],
  ['&Omega;', '&#937;', true, 'Omega'],
  ['&alpha;', '&#945;', true, 'alpha'],
  ['&beta;', '&#946;', true, 'beta'],
  ['&gamma;', '&#947;', true, 'gamma'],
  ['&delta;', '&#948;', true, 'delta'],
  ['&epsilon;', '&#949;', true, 'epsilon'],
  ['&zeta;', '&#950;', true, 'zeta'],
  ['&eta;', '&#951;', true, 'eta'],
  ['&theta;', '&#952;', true, 'theta'],
  ['&iota;', '&#953;', true, 'iota'],
  ['&kappa;', '&#954;', true, 'kappa'],
  ['&lambda;', '&#955;', true, 'lambda'],
  ['&mu;', '&#956;', true, 'mu'],
  ['&nu;', '&#957;', true, 'nu'],
  ['&xi;', '&#958;', true, 'xi'],
  ['&omicron;', '&#959;', true, 'omicron'],
  ['&pi;', '&#960;', true, 'pi'],
  ['&rho;', '&#961;', true, 'rho'],
  ['&sigmaf;', '&#962;', true, 'final sigma'],
  ['&sigma;', '&#963;', true, 'sigma'],
  ['&tau;', '&#964;', true, 'tau'],
  ['&upsilon;', '&#965;', true, 'upsilon'],
  ['&phi;', '&#966;', true, 'phi'],
  ['&chi;', '&#967;', true, 'chi'],
  ['&psi;', '&#968;', true, 'psi'],
  ['&omega;', '&#969;', true, 'omega'],
  // symbols
  ['&alefsym;', '&#8501;', false,'alef symbol'],
  ['&piv;', '&#982;', false,'pi symbol'],
  ['&real;', '&#8476;', false,'real part symbol'],
  ['&thetasym;','&#977;', false,'theta symbol'],
  ['&upsih;', '&#978;', false,'upsilon - hook symbol'],
  ['&weierp;', '&#8472;', false,'Weierstrass p'],
  ['&image;', '&#8465;', false,'imaginary part'],
  // arrows
  ['&larr;', '&#8592;', true, 'leftwards arrow'],
  ['&uarr;', '&#8593;', true, 'upwards arrow'],
  ['&rarr;', '&#8594;', true, 'rightwards arrow'],
  ['&darr;', '&#8595;', true, 'downwards arrow'],
  ['&harr;', '&#8596;', true, 'left right arrow'],
  ['&crarr;', '&#8629;', false,'carriage return'],
  ['&lArr;', '&#8656;', false,'leftwards double arrow'],
  ['&uArr;', '&#8657;', false,'upwards double arrow'],
  ['&rArr;', '&#8658;', false,'rightwards double arrow'],
  ['&dArr;', '&#8659;', false,'downwards double arrow'],
  ['&hArr;', '&#8660;', false,'left right double arrow'],
  ['&there4;', '&#8756;', false,'therefore'],
  ['&sub;', '&#8834;', false,'subset of'],
  ['&sup;', '&#8835;', false,'superset of'],
  ['&nsub;', '&#8836;', false,'not a subset of'],
  ['&sube;', '&#8838;', false,'subset of or equal to'],
  ['&supe;', '&#8839;', false,'superset of or equal to'],
  ['&oplus;', '&#8853;', false,'circled plus'],
  ['&otimes;', '&#8855;', false,'circled times'],
  ['&perp;', '&#8869;', false,'perpendicular'],
  ['&sdot;', '&#8901;', false,'dot operator'],
  ['&lceil;', '&#8968;', false,'left ceiling'],
  ['&rceil;', '&#8969;', false,'right ceiling'],
  ['&lfloor;', '&#8970;', false,'left floor'],
  ['&rfloor;', '&#8971;', false,'right floor'],
  ['&lang;', '&#9001;', false,'left-pointing angle bracket'],
  ['&rang;', '&#9002;', false,'right-pointing angle bracket'],
  ['&loz;', '&#9674;', true, 'lozenge'],
  ['&spades;', '&#9824;', true, 'black spade suit'],
  ['&clubs;', '&#9827;', true, 'black club suit'],
  ['&hearts;', '&#9829;', true, 'black heart suit'],
  ['&diams;', '&#9830;', true, 'black diamond suit'],
  ['&ensp;', '&#8194;', false,'en space'],
  ['&emsp;', '&#8195;', false,'em space'],
  ['&thinsp;', '&#8201;', false,'thin space'],
  ['&zwnj;', '&#8204;', false,'zero width non-joiner'],
  ['&zwj;', '&#8205;', false,'zero width joiner'],
  ['&lrm;', '&#8206;', false,'left-to-right mark'],
  ['&rlm;', '&#8207;', false,'right-to-left mark'],
  ['&shy;', '&#173;', false,'soft hyphen']
];
tinyMCEPopup.onInit.add(function() {
tinyMCEPopup.dom.setHTML('charmapView', renderCharMapHTML());
addKeyboardNavigation();
});
function addKeyboardNavigation(){
var tableElm, cells, settings;
cells = tinyMCEPopup.dom.select("a.charmaplink", "charmapgroup");
settings ={
root: "charmapgroup",
items: cells
};
cells[0].tabindex=0;
tinyMCEPopup.dom.addClass(cells[0], "mceFocus");
if (tinymce.isGecko) {
cells[0].focus();
} else {
setTimeout(function(){
cells[0].focus();
}, 100);
}
tinyMCEPopup.editor.windowManager.createInstance('tinymce.ui.KeyboardNavigation', settings, tinyMCEPopup.dom);
}
function renderCharMapHTML() {
var charsPerRow = 20, tdWidth=20, tdHeight=20, i;
var html = '<div id="charmapgroup" aria-labelledby="charmap_label" tabindex="0" role="listbox">'+
'<table role="presentation" border="0" cellspacing="1" cellpadding="0" width="' + (tdWidth*charsPerRow) +
'"><tr height="' + tdHeight + '">';
var cols=-1;
for (i=0; i<charmap.length; i++) {
var previewCharFn;
if (charmap[i][2]==true) {
cols++;
previewCharFn = 'previewChar(\'' + charmap[i][1].substring(1,charmap[i][1].length) + '\',\'' + charmap[i][0].substring(1,charmap[i][0].length) + '\',\'' + charmap[i][3] + '\');';
html += ''
+ '<td class="charmap">'
+ '<a class="charmaplink" role="button" onmouseover="'+previewCharFn+'" onfocus="'+previewCharFn+'" href="javascript:void(0)" onclick="insertChar(\'' + charmap[i][1].substring(2,charmap[i][1].length-1) + '\');" onclick="return false;" onmousedown="return false;" title="' + charmap[i][3] + ' '+ tinyMCEPopup.editor.translate("advanced_dlg.charmap_usage")+'">'
+ charmap[i][1]
+ '</a></td>';
if ((cols+1) % charsPerRow == 0)
html += '</tr><tr height="' + tdHeight + '">';
}
}
if (cols % charsPerRow > 0) {
var padd = charsPerRow - (cols % charsPerRow);
for (var i=0; i<padd-1; i++)
html += '<td width="' + tdWidth + '" height="' + tdHeight + '" class="charmap"> </td>';
}
html += '</tr></table></div>';
html = html.replace(/<tr height="20"><\/tr>/g, '');
return html;
}
function insertChar(chr) {
tinyMCEPopup.execCommand('mceInsertContent', false, '&#' + chr + ';');
// Refocus in window
if (tinyMCEPopup.isWindow)
window.focus();
tinyMCEPopup.editor.focus();
tinyMCEPopup.close();
}
function previewChar(codeA, codeB, codeN) {
var elmA = document.getElementById('codeA');
var elmB = document.getElementById('codeB');
var elmV = document.getElementById('codeV');
var elmN = document.getElementById('codeN');
if (codeA=='#160;') {
elmV.innerHTML = '__';
} else {
elmV.innerHTML = '&' + codeA;
}
elmB.innerHTML = '&' + codeA;
elmA.innerHTML = '&' + codeB;
elmN.innerHTML = codeN;
} | PypiClean |
/Fabric39-1.15.3.post1.tar.gz/Fabric39-1.15.3.post1/fabric/state.py | import os
import sys
from optparse import make_option
from fabric.network import HostConnectionCache, ssh
from fabric.version import get_version
from fabric.utils import _AliasDict, _AttributeDict
#
# Win32 flag
#
# Impacts a handful of platform specific behaviors. Note that Cygwin's Python
# is actually close enough to "real" UNIXes that it doesn't need (or want!) to
# use PyWin32 -- so we only test for literal Win32 setups (vanilla Python,
# ActiveState etc) here.
win32 = (sys.platform == 'win32')
#
# Environment dictionary - support structures
#
# By default, if the user (including code using Fabric as a library) doesn't
# set the username, we obtain the currently running username and use that.
def _get_system_username():
"""
Obtain name of current system user, which will be default connection user.
"""
import getpass
username = None
try:
username = getpass.getuser()
# getpass.getuser supported on both Unix and Windows systems.
# getpass.getuser may call pwd.getpwuid which in turns may raise KeyError
# if it cannot find a username for the given UID, e.g. on ep.io
# and similar "non VPS" style services. Rather than error out, just keep
# the 'default' username to None. Can check for this value later if needed.
except KeyError:
pass
except ImportError:
if win32:
import win32api
import win32security # noqa
import win32profile # noqa
username = win32api.GetUserName()
return username
def _rc_path():
"""
Return platform-specific default file path for $HOME/.fabricrc.
"""
rc_file = '.fabricrc'
rc_path = '~/' + rc_file
expanded_rc_path = os.path.expanduser(rc_path)
if expanded_rc_path == rc_path and win32:
from win32com.shell.shell import SHGetSpecialFolderPath
from win32com.shell.shellcon import CSIDL_PROFILE
expanded_rc_path = "%s/%s" % (
SHGetSpecialFolderPath(0, CSIDL_PROFILE),
rc_file
)
return expanded_rc_path
default_port = '22' # hurr durr
default_ssh_config_path = os.path.join(os.path.expanduser('~'), '.ssh', 'config')
# Options/settings which exist both as environment keys and which can be set on
# the command line, are defined here. When used via `fab` they will be added to
# the optparse parser, and either way they are added to `env` below (i.e. the
# 'dest' value becomes the environment key and the value, the env value).
#
# Keep in mind that optparse changes hyphens to underscores when automatically
# deriving the `dest` name, e.g. `--reject-unknown-hosts` becomes
# `reject_unknown_hosts`.
#
# Furthermore, *always* specify some sort of default to avoid ending up with
# optparse.NO_DEFAULT (currently a two-tuple)! In general, None is a better
# default than ''.
#
# User-facing documentation for these are kept in sites/docs/env.rst.
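# Example (added illustration): an entry below such as
#   make_option('-w', '--warn-only', action='store_true', default=False, ...)
# yields the optparse dest 'warn_only', so once the loop near the end of this
# module copies option defaults into `env`, code can read it as `env.warn_only`.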
env_options = [
make_option('-a', '--no_agent',
action='store_true',
default=False,
help="don't use the running SSH agent"
),
make_option('-A', '--forward-agent',
action='store_true',
default=False,
help="forward local agent to remote end"
),
make_option('--abort-on-prompts',
action='store_true',
default=False,
help="abort instead of prompting (for password, host, etc)"
),
make_option('-c', '--config',
dest='rcfile',
default=_rc_path(),
metavar='PATH',
help="specify location of config file to use"
),
make_option('--colorize-errors',
action='store_true',
default=False,
help="Color error output",
),
make_option('-D', '--disable-known-hosts',
action='store_true',
default=False,
help="do not load user known_hosts file"
),
make_option('-e', '--eagerly-disconnect',
action='store_true',
default=False,
help="disconnect from hosts as soon as possible"
),
make_option('-f', '--fabfile',
default='fabfile',
metavar='PATH',
help="python module file to import, e.g. '../other.py'"
),
make_option('-g', '--gateway',
default=None,
metavar='HOST',
help="gateway host to connect through"
),
make_option('--gss-auth',
action='store_true',
default=None,
help="Use GSS-API authentication"
),
make_option('--gss-deleg',
action='store_true',
default=None,
help="Delegate GSS-API client credentials or not"
),
make_option('--gss-kex',
action='store_true',
default=None,
help="Perform GSS-API Key Exchange and user authentication"
),
make_option('--hide',
metavar='LEVELS',
help="comma-separated list of output levels to hide"
),
make_option('-H', '--hosts',
default=[],
help="comma-separated list of hosts to operate on"
),
make_option('-i',
action='append',
dest='key_filename',
metavar='PATH',
default=None,
help="path to SSH private key file. May be repeated."
),
make_option('-k', '--no-keys',
action='store_true',
default=False,
help="don't load private key files from ~/.ssh/"
),
make_option('--keepalive',
dest='keepalive',
type=int,
default=0,
metavar="N",
help="enables a keepalive every N seconds"
),
make_option('--linewise',
action='store_true',
default=False,
help="print line-by-line instead of byte-by-byte"
),
make_option('-n', '--connection-attempts',
type='int',
metavar='M',
dest='connection_attempts',
default=1,
help="make M attempts to connect before giving up"
),
make_option('--no-pty',
dest='always_use_pty',
action='store_false',
default=True,
help="do not use pseudo-terminal in run/sudo"
),
make_option('-p', '--password',
default=None,
help="password for use with authentication and/or sudo"
),
make_option('-P', '--parallel',
dest='parallel',
action='store_true',
default=False,
help="default to parallel execution method"
),
make_option('--port',
default=default_port,
help="SSH connection port"
),
make_option('-r', '--reject-unknown-hosts',
action='store_true',
default=False,
help="reject unknown hosts"
),
make_option('--sudo-password',
default=None,
help="password for use with sudo only",
),
make_option('--system-known-hosts',
default=None,
help="load system known_hosts file before reading user known_hosts"
),
make_option('-R', '--roles',
default=[],
help="comma-separated list of roles to operate on"
),
make_option('-s', '--shell',
default='/bin/bash -l -c',
help="specify a new shell, defaults to '/bin/bash -l -c'"
),
make_option('--show',
metavar='LEVELS',
help="comma-separated list of output levels to show"
),
make_option('--skip-bad-hosts',
action="store_true",
default=False,
help="skip over hosts that can't be reached"
),
make_option('--skip-unknown-tasks',
action="store_true",
default=False,
help="skip over unknown tasks"
),
make_option('--ssh-config-path',
default=default_ssh_config_path,
metavar='PATH',
help="Path to SSH config file"
),
make_option('-t', '--timeout',
type='int',
default=10,
metavar="N",
help="set connection timeout to N seconds"
),
make_option('-T', '--command-timeout',
dest='command_timeout',
type='int',
default=None,
metavar="N",
help="set remote command timeout to N seconds"
),
make_option('-u', '--user',
default=_get_system_username(),
help="username to use when connecting to remote hosts"
),
make_option('-w', '--warn-only',
action='store_true',
default=False,
help="warn, instead of abort, when commands fail"
),
make_option('-x', '--exclude-hosts',
default=[],
metavar='HOSTS',
help="comma-separated list of hosts to exclude"
),
make_option('-z', '--pool-size',
dest='pool_size',
type='int',
metavar='INT',
default=0,
help="number of concurrent processes to use in parallel mode",
),
]
#
# Environment dictionary - actual dictionary object
#
# Global environment dict. Currently a catchall for everything: config settings
# such as global deep/broad mode, host lists, username etc.
# Most default values are specified in `env_options` above, in the interests of
# preserving DRY: anything in here is generally not settable via the command
# line.
env = _AttributeDict({
'abort_exception': None,
'again_prompt': 'Sorry, try again.',
'all_hosts': [],
'combine_stderr': True,
'colorize_errors': False,
'command': None,
'command_prefixes': [],
'cwd': '', # Must be empty string, not None, for concatenation purposes
'dedupe_hosts': True,
'default_port': default_port,
'eagerly_disconnect': False,
'echo_stdin': True,
'effective_roles': [],
'exclude_hosts': [],
'gateway': None,
'gss_auth': None,
'gss_deleg': None,
'gss_kex': None,
'host': None,
'host_string': None,
'lcwd': '', # Must be empty string, not None, for concatenation purposes
'local_user': _get_system_username(),
'output_prefix': True,
'passwords': {},
'path': '',
'path_behavior': 'append',
'port': default_port,
'real_fabfile': None,
'remote_interrupt': None,
'roles': [],
'roledefs': {},
'shell_env': {},
'skip_bad_hosts': False,
'skip_unknown_tasks': False,
'ssh_config_path': default_ssh_config_path,
'sudo_passwords': {},
'ok_ret_codes': [0], # a list of return codes that indicate success
# -S so sudo accepts passwd via stdin, -p with our known-value prompt for
# later detection (thus %s -- gets filled with env.sudo_prompt at runtime)
'sudo_prefix': "sudo -S -p '%(sudo_prompt)s' ",
'sudo_prompt': 'sudo password:',
'sudo_user': None,
'tasks': [],
'prompts': {},
'use_exceptions_for': {'network': False},
'use_shell': True,
'use_ssh_config': False,
'user': None,
'version': get_version('short')
})
# Fill in exceptions settings
exceptions = ['network']
exception_dict = {}
for e in exceptions:
exception_dict[e] = False
env.use_exceptions_for = _AliasDict(exception_dict,
aliases={'everything': exceptions})
# Add in option defaults
for option in env_options:
env[option.dest] = option.default
#
# Command dictionary
#
# Keys are the command/function names, values are the callables themselves.
# This is filled in when main() runs.
commands = {}
#
# Host connection dict/cache
#
connections = HostConnectionCache()
def _open_session():
transport = connections[env.host_string].get_transport()
# Try passing session-open timeout for Paramiko versions which support it
# (1.14.3+)
try:
session = transport.open_session(timeout=env.timeout)
# Revert to old call behavior if we seem to have hit arity error.
# TODO: consider introspecting the exception to avoid masking other
# TypeErrors; but this is highly fragile, especially when taking i18n into
# account.
except TypeError: # Assume arity error
session = transport.open_session()
return session
def default_channel():
"""
Return a channel object based on ``env.host_string``.
"""
try:
chan = _open_session()
except ssh.SSHException as err:
if str(err) == 'SSH session not active':
connections[env.host_string].close()
del connections[env.host_string]
chan = _open_session()
else:
raise
chan.settimeout(0.1)
chan.input_enabled = True
return chan
#
# Output controls
#
# Keys are "levels" or "groups" of output, values are always boolean,
# determining whether output falling into the given group is printed or not
# printed.
#
# By default, everything except 'debug' is printed, as this is what the average
# user, and new users, are most likely to expect.
#
# See docs/usage.rst for details on what these levels mean.
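# Example (added illustration): `--hide output` flips the two keys grouped under
# the 'output' alias defined below, i.e. roughly:
#   output['stdout'] = False
#   output['stderr'] = False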
output = _AliasDict({
'status': True,
'aborts': True,
'warnings': True,
'running': True,
'stdout': True,
'stderr': True,
'exceptions': False,
'debug': False,
'user': True
}, aliases={
'everything': ['warnings', 'running', 'user', 'output', 'exceptions'],
'output': ['stdout', 'stderr'],
'commands': ['stdout', 'running']
}) | PypiClean |
/ECoXiPy-0.4.0.tar.gz/ECoXiPy-0.4.0/ecoxipy/pyxom/output.py | u'''\
:mod:`ecoxipy.pyxom.output` - Building PyXOM Structures
=======================================================
:class:`PyXOMOutput` creates structures consisting of
:class:`ecoxipy.pyxom` data.
.. _ecoxipy.pyxom.output.examples:
Examples
--------
Creating a document and retrieving the byte string:
>>> from ecoxipy import MarkupBuilder
>>> b = MarkupBuilder()
>>> document = b[:'section':True] (
... b.article(
... b.h1(
... b & '<Example>', # Explicitly insert text
... data='to quote: <&>"\\''
... ),
... b.p(
... {'umlaut-attribute': u'äöüß'},
... 'Hello', b.em(' World', count=1), '!'
... ),
... None,
... b.div(
... # Insert elements with special names using subscripts:
... b['data-element'](u'äöüß <&>'),
... # Import content by calling the builder:
... b(
... '<p attr="value">raw content</p>Some Text',
... # Create an element without calling the creating method:
... b.br,
... (i for i in range(3))
... ),
... (i for i in range(3, 6))
... ),
... b | '<This is a comment!>',
... b['pi-target':'<PI content>'],
... b['pi-without-content':],
... lang='en'
... )
... )
    >>> document_string = u"""<!DOCTYPE section><article lang="en"><h1 data="to quote: &lt;&amp;&gt;&quot;'">&lt;Example&gt;</h1><p umlaut-attribute="äöüß">Hello<em count="1"> World</em>!</p><div><data-element>äöüß &lt;&amp;&gt;</data-element><p attr="value">raw content</p>Some Text<br/>012345</div><!--<This is a comment!>--><?pi-target <PI content>?><?pi-without-content?></article>"""
>>> bytes(document) == document_string.encode('UTF-8')
True
For more examples see :mod:`ecoxipy.pyxom`.
:class:`Output` Implementation
------------------------------
.. autoclass:: ecoxipy.pyxom.output.PyXOMOutput
'''
from ecoxipy import Output, _unicode, pyxom
class PyXOMOutput(Output):
'''\
An :class:`Output` implementation which creates
:class:`ecoxipy.pyxom.XMLNode` instances and Unicode string instances.
:param check_well_formedness: The attribute
:attr:`check_well_formedness` is determined by this value.
:type check_well_formedness: :func:`bool`
'''
def __init__(self, check_well_formedness=False):
self._check_well_formedness = bool(check_well_formedness)
@property
def check_well_formedness(self):
'''If :const:`True` the nodes will be checked for valid values.'''
return self._check_well_formedness
@staticmethod
def is_native_type(content):
'''\
Tests if an object has the attribute ``_IS_PYXOM_NODE`` and that this
is :const:`True`.
:returns: :const:`True` if the object has the attribute
            ``_IS_PYXOM_NODE`` being :const:`True`, :const:`False` otherwise.
'''
try:
return content._IS_PYXOM_NODE is True
except AttributeError:
return False
def element(self, name, children, attributes):
'''\
Returns an :class:`ecoxipy.pyxom.Element`.
:rtype: :class:`ecoxipy.pyxom.Element`
:raises ecoxipy.XMLWellFormednessException: If
:attr:`check_well_formedness` is :const:`True` and the
``name`` is not a valid XML name.
'''
return pyxom.Element(name, children, attributes,
self._check_well_formedness)
def text(self, content):
'''\
Creates a :class:`ecoxipy.pyxom.Text` node.
:rtype: :class:`ecoxipy.pyxom.Text`
'''
return pyxom.Text(content)
def comment(self, content):
'''\
Creates a :class:`ecoxipy.pyxom.Comment`.
:rtype: :class:`ecoxipy.pyxom.Comment`
:raises ecoxipy.XMLWellFormednessException: If
:attr:`check_well_formedness` is :const:`True` and ``content``
is not valid.
'''
return pyxom.Comment(content, self._check_well_formedness)
def processing_instruction(self, target, content):
'''\
Creates a :class:`ecoxipy.pyxom.ProcessingInstruction`.
:rtype: :class:`ecoxipy.pyxom.ProcessingInstruction`
:raises ecoxipy.XMLWellFormednessException: If
:attr:`check_well_formedness` is :const:`True` and
            either the ``target`` or the ``content`` is not valid.
'''
return pyxom.ProcessingInstruction(target, content,
self._check_well_formedness)
def document(self, doctype_name, doctype_publicid, doctype_systemid,
children, omit_xml_declaration, encoding):
'''\
Creates a :class:`ecoxipy.pyxom.Document` instance.
:rtype: :class:`ecoxipy.pyxom.Document`
:raises ecoxipy.XMLWellFormednessException: If
:attr:`check_well_formedness` is :const:`True` and
``doctype_name`` is not a valid XML name, ``doctype_publicid``
is not a valid public ID or ``doctype_systemid`` is not a
valid system ID.
'''
return pyxom.Document(doctype_name, doctype_publicid,
doctype_systemid, children, omit_xml_declaration, encoding,
self._check_well_formedness)
del Output
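# Usage sketch (assumption: ``MarkupBuilder`` accepts the output implementation
# through its ``output`` keyword argument, as with the other ecoxipy outputs):
#
#     from ecoxipy import MarkupBuilder
#     from ecoxipy.pyxom.output import PyXOMOutput
#     b = MarkupBuilder(output=PyXOMOutput(check_well_formedness=True))
#     paragraph = b.p('Hello ', b.em('World'), '!')   # an ecoxipy.pyxom.Element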
/Kallithea-0.7.0.tar.gz/Kallithea-0.7.0/kallithea/lib/utils.py
import logging
import os
import re
import traceback
import urllib.error
import mercurial.config
import mercurial.error
import mercurial.ui
import kallithea.lib.conf
from kallithea.lib import webutils
from kallithea.lib.exceptions import InvalidCloneUriException
from kallithea.lib.utils2 import ascii_bytes, aslist, safe_bytes, safe_str
from kallithea.lib.vcs.backends.git.repository import GitRepository
from kallithea.lib.vcs.backends.hg.repository import MercurialRepository
from kallithea.lib.vcs.conf import settings
from kallithea.lib.vcs.exceptions import VCSError
from kallithea.lib.vcs.utils.fakemod import create_module
from kallithea.lib.vcs.utils.helpers import get_scm
from kallithea.model import db, meta
log = logging.getLogger(__name__)
REMOVED_REPO_PAT = re.compile(r'rm__\d{8}_\d{6}_\d{6}_.*')
#==============================================================================
# PERM DECORATOR HELPERS FOR EXTRACTING NAMES FOR PERM CHECKS
#==============================================================================
def get_repo_slug(request):
_repo = request.environ['pylons.routes_dict'].get('repo_name')
if _repo:
_repo = _repo.rstrip('/')
return _repo
def get_repo_group_slug(request):
_group = request.environ['pylons.routes_dict'].get('group_name')
if _group:
_group = _group.rstrip('/')
return _group
def get_user_group_slug(request):
_group = request.environ['pylons.routes_dict'].get('id')
_group = db.UserGroup.get(_group)
if _group:
return _group.users_group_name
return None
def _get_permanent_id(s):
"""Helper for decoding stable URLs with repo ID. For a string like '_123'
return 123.
"""
by_id_match = re.match(r'^_(\d+)$', s)
if by_id_match is None:
return None
return int(by_id_match.group(1))
def fix_repo_id_name(path):
"""
Rewrite repo_name for _<ID> permanent URLs.
Given a path, if the first path element is like _<ID>, return the path with
this part expanded to the corresponding full repo name, else return the
provided path.
"""
first, rest = path, ''
if '/' in path:
first, rest_ = path.split('/', 1)
rest = '/' + rest_
repo_id = _get_permanent_id(first)
if repo_id is not None:
repo = db.Repository.get(repo_id)
if repo is not None:
return repo.repo_name + rest
return path
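# Illustrative behaviour of fix_repo_id_name (assuming a repository with
# database id 123 whose full name is 'mygroup/myrepo'):
#   fix_repo_id_name('_123')            -> 'mygroup/myrepo'
#   fix_repo_id_name('_123/changelog')  -> 'mygroup/myrepo/changelog'
#   fix_repo_id_name('other/_123')      -> 'other/_123'   # only the first path element is expanded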
def get_filesystem_repos(path):
"""
    Scans the given path for repos and yields (name, (type, path)) tuples.
    :param path: path to scan for repositories
"""
# remove ending slash for better results
path = path.rstrip(os.sep)
log.debug('now scanning in %s', path)
def isdir(*n):
return os.path.isdir(os.path.join(*n))
for root, dirs, _files in os.walk(path):
recurse_dirs = []
for subdir in dirs:
# skip removed repos
if REMOVED_REPO_PAT.match(subdir):
continue
# skip .<something> dirs TODO: rly? then we should prevent creating them ...
if subdir.startswith('.'):
continue
cur_path = os.path.join(root, subdir)
if isdir(cur_path, '.git'):
log.warning('ignoring non-bare Git repo: %s', cur_path)
continue
if (isdir(cur_path, '.hg') or
isdir(cur_path, '.svn') or
isdir(cur_path, 'objects') and (isdir(cur_path, 'refs') or
os.path.isfile(os.path.join(cur_path, 'packed-refs')))):
if not os.access(cur_path, os.R_OK) or not os.access(cur_path, os.X_OK):
log.warning('ignoring repo path without access: %s', cur_path)
continue
if not os.access(cur_path, os.W_OK):
log.warning('repo path without write access: %s', cur_path)
try:
scm_info = get_scm(cur_path)
assert cur_path.startswith(path)
repo_path = cur_path[len(path) + 1:]
yield repo_path, scm_info
continue # no recursion
except VCSError:
# We should perhaps ignore such broken repos, but especially
# the bare git detection is unreliable so we dive into it
pass
recurse_dirs.append(subdir)
dirs[:] = recurse_dirs
def is_valid_repo_uri(repo_type, url, ui):
"""Check if the url seems like a valid remote repo location
Raise InvalidCloneUriException if any problems"""
if repo_type == 'hg':
if url.startswith('http') or url.startswith('ssh'):
# initially check if it's at least the proper URL
# or does it pass basic auth
try:
MercurialRepository._check_url(url, ui)
except urllib.error.URLError as e:
raise InvalidCloneUriException('URI %s URLError: %s' % (url, e))
except mercurial.error.RepoError as e:
raise InvalidCloneUriException('Mercurial %s: %s' % (type(e).__name__, safe_str(bytes(e))))
elif url.startswith('git+http'):
raise InvalidCloneUriException('URI type %s not implemented' % (url,))
else:
raise InvalidCloneUriException('URI %s not allowed' % (url,))
elif repo_type == 'git':
if url.startswith('http') or url.startswith('git'):
# initially check if it's at least the proper URL
# or does it pass basic auth
try:
GitRepository._check_url(url)
except urllib.error.URLError as e:
raise InvalidCloneUriException('URI %s URLError: %s' % (url, e))
elif url.startswith('hg+http'):
raise InvalidCloneUriException('URI type %s not implemented' % (url,))
else:
raise InvalidCloneUriException('URI %s not allowed' % (url))
def is_valid_repo(repo_name, base_path, scm=None):
"""
Returns True if given path is a valid repository False otherwise.
If scm param is given also compare if given scm is the same as expected
from scm parameter
:param repo_name:
:param base_path:
:param scm:
:return True: if given path is a valid repository
"""
# TODO: paranoid security checks?
full_path = os.path.join(base_path, repo_name)
try:
scm_ = get_scm(full_path)
if scm:
return scm_[0] == scm
return True
except VCSError:
return False
def is_valid_repo_group(repo_group_name, base_path, skip_path_check=False):
"""
Returns True if given path is a repository group False otherwise
:param repo_name:
:param base_path:
"""
full_path = os.path.join(base_path, repo_group_name)
# check if it's not a repo
if is_valid_repo(repo_group_name, base_path):
return False
try:
# we need to check bare git repos at higher level
# since we might match branches/hooks/info/objects or possible
# other things inside bare git repo
get_scm(os.path.dirname(full_path))
return False
except VCSError:
pass
# check if it's a valid path
if skip_path_check or os.path.isdir(full_path):
return True
return False
def make_ui(repo_path=None):
"""
Create an Mercurial 'ui' object based on database Ui settings, possibly
augmenting with content from a hgrc file.
"""
baseui = mercurial.ui.ui()
# clean the baseui object
baseui._ocfg = mercurial.config.config()
baseui._ucfg = mercurial.config.config()
baseui._tcfg = mercurial.config.config()
sa = meta.Session()
for ui_ in sa.query(db.Ui).order_by(db.Ui.ui_section, db.Ui.ui_key):
if ui_.ui_active:
log.debug('config from db: [%s] %s=%r', ui_.ui_section,
ui_.ui_key, ui_.ui_value)
baseui.setconfig(ascii_bytes(ui_.ui_section), ascii_bytes(ui_.ui_key),
b'' if ui_.ui_value is None else safe_bytes(ui_.ui_value))
# force set push_ssl requirement to False, Kallithea handles that
baseui.setconfig(b'web', b'push_ssl', False)
baseui.setconfig(b'web', b'allow_push', b'*')
# prevent interactive questions for ssh password / passphrase
ssh = baseui.config(b'ui', b'ssh', default=b'ssh')
baseui.setconfig(b'ui', b'ssh', b'%s -oBatchMode=yes -oIdentitiesOnly=yes' % ssh)
# push / pull hooks
baseui.setconfig(b'hooks', b'changegroup.kallithea_push_action', b'python:kallithea.bin.vcs_hooks.push_action')
baseui.setconfig(b'hooks', b'outgoing.kallithea_pull_action', b'python:kallithea.bin.vcs_hooks.pull_action')
if baseui.config(b'hooks', ascii_bytes(db.Ui.HOOK_REPO_SIZE)): # ignore actual value
baseui.setconfig(b'hooks', ascii_bytes(db.Ui.HOOK_REPO_SIZE), b'python:kallithea.bin.vcs_hooks.repo_size')
if baseui.config(b'hooks', ascii_bytes(db.Ui.HOOK_UPDATE)): # ignore actual value
baseui.setconfig(b'hooks', ascii_bytes(db.Ui.HOOK_UPDATE), b'python:kallithea.bin.vcs_hooks.update')
if repo_path is not None:
# Note: MercurialRepository / mercurial.localrepo.instance will do this too, so it will always be possible to override db settings or what is hardcoded above
baseui.readconfig(safe_bytes(os.path.join(repo_path, '.hg', 'hgrc')))
assert baseui.plain() # set by hgcompat.monkey_do (invoked from import of vcs.backends.hg) to minimize potential impact of loading config files
return baseui
def set_app_settings(config):
"""
Updates app config with new settings from database
:param config:
"""
settings = db.Setting.get_app_settings()
for k, v in settings.items():
config[k] = v
config['base_path'] = db.Ui.get_repos_location()
def set_vcs_config(config):
"""
Patch VCS config with some Kallithea specific stuff
:param config: kallithea.CONFIG
"""
settings.BACKENDS = {
'hg': 'kallithea.lib.vcs.backends.hg.MercurialRepository',
'git': 'kallithea.lib.vcs.backends.git.GitRepository',
}
settings.GIT_EXECUTABLE_PATH = config.get('git_path', 'git')
settings.GIT_REV_FILTER = config.get('git_rev_filter', '--all').strip()
settings.DEFAULT_ENCODINGS = aslist(config.get('default_encoding',
'utf-8'), sep=',')
def set_indexer_config(config):
"""
Update Whoosh index mapping
:param config: kallithea.CONFIG
"""
log.debug('adding extra into INDEX_EXTENSIONS')
kallithea.lib.conf.INDEX_EXTENSIONS.extend(re.split(r'\s+', config.get('index.extensions', '')))
log.debug('adding extra into INDEX_FILENAMES')
kallithea.lib.conf.INDEX_FILENAMES.extend(re.split(r'\s+', config.get('index.filenames', '')))
def map_groups(path):
"""
Given a full path to a repository, create all nested groups that this
repo is inside. This function creates parent-child relationships between
groups and creates default perms for all new groups.
:param paths: full path to repository
"""
from kallithea.model.repo_group import RepoGroupModel
sa = meta.Session()
groups = path.split(kallithea.URL_SEP)
parent = None
group = None
# last element is repo in nested groups structure
groups = groups[:-1]
rgm = RepoGroupModel()
owner = db.User.get_first_admin()
for lvl, group_name in enumerate(groups):
group_name = '/'.join(groups[:lvl] + [group_name])
group = db.RepoGroup.get_by_group_name(group_name)
desc = '%s group' % group_name
# skip folders that are now removed repos
if REMOVED_REPO_PAT.match(group_name):
break
if group is None:
log.debug('creating group level: %s group_name: %s',
lvl, group_name)
group = db.RepoGroup(group_name, parent)
group.group_description = desc
group.owner = owner
sa.add(group)
rgm._create_default_perms(group)
sa.flush()
parent = group
return group
def repo2db_mapper(initial_repo_dict, remove_obsolete=False,
install_git_hooks=False, user=None, overwrite_git_hooks=False):
"""
    Maps all repos given in initial_repo_dict; non-existing repositories
    are created. If remove_obsolete is True it also checks for db entries
    that are not in initial_repo_dict and removes them.
:param initial_repo_dict: mapping with repositories found by scanning methods
:param remove_obsolete: check for obsolete entries in database
:param install_git_hooks: if this is True, also check and install git hook
for a repo if missing
:param overwrite_git_hooks: if this is True, overwrite any existing git hooks
that may be encountered (even if user-deployed)
"""
from kallithea.model.repo import RepoModel
from kallithea.model.scm import ScmModel
sa = meta.Session()
repo_model = RepoModel()
if user is None:
user = db.User.get_first_admin()
added = []
# creation defaults
defs = db.Setting.get_default_repo_settings(strip_prefix=True)
enable_statistics = defs.get('repo_enable_statistics')
enable_downloads = defs.get('repo_enable_downloads')
private = defs.get('repo_private')
for name, repo in sorted(initial_repo_dict.items()):
group = map_groups(name)
db_repo = repo_model.get_by_repo_name(name)
# found repo that is on filesystem not in Kallithea database
if not db_repo:
log.info('repository %s not found, creating now', name)
added.append(name)
desc = (repo.description
if repo.description != 'unknown'
else '%s repository' % name)
try:
new_repo = repo_model._create_repo(
repo_name=name,
repo_type=repo.alias,
description=desc,
repo_group=getattr(group, 'group_id', None),
owner=user,
enable_downloads=enable_downloads,
enable_statistics=enable_statistics,
private=private,
state=db.Repository.STATE_CREATED
)
except Exception as e:
log.error('error creating %r: %s: %s', name, type(e).__name__, e)
sa.rollback()
continue
sa.commit()
# we added that repo just now, and make sure it has githook
# installed, and updated server info
if new_repo.repo_type == 'git':
git_repo = new_repo.scm_instance
ScmModel().install_git_hooks(git_repo)
# update repository server-info
log.debug('Running update server info')
git_repo._update_server_info()
new_repo.update_changeset_cache()
elif install_git_hooks or overwrite_git_hooks:
if db_repo.repo_type == 'git':
ScmModel().install_git_hooks(db_repo.scm_instance, force=overwrite_git_hooks)
removed = []
# remove from database those repositories that are not in the filesystem
for repo in sa.query(db.Repository).all():
if repo.repo_name not in initial_repo_dict:
if remove_obsolete:
log.debug("Removing non-existing repository found in db `%s`",
repo.repo_name)
try:
RepoModel().delete(repo, forks='detach', fs_remove=False)
sa.commit()
except Exception:
#don't hold further removals on error
log.error(traceback.format_exc())
sa.rollback()
removed.append(repo.repo_name)
return added, removed
def load_extensions(root_path):
try:
ext = create_module('extensions', os.path.join(root_path, 'extensions.py'))
except FileNotFoundError:
try:
ext = create_module('rc', os.path.join(root_path, 'rcextensions', '__init__.py'))
log.warning('The name "rcextensions" is deprecated. Please use a file `extensions.py` instead of a directory `rcextensions`.')
except FileNotFoundError:
return
log.info('Loaded Kallithea extensions from %s', ext)
kallithea.EXTENSIONS = ext
# Additional mappings that are not present in the pygments lexers
kallithea.lib.conf.LANGUAGES_EXTENSIONS_MAP.update(getattr(ext, 'EXTRA_MAPPINGS', {}))
# Override any INDEX_EXTENSIONS
if getattr(ext, 'INDEX_EXTENSIONS', []):
log.debug('settings custom INDEX_EXTENSIONS')
kallithea.lib.conf.INDEX_EXTENSIONS = getattr(ext, 'INDEX_EXTENSIONS', [])
# Additional INDEX_EXTENSIONS
log.debug('adding extra into INDEX_EXTENSIONS')
kallithea.lib.conf.INDEX_EXTENSIONS.extend(getattr(ext, 'EXTRA_INDEX_EXTENSIONS', []))
#==============================================================================
# MISC
#==============================================================================
def extract_mentioned_users(text):
""" Returns set of actual database Users @mentioned in given text. """
result = set()
for name in webutils.extract_mentioned_usernames(text):
user = db.User.get_by_username(name, case_insensitive=True)
if user is not None and not user.is_default_user:
result.add(user)
    return result
/Flask-Config-Helper-0.1.1.tar.gz/Flask-Config-Helper-0.1.1/flask_config_helper/__init__.py
import os
import click
import yaml
from flask import Config as BaseConfig
from flask.ext.script import Command, Option
__version__ = '0.1.1'
class Config(object):
app = None
def __init__(self, app=None):
self.config = self._make_config(app)
if app:
self.init_app(app)
def __setattr__(self, key, value):
if key in ['app', 'config']:
super(Config, self).__setattr__(key, value)
self.config[key] = value
def __getattr__(self, item):
if item in ('app', 'config', 'init_app', '_make_config',):
super(Config, self).__getattr__(item)
try:
return self.config[item]
except:
raise AttributeError()
def _make_config(self, app, instance_relative=False):
root_path = None
default_config = {}
if app:
root_path = app.root_path
default_config = app.default_config
if instance_relative:
root_path = app.instance_path
return ExtendedConfig(root_path, default_config)
def init_app(self, app):
self.app = app
self.config.root_path = app.root_path
for k in app.config.iterkeys():
self.config[k] = app.config[k]
self.app.config = self.config
class ExtendedConfig(BaseConfig):
def from_yaml(self, config_name=None, file_name='config.yaml',
search_paths=None):
env = os.environ.get('FLASK_ENV', 'development').upper()
self['ENVIRONMENT'] = env.lower()
if not config_name:
config_name = env
config_name = config_name.upper()
if search_paths is None:
search_paths = (self.root_path,)
for path in search_paths:
config_file = os.path.join(path, file_name)
try:
with open(config_file) as f:
c = yaml.load(f)
for key, value in c[config_name].iteritems():
if key.isupper():
self[key] = value
break
except Exception as e:
pass
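    # Illustrative config.yaml layout that ``from_yaml`` expects: a top-level
    # section per environment (matched against FLASK_ENV, upper-cased), whose
    # upper-case keys are copied into the Flask config. The values below are
    # examples only.
    #
    #   DEVELOPMENT:
    #     DEBUG: true
    #     SQLALCHEMY_DATABASE_URI: "sqlite:///dev.db"
    #   PRODUCTION:
    #     DEBUG: false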
def from_heroku(self, mappings={}, keys=[]):
# Register Config from Environ Variables
for k, v in mappings.iteritems():
if k in os.environ:
self[v] = os.environ[k]
for k in keys:
if k in os.environ:
self[k] = os.environ[k]
class InitConfig(Command):
def __init__(self, directory=None, config_filename='config.yaml', config_contents=None):
from os.path import expanduser
if directory:
self.directory = directory
else:
self.directory = expanduser("~")
self.config_filename = config_filename
self.config_contents = config_contents
def get_options(self):
return [
Option('-d', '--directory', dest='directory', default=self.directory),
]
def run(self, directory):
if directory and not os.path.exists(directory):
os.makedirs(directory)
config_filename = os.path.join(directory, self.config_filename)
if os.path.isfile(config_filename):
click.confirm("File already exists at '%s', overwrite?" % click.format_filename(config_filename),
abort=True)
with click.open_file(config_filename, 'wb') as fp:
            fp.write(self.config_contents)
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/plugins/autoloader/prism-autoloader.js
(function () {
if (typeof self === 'undefined' || !self.Prism || !self.document || !document.createElement) {
return;
}
// The dependencies map is built automatically with gulp
var lang_dependencies = /*languages_placeholder[*/{"javascript":"clike","actionscript":"javascript","arduino":"cpp","aspnet":"markup","bison":"c","c":"clike","csharp":"clike","cpp":"c","coffeescript":"javascript","crystal":"ruby","css-extras":"css","d":"clike","dart":"clike","django":"markup","fsharp":"clike","flow":"javascript","glsl":"clike","go":"clike","groovy":"clike","haml":"ruby","handlebars":"markup","haxe":"clike","java":"clike","jolie":"clike","kotlin":"clike","less":"css","markdown":"markup","n4js":"javascript","nginx":"clike","objectivec":"c","opencl":"cpp","parser":"markup","php":"clike","php-extras":"php","processing":"clike","protobuf":"clike","pug":"javascript","qore":"clike","jsx":["markup","javascript"],"reason":"clike","ruby":"clike","sass":"css","scss":"css","scala":"java","smarty":"markup","swift":"clike","textile":"markup","twig":"markup","typescript":"javascript","vbnet":"basic","wiki":"markup"}/*]*/;
var lang_data = {};
var ignored_language = 'none';
var config = Prism.plugins.autoloader = {
languages_path: 'components/',
use_minified: true
};
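	// Configuration sketch (normally done from the embedding page; the URL below
	// is illustrative only):
	//   Prism.plugins.autoloader.languages_path = 'https://example.test/prism/components/';
	//   Prism.plugins.autoloader.use_minified = false;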
/**
* Lazy loads an external script
* @param {string} src
* @param {function=} success
* @param {function=} error
*/
var script = function (src, success, error) {
var s = document.createElement('script');
s.src = src;
s.async = true;
s.onload = function() {
document.body.removeChild(s);
success && success();
};
s.onerror = function() {
document.body.removeChild(s);
error && error();
};
document.body.appendChild(s);
};
/**
	 * Returns the path to a grammar, using the languages_path and use_minified config keys.
* @param {string} lang
* @returns {string}
*/
var getLanguagePath = function (lang) {
return config.languages_path +
'prism-' + lang
+ (config.use_minified ? '.min' : '') + '.js'
};
/**
* Tries to load a grammar and
* highlight again the given element once loaded.
* @param {string} lang
* @param {HTMLElement} elt
*/
var registerElement = function (lang, elt) {
var data = lang_data[lang];
if (!data) {
data = lang_data[lang] = {};
}
// Look for additional dependencies defined on the <code> or <pre> tags
var deps = elt.getAttribute('data-dependencies');
if (!deps && elt.parentNode && elt.parentNode.tagName.toLowerCase() === 'pre') {
deps = elt.parentNode.getAttribute('data-dependencies');
}
if (deps) {
deps = deps.split(/\s*,\s*/g);
} else {
deps = [];
}
loadLanguages(deps, function () {
loadLanguage(lang, function () {
Prism.highlightElement(elt);
});
});
};
/**
* Sequentially loads an array of grammars.
* @param {string[]|string} langs
* @param {function=} success
* @param {function=} error
*/
var loadLanguages = function (langs, success, error) {
if (typeof langs === 'string') {
langs = [langs];
}
var i = 0;
var l = langs.length;
var f = function () {
if (i < l) {
loadLanguage(langs[i], function () {
i++;
f();
}, function () {
error && error(langs[i]);
});
} else if (i === l) {
success && success(langs);
}
};
f();
};
/**
* Load a grammar with its dependencies
* @param {string} lang
* @param {function=} success
* @param {function=} error
*/
var loadLanguage = function (lang, success, error) {
var load = function () {
var force = false;
// Do we want to force reload the grammar?
if (lang.indexOf('!') >= 0) {
force = true;
lang = lang.replace('!', '');
}
var data = lang_data[lang];
if (!data) {
data = lang_data[lang] = {};
}
if (success) {
if (!data.success_callbacks) {
data.success_callbacks = [];
}
data.success_callbacks.push(success);
}
if (error) {
if (!data.error_callbacks) {
data.error_callbacks = [];
}
data.error_callbacks.push(error);
}
if (!force && Prism.languages[lang]) {
languageSuccess(lang);
} else if (!force && data.error) {
languageError(lang);
} else if (force || !data.loading) {
data.loading = true;
var src = getLanguagePath(lang);
script(src, function () {
data.loading = false;
languageSuccess(lang);
}, function () {
data.loading = false;
data.error = true;
languageError(lang);
});
}
};
var dependencies = lang_dependencies[lang];
if(dependencies && dependencies.length) {
loadLanguages(dependencies, load);
} else {
load();
}
};
/**
* Runs all success callbacks for this language.
* @param {string} lang
*/
var languageSuccess = function (lang) {
if (lang_data[lang] && lang_data[lang].success_callbacks && lang_data[lang].success_callbacks.length) {
lang_data[lang].success_callbacks.forEach(function (f) {
f(lang);
});
}
};
/**
* Runs all error callbacks for this language.
* @param {string} lang
*/
var languageError = function (lang) {
if (lang_data[lang] && lang_data[lang].error_callbacks && lang_data[lang].error_callbacks.length) {
lang_data[lang].error_callbacks.forEach(function (f) {
f(lang);
});
}
};
Prism.hooks.add('complete', function (env) {
if (env.element && env.language && !env.grammar) {
if (env.language !== ignored_language) {
registerElement(env.language, env.element);
}
}
});
}());
/Emotion_recognition-1.0.2.tar.gz/Emotion_recognition-1.0.2/Emotion_recognition/Signed_Spectral_Cluster/wordgraph.py
from time import time
from ppsing import sin_ant
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
import pickle
import scipy.spatial.distance as sp
def T_syn_ant(vocab):
    """ Function that computes the antonym and synonym matrices.
    Inputs:
    vocab : vocabulary of the word embedding in use
    Outputs:
    T_syn: synonym matrix, T_syn(ij) = 1 if wi and wj are synonyms, 0 otherwise
    T_ant: antonym matrix, T_ant(ij) = -1 if wi and wj are antonyms, 0 otherwise
"""
n=len(vocab)
    # initialize arrays with zeros
T_syn = np.array([[0.0]*n]*n)
T_ant = np.array([[0.0]*n]*n)
for i in range(n):
        # get synonyms and antonyms
syn,ant = sin_ant(vocab[i])
for j in syn:
try: T_syn[i][vocab.index(j)]= 1.0
except: continue
for j in ant:
try: T_ant[i][vocab.index(j)]= -1.0
except: continue
return T_syn,T_ant
# metric functions
KERNELS={
'gaus':'rbf',
'cosine':'cosine',
'mah': sp.mahalanobis }
def W(X,kernel='gaus',**kwds ):
"""
    There are several ways to implement the computation of W.
    One is cosine similarity.
    Another is the Mahalanobis distance (to be reviewed).
"""
try:
kernel_func=KERNELS[kernel]
except:
        print('unsupported metric')
return 1
matrix=pairwise_kernels(X,metric=kernel_func,filter_params=True,n_jobs=-1,**kwds)
return matrix
def W_af(W,T_ant,T_syn,gama,b_ant,b_syn):
W_final = gama*W + b_ant*T_ant*W + b_syn*T_syn*W
return W_final
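# Illustrative end-to-end sketch (assumes `vocab` is the embedding vocabulary,
# `X` the matching embedding matrix with one row per word, and that unit
# weights are a reasonable starting point for gama/b_ant/b_syn):
#
#   T_syn, T_ant = T_syn_ant(vocab)
#   W0 = W(X, kernel='cosine')
#   W_signed = W_af(W0, T_ant, T_syn, gama=1.0, b_ant=1.0, b_syn=1.0)
#   n_neg, n_pos = cont_neg_pos(W_signed)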
def cont_neg_pos(W):
"""
    This function counts the negative and positive links of the matrix W
"""
cont_neg=0
cont_pos=0
for i in range(len(W)):
for j in range(i+1,len(W)):
if(W[i][j]<0.0):
cont_neg +=1
elif (W[i][j]):
cont_pos +=1
    return cont_neg, cont_pos
/NeuNorm-1.6.2.tar.gz/NeuNorm-1.6.2/documentation/source/cropping_data.rst
*************
Cropping Data
*************
You have the option to crop the data, but if you do, this must be done after running the normalization.
The algorithm only crops the normalized sample and OB (open beam) data.
The region of interest (ROI) to keep can be defined in one of two ways:
- the 4 corners of the region of interest (ROI)
- the top left corner coordinates, width and height of the ROI
Let's use the first method and pretend the ROI is defined by
- x0 = 5
- y0 = 5
- x1 = 200
- y1 = 250
>>> my_crop_roi = ROI(x0=5, y0=5, x1=200, y1=250)
>>> o_norm.crop(roi=my_crop_roi)
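The second method uses the top left corner plus the ROI dimensions instead; a sketch of the same region (assuming the ROI class also accepts ``width`` and ``height`` keyword arguments, as listed above) would be
>>> my_crop_roi = ROI(x0=5, y0=5, width=195, height=245)
>>> o_norm.crop(roi=my_crop_roi)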
/Montreal-Forced-Aligner-3.0.0a3.tar.gz/docs/source/user_guide/configuration/segment.rst
.. _configuration_segmentation:
********************
Segmentation options
********************
.. csv-table::
:widths: 20, 20, 60
:header: "Parameter", "Default value", "Notes"
"energy_threshold", 5.5, "Energy threshold above which a frame will be counted as voiced"
"energy_mean_scale", 0.5, "Proportion of the mean energy of the file that should be added to the energy_threshold"
"max_segment_length", 30, "Maximum length of segments before they do not get merged"
"min_pause_duration", 0.05, "Minimum unvoiced duration to split speech segments"
.. _default_segment_config:
Default segmentation config file
--------------------------------
.. code-block:: yaml
energy_threshold: 5.5
energy_mean_scale: 0.5
max_segment_length: 30
min_pause_duration: 0.05
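Such a file is typically passed to the segmentation command through its configuration flag, e.g. ``mfa segment CORPUS_DIRECTORY OUTPUT_DIRECTORY --config_path segment.yaml`` (a sketch only; the exact argument order and flag name should be checked against the command reference for this version).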
/MacSyFinder-2.1.2.tar.gz/MacSyFinder-2.1.2/macsypy/model.py
import logging
_log = logging.getLogger(__name__)
from itertools import chain
from .error import ModelInconsistencyError
from .registries import DefinitionLocation
from .hit import ModelHit
from .gene import GeneStatus
class ModelBank:
"""
Store all Models objects.
"""
def __init__(self):
self._model_bank = {}
def __getitem__(self, fqn):
"""
:param fqn: the fully qualified name of the model
:type fqn: string
:return: the model corresponding to the fqn.
:rtype: :class:`macsypy.model.Model` object
:raise KeyError: if the model corresponding to the name does not exists
"""
if fqn in self._model_bank:
return self._model_bank[fqn]
else:
raise KeyError(fqn)
def __contains__(self, model):
"""
Implement the membership test operator
:param model: the model to test
:type model: :class:`macsypy.model.Model` object
:return: True if the model is in the Model factory, False otherwise
:rtype: boolean
"""
return model in self._model_bank.values()
def __iter__(self):
"""
Return an iterator object on the models contained in the bank
"""
return iter(self._model_bank.values())
def __len__(self):
"""
:return: the number of models stored in the bank
:rtype: integer
"""
return len(self._model_bank)
def add_model(self, model):
"""
:param model: the model to add
:type model: :class:`macsypy.model.Model` object
:raise: KeyError if a model with the same name is already registered.
"""
if model.fqn in self._model_bank:
raise KeyError(f"a model named {model.name} is already registered in the models' bank")
else:
self._model_bank[model.fqn] = model
class MetaModel(type):
"""
control the different type of gene in a model ('mandatory, accessory, ....)
and how to access to them.
The type of genes are defined in the model itself via *_gene_category* class attribute.
"""
def getter_maker(cat):
"""
        Create a property which allows access to the genes of the given category of the model
:param str cat: the type of gene category to which we create the getter
:return: unbound method
"""
def getter(self):
return getattr(self, f"_{cat}_genes")
return getter
def setter_maker(cat):
"""
        Create the method add_<cat>_gene which allows adding a gene to the right category of the model
:param str cat: the type of gene category to which we create the mutator
:return: unbound method
"""
def setter(self, gene):
gene.set_status(getattr(GeneStatus, cat.upper()))
getattr(self, f"_{cat}_genes").append(gene)
return setter
def __call__(cls, *args, **kwargs):
new_model_inst = super().__call__(*args, **kwargs)
setattr(cls, "gene_category", property(lambda cls: cls._gene_category))
for cat in new_model_inst.gene_category:
# set the private attribute in the Model instance
setattr(new_model_inst, f"_{cat}_genes", [])
# set the public property in the Model class
setattr(cls, f"{cat}_genes", property(MetaModel.getter_maker(cat)))
# add method to add new gene in the Model class
setattr(cls, f"add_{cat}_gene", MetaModel.setter_maker(cat))
return new_model_inst
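# Illustration of what the metaclass generates: for the category 'mandatory',
# every Model instance gets a private list ``_mandatory_genes``, a read-only
# property ``mandatory_genes`` returning that list, and a mutator
# ``add_mandatory_gene(gene)`` that sets the gene status to
# GeneStatus.MANDATORY before appending it (and likewise for 'accessory',
# 'neutral' and 'forbidden').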
class Model(metaclass=MetaModel):
"""
Handles a macromolecular model.
Contains all its pre-defined characteristics expected to be fulfilled to predict a complete model:
- component list (genes that are mandatory, accessory, neutral, forbidden)
- quorum (number of genes)
- genetic architecture
"""
_gene_category = ('mandatory', 'accessory', 'neutral', 'forbidden')
def __init__(self, fqn, inter_gene_max_space, min_mandatory_genes_required=None,
min_genes_required=None, max_nb_genes=None, multi_loci=False):
"""
:param fqn: the fully qualified name of the model CRISPR-Cas/sub-typing/CAS-TypeIE
:type fqn: string
:param inter_gene_max_space: the maximum distance between two genes (**co-localization** parameter)
:type inter_gene_max_space: integer
:param min_mandatory_genes_required: the quorum of mandatory genes to define this model
:type min_mandatory_genes_required: integer
:param min_genes_required: the quorum of genes to define this model
:type min_genes_required: integer
        :param max_nb_genes: The number of genes to be considered as a full system.
                             Used to compute the wholeness.
                             If None, max_nb_genes = mandatory + accessory
:type max_nb_genes: integer
:param multi_loci:
:type multi_loci: boolean
:raise ModelInconsistencyError: if an error is found in model logic.
For instance *genes_required* > *min_mandatory_genes_required*
"""
self.fqn = fqn
self._name = DefinitionLocation.split_fqn(self.fqn)[-1]
self._inter_gene_max_space = inter_gene_max_space
self._min_mandatory_genes_required = min_mandatory_genes_required
self._min_genes_required = min_genes_required
if self._min_mandatory_genes_required is not None and self._min_genes_required is not None:
if self._min_genes_required < self._min_mandatory_genes_required:
raise ModelInconsistencyError(f"{self.fqn}: min_genes_required '{self.min_genes_required}' "
f"must be greater or equal than min_mandatory_genes_required "
f"'{self.min_mandatory_genes_required}'"
)
self._max_nb_genes = max_nb_genes
self._multi_loci = multi_loci
def __str__(self):
rep = f"name: {self.name}\n"
rep += f"fqn: {self.fqn}\n"
for cat in self._gene_category:
rep += f"==== {cat} genes ====\n"
for gene in getattr(self, f"{cat}_genes"):
rep += f"{gene.name}\n"
rep += "============== end pprint model ================\n"
return rep
def __hash__(self):
"""
:return:
"""
return hash(self.fqn)
def __lt__(self, other):
"""
:param other: the other model to compare
:return: True if this fully qualified name is lesser than to other fully qualified name.
False otherwise.
:rtype: boolean
"""
return self.fqn < other.fqn
def __gt__(self, other):
"""
:param other: the other model to compare
:return: True if this fully qualified name is greater than to other fully qualified name.
False otherwise.
:rtype: boolean
"""
return self.fqn > other.fqn
def __eq__(self, other):
"""
:param other: the other model to compare
:return: True if this fully qualified name is equal to other fully qualified name.
False otherwise.
:rtype: boolean
"""
return self.fqn == other.fqn
@property
def name(self):
"""
:return: the short name of this model
"""
return self._name
@property
def family_name(self):
"""
:return: the family name of the model for instance 'CRISPRCas' or 'TXSS'
:rtype: str
"""
return DefinitionLocation.root_name(self.fqn)
@property
def inter_gene_max_space(self):
"""
:return: set the maximum distance allowed between 2 genes for this model
:rtype: integer
"""
# self._inter_gene_max_space come from the definition (xml)
# cfg_inter_gene_max_space come from the configuration command line option or conf file
# so cfg_inter_gene_max_space must superseed self._inter_gene_max_space
return self._inter_gene_max_space
@property
def min_mandatory_genes_required(self):
"""
:return: get the quorum of mandatory genes required for this model
:rtype: integer
"""
if self._min_mandatory_genes_required is None:
return len(self.mandatory_genes)
return self._min_mandatory_genes_required
@property
def min_genes_required(self):
"""
:return: get the minimum number of genes to assess for the model presence.
:rtype: integer
"""
if self._min_genes_required is None:
return len(self.mandatory_genes)
return self._min_genes_required
@property
def max_nb_genes(self):
"""
:return: the maximum number of genes to assess the model presence.
:rtype: int (or None)
"""
if self._max_nb_genes is None:
max_nb_genes = len(self.mandatory_genes) + len(self.accessory_genes)
else:
max_nb_genes = self._max_nb_genes
return max_nb_genes
@property
def multi_loci(self):
"""
:return: True if the model is authorized to be inferred from multiple loci, False otherwise
:rtype: boolean
"""
return self._multi_loci
def get_gene(self, gene_name):
"""
:param gene_name: the name of the gene to get
:type gene_name: string
:return: the gene corresponding to gene_name.
:rtype: a :class:`macsypy.gene.ModelGene` object.
:raise: KeyError the model does not contain any gene with name gene_name.
"""
# create a dict with genes from all categories
all_genes = {g.name: g for sublist in [getattr(self, f"{cat}_genes") for cat in self._gene_category]
for g in sublist}
if gene_name in all_genes:
return all_genes[gene_name]
else:
for gene in all_genes.values():
for ex in gene.exchangeables:
if ex.name == gene_name:
return ex
raise KeyError(f"Model {self.name} does not contain gene {gene_name}")
def genes(self, exchangeable=False):
"""
        :param bool exchangeable: include exchangeables if True
        :return: all the genes described in the model,
                 with exchangeables if exchangeable is True,
                 otherwise only "first level" genes.
:rtype: set of :class:`macsypy.gene.ModelGene` objects.
"""
# we assume that a gene cannot appear twice in a model
primary_genes = {g for sublist in [getattr(self, f"{cat}_genes") for cat in self._gene_category]
for g in sublist}
if exchangeable:
exchangeable_genes = [g_ex for g in primary_genes for g_ex in g.exchangeables]
all_genes = set(chain(primary_genes, exchangeable_genes))
else:
all_genes = primary_genes
return all_genes
def filter(self, hits):
"""
filter out the hits according to this model.
The filtering is based on the name of CoreGene associated to hit
and the name of ModelGene of the model
(the name of the ModelGene is the name of the CoreGene embed in the ModelGene)
only the hits related to genes implied in the model are kept.
:param hits: list of hits to filter
:type hits: list of :class:`macsypy.report.CoreHit` object
:return: list of hits
        :rtype: list of :class:`macsypy.hit.ModelHit` object
"""
all_genes = {g.name: g for g in self.genes(exchangeable=True)}
compatible_hits = []
for hit in hits:
if hit.gene.name in all_genes:
gene = all_genes[hit.gene.name]
mh = ModelHit(hit, gene, gene.status)
compatible_hits.append(mh)
        return compatible_hits
/ActiveReign-1.0.5.tar.gz/ActiveReign-1.0.5/ar3/core/wmi.py
from impacket.dcerpc.v5.dcom import wmi
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5.dcomrt import DCOMConnection
from impacket.dcerpc.v5.dcom.wmi import WBEM_FLAG_FORWARD_ONLY
from ar3.logger import highlight
from ar3.core.connector import Connector
class WmiCon(Connector):
def __init__(self, args, loggers, ip, host):
Connector.__init__(self, args, loggers, ip)
self.display_ip = ip
self.display_host = host
self._debug = False
self.dcom = None
self.wmi_con = None
self.process_list = {}
def create_wmi_con(self, namespace='root\\cimv2'):
self.dcom = DCOMConnection(self.host, self.username, self.password, self.domain, self.lmhash, self.nthash)
iInterface = self.dcom.CoCreateInstanceEx(wmi.CLSID_WbemLevel1Login,wmi.IID_IWbemLevel1Login)
iWbemLevel1Login = wmi.IWbemLevel1Login(iInterface)
self.wmi_con = iWbemLevel1Login.NTLMLogin('\\\\{}\\{}'.format(self.host, namespace), NULL, NULL)
def get_netprocess(self, tasklist=False):
self.create_wmi_con()
wmi_enum_process = self.wmi_con.ExecQuery('SELECT * from Win32_Process', lFlags=WBEM_FLAG_FORWARD_ONLY)
while True:
try:
wmi_process = wmi_enum_process.Next(0xffffffff, 1)[0]
wmi_process_owner = wmi_process.GetOwner()
attributes = {'computername': self.host,
'processname': wmi_process.Name,
'processid': wmi_process.ProcessId,
'user': wmi_process_owner.User,
'domain': wmi_process_owner.Domain}
# Dont wait until end to print
if tasklist:
self.logger.info([self.display_host, self.display_ip, "TASKLIST","PID: {:<6} Name: {:<20} User: {:<17} Host: {:<15} Domain: {}".
format(attributes['processid'], attributes['processname'], attributes['user'],
attributes['computername'], attributes['domain'])])
self.process_list[wmi_process.ProcessId] = attributes
except Exception as e:
if str(e).find('S_FALSE') < 0:
self.logger.debug( "Get-NetProcess: {}".format(str(e)))
else:
break
self.disconnect()
def get_netlocalgroups(self):
self.create_wmi_con('root\\cimv2')
query = 'Select Name from win32_group'
wmi_query = self.wmi_con.ExecQuery(query, lFlags=WBEM_FLAG_FORWARD_ONLY)
while True:
try:
wmi_results = wmi_query.Next(0xffffffff, 1)[0]
wmi_results = wmi_results.getProperties()
for key,value in wmi_results.items():
self.logger.info([self.display_host, self.display_ip, "LOCAL GROUPS", value['value']])
except Exception as e:
if str(e).find('S_FALSE') < 0:
self.logger.debug([self.display_host, self.display_ip, "LOCAL GROUPS", str(e)])
else:
break
self.disconnect()
def get_localgroup_members(self, domain, group):
self.create_wmi_con('root\\cimv2')
query = "SELECT PartComponent FROM Win32_GroupUser WHERE GroupComponent=\"Win32_Group.Domain='{}',Name='{}'\"".format(domain, group)
wmi_query = self.wmi_con.ExecQuery(query, lFlags=WBEM_FLAG_FORWARD_ONLY)
while True:
try:
wmi_results = wmi_query.Next(0xffffffff, 1)[0]
wmi_results = wmi_results.getProperties()
for key,value in wmi_results.items():
member = self.parse_local_members(value['value'])
self.logger.info([self.display_host, self.display_ip, "LOCAL MEMBERS", "{:<30} {}".format(group.title(), member)])
except Exception as e:
if str(e).find('S_FALSE') < 0:
self.logger.debug([self.display_host, self.display_ip, "LOCAL MEMBERS", str(e)])
else:
break
self.disconnect()
def parse_local_members(self, line):
# Parse domain\account_name from wmi output query
try:
data = line.split('.')[1]
domain, account = data.split(',')
return "{}\\{}".format(domain.split("=")[1].strip("\""), account.split("=")[1].strip("\""))
except:
return line
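    # Illustrative input/output for parse_local_members, assuming a typical
    # Win32_GroupUser PartComponent value:
    #   \\HOST\root\cimv2:Win32_UserAccount.Domain="CORP",Name="jsmith"
    #   -> CORP\jsmith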
def wmi_query(self,namespace, query, name="WMI QUERY"):
self.create_wmi_con(namespace)
wmi_query = self.wmi_con.ExecQuery(query, lFlags=WBEM_FLAG_FORWARD_ONLY)
while True:
try:
wmi_results = wmi_query.Next(0xffffffff, 1)[0]
wmi_results = wmi_results.getProperties()
for k,v in wmi_results.items():
self.logger.info([self.display_host, self.display_ip, name, "{:<30} {}".format(k, v['value'])])
except Exception as e:
if str(e).find('S_FALSE') < 0:
self.logger.debug( "WMIQuery: {}".format(str(e)))
else:
break
self.disconnect()
def disconnect(self):
        self.dcom.disconnect()
/Integral-0.1.0.tar.gz/Integral-0.1.0/integral/sde/srk_scalar.py
from integral import backend
from integral import profile
__all__ = [
'srk1w1_scalar',
'srk2w1_scalar',
'KlPl_scalar',
]
def srk1w1_scalar(f=None, g=None):
    """Order 1.5 Strong SRK Methods for SDEs with Scalar Noise.
    This method has strong orders :math:`(p_d, p_s) = (2.0,1.5)`.
The Butcher table is:
.. math::
\\begin{array}{l|llll|llll|llll}
0 &&&&& &&&& &&&& \\\\
3/4 &3/4&&&& 3/2&&& &&&& \\\\
0 &0&0&0&& 0&0&0&& &&&&\\\\
\\hline
0 \\\\
1/4 & 1/4&&& & 1/2&&&\\\\
1 & 1&0&&& -1&0&\\\\
1/4& 0&0&1/4&& -5&3&1/2\\\\
\\hline
& 1/3& 2/3& 0 & 0 & -1 & 4/3 & 2/3&0 & -1 &4/3 &-1/3 &0 \\\\
\\hline
& &&&& 2 &-4/3 & -2/3 & 0 & -2 & 5/3 & -2/3 & 1
\\end{array}
References
----------
[1] Rößler, Andreas. "Strong and weak approximation methods for stochastic differential
equations—some recent developments." Recent developments in applied probability and
statistics. Physica-Verlag HD, 2010. 127-153.
[2] Rößler, Andreas. "Runge–Kutta methods for the strong approximation of solutions of
stochastic differential equations." SIAM Journal on Numerical Analysis 48.3
(2010): 922-952.
"""
dt = profile.get_dt()
dt_sqrt = dt ** 0.5
A0_21 = 0.75
A1_21 = 0.25
B0_21 = 1.5
B1_21 = 0.5
A1_31 = 1
B1_31 = -1
A1_43 = 0.25
B1_41 = -5
B1_42 = 3
B1_43 = 0.5
alpha1 = 1 / 3
alpha2 = 2 / 3
c0_2 = 0.75
c1_2 = 0.25
c1_3 = 1
c1_4 = 0.25
beta1_1 = -1
beta1_2 = 4 / 3
beta1_3 = 2 / 3
beta2_1 = -1
beta2_2 = 4 / 3
beta2_3 = -1 / 3
beta3_1 = 2
beta3_2 = -4 / 3
beta3_3 = -2 / 3
beta4_1 = -2
beta4_2 = 5 / 3
beta4_3 = -2 / 3
beta4_4 = 1
def wrapper(f_df, f_dg):
def init_func(x, t, *args):
I1 = backend.normal(0.0, dt_sqrt, backend.shape(x))
I0 = backend.normal(0.0, dt_sqrt, backend.shape(x))
I10 = 0.5 * dt * (I1 + I0 / 3.0 ** 0.5)
I11 = 0.5 * (I1 ** 2 - dt)
I111 = (I1 ** 3 - 3 * dt * I1) / 6
H0s1 = x
H1s1 = x
f_t_H0s1 = f_df(t, H0s1, *args)
g_t_H1s1 = f_dg(t, H1s1, *args)
H0s2 = x + dt * A0_21 * f_t_H0s1 + B0_21 * g_t_H1s1 * I10 / dt
H1s2 = x + dt * A1_21 * f_t_H0s1 + dt_sqrt * B1_21 * g_t_H1s1
f_t_H0s2 = f_df(t + c0_2 * dt, H0s2, *args)
g_t_H1s2 = f_dg(t + c1_2 * dt, H1s2, *args)
H0s3 = x
H1s3 = x + dt * (A1_31 * f_t_H0s1) + dt_sqrt * B1_31 * g_t_H1s1
g_t_H1s3 = f_dg(t + c1_3 * dt, H1s3, *args)
H1s4 = x + dt * A1_43 * f_df(t, H0s3, *args) + \
dt_sqrt * (B1_41 * g_t_H1s1 + B1_42 * g_t_H1s2 + B1_43 * g_t_H1s3)
g_t_H1s4 = f_dg(t + c1_4 * dt, H1s4, *args)
y1 = x + dt * (alpha1 * f_t_H0s1 + alpha2 * f_t_H0s2) + \
(beta1_1 * I1 + beta2_1 * I11 / dt_sqrt + beta3_1 * I10 / dt + beta4_1 * I111 / dt) * g_t_H1s1 + \
(beta1_2 * I1 + beta2_2 * I11 / dt_sqrt + beta3_2 * I10 / dt + beta4_2 * I111 / dt) * g_t_H1s2 + \
(beta1_3 * I1 + beta2_3 * I11 / dt_sqrt + beta3_3 * I10 / dt + beta4_3 * I111 / dt) * g_t_H1s3 + \
(beta4_4 * I111 / dt) * g_t_H1s4
return y1
return init_func
if f is not None and g is not None:
return wrapper(f, g)
elif f is not None:
return lambda g: wrapper(f, g)
elif g is not None:
return lambda f: wrapper(f, g)
else:
raise ValueError('Must provide "f" or "g".')
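# Usage sketch (illustrative only): the drift/diffusion functions below are
# examples, and it is assumed that the integration step size has already been
# configured through ``integral.profile`` before this module is imported.
#
#   def f(t, x, a=1.0):          # drift term of dX = f dt + g dW
#       return -a * x
#
#   def g(t, x, sigma=0.1):      # diffusion term (scalar noise)
#       return sigma * x
#
#   step = srk1w1_scalar(f=f, g=g)
#   x_next = step(x, t)          # one integration step from state x at time t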
def srk2w1_scalar(f=None, g=None):
    """Order 1.5 Strong SRK Methods for SDEs with Scalar Noise.
    This method has strong orders :math:`(p_d, p_s) = (3.0,1.5)`.
The Butcher table is:
.. math::
\\begin{array}{c|cccc|cccc|ccc|}
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & & & \\\\
1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & & & \\\\
1 / 2 & 1 / 4 & 1 / 4 & 0 & 0 & 1 & 1 / 2 & 0 & 0 & & & & \\\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & & & \\\\
\\hline 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & & & \\\\
1 / 4 & 1 / 4 & 0 & 0 & 0 & -1 / 2 & 0 & 0 & 0 & & & & \\\\
1 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & & & \\\\
1 / 4 & 0 & 0 & 1 / 4 & 0 & 2 & -1 & 1 / 2 & 0 & & & & \\\\
\\hline & 1 / 6 & 1 / 6 & 2 / 3 & 0 & -1 & 4 / 3 & 2 / 3 & 0 & -1 & -4 / 3 & 1 / 3 & 0 \\\\
\\hline & & & & &2 & -4 / 3 & -2 / 3 & 0 & -2 & 5 / 3 & -2 / 3 & 1
\\end{array}
References
----------
[1] Rößler, Andreas. "Strong and weak approximation methods for stochastic differential
equations—some recent developments." Recent developments in applied probability and
statistics. Physica-Verlag HD, 2010. 127-153.
[2] Rößler, Andreas. "Runge–Kutta methods for the strong approximation of solutions of
stochastic differential equations." SIAM Journal on Numerical Analysis 48.3
(2010): 922-952.
"""
dt = profile.get_dt()
dt_sqrt = dt ** 0.5
A0_21 = 1
A0_31 = 0.25
A0_32 = 0.25
A1_21 = 0.25
A1_31 = 1
A1_43 = 0.25
B0_31 = 1
B0_32 = 0.5
B1_21 = -0.5
B1_31 = 1
B1_41 = 2
B1_42 = -1
B1_43 = 0.5
alpha1 = 1 / 6
alpha2 = 1 / 6
alpha3 = 2 / 3
c0_2 = 1
c0_3 = 0.5
c1_2 = 0.25
c1_3 = 1
c1_4 = 0.25
beta1_1 = -1
beta1_2 = 4 / 3
beta1_3 = 2 / 3
beta2_1 = 1
beta2_2 = -4 / 3
beta2_3 = 1 / 3
beta3_1 = 2
beta3_2 = -4 / 3
beta3_3 = -2 / 3
beta4_1 = -2
beta4_2 = 5 / 3
beta4_3 = -2 / 3
beta4_4 = 1
def wrapper(f_df, f_dg):
def init_func(x, t, *args):
I1 = backend.normal(0.0, dt_sqrt, backend.shape(x))
I0 = backend.normal(0.0, dt_sqrt, backend.shape(x))
I10 = 0.5 * dt * (I1 + I0 / 3.0 ** 0.5)
I11 = 0.5 * (I1 ** 2 - dt)
I111 = (I1 ** 3 - 3 * dt * I1) / 6
H0s1 = x
H1s1 = x
f_t_H0s1 = f_df(t, H0s1, *args)
g_t_H1s1 = f_dg(t, H1s1, *args)
H0s2 = x + dt * (A0_21 * f_t_H0s1)
H1s2 = x + dt * (A1_21 * f_t_H0s1) + dt_sqrt * (B1_21 * g_t_H1s1)
f_t_H0s2 = f_df(t + c0_2 * dt, H0s2, *args)
g_t_H1s2 = f_dg(t + c1_2 * dt, H1s2, *args)
H0s3 = x + dt * (A0_31 * f_t_H0s1 + A0_32 * f_t_H0s2) + \
(B0_31 * g_t_H1s1 + B0_32 * g_t_H1s2) * I10 / dt
H1s3 = x + dt * (A1_31 * f_t_H0s1) + dt_sqrt * (B1_31 * g_t_H1s1)
            f_t_H0s3 = f_df(t + c0_3 * dt, H0s3, *args)
g_t_H1s3 = f_dg(t + c1_3 * dt, H1s3, *args)
H1s4 = x + dt * (A1_43 * f_t_H0s3) + \
dt_sqrt * (B1_41 * g_t_H1s1 + B1_42 * g_t_H1s2 + B1_43 * g_t_H1s3)
g_t_H1s4 = f_dg(t + c1_4 * dt, H1s4, *args)
y1 = x + dt * (alpha1 * f_t_H0s1 + alpha2 * f_t_H0s2 + alpha3 * f_t_H0s3) + \
(beta1_1 * I1 + beta2_1 * I11 / dt_sqrt + beta3_1 * I10 / dt + beta4_1 * I111 / dt) * g_t_H1s1 + \
(beta1_2 * I1 + beta2_2 * I11 / dt_sqrt + beta3_2 * I10 / dt + beta4_2 * I111 / dt) * g_t_H1s2 + \
(beta1_3 * I1 + beta2_3 * I11 / dt_sqrt + beta3_3 * I10 / dt + beta4_3 * I111 / dt) * g_t_H1s3 + \
(beta4_4 * I111 / dt) * g_t_H1s4
return y1
return init_func
if f is not None and g is not None:
return wrapper(f, g)
elif f is not None:
return lambda g: wrapper(f, g)
elif g is not None:
return lambda f: wrapper(f, g)
else:
raise ValueError('Must provide "f" or "g".')
def KlPl_scalar(f=None, g=None):
    """Order 1.0 Strong SRK Methods for SDEs with Scalar Noise.
    This method has strong order :math:`p_s = 1.0`.
The Butcher table is:
.. math::
\\begin{array}{c|cc|cc|cc|c}
0 & 0 & 0 & 0 & 0 & & \\\\
0 & 0 & 0 & 0 & 0 & & \\\\
\\hline 0 & 0 & 0 & 0 & 0 & & \\\\
0 & 1 & 0 & 1 & 0 & & \\\\
\\hline 0 & 1 & 0 & 1 & 0 & -1 & 1 \\\\
\\hline & & & 1 & 0 & 0 & 0
\\end{array}
References
----------
[1] P. E. Kloeden, E. Platen, Numerical Solution of Stochastic Differential
Equations, 2nd Edition, Springer, Berlin Heidelberg New York, 1995.
"""
dt = profile.get_dt()
dt_sqrt = dt ** 0.5
A1_21 = 1
B1_21 = 1
alpha_1 = 1
beta1_1 = 1
beta2_1 = -1
beta2_2 = 1
beta3_1 = 1
def wrapper(f_df, g_dg):
def init_func(x, t0, *args):
I1 = backend.normal(0.0, dt_sqrt, backend.shape(x))
I0 = backend.normal(0.0, dt_sqrt, backend.shape(x))
I10 = 0.5 * dt * (I1 + I0 / 3.0 ** 0.5)
I11 = 0.5 * (I1 ** 2 - dt)
X0_1 = x
X1_1 = x
f_t_H0s1 = f_df(t0, X0_1, *args)
g_t_H1s1 = g_dg(t0, X1_1, *args)
X1_2 = x + dt * A1_21 * f_t_H0s1 + dt_sqrt * B1_21 * g_t_H1s1
g_t_H1s2 = g_dg(t0, X1_2, *args)
y1 = x + dt * alpha_1 * f_t_H0s1 + \
(beta1_1 * I1 + beta2_1 * I11 / dt_sqrt + beta3_1 * I10 / dt) * g_t_H1s1 + \
beta2_2 * I11 / dt_sqrt * g_t_H1s2
return y1
return init_func
if f is not None and g is not None:
return wrapper(f, g)
elif f is not None:
return lambda g: wrapper(f, g)
elif g is not None:
return lambda f: wrapper(f, g)
else:
        raise ValueError('Must provide "f" or "g".')
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/core/visualization/image.py
import sys
import cv2
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from ..mask.structures import bitmap_to_polygon
from ..utils import mask2ndarray
from .palette import get_palette, palette_val
__all__ = [
'color_val_matplotlib', 'draw_masks', 'draw_bboxes', 'draw_labels',
'imshow_det_bboxes', 'imshow_gt_det_bboxes'
]
EPS = 1e-2
def color_val_matplotlib(color):
"""Convert various input in BGR order to normalized RGB matplotlib color
tuples.
Args:
color (:obj`Color` | str | tuple | int | ndarray): Color inputs.
Returns:
tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
"""
color = mmcv.color_val(color)
color = [color / 255 for color in color[::-1]]
return tuple(color)
def _get_adaptive_scales(areas, min_area=800, max_area=30000):
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``'min_area'``, the scale is 0.5 while the area is larger than
``'max_area'``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Default: 800.
max_area (int): Upper bound areas for adaptive scales.
Default: 30000.
Returns:
        ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
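# Worked example for _get_adaptive_scales with its defaults: an area of 15400
# gives 0.5 + (15400 - 800) / (30000 - 800) = 1.0, areas at or below 800 clip
# to 0.5, and areas at or above 30000 clip to 1.0.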
def _get_bias_color(base, max_dist=30):
"""Get different colors for each masks.
Get different colors for each masks by adding a bias
color to the base category color.
Args:
base (ndarray): The base category color with the shape
of (3, ).
max_dist (int): The max distance of bias. Default: 30.
Returns:
ndarray: The new color for a mask with the shape of (3, ).
"""
new_color = base + np.random.randint(
low=-max_dist, high=max_dist + 1, size=3)
return np.clip(new_color, 0, 255, new_color)
def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):
"""Draw bounding boxes on the axes.
Args:
ax (matplotlib.Axes): The input axes.
bboxes (ndarray): The input bounding boxes with the shape
of (n, 4).
color (list[tuple] | matplotlib.color): the colors for each
bounding boxes.
alpha (float): Transparency of bounding boxes. Default: 0.8.
thickness (int): Thickness of lines. Default: 2.
Returns:
matplotlib.Axes: The result axes.
"""
polygons = []
for i, bbox in enumerate(bboxes):
bbox_int = bbox.astype(np.int32)
poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
[bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
np_poly = np.array(poly).reshape((4, 2))
polygons.append(Polygon(np_poly))
p = PatchCollection(
polygons,
facecolor='none',
edgecolors=color,
linewidths=thickness,
alpha=alpha)
ax.add_collection(p)
return ax
def draw_labels(ax,
labels,
positions,
scores=None,
class_names=None,
color='w',
font_size=8,
scales=None,
horizontal_alignment='left'):
"""Draw labels on the axes.
Args:
ax (matplotlib.Axes): The input axes.
labels (ndarray): The labels with the shape of (n, ).
positions (ndarray): The positions to draw each labels.
scores (ndarray): The scores for each labels.
class_names (list[str]): The class names.
color (list[tuple] | matplotlib.color): The colors for labels.
font_size (int): Font size of texts. Default: 8.
scales (list[float]): Scales of texts. Default: None.
horizontal_alignment (str): The horizontal alignment method of
texts. Default: 'left'.
Returns:
matplotlib.Axes: The result axes.
"""
for i, (pos, label) in enumerate(zip(positions, labels)):
label_text = class_names[
label] if class_names is not None else f'class {label}'
if scores is not None:
label_text += f'|{scores[i]:.02f}'
text_color = color[i] if isinstance(color, list) else color
font_size_mask = font_size if scales is None else font_size * scales[i]
ax.text(
pos[0],
pos[1],
f'{label_text}',
bbox={
'facecolor': 'black',
'alpha': 0.8,
'pad': 0.7,
'edgecolor': 'none'
},
color=text_color,
fontsize=font_size_mask,
verticalalignment='top',
horizontalalignment=horizontal_alignment)
return ax
def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):
"""Draw masks on the image and their edges on the axes.
Args:
ax (matplotlib.Axes): The input axes.
img (ndarray): The image with the shape of (3, h, w).
masks (ndarray): The masks with the shape of (n, h, w).
color (ndarray): The colors for each masks with the shape
of (n, 3).
with_edge (bool): Whether to draw edges. Default: True.
alpha (float): Transparency of bounding boxes. Default: 0.8.
Returns:
matplotlib.Axes: The result axes.
ndarray: The result image.
"""
taken_colors = set([0, 0, 0])
if color is None:
random_colors = np.random.randint(0, 255, (masks.size(0), 3))
color = [tuple(c) for c in random_colors]
color = np.array(color, dtype=np.uint8)
polygons = []
for i, mask in enumerate(masks):
if with_edge:
contours, _ = bitmap_to_polygon(mask)
polygons += [Polygon(c) for c in contours]
color_mask = color[i]
while tuple(color_mask) in taken_colors:
color_mask = _get_bias_color(color_mask)
taken_colors.add(tuple(color_mask))
mask = mask.astype(bool)
img[mask] = img[mask] * (1 - alpha) + color_mask * alpha
p = PatchCollection(
polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8)
ax.add_collection(p)
return ax, img
def imshow_det_bboxes(img,
bboxes=None,
labels=None,
segms=None,
class_names=None,
score_thr=0,
bbox_color='green',
text_color='green',
mask_color=None,
thickness=2,
font_size=8,
win_name='',
show=True,
wait_time=0,
out_file=None):
"""Draw bboxes and class labels (with scores) on an image.
Args:
img (str | ndarray): The image to be displayed.
bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5).
labels (ndarray): Labels of bboxes.
segms (ndarray | None): Masks, shaped (n,h,w) or None.
class_names (list[str]): Names of each classes.
score_thr (float): Minimum score of bboxes to be shown. Default: 0.
bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines.
If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: 'green'.
text_color (list[tuple] | tuple | str | None): Colors of texts.
If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: 'green'.
mask_color (list[tuple] | tuple | str | None, optional): Colors of
masks. If a single color is given, it will be applied to all
classes. The tuple of color should be in RGB order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
        font_size (int): Font size of texts. Default: 8.
show (bool): Whether to show the image. Default: True.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param. Default: 0.
out_file (str, optional): The filename to write the image.
Default: None.
Returns:
ndarray: The image with bboxes drawn on it.
"""
assert bboxes is None or bboxes.ndim == 2, \
f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'
assert labels.ndim == 1, \
f' labels ndim should be 1, but its ndim is {labels.ndim}.'
    assert bboxes is None or bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \
        f' bboxes.shape[1] should be 4 or 5, but it is {bboxes.shape[1]}.'
assert bboxes is None or bboxes.shape[0] <= labels.shape[0], \
'labels.shape[0] should not be less than bboxes.shape[0].'
    assert segms is None or segms.shape[0] == labels.shape[0], \
        'segms and labels should have the same length.'
assert segms is not None or bboxes is not None, \
'segms and bboxes should not be None at the same time.'
img = mmcv.imread(img).astype(np.uint8)
if score_thr > 0:
assert bboxes is not None and bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
if segms is not None:
segms = segms[inds, ...]
img = mmcv.bgr2rgb(img)
width, height = img.shape[1], img.shape[0]
img = np.ascontiguousarray(img)
fig = plt.figure(win_name, frameon=False)
plt.title(win_name)
canvas = fig.canvas
dpi = fig.get_dpi()
# add a small EPS to avoid precision lost due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)
# remove white edges by set subplot margin
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = plt.gca()
ax.axis('off')
max_label = int(max(labels) if len(labels) > 0 else 0)
text_palette = palette_val(get_palette(text_color, max_label + 1))
text_colors = [text_palette[label] for label in labels]
num_bboxes = 0
if bboxes is not None:
num_bboxes = bboxes.shape[0]
bbox_palette = palette_val(get_palette(bbox_color, max_label + 1))
colors = [bbox_palette[label] for label in labels[:num_bboxes]]
draw_bboxes(ax, bboxes, colors, alpha=0.8, thickness=thickness)
horizontal_alignment = 'left'
positions = bboxes[:, :2].astype(np.int32) + thickness
areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
scales = _get_adaptive_scales(areas)
scores = bboxes[:, 4] if bboxes.shape[1] == 5 else None
draw_labels(
ax,
labels[:num_bboxes],
positions,
scores=scores,
class_names=class_names,
color=text_colors,
font_size=font_size,
scales=scales,
horizontal_alignment=horizontal_alignment)
if segms is not None:
mask_palette = get_palette(mask_color, max_label + 1)
colors = [mask_palette[label] for label in labels]
colors = np.array(colors, dtype=np.uint8)
draw_masks(ax, img, segms, colors, with_edge=True)
if num_bboxes < segms.shape[0]:
segms = segms[num_bboxes:]
horizontal_alignment = 'center'
areas = []
positions = []
for mask in segms:
_, _, stats, centroids = cv2.connectedComponentsWithStats(
mask.astype(np.uint8), connectivity=8)
largest_id = np.argmax(stats[1:, -1]) + 1
positions.append(centroids[largest_id])
areas.append(stats[largest_id, -1])
areas = np.stack(areas, axis=0)
scales = _get_adaptive_scales(areas)
draw_labels(
ax,
labels[num_bboxes:],
positions,
class_names=class_names,
color=text_colors,
font_size=font_size,
scales=scales,
horizontal_alignment=horizontal_alignment)
plt.imshow(img)
stream, _ = canvas.print_to_buffer()
buffer = np.frombuffer(stream, dtype='uint8')
if sys.platform == 'darwin':
width, height = canvas.get_width_height(physical=True)
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
img = mmcv.rgb2bgr(img)
if show:
# We do not use cv2 for display because in some cases, opencv will
# conflict with Qt, it will output a warning: Current thread
# is not the object's thread. You can refer to
# https://github.com/opencv/opencv-python/issues/46 for details
if wait_time == 0:
plt.show()
else:
plt.show(block=False)
plt.pause(wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
plt.close()
return img
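# A minimal usage sketch for ``imshow_det_bboxes`` (illustrative only, not part
# of the original module). It assumes numpy is imported as ``np`` at module
# level; the class name and output path below are made up for the example.
#
#     img = np.full((240, 320, 3), 255, dtype=np.uint8)              # blank BGR image
#     bboxes = np.array([[30., 40., 120., 160., 0.92]])              # x1, y1, x2, y2, score
#     labels = np.array([0])
#     imshow_det_bboxes(img, bboxes, labels, class_names=['person'],
#                       score_thr=0.3, show=False, out_file='demo.png')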
def imshow_gt_det_bboxes(img,
annotation,
result,
class_names=None,
score_thr=0,
gt_bbox_color=(61, 102, 255),
gt_text_color=(200, 200, 200),
gt_mask_color=(61, 102, 255),
det_bbox_color=(241, 101, 72),
det_text_color=(200, 200, 200),
det_mask_color=(241, 101, 72),
thickness=2,
font_size=13,
win_name='',
show=True,
wait_time=0,
out_file=None,
overlay_gt_pred=True):
"""General visualization GT and result function.
Args:
img (str | ndarray): The image to be displayed.
        annotation (dict): Ground truth annotations, which must contain the
            keys 'gt_bboxes' and 'gt_labels' and may contain 'gt_masks'.
        result (tuple[list] | list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str]): Names of each class.
score_thr (float): Minimum score of bboxes to be shown. Default: 0.
gt_bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines.
If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: (61, 102, 255).
gt_text_color (list[tuple] | tuple | str | None): Colors of texts.
If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: (200, 200, 200).
gt_mask_color (list[tuple] | tuple | str | None, optional): Colors of
masks. If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: (61, 102, 255).
det_bbox_color (list[tuple] | tuple | str | None):Colors of bbox lines.
If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: (241, 101, 72).
det_text_color (list[tuple] | tuple | str | None):Colors of texts.
If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: (200, 200, 200).
det_mask_color (list[tuple] | tuple | str | None, optional): Color of
masks. If a single color is given, it will be applied to all classes.
The tuple of color should be in RGB order. Default: (241, 101, 72).
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
show (bool): Whether to show the image. Default: True.
wait_time (float): Value of waitKey param. Default: 0.
out_file (str, optional): The filename to write the image.
Default: None.
        overlay_gt_pred (bool): Whether to plot gts and predictions on the
            same image. If False, predictions and gts will be plotted on two
            copies of the same image, concatenated vertically. The image
            above is drawn with the gt, and the image below is drawn with the
            prediction result. Default: True.
Returns:
ndarray: The image with bboxes or masks drawn on it.
"""
assert 'gt_bboxes' in annotation
assert 'gt_labels' in annotation
    assert isinstance(result, (tuple, list, dict)), 'Expected ' \
        f'tuple, list or dict, but got {type(result)}'
gt_bboxes = annotation['gt_bboxes']
gt_labels = annotation['gt_labels']
gt_masks = annotation.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = annotation.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
# gt_bboxes = None
img = mmcv.imread(img)
img_with_gt = imshow_det_bboxes(
img,
gt_bboxes,
gt_labels,
gt_masks,
class_names=class_names,
bbox_color=gt_bbox_color,
text_color=gt_text_color,
mask_color=gt_mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=False)
if not isinstance(result, dict):
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
segms = None
if segm_result is not None and len(labels) > 0: # non empty
segms = mmcv.concat_list(segm_result)
segms = mask_util.decode(segms)
segms = segms.transpose(2, 0, 1)
else:
assert class_names is not None, 'We need to know the number ' \
'of classes.'
VOID = len(class_names)
bboxes = None
pan_results = result['pan_results']
# keep objects ahead
ids = np.unique(pan_results)[::-1]
legal_indices = ids != VOID
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
if overlay_gt_pred:
img = imshow_det_bboxes(
img_with_gt,
bboxes,
labels,
segms=segms,
class_names=class_names,
score_thr=score_thr,
bbox_color=det_bbox_color,
text_color=det_text_color,
mask_color=det_mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
else:
img_with_det = imshow_det_bboxes(
img,
bboxes,
labels,
segms=segms,
class_names=class_names,
score_thr=score_thr,
bbox_color=det_bbox_color,
text_color=det_text_color,
mask_color=det_mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=False)
img = np.concatenate([img_with_gt, img_with_det], axis=0)
plt.imshow(img)
if show:
if wait_time == 0:
plt.show()
else:
plt.show(block=False)
plt.pause(wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
plt.close()
    return img
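# A minimal usage sketch for ``imshow_gt_det_bboxes`` (illustrative only, not
# part of the original module). The annotation dict and detection result are
# synthetic; only the 'gt_bboxes'/'gt_labels' keys required above are used.
#
#     img = np.full((240, 320, 3), 255, dtype=np.uint8)
#     annotation = dict(
#         gt_bboxes=np.array([[20., 20., 100., 100.]]),
#         gt_labels=np.array([0]))
#     det_result = [np.array([[22., 18., 98., 105., 0.9]])]          # one bbox array per class
#     imshow_gt_det_bboxes(img, annotation, det_result,
#                          class_names=['person'], show=False)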
/Clockwork-1.2.0.zip/Clockwork-1.2.0/clockwork/clockwork.py

from xml.etree import ElementTree as etree
from . import clockwork_http
from . import clockwork_exceptions
SMS_URL = 'https://api.clockworksms.com/xml/send.aspx'
CREDIT_URL = 'https://api.clockworksms.com/xml/credit.aspx'
BALANCE_URL = 'https://api.clockworksms.com/xml/balance.aspx'
class SMS(object):
"""An SMS object"""
def __init__(self, to, message, client_id=None, from_name=None, long=None, truncate=None, invalid_char_option=None):
self.client_id = client_id
self.from_name = from_name
self.long = long
self.truncate = truncate
self.invalid_char_option = invalid_char_option
self.to = to
self.message = message
class SMSResponse(object):
"""An wrapper around an SMS reponse"""
def __init__(self, sms, id, error_code, error_message, success):
self.sms = sms
self.id = id
self.error_code = error_code
self.error_message = error_message
self.success = success
class API(object):
"""Wraps the clockwork API"""
def __init__(self, apikey, from_name='Clockwork', concat=3,
invalid_char_option='error', long=False, truncate=True,
use_ssl=True):
self.apikey = apikey
self.from_name = from_name
self.concat = concat
self.invalid_char_option = invalid_char_option
self.long = long
self.truncate = truncate
self.use_ssl = use_ssl
def get_balance(self):
"""Check the balance fot this account.
Returns a dictionary containing:
account_type: The account type
balance: The balance remaining on the account
currency: The currency used for the account balance. Assume GBP in not set"""
xml_root = self.__init_xml('Balance')
response = clockwork_http.request(BALANCE_URL, etree.tostring(xml_root, encoding='utf-8'))
data_etree = etree.fromstring(response['data'])
err_desc = data_etree.find('ErrDesc')
if err_desc is not None:
raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)
result = {}
result['account_type'] = data_etree.find('AccountType').text
result['balance'] = data_etree.find('Balance').text
result['currency'] = data_etree.find('Currency').text
return result
def send(self, messages):
"""Send a SMS message, or an array of SMS messages"""
tmpSms = SMS(to='', message='')
if str(type(messages)) == str(type(tmpSms)):
messages = [messages]
xml_root = self.__init_xml('Message')
wrapper_id = 0
for m in messages:
m.wrapper_id = wrapper_id
msg = self.__build_sms_data(m)
sms = etree.SubElement(xml_root, 'SMS')
for sms_element in msg:
element = etree.SubElement(sms, sms_element)
element.text = msg[sms_element]
# print etree.tostring(xml_root)
response = clockwork_http.request(SMS_URL, etree.tostring(xml_root, encoding='utf-8'))
response_data = response['data']
# print response_data
data_etree = etree.fromstring(response_data)
# Check for general error
err_desc = data_etree.find('ErrDesc')
if err_desc is not None:
raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)
# Return a consistent object
results = []
for sms in data_etree:
matching_sms = next((s for s in messages if str(s.wrapper_id) == sms.find('WrapperID').text), None)
new_result = SMSResponse(
sms = matching_sms,
id = '' if sms.find('MessageID') is None else sms.find('MessageID').text,
error_code = 0 if sms.find('ErrNo') is None else sms.find('ErrNo').text,
error_message = '' if sms.find('ErrDesc') is None else sms.find('ErrDesc').text,
                success = True if sms.find('ErrNo') is None else (int(sms.find('ErrNo').text) == 0)
)
results.append(new_result)
if len(results) > 1:
return results
return results[0]
def __init_xml(self, rootElementTag):
"""Init a etree element and pop a key in there"""
xml_root = etree.Element(rootElementTag)
key = etree.SubElement(xml_root, "Key")
key.text = self.apikey
return xml_root
def __build_sms_data(self, message):
"""Build a dictionary of SMS message elements"""
attributes = {}
attributes_to_translate = {
'to' : 'To',
'message' : 'Content',
'client_id' : 'ClientID',
'concat' : 'Concat',
'from_name': 'From',
'invalid_char_option' : 'InvalidCharOption',
'truncate' : 'Truncate',
'wrapper_id' : 'WrapperId'
}
for attr in attributes_to_translate:
val_to_use = None
if hasattr(message, attr):
val_to_use = getattr(message, attr)
if val_to_use is None and hasattr(self, attr):
val_to_use = getattr(self, attr)
if val_to_use is not None:
attributes[attributes_to_translate[attr]] = str(val_to_use)
        return attributes
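# A minimal usage sketch (not part of the original module). The API key and
# phone number below are placeholders; actually sending requires a valid
# Clockwork account and network access.
#
#     from clockwork import clockwork
#
#     api = clockwork.API('YOUR_API_KEY')
#     sms = clockwork.SMS(to='441234567890', message='Hello from Clockwork')
#     response = api.send(sms)                 # single SMS -> single SMSResponse
#     if response.success:
#         print('Sent, message id: %s' % response.id)
#     else:
#         print('Failed: %s %s' % (response.error_code, response.error_message))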
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/plugins/glances_gpu.py

from glances.compat import nativestr, to_fahrenheit
from glances.logger import logger
from glances.plugins.glances_plugin import GlancesPlugin
# In Glances 3.1.4 or higher, we use the py3nvml lib (see issue #1523)
try:
import py3nvml.py3nvml as pynvml
except Exception as e:
import_error_tag = True
    # Display a warning message if the import failed
logger.warning("Missing Python Lib ({}), Nvidia GPU plugin is disabled".format(e))
else:
import_error_tag = False
# Define the history items list
# All items in this list will be historised if the --enable-history tag is set
items_history_list = [
{'name': 'proc', 'description': 'GPU processor', 'y_unit': '%'},
{'name': 'mem', 'description': 'Memory consumption', 'y_unit': '%'},
]
class Plugin(GlancesPlugin):
"""Glances GPU plugin (limited to NVIDIA chipsets).
stats is a list of dictionaries with one entry per GPU
"""
def __init__(self, args=None, config=None):
"""Init the plugin."""
super(Plugin, self).__init__(args=args, config=config, stats_init_value=[])
# Init the Nvidia API
self.init_nvidia()
# We want to display the stat in the curse interface
self.display_curse = True
def init_nvidia(self):
"""Init the NVIDIA API."""
if import_error_tag:
self.nvml_ready = False
try:
pynvml.nvmlInit()
self.device_handles = get_device_handles()
self.nvml_ready = True
except Exception:
logger.debug("pynvml could not be initialized.")
self.nvml_ready = False
return self.nvml_ready
def get_key(self):
"""Return the key of the list."""
return 'gpu_id'
@GlancesPlugin._check_decorator
@GlancesPlugin._log_result_decorator
def update(self):
"""Update the GPU stats."""
# Init new stats
stats = self.get_init_value()
if not self.nvml_ready:
# !!!
# Uncomment to test on computer without GPU
# One GPU sample:
# self.stats = [
# {
# "key": "gpu_id",
# "gpu_id": 0,
# "name": "Fake GeForce GTX",
# "mem": 5.792331695556641,
# "proc": 4,
# "temperature": 26,
# "fan_speed": 30
# }
# ]
# Two GPU sample:
# self.stats = [
# {
# "key": "gpu_id",
# "gpu_id": 0,
# "name": "Fake GeForce GTX1",
# "mem": 5.792331695556641,
# "proc": 4,
# "temperature": 26,
# "fan_speed": 30
# },
# {
# "key": "gpu_id",
# "gpu_id": 1,
# "name": "Fake GeForce GTX2",
# "mem": 15,
# "proc": 8,
# "temperature": 65,
# "fan_speed": 75
# }
# ]
return self.stats
if self.input_method == 'local':
stats = self.get_device_stats()
elif self.input_method == 'snmp':
# not available
pass
# Update the stats
self.stats = stats
return self.stats
def update_views(self):
"""Update stats views."""
# Call the father's method
super(Plugin, self).update_views()
# Add specifics information
# Alert
for i in self.stats:
# Init the views for the current GPU
self.views[i[self.get_key()]] = {'proc': {}, 'mem': {}, 'temperature': {}}
# Processor alert
if 'proc' in i:
alert = self.get_alert(i['proc'], header='proc')
self.views[i[self.get_key()]]['proc']['decoration'] = alert
# Memory alert
if 'mem' in i:
alert = self.get_alert(i['mem'], header='mem')
self.views[i[self.get_key()]]['mem']['decoration'] = alert
# Temperature alert
if 'temperature' in i:
alert = self.get_alert(i['temperature'], header='temperature')
self.views[i[self.get_key()]]['temperature']['decoration'] = alert
return True
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist, not empty (issue #871) and plugin not disabled
if not self.stats or (self.stats == []) or self.is_disabled():
return ret
# Check if all GPU have the same name
same_name = all(s['name'] == self.stats[0]['name'] for s in self.stats)
# gpu_stats contain the first GPU in the list
gpu_stats = self.stats[0]
# Header
header = ''
if len(self.stats) > 1:
header += '{} '.format(len(self.stats))
if same_name:
header += '{} {}'.format('GPU', gpu_stats['name'])
else:
header += '{}'.format('GPU')
msg = header[:17]
ret.append(self.curse_add_line(msg, "TITLE"))
# Build the string message
if len(self.stats) == 1 or args.meangpu:
# GPU stat summary or mono GPU
# New line
ret.append(self.curse_new_line())
# GPU PROC
try:
mean_proc = sum(s['proc'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_proc_msg = '{:>4}'.format('N/A')
else:
mean_proc_msg = '{:>3.0f}%'.format(mean_proc)
if len(self.stats) > 1:
msg = '{:13}'.format('proc mean:')
else:
msg = '{:13}'.format('proc:')
ret.append(self.curse_add_line(msg))
ret.append(
self.curse_add_line(
mean_proc_msg, self.get_views(item=gpu_stats[self.get_key()], key='proc', option='decoration')
)
)
# New line
ret.append(self.curse_new_line())
# GPU MEM
try:
mean_mem = sum(s['mem'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_mem_msg = '{:>4}'.format('N/A')
else:
mean_mem_msg = '{:>3.0f}%'.format(mean_mem)
if len(self.stats) > 1:
msg = '{:13}'.format('mem mean:')
else:
msg = '{:13}'.format('mem:')
ret.append(self.curse_add_line(msg))
ret.append(
self.curse_add_line(
mean_mem_msg, self.get_views(item=gpu_stats[self.get_key()], key='mem', option='decoration')
)
)
# New line
ret.append(self.curse_new_line())
# GPU TEMPERATURE
try:
mean_temperature = sum(s['temperature'] for s in self.stats if s is not None) / len(self.stats)
except TypeError:
mean_temperature_msg = '{:>4}'.format('N/A')
else:
unit = 'C'
if args.fahrenheit:
mean_temperature = to_fahrenheit(mean_temperature)
unit = 'F'
mean_temperature_msg = '{:>3.0f}{}'.format(mean_temperature, unit)
if len(self.stats) > 1:
msg = '{:13}'.format('temp mean:')
else:
msg = '{:13}'.format('temperature:')
ret.append(self.curse_add_line(msg))
ret.append(
self.curse_add_line(
mean_temperature_msg,
self.get_views(item=gpu_stats[self.get_key()], key='temperature', option='decoration'),
)
)
else:
# Multi GPU
# Temperature is not displayed in this mode...
for gpu_stats in self.stats:
# New line
ret.append(self.curse_new_line())
# GPU ID + PROC + MEM + TEMPERATURE
id_msg = '{}'.format(gpu_stats['gpu_id'])
try:
proc_msg = '{:>3.0f}%'.format(gpu_stats['proc'])
except (ValueError, TypeError):
proc_msg = '{:>4}'.format('N/A')
try:
mem_msg = '{:>3.0f}%'.format(gpu_stats['mem'])
except (ValueError, TypeError):
mem_msg = '{:>4}'.format('N/A')
msg = '{}: {} mem: {}'.format(id_msg, proc_msg, mem_msg)
ret.append(self.curse_add_line(msg))
return ret
def get_device_stats(self):
"""Get GPU stats."""
stats = []
for index, device_handle in enumerate(self.device_handles):
device_stats = dict()
# Dictionary key is the GPU_ID
device_stats['key'] = self.get_key()
# GPU id (for multiple GPU, start at 0)
device_stats['gpu_id'] = index
# GPU name
device_stats['name'] = get_device_name(device_handle)
# Memory consumption in % (not available on all GPU)
device_stats['mem'] = get_mem(device_handle)
# Processor consumption in %
device_stats['proc'] = get_proc(device_handle)
# Processor temperature in °C
device_stats['temperature'] = get_temperature(device_handle)
# Fan speed in %
device_stats['fan_speed'] = get_fan_speed(device_handle)
stats.append(device_stats)
return stats
def exit(self):
"""Overwrite the exit method to close the GPU API."""
if self.nvml_ready:
try:
pynvml.nvmlShutdown()
except Exception as e:
logger.debug("pynvml failed to shutdown correctly ({})".format(e))
# Call the father exit method
super(Plugin, self).exit()
def get_device_handles():
"""Get a list of NVML device handles, one per device.
Can throw NVMLError.
"""
return [pynvml.nvmlDeviceGetHandleByIndex(i) for i in range(pynvml.nvmlDeviceGetCount())]
def get_device_name(device_handle):
"""Get GPU device name."""
try:
return nativestr(pynvml.nvmlDeviceGetName(device_handle))
except pynvml.NVMLError:
return "NVIDIA"
def get_mem(device_handle):
"""Get GPU device memory consumption in percent."""
try:
memory_info = pynvml.nvmlDeviceGetMemoryInfo(device_handle)
return memory_info.used * 100.0 / memory_info.total
except pynvml.NVMLError:
return None
def get_proc(device_handle):
"""Get GPU device CPU consumption in percent."""
try:
return pynvml.nvmlDeviceGetUtilizationRates(device_handle).gpu
except pynvml.NVMLError:
return None
def get_temperature(device_handle):
"""Get GPU device CPU temperature in Celsius."""
try:
return pynvml.nvmlDeviceGetTemperature(device_handle, pynvml.NVML_TEMPERATURE_GPU)
except pynvml.NVMLError:
return None
def get_fan_speed(device_handle):
"""Get GPU device fan speed in percent."""
try:
return pynvml.nvmlDeviceGetFanSpeed(device_handle)
except pynvml.NVMLError:
        return None
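# A minimal usage sketch for the module-level helpers above (not part of the
# original plugin). It assumes an NVIDIA GPU and a working py3nvml install.
#
#     if not import_error_tag:
#         try:
#             pynvml.nvmlInit()
#             for handle in get_device_handles():
#                 print(get_device_name(handle), get_proc(handle), get_mem(handle))
#         finally:
#             pynvml.nvmlShutdown()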
/KeralaPyApiV2-2.0.2020.tar.gz/KeralaPyApiV2-2.0.2020/pyrogram/client/types/input_media/input_media_audio.py
from typing import Union
from . import InputMedia
class InputMediaAudio(InputMedia):
"""An audio to be sent inside an album.
It is intended to be used with :obj:`send_media_group() <pyrogram.Client.send_media_group>`.
Parameters:
media (``str``):
Audio to send.
Pass a file_id as string to send an audio that exists on the Telegram servers or
pass a file path as string to upload a new audio that exists on your local machine.
file_ref (``str``, *optional*):
A valid file reference obtained by a recently fetched media message.
To be used in combination with a file id in case a file reference is needed.
thumb (``str``, *optional*):
Thumbnail of the music file album cover.
The thumbnail should be in JPEG format and less than 200 KB in size.
A thumbnail's width and height should not exceed 320 pixels.
Thumbnails can't be reused and can be only uploaded as a new file.
caption (``str``, *optional*):
Caption of the audio to be sent, 0-1024 characters
parse_mode (``str``, *optional*):
By default, texts are parsed using both Markdown and HTML styles.
You can combine both syntaxes together.
Pass "markdown" or "md" to enable Markdown-style parsing only.
Pass "html" to enable HTML-style parsing only.
Pass None to completely disable style parsing.
duration (``int``, *optional*):
Duration of the audio in seconds
        performer (``str``, *optional*):
            Performer of the audio
        title (``str``, *optional*):
            Title of the audio
"""
def __init__(
self,
media: str,
file_ref: str = None,
thumb: str = None,
caption: str = "",
parse_mode: Union[str, None] = object,
duration: int = 0,
performer: int = "",
title: str = ""
):
super().__init__(media, file_ref, caption, parse_mode)
self.thumb = thumb
self.duration = duration
self.performer = performer
        self.title = title
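# A minimal usage sketch (not part of the original module). The session name,
# chat id and file paths are placeholders, and the import location of
# InputMediaAudio may differ between pyrogram forks/versions. It follows the
# send_media_group() pattern referenced in the docstring above.
#
#     from pyrogram import Client, InputMediaAudio
#
#     app = Client("my_session")
#     app.start()
#     app.send_media_group(
#         "me",
#         [InputMediaAudio("track1.mp3", caption="First track"),
#          InputMediaAudio("track2.mp3")])
#     app.stop()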
/Distribution_Ritika-0.17.tar.gz/Distribution_Ritika-0.17/Distribution_Ritika/Binomialdistribution.py

import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
            k (int): number of successes for which to calculate the probability
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
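# A minimal usage sketch (not part of the original module). The import path
# assumes the package exposes Binomial from its top level; adjust it to match
# how the distribution is actually installed.
#
#     from Distribution_Ritika import Binomial
#
#     one = Binomial(prob=0.4, size=20)
#     two = Binomial(prob=0.4, size=60)
#     print(one)                      # mean 8.0, standard deviation ~2.19, p 0.4, n 20
#     print(one.pdf(5))               # P(X = 5)
#     combined = one + two            # allowed because both share p = 0.4
#     print(combined.n)               # 80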
/Arc_api-1.1.tar.gz/Arc_api-1.1/Arcapi/async_api.py

import re
import json
from typing import List, Dict, Any
import websockets
import brotli
from Arcapi.api import Api
from Arcapi.exceptions import *
class AsyncApi(Api):
user_code: str # User's 9-digit code for login
    start: int  # The lower bound of the constant range for the query
    end: int  # The upper bound of the constant range for the query
    timeout: int  # The timeout, in seconds, for the websocket connection
def __init__(self, user_code: str, start: int = 8, end: int = 12, timeout: int = 5) -> None:
self.ws_endpoint = 'wss://arc.estertion.win:616'
if not re.fullmatch(r'\d{9}', user_code):
raise ArcInvaidUserCodeException
self.user_code = user_code
self.start = start
self.end = end
self.timeout = timeout
async def call_action(self, action: str, **params) -> Any:
if 'start' in params:
_start = params['start']
else:
_start = self.start
if 'end' in params:
_end = params['end']
else:
_end = self.end
container: List[Dict] = [] # The result list for request objects
async with websockets.connect(self.ws_endpoint, timeout=self.timeout) as conn:
await conn.send(f'{self.user_code} {_start} {_end}')
_recv = await conn.recv()
if _recv == 'invalid id':
raise ArcInvaidUserCodeException
elif _recv == 'queried':
while True:
_r = await conn.recv()
if isinstance(_r, str) and _r == 'bye':
break
elif isinstance(_r, (bytes, bytearray)):
_data = json.loads(brotli.decompress(_r))
if _data['cmd'] == action:
if type(_data['data']) is list:
for _item in _data['data']:
container.append(_item)
else:
container.append(_data['data'])
else:
raise ArcUnknownException(_recv)
        return container
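# A minimal usage sketch (not part of the original module). It needs network
# access to the query endpoint, and the action name 'scores' is only a
# placeholder -- substitute whichever command the endpoint actually understands.
#
#     import asyncio
#
#     async def main():
#         api = AsyncApi(user_code='123456789', start=8, end=12)
#         records = await api.call_action('scores')
#         print(records)
#
#     asyncio.run(main())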
/GOldwasher-0.2.9.tar.gz/GOldwasher-0.2.9/README.md
GOldwasher
=======
GOldwasher is a light wrapper for the R package
[topGO](https://bioconductor.org/packages/release/bioc/html/topGO.html). The
function of GOldwasher is limited to the calculation of GO term enrichment
(via elimFisher algorithm) of target gene lists and also the generation their
respective GO subgraph images (via
[Graphviz](http://www.graphviz.org/) - Graph Visualization Software)
subsuming their annotations. These elements are then incorporated together into
html a single report file per input list which can then be interactively
explored. This module purpose and focus is to facilitate batch processing of
several gene lists.
----------
**Current release:** *0.2.9 (alpha)*
Provides basic functionality and basic documentation. Methods and functions
are working if the input does not deviate from what is expected (little to no input
sanitization), but they haven't been extensively tested.
**TODO:**
- Write (proper) tests
- Improve documentation
- Re-think R interface mod
- Make OBOe mod independent of ontology mod
- Extend functionality (select and compare subsets)
Installation
------------
    pip install GOldwasher-0.2.9.tar.gz
or
pip install GOldwasher
or you can just check the code out of here and fiddle with it as you wish.
**Requirements**:
The non-python dependencies are:
- **graphviz** http://www.graphviz.org/
- **GO OBO file** (http://geneontology.org/page/download-ontology)
R packages (and inherently **R**):
- **topGO** http://bioconductor.org/packages/release/bioc/html/topGO.html
- **jsonlite** https://cran.r-project.org/web/packages/jsonlite/index.html
These packages/files should be installed/downloaded for GOldwasher to work. All
other Python dependencies should be automatically resolved by pip.
Usage
-----
GOldwasher can be used as a module, making use of its methods, or more easily
it can conveniently be used from its command tool 'goldpanner'
goldpanner [-h] -c CONFIG -i INPUTDIR
{ANNOT,ENRICH,DAG,REPORT}
e.g.:
goldpanner -c settings.ini -i lists/ REPORT
-**c**
ini file with general settings using the following structure:
[meta]
[vars]
alpha = 0.01
organism = phaeodactylum
[sources]
functionalDesc = /path/to/tabseparedfile/withIDtabFunctionalDescription.txt
g_map = /path/to/mappings/identifier2GOaccessions.txt
obofile = /path/to/go-basic.obo
#linkinsets = /path/to/custom/organisms.json
**[vars]**
---
**alpha** - significance level
**organism** - name of the organism (as key name on 'organisms.json')
**[sources]**
---
**functionalDesc**
path to tab-separated file holding a column of identifiers and their matching functional descriptors.
**g_map**
path to tab-separated file holding a column of identifiers and a second column with their associated GO term accession numbers separated by commas.
e.g.:
Phatr3_J43587.t1 GO:0006396,GO:0005622,GO:0005515
**obofile**
path to the GO ontology obo file. It can be downloaded from: http://purl.obolibrary.org/obo/go/go-basic.obo
**linkinsets**
If using organisms other than 'Arabidopsis thaliana' or 'Phaeodactylum tricornutum' uncomment this variable and set it as the path to the customized 'organisms.json'. By default no cross-links are generated for unknown/unset organisms.
**-i** directory with the target lists.
**Commands**:
---
ANNOT - annotates identifier lists with their available functional descriptors.
ENRICH - performs GO term enrichment on the annotated lists.
DAG - generates a color-coded GO graph image (SVG format) from (topGO) enrichment results.
REPORT - generates an interactive html GO enrichment report for each list on the input directory.
**optional argument**:
**-o** output directory (can be used with all commands except ENRICH)
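e.g. a typical batch run (paths are illustrative) writing the interactive
reports to a separate directory:

    goldpanner -c settings.ini -i lists/ -o reports/ REPORT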
3rd party libraries
-------
Additional required 3rd party content is also bundled together with the source
code for this program. That content is listed below along with the licenses
under which they have been released.
> **OBO Ontology python module**
http://pythonhosted.org/Orange-Bioinformatics/
Copyright (c) Bioinformatics Laboratory, FRI UL
Released under the GNU General Public License license
> **Sortable tables**
http://www.kryogenix.org/code/browser/sorttable/
Copyright (c) Stuart Langridge
Released under the X11 (MIT) license
http://www.kryogenix.org/code/browser/licence.html
> **jQuery**
Copyright (c) jQuery Foundation and other contributors
Released under the MIT license
http://jquery.org/license
> **SVGPan v3.2.9**
https://github.com/ariutta/svg-pan-zoom
Copyright (c) Andrea Leofreddi
The code from the SVGPan library is licensed under the following BSD license
https://raw.githubusercontent.com/ariutta/svg-pan-zoom/master/LICENSE
> **Tabbed Content v2013.7.6**
http://www.menucool.com/tabbed-content
Free to use
/DTMC/spatialModel/randomMovement/randMoveSEIR.py

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from Eir.utility import randEvent
from Eir.utility import Person1 as Person
from .randMoveSIR import RandMoveSIR
from Eir.DTMC.spatialModel.simul_details import Simul_Details
class RandMoveSEIR(RandMoveSIR):
"""
Class that simulates the random movement model with an SEIR model. People in the Exposed compartment are presumed to not be able to propogate infection.
Parameters:
----------
S0: int
The starting number of susceptible individuals in the simulation.
E0: int
The starting number of exposed individuals in the simulation.
I0: int
The starting number of infectious individuals in the simulation.
R0: int
The starting number of recovered individuals in the simulation.
rho: float
The probability of someone going from the E compartment to the I compartment.
gamma: float
The recovery probability of an individual going from I -> R.
planeSize : float
The length of each side of the square plane in which the individuals are confined to. For example,
if planeSize=50, then the region which people in the simulation are confined to is the square with
vertices (0,0), (50,0), (50,50), and (0,50).
move_r: float
The mean of the movement radius of each person in the simulation. Will be used as mean along with
sigma_R as the standard deviation to pull from a normal distribution movement radii each time
_move(day) function is called.
sigma_R: float
The standard deviation of the movement radius of each person in the simulation. Will be used along with
move_R as the mean to pull from a normal distribution movement radii each time _move(day) function is
called.
spread_r: float
        The mean of the spreading radius of each person in the simulation. Will be used along with sigma_r
        as the standard deviation to pull from a normal distribution spreading radii for each individual person
        when the RandMoveSEIR object is initialized.
    sigma_r: float
        The standard deviation of the spreading radius of each person in the simulation.
        Will be used along with spread_r as the mean to pull from a normal distribution spreading radii
        for each individual person when the RandMoveSEIR object is initialized.
days: int
The number of days that was simulated.
w0: float optional
The probability of infection if the distance between an infectious person and susceptible person is 0. Default is 1.0.
alpha: float optional
A constant used in the _infect() method. The greater the constant, the greater the infection probability. Default is 2.0.
Attributes
----------
S: ndarray
A numpy array that stores the number of people in the susceptible state on each given day of the simulation.
E: ndarray
A numpy array that stores the number of people in the exposed state on each given day of the simulation.
I: ndarray
A numpy array that stores the number of people in the infected state on each given day of the simulation.
R: ndarray
A numpy array that stores the number of people in the recovered state on each given day of the simulation.
popsize: int
The total size of the population in the simulation. Given by S0 + I0
Scollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is SUSCEPTIBLE. Has a total of popsize Person objects,
with numbers [0, popsize).
Ecollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is EXPOSED. Has a total of popsize Person objects,
with numbers [0, popsize).
Icollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is INFECTED. Has a total of popsize Person objects,
with numbers [0, popsize).
Rcollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is RECOVERED. Has a total of popsize Person objects,
with numbers [0, popsize).
details: Simul_Details
An object that can be returned to give a more in-depth look into the simulation. With this object,
        one can see transmission chains, state changes, the movement history of each individual, the state
history of each person, and more.
"""
def __init__(self, S0:int, E0:int, I0:int, R0:int, rho: float, gamma: float, planeSize: float, move_r: float, sigma_R: float,
spread_r: float, sigma_r: float, days:int, w0=1.0, alpha=2.0):
# error checks
self.intCheck([S0, E0, I0, R0, days])
self.floatCheck(rho, gamma, planeSize, move_r, sigma_R, spread_r, sigma_r, w0, alpha)
self.negValCheck(S0, E0, I0, R0, rho, gamma, planeSize, move_r, sigma_R, spread_r, sigma_r, days, w0, alpha)
self.probValCheck([rho, gamma, w0])
super(RandMoveSEIR, self).__init__(S0=S0, I0=I0, R0=R0, gamma=gamma, planeSize=planeSize, move_r=move_r, sigma_R=sigma_R, spread_r=spread_r, sigma_r=sigma_r, days=days, w0=w0,alpha=alpha)
# rho is the E->I
self.rho=rho
# create the numpy array for exposed
self.E = np.zeros(days+1)
self.E[0] = E0
self.popsize = S0 + E0 + I0 + R0
#print("Population: ", self.popsize)
# reinstantiate the Simul_Details object
self.details = Simul_Details(days=days, popsize=self.popsize)
# create the collection data structures
self.Scollect, self.Ecollect, self.Icollect, self.Rcollect = [], [], [], []
# random x,y locations for the start of the simulation
loc_x, loc_y = np.random.random(self.popsize) * self.planeSize, np.random.random(self.popsize) * self.planeSize
spreading_r = np.random.normal(spread_r, sigma_r, self.popsize)
for i in range(self.popsize):
p1 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
p2 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
p3 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
p4 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
if i < S0:
p1.isIncluded = True
self.details.addStateChange(i, "S", 0)
elif S0 <= i < S0 + E0:
p2.isIncluded=True
self.details.addStateChange(i, "E", 0)
elif S0 + E0 <= i < S0 + E0 + I0:
p3.isIncluded=True
self.details.addStateChange(i, "I", 0)
else:
p4.isIncluded=True
self.details.addStateChange(i, "R", 0)
self.Scollect.append(p1)
self.Ecollect.append(p2)
self.Icollect.append(p3)
self.Rcollect.append(p4)
self.details.addLocation(0, (p1.x, p1.y))
# essentially the same function, except those who are infected will go to E not I. Simply return set of all infected people.
# _StoI() also adds the transmission data as to who infected who, so that doesn't need to be written again.
def _StoE(self, day:int):
return self._StoI(day)
# runs state changes from E to I
def _EtoI(self):
return self._changeHelp(self.Ecollect, self.rho)
def run(self, getDetails=True):
for i in range(1, self.days+1):
StoE = self._StoE(i)
EtoI = self._EtoI()
ItoR = self._ItoR()
self._stateChanger(StoE, self.Ecollect, "E", i)
self._stateChanger(EtoI, self.Icollect, "I", i)
self._stateChanger(ItoR, self.Rcollect, "R", i)
self._move(i, [self.Scollect, self.Ecollect, self.Icollect, self.Rcollect])
self.S[i] = self.S[i-1] - len(StoE)
self.E[i] = self.E[i-1] + len(StoE) - len(EtoI)
self.I[i] = self.I[i-1] + len(EtoI) - len(ItoR)
self.R[i] = self.R[i-1] + len(ItoR)
if getDetails:
return self.details
def toDataFrame(self):
"""
Gives user access to pandas dataframe with amount of people in each state on each day.
Returns
-------
pd.DataFrame
DataFrame object containing the number of susceptibles and number of infecteds on each day.
"""
# create the linspaced numpy array
t = np.linspace(0, self.days, self.days + 1)
# create a 2D array with the days and susceptible and infected arrays
# do it over axis one so that it creates columns days, susceptible, infected
arr = np.stack([t, self.S, self.E, self.I, self.R], axis=1)
df = pd.DataFrame(arr, columns=["Days", "Susceptible", "Exposed", "Infected", "Removed"])
return df
def plot(self):
"Plots the number of susceptible, exposed, infected, and recovered individuals on the y-axis and the number of days on the x-axis."
t = np.linspace(0, self.days, self.days + 1)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, sharex='all')
ax1.plot(t, self.S, label="Susceptible", color='r')
ax1.set_ylabel("# Susceptibles")
ax1.set_title("Random Movement SEIR Simulation")
ax2.plot(t, self.E, label="Exposed", color='g')
ax2.set_ylabel("# Exposed")
ax3.plot(t, self.I, label="Active Cases", color='b')
ax3.set_ylabel("# Active Infections")
ax4.set_xlabel("Days")
ax4.set_ylabel("# Recovered")
ax4.plot(t, self.R, label="Removed")
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
        plt.show()
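# A minimal usage sketch (not part of the original module). Parameter values
# are illustrative only.
#
#     model = RandMoveSEIR(S0=999, E0=0, I0=1, R0=0, rho=0.3, gamma=0.2,
#                          planeSize=50.0, move_r=4.0, sigma_R=1.0,
#                          spread_r=2.0, sigma_r=0.5, days=60)
#     details = model.run()           # Simul_Details object
#     df = model.toDataFrame()        # daily S/E/I/R counts
#     model.plot()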
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/INSTALLATION.rst

.. _INSTALLATION:
============
Installation
============
The :term:`FiPy` finite volume PDE solver relies on several
third-party packages. It is *best to obtain and install those first*
before attempting to install :term:`FiPy`. This document explains how
to install :term:`FiPy`, not how to use it. See :ref:`USAGE`
for details on how to use :term:`FiPy`.
.. note::
It may be useful to set up a :ref:`ENVIRONMENT` before beginning
the installation process.
.. only:: html
.. note::
By selecting the links on this page, you will be leaving NIST
webspace. We have provided these links to other web sites because
they may have information that would be of interest to you. No
inferences should be drawn on account of other sites being
referenced, or not, from this page. There may be other web sites that
are more appropriate for your purpose. NIST does not necessarily
endorse the views expressed, or concur with the facts presented on
these sites. Further, NIST does not endorse any commercial products
that may be mentioned on these sites. Please address comments about
this page to [email protected].
-----------------------
Pre-Installed on Binder
-----------------------
A full :term:`FiPy` installation is available for basic exploration on
Binder_. The default notebook gives a rudimentary introduction to :term:`FiPy`
syntax and, like any `Jupyter Notebook`_ interface, tab completion will help
you explore the package interactively.
.. _Binder: https://mybinder.org/v2/gh/usnistgov/fipy/master
.. _Jupyter Notebook: http://jupyter.org
------------------
Recommended Method
------------------
|CondaForge|_
.. attention::
There are many ways to obtain the software
packages necessary to run :term:`FiPy`, but the most expedient way is
with the conda_ package manager. In addition to the scientific
:term:`Python` stack, conda_ also provides virtual environment
management. Keeping separate installations is useful *e.g.* for
comparing :term:`Python` 2 and :term:`Python` 3 software stacks, or when
the user does not have sufficient privileges to install software
system-wide.
In addition to the default packages, many other developers provide
"channels" to distribute their own builds of a variety of software.
These days, the most useful channel is `conda-forge`, which provides
everything necessary to install :term:`FiPy`.
* `install Miniconda`_ on your computer
* run::
$ conda create --name <MYFIPYENV> --channel conda-forge python=<PYTHONVERSION> fipy gmsh
.. note::
This command creates a self-contained conda_ environment and then
downloads and populates the environment with the prerequisites for
:term:`FiPy` from the conda-forge_ channel at https://anaconda.org.
:term:`Gmsh` is an optional package because some versions are
incompatible with :term:`FiPy`, so it must be requested explicitly.
.. note::
The `fipy conda-forge`_ package is a convenience. You may choose to
install packages explicitly, e.g.,::
$ conda create --name <MYFIPYENV> --channel conda-forge python=3 numpy scipy matplotlib-base future packaging mpich mpi4py petsc4py mayavi "gmsh <4.0|>=4.5.2"
or
$ conda create --name <MYFIPYENV> --channel conda-forge python=2.7 numpy scipy matplotlib-base future packaging pysparse mayavi "traitsui<7.0.0" "gmsh<4.0"
.. attention::
Windows x86_64 is fully supported, but this does not work on
Windows x86_32, as conda-forge_ no longer supports that platform. For
Python 2.7.x, you should be able to do::
conda create --name <MYFIPYENV> --channel conda-forge python=2.7 numpy scipy matplotlib pysparse mayavi weave
and for Python 3.x, you should be able to do::
conda create --name <MYFIPYENV> --channel conda-forge python=3 numpy scipy matplotlib pysparse gmsh
followed, for either, by::
activate <MYFIPYENV>
python -m pip install fipy
.. attention::
Bit rot has started to set in for Python 2.7. One consequence is that
:class:`~fipy.viewers.vtkViewer.VTKViewer`\s can raise errors
(probably other uses of :term:`Mayavi`, too). You may be able to remedy this by
creating your environment with::
$ conda create --name <MYFIPYENV> --channel conda-forge python=2.7 fipy "traitsui<7.0.0"
* enable this new environment with::
$ conda activate <MYFIPYENV>
or
$ source activate <MYFIPYENV>
.. note::
``$ activate <MYFIPYENV>`` on Windows_
* move on to :ref:`USAGE`.
.. note::
conda_ can be
`quite <https://www.anaconda.com/blog/understanding-and-improving-condas-performance>`_
`slow <https://medium.com/@marius.v.niekerk/conda-metachannel-f962241c9437>`_
to resolve all dependencies when performing
an installation. You may wish to consider using the alternative
mamba_ installation manager to speed things up.
.. note::
On Linux_ and `Mac OS X`_, you should have a pretty complete system
to run and visualize :term:`FiPy` simulations. On Windows_, there
are fewer packages available via conda_, particularly amongst the
sparse matrix :ref:`SOLVERS`, but the system still should be
functional. Significantly, you will need to download and install
:term:`Gmsh` manually when using Python 2.7.
.. attention::
When installed via conda_ or :term:`pip`, :term:`FiPy` will not include
its :ref:`examples <part:examples>`. These can be obtained by
`cloning the repository`_ or downloading a `compressed archive`_.
.. _install Miniconda: https://conda.io/projects/conda/en/latest/user-guide/install
.. _guyer: https://anaconda.org/guyer
.. _conda-forge: https://conda-forge.github.io/
.. _Mac OS X: http://www.apple.com/macosx/
.. _Linux: http://www.linux.org/
.. _Windows: http://www.microsoft.com/windows/
.. |CondaForge| image:: https://anaconda.org/conda-forge/fipy/badges/installer/conda.svg
.. _CondaForge: https://anaconda.org/conda-forge/fipy
.. _mamba: https://mamba.readthedocs.io/
.. _fipy conda-forge: https://anaconda.org/conda-forge/fipy
--------------
Obtaining FiPy
--------------
:term:`FiPy` is freely available for download via Git_ or as a
`compressed archive`_. Please see
:ref:`documentation:GIT` for instructions on obtaining :term:`FiPy`
with Git_.
.. warning::
Keep in mind that if you choose to download the `compressed
archive`_ you will then need to preserve your changes when upgrades
to :term:`FiPy` become available (upgrades via Git_ will handle
this issue automatically).
.. _Git: https://github.com/usnistgov/fipy
.. _compressed archive: https://github.com/usnistgov/fipy/releases
---------------
Installing FiPy
---------------
Details of the `Required Packages`_ and links are given below,
but for the courageous and the
impatient, :term:`FiPy` can be up and running quickly by simply
installing the following prerequisite packages on your system:
* Python_
* NumPy_
* At least one of the :ref:`SOLVERS`
* At least one of the :ref:`VIEWERS` (:term:`FiPy`'s tests will run
without a viewer, but you'll want one for any practical work)
Other :ref:`OPTIONALPACKAGES` add greatly to :term:`FiPy`'s
capabilities, but are not necessary for an initial installation or to
simply run the test suite.
It is not necessary to formally install :term:`FiPy`, but if you wish
to do so and you are confident that all of the requisite packages have
been installed properly, you can install it by typing::
$ python -m pip install fipy
or by unpacking the archive and typing::
$ python setup.py install
at the command line in the base :term:`FiPy` directory. You can also install
:term:`FiPy` in "development mode" by typing::
$ python setup.py develop
which allows the source code to be altered in place and executed without
issuing further installation commands.
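
A quick, optional sanity check after any of the above is to run
:term:`FiPy`'s built-in test suite (this assumes the ``fipy.test()``
entry point provided by the package)::

    $ python -c "import fipy; fipy.test()"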
Alternatively, you may choose not to formally install :term:`FiPy` and
to simply work within the base directory instead. In this case or if you
are making a non-standard install (without admin privileges), read about
setting up your :ref:`ENVIRONMENT` before beginning the installation
process.
.. _REQUIREDPACKAGES:
-----------------
Required Packages
-----------------
.. warning::

   :term:`FiPy` will not run if the following items are not installed.
Python
======
http://www.python.org/
:term:`FiPy` is written in the :term:`Python` language and requires a
:term:`Python` installation to run. :term:`Python` comes pre-installed
on many operating systems, which you can check by opening a terminal
and typing ``python``, *e.g.*::
$ python
Python 2.7.15 | ...
...
Type "help", "copyright", "credits" or "license" for more information.
>>>
If necessary, you can download_ and install it for your platform
from http://www.python.org/download.
.. note::
:term:`FiPy` requires at least version 2.7.x of :term:`Python`.
.. _download: http://www.python.org/download/
:term:`Python` along with many of :term:`FiPy`'s required and optional
packages is available with one of the following distributions.
NumPy
=====
http://numpy.scipy.org
Obtain and install the :term:`NumPy` package. :term:`FiPy` requires at
least version 1.0 of NumPy_.
.. _OPTIONALPACKAGES:
-----------------
Optional Packages
-----------------
.. note::

   The following packages are not required to run :term:`FiPy`, but they can
   be helpful.
Gmsh
====
http://www.geuz.org/gmsh/
:term:`Gmsh` is an application that allows the creation of irregular meshes.
When running in parallel, :term:`FiPy` requires a version of :term:`Gmsh`
>= 2.5 and < 4.0 or >= 4.5.2.
SciPy
=====
http://www.scipy.org/
:term:`SciPy` provides a large collection of functions and tools that can
be useful for running and analyzing :term:`FiPy` simulations. Significantly
improved performance has been achieved with the judicious use of C language
inlining (see the :ref:`FlagsAndEnvironmentVariables` section for more
details), via the :mod:`weave` module.
.. note::

   A handful of test cases use functions from the :term:`SciPy`
   library and will throw errors if it is missing.
------------------
Level Set Packages
------------------
To use the level set (:cite:`levelSetBook`) components of :term:`FiPy` one of the following is
required.
.. _SCIKITFMM:
Scikit-fmm
==========
http://packages.python.org/scikit-fmm/
Scikit-fmm_ is a python extension module which implements the fast
marching method.
.. _Scikit-fmm: http://packages.python.org/scikit-fmm/
.. _LSMLIBDOC:
LSMLIB
======
http://ktchu.serendipityresearch.org/software/lsmlib/index.html
The Level Set Method Library (LSMLIB_) provides support for the serial
and parallel simulation of implicit surface and curve dynamics in two-
and three-dimensions.
Install LSMLIB_ as per the instructions on the website. Additionally
PyLSMLIB_ is required. To install, follow the instructions on the
website,
https://github.com/ktchu/LSMLIB/tree/master/pylsmlib#pylsmlib.
.. _PyLSMLIB: https://github.com/ktchu/LSMLIB/tree/master/pylsmlib#pylsmlib
.. _LSMLIB: http://ktchu.serendipityresearch.org/software/lsmlib/index.html
.. _ENVIRONMENT:
-----------------------
Development Environment
-----------------------
It is often preferable to not formally install packages in the system
directories. The reasons for this include:
* developing or altering the package source code,
* trying out a new package along with its dependencies without
violating a working system,
* dealing with conflicting packages and dependencies,
* or not having admin privileges.
To avoid tampering with the system Python_ installation, you can employ one
of the utilities that manage packages and their dependencies independently
of the system package manager and the system directories. These utilities
include conda_, Nix_, Stow_, Virtualenv_ and Buildout_, amongst others.
Conda_ and Nix_ are only ones of these we have the resources to support.
Our preferred development environment is set up with::
$ conda create --name <MYFIPYENV> --channel conda-forge python=<PYTHONVERSION> fipy
$ source activate <MYFIPYENV>
$ python -m pip install scikit-fmm
$ conda remove --channel conda-forge --force fipy
$ git clone https://github.com/usnistgov/fipy.git
$ cd fipy
$ python setup.py develop
.. _Conda: https://conda.io
.. _Stow: http://savannah.gnu.org/projects/stow/
.. _Buildout: http://pypi.python.org/pypi/zc.buildout
.. _Virtualenv: https://virtualenv.pypa.io
.. _documentation:GIT:
.. include:: documentation/GIT.rst
---
Nix
---
.. _nixinstall:
.. include:: documentation/NIX-README.rst
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/typescript/lib/lib.webworker.iterable.d.ts
/////////////////////////////
/// Worker Iterable APIs
/////////////////////////////
interface Cache {
addAll(requests: Iterable<RequestInfo>): Promise<void>;
}
interface CanvasPath {
roundRect(x: number, y: number, w: number, h: number, radii?: number | DOMPointInit | Iterable<number | DOMPointInit>): void;
}
interface CanvasPathDrawingStyles {
setLineDash(segments: Iterable<number>): void;
}
interface DOMStringList {
[Symbol.iterator](): IterableIterator<string>;
}
interface FileList {
[Symbol.iterator](): IterableIterator<File>;
}
interface FontFaceSet extends Set<FontFace> {
}
interface FormData {
[Symbol.iterator](): IterableIterator<[string, FormDataEntryValue]>;
/** Returns an array of key, value pairs for every entry in the list. */
entries(): IterableIterator<[string, FormDataEntryValue]>;
/** Returns a list of keys in the list. */
keys(): IterableIterator<string>;
/** Returns a list of values in the list. */
values(): IterableIterator<FormDataEntryValue>;
}
interface Headers {
[Symbol.iterator](): IterableIterator<[string, string]>;
/** Returns an iterator allowing to go through all key/value pairs contained in this object. */
entries(): IterableIterator<[string, string]>;
/** Returns an iterator allowing to go through all keys of the key/value pairs contained in this object. */
keys(): IterableIterator<string>;
/** Returns an iterator allowing to go through all values of the key/value pairs contained in this object. */
values(): IterableIterator<string>;
}
interface IDBDatabase {
/** Returns a new transaction with the given mode ("readonly" or "readwrite") and scope which can be a single object store name or an array of names. */
transaction(storeNames: string | Iterable<string>, mode?: IDBTransactionMode, options?: IDBTransactionOptions): IDBTransaction;
}
interface IDBObjectStore {
/**
* Creates a new index in store with the given name, keyPath and options and returns a new IDBIndex. If the keyPath and options define constraints that cannot be satisfied with the data already in store the upgrade transaction will abort with a "ConstraintError" DOMException.
*
* Throws an "InvalidStateError" DOMException if not called within an upgrade transaction.
*/
createIndex(name: string, keyPath: string | Iterable<string>, options?: IDBIndexParameters): IDBIndex;
}
interface MessageEvent<T = any> {
/** @deprecated */
initMessageEvent(type: string, bubbles?: boolean, cancelable?: boolean, data?: any, origin?: string, lastEventId?: string, source?: MessageEventSource | null, ports?: Iterable<MessagePort>): void;
}
interface SubtleCrypto {
deriveKey(algorithm: AlgorithmIdentifier | EcdhKeyDeriveParams | HkdfParams | Pbkdf2Params, baseKey: CryptoKey, derivedKeyType: AlgorithmIdentifier | AesDerivedKeyParams | HmacImportParams | HkdfParams | Pbkdf2Params, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKey>;
generateKey(algorithm: RsaHashedKeyGenParams | EcKeyGenParams, extractable: boolean, keyUsages: ReadonlyArray<KeyUsage>): Promise<CryptoKeyPair>;
generateKey(algorithm: AesKeyGenParams | HmacKeyGenParams | Pbkdf2Params, extractable: boolean, keyUsages: ReadonlyArray<KeyUsage>): Promise<CryptoKey>;
generateKey(algorithm: AlgorithmIdentifier, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKeyPair | CryptoKey>;
importKey(format: "jwk", keyData: JsonWebKey, algorithm: AlgorithmIdentifier | RsaHashedImportParams | EcKeyImportParams | HmacImportParams | AesKeyAlgorithm, extractable: boolean, keyUsages: ReadonlyArray<KeyUsage>): Promise<CryptoKey>;
importKey(format: Exclude<KeyFormat, "jwk">, keyData: BufferSource, algorithm: AlgorithmIdentifier | RsaHashedImportParams | EcKeyImportParams | HmacImportParams | AesKeyAlgorithm, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKey>;
unwrapKey(format: KeyFormat, wrappedKey: BufferSource, unwrappingKey: CryptoKey, unwrapAlgorithm: AlgorithmIdentifier | RsaOaepParams | AesCtrParams | AesCbcParams | AesGcmParams, unwrappedKeyAlgorithm: AlgorithmIdentifier | RsaHashedImportParams | EcKeyImportParams | HmacImportParams | AesKeyAlgorithm, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKey>;
}
interface URLSearchParams {
[Symbol.iterator](): IterableIterator<[string, string]>;
/** Returns an array of key, value pairs for every entry in the search params. */
entries(): IterableIterator<[string, string]>;
/** Returns a list of keys in the search params. */
keys(): IterableIterator<string>;
/** Returns a list of values in the search params. */
values(): IterableIterator<string>;
}
interface WEBGL_draw_buffers {
drawBuffersWEBGL(buffers: Iterable<GLenum>): void;
}
interface WEBGL_multi_draw {
multiDrawArraysInstancedWEBGL(mode: GLenum, firstsList: Int32Array | Iterable<GLint>, firstsOffset: GLuint, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, instanceCountsList: Int32Array | Iterable<GLsizei>, instanceCountsOffset: GLuint, drawcount: GLsizei): void;
multiDrawArraysWEBGL(mode: GLenum, firstsList: Int32Array | Iterable<GLint>, firstsOffset: GLuint, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, drawcount: GLsizei): void;
multiDrawElementsInstancedWEBGL(mode: GLenum, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, type: GLenum, offsetsList: Int32Array | Iterable<GLsizei>, offsetsOffset: GLuint, instanceCountsList: Int32Array | Iterable<GLsizei>, instanceCountsOffset: GLuint, drawcount: GLsizei): void;
multiDrawElementsWEBGL(mode: GLenum, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, type: GLenum, offsetsList: Int32Array | Iterable<GLsizei>, offsetsOffset: GLuint, drawcount: GLsizei): void;
}
interface WebGL2RenderingContextBase {
clearBufferfv(buffer: GLenum, drawbuffer: GLint, values: Iterable<GLfloat>, srcOffset?: GLuint): void;
clearBufferiv(buffer: GLenum, drawbuffer: GLint, values: Iterable<GLint>, srcOffset?: GLuint): void;
clearBufferuiv(buffer: GLenum, drawbuffer: GLint, values: Iterable<GLuint>, srcOffset?: GLuint): void;
drawBuffers(buffers: Iterable<GLenum>): void;
getActiveUniforms(program: WebGLProgram, uniformIndices: Iterable<GLuint>, pname: GLenum): any;
getUniformIndices(program: WebGLProgram, uniformNames: Iterable<string>): Iterable<GLuint> | null;
invalidateFramebuffer(target: GLenum, attachments: Iterable<GLenum>): void;
invalidateSubFramebuffer(target: GLenum, attachments: Iterable<GLenum>, x: GLint, y: GLint, width: GLsizei, height: GLsizei): void;
transformFeedbackVaryings(program: WebGLProgram, varyings: Iterable<string>, bufferMode: GLenum): void;
uniform1uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform2uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform3uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform4uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix2x3fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix2x4fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix3x2fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix3x4fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix4x2fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix4x3fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
vertexAttribI4iv(index: GLuint, values: Iterable<GLint>): void;
vertexAttribI4uiv(index: GLuint, values: Iterable<GLuint>): void;
}
interface WebGL2RenderingContextOverloads {
uniform1fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform1iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform2fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform2iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform3fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform3iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform4fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniform4iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix2fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix3fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
uniformMatrix4fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void;
}
interface WebGLRenderingContextBase {
vertexAttrib1fv(index: GLuint, values: Iterable<GLfloat>): void;
vertexAttrib2fv(index: GLuint, values: Iterable<GLfloat>): void;
vertexAttrib3fv(index: GLuint, values: Iterable<GLfloat>): void;
vertexAttrib4fv(index: GLuint, values: Iterable<GLfloat>): void;
}
interface WebGLRenderingContextOverloads {
uniform1fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void;
uniform1iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void;
uniform2fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void;
uniform2iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void;
uniform3fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void;
uniform3iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void;
uniform4fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void;
uniform4iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void;
uniformMatrix2fv(location: WebGLUniformLocation | null, transpose: GLboolean, value: Iterable<GLfloat>): void;
uniformMatrix3fv(location: WebGLUniformLocation | null, transpose: GLboolean, value: Iterable<GLfloat>): void;
uniformMatrix4fv(location: WebGLUniformLocation | null, transpose: GLboolean, value: Iterable<GLfloat>): void;
} | PypiClean |
/EasyNN-0.2.1.tar.gz/EasyNN-0.2.1/README.md | 
# EasyNN - Neural Networks made Easy
EasyNN is a Python package that provides an easy-to-use neural network. It is designed to work right out of the box with multiple datasets, while also allowing the user to customize features as they see fit.
### EasyNN requires Python version 3.9.7 or greater.
## See our [wiki](https://github.com/danielwilczak101/EasyNN/wiki) for more information and [Datasets](https://github.com/danielwilczak101/EasyNN/wiki).
## Installation:
Run python's pip3 to install:
```Python
pip3 install EasyNN
```
### Model:
```Python
from EasyNN.examples.mnist.number.trained import model
# Classify an image.
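# `image` should be a formatted 28x28 array; see the full example below for how to create one.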
print(model.classify(image))
```
### Dataset Example:
```Python
from EasyNN.examples.mnist.number.trained import model
from EasyNN.examples.mnist.number.data import dataset
images, labels = dataset
# Classify what the second image is in the dataset.
print(model.classify(images[1]))
# Show the image.
model.show(images[1])
```
### Dataset example output:
```
Downloading - number_parameters.npz:
[################################] 1769/1769 - 00:00:00
Downloading - number_structure.pkl:
[################################] 10700/10700 - 00:00:00
Downloading - number_dataset.npz:
[################################] 11221/11221 - 00:00:00
0
```
### Full example:
More info about [converting images](https://github.com/danielwilczak101/EasyNN/wiki/Image-Utility) can be found in the utilities section.
```Python
from EasyNN.examples.mnist.number.trained import model
from EasyNN.utilities import Preprocess, download
# Download an example image.
download("three.jpg","https://bit.ly/3dbO1eV")
format_options = dict(
grayscale=True,
invert=True,
process=True,
contrast=30,
resize=(28, 28),
rotate=3,
)
# Converting your image into the correct format for the mnist number dataset.
image = Preprocess("three.jpg").format(**format_options)
# Classify what the image is using the pretrained model.
print(model.classify(image))
# Show the image after it has been processed.
model.show(image)
```
### Output:
```bash
Downloading - three.jpg:
[################################] 1371/1371 - 00:00:00
3
```
### Image output:
<p align="center">
<img width="400px" height="400px" src="https://raw.githubusercontent.com/danielwilczak101/EasyNN/media/images/example_three.png">
</p>
### Trained Models
Use the trained models section to see EasyNN's datasets and pre-trained neural networks ready to run.
<br />
[MNIST Number](https://github.com/danielwilczak101/EasyNN/wiki/MNIST-Numbers) Classifier network for images of handwritten single digits between 0 and 9.
[MNIST Fashion](https://github.com/danielwilczak101/EasyNN/wiki/MNIST-Fashion) Classifier network for ten classes of human clothing images of the size 28x28 pixels.
[Cifar 10](https://github.com/danielwilczak101/EasyNN/wiki/Cifar10) Classifier network for ten types of images varying from airplane, cat, dog, etc - 32x32 RGB images.
## To see more examples with many other datasets. Please visit our [wiki](https://github.com/danielwilczak101/EasyNN/wiki).
| PypiClean |
/FetchNovels-0.9.6.2.tar.gz/FetchNovels-0.9.6.2/novel/main.py |
import argparse
import re
import sys
import textwrap
from novel import __version__
from novel import cli, config
class MyParser(argparse.ArgumentParser):
def __init__(self):
description = textwrap.dedent("""\
Fetch novels from Internet, and write into file.
Available sources:
bgif2, biquge, dzxsw, feizw, haxtxt, jjwxc, klxsw, lou19,
lwxs, lwxs520, lwxsw, piaotian, piaotiancc, ranwen, shu69,
shushu8, sto, ttshuba, ttzw, ttzw5, uks5, wdxs, xs365, yfzww,
yq33, zhaishu8, doubangroup, ...
""")
super().__init__(
prog='fetchnovels',
description=description,
formatter_class=argparse.RawTextHelpFormatter
)
self.add_argument('-V', '--version', action='version',
version=__version__)
group = self.add_mutually_exclusive_group()
group.add_argument('-u', '--update', action='store_true',
help='update novels in the database')
group.add_argument('-d', '--dump-only', action='store_true',
help='dump only without update')
group.add_argument('-l', '--list', action='store_true',
help='list novels in the database')
group.add_argument('-ls', '--list-serial', action='store_true',
help='list serials in the database')
group.add_argument('-la', '--list-article', action='store_true',
help='list articles in the database')
group.add_argument('-D', '--delete', action='store_true',
help='delete novels in the database')
group.add_argument('-m', '--mark-finish', action='store_true',
help='mark novels as finished')
self.add_argument('-v', '--verbose', action='count',
help='show in more detail')
self.add_argument('-r', '--refresh', action='store_true',
help='refresh novel in the database')
proxy_group = self.add_mutually_exclusive_group()
proxy_group.add_argument('-p', '--proxy', action='store',
help='use specific proxy')
proxy_group.add_argument('-n', '--no-proxy', action='store_true',
help='do not use any proxies')
self.add_argument('source', nargs='?',
help='download source')
self.add_argument('tid', nargs='*',
help='id for novels to download')
def main():
parser = MyParser()
args = parser.parse_args()
if args.source:
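        # Internal source names do not start with digits; rewrite e.g. "19lou" as "lou19".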
need_fix = re.match(r'(\d+)(.+)', args.source)
if need_fix:
source = '{g[1]}{g[0]}'.format(g=need_fix.groups())
else:
source = args.source
else:
source = None
if args.no_proxy:
proxies = '---'
else:
proxies = args.proxy
config.check_first()
with cli.NovelCmdline(source, args.tid, proxies, args.verbose) as fac:
if args.list:
fac.list()
elif args.list_serial:
fac.list_serial()
elif args.list_article:
fac.list_article()
elif args.delete:
fac.delete()
elif args.mark_finish:
fac.try_mark_finish()
elif args.update:
if args.refresh:
fac.refresh()
else:
fac.update()
elif args.dump_only:
fac.dump()
elif source:
print('{}: {}'.format(source, args.tid))
if not args.tid:
print('No specific tid to download!')
sys.exit(1)
if args.refresh:
fac.refresh()
else:
fac.update()
fac.dump()
else:
parser.print_usage()
if __name__ == '__main__':
main() | PypiClean |
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/layer/rnn.py | import numpy as np
from ..optimizer.adam import Adam
from ..optimizer.momentum import Momentum
from ..optimizer.rmsprop import Rmsprop
from ..optimizer.sgd import Sgd
class RNN:
@staticmethod
def init(layer, flow_data_shape, config):
sequence_length = int(flow_data_shape["sequence_length"])
        # He (Kaiming) initialization, mainly intended for the relu activation function
if layer["weight_init"] == 'msra':
layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * (
np.sqrt(2 / sequence_length))
layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * (
np.sqrt(2 / layer['neurons_number']))
layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * (
np.sqrt(2 / layer['neurons_number']))
        # Xavier initialization, mainly intended for the tanh activation function
elif layer["weight_init"] == 'xavier':
layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * (
np.sqrt(1 / sequence_length))
layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * (
np.sqrt(1 / layer['neurons_number']))
layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * (
np.sqrt(1 / layer['neurons_number']))
else:
layer["U"] = np.random.randn(layer['neurons_number'], flow_data_shape["sequence_length"]) * 0.01
layer["W"] = np.random.randn(layer['neurons_number'], layer['neurons_number']) * 0.01
layer["V"] = np.random.randn(flow_data_shape["sequence_length"], layer['neurons_number']) * 0.01
layer["bW"] = np.zeros((layer['neurons_number'], 1))
layer["bV"] = np.zeros((flow_data_shape["sequence_length"], 1))
flow_data_shape = {
"flatten_size": flow_data_shape["sequence_length"],
"batch_size": flow_data_shape["batch_size"]
}
return layer, flow_data_shape
@staticmethod
def forword(layer, flow_data, is_train):
# flow_data = flow_data[0]
h = np.zeros((layer['neurons_number'], flow_data.shape[0]))
for i in range(flow_data.shape[1]):
sequence = flow_data[:, i]
layer["U_input_" + str(i)] = sequence
U_multiply_X = np.dot(layer["U"], sequence.T)
layer["W_input_" + str(i)] = h
W_multiply_h = np.dot(layer["W"], h)
h = U_multiply_X + W_multiply_h
h = h + layer["bW"]
h = np.tanh(h)
layer["tanh_output"] = h
        # Cache this layer's input
layer["V_input"] = h
flow_data = np.dot(layer["V"], h) + layer["bV"]
return flow_data, layer
@staticmethod
def backword(flow_data, layer, config):
output_all = np.zeros(layer["input"].shape)
layer["dW"] = np.zeros(layer["W"].shape)
layer["dU"] = np.zeros(layer["U"].shape)
layer["dbW"] = np.zeros(layer["bW"].shape)
layer["dbV"] = np.sum(flow_data, axis=1, keepdims=True)
layer["dV"] = np.dot(flow_data, layer['V_input'].T)
h = np.dot(layer["V"].T, flow_data)
for i in reversed(range(0, layer['input'].shape[1])):
            # tanh gradient
h = h * (1 - np.power(layer["tanh_output"], 2))
layer["dbW"] += np.sum(h, axis=1, keepdims=True)
layer["dW"] += np.dot(h, layer["W_input_" + str(i)].T)
layer["dU"] += np.dot(h, layer["U_input_" + str(i)])
output_all[:, i] = np.dot(h.T, layer["U"])
h = np.dot(layer["W"].T, h)
return output_all, layer
@staticmethod
def update_parameters(layer, config, iteration):
"""
        Update the weights and biases.
        :param layer: layer dict holding this layer's weights, biases, gradients,
            cached forward inputs/outputs, instantiated objects and other information
        :param config: configuration
        :param iteration: iteration number
        :return: the updated layer
"""
        # Keys of the parameters to update
keys = ['U', 'W', 'V', 'bW', 'bV']
if "optimizer" in config.keys() and config["optimizer"] == 'momentum':
layer = Momentum.update_parameters(layer, keys, config['learning_rate'], config['momentum_coefficient'])
elif "optimizer" in config.keys() and config["optimizer"] == 'rmsprop':
layer = Rmsprop.update_parameters(layer, keys, config['learning_rate'])
elif "optimizer" in config.keys() and config["optimizer"] == 'adam':
layer = Adam.update_parameters(layer, keys, config['learning_rate'], iteration)
else:
            # Fall back to plain SGD by default
layer = Sgd.update_parameters(layer, keys, config['learning_rate'])
return layer | PypiClean |
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/aiohttp/views.py | from __future__ import annotations
import asyncio
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Iterable
from aiohttp import web
from strawberry.aiohttp.handlers import (
GraphQLTransportWSHandler,
GraphQLWSHandler,
HTTPHandler,
)
from strawberry.http import process_result
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL
if TYPE_CHECKING:
from strawberry.http import GraphQLHTTPResponse
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
class GraphQLView:
# Mark the view as coroutine so that AIOHTTP does not confuse it with a deprecated
# bare handler function.
_is_coroutine = asyncio.coroutines._is_coroutine # type: ignore[attr-defined]
graphql_transport_ws_handler_class = GraphQLTransportWSHandler
graphql_ws_handler_class = GraphQLWSHandler
http_handler_class = HTTPHandler
def __init__(
self,
schema: BaseSchema,
graphiql: bool = True,
allow_queries_via_get: bool = True,
keep_alive: bool = True,
keep_alive_interval: float = 1,
debug: bool = False,
subscription_protocols: Iterable[str] = (
GRAPHQL_TRANSPORT_WS_PROTOCOL,
GRAPHQL_WS_PROTOCOL,
),
connection_init_wait_timeout: timedelta = timedelta(minutes=1),
):
self.schema = schema
self.graphiql = graphiql
self.allow_queries_via_get = allow_queries_via_get
self.keep_alive = keep_alive
self.keep_alive_interval = keep_alive_interval
self.debug = debug
self.subscription_protocols = subscription_protocols
self.connection_init_wait_timeout = connection_init_wait_timeout
async def __call__(self, request: web.Request) -> web.StreamResponse:
ws = web.WebSocketResponse(protocols=self.subscription_protocols)
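        # can_prepare() reports whether this request is a WebSocket upgrade and which subprotocol was negotiated.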
ws_test = ws.can_prepare(request)
if ws_test.ok:
if ws_test.protocol == GRAPHQL_TRANSPORT_WS_PROTOCOL:
return await self.graphql_transport_ws_handler_class(
schema=self.schema,
debug=self.debug,
connection_init_wait_timeout=self.connection_init_wait_timeout,
get_context=self.get_context, # type: ignore
get_root_value=self.get_root_value,
request=request,
).handle()
elif ws_test.protocol == GRAPHQL_WS_PROTOCOL:
return await self.graphql_ws_handler_class(
schema=self.schema,
debug=self.debug,
keep_alive=self.keep_alive,
keep_alive_interval=self.keep_alive_interval,
get_context=self.get_context,
get_root_value=self.get_root_value,
request=request,
).handle()
else:
await ws.prepare(request)
await ws.close(code=4406, message=b"Subprotocol not acceptable")
return ws
else:
return await self.http_handler_class(
schema=self.schema,
graphiql=self.graphiql,
allow_queries_via_get=self.allow_queries_via_get,
get_context=self.get_context,
get_root_value=self.get_root_value,
encode_json=self.encode_json,
process_result=self.process_result,
request=request,
).handle()
async def get_root_value(self, request: web.Request) -> object:
return None
async def get_context(
self, request: web.Request, response: web.StreamResponse
) -> object:
return {"request": request, "response": response}
async def process_result(
self, request: web.Request, result: ExecutionResult
) -> GraphQLHTTPResponse:
return process_result(result)
def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
return json.dumps(response_data) | PypiClean |
/DeepCell-Spots-0.3.2.tar.gz/DeepCell-Spots-0.3.2/deepcell_spots/image_alignment.py |
import os
import cv2
import numpy as np
def read_images(root_dir, dataorg, verbose=True):
"""Reads in image files from given directories and parses them into dictionaries of different
types.
Args:
root_dir (str): Directory containing all image files
        dataorg (pandas.DataFrame): Data frame with required columns `'fileName'` (name of an
            image file in the root directory; images must be saved in .npy format),
            `'readoutName'` (unique ID name given to each channel in each image), and `'frame'`
            (frames in the image stack associated with each readout name, including the
            `'Reference'` entry used for image alignment and the `'Cytoplasm'` entry used for
            cell segmentation).
verbose (bool, optional): Boolean determining if file names are printed as they are
processed. Defaults to ``True``.
Returns:
(dict, dict, dict): `max_im_dict` is a dictionary where keys are image IDs (`'readoutName'`)
and values are maximum intensity projections of frames associated with that readout name.
        `reference_dict` is a dictionary where keys are image IDs (`'readoutName'`) and values are
        the reference (fiducial) channel image used for alignment for each readout name (multiple
        readout names may have the same). `cytoplasm_dict` is a dictionary where keys are image IDs
        (`'readoutName'`) and values are the cytoplasm channel image for each readout name (multiple
readout names may have the same).
"""
max_im_dict = {}
reference_dict = {}
cytoplasm_dict = {}
# Grab all file names
image_files = list(dataorg['fileName'].unique())
# Remove invalid file names
image_files = [x for x in image_files if type(x) == str]
for i in range(len(image_files)):
# Slice data organization table to get information for this image stack
round_df = dataorg.loc[dataorg['fileName'] == image_files[i]]
image_file = os.path.join(root_dir, image_files[i])
# Load image stack
image_stack = np.load(image_file)
# Grab ID names for each image in stack
rounds = round_df['readoutName'].values
rounds = [item for item in rounds if 'Spots' in item]
for item in rounds:
if verbose:
print('Working on: {}'.format(item))
# Get frames associated with a round in the image stack
frame_list = round_df['frame'].loc[round_df['readoutName'] == item].values[0]
frame_list = frame_list.strip('][').split(', ')
frame_list = np.array(frame_list).astype(int)
start_frame = frame_list[0]
end_frame = frame_list[-1]
# Maximum projection
max_im = np.max(image_stack[:, :, start_frame:end_frame + 1], axis=2)
# Clip outlier high pixel values
im = np.clip(max_im, np.min(max_im), np.percentile(max_im, 99.9))
im = np.expand_dims(im, axis=[0, -1])
max_im_dict[item] = im
ref_frame = dataorg.loc[dataorg['readoutName'] == 'Reference']['frame'].values[0]
ref_frame = ref_frame.strip('][').split(', ')
ref_frame = np.array(ref_frame).astype(int)
ref_frame = np.mean([ref_frame[0], ref_frame[-1]]).astype(int)
reference_dict[item] = np.expand_dims(image_stack[:, :, ref_frame], axis=[0, -1])
cyto_frame = dataorg.loc[dataorg['readoutName'] == 'Cytoplasm']['frame'].values[0]
cyto_frame = cyto_frame.strip('][').split(', ')
cyto_frame = np.array(cyto_frame).astype(int)
cyto_frame = np.mean([cyto_frame[0], cyto_frame[-1]]).astype(int)
cytoplasm_dict[item] = np.expand_dims(image_stack[:, :, cyto_frame], axis=[0, -1])
return(max_im_dict, reference_dict, cytoplasm_dict)
def align_images(image_dict, reference_dict):
"""Aligns input images with alignment transformation learned from reference images.
Args:
image_dict (dict): Dictionary where keys are image IDs (`'readoutName'`) and values are
images to be aligned for each readout name.
reference_dict (dict): Dictionary where keys are image IDs (`'readoutName'`) and values are
fiducial channel (image used for alignment) for each readout name (multiple readout
names may have the same reference image).
Returns:
aligned_dict (dict): Dictionary where keys are image IDs (`'readoutName'`) and values are
images from `image_dict` that have been aligned by transformations learned from
images from `reference_dict`.
"""
aligned_dict = {}
image_keys = list(image_dict.keys())
num_images = len(image_keys)
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
orb = cv2.ORB_create(MAX_FEATURES)
reference_im = cv2.convertScaleAbs(reference_dict[image_keys[0]][0, :, :, :],
alpha=(255.0 / 65535.0))
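    # convertScaleAbs with alpha=255/65535 rescales 16-bit intensities to 8-bit, as required by ORB.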
keypoints2, descriptors2 = orb.detectAndCompute(reference_im, None)
for idx in range(num_images):
im1 = cv2.convertScaleAbs(reference_dict[image_keys[idx]][0, :, :, :],
alpha=(255.0 / 65535.0))
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1, None)
# Match features
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches = sorted(matches, key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width = reference_im.shape
target_im = image_dict[image_keys[idx]][0, :, :, :]
aligned_im = cv2.warpPerspective(target_im, h, (width, height))
aligned_dict[image_keys[idx]] = np.expand_dims(aligned_im, axis=[0, -1])
return aligned_dict
def crop_images(aligned_dict):
"""Crops images to remove zero-value pixels resulting from image alignment.
Args:
aligned_dict (dict): Dictionary where keys are image IDs (`'readoutName'`) and values are
images from `image_dict` that have been aligned with `align_images`.
Returns:
crop_dict (dict): Dictionary where keys are image IDs (`'readoutName'`) and values are
images from `image_dict` that have been aligned with `align_images` with zero-value
pixels cropped out.
"""
crop_dict = {}
crop_bool = np.array(list(aligned_dict.values())) > 0
crop_bool_all = np.min(crop_bool, axis=0)
top = 0
while np.array([crop_bool_all[0, :, :, 0][top] == 0]).all():
top += 1
bottom = np.shape(crop_bool_all)[1] - 1
while np.array([crop_bool_all[0, :, :, 0][bottom] == 0]).all():
bottom -= 1
left = 0
while np.array([crop_bool_all[0, :, :, 0][:, left] == 0]).all():
left += 1
right = np.shape(crop_bool_all)[2] - 1
while np.array([crop_bool_all[0, :, :, 0][:, right] == 0]).all():
right -= 1
for item in aligned_dict.keys():
# increment one more because sometimes low value pixels at edges of image from alignment
crop_dict[item] = aligned_dict[item][:, top + 1:bottom, left + 1:right, :]
return(crop_dict) | PypiClean |
/MRFI-2.0.0.tar.gz/MRFI-2.0.0/experiments/resnet18_weight.py | from dataset.imagenet import make_testloader
from mrfi import MRFI, EasyConfig
from mrfi.experiment import BER_Acc_experiment, logspace_density, Acc_golden, get_weight_info
from torchvision.models import resnet18
batch_size = 4
n_images = 10000
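# Bit-error-rate sweep: logspace_density(-9, -3) is assumed to yield log-spaced BER values between 1e-9 and 1e-3.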
ber_range = logspace_density(-9, -3)
econfig = EasyConfig.load_file('easyconfigs/float_weight_fi.yaml')
fi_model = MRFI(resnet18(pretrained = True).cuda().eval(), econfig)
print('golden_Acc', Acc_golden(fi_model, make_testloader(n_images, batch_size = batch_size)))
selector_cfg = fi_model.get_weights_configs('selector')
BER, fl32 = BER_Acc_experiment(fi_model, selector_cfg, make_testloader(n_images, batch_size = batch_size), ber_range, bit_width=32)
econfig = EasyConfig.load_file('easyconfigs/weight_fi.yaml')
econfig.faultinject[0]['error_mode']['method'] = 'IntRandomBitFlip'
econfig.faultinject[0]['quantization']['scale_factor'] = 2
fi_model = MRFI(resnet18(pretrained = True).cuda().eval(), econfig)
selector_cfg = fi_model.get_weights_configs('selector')
BER, Qlw = BER_Acc_experiment(fi_model, selector_cfg, make_testloader(n_images, batch_size = batch_size), ber_range)
econfig = EasyConfig.load_file('easyconfigs/fxp_weight_fi.yaml')
del econfig.faultinject[0]['module_name']
econfig.faultinject[0]['module_type'] = ['Conv2d', 'Linear']
fi_model = MRFI(resnet18(pretrained = True).cuda().eval(), econfig)
selector_cfg = fi_model.get_weights_configs('selector')
quantization_cfg = fi_model.get_weights_configs('quantization.args')
errormode_cfg = fi_model.get_weights_configs('error_mode.args')
print(get_weight_info(fi_model, weight_name=['weight', 'bias']))
quantization_cfg.integer_bit = 2
quantization_cfg.decimal_bit = 12
errormode_cfg.bit_width = 15
BER, fxp2 = BER_Acc_experiment(fi_model, selector_cfg, make_testloader(n_images, batch_size = batch_size), ber_range, bit_width=15)
quantization_cfg.integer_bit = 3
quantization_cfg.decimal_bit = 13
errormode_cfg.bit_width = 17
BER, fxp3 = BER_Acc_experiment(fi_model, selector_cfg, make_testloader(n_images, batch_size = batch_size), ber_range, bit_width=17)
import numpy as np
np.savez('result/resnet18_weight.npz', BER = BER, fxp2 = fxp2, fxp3 = fxp3, fl32 = fl32, Qlw = Qlw) | PypiClean |
/Flask_RESTful_DRY-0.3.1-py3-none-any.whl/Flask_RESTful_DRY-0.3.1.dist-info/DESCRIPTION.rst | =================
Flask-RESTful-DRY
=================
Allows the declaration of RESTful APIs using simple declarations so that you
don't need to write executable code for every HTTP method on every API.
Introduces the following (a brief sketch follows this list):
* Inheritable declarations at the class level, including the ability to
modify the base class declarations.
* Dynamic creation of Flask-RESTful_ API Resource classes, so that you
can automatically generate the same pattern of URLs many times.
* Reducing HTTP method code to a series of re-usable steps. These are
automatically ordered for each HTTP method to meet ordering
constraints specified on each step. This allows the requirements for
the HTTP method code to be reduced to a simple declaration of what
steps to include. Because these steps are automatically ordered,
they may be specified in any order, making it easy to add or remove
steps from inherited standard recipes.
* Adds column validation and introspection to Flask-SQLAlchemy_ tables,
and automatically generates metadata descriptions of what columns are
allowed to be POSTed or PUT, including the server validation rules
that will be applied to each column. This lets you specify the
validation rules in one place for both server and client.
* Provides column-level authorization for all HTTP methods.
* Supports nested child rows (relationships) in APIs (and nested
column-level authorization).
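
For illustration only, here is a small self-contained sketch of the flavor of
class-level, inheritable step declarations described above. It is **not** the
Flask-RESTful-DRY API (all names below are made up), and it omits the
automatic, constraint-based ordering of steps::

    class Resource:
        # Class-level declaration instead of hand-written method code.
        get_steps = ('authorize', 'load_row', 'serialize')

        def get(self):
            # Run each declared step; each step is a small reusable method.
            return [getattr(self, step)() for step in self.get_steps]

        def authorize(self): return 'authorized'
        def load_row(self): return {'id': 1}
        def serialize(self): return '{"id": 1}'

    class AuditedResource(Resource):
        # Subclasses adjust the inherited declaration instead of rewriting get().
        get_steps = Resource.get_steps + ('log_access',)

        def log_access(self): return 'logged'

    print(AuditedResource().get())

In the real package the execution order is derived from ordering constraints
attached to each step, so the declarations may list steps in any order.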
The documentation is on ReadTheDocs.org here_.
.. _here: http://flask-restful-dry.readthedocs.org/en/latest/dry.html
.. _Flask-RESTful: https://flask-restful.readthedocs.org
.. _Flask-SQLAlchemy: https://pythonhosted.org/Flask-SQLAlchemy/
| PypiClean |
/KDVS-2.0.0.tar.gz/KDVS-2.0.0/kdvs/bin/experiment.py |
r"""
Provides KDVS application 'experiment'. It performs prior--knowledge--guided
feature selection and re--annotation, according to specified configuration.
It uses Gene Ontology as prior knowledge source and microarray gene expression
as measured data.
IMPORTANT NOTE. This application is not fully polished; the API still needs
refinement and some details remain hard--coded.
"""
from kdvs.core.error import Error, Warn
from kdvs.core.util import getFileNameComponent, importComponent, serializeObj, \
pprintObj, deserializeObj, writeObj, serializeTxt, quote, resolveIndexes
from kdvs.fw.Annotation import get_em2annotation
from kdvs.fw.Categorizer import Categorizer
from kdvs.fw.DSV import DSV
from kdvs.fw.Job import NOTPRODUCED
from kdvs.fw.Map import SetBDMap
from kdvs.fw.Stat import Labels, RESULTS_PLOTS_ID_KEY
from kdvs.fw.impl.annotation.HGNC import correctHGNCApprovedSymbols, \
generateHGNCPreviousSymbols, generateHGNCSynonyms
from kdvs.fw.impl.app.CmdLineApp import CmdLineApp
from kdvs.fw.impl.data.PKDrivenData import PKDrivenDBDataManager, \
PKDrivenDBSubsetHierarchy
from kdvs.fw.impl.pk.go.GeneOntology import GO_id2num, GO_num2id
import collections
import glob
import operator
import os
from kdvs.core.provider import fileProvider
class MA_GO_Experiment_App(CmdLineApp):
r"""
Main application class. It interprets the instance of
:data:`~kdvs.fw.impl.app.Profile.MA_GO_PROFILE`.
"""
def prepare(self):
r"""
Add all actions, in the following order:
* :func:`resolveStaticDataFiles`
* :func:`loadStaticData`
* :func:`postprocessStaticData`
* :func:`loadUserData`
* :func:`resolveProfileComponents`
* :func:`buildGeneIDMap`
* :func:`buildPKCIDMap`
* :func:`obtainLabels`
* :func:`buildPKDrivenDataSubsets`
* :func:`buildSubsetHierarchy`
* :func:`submitSubsetOperations`
* :func:`executeSubsetOperations`
* :func:`postprocessSubsetOperations`
* :func:`performSelections`
* :func:`storeCompleteResults`
* :func:`prepareReports`
"""
self.env.addCallable(resolveStaticDataFiles)
self.env.addCallable(loadStaticData)
self.env.addCallable(postprocessStaticData)
self.env.addCallable(loadUserData)
self.env.addCallable(resolveProfileComponents)
self.env.addCallable(buildGeneIDMap)
self.env.addCallable(buildPKCIDMap)
self.env.addCallable(obtainLabels)
self.env.addCallable(buildPKDrivenDataSubsets)
self.env.addCallable(buildSubsetHierarchy)
self.env.addCallable(submitSubsetOperations)
self.env.addCallable(executeSubsetOperations)
self.env.addCallable(postprocessSubsetOperations)
self.env.addCallable(performSelections)
self.env.addCallable(storeCompleteResults)
self.env.addCallable(prepareReports)
# ---- experiment methods
def resolveStaticDataFiles(env):
r"""
Action that resolves file paths for all static data files, according to specification.
Specification is taken from 'static_data_files' dictionary that comes from default
configuration file. The names may contain '*' and are interpreted according to
:mod:`glob` module rules. Also opens the files and stores their file handles in
the same dictionary (under the keys 'path' and 'fh'). See 'kdvs/config/default_cfg.py'
for details.
See Also
--------
kdvs.core.config.getDefaultCfgFilePath
kdvs.core.config.getDefaultDataRootPath
"""
env.logger.info('Started resolving static data files')
data_path = env.var('data_path')
static_data_files = env.var('static_data_files')
for sfID, sfdata in static_data_files.iteritems():
spec = sfdata['spec']
# parse specification and resolve physical path
# specification follows rules for 'glob' file name matching
dirs = spec[:-1]
filepattern = spec[-1]
frootdir = os.path.join(data_path, *dirs)
fpatt = os.path.join(frootdir, filepattern)
foundfiles = [(p, os.path.getmtime(p)) for p in glob.glob(fpatt)]
foundfiles.sort(key=operator.itemgetter(1))
# get newest file as found
foundfile = foundfiles[0][0]
sfdata['path'] = foundfile
# open file and store fh
sfdata['fh'] = fileProvider(sfdata['path'])
env.logger.info('File %s resolved as %s' % (quote(sfID), quote(foundfile)))
env.logger.info('Finished resolving static data files')
def loadStaticData(env):
r"""
Action that loads all static data files, either into database governed by the instance
of :class:`~kdvs.core.db.DBManager`, or through associated manager, if present.
It interprets 'static_data_files' dictionary that comes from default configuration
file. It uses two exclusive elements. If 'loadToDb' is True, the file is loaded
into database and wrapped in :class:`~kdvs.fw.DSV.DSV` instance. If 'manager' is
not None, it instantiates the :class:`~kdvs.fw.PK.PKCManager` instance that
governs the content of the file; also, if debug output was requested, it instructs
the manager to :meth:`dump` all the information. See 'kdvs/config/default_cfg.py'
for details.
The 'experiment' application recognizes two static data files. See 'kdvs/data/README'
for details.
Raises
------
Warn
if more than one manager was specified for static data file
"""
env.logger.info('Started loading static data files')
dbm = env.var('dbm')
data_db_id = env.var('data_db_id')
pk_manager_dump_suffix = env.var('pk_manager_dump_suffix')
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
static_data_files = env.var('static_data_files')
for stfiledata in static_data_files.values():
if stfiledata['loadToDB']:
stfile_path = stfiledata['path']
stfile_table = stfiledata['DBID']
stfile_delim = stfiledata['metadata']['delimiter']
stfile_comment = stfiledata['metadata']['comment']
# for DSV we overwrite normal fh preservation mechanism
stfile_fh = DSV.getHandle(stfile_path, 'rb')
stfiledata['fh'] = stfile_fh
stfile_dsv = DSV(dbm, data_db_id, stfile_fh, dtname=stfile_table, delimiter=stfile_delim, comment=stfile_comment)
stfile_indexes = resolveIndexes(stfile_dsv, stfiledata['indexes'])
stfile_dsv.create(indexed_columns=stfile_indexes)
stfile_dsv.loadAll()
stfile_dsv.close()
env.addVar('%s_table' % stfiledata['DBID'], stfile_table)
env.addVar('%s_dsv' % stfiledata['DBID'], stfile_dsv)
env.logger.info('Loaded %s into %s as %s' % (stfile_path, data_db_id, stfile_table))
elif stfiledata['manager'] is not None:
if len(stfiledata['manager']) == 1:
managerClassName, managerParams = next(iter(stfiledata['manager'].items()))
managerClass = importComponent(managerClassName)
manager = managerClass()
manager.configure(**managerParams['configure'])
managerID = 'pkc_manager'
env.logger.info('Configured manager %s of class %s' % (managerID, managerClassName))
manager.load(stfiledata['fh'], **managerParams['load'])
env.logger.info('Loaded %s' % stfiledata['path'])
env.addVar(managerID, manager)
if use_debug_output:
dump = manager.dump()
dump_key = '%s_%s_%s' % (managerID, stfiledata['DBID'], pk_manager_dump_suffix)
dump_path = os.path.join(debug_output_path, dump_key)
with open(dump_path, 'wb') as f:
pprintObj(dump, f)
env.logger.info('Manager %s content dumped as %s' % (managerID, dump_key))
else:
raise Warn('More than 1 manager specified for static data file %s!' % (stfiledata['path']))
env.logger.info('Finished loading static data files')
def postprocessStaticData(env):
r"""
Action that performs postprocessing of static data. Currently, it performs
corrections of withdrawn symbols, and generates helper tables with HGNC synonyms
and previous symbols; helper tables are wrapped in :class:`~kdvs.fw.DBTable.DBTable`
instances.
See Also
--------
kdvs.fw.impl.annotation.HGNC.correctHGNCApprovedSymbols
kdvs.fw.impl.annotation.HGNC.generateHGNCPreviousSymbols
kdvs.fw.impl.annotation.HGNC.generateHGNCSynonyms
"""
env.logger.info('Started postprocessing static data')
map_db_id = env.var('map_db_id')
static_data_files = env.var('static_data_files')
hgnc_dsv_var = '%s_dsv' % static_data_files['hgnc_file']['DBID']
hgnc_dsv = env.var(hgnc_dsv_var)
# ---- correct symbols in HGNC for unified querying
correctHGNCApprovedSymbols(hgnc_dsv)
env.logger.info('HGNC symbols corrected')
# ---- create inverted table for HGNC previous symbols
previous_dt = generateHGNCPreviousSymbols(hgnc_dsv, map_db_id)
env.addVar('previous_dt', previous_dt)
env.logger.info('Generated index of HGNC Previous Symbols')
# ---- create inverted table for HGNC proper synonyms
synonyms_dt = generateHGNCSynonyms(hgnc_dsv, map_db_id)
env.addVar('synonyms_dt', synonyms_dt)
env.logger.info('Generated index of HGNC Synonyms')
env.logger.info('Finished postprocessing static data')
def loadUserData(env):
r"""
Action that resolves and loads user data files. The following profile sections are
interpreted:
* 'annotation_file'
* 'gedm_file'
* 'labels_file'
All these are DSV files; after loading into database, they are wrapped in
:class:`~kdvs.fw.DSV.DSV` instances.
See 'kdvs/example_experiment/example_experiment_cfg.py' for details.
"""
env.logger.info('Started loading user data')
dbm = env.var('dbm')
data_db_id = env.var('data_db_id')
profile = env.var('profile')
# ---- load annotations file
anno_data = profile['annotation_file']
anno_file = os.path.abspath(anno_data['path'])
anno_table = getFileNameComponent(anno_file)
anno_delim = anno_data['metadata']['delimiter']
anno_comment = anno_data['metadata']['comment']
anno_fh = DSV.getHandle(anno_file, 'rb')
anno_dsv = DSV(dbm, data_db_id, anno_fh, dtname=anno_table, delimiter=anno_delim, comment=anno_comment)
anno_indexes = resolveIndexes(anno_dsv, anno_data['indexes'])
anno_dsv.create(indexed_columns=anno_indexes)
anno_dsv.loadAll()
anno_dsv.close()
env.addVar('anno_table', anno_table)
env.addVar('anno_dsv', anno_dsv)
env.logger.info('Loaded %s into %s as %s' % (anno_file, data_db_id, anno_table))
# ---- load GEDM
gedm_data = profile['gedm_file']
gedm_file = os.path.abspath(gedm_data['path'])
gedm_table = getFileNameComponent(gedm_file)
gedm_delim = gedm_data['metadata']['delimiter']
gedm_comment = gedm_data['metadata']['comment']
gedm_fh = DSV.getHandle(gedm_file, 'rb')
gedm_dsv = DSV(dbm, data_db_id, gedm_fh, dtname=gedm_table, delimiter=gedm_delim, comment=gedm_comment)
gedm_indexes = resolveIndexes(gedm_dsv, gedm_data['indexes'])
gedm_dsv.create(indexed_columns=gedm_indexes)
gedm_dsv.loadAll()
gedm_dsv.close()
env.addVar('gedm_table', gedm_table)
env.addVar('gedm_dsv', gedm_dsv)
env.logger.info('Loaded %s into %s as %s' % (gedm_file, data_db_id, gedm_table))
# ---- load labels
labels_data = profile['labels_file']
labels_file = labels_data['path']
if labels_file is not None:
labels_file = os.path.abspath(labels_file)
labels_table = getFileNameComponent(labels_file)
labels_delim = labels_data['metadata']['delimiter']
labels_comment = labels_data['metadata']['comment']
labels_fh = DSV.getHandle(labels_file, 'rb')
labels_dsv = DSV(dbm, data_db_id, labels_fh, dtname=labels_table, delimiter=labels_delim, comment=labels_comment)
labels_indexes = resolveIndexes(labels_dsv, labels_data['indexes'])
labels_dsv.create(indexed_columns=labels_indexes)
labels_dsv.loadAll()
labels_dsv.close()
env.addVar('labels_table', labels_table)
env.addVar('labels_dsv', labels_dsv)
env.logger.info('Loaded %s into %s as %s' % (labels_file, data_db_id, labels_table))
env.logger.info('Finished loading user data')
def resolveProfileComponents(env):
r"""
Action that goes through application profile and resolves all dynamically created
components, that is, reads the individual specifications, creates instances, and
performs individual configurations. Currently, the following groups of components
are processed, and concrete instances are created:
* categorizers (:class:`~kdvs.fw.Categorizer.Categorizer`)
* orderers (:class:`~kdvs.fw.Categorizer.Orderer`)
* statistical techniques (:class:`~kdvs.fw.Stat.Technique`)
* outer selectors (:class:`~kdvs.fw.impl.stat.PKCSelector.OuterSelector`)
* inner selectors (:class:`~kdvs.fw.impl.stat.PKCSelector.InnerSelector`)
* reporters (:class:`~kdvs.fw.Report.Reporter`)
* EnvOps (:class:`~kdvs.fw.EnvOp.EnvOp`)
Also, for statistical techniques, the corresponding degrees of freedom (DOFs) are
expanded.
"""
env.logger.info('Started resolving profile components')
profile = env.var('profile')
# ---- resolve categorizers
categorizers = _resolveProfileInstanceGroup(profile['subset_categorizers'])
# add reversed lookup by internal IDs
catIDmap = dict()
for catID, ct in categorizers.iteritems():
catIDmap[ct.id] = catID
env.addVar('pc_categorizers', categorizers)
env.addVar('pc_catidmap', catIDmap)
env.logger.info('Resolved subset categorizers (%d found)' % (len(categorizers.keys())))
# ---- resolve subset orderers
sords = _resolveProfileInstanceGroup(profile['subset_orderers'])
env.addVar('pc_sords', sords)
env.logger.info('Resolved subset orderers (%d found)' % (len(sords.keys())))
# ---- resolve statistical techniques
# statistical technique is configured by parameters
stechs = _resolveProfileInstanceGroup(profile['statistical_techniques'])
# resolve null dofs
for stechInst in stechs.values():
dofs = stechInst.parameters['global_degrees_of_freedom']
if dofs is None:
dofs = (env.var('null_dof'),)
stechInst.parameters['global_degrees_of_freedom'] = dofs
env.addVar('pc_stechs', stechs)
env.logger.info('Resolved statistical techniques (%d found)' % (len(stechs.keys())))
# ---- resolve outer level selectors
# outer level selector is configured by parameters
osels = _resolveProfileInstanceGroup(profile['subset_outer_selectors'])
env.addVar('pc_osels', osels)
env.logger.info('Resolved subset outer selectors (%d found)' % (len(osels.keys())))
# ---- resolve inner level selectors
# inner level selector is configured by parameters
isels = _resolveProfileInstanceGroup(profile['subset_inner_selectors'])
env.addVar('pc_isels', isels)
env.logger.info('Resolved subset inner selectors (%d found)' % (len(isels.keys())))
# ---- resolve reporters
# reporter is configured by parameters
reporters = _resolveProfileInstanceGroup(profile['reporters'])
env.addVar('pc_reporters', reporters)
env.logger.info('Resolved reporters (%d found)' % (len(reporters.keys())))
# ---- resolve envops
# envop is configured by parameters
envops = _resolveProfileInstanceGroup(profile['envops'])
env.addVar('pc_envops', envops)
env.logger.info('Resolved EnvOps (%d found)' % (len(envops.keys())))
#
env.logger.info('Finished resolving profile components')
def buildGeneIDMap(env):
r"""
Action that constructs the concrete instance of :class:`~kdvs.fw.Map.GeneIDMap`
and builds appropriate mapping. The instance type is specified in user
configuration file as 'geneidmap_type' variable. Also, if debug output was
    requested, dumps the mapping. See
'kdvs/example_experiment/example_experiment_cfg.py' for details.
"""
env.logger.info('Started building GeneID Map')
map_db_id = env.var('map_db_id')
static_data_files = env.var('static_data_files')
hgnc_dsv_var = '%s_dsv' % static_data_files['hgnc_file']['DBID']
hgnc_dsv = env.var(hgnc_dsv_var)
anno_dsv = env.var('anno_dsv')
geneidmap_type = env.var('geneidmap_type')
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
# ---- construct GeneID map
env.logger.info('Started building GeneID map of type %s' % geneidmap_type)
geneidmap_class = importComponent(geneidmap_type)
geneidmap = geneidmap_class()
geneidmap.build(anno_dsv, hgnc_dsv, map_db_id)
env.addVar('geneidmap', geneidmap)
nentries_geneidmap = len(geneidmap.gene2emid.getFwdMap().keys())
env.logger.info('GeneID map built (%d entries found)' % (nentries_geneidmap))
# ---- serialize GeneID map
if use_debug_output:
geneidmap_key = env.var('geneidmap_key')
with open(os.path.join(debug_output_path, geneidmap_key), 'wb') as f:
serializeObj(geneidmap.gene2emid, f)
env.logger.info('GeneID map serialized to %s' % geneidmap_key)
env.logger.info('Finished building GeneID Map')
def buildPKCIDMap(env):
r"""
Action that constructs the concrete instance of :class:`~kdvs.fw.Map.PKCIDMap`
and builds appropriate mapping. The instance type is specified in user
configuration file as 'pkcidmap_type' variable. Also, if debug output was
requested, dumps the mapping. In addition, since in 'experiment' application
Gene Ontology is used as prior knowledge source, builds specialized submapping
for selected GO domain. The GO domain is specified in :data:`~kdvs.fw.impl.app.Profile.MA_GO_PROFILE`
as 'go_domain' element. See 'kdvs/example_experiment/example_experiment_cfg.py' for details.
See Also
--------
kdvs.fw.impl.map.PKCID.GPL
"""
env.logger.info('Started building PKCID Map')
map_db_id = env.var('map_db_id')
profile = env.var('profile')
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
# ---- construct PKCID map
pkcidmap_type = env.var('pkcidmap_type')
pkcidmap_class = importComponent(pkcidmap_type)
pkcidmap = pkcidmap_class()
env.logger.info('Started building PKCID map of type %s' % pkcidmap_type)
anno_dsv = env.var('anno_dsv')
pkcidmap.build(anno_dsv, map_db_id)
env.addVar('pkcidmap', pkcidmap)
nentries_pkcidmap = len(pkcidmap.pkc2emid.getFwdMap().keys())
env.logger.info('PKCID map built (%d entries found)' % (nentries_pkcidmap))
# ---- serialize PKCID map
if use_debug_output:
pkcidmap_key = env.var('pkcidmap_key')
with open(os.path.join(debug_output_path, pkcidmap_key), 'wb') as f:
serializeObj(pkcidmap.pkc2emid, f)
env.logger.info('PKCID map serialized to %s' % pkcidmap_key)
# ---- resolving map for GO domain
go_domain = profile['go_domain']
go_domain_map = pkcidmap.getMapForDomain(go_domain)
env.addVar('go_domain_map', go_domain_map)
nentries_go_domain_map = len(go_domain_map.getFwdMap().keys())
env.logger.info('Obtained detailed PKCID map for GO domain %s (%d entries found)' % (go_domain, nentries_go_domain_map))
# ---- serialize PKCID map for GO domain
if use_debug_output:
go_domain_map_key = '%s_GO_%s' % (pkcidmap_key, go_domain)
with open(os.path.join(debug_output_path, go_domain_map_key), 'wb') as f:
serializeObj(go_domain_map, f)
env.logger.info('Detailed PKCID map for GO domain %s serialized to %s' % (go_domain, go_domain_map_key))
env.logger.info('Finished building PKCID Map')
def obtainLabels(env):
r"""
Action that obtains information about samples and labels (if present) and creates
:class:`~kdvs.fw.Stat.Labels` instance. It reads samples from primary dataset,
reads labels file, and re--orders labels according to samples from primary dataset.
Primary dataset has been specified in :data:`~kdvs.fw.impl.app.Profile.MA_GO_PROFILE`
as 'gedm_file' element, loaded earlier into database, and wrapped in :class:`~kdvs.fw.DSV.DSV`
instance.
"""
env.logger.info('Started obtaining labels')
# ---- determine samples
gedm_dsv = env.var('gedm_dsv')
gedm_samples = gedm_dsv.header[1:]
env.addVar('gedm_samples', gedm_samples)
env.logger.info('%d samples obtained from data' % len(gedm_samples))
unused_sample_label = env.var('unused_sample_label')
env.logger.info('Unused sample label: %d' % unused_sample_label)
try:
labels_dsv = env.var('labels_dsv')
labels_inst = Labels(labels_dsv, unused_sample_label)
labels = labels_inst.getLabels(samples_order=gedm_samples, as_array=False)
labels_num = labels_inst.getLabels(samples_order=gedm_samples, as_array=True)
samples = labels_inst.getSamples(samples_order=gedm_samples)
env.addVar('labels_inst', labels_inst)
env.addVar('labels', labels)
env.addVar('labels_num', labels_num)
env.addVar('samples', samples)
env.logger.info('Labels read (%d used)' % (len(labels_inst.labels)))
except ValueError:
pass
env.logger.info('Finished obtaining labels')
def buildPKDrivenDataSubsets(env):
r"""
Action that builds all prior--knowledge--driven data subsets. The 'build' refers
to querying of samples and variables from primary dataset. At this stage,
the mapping 'subsets'
    * {PKC_ID : {'mat' : subsetID, 'shape' : numpy.shape(ds), 'vars' : [vars], 'samples' : [samples]}}
is constructed, and the :class:`numpy.ndarray` component of :class:`~kdvs.fw.DataSet.DataSet`
is serialized for each data subset. Currently, the instances of :class:`~kdvs.fw.DataSet.DataSet`
are not preserved to conserve memory. Also, the iterable of tuples (pkcID, size),
sorted in descending order wrt subset size (i.e. starting from largest), is
constructed here as 'pkc2ss'.
"""
env.logger.info('Started building PKC driven data subsets')
gedm_dsv = env.var('gedm_dsv')
pkcidmap = env.var('pkcidmap')
# ---- create instance
pkdm = PKDrivenDBDataManager(gedm_dsv, pkcidmap)
env.addVar('pkdm', pkdm)
env.logger.info('Created %s instance' % (pkdm.__class__.__name__))
# ---- resolve samples
try:
samples = env.var('samples')
except ValueError:
samples = '*'
# ---- generate subsets for specific PKCID map
go_domain_map = env.var('go_domain_map').getMap()
sslen = len(go_domain_map.keys())
env.logger.info('Started generating subsets (%d)' % sslen)
# create subset location
rootsm = env.var('rootsm')
rloc = env.var('root_output_location')
subsets_location_part = env.var('subsets_location')
ssloc = rootsm.sublocation_separator.join([rloc, subsets_location_part])
rootsm.createLocation(ssloc)
env.addVar('subsets_location_id', ssloc)
sslocpath = rootsm.getLocation(ssloc)
# proceed with generating
subset_dict = dict()
pkc2ss = list()
for i, pkcID in enumerate(go_domain_map.keys()):
pkc_ssinfo, pkc_ds = pkdm.getSubset(pkcID, forSamples=samples, get_ssinfo=True, get_dataset=True)
pkc_ds_content = pkc_ds.array
ds_vars = pkc_ssinfo['rows']
ds_samples = pkc_ssinfo['cols']
ssname = GO_id2num(pkcID, numint=False)
subset_dict[pkcID] = dict()
subset_dict[pkcID]['mat'] = ssname
subset_dict[pkcID]['shape'] = pkc_ds_content.shape
subset_dict[pkcID]['vars'] = ds_vars
subset_dict[pkcID]['samples'] = ds_samples
# resolve subset key and serialize subset
ss_key = ssname
ss_path = os.path.join(sslocpath, ss_key)
with open(ss_path, 'wb') as f:
serializeObj(pkc_ds_content, f)
env.logger.info('Serialized subset (%d of %d) to %s' % (i + 1, sslen, ss_key))
# add size entry
pkc2ss.append((pkcID, len(ds_vars)))
# finalize pkc2ss by sorting according to size
pkc2ss = sorted(pkc2ss, key=operator.itemgetter(1), reverse=True)
# preserve subsets
env.addVar('subsets', subset_dict)
# preserve pkc2ss
env.addVar('pkc2ss', pkc2ss)
# ---- serialize meta dictionary of subsets
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
if use_debug_output:
subsets_key = env.var('subsets_key')
subsets_txt_key = '%s%s' % (subsets_key, env.var('txt_suffix'))
subsets_path = os.path.join(debug_output_path, subsets_key)
with open(subsets_path, 'wb') as f:
serializeObj(subset_dict, f)
subsets_txt_path = os.path.join(debug_output_path, subsets_txt_key)
with open(subsets_txt_path, 'wb') as f:
pprintObj(subset_dict, f)
env.logger.info('Subsets dictionary serialized to %s' % subsets_key)
env.logger.info('Finished building PKC driven data subsets')
def buildSubsetHierarchy(env):
r"""
Action that constructs the instance of
:class:`~kdvs.fw.impl.data.PKDrivenData.PKDrivenDBSubsetHierarchy`. Also,
constructs the `operation map`, that is, determines the sequence of all
operations to be performed on each category, and within, on each data subset,
such as orderers, env--ops, statistical techniques, reporters etc. The `operation
map` has two components: executable and textual. The executable component stores
all references to actual callables to be performed; the textual component stores
all textual IDs of the configurable instances that provide the callables themselves.
The textual IDs are taken from user configuration file; the instances were created
in :meth:`resolveProfileComponents` action. In addition, if debug output was
requested, serializes constructed data structures.
"""
env.logger.info('Started building subset hierarchy')
profile = env.var('profile')
# rlocpath = env.var('root_output_path')
subsets = env.var('subsets')
pkdm = env.var('pkdm')
categorizers = env.var('pc_categorizers')
catIDmap = env.var('pc_catidmap')
orderers = env.var('pc_sords')
stechs = env.var('pc_stechs')
osels = env.var('pc_osels')
isels = env.var('pc_isels')
reporters = env.var('pc_reporters')
envops = env.var('pc_envops')
# ---- resolve samples
try:
samples = env.var('samples')
except ValueError:
samples = '*'
# ---- get related profile components
# obtain categorizers chain
cchain = profile['subset_hierarchy_categorizers_chain']
env.logger.info('Categorizers chain utilized: %s' % (cchain,))
# collect all symbols for subset hierarchy
initial_symbols = subsets.keys()
env.logger.info('Initial symbols (PKC IDs) resolved (%d found)' % len(initial_symbols))
# ---- build subset hierarchy with recognized categorizers
pkdrivenss = PKDrivenDBSubsetHierarchy(pkdm_inst=pkdm, samples_iter=samples)
pkdrivenss.build(cchain, categorizers, initial_symbols)
env.logger.info('Subset hierarchy built: %s' % pkdrivenss.hierarchy)
env.logger.info('Root symbol tree categories: %s' % (pkdrivenss.symboltree.keys()))
for cat, stcnt in pkdrivenss.symboltree.iteritems():
for symbolcat, symbols in stcnt.iteritems():
env.logger.info('Symbol tree category %s of %s with %d symbols' % (symbolcat, cat, len(symbols)))
env.addVar('pkdrivenss', pkdrivenss)
# obtain components map
cmap = profile['subset_hierarchy_components_map']
# prepare operations map for each member of all symbols groups
# create map with instances and textual image with IDs as referred to in configuration file
operations_map = dict()
operations_map_img = dict()
# walk hierarchy with this embedded recursive function
def _walk_hierarchy(parent_category):
if parent_category in pkdrivenss.symboltree:
# if requested category is present we proceed
st_elem = pkdrivenss.symboltree[parent_category]
# obtain descending categories and associated symbols lists
for category, syms in st_elem.iteritems():
# identify categorizer and category
intID, intCat = Categorizer.deuniquifyCategory(category)
if intID is not None and intCat is not None:
catID = catIDmap[intID]
# get components
components = cmap[catID][intCat]
# single orderer
ordererID = components['orderer']
try:
orderer_inst = orderers[ordererID]
except KeyError:
orderer_inst = None
# single technique
techID = components['technique']
try:
tech_inst = stechs[techID]
except KeyError:
tech_inst = None
# single outer selector
oselID = components['outer_selector']
try:
osel_inst = osels[oselID]
except KeyError:
osel_inst = None
# single inner selector
iselID = components['inner_selector']
try:
isel_inst = isels[iselID]
except KeyError:
isel_inst = None
# category can have many reporters that are executed in order specified here
reporterIDs = components['reporter']
try:
reporter_insts = [reporters[rID] for rID in reporterIDs]
except:
reporter_insts = None
# category can have many pre-EnvOps that are executed in order specified here
preenvopIDs = components['preenvop']
try:
preenvop_insts = [envops[eID] for eID in preenvopIDs]
except:
preenvop_insts = None
# category can have many post-EnvOps that are executed in order specified here
postenvopIDs = components['postenvop']
try:
postenvop_insts = [envops[eID] for eID in postenvopIDs]
except:
postenvop_insts = None
# miscellaneous data that follows
miscData = components['misc']
# fill operations map
operations_map[category] = dict()
operations_map_img[category] = dict()
# fill real map record with instances and image map record with IDs
# global operations (on all symbols)
if len(miscData) > 0:
operations_map[category]['__misc_data__'] = miscData
operations_map_img[category]['__misc_data__'] = miscData
operations_map[category]['__orderer__'] = orderer_inst
operations_map_img[category]['__orderer__'] = ordererID
operations_map[category]['__outer_selector__'] = osel_inst
operations_map_img[category]['__outer_selector__'] = oselID
operations_map[category]['__inner_selector__'] = isel_inst
operations_map_img[category]['__inner_selector__'] = iselID
operations_map[category]['__reporters__'] = reporter_insts
operations_map_img[category]['__reporters__'] = reporterIDs
operations_map[category]['__preenvops__'] = preenvop_insts
operations_map_img[category]['__preenvops__'] = preenvopIDs
operations_map[category]['__postenvops__'] = postenvop_insts
operations_map_img[category]['__postenvops__'] = postenvopIDs
# local operations (for each symbol)
for sym in syms:
operations_map[category][sym] = dict()
operations_map_img[category][sym] = dict()
operations_map[category][sym]['__technique__'] = tech_inst
operations_map_img[category][sym]['__technique__'] = techID
# operations_map[category][sym]['__reporter__'] = reporter_inst
# operations_map_img[category][sym]['__reporter__'] = reporterID
# repeat the same for each descending category
_walk_hierarchy(category)
else:
# (done for visibility)
# otherwise we simply return and associated execution branch ends
pass
# ---- perform walking the hierarchy starting from root
env.logger.info('Started building operations map')
_walk_hierarchy(None)
env.logger.info('Finished building operations map')
# preserve operation maps
env.addVar('operations_map', operations_map)
env.addVar('operations_map_img', operations_map_img)
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
if use_debug_output:
# ---- serialize subset hierarchy
pkdrivenss_imgobj = {'hierarchy' : dict(pkdrivenss.hierarchy), 'symboltree' : dict(pkdrivenss.symboltree)}
subset_hierarchy_key = env.var('subset_hierarchy_key')
subset_hierarchy_txt_key = '%s%s' % (subset_hierarchy_key, env.var('txt_suffix'))
subset_hierarchy_path = os.path.join(debug_output_path, subset_hierarchy_key)
with open(subset_hierarchy_path, 'wb') as f:
serializeObj(pkdrivenss_imgobj, f)
subset_hierarchy_txt_path = os.path.join(debug_output_path, subset_hierarchy_txt_key)
with open(subset_hierarchy_txt_path, 'wb') as f:
pprintObj(pkdrivenss_imgobj, f)
env.logger.info('Subset hierarchy serialized to %s' % subset_hierarchy_key)
# ---- serialize operations map
operations_map_imgobj = operations_map_img
operations_map_key = env.var('operations_map_key')
operations_map_txt_key = '%s%s' % (operations_map_key, env.var('txt_suffix'))
operations_map_path = os.path.join(debug_output_path, operations_map_key)
with open(operations_map_path, 'wb') as f:
serializeObj(operations_map_imgobj, f)
operations_map_txt_path = os.path.join(debug_output_path, operations_map_txt_key)
with open(operations_map_txt_path, 'wb') as f:
pprintObj(operations_map_imgobj, f)
env.logger.info('Operations map serialized to %s' % operations_map_key)
env.logger.info('Finished building subset hierarchy')
def submitSubsetOperations(env):
r"""
Action that does the following:
* instantiates requested concrete :class:`~kdvs.fw.Job.JobContainer` and :class:`~kdvs.fw.Job.JobGroupManager` instances, as specified in configuration file(s)
* for each category:
* executes associated pre--Env-Op(s)
* determines `test mode` directives, if any; in test mode, only a fraction of the computational jobs is executed;
looks for two directives in dictionary 'subset_hierarchy_components_map'->categorizer_name->category_name->'misc'
in :data:`~kdvs.fw.impl.app.Profile.MA_GO_PROFILE`:
* 'test_mode_elems' (integer) -- number of test data subsets to consider
* 'test_mode_elems_order' (string) ('first'/'last') -- consider the 'first' or 'last' N data subsets
(only computational jobs generated for the specified test data subsets will be executed)
* determines `submission order`, i.e. the final list of data subsets to process further
* executes associated orderer(s) on the generated submission order
* for each data subset:
* generates all job(s) and adds them to job container
* starts job container
* serializes the following technical mapping: { internal_job_ID : custom_job_ID },
where internal job ID is assigned by job container and custom job ID comes
from statistical technique
* if debug output was requested, serializes the submission order
"""
env.logger.info('Started submitting subset operations')
profile = env.var('profile')
categorizers = env.var('pc_categorizers')
operations_map = env.var('operations_map')
operations_map_img = env.var('operations_map_img')
subsets = env.var('subsets')
labels_num = env.var('labels_num')
samples = env.var('samples')
# ---- instantiate job container
job_container_type = env.var('job_container_type')
job_container_cfg = env.var('job_container_cfg')
jobContainer = importComponent(job_container_type)(**job_container_cfg)
# ---- instantiate job group manager
job_group_manager_type = env.var('job_group_manager_type')
job_group_manager_cfg = env.var('job_group_manager_cfg')
jobGroupManager = importComponent(job_group_manager_type)(**job_group_manager_cfg)
# get location of subsets
rootsm = env.var('rootsm')
rloc = env.var('root_output_location')
ss_loc_id = env.var('subsets_location_id')
sslocpath = rootsm.getLocation(ss_loc_id)
# prepare location for results
subsets_results_location_part = env.var('subsets_results_location')
ssresloc = rootsm.sublocation_separator.join([rloc, subsets_results_location_part])
rootsm.createLocation(ssresloc)
env.addVar('subsets_results_location_id', ssresloc)
# prepare location for jobs
jobs_location_part = env.var('jobs_location')
jobsloc = rootsm.sublocation_separator.join([rloc, jobs_location_part])
rootsm.createLocation(jobsloc)
env.addVar('jobs_location_id', jobsloc)
jobs_path = rootsm.getLocation(jobsloc)
env.addVar('jobs_path', jobs_path)
# get text suffix
txt_suffix = env.var('txt_suffix')
# cached job dictionaries
# jobs grouped by categorizers
ss_jobs = dict()
# linear cache of job objects (volatile, not saved)
all_jobs = dict()
# job ID map
# (map between automatically assigned jobIDs and customIDs created by the user)
jobIDmap = dict()
# default additional job data
additionalJobData = { 'samples' : samples }
# # get pkc2ss mapping
# misc = env.var('misc')
# pkc2ss = misc['data_pkc2ss']
pkc2ss = env.var('pkc2ss')
# test jobs may be requested
test_mode_elems = None
test_mode_elems_order = None
# submission order for all categories
submission_order = dict()
ss_submitted = dict()
# one must walk categories in relative order to categorizers chain
# the order of categories within categories is irrelevant
cchain = profile['subset_hierarchy_categorizers_chain']
# ---- walk categorizers
for categorizerID in cchain:
categorizer = categorizers[categorizerID]
categories = [categorizer.uniquifyCategory(c) for c in categorizer.categories()]
submission_order[categorizerID] = dict()
ss_submitted[categorizerID] = dict()
ss_jobs[categorizerID] = dict()
# ---- walk associated categories
for category in categories:
# ---- process operations map
cdata = operations_map[category]
env.logger.info('Started processing operations for category %s' % category)
# ---- execute all pre-EnvOps here
preenvops = cdata['__preenvops__']
if preenvops is not None:
env.logger.info('Found %d pre-EnvOps for category %s : %s' % (len(preenvops), category, operations_map_img[category]['__preenvops__']))
for preenvopID, preenvop in zip(operations_map_img[category]['__preenvops__'], preenvops):
preenvop.perform(env)
env.logger.info('Pre-EnvOp %s executed' % (preenvopID))
else:
env.logger.info('No pre-EnvOps present for category %s' % (category))
# get misc data if any
try:
misc_data = cdata['__misc_data__']
env.logger.info('Found misc data for category %s' % (category))
try:
test_mode_elems = misc_data['test_mode_elems']
test_mode_elems_order = misc_data['test_mode_elems_order']
env.logger.info('Test mode specification (#: %d, order: %s) present for category %s' % (test_mode_elems, test_mode_elems_order, category))
except KeyError:
test_mode_elems = None
test_mode_elems_order = None
except KeyError:
test_mode_elems = None
test_mode_elems_order = None
env.logger.info('No misc data present for category %s' % (category))
# get orderer instance
orderer = cdata['__orderer__']
if orderer is not None:
env.logger.info('Found orderer for category %s : %s' % (category, operations_map_img[category]['__orderer__']))
else:
env.logger.info('No orderer present for category %s' % (category))
# ---- get all subset symbols for this category
symbols = set([s for s in cdata.keys() if not s.startswith('__') and not s.endswith('__')])
# symbols = set(cdata.keys()) - set(['__misc_data__', '__orderer__', '__outer_selector__'])
env.logger.info('Symbols found for category %s : %d' % (category, len(symbols)))
# ---- apply orderer
env.logger.info('Started determining subset ordering')
if orderer is not None:
# get all PKC IDs already sorted starting from the largest subset
local_pkc2ss = [pss[0] for pss in pkc2ss if pss[0] in symbols]
env.logger.info('Built local pkc2ss map (%d elements found)' % (len(local_pkc2ss)))
orderer.build(local_pkc2ss)
ss_submission_order = orderer.order()
env.logger.info('Submission order determined from orderer')
else:
ss_submission_order = symbols
env.logger.info('Submission order non-specific')
# ---- process test requests
if test_mode_elems is not None and test_mode_elems_order is not None:
if test_mode_elems_order == 'first':
ss_submission_order = ss_submission_order[:test_mode_elems]
elif test_mode_elems_order == 'last':
ss_submission_order = ss_submission_order[-test_mode_elems:]
test_submission_listing_thr = env.var('test_submission_listing_thr')
if len(ss_submission_order) <= test_submission_listing_thr:
subm_listing = ', '.join(ss_submission_order)
else:
subm_listing = '>%d,skipped' % test_submission_listing_thr
env.logger.info('Test mode submission requested for category %s (%s subsets): %s' % (category, len(ss_submission_order), subm_listing))
env.logger.info('NOTE: only requested test jobs will be submitted')
env.logger.info('Finished determining subset ordering')
# ---- store determined submission order
submission_order[categorizerID][category] = list(ss_submission_order)
ss_submitted[categorizerID][category] = list()
ss_jobs[categorizerID][category] = dict()
total_ss_jobs = len(ss_submission_order)
# ---- submit subset operations
for i, pkcid in enumerate(ss_submission_order):
# obtain operations for subset
scdata = cdata[pkcid]
# get technique
technique = scdata['__technique__']
technique_id = operations_map_img[category][pkcid]['__technique__']
# # get reporter ID
# reporter_id = operations_map_img[category][pkcid]['__reporter__']
if technique is not None:
# ---- obtain subset instance
ss = subsets[pkcid]
ssname = ss['mat']
# ---- deserialize subset
ss_path = os.path.join(sslocpath, ssname)
with open(ss_path, 'rb') as f:
ss_num = deserializeObj(f)
env.logger.info('Deserialized subset %s' % ssname)
# cache jobs
ss_jobs[categorizerID][category][pkcid] = dict()
# resolve assignment of jobs to group
job_group = ssname
# job may be importable if run with remote job container
job_importable = technique.parameters['job_importable']
# lazy evaluation of jobs
for customID, job in technique.createJob(ssname, ss_num, labels_num, additionalJobData):
# add job to container
jobID = jobContainer.addJob(job, importable=job_importable)
# jobID = jobContainer.addJob(job)
# assign job to group
jobGroupManager.addJobIDToGroup(job_group, jobID)
# add to cache grouped by categorizers
ss_jobs[categorizerID][category][pkcid][jobID] = dict()
ss_jobs[categorizerID][category][pkcid][jobID]['job'] = job
ss_jobs[categorizerID][category][pkcid][jobID]['mat'] = ssname
ss_jobs[categorizerID][category][pkcid][jobID]['technique'] = technique_id
# ss_jobs[categorizerID][category][pkcid][jobID]['reporter'] = reporter_id
# add to linear cache
all_jobs[jobID] = dict()
all_jobs[jobID]['job'] = job
all_jobs[jobID]['mat'] = ssname
all_jobs[jobID]['technique'] = technique_id
# all_jobs[jobID]['reporter'] = reporter_id
# store original job information in jobs location
if customID is not None:
jobIDmap[jobID] = customID
env.logger.info('Custom ID provided for job %s: %s' % (jobID, customID))
ss_jobs[categorizerID][category][pkcid][jobID]['customID'] = customID
all_jobs[jobID]['customID'] = customID
# store these raw job data
job_stor = all_jobs[jobID]
job_stor_key = customID
with open(os.path.join(jobs_path, job_stor_key), 'wb') as f:
serializeObj(job_stor, f)
# job_stor_txt_key = '%s%s' % (job_stor_key, txt_suffix)
# with open(os.path.join(jobs_path, job_stor_txt_key), 'wb') as f:
# pprintObj(job_stor, f)
# submission finished
env.logger.info('Job submitted for %s (%d of %d) (in group %s): %s' % (pkcid, i + 1, total_ss_jobs, job_group, jobID))
ss_submitted[categorizerID][category].append(pkcid)
env.logger.info('Finished processing operations for category %s' % category)
# finished, preserve submission order
env.addVar('submission_order', submission_order)
#
jobContainer.start()
env.logger.info('Job container started with %d jobs' % (jobContainer.getJobCount()))
env.addVar('jobContainer', jobContainer)
env.addVar('ss_jobs', ss_jobs)
env.addVar('all_jobs', all_jobs)
env.addVar('ss_submitted', ss_submitted)
env.addVar('jobGroupManager', jobGroupManager)
# immediately store jobIDmap if any customIDs were provided
jobID_map_key = env.var('jobID_map_key')
if len(jobIDmap.keys()) > 0:
with open(os.path.join(jobs_path, jobID_map_key), 'wb') as f:
serializeObj(jobIDmap, f)
jobID_map_txt_key = '%s%s' % (jobID_map_key, txt_suffix)
jobID_map_lines = ["%s\t%s\n" % (jid, jobIDmap[jid]) for jid in sorted(jobIDmap.keys())]
with open(os.path.join(jobs_path, jobID_map_txt_key), 'wb') as f:
serializeTxt(jobID_map_lines, f)
# pprintObj(jobIDmap, f)
# env.addVar('jobIDmap', jobIDmap)
# store submission order for debug purposes
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
if use_debug_output:
submission_order_key = env.var('submission_order_key')
submission_order_txt_key = '%s%s' % (submission_order_key, txt_suffix)
submission_order_path = os.path.join(debug_output_path, submission_order_key)
with open(submission_order_path, 'wb') as f:
serializeObj(submission_order, f)
submission_order_txt_path = os.path.join(debug_output_path, submission_order_txt_key)
with open(submission_order_txt_path, 'wb') as f:
pprintObj(submission_order, f)
env.logger.info('Submission order dictionary serialized to %s' % submission_order_key)
env.logger.info('Finished submitting subset operations')
def executeSubsetOperations(env):
r"""
Action that performs the following:
* closes job container and executes submitted jobs; this call is blocking for most job containers;
any exceptions from jobs are serialized for further manual inspection
* :meth:`postClose`-ses job container and serializes its technical data obtained with :meth:`getMiscData`, if any
* collects all raw job results and prepares them for further post--processing and generation of :class:`~kdvs.fw.Stat.Results` instances
"""
env.logger.info('Started executing subset operations')
# get root output location (for subsets)
rootsm = env.var('rootsm')
# rlocpath = env.var('root_output_path')
# retrieve results location
ssresloc = env.var('subsets_results_location_id')
ssreslocpath = rootsm.getLocation(ssresloc)
# retrieve jobs location path
jobs_path = env.var('jobs_path')
jobs_raw_output_suffix = env.var('jobs_raw_output_suffix')
txt_suffix = env.var('txt_suffix')
#
jobContainer = env.var('jobContainer')
# ss_jobs = env.var('ss_jobs')
all_jobs = env.var('all_jobs')
# possibly blocking call
env.logger.info('About to close job container (possibly blocking call)')
jexc = jobContainer.close()
env.logger.info('Job container closed (%d job exceptions)' % (len(jexc)))
if len(jexc) > 0:
jobs_exceptions_key = env.var('jobs_exceptions_key')
jobs_exceptions_txt_key = '%s%s' % (jobs_exceptions_key, txt_suffix)
with open(os.path.join(ssreslocpath, jobs_exceptions_key), 'wb') as f:
serializeObj(jexc, f)
with open(os.path.join(ssreslocpath, jobs_exceptions_txt_key), 'wb') as f:
pprintObj(jexc, f)
env.logger.info('Jobs exceptions serialized to %s' % jobs_exceptions_key)
# ---- execute postClose for current job container
destPath = jobs_path
env.logger.info('Started post closing job container with destination path "%s"' % destPath)
jobContainer.postClose(destPath)
env.logger.info('Finished post closing job container')
# ---- store misc data from job container
jmdata = jobContainer.getMiscData()
jobs_misc_data_key = env.var('jobs_misc_data_key')
jobs_misc_data_txt_key = '%s%s' % (jobs_misc_data_key, txt_suffix)
with open(os.path.join(destPath, jobs_misc_data_key), 'wb') as f:
serializeObj(jmdata, f)
with open(os.path.join(destPath, jobs_misc_data_txt_key), 'wb') as f:
pprintObj(jmdata, f)
env.logger.info('Job container misc data serialized to %s' % jobs_misc_data_key)
# ---- retrieve results
# the content of all_jobs and ss_jobs will be updated simultaneously
for jobID, jobdata in all_jobs.iteritems():
jobResult = jobContainer.getJobResult(jobID)
jobdata['job'].result = jobResult
# save raw output immediately if customID was provided
try:
customID = jobdata['customID']
jobs_raw_output_key = '%s_%s' % (customID, jobs_raw_output_suffix)
with open(os.path.join(jobs_path, jobs_raw_output_key), 'wb') as f:
serializeObj(jobResult, f)
# jobs_raw_output_txt_key = '%s%s' % (jobs_raw_output_key, txt_suffix)
# with open(os.path.join(jobs_path, jobs_raw_output_txt_key), 'wb') as f:
# pprintObj(jobResult, f)
except:
pass
env.logger.info('Job results collected')
# TODO: decide if we want to store such large object!
# use_debug_output = env.var('use_debug_output')
# debug_output_path = env.var('debug_output_path')
# if use_debug_output:
# txt_suffix = env.var('txt_suffix')
# subsets_results_key = env.var('subsets_results_key')
# subsets_results_txt_key = '%s%s' % (subsets_results_key, txt_suffix)
# subsets_results_path = os.path.join(debug_output_path, subsets_results_key)
# with open(subsets_results_path, 'wb') as f:
# serializeObj(ss_jobs, f)
# subsets_results_txt_path = os.path.join(debug_output_path, subsets_results_txt_key)
# with open(subsets_results_txt_path, 'wb') as f:
# pprintObj(ss_jobs, f)
# env.logger.info('Subsets results dictionary serialized to %s' % subsets_results_key)
env.logger.info('Finished executing subset operations')
def postprocessSubsetOperations(env):
r"""
Action that performs the following:
* checks completion of all jobs, and all individual job groups if any
* for completed jobs and job groups, generates :class:`~kdvs.fw.Stat.Results` instances
* serializes the technical mapping { technique_ID : [subset_IDs] }, available as 'technique2ssname'
* if debug output was requested, serializes the job group completion dictionary
* creates the following technical mapping available as 'technique2DOF', and serializes it if debug output was requested:
{techniqueID : { 'DOFS_IDXS': (0, 1, ..., n), 'DOFs': (name_DOF0, name_DOF1, ..., name_DOFn)}}
* for each category:
* executes associated post--Env-Op(s)
"""
env.logger.info('Started postprocessing subset operations')
rootsm = env.var('rootsm')
# rlocpath = env.var('root_output_path')
# get main results location
ssrootresloc = env.var('subsets_results_location_id')
# get miscellaneous
subset_results_suffix = env.var('subset_results_suffix')
txt_suffix = env.var('txt_suffix')
# get raw jobs results
all_jobs = env.var('all_jobs')
stechs = env.var('pc_stechs')
# get job group manager
jobGroupManager = env.var('jobGroupManager')
# prepare dictionary of completed groups
groupsCompleted = dict()
allJobGroups = jobGroupManager.getGroups()
for jobGroup in allJobGroups:
groupJobs = jobGroupManager.getGroupJobsIDs(jobGroup)
groupsCompleted[jobGroup] = dict()
groupsCompleted[jobGroup]['jobs'] = dict()
for gj in groupJobs:
groupsCompleted[jobGroup]['jobs'][gj] = False
groupsCompleted[jobGroup]['completed'] = False
# ---- loop through all jobs and check their status
for jobID, jobdata in all_jobs.iteritems():
jobGroup = jobGroupManager.findGroupByJobID(jobID)
jobResult = jobdata['job'].result
if jobResult != NOTPRODUCED:
groupsCompleted[jobGroup]['jobs'][jobID] = True
cmpl_count = 0
for jobGroup in allJobGroups:
cmpl_status = all(groupsCompleted[jobGroup]['jobs'].values())
groupsCompleted[jobGroup]['completed'] = cmpl_status
if cmpl_status:
cmpl_count += 1
env.logger.info('Job groups completed: %d, not completed: %d' % (cmpl_count, len(allJobGroups) - cmpl_count))
# ---- send jobs from completed groups to postprocessing and produce final Results
ssIndResults = dict()
technique2ssname = collections.defaultdict(set)
env.logger.info('Started job postprocessing')
for jobGroup in allJobGroups:
if groupsCompleted[jobGroup]['completed']:
groupJobIDs = jobGroupManager.getGroupJobsIDs(jobGroup)
# determine common technique and subset name across completed jobs
groupTechnique = set()
groupSSName = set()
groupJobs = list()
for jobID in groupJobIDs:
jobdata = all_jobs[jobID]
groupTechnique.add(all_jobs[jobID]['technique'])
groupSSName.add(all_jobs[jobID]['mat'])
groupJobs.append(all_jobs[jobID]['job'])
# should not be thrown but to be safe
if len(groupTechnique) > 1 or len(groupSSName) > 1:
raise Error('Jobs of group %s do not have the same technique and/or ssname! (%s,%s)' % (jobGroup, list(groupTechnique), list(groupSSName)))
ssname = next(iter(groupSSName))
techID = next(iter(groupTechnique))
# group ssnames across techniques
technique2ssname[techID].add(ssname)
technique = stechs[techID]
# compile runtime data for this technique
# runtime data consists of useful information that can be later used
# in reporting results
runtime_data = dict()
runtime_data['techID'] = techID
# ---- produce Results
result = technique.produceResults(ssname, groupJobs, runtime_data)
# store Results
ssIndResults[ssname] = result
# # ---- serialize Results
# # create individual location for subset
# ssresloc = rootsm.sublocation_separator.join([ssrootresloc, ssname])
# rootsm.createLocation(ssresloc)
# ssreslocpath = rootsm.getLocation(ssresloc)
# # first save any plots separately
# ssplots = result[RESULTS_PLOTS_ID_KEY]
# # for plotname, plotcontent in ssplots.iteritems():
# for plotname in sorted(ssplots.keys()):
# plotcontent = ssplots[plotname]
# plotpath = os.path.join(ssreslocpath, plotname)
# with open(plotpath, 'wb') as f:
# writeObj(plotcontent, f)
# env.logger.info('Plot %s saved' % (plotname))
# # serialize individual output
# result_img = dict((k, result[k]) for k in result.keys())
# del result_img[RESULTS_PLOTS_ID_KEY]
# ind_subset_result_key = '%s%s' % (ssname, subset_results_suffix)
# ind_subset_result_path = os.path.join(ssreslocpath, ind_subset_result_key)
# with open(ind_subset_result_path, 'wb') as f:
# # serializeObj(result, f)
# serializeObj(result_img, f)
# ind_subset_result_txt_key = '%s%s' % (ind_subset_result_key, txt_suffix)
# ind_subset_result_txt_path = os.path.join(ssreslocpath, ind_subset_result_txt_key)
# with open(ind_subset_result_txt_path, 'wb') as f:
# # pprintObj(result, f)
# pprintObj(result_img, f)
# env.logger.info('Results for %s serialized to %s' % (ssname, ind_subset_result_key))
env.logger.info('Finished job postprocessing')
# serialize very useful technique2ssname map in results root location
ssrootreslocpath = rootsm.getLocation(ssrootresloc)
technique2ssname = dict([(k, list(v)) for k, v in technique2ssname.iteritems()])
technique2ssname_key = env.var('technique2ssname_key')
technique2ssname_path = os.path.join(ssrootreslocpath, technique2ssname_key)
with open(technique2ssname_path, 'wb') as f:
serializeObj(technique2ssname, f)
technique2ssname_txt_key = '%s%s' % (technique2ssname_key, txt_suffix)
technique2ssname_txt_path = os.path.join(ssrootreslocpath, technique2ssname_txt_key)
t2ss_lines = list()
for t, ssl in technique2ssname.iteritems():
t2ss_lines.append('%s\n' % t)
for ss in ssl:
t2ss_lines.append('\t%s\n' % ss)
with open(technique2ssname_txt_path, 'wb') as f:
# pprintObj(technique2ssname, f)
serializeTxt(t2ss_lines, f)
env.logger.info('Technique to subset names map serialized to %s' % technique2ssname_key)
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
if use_debug_output:
group_completion_key = env.var('group_completion_key')
group_completion_txt_key = '%s%s' % (group_completion_key, txt_suffix)
group_completion_path = os.path.join(debug_output_path, group_completion_key)
with open(group_completion_path, 'wb') as f:
serializeObj(groupsCompleted, f)
group_completion_txt_path = os.path.join(debug_output_path, group_completion_txt_key)
with open(group_completion_txt_path, 'wb') as f:
pprintObj(groupsCompleted, f)
env.logger.info('Group completion dictionary serialized to %s' % group_completion_key)
# create technique2DOF mapping
technique2DOF = dict()
for techID in technique2ssname.keys():
technique2DOF[techID] = dict()
technique = stechs[techID]
dofs = technique.parameters['global_degrees_of_freedom']
dof_idxs = tuple(range(len(dofs)))
technique2DOF[techID]['DOFs'] = dofs
technique2DOF[techID]['DOFS_IDXS'] = dof_idxs
if use_debug_output:
technique2dof_key = env.var('technique2dof_key')
technique2dof_txt_key = '%s%s' % (technique2dof_key, txt_suffix)
technique2dof_path = os.path.join(debug_output_path, technique2dof_key)
with open(technique2dof_path, 'wb') as f:
serializeObj(technique2DOF, f)
technique2dof_txt_path = os.path.join(debug_output_path, technique2dof_txt_key)
with open(technique2dof_txt_path, 'wb') as f:
pprintObj(technique2DOF, f)
env.logger.info('Technique2DOF mapping serialized to %s' % technique2dof_key)
# env.addVar('technique2ssname', technique2ssname)
env.addVar('technique2DOF', technique2DOF)
env.addVar('ssIndResults', ssIndResults)
# ---- also here, immediately after postprocessing, execute remaining post-EnvOps
profile = env.var('profile')
categorizers = env.var('pc_categorizers')
operations_map = env.var('operations_map')
operations_map_img = env.var('operations_map_img')
# one must walk categories in relative order to categorizers chain
# the order of categories within categories is irrelevant
cchain = profile['subset_hierarchy_categorizers_chain']
# ---- walk categorizers
for categorizerID in cchain:
categorizer = categorizers[categorizerID]
categories = [categorizer.uniquifyCategory(c) for c in categorizer.categories()]
# ---- walk associated categories
for category in categories:
# ---- process operations map
cdata = operations_map[category]
env.logger.info('Started processing operations for category %s' % category)
# ---- execute all post-EnvOps here
postenvops = cdata['__postenvops__']
if postenvops is not None:
env.logger.info('Found %d post-EnvOps for category %s : %s' % (len(postenvops), category, operations_map_img[category]['__postenvops__']))
for postenvopID, postenvop in zip(operations_map_img[category]['__postenvops__'], postenvops):
postenvop.perform(env)
env.logger.info('Post-EnvOp %s executed' % (postenvopID))
else:
env.logger.info('No post-EnvOps present for category %s' % (category))
env.logger.info('Finished postprocessing subset operations')
def performSelections(env):
r"""
Action that performs the following:
* for each category:
* having all :class:`~kdvs.fw.Stat.Results` instances, executes associated outer selector(s) and inner selector(s)
* if debug output was requested, serialize direct output of outer and inner selector(s)
"""
env.logger.info('Started performing selections')
txt_suffix = env.var('txt_suffix')
# rlocpath = env.var('root_output_path')
profile = env.var('profile')
categorizers = env.var('pc_categorizers')
operations_map = env.var('operations_map')
operations_map_img = env.var('operations_map_img')
submission_order = env.var('submission_order')
subsets = env.var('subsets')
ssIndResults = env.var('ssIndResults')
outerSelection = dict()
innerSelection = dict()
# one must walk categories in relative order to categorizers chain
# the order of categories within categories is irrelevant
cchain = profile['subset_hierarchy_categorizers_chain']
# ---- walk categorizers
for categorizerID in cchain:
categorizer = categorizers[categorizerID]
categories = [categorizer.uniquifyCategory(c) for c in categorizer.categories()]
outerSelection[categorizerID] = dict()
innerSelection[categorizerID] = dict()
# ---- walk associated categories
for category in categories:
outerSelection[categorizerID][category] = dict()
innerSelection[categorizerID][category] = dict()
# ---- process operations map
cdata = operations_map[category]
env.logger.info('Started processing operations for category %s' % category)
# ---- get outer selector instance and process iterable
outer_selector = cdata['__outer_selector__']
if outer_selector is not None:
env.logger.info('Found outer selector for category %s : %s' % (category, operations_map_img[category]['__outer_selector__']))
# ---- get submission order
subm_order = submission_order[categorizerID][category]
env.logger.info('Submission order reconstructed with %d entries' % (len(subm_order)))
# ---- construct results iterable ordered according to submission order
# NOTE : we are careful here since we can have less results than
# submission order may suggest! this is very likely when using
# test mode, and in addition if top category encloses ALL subsets,
# then it is pretty much guaranteed (see global submission order
# dump for reference)
indResultsList = list()
for pkcid in subm_order:
try:
indResultsList.append(ssIndResults[subsets[pkcid]['mat']])
except:
pass
# indResultsList = [ssIndResults[subsets[pkcid]['mat']] for pkcid in subm_order]
env.logger.info('Individual results obtained (%d entries)' % (len(indResultsList)))
# ---- perform outer selection
outerSelectionResultsList = outer_selector.perform(indResultsList)
for ss, oselres in zip(subm_order, outerSelectionResultsList):
outerSelection[categorizerID][category][ss] = oselres
env.logger.info('Outer selection (selection level 1) done for %d individual results' % (len(indResultsList)))
else:
env.logger.info('No outer selector present for category %s' % (category))
# ---- get inner selector instance and process iterable
inner_selector = cdata['__inner_selector__']
if inner_selector is not None:
env.logger.info('Found inner selector for category %s : %s' % (category, operations_map_img[category]['__inner_selector__']))
innerSelectionResultsDict = inner_selector.perform(outerSelection[categorizerID][category], ssIndResults, subsets)
for pkcid, isdata in innerSelectionResultsDict.iteritems():
innerSelection[categorizerID][category][pkcid] = isdata
env.logger.info('Inner selection (selection level 2) done for %d individual results' % (len(ssIndResults.keys())))
else:
env.logger.info('No inner selector present for category %s' % (category))
env.addVar('outerSelection', outerSelection)
env.addVar('innerSelection', innerSelection)
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
if use_debug_output:
outer_selection_key = env.var('outer_selection_key')
outer_selection_txt_key = '%s%s' % (outer_selection_key, txt_suffix)
outer_selection_path = os.path.join(debug_output_path, outer_selection_key)
with open(outer_selection_path, 'wb') as f:
serializeObj(outerSelection, f)
outer_selection_txt_path = os.path.join(debug_output_path, outer_selection_txt_key)
with open(outer_selection_txt_path, 'wb') as f:
pprintObj(outerSelection, f)
env.logger.info('Outer selection results dictionary serialized to %s' % outer_selection_key)
inner_selection_key = env.var('inner_selection_key')
inner_selection_txt_key = '%s%s' % (inner_selection_key, txt_suffix)
inner_selection_path = os.path.join(debug_output_path, inner_selection_key)
with open(inner_selection_path, 'wb') as f:
serializeObj(innerSelection, f)
inner_selection_txt_path = os.path.join(debug_output_path, inner_selection_txt_key)
with open(inner_selection_txt_path, 'wb') as f:
pprintObj(innerSelection, f)
env.logger.info('Inner selection results dictionary serialized to %s' % inner_selection_key)
env.logger.info('Finished performing selections')
def storeCompleteResults(env):
r"""
Action that performs the following:
* for each data subset:
* create individual location for results under current storage manager
* save all generated plots as physical files there
* serialize :class:`~kdvs.fw.Stat.Results` instance there
"""
env.logger.info('Started storing complete job results')
rootsm = env.var('rootsm')
# get main results location
ssrootresloc = env.var('subsets_results_location_id')
subset_results_suffix = env.var('subset_results_suffix')
txt_suffix = env.var('txt_suffix')
ssIndResults = env.var('ssIndResults')
for ssname, result in ssIndResults.iteritems():
# ---- serialize Results
# create individual location for subset
ssresloc = rootsm.sublocation_separator.join([ssrootresloc, ssname])
rootsm.createLocation(ssresloc)
ssreslocpath = rootsm.getLocation(ssresloc)
# first save any plots separately
ssplots = result[RESULTS_PLOTS_ID_KEY]
for plotname in sorted(ssplots.keys()):
plotcontent = ssplots[plotname]
plotpath = os.path.join(ssreslocpath, plotname)
with open(plotpath, 'wb') as f:
writeObj(plotcontent, f)
env.logger.info('Plot %s saved' % (plotname))
# serialize individual output
result_img = dict((k, result[k]) for k in result.keys())
del result_img[RESULTS_PLOTS_ID_KEY]
ind_subset_result_key = '%s%s' % (ssname, subset_results_suffix)
ind_subset_result_path = os.path.join(ssreslocpath, ind_subset_result_key)
with open(ind_subset_result_path, 'wb') as f:
serializeObj(result_img, f)
ind_subset_result_txt_key = '%s%s' % (ind_subset_result_key, txt_suffix)
ind_subset_result_txt_path = os.path.join(ssreslocpath, ind_subset_result_txt_key)
with open(ind_subset_result_txt_path, 'wb') as f:
pprintObj(result_img, f)
env.logger.info('Results for %s serialized to %s' % (ssname, ind_subset_result_key))
env.logger.info('Finished storing complete job results')
def prepareReports(env):
r"""
Action that performs the following:
* obtains/constructs the following mappings/instances used by any L1L2--associated reporters:
* `subsets`
{PKC_ID : [subsetID, numpy.shape(ds), [vars], [samples]]}
* `pkcid2ssname`
{PKC_ID : subsetID}
* `technique2DOF`
{techniqueID : { 'DOFS_IDXS': (0, 1, ..., n), 'DOFs': (name_DOF0, name_DOF1, ..., name_DOFn)}}
* `operations_map_img`
(textual component of `operation map`)
* `categories_map`
{categorizerID : [categories]}
* `cchain`
i.e. categorizers chain, comes directly from :data:`~kdvs.fw.impl.app.Profile.MA_GO_PROFILE` application profile (element 'subset_hierarchy_categorizers_chain')
* `submission_order`
an iterable of PKC IDs sorted in order of submission of their jobs
* `pkc_manager`
a concrete instance of :class:`~kdvs.fw.PK.PKCManager` that governs all PKCs generated
* for each category:
* having all :class:`~kdvs.fw.Stat.Results` instances, and all additional data collected, executes associated reporter(s)
(physical report files are saved to the specific location(s) under current storage manager)
"""
env.logger.info('Started preparing reports')
rootsm = env.var('rootsm')
ssrootresloc = env.var('subsets_results_location_id')
use_debug_output = env.var('use_debug_output')
debug_output_path = env.var('debug_output_path')
txt_suffix = env.var('txt_suffix')
profile = env.var('profile')
categorizers = env.var('pc_categorizers')
operations_map = env.var('operations_map')
operations_map_img = env.var('operations_map_img')
submission_order = env.var('submission_order')
pkc_manager = env.var('pkc_manager')
subsets = env.var('subsets')
cchain = profile['subset_hierarchy_categorizers_chain']
technique2DOF = env.var('technique2DOF')
pkdrivenss = env.var('pkdrivenss')
ssIndResults = env.var('ssIndResults')
# ---- obtain em2annotation that provides annotations for reporters
geneidmap = env.var('geneidmap')
em2annotation = get_em2annotation(geneidmap.dbt)
env.logger.info('Obtained mapping em2annotation (%d entries found)' % (len(em2annotation.keys())))
if use_debug_output:
em2annotation_key = env.var('em2annotation_key')
em2annotation_txt_key = '%s%s' % (em2annotation_key, txt_suffix)
em2annotation_path = os.path.join(debug_output_path, em2annotation_key)
with open(em2annotation_path, 'wb') as f:
serializeObj(em2annotation, f)
em2annotation_txt_path = os.path.join(debug_output_path, em2annotation_txt_key)
with open(em2annotation_txt_path, 'wb') as f:
pprintObj(em2annotation, f)
env.logger.info('Mapping em2annotation serialized to %s' % em2annotation_key)
# ---- obtain pkcid2ssname that will facilitate querying across various misc dictionaries
pkcid2ssname_s = SetBDMap()
for pkcID, sdata in subsets.iteritems():
ssname = sdata['mat']
pkcid2ssname_s[pkcID] = ssname
env.logger.info('Obtained BD mapping pkcid2ssname (%d entries found)' % (len(pkcid2ssname_s.getFwdMap().keys())))
pkcid2ssname = dict()
pkcid2ssname['fwd'] = dict()
for ok, ov in pkcid2ssname_s.getFwdMap().iteritems():
iv = next(iter(ov))
pkcid2ssname['fwd'][ok] = iv
pkcid2ssname['bwd'] = dict()
for ok, ov in pkcid2ssname_s.getBwdMap().iteritems():
iv = next(iter(ov))
pkcid2ssname['bwd'][ok] = iv
if use_debug_output:
pkcid2ssname_key = env.var('pkcid2ssname_key')
pkcid2ssname_txt_key = '%s%s' % (pkcid2ssname_key, txt_suffix)
pkcid2ssname_path = os.path.join(debug_output_path, pkcid2ssname_key)
with open(pkcid2ssname_path, 'wb') as f:
serializeObj(pkcid2ssname, f)
pkcid2ssname_txt_path = os.path.join(debug_output_path, pkcid2ssname_txt_key)
with open(pkcid2ssname_txt_path, 'wb') as f:
pprintObj(pkcid2ssname, f)
env.logger.info('Mapping pkcid2ssname serialized to %s' % pkcid2ssname_key)
# quickly pre-compute all nested categories hierarchy
categories_map = dict()
for categorizerID in cchain:
categorizer = categorizers[categorizerID]
categories = [categorizer.uniquifyCategory(c) for c in categorizer.categories()]
categories_map[categorizerID] = categories
# prepare additional data for reporters
reportersAdditionalData = dict()
# in general any reporter will benefit from annotations
reportersAdditionalData['em2annotation'] = em2annotation
# subset details also may be useful
reportersAdditionalData['subsets'] = subsets
# this mapping will facilitate querying across differently keyed dictionaries
reportersAdditionalData['pkcid2ssname'] = pkcid2ssname
# this mapping simplifies obtaining technical details for technique
reportersAdditionalData['technique2DOF'] = technique2DOF
# global subset hierarchy used to construct nesting groups of subsets
# # used for global reports that cross boundaries of categorizers
# reportersAdditionalData['subsetHierarchy'] = pkdrivenss
# used for identification of techniques across all categories in global reporters
reportersAdditionalData['operations_map_img'] = operations_map_img
# used for moving across categories in global reporters
reportersAdditionalData['categories_map'] = categories_map
# used for moving across categories in global reporters
reportersAdditionalData['cchain'] = cchain
# used for correctly resolving test modes
reportersAdditionalData['submission_order'] = submission_order
reportersAdditionalData['pkc_manager'] = pkc_manager
# one must walk categories in relative order to categorizers chain
# the order of categories within categories is irrelevant
# ---- walk categorizers
for categorizerID in cchain:
categorizer = categorizers[categorizerID]
categories = [categorizer.uniquifyCategory(c) for c in categorizer.categories()]
# ---- walk associated categories
for category in categories:
# ---- process operations map
cdata = operations_map[category]
env.logger.info('Started processing operations for category %s' % category)
# ---- get reporters
reporters = cdata['__reporters__']
if reporters is not None:
env.logger.info('Found %d reporters for category %s : %s' % (len(reporters), category, operations_map_img[category]['__reporters__']))
# ---- reconstruct submission order and results iterable
subm_order = submission_order[categorizerID][category]
env.logger.info('Submission order reconstructed with %d entries' % (len(subm_order)))
# ---- construct results iterable ordered according to submission order
# NOTE : we are careful here since we can have less results than
# submission order may suggest! this is very likely when using
# test mode, and in addition if top category encloses ALL subsets,
# then it is pretty much guaranteed (see global submission order
# dump for reference)
indResultsList = list()
for pkcid in subm_order:
try:
indResultsList.append(ssIndResults[subsets[pkcid]['mat']])
except:
pass
# indResultsList = [ssIndResults[subsets[pkcid]['mat']] for pkcid in subm_order]
env.logger.info('Individual results obtained (%d entries)' % (len(indResultsList)))
for reporterID, reporter in zip(operations_map_img[category]['__reporters__'], reporters):
reporter.initialize(rootsm, ssrootresloc, reportersAdditionalData)
reporter.produce(indResultsList)
reporter.produceForHierarchy(pkdrivenss, ssIndResults, categorizerID, category)
env.logger.info('Reporter %s produced %d report(s)' % (reporterID, len(reporter.getReports().keys())))
reporter.finalize()
else:
env.logger.info('No reporters present for category %s' % (category))
# TODO: finish!
env.logger.info('Finished preparing reports')
# ---- private functions
def _resolveProfileInstanceGroup(profile_ig_data):
instances = dict()
for instID, instData in profile_ig_data.iteritems():
for instComponentID, instParams in instData.iteritems():
inst = importComponent(instComponentID)(**instParams)
instances[instID] = inst
return instances
def main():
MA_GO_Experiment_App().run()
if __name__ == '__main__':
main()
# /MLChallenge-0.1.0.tar.gz/MLChallenge-0.1.0/bin/model.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline (IPython magic; only needed inside a notebook)
import scipy.stats as stats
import pandas_profiling #need to install using anaconda prompt (pip install pandas_profiling)
plt.rcParams['figure.figsize'] = 10, 7.5
plt.rcParams['axes.grid'] = True
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
# reading data into dataframe
data = pd.read_csv("C:/Users/Tushar/Documents/Eloy/Machine Learning Challenge.csv")
data.apply(lambda x: sum(x.isnull()))
data.loc[data['customer_age'] == 'o', 'customer_age'] = np.nan
data.fillna("o", inplace = True)
data.head()
data_new = pd.get_dummies(data, columns=['gender'], drop_first=True, prefix='gender')
data_new.loc[data_new['customer_age'] == 'o', 'customer_age'] = np.nan
data_new['customer_age'] = pd.to_numeric(data_new['customer_age'])
data_new.drop('consumer_id', axis=1, inplace=True)
#Handling missing values - Method 2
def Missing_imputation(x):
x = x.fillna(x.median())
return x
data_new=data_new.apply(lambda x: Missing_imputation(x))
data_new.drop('account_status', axis=1, inplace=True)
#PCA
sc = StandardScaler()
std_model = sc.fit(data_new)
data_scaled = std_model.transform(data_new)
data_scaled = pd.DataFrame(data_scaled, columns=data_new.columns)
data_scaled.describe()
#Using PCA to reduce the data to two important features
from sklearn import preprocessing
# reduce to 2 important features
pca = PCA(n_components=2)
data_pca = pca.fit_transform(data_scaled)
# standardize these 2 new features
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(data_pca)
data_pced = pd.DataFrame(np_scaled)
sse = []
list_k = list(range(1, 10))
for k in list_k:
km = KMeans(n_clusters=k)
km.fit(data_pced)
sse.append(km.inertia_)
# Plot sse against k
plt.figure(figsize=(6, 6))
plt.plot(list_k, sse, '-o')
plt.xlabel(r'Number of clusters *k*')
plt.ylabel('Sum of squared distance');
#The curve is monotonically decreasing and does not show any elbow clearly. From the graph it looks like a value of k
#between 2 and 6 could be a good choice.
#Silhouette analysis can be used to determine the degree of separation between clusters. We want the coefficients to be as
#large as possible and close to 1 to get good clusters. Iterating k from 2 to 8 below.
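#For reference: the silhouette coefficient of a sample i is
#    s(i) = (b(i) - a(i)) / max(a(i), b(i))
#where a(i) is the mean distance of i to the points in its own cluster and b(i) is the mean distance to the
#nearest other cluster, so s(i) lies in [-1, 1] and larger values indicate better separated clusters.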
from sklearn.metrics import silhouette_samples, silhouette_score
for i, k in enumerate([2, 3, 4, 5,6,7,8]):
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# Run the Kmeans algorithm
km = KMeans(n_clusters=k)
labels = km.fit_predict(data_pced)
centroids = km.cluster_centers_
# Get silhouette samples
silhouette_vals = silhouette_samples(data_pced, labels)
# Silhouette plot
y_ticks = []
y_lower, y_upper = 0, 0
for i, cluster in enumerate(np.unique(labels)):
cluster_silhouette_vals = silhouette_vals[labels == cluster]
cluster_silhouette_vals.sort()
y_upper += len(cluster_silhouette_vals)
ax1.barh(range(y_lower, y_upper), cluster_silhouette_vals, edgecolor='none', height=1)
ax1.text(-0.03, (y_lower + y_upper) / 2, str(i + 1))
y_lower += len(cluster_silhouette_vals)
# Get the average silhouette score and plot it
avg_score = np.mean(silhouette_vals)
ax1.axvline(avg_score, linestyle='--', linewidth=2, color='green')
ax1.set_yticks([])
ax1.set_xlim([-0.1, 1])
ax1.set_xlabel('Silhouette coefficient values')
ax1.set_ylabel('Cluster labels')
ax1.set_title('Silhouette plot for the various clusters', y=1.02);
# Scatter plot of data colored with labels
ax2.scatter(data_pced.iloc[:, 0], data_pced.iloc[:, 1], c=labels)
ax2.scatter(centroids[:, 0], centroids[:, 1], marker='*', c='r', s=250)
ax2.set_xlim([-2, 2])
ax2.set_ylim([-2, 2])
ax2.set_xlabel('Principal feature 1')
ax2.set_ylabel('Principal feature 2')
ax2.set_title('Visualization of clustered data', y=1.02)
ax2.set_aspect('equal')
plt.tight_layout()
plt.suptitle(f'Silhouette analysis using k = {k}',
fontsize=16, fontweight='semibold', y=1.05);
#Using n=3 and performing clustering
km_3 = KMeans(n_clusters=3, random_state=123)
km_3 = km_3.fit(data_pced)
#Using n=4 and performing clustering
km_4 = KMeans(n_clusters=4, random_state=123)
km_4 = km_4.fit(data_pced)
data_new['cluster_3'] = km_3.labels_
data_new['cluster_4'] = km_4.labels_
data_new['principal_feature1'] = data_pced[0]
data_new['principal_feature2'] = data_pced[1]
#Visualising clusters with n=3
#plot the different clusters with the 2 main features
fig, ax = plt.subplots()
colors = {0:'red', 1:'blue', 2:'green', 3:'pink'}
ax.scatter(data_new['principal_feature1'], data_new['principal_feature2'], c=data_new["cluster_3"].apply(lambda x: colors[x]))
plt.show()
#Visualising the clusters with n=4
#plot the different clusters with the 2 main features
fig, ax = plt.subplots()
colors = {0:'red', 1:'blue', 2:'green', 3:'pink', 4:'pink'}
ax.scatter(data_new['principal_feature1'], data_new['principal_feature2'], c=data_new["cluster_4"].apply(lambda x: colors[x]))
plt.show()
# Function to return a Series with the distance between each point and the centroid of its assigned cluster
def getDistanceByPoint(data, model):
distances = []
for i in range(0, len(data)):
Xa = np.array(data.loc[i])
# cluster_centers_ is indexed directly by the label assigned to point i
Xb = model.cluster_centers_[model.labels_[i]]
distances.append(np.linalg.norm(Xa - Xb))
return pd.Series(distances, index=data.index)
#Finding the anomalies based on the distance using 3 clusters
outliers_fraction = 0.01
distance = getDistanceByPoint(data_pced, km_3)
number_of_outliers = int(outliers_fraction*len(distance))
threshold = distance.nlargest(number_of_outliers).min()
# anomaly25 contains the anomaly flag from the 3-cluster model (0: normal, 1: anomaly)
data_new['anomaly25'] = (distance >= threshold).astype(int)
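# e.g. with outliers_fraction = 0.01 the top 1% of points by distance to their assigned centroid are
# flagged as anomalies (100 points for this dataset, as the value_counts output below shows)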
#Finding the number of anomalies identified
data_new.anomaly25.value_counts()
#Visualising the anomalies
fig, ax = plt.subplots()
colors = {0:'blue', 1:'red'}
ax.scatter(data_new['principal_feature1'], data_new['principal_feature2'], c=data_new["anomaly25"].apply(lambda x: colors[x]))
plt.show()
#Finding the anomalies using 4 clusters
outliers_fraction = 0.01
distance = getDistanceByPoint(data_pced, km_4)
number_of_outliers = int(outliers_fraction*len(distance))
threshold = distance.nlargest(number_of_outliers).min()
# anomaly26 contains the anomaly flag from the 4-cluster model (0: normal, 1: anomaly)
data_new['anomaly26'] = (distance >= threshold).astype(int)
#Visualising the anomalies
fig, ax = plt.subplots()
colors = {0:'blue', 1:'red'}
ax.scatter(data_new['principal_feature1'], data_new['principal_feature2'], c=data_new["anomaly26"].apply(lambda x: colors[x]))
plt.show()
#Calculating the number of anomalies found
data_new.anomaly26.value_counts()
#Again the number of anomalies found is 100.
#Which model to choose can be decided based on the silhouette scores of both models
print("Silhouette Score for three clusters: " + (silhouette_score(data_pced, data_new.cluster_3)).astype(str))
print("Silhouette Score for four clusters: " + (silhouette_score(data_pced, data_new.cluster_4)).astype(str))
#As stated, the model with the score closest to 1 is the better model. Hence the model with 4 clusters is better and more accurate
import pickle
pickle.dump(km_4, open('model.pkl', 'wb'))
/Myplayer-1.3.0.tar.gz/Myplayer-1.3.0/README.md

# How to use "Myplayer"
1. Create a ".py" file
2. Import it with: "import Myplayer"
3. Open the UI (graphical interface) with "Myplayer.Misc()"
4. Select a folder containing your music
5. Select a song and click "Play"
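
A minimal sketch of the steps above (assuming "Myplayer.Misc()" is the only call needed to open the player window):

```python
# my_player_script.py
import Myplayer

# Opens the player UI; from there choose a folder with your songs,
# pick one, and use the Play / Pause / Resume buttons.
Myplayer.Misc()
```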
# Controls
1. Play (start playing the song)
2. Resume (resume playback if it is paused)
3. Pause (pause the song)
# Libraries
1. tkinter
2. pygame
!! NOTE: INSTALL TKINTER YOURSELF; PYGAME WILL BE INSTALLED AUTOMATICALLY !!
1. pip install tkinter
# Creators
Eduardo Martins Panassi
| PypiClean |
# /NREL_reVX-0.3.53-py3-none-any.whl/reVX/setbacks/setbacks_cli.py
import os
import glob
import shutil
import logging
from pathlib import Path
from warnings import warn
from gaps.config import load_config
from gaps.cli import CLICommandFromFunction, make_cli
from gaps.cli.preprocessing import preprocess_collect_config
from reVX.setbacks import SETBACKS
from reVX.setbacks.regulations import (validate_setback_regulations_input,
select_setback_regulations)
from reVX.setbacks.setbacks_converter import parse_setbacks
from reVX.handlers.geotiff import Geotiff
from reVX.utilities import ExclusionsConverter
from reVX import __version__
from reVX.setbacks.setbacks import SETBACK_SPECS, setbacks_calculator
logger = logging.getLogger(__name__)
def _validate_multipliers(features, generic_setback_multiplier=None):
"""Load, format, and validate generic multipliers. """
if isinstance(generic_setback_multiplier, str):
logger.debug("Loading multipliers from {!r}"
.format(generic_setback_multiplier))
generic_setback_multiplier = load_config(generic_setback_multiplier)
try:
missing = {feat for feat in features
if feat not in generic_setback_multiplier}
except TypeError:
return {feat: generic_setback_multiplier for feat in features}
if missing:
msg = ("The following features are missing from the "
"`generic_cost_multiplier` input: {}. Please make sure every "
"key in the `features` input has a matching key in the "
"`generic_cost_multiplier` dictionary"
.format(missing))
logger.error(msg)
raise KeyError(msg)
return {feat: generic_setback_multiplier[feat] for feat in features}
def preprocess_setbacks_config(config, features,
generic_setback_multiplier=None):
"""Pre-process setbacks computation config.
Parameters
----------
config : dict
Setbacks compute config. This config will be updated to include
a the ``node_feature_type``, ``node_file_path``, and
``node_multiplier`` keys based on user input.
features : dict
Dictionary specifying which features/data to process. The keys
of this dictionary must be the a key from the
:attr:`~reVX.setbacks.setbacks.SETBACK_SPECS` dictionary or the
``feature_specs`` input dictionary specifying the feature type
to run setbacks for. The value of each key must be a path or a
list of paths to calculate that particular setback for.
The path(s) can contain unix-style file-pattern matching syntax
to point to multiple files. The paths may be specified relative
to the config file. For example::
features: {
"parcel": "../relative/path/to/parcel_colorado.gpkg",
"road": [
"/full/path/to/road/data/*.gpkg",
"../../relative/path/to/data_i[l,n].gpkg",
]
}
With this input, parcel setbacks would be computed for the
data in ``../relative/path/to/parcel_colorado.gpkg``, and road
setbacks would be calculated for *all* GeoPackage data files in
``/full/path/to/road/data/`` and for the files
``../../relative/path/to/data_il.gpkg`` and
``../../relative/path/to/data_in.gpkg``.
generic_setback_multiplier : int | float | str, optional
Optional setback multiplier to use where local regulations are
not supplied. This multiplier will be applied to the
``base_setback_dist`` (or the turbine tip-height) to calculate
the setback. If supplied along with ``regulations_fpath``, this
input will be used to apply a setback to all counties not listed
in the regulations file. This input can also be a path to a
config file containing feature types as keys and
feature-specific generic multipliers as values. For example::
{
"parcel": 1.1,
"road": 2,
"structure": 3.5
}
If specified this way, every key in the ``features`` inputs
must also be given in the generic multipliers config. If
``None``, no generic setback computation is performed.
By default, ``None``.
Returns
-------
config : dict
Updated setbacks computation config dictionary.
Raises
------
ValueError
If ``features`` is not a dictionary, or any key in ``features``
is not a valid key of :attr:`SETBACKS`.
FileNotFoundError
If no input GeoPackage files are found on disk.
"""
if not isinstance(features, dict):
raise ValueError("`features` key must be a dictionary, got {}"
.format(features))
feature_specs = config.get("feature_specs", {})
combos_to_run = []
multipliers = _validate_multipliers(features, generic_setback_multiplier)
for feature_type, features_fpath in features.items():
if feature_type not in set(SETBACKS) | set(feature_specs):
msg = ("feature_type must be one of: {}; got {!r}"
.format(set(SETBACKS.keys()), feature_type))
raise ValueError(msg)
if isinstance(features_fpath, str):
features_fpath = [features_fpath]
for path in features_fpath:
glob_path = Path(path).expanduser().resolve()
if glob_path.is_dir():
glob_path = glob_path / '*'
need_to_run = [(feature_type, str(f), multipliers[feature_type])
for f in glob_path.parent.glob(glob_path.name)
if f.name.endswith("gpkg")]
if not need_to_run:
msg = ("Found no unprocessed GeoPackage files matching the "
"input {!r}!".format(path))
logger.warning(msg)
warn(msg)
continue
combos_to_run += need_to_run
if not combos_to_run:
msg = 'No unprocessed GeoPackage files found!'
logger.error(msg)
raise FileNotFoundError(msg)
feature_type, file_path, multiplier = zip(*sorted(combos_to_run))
config["node_feature_type"] = feature_type
config["node_file_path"] = file_path
config["node_multiplier"] = multiplier
validate_setback_regulations_input(config.get("base_setback_dist"),
config.get("hub_height"),
config.get("rotor_diameter"))
_update_setbacks_calculators(feature_specs) # test for errors
return config
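# Illustrative sketch (not from the original source; the paths and values are
# made up): after pre-processing, the three "node_*" keys hold element-wise
# aligned tuples, one entry per matched GeoPackage, e.g.
#
#     config["node_feature_type"]  # ("parcel", "road")
#     config["node_file_path"]     # ("/data/parcels.gpkg", "/data/roads.gpkg")
#     config["node_multiplier"]    # (1.1, 2)
#
# The alignment follows from the single ``zip(*sorted(combos_to_run))`` call
# above, and the CLI commands below split jobs across these keys.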
def preprocess_merge_config(config, project_dir, command_name,
merge_file_pattern="PIPELINE"):
"""Pre-process merge config.
Parameters
----------
config : dict
Collection config. This config will be updated to include the
keys ``node_out_path`` and ``node_pattern`` representing the
output file path and the input file pattern, respectively.
project_dir : path-like
Path to project directory. This path is used to resolve the
output filepath.
command_name : str
Name of the command being run. This is used to parse the
pipeline status for output files if
``"merge_file_pattern": "PIPELINE"`` in the input `config`.
merge_file_pattern : str | list | dict, optional
        Unix-style ``/filepath/pattern*.tif`` representing the files to
be merged into a single output GeoTIFF file. If no output file
path is specified (i.e. this input is a single pattern or a list
of patterns), the output file path will be inferred from the
pattern itself (specifically, the wildcard will be removed
and the result will be the output file path). If a list of
patterns is provided, each pattern will be merged into a
separate output file. To specify the name of the output file(s),
set this input to a dictionary whose keys are paths to the
output file (relative paths are allowed) and the values are
patterns representing the input files that should be merged into
the output TIFF. If running a merge job as part of a pipeline,
this input can be set to ``"PIPELINE"``, which will parse the
output of the previous step (``compute``) and generate the input
file pattern and output file name automatically.
By default, ``"PIPELINE"``.
Returns
-------
config : dict
Updated merge config dictionary.
"""
config = preprocess_collect_config(config, project_dir, command_name,
collect_pattern=merge_file_pattern)
config["node_out_path"] = config.pop("_out_path", None)
config["node_pattern"] = config.pop("_pattern", None)
return config
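# Illustrative sketch (file names are made up, not from the original source):
# an explicit merge config can map each output GeoTIFF to an input pattern,
#
#     "merge_file_pattern": {
#         "./setbacks_road.tif": "./chunk_files/setbacks_road_*.tif",
#         "./setbacks_parcel.tif": "./chunk_files/setbacks_parcel_*.tif",
#     }
#
# while the default "PIPELINE" value lets this pre-processor infer the pattern
# and output name from the previous "compute" step.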
def _update_setbacks_calculators(feature_specs=None):
"""Update `SETBACKS` to include new specs """
for feature_name, feature_spec in (feature_specs or {}).items():
spec = SETBACK_SPECS.get(feature_name, {})
spec.update(feature_spec)
SETBACKS[feature_name] = setbacks_calculator(**spec)
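# Illustrative sketch (mirrors the ``feature_specs`` example documented in
# ``compute_setbacks`` below): an entry either patches an existing spec from
# SETBACK_SPECS or registers a brand-new key in SETBACKS.
#
#     _update_setbacks_calculators({
#         "water": {"num_features_per_worker": 500},   # patch existing spec
#         "oil_and_gas_pipelines": {"feature_type": "oil and gas",
#                                   "feature_filter_type": "clip"},  # new key
#     })
#
# After the call, SETBACKS["oil_and_gas_pipelines"] is a calculator built by
# ``setbacks_calculator(**spec)`` and can be used in the ``features`` input.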
def compute_setbacks(excl_fpath, node_feature_type, node_file_path,
node_multiplier, out_dir, tag, hub_height=None,
rotor_diameter=None, base_setback_dist=None,
regulations_fpath=None,
weights_calculation_upscale_factor=None,
replace=False, hsds=False, out_layers=None,
feature_specs=None, max_workers=None):
"""Compute Setbacks.
Setbacks can be computed for a specific turbine (hub height and
rotor diameter) or more generally using a base setback distance.
Setbacks can be computed either locally (on a per-county basis with
given distances/multipliers) or everywhere under a generic setback
multiplier assumption applied to either the turbine tip-height or
the base setback distance. These two methods can also be applied
    simultaneously - local setbacks are computed where given (via the
    regulations file input) and a generic multiplier applied to the
turbine tip-height or the base setback distance everywhere else.
Partial inclusions can be computed instead of boolean exclusions,
both of which can be fed directly into ``reV``.
Parameters
----------
excl_fpath : str
Path to HDF5 file containing the county FIPS layer (should be
called ``cnty_fips``) used to match local regulations in
``regulations_fpath`` to counties on the grid. No data will be
written to this file unless explicitly requested via the
``out_layers`` input.
node_feature_type : str
Name of the feature type being run. Must be a key of the
:attr:`SETBACKS` dictionary.
node_file_path : str
Path to input feature file. This file MUST be a GeoPackage (and
have the ".gpkg" extension).
node_multiplier : int | float | str | None, optional
A setback multiplier to use if regulations are not supplied.
This multiplier will be applied to the ``base_setback_dist``
to calculate the setback. If supplied along with
``regulations_fpath``, this input will be used to apply a
setback to all counties not listed in the regulations file.
out_dir : str
Path to output directory where output file should be written.
tag : str
Tag to add to each output file to make it unique (i.e. not clash
with output files from other nodes).
hub_height : int | float, optional
Turbine hub height (m), used along with rotor diameter to
compute the blade tip-height which is used as the base setback
distance for generic/local regulations. If this input is
specified, ``rotor_diameter`` must also be given, and
``base_setback_dist`` *must be set to None*, otherwise an
        error is thrown. The base setback distance is scaled by
generic/local multipliers (provided either via the
``regulations_fpath`` csv, or the ``generic_setback_multiplier``
input, or both) before setbacks are computed.
By default, ``None``.
rotor_diameter : int | float, optional
Turbine rotor diameter (m), used along with hub height to
compute the blade tip-height, which is used as the base setback
distance for generic/local regulations. If this input is
specified, ``hub_height`` must also be given, and
``base_setback_dist`` *must be set to None*, otherwise an
        error is thrown. The base setback distance is scaled by
generic/local multipliers (provided either via the
``regulations_fpath`` csv, or the ``generic_setback_multiplier``
input, or both) before setbacks are computed.
By default, ``None``.
base_setback_dist : int | float, optional
Base setback distance (m). This value is used as the base
setback distance for generic/local regulations. If this input is
        specified, both ``hub_height`` and ``rotor_diameter`` *must be
        set to None*, otherwise an error is thrown. The base setback
distance is scaled by generic/local multipliers (provided either
via the ``regulations_fpath`` csv, or the
``generic_setback_multiplier`` input, or both) before setbacks
are computed. By default, ``None``.
regulations_fpath : str, optional
Path to regulations ``.csv`` or ``.gpkg`` file. At a minimum,
this file must contain the following columns: ``Feature Type``,
which contains labels for the type of setback that each row
represents, ``Feature Subtype``, which contains labels for
        feature subtypes, ``Value Type``, which specifies whether the
value is a multiplier or static height, ``Value``, which
specifies the numeric value of the setback or multiplier, and
``FIPS``, which specifies a unique 5-digit code for each county
(this can be an integer - no leading zeros required). See
:obj:`~reVX.setbacks.regulations.SetbackRegulations` (if using
only ``base_setback_dist`` input) or
:obj:`~reVX.setbacks.regulations.WindSetbackRegulations` (if
using ``hub_height`` + ``rotor_diameter`` input) for more info.
This option overrides the ``generic_setback_multiplier`` input,
but only for counties that are listed in the input CSV file.
This means both ``regulations_fpath`` and
``generic_setback_multiplier`` can be specified
simultaneously in order to compute setbacks driven by local
ordinance where given + a generic multiplier applied everywhere
else. By default, ``None``, which does not compute any
local setbacks.
weights_calculation_upscale_factor : int, optional
Optional input to specify *partial* setback calculations.
If this value is an int > 1, the output will be a layer with
**inclusion** weight values (floats ranging from 0 to 1). Note
that this is backwards w.r.t the typical output of exclusion
integer values (1 for excluded, 0 otherwise). Values <= 1 will
still return a standard exclusion mask. For example, a cell that
was previously excluded with a boolean mask (value of 1) may
instead be converted to an inclusion weight value of 0.75,
meaning that 75% of the area corresponding to that point should
be included (i.e. the exclusion feature only intersected a small
portion - 25% - of the cell). This percentage inclusion value is
calculated by upscaling the output array using this input value,
rasterizing the exclusion features onto it, and counting the
number of resulting sub-cells excluded by the feature. For
example, setting the value to ``3`` would split each output cell
into nine sub-cells - 3 divisions in each dimension. After the
feature is rasterized on this high-resolution sub-grid, the area
of the non-excluded sub-cells is totaled and divided by the area
of the original cell to obtain the final inclusion percentage.
Therefore, a larger upscale factor results in more accurate
percentage values. If ``None`` (or a value <= 1), this process
is skipped and the output is a boolean exclusion mask.
By default ``None``.
replace : bool, optional
Flag to replace the output GeoTIFF if it already exists.
By default, ``False``.
hsds : bool, optional
Boolean flag to use ``h5pyd`` to handle HDF5 "files" hosted on
AWS behind HSDS. By default, ``False``.
out_layers : dict, optional
Dictionary mapping the input feature file names (with extension)
to names of layers under which exclusions should be saved in the
``excl_fpath`` HDF5 file. If ``None`` or empty dictionary,
no layers are saved to the HDF5 file. By default, ``None``.
feature_specs : dict, optional
Optional dictionary specifying new feature setback calculators
or updates to existing ones. The keys of this dictionary should
be names of the features for which a specification is being
provided. If the name is already a key in
:attr:`~reVX.setbacks.setbacks.SETBACK_SPECS`, the corresponding
        specifications will be updated for that feature. Otherwise, the
name will represent a new feature type, which can be used as a
key in the ``features`` input. The values of the feature-type
keys should be dictionaries, where the keys are parameters of
the :func:`~reVX.setbacks.setbacks.setbacks_calculator`
function. Required parameters in that function are required keys
of these dictionaries. Values should be the updated value.
For example, the input
::
feature_specs: {
"water": {
"num_features_per_worker": 500
},
"oil_and_gas_pipelines": {
"feature_type": "oil and gas",
"feature_filter_type": "clip"
}
}
would update the existing ``"water"`` setbacks calculator to
compute 500 features per worker at a time and create a new
``"oil_and_gas_pipelines"`` feature that looks for the string
``"oil and gas"`` in the regulations file and clips the feature
to a county before calculating a setback. Note that even though
``"oil_and_gas_pipelines"`` is not a default feature supported
by ``reVX``, you can now use it in the ``features`` input.
This can also be helpful if you need to compute the same type of
setback for multiple different input datasets. For example, the
input
::
feature_specs: {
"water-nwi": {
"feature_type": "water",
"buffer_type": "default",
"feature_filter_type": "clip",
"num_features_per_worker": 700,
},
"water-nhd": {
"feature_type": "water",
"buffer_type": "default",
"feature_filter_type": "clip",
"num_features_per_worker": 10_000,
}
}
would allow you to set up your ``features`` input like so::
features: {
"water-nwi": "/path/to/nwi/*.gpkg",
"water-nhd": "/path/to/nhd/*.gpkg",
}
By default, ``None``, which does not add any new setback
calculators (the default ones defined in
:attr:`~reVX.setbacks.setbacks.SETBACK_SPECS` are still
available).
max_workers : int, optional
Number of workers to use for setback exclusion computation. If
this value is 1, the computation runs in serial. If this value
is > 1, the computation runs in parallel with that many workers.
If ``None``, the computation runs in parallel on all available
cores. By default, ``None``.
Returns
-------
str
Path to output GeoTIFF file containing setback exclusion data.
"""
_update_setbacks_calculators(feature_specs)
logger.info('Computing setbacks from {} in {}'
.format(node_feature_type, node_file_path))
logger.debug('Setbacks to be computed with:\n'
'- base_setback_dist = {}\n'
'- hub_height = {}\n'
'- rotor_diameter = {}\n'
'- regulations_fpath = {}\n'
'- generic_setback_multiplier = {}\n'
'- using max_workers = {}\n'
'- replace layer if needed = {}\n'
'- weights calculation upscale factor = {}\n'
'- out_layers = {}\n'
.format(base_setback_dist, hub_height, rotor_diameter,
regulations_fpath, node_multiplier, max_workers,
replace,
weights_calculation_upscale_factor, out_layers))
regulations = select_setback_regulations(base_setback_dist, hub_height,
rotor_diameter, regulations_fpath,
node_multiplier)
setbacks_class = SETBACKS[node_feature_type]
wcuf = weights_calculation_upscale_factor
fn = ("setbacks_{}_{}{}.tif"
.format(node_feature_type, os.path.basename(out_dir), tag))
out_fn = os.path.join(out_dir, fn)
setbacks_class.run(excl_fpath, node_file_path, out_fn, regulations,
weights_calculation_upscale_factor=wcuf,
max_workers=max_workers, replace=replace, hsds=hsds,
out_layers=out_layers)
logger.info('Setbacks computed and written to {}'.format(out_fn))
return out_fn
def merge_setbacks(node_out_path, node_pattern, are_partial_inclusions=None,
purge_chunks=False):
"""Combine many input setback GeoTIFFs into a single layer.
Parameters
----------
node_out_path : str
Path to output GeoTIFF file.
node_pattern : str
Input GeoTIFF file pattern.
are_partial_inclusions : bool, optional
        Flag indicating whether the inputs are partial inclusion values
or boolean exclusions. If ``None``, will try to infer
automatically from the input file's GeoTIFF profile
(``dtype != uint8``). By default, ``None``.
purge_chunks : bool, optional
        Flag indicating whether individual "chunk" files should be
deleted after a successful merge (``True``), or if they should
be stored in a "chunk_files" directory (``False``).
By default, ``False``.
Raises
------
FileNotFoundError
If no input files found on disk matching the input pattern.
RuntimeError
If the ``are_partial_inclusions`` cannot be inferred (GeoTIFF
profile does not have "dtype" field).
"""
out_file = Path(node_out_path).resolve()
logger.info("Merging TIFF files in {!r} and writing to {!r}"
.format(out_file.parent.as_posix(), out_file))
input_setback_files = list(glob.glob(node_pattern))
if not input_setback_files:
msg = ("Did not find any files matching pattern {!r} in directory {!r}"
.format(node_pattern, out_file.parent.as_posix()))
logger.error(msg)
raise FileNotFoundError(msg)
with Geotiff(input_setback_files[0]) as tif:
profile = tif.profile
if are_partial_inclusions is None:
data_dtype = profile.get("dtype")
if data_dtype is None:
msg = ("Cannot infer whether data are partial inclusions because "
"data dtype is not set in the GeoTIFF profile. Please "
"explicitly specify the `are_partial_inclusions` input!")
logger.error(msg)
raise RuntimeError(msg)
are_partial_inclusions = data_dtype.casefold() != "uint8"
setbacks = parse_setbacks(input_setback_files,
is_inclusion_layer=are_partial_inclusions)
logger.info("Writing data to {!r}".format(out_file.as_posix()))
ExclusionsConverter.write_geotiff(out_file.as_posix(), profile, setbacks)
if purge_chunks:
for fpath in input_setback_files:
os.remove(fpath)
else:
chunk_dir = out_file.parent / "chunk_files"
chunk_dir.mkdir(parents=True, exist_ok=True)
for fpath in input_setback_files:
shutil.move(fpath, chunk_dir / os.path.basename(fpath))
logger.info("Moved chunk files from {} to sub_dir: {}"
.format(out_file.parent, chunk_dir))
PRIVATE_COMPUTE_KEYS = ("node_feature_type", "node_file_path",
"node_multiplier")
PRIVATE_MERGE_KEYS = ("node_out_path", "node_pattern")
commands = [
CLICommandFromFunction(
function=compute_setbacks, name="compute",
split_keys=[PRIVATE_COMPUTE_KEYS],
config_preprocessor=preprocess_setbacks_config,
skip_doc_params=PRIVATE_COMPUTE_KEYS,
),
CLICommandFromFunction(
function=merge_setbacks, name="merge",
split_keys=[PRIVATE_MERGE_KEYS],
config_preprocessor=preprocess_merge_config,
skip_doc_params=PRIVATE_MERGE_KEYS,
),
]
cli = make_cli(commands)
if __name__ == '__main__':
try:
cli(obj={})
except Exception:
logger.exception('Error running Setbacks CLI')
        raise
/CustomPipeline-0.0.3-py3-none-any.whl/rpplugins/vxgi/voxelization_stage.py

from __future__ import division
from rpcore.globals import Globals
from rpcore.image import Image
from rpcore.render_stage import RenderStage
from panda3d.core import Camera, OrthographicLens, NodePath, CullFaceAttrib
from panda3d.core import DepthTestAttrib, Vec4, PTALVecBase3, Vec3, SamplerState
from panda3d.core import ColorWriteAttrib
class VoxelizationStage(RenderStage):
""" This stage voxelizes the whole scene """
required_inputs = ["DefaultEnvmap", "AllLightsData", "maxLightIndex"]
required_pipes = []
# The different states of voxelization
S_disabled = 0
S_voxelize_x = 1
S_voxelize_y = 2
S_voxelize_z = 3
S_gen_mipmaps = 4
def __init__(self, pipeline):
RenderStage.__init__(self, pipeline)
self.voxel_resolution = 256
self.voxel_world_size = -1
self.state = self.S_disabled
self.create_ptas()
def set_grid_position(self, pos):
self.pta_next_grid_pos[0] = pos
def create_ptas(self):
self.pta_next_grid_pos = PTALVecBase3.empty_array(1)
self.pta_grid_pos = PTALVecBase3.empty_array(1)
@property
def produced_inputs(self):
return {"voxelGridPosition": self.pta_grid_pos}
@property
def produced_pipes(self):
return {"SceneVoxels": self.voxel_grid}
def create(self):
# Create the voxel grid used to generate the voxels
self.voxel_temp_grid = Image.create_3d(
"VoxelsTemp", self.voxel_resolution, self.voxel_resolution,
self.voxel_resolution, "RGBA8")
self.voxel_temp_grid.set_clear_color(Vec4(0))
self.voxel_temp_nrm_grid = Image.create_3d(
"VoxelsTemp", self.voxel_resolution, self.voxel_resolution,
self.voxel_resolution, "R11G11B10")
self.voxel_temp_nrm_grid.set_clear_color(Vec4(0))
# Create the voxel grid which is a copy of the temporary grid, but stable
self.voxel_grid = Image.create_3d(
"Voxels", self.voxel_resolution, self.voxel_resolution, self.voxel_resolution, "RGBA8")
self.voxel_grid.set_clear_color(Vec4(0))
self.voxel_grid.set_minfilter(SamplerState.FT_linear_mipmap_linear)
# Create the camera for voxelization
self.voxel_cam = Camera("VoxelizeCam")
self.voxel_cam.set_camera_mask(self._pipeline.tag_mgr.get_mask("voxelize"))
self.voxel_cam_lens = OrthographicLens()
self.voxel_cam_lens.set_film_size(
-2.0 * self.voxel_world_size, 2.0 * self.voxel_world_size)
self.voxel_cam_lens.set_near_far(0.0, 2.0 * self.voxel_world_size)
self.voxel_cam.set_lens(self.voxel_cam_lens)
self.voxel_cam_np = Globals.base.render.attach_new_node(self.voxel_cam)
self._pipeline.tag_mgr.register_camera("voxelize", self.voxel_cam)
# Create the voxelization target
self.voxel_target = self.create_target("VoxelizeScene")
self.voxel_target.size = self.voxel_resolution
self.voxel_target.prepare_render(self.voxel_cam_np)
# Create the target which copies the voxel grid
self.copy_target = self.create_target("CopyVoxels")
self.copy_target.size = self.voxel_resolution
self.copy_target.prepare_buffer()
# TODO! Does not work with the new render target yet - maybe add option
# to post process region for instances?
self.copy_target.instance_count = self.voxel_resolution
self.copy_target.set_shader_inputs(
SourceTex=self.voxel_temp_grid,
DestTex=self.voxel_grid)
# Create the target which generates the mipmaps
self.mip_targets = []
mip_size, mip = self.voxel_resolution, 0
while mip_size > 1:
mip_size, mip = mip_size // 2, mip + 1
mip_target = self.create_target("GenMipmaps:" + str(mip))
mip_target.size = mip_size
mip_target.prepare_buffer()
mip_target.instance_count = mip_size
mip_target.set_shader_inputs(
SourceTex=self.voxel_grid,
sourceMip=(mip - 1))
mip_target.set_shader_input("DestTex", self.voxel_grid, False, True, -1, mip, 0)
self.mip_targets.append(mip_target)
# Create the initial state used for rendering voxels
initial_state = NodePath("VXGIInitialState")
initial_state.set_attrib(CullFaceAttrib.make(CullFaceAttrib.M_cull_none), 100000)
initial_state.set_attrib(DepthTestAttrib.make(DepthTestAttrib.M_none), 100000)
initial_state.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 100000)
self.voxel_cam.set_initial_state(initial_state.get_state())
Globals.base.render.set_shader_inputs(
voxelGridPosition=self.pta_next_grid_pos,
VoxelGridDest=self.voxel_temp_grid)
def update(self):
self.voxel_cam_np.show()
self.voxel_target.active = True
self.copy_target.active = False
for target in self.mip_targets:
target.active = False
        # Voxelization disabled
if self.state == self.S_disabled:
self.voxel_cam_np.hide()
self.voxel_target.active = False
# Voxelization from X-Axis
elif self.state == self.S_voxelize_x:
# Clear voxel grid
self.voxel_temp_grid.clear_image()
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(self.voxel_world_size, 0, 0))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Voxelization from Y-Axis
elif self.state == self.S_voxelize_y:
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(0, self.voxel_world_size, 0))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Voxelization from Z-Axis
elif self.state == self.S_voxelize_z:
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(0, 0, self.voxel_world_size))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Generate mipmaps
elif self.state == self.S_gen_mipmaps:
self.voxel_target.active = False
self.copy_target.active = True
self.voxel_cam_np.hide()
for target in self.mip_targets:
target.active = True
# As soon as we generate the mipmaps, we need to update the grid position
# as well
self.pta_grid_pos[0] = self.pta_next_grid_pos[0]
def reload_shaders(self):
self.copy_target.shader = self.load_plugin_shader(
"/$$rp/shader/default_post_process_instanced.vert.glsl", "copy_voxels.frag.glsl")
mip_shader = self.load_plugin_shader(
"/$$rp/shader/default_post_process_instanced.vert.glsl", "generate_mipmaps.frag.glsl")
for target in self.mip_targets:
target.shader = mip_shader
def set_shader_input(self, *args):
Globals.render.set_shader_input(*args)
def set_shader_inputs(self, **kwargs):
        Globals.render.set_shader_inputs(**kwargs)
/Numberjack-1.2.0.tar.gz/Numberjack-1.2.0/examples/JobshopDichotomy.py

from __future__ import print_function
from Numberjack import *
# Job Shop Scheduling
# Given a set of N jobs of various sizes, the job shop scheduling problem is to
# schedule these on M machines such that the overall makespan is minimized. The
# makespan is the total length of the schedule.
#
# This example uses dichotomic search to solve the problem.
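#
# A rough sketch of the dichotomic (binary) search idea used below, written as
# pseudocode in comments (the names are illustrative, not the actual API):
#
#     lb, ub = max_infeasible, min_feasible
#     while lb + 1 < ub:
#         C_max = (lb + ub) // 2
#         if feasible(C_max):      # a node-limited solver probe
#             ub = C_max           # keep the schedule as the new incumbent
#         else:
#             lb = C_max           # no schedule proven to fit under C_max
#     # on exit, ub is the smallest makespan found feasible; note that in the
#     # code below an aborted (node-limited) probe is treated like UNSAT and
#     # simply raises lb.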
###############################################
####### Class JSP: problem instance #######
###############################################
class JSP:
def __init__(self, data_file):
stream = open(data_file)
n, m = stream.readline().split()[:2]
self.nJobs = int(n)
self.nMachines = int(m)
stream.readline()
self.job = []
self.machine = [[] for i in range(self.nMachines)]
self.m = [[None]*self.nMachines for i in range(self.nJobs)]
for i in range(self.nJobs):
self.job.append([int(elt) for elt in (stream.readline()[:-1]).split()])
stream.readline()
for i in range(self.nJobs):
machines = (stream.readline()[:-1]).split()
for j in range(len(machines)):
self.machine[int(machines[j]) - 1].append((i, j))
self.m[i][j] = (int(machines[j])-1)
def __str__(self):
return '\n' + str(self.job) + '\n\n' + str(self.machine) + '\n\n' + str(self.m) + '\n'
def lower_bound(self):
longest_job = max([sum(job) for job in self.job])
longest_machine = max([sum([self.job[i][j] for i, j in mac]) for mac in self.machine])
return max([longest_job, longest_machine])
def upper_bound(self):
M_job = [0]*self.nJobs
M_machine = [0]*self.nMachines
for i in range(self.nMachines):
for j in range(self.nJobs):
start_time = max(M_job[j], M_machine[self.m[j][i]])
M_job[j] = start_time+self.job[j][i]
M_machine[self.m[j][i]] = start_time+self.job[j][i]
return max(max(M_job), max(M_machine))
###############################################
###### Class JSP_Model: constraint model ######
###############################################
class JSP_Model(Model):
def __init__(self, jsp):
Model.__init__(self)
C_max = jsp.upper_bound()
Tasks = Matrix([[Task(C_max, p) for p in job] for job in jsp.job])
Machines = [UnaryResource([Tasks[m] for m in machine]) for machine in jsp.machine]
for task in Tasks.row:
self += [task[i] < task[i+1] for i in range(jsp.nMachines-1)]
self += Machines
self.sequence = sum(Machines, [])
self.tasks = Tasks.flat
self.Jobs = Tasks
###############################################
############## function solve ##############
###############################################
def dichotomic_step(model, solver, C_max, best_solution, verb, cutoff):
solver.save()
for task in model.tasks:
solver.post(task < C_max)
if best_solution:
solver.guide(best_solution)
solver.setNodeLimit(cutoff)
solver.solveAndRestart(GEOMETRIC, 256, 1.3)
outcome = (None, None, C_max)
if solver.is_sat():
outcome = (True, solver.get_solution(), max([task.get_min() + task.duration for task in model.tasks]))
if verb > 0:
print('SAT', end=' ')
elif solver.is_unsat():
outcome = (False, None, C_max)
if verb > 0:
print('UNSAT', end=' ')
else:
if verb > 0:
print('ABORT', end=' ')
if verb > 0:
print(str(solver.getTime()).rjust(8), 's', str(solver.getNodes()).rjust(10), 'nds')
solver.reset()
solver.undo()
return outcome
###############################################
######## function dichotomic search ########
###############################################
def dichotomic_search(model, solver, max_infeasible, min_feasible, verb, cutoff):
if verb > 0:
        print('start dichotomic search', cutoff)
lb = max_infeasible
ub = min_feasible
best_solution = None
while lb + 1 < ub:
C_max = int((lb + ub) / 2)
if verb > 0:
print('c current bounds:', ('[' + str(lb + 1) + '..' + str(ub) + ']').rjust(16), ' solve', str(C_max).ljust(6), end=' ')
feasible, solution, C_max = dichotomic_step(model, solver, C_max, best_solution, verb, cutoff)
if feasible:
ub = C_max
best_solution = solution
else:
lb = C_max
if feasible:
max_infeasible = C_max
min_feasible = ub
return max_infeasible, min_feasible, best_solution
###############################################
######## function branch and bound ########
###############################################
def branch_and_bound(model, lib, max_infeasible, min_feasible, verb, best=None):
C_max = Variable(max_infeasible+1, min_feasible)
for task in model.tasks:
model.add(task < C_max)
model.add(Minimise(C_max))
if lib == 'Mistral':
solver = model.load(lib, model.sequence)
else:
solver = model.load(lib)
solver.setVerbosity(verb-1)
solver.setHeuristic('Scheduling')
if best is not None:
solver.guide(best)
outcome = (max_infeasible + 1, None)
solver.solve()
if solver.is_sat():
best = solver.get_solution()
if solver.is_opt():
if verb > 0:
print('c Found optimal solution:', C_max.solution())
outcome = (C_max.get_value(), C_max.get_value(), best)
else:
if verb > 0:
print('c Best C_max:', C_max.solution())
outcome = (max_infeasible + 1, C_max.get_value(), best)
return outcome
###############################################
########### main solver function ###########
###############################################
def solve(param):
jsp = JSP(param['data'])
lib = param['solver']
verb = param['verbose']
model = JSP_Model(jsp)
if lib == 'Mistral':
solver = model.load(lib, model.sequence)
else:
solver = model.load(lib)
solver.setHeuristic('Scheduling', 'Promise')
solver.setVerbosity(param['verbose']-1)
(lb, ub) = (jsp.lower_bound()-1, jsp.upper_bound())
(lb, ub, best) = dichotomic_search(model, solver, lb, ub, verb, param['tcutoff'])
if verb > 0:
print('start branch & bound in ['+str(lb)+'..'+str(ub)+']')
if lb + 1 < ub:
(lb, ub, best) = branch_and_bound(model, lib, lb, ub, verb, best)
## finalize the solution (tasks)
solver.reset()
if lib == 'Mistral':
for disjunct in model.sequence:
solver.post(disjunct == best[disjunct])
solver.propagate()
for task in model.tasks:
solver.post(task == task.get_min())
solver.propagate()
best = solver.get_solution()
schedule = [[-1]*ub for job in jsp.job]
index = 0
for machine in jsp.machine:
index += 1
for m in machine:
start = model.Jobs[m].get_value()
for i in range(model.Jobs[m].duration):
schedule[m[0]][start+i] = index
out = ''
if solver.is_sat():
out = str(schedule)
out += ('\nNodes: ' + str(solver.getNodes()))
return out
if param['print'] == 'yes':
###############################################
############# Output (Matplotlib) #############
###############################################
print('\n display schedule')
width = 60
print_schedule = []
for row in schedule:
print_schedule.extend([row]*width)
import pylab
pylab.yticks(pylab.arange(int(width / 2), width * (len(jsp.job) + 1), width), ['job' + str(i + 1) for i in range(len(jsp.job))])
cmap = pylab.cm.get_cmap('jet', len(jsp.machine)+1)
cmap.set_under(color='w')
pylab.imshow(print_schedule, cmap=cmap, interpolation='nearest', vmin=0)
#pylab.colorbar()
pylab.show()
default = {'solver': 'Mistral', 'data': 'data/tiny_jsp.txt', 'print': 'no', 'verbose': 1, 'tcutoff': 3}
if __name__ == '__main__':
param = input(default)
    print(solve(param))
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_rhino/conduits/labels.py

from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from System.Drawing.Color import FromArgb
from Rhino.Geometry import Point3d
from compas.utilities import iterable_like
from compas.utilities import is_sequence_of_iterable
from .base import BaseConduit
class LabelsConduit(BaseConduit):
"""A Rhino display conduit for labels.
Parameters
----------
labels : list[tuple[[float, float, float] | :class:`~compas.geometry.Point`, str]]
A list of label tuples.
Each tuple contains a position and text for the label.
color : list[tuple[tuple[int, int, int], tuple[int, int, int]]], optional
The colors of the labels.
Each color is a tuple with a background color and a text color.
The default background color is :attr:`LabelsConduit.default_color`,
and the default text color is :attr:`LabelsConduit.default_textcolor`.
Attributes
----------
color : list[tuple[System.Drawing.Color, System.Drawing.Color]]
A color specification per label.
labels : list[tuple[[float, float, float] | :class:`~compas.geometry.Point`, str]]
A list of label tuples.
Each tuple contains a position and text for the label.
Class Attributes
----------------
default_color : System.Drawing.Color
The default background color is ``FromArgb(0, 0, 0)``.
default_textcolor : System.Drawing.Color
The default text color is ``FromArgb(255, 255, 255)``.
Examples
--------
.. code-block:: python
from random import randint
from compas_rhino.conduits import LabelsConduit
labels = [([1.0 * randint(0, 100), 1.0 * randint(0, 100), 0.0], str(i)) for i in range(100)]
conduit = LabelsConduit(labels)
with conduit.enabled():
for i in range(100):
conduit.labels = [([1.0 * randint(0, 100), 1.0 * randint(0, 100), 0.0], str(i)) for i in range(100)]
conduit.redraw(pause=0.1)
"""
default_color = FromArgb(0, 0, 0)
default_textcolor = FromArgb(255, 255, 255)
def __init__(self, labels, color=None, **kwargs):
super(LabelsConduit, self).__init__(**kwargs)
self._color = None
self.labels = labels or []
self.color = color
@property
def color(self):
        return self._color
@color.setter
def color(self, color):
if not color:
return
if not is_sequence_of_iterable(color[0]):
# the first item in the list should be a tuple of colors
# if not, wrap the tuple
color = [color]
color = [
(FromArgb(*bg), FromArgb(*text))
for bg, text in iterable_like(self.labels, color, (self.default_color, self.default_textcolor))
]
self._color = color
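    # Illustrative note (not part of the original module; RGB values are
    # arbitrary): a single (background, text) pair is wrapped into a
    # one-element list, and any label without an explicit pair falls back to
    # (default_color, default_textcolor) via iterable_like.
    #
    #     conduit.color = ((255, 0, 0), (255, 255, 255))   # first label only
    #     conduit.color = [((255, 0, 0), (0, 0, 0)),
    #                      ((0, 255, 0), (0, 0, 0))]       # one pair per label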
def DrawForeground(self, e):
"""Draw the labels as text dots.
Parameters
----------
e : Rhino.Display.DrawEventArgs
Returns
-------
None
"""
for i, (pos, text) in enumerate(self.labels):
if self.color:
color, textcolor = self.color[i]
e.Display.DrawDot(Point3d(*pos), text, color, textcolor)
else:
                e.Display.DrawDot(Point3d(*pos), text, self.default_color, self.default_textcolor)
/dipex-4.54.5.tar.gz/dipex-4.54.5/integrations/ad_integration/utils.py

from collections.abc import Mapping
from functools import wraps
class AttrDict(dict):
"""Enable dot.notation access for a dict object.
Example:
script_result = AttrDict({"exit_code": 0})
self.assertEqual(script_result.exit_code, 0)
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__ # type: ignore
__delattr__ = dict.__delitem__ # type: ignore
def recursive_dict_update(original, updates):
"""Recursively update 'original' with keys from 'updates'.
Example:
original = {'alfa': {'beta': 2, 'charlie': 3}},
updates = {'alfa': {'beta': 4}}
# Non recursive update
updated = {**original, **updates}
self.assertEqual(updated, {'alfa': {'beta': 4}})
# Recursive update
r_updated = recursive_dict_update(original, updates)
self.assertEqual(r_updated, {'alfa': {'beta': 4, 'charlie': 3}})
Returns:
dict: modified 'original'
"""
for key, value in updates.items():
if isinstance(value, Mapping):
original[key] = recursive_dict_update(original.get(key, {}), value)
else:
original[key] = value
return original
def dict_map(dicty, key_func=None, value_func=None, func=None):
"""Map the dict values.
Example:
input_dict = {1: 1, 2: 2, 3: 3}
output_dict = dict_map(input_dict, value_func=lambda value: value ** 2)
        self.assertEqual(output_dict, {1: 1, 2: 4, 3: 9})
Returns:
dict: A dict where func has been applied to every value.
"""
def identity(x):
return x
def tuple_identity(x, y):
return (x, y)
def help_call(func):
def inner(x, **kwargs):
try:
return func(x, **kwargs)
except TypeError:
return func(x)
return inner
key_func = help_call(key_func or identity)
value_func = help_call(value_func or identity)
func = func or tuple_identity
return dict(
[
func(key_func(key, value=value), value_func(value, key=key))
for key, value in dicty.items()
]
)
def dict_partition(func, dicty):
"""Partition the input dict into two using the predicate function.
Example:
        input_dict = {0: 'value', 1: 'value', 2: 'value'}
odd_dict, even_dict = dict_partition(
            lambda key, value: key % 2 == 0, input_dict
)
self.assertEqual(odd_dict, {1: 'value'})
self.assertEqual(even_dict, {0: 'value', 2: 'value'})
Returns:
(dict, dict): A dict containing key-value pairs that failed the
predicate and a dict containing the key-value pairs
that passed the predicate.
"""
falsy, truesy = {}, {}
for key, value in dicty.items():
write_dict = truesy if func(key, value) else falsy
write_dict[key] = value
return falsy, truesy
def dict_filter(func, dicty):
return dict_partition(func, dicty)[1]
def dict_exclude(dicty, keys):
return dict_filter(lambda key, value: key not in keys, dicty)
def dict_subset(dicty, keys):
return dict_filter(lambda key, value: key in keys, dicty)
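# Example sketch (added for illustration, in the same style as the docstring
# examples above):
#
#     input_dict = {'alfa': 1, 'beta': 2, 'gamma': 3}
#     dict_subset(input_dict, ['alfa', 'beta'])    # -> {'alfa': 1, 'beta': 2}
#     dict_exclude(input_dict, ['alfa', 'beta'])   # -> {'gamma': 3}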
def duplicates(iterable):
"""Return set of duplicates from iterable.
Example:
input_list = [1, 5, 2, 4, 2, 1]
dup_set = duplicates(input_list)
self.assertEqual(dup_set, {1, 2})
Returns:
set: A set of the elements which are duplicates.
"""
seen = set()
return set(x for x in iterable if x in seen or seen.add(x))
def lower_list(listy):
"""Convert each element in the list to lower-case.
Example:
result = lower_list(['Alfa', 'BETA', 'gamma'])
self.assertEqual(result, ['alfa', 'beta', 'gamma'])
Args:
listy: The list of strings to force into lowercase.
Returns:
list: A list where all contained the strings are lowercase.
"""
return [x.lower() for x in listy]
def apply(func):
"""Return decorated / applied version of func.
Example:
@apply
def tuple_expand(num, char):
return char * num
elements = [(1,'a'), (2, 'b'), (3, 'c')]
elements = list(map(tuple_expand, elements))
self.assertEqual(elements, ['a', 'bb', 'ccc'])
Args:
func: The function to apply arguments for
Returns:
wrapped function: Which has its one argument applied.
"""
@wraps(func)
def applied(tup):
return func(*tup)
    return applied
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojo/cldr/nls/he/gregorian.js

define(
"dojo/cldr/nls/he/gregorian", //begin v1.x content
{
"field-weekday": "יום בשבוע",
"dateFormatItem-yQQQ": "y QQQ",
"dateFormatItem-yMEd": "EEE, d.M.yyyy",
"dateFormatItem-MMMEd": "E, d בMMM",
"eraNarrow": [
"לפנה״ס",
"לסה״נ"
],
"dateFormat-long": "d בMMMM y",
"months-format-wide": [
"ינואר",
"פברואר",
"מרס",
"אפריל",
"מאי",
"יוני",
"יולי",
"אוגוסט",
"ספטמבר",
"אוקטובר",
"נובמבר",
"דצמבר"
],
"dateFormatItem-EEEd": "EEE ה-d",
"dayPeriods-format-wide-pm": "אחה״צ",
"dateFormat-full": "EEEE, d בMMMM y",
"dateFormatItem-Md": "d/M",
"field-era": "תקופה",
"dateFormatItem-yM": "M.yyyy",
"months-standAlone-wide": [
"ינואר",
"פברואר",
"מרס",
"אפריל",
"מאי",
"יוני",
"יולי",
"אוגוסט",
"ספטמבר",
"אוקטובר",
"נובמבר",
"דצמבר"
],
"timeFormat-short": "HH:mm",
"quarters-format-wide": [
"רבעון 1",
"רבעון 2",
"רבעון 3",
"רבעון 4"
],
"timeFormat-long": "HH:mm:ss z",
"field-year": "שנה",
"dateFormatItem-yMMM": "MMM y",
"dateFormatItem-yQ": "yyyy Q",
"dateFormatItem-yyyyMMMM": "MMMM y",
"field-hour": "שעה",
"dateFormatItem-MMdd": "dd/MM",
"months-format-abbr": [
"ינו",
"פבר",
"מרס",
"אפר",
"מאי",
"יונ",
"יול",
"אוג",
"ספט",
"אוק",
"נוב",
"דצמ"
],
"dateFormatItem-yyQ": "Q yy",
"timeFormat-full": "HH:mm:ss zzzz",
"field-day-relative+0": "היום",
"field-day-relative+1": "מחר",
"field-day-relative+2": "מחרתיים",
"dateFormatItem-H": "HH",
"field-day-relative+3": "בעוד שלושה ימים",
"months-standAlone-abbr": [
"ינו׳",
"פבר׳",
"מרס",
"אפר׳",
"מאי",
"יונ׳",
"יול׳",
"אוג׳",
"ספט׳",
"אוק׳",
"נוב׳",
"דצמ׳"
],
"quarters-format-abbr": [
"רבעון 1",
"רבעון 2",
"רבעון 3",
"רבעון 4"
],
"quarters-standAlone-wide": [
"רבעון 1",
"רבעון 2",
"רבעון 3",
"רבעון 4"
],
"dateFormatItem-M": "L",
"days-standAlone-wide": [
"יום ראשון",
"יום שני",
"יום שלישי",
"יום רביעי",
"יום חמישי",
"יום שישי",
"יום שבת"
],
"dateFormatItem-MMMMd": "d בMMMM",
"dateFormatItem-yyMMM": "MMM yyyy",
"timeFormat-medium": "HH:mm:ss",
"dateFormatItem-Hm": "HH:mm",
"quarters-standAlone-abbr": [
"רבעון 1",
"רבעון 2",
"רבעון 3",
"רבעון 4"
],
"eraAbbr": [
"לפנה״ס",
"לסה״נ"
],
"field-minute": "דקה",
"field-dayperiod": "לפה״צ/אחה״צ",
"days-standAlone-abbr": [
"יום א׳",
"יום ב׳",
"יום ג׳",
"יום ד׳",
"יום ה׳",
"יום ו׳",
"שבת"
],
"dateFormatItem-d": "d",
"dateFormatItem-ms": "mm:ss",
"field-day-relative+-1": "אתמול",
"field-day-relative+-2": "שלשום",
"field-day-relative+-3": "לפני שלושה ימים",
"dateFormatItem-MMMd": "d בMMM",
"dateFormatItem-MEd": "E, M-d",
"dateFormatItem-yMMMM": "MMMM y",
"field-day": "יום",
"days-format-wide": [
"יום ראשון",
"יום שני",
"יום שלישי",
"יום רביעי",
"יום חמישי",
"יום שישי",
"יום שבת"
],
"field-zone": "אזור",
"dateFormatItem-yyyyMM": "MM/yyyy",
"dateFormatItem-y": "y",
"dateFormatItem-yyMM": "MM/yy",
"dateFormatItem-hm": "h:mm a",
"days-format-abbr": [
"יום א׳",
"יום ב׳",
"יום ג׳",
"יום ד׳",
"יום ה׳",
"יום ו׳",
"שבת"
],
"eraNames": [
"לפני הספירה",
"לספירה"
],
"days-format-narrow": [
"א",
"ב",
"ג",
"ד",
"ה",
"ו",
"ש"
],
"field-month": "חודש",
"days-standAlone-narrow": [
"א",
"ב",
"ג",
"ד",
"ה",
"ו",
"ש"
],
"dateFormatItem-MMM": "LLL",
"dayPeriods-format-wide-am": "לפנה״צ",
"dateFormatItem-MMMMEd": "E, d בMMMM",
"dateFormat-short": "dd/MM/yy",
"field-second": "שנייה",
"dateFormatItem-yMMMEd": "EEE, d בMMM y",
"dateFormatItem-Ed": "E ה-d",
"field-week": "שבוע",
"dateFormat-medium": "d בMMM yyyy",
"dateFormatItem-mmss": "mm:ss",
"dateFormatItem-Hms": "HH:mm:ss",
"dateFormatItem-hms": "h:mm:ss a",
"dateFormatItem-yyyy": "y"
}
//end v1.x content
);
/Glams-0.2.5.zip/Glams-0.2.5/glams/glams/databaseInterface/reset.py

import os
if __name__=='__main__':
from connect import db, importconfig
rootDirectory=os.path.abspath('../..')
else:
from .connect import db, importconfig
rootDirectory=os.getcwd()
import hashlib
def reset():
#How to add a column to an existing table without reset: ALTER TABLE oldtable ADD newfield BOOL;
print('Starting Database Reset')
db.execute("DROP TABLE IF EXISTS strains",commit=True); print(' Dropped Table "strains"')
db.execute("DROP TABLE IF EXISTS litters",commit=True); print(' Dropped Table "litters"')
db.execute("DROP TABLE IF EXISTS care_taker",commit=True); print(' Dropped Table "care_taker"')
db.execute("DROP TABLE IF EXISTS housing",commit=True); print(' Dropped Table "housing"')
db.execute("DROP TABLE IF EXISTS cages",commit=True); print(' Dropped Table "cages"')
db.execute("DROP TABLE IF EXISTS experiments",commit=True); print(' Dropped Table "experiments"')
db.execute("DROP TABLE IF EXISTS genetics",commit=True); print(' Dropped Table "genetics"')
db.execute("DROP TABLE IF EXISTS lineage",commit=True); print(' Dropped Table "lineage"')
db.execute("DROP TABLE IF EXISTS genes",commit=True); print(' Dropped Table "genes"')
db.execute("DROP TABLE IF EXISTS lab_members",commit=True); print(' Dropped Table "lab_members"')
db.execute("DROP TABLE IF EXISTS mice",commit=True); print(' Dropped Table "mice"')
queries=[]
queries.append("""
CREATE TABLE mice (
id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id),
name VARCHAR(255) CHARACTER SET utf8 collate utf8_bin NOT NULL UNIQUE,
strain VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
genotyped BOOL,
sex VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
life_status VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
breeding_status VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
DOB DATETIME,
DOD DATETIME,
cause_of_death VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
tag VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
notes TEXT(65535) CHARACTER SET utf8 collate utf8_bin
)""")
queries.append("""
CREATE TABLE lab_members (
id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id),
name VARCHAR(255) CHARACTER SET utf8 collate utf8_bin UNIQUE,
password VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
email VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
viewtype VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
columns VARCHAR(500) CHARACTER SET utf8 collate utf8_bin,
sortby VARCHAR(255) CHARACTER SET utf8 collate utf8_bin
)""")
queries.append("""
CREATE TABLE genes (
id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id),
name VARCHAR(255) CHARACTER SET utf8 collate utf8_bin UNIQUE,
default_presence BOOL
)""")
queries.append("""
CREATE TABLE strains (
id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id),
name VARCHAR(255) CHARACTER SET utf8 collate utf8_bin UNIQUE
)""")
queries.append("""
CREATE TABLE cages
(
id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id),
name VARCHAR(255) CHARACTER SET utf8 collate utf8_bin NOT NULL UNIQUE,
active BOOL,
expectingpl BOOL,
notes TEXT(65535) CHARACTER SET utf8 collate utf8_bin,
cagegroup VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
date_activated DATE,
date_inactivated DATE,
location VARCHAR(255) CHARACTER SET utf8 collate utf8_bin
)""")
queries.append("""
CREATE TABLE genetics
(
mouse_id INT NOT NULL,
FOREIGN KEY(mouse_id) REFERENCES mice(id) ON DELETE CASCADE,
gene_id INT NOT NULL,
FOREIGN KEY(gene_id) REFERENCES genes(id) ON DELETE CASCADE,
zygosity VARCHAR(255) CHARACTER SET utf8 collate utf8_bin
)""")
queries.append("""
CREATE TABLE lineage
(
mother_id INT NOT NULL,
FOREIGN KEY(mother_id) REFERENCES mice(id) ON DELETE CASCADE,
father_id INT NOT NULL,
FOREIGN KEY(father_id) REFERENCES mice(id) ON DELETE CASCADE,
child_id INT NOT NULL UNIQUE,
FOREIGN KEY(child_id) REFERENCES mice(id) ON DELETE CASCADE
)""")
queries.append("""
CREATE TABLE experiments
(
id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id),
mouse_id INT NOT NULL,
FOREIGN KEY(mouse_id) REFERENCES mice(id) ON DELETE CASCADE,
lab_member_id INT NOT NULL,
FOREIGN KEY(lab_member_id) REFERENCES lab_members(id) ON DELETE CASCADE,
date DATETIME,
description VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
notes TEXT(65535) CHARACTER SET utf8 collate utf8_bin,
status VARCHAR(255) CHARACTER SET utf8 collate utf8_bin,
filenames TEXT(65535) CHARACTER SET utf8 collate utf8_bin
)""")
queries.append("""
CREATE TABLE housing
(
mouse_id INT NOT NULL,
FOREIGN KEY(mouse_id) REFERENCES mice(id) ON DELETE CASCADE,
cage_id INT NOT NULL,
FOREIGN KEY(cage_id) REFERENCES cages(id) ON DELETE CASCADE,
start_date DATETIME,
end_date DATETIME,
currentcage BOOL
)""")
queries.append("""
CREATE TABLE care_taker
(
cage_id INT NOT NULL,
FOREIGN KEY(cage_id) REFERENCES cages(id) ON DELETE CASCADE,
lab_member_id INT NOT NULL,
FOREIGN KEY(lab_member_id) REFERENCES lab_members(id) ON DELETE CASCADE
)""")
queries.append("""
CREATE TABLE litters
(
cage_id INT NOT NULL,
FOREIGN KEY(cage_id) REFERENCES cages(id) ON DELETE CASCADE,
DOB DATETIME,
notes TEXT(65535) CHARACTER SET utf8 collate utf8_bin,
mother_id INT NOT NULL,
FOREIGN KEY(mother_id) REFERENCES mice(id) ON DELETE CASCADE,
father_id INT NOT NULL,
FOREIGN KEY(father_id) REFERENCES mice(id) ON DELETE CASCADE
)""")
for query in queries:
print('executing query')
db.execute(query,commit=True)
print('Reset Finished')
print('Adding admin.')
password='password'
config=importconfig()
salt=config['salt']
hashedpassword=hashlib.md5((password+salt).encode('utf-8')).hexdigest()
db.execute(""" INSERT INTO lab_members SET name='admin', password=%s, email='', viewtype='mouse', columns='mousename,,cagename,,cagename2,,genetics,,' """,(hashedpassword,))
print('Finished adding admin.')
if __name__=='__main__':
print('I turned this function off for safety')
    #reset()
/AlgebraicNumber-2.4.4.tar.gz/AlgebraicNumber-2.4.4/README.md

# AlgebraicNumber
[PyPI](https://pypi.python.org/pypi/AlgebraicNumber)
[Pipelines](https://gitlab.com/ydethe/algebraicnumber/pipelines)
[Coverage](https://codecov.io/gl/ydethe/algebraicnumber)
A library to manipulate algebraic numbers
## Documentation
To generate the documentation, run:
nox
The generated documentation is published at https://ydethe.gitlab.io/algebraicnumber/docs
## Usage
>>> z = AlgebraicNumber.unity() + AlgebraicNumber.imaginary()
>>> z.poly.printCoeff()
'[2,-2,1]'
>>> p = z*z.conj()
>>> p.poly.printCoeff()
'[-2,1]'
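
The coefficient lists are read from the constant term up: `z = 1 + i` has minimal polynomial `x^2 - 2x + 2` (`[2,-2,1]`), and `z*z.conj() = 2` has minimal polynomial `x - 2` (`[-2,1]`). A short continuation of the same session (the import path is an assumption, not taken from this README):

    >>> from AlgebraicNumber import AlgebraicNumber
    >>> q = AlgebraicNumber.unity() + AlgebraicNumber.unity()
    >>> q.poly.printCoeff()
    '[-2,1]'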
| PypiClean |
/HTSQL-2.3.3.tar.gz/HTSQL-2.3.3/src/htsql/core/tr/rewrite.py

from ..adapter import Utility, Adapter, adapt, adapt_many
from ..domain import BooleanDomain
from ..error import Error, translate_guard
from .coerce import coerce
from .flow import (Expression, QueryExpr, SegmentCode, Flow, RootFlow,
FiberTableFlow, QuotientFlow, ComplementFlow, MonikerFlow, ForkedFlow,
AttachFlow, ClippedFlow, LocatorFlow, FilteredFlow, OrderedFlow, Code,
LiteralCode, CastCode, RecordCode, IdentityCode, AnnihilatorCode,
FormulaCode, Unit, ColumnUnit, CompoundUnit, ScalarUnit,
AggregateUnitBase, AggregateUnit, KernelUnit, CoveringUnit)
from .signature import Signature, OrSig, AndSig, IsEqualSig, isformula
# FIXME: move `IfSig` and `SwitchSig` to `htsql.core.tr.signature`.
from .fn.signature import IfSig
class RewritingState(object):
"""
Encapsulates the state of the rewriting process.
State attributes:
`root` (:class:`htsql.core.tr.flow.RootFlow`)
The root data flow.
`mask` (:class:`htsql.core.tr.flow.Flow`)
The dominant flow; used to prune dependent flows on
the *unmasking* phase.
`collection` (list of :class:`htsql.core.tr.flow.Unit`)
A list of units accumulated on the *collecting* phase.
"""
def __init__(self):
# The root flow.
self.root = None
# The current mask flow.
self.mask = None
# Stack of saved previous mask flows.
self.mask_stack = []
# List of collected units.
self.collection = None
# Dictionaries caching the results of `rewrite`, `unmask` and `replace`
# phases.
self.rewrite_cache = {}
self.unmask_cache = {}
self.replace_cache = {}
def set_root(self, flow):
"""
Set the root data flow.
This function initializes the rewriting state.
`root` (:class:`htsql.core.tr.flow.RootFlow`)
The root flow.
"""
assert isinstance(flow, RootFlow)
# Check that it is not initialized already.
assert self.root is None
assert self.mask is None
assert self.collection is None
self.root = flow
self.mask = flow
self.collection = []
def flush(self):
"""
Clears the state.
"""
assert self.root is not None
assert self.mask is self.root
assert not self.mask_stack
self.root = None
self.mask = None
self.collection = None
self.rewrite_cache = {}
self.unmask_cache = {}
self.replace_cache = {}
def spawn(self):
"""
Creates an empty copy of the state.
"""
copy = RewritingState()
copy.set_root(self.root)
return copy
def push_mask(self, mask):
"""
Sets a new mask flow.
`mask` (:class:`htsql.core.tr.flow.Flow`)
A new mask flow.
"""
assert isinstance(mask, Flow)
self.mask_stack.append(self.mask)
self.mask = mask
def pop_mask(self):
"""
Restores the previous mask flow.
"""
self.mask = self.mask_stack.pop()
def memorize(self, expression, replacement):
"""
Memorizes a replacement node for the given expression node.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression node to replace.
`replacement` (:class:`htsql.core.tr.flow.Expression`)
The replacement.
"""
assert isinstance(expression, Expression)
assert isinstance(replacement, Expression)
assert expression not in self.replace_cache
self.replace_cache[expression] = replacement
def rewrite(self, expression):
"""
Rewrites the given expression node.
Returns an expression node semantically equivalent to the given node,
but optimized for compilation. May return the same node.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression to rewrite.
"""
# Check if the expression was already rewritten
if expression in self.rewrite_cache:
return self.rewrite_cache[expression]
# Apply `Rewrite` adapter.
replacement = rewrite(expression, self)
# Cache the output.
self.rewrite_cache[expression] = replacement
return replacement
def unmask(self, expression, mask=None):
"""
Unmasks the given expression node.
Unmasking prunes non-axial flow operations that are already
enforced by the mask flow.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression to unmask.
`mask` (:class:`htsql.core.tr.flow.Flow` or ``None``)
If set, specifies the mask to use; otherwise, the current
mask is to be used.
"""
# Set the new mask if provided.
if mask is not None:
self.push_mask(mask)
# The result of the unmasking operation depends on both the expression
# and the current mask, so they make a key in the cache.
key = (self.mask, expression)
# If the key is not in the cache, apply the `Unmask` adapter and store
# the result in the cache.
if key not in self.unmask_cache:
with translate_guard(expression):
replacement = Unmask.__invoke__(expression, self)
self.unmask_cache[key] = replacement
# Otherwise, fetch the result from the cache.
else:
replacement = self.unmask_cache[key]
# Restore the current mask.
if mask is not None:
self.pop_mask()
# Return the result of the operation.
return replacement
def collect(self, expression):
"""
Collects scalar and aggregate units from the given expression.
The collected units are stored in the state attribute
:attr:`collection`.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression to collect units from.
"""
with translate_guard(expression):
Collect.__invoke__(expression, self)
def recombine(self):
"""
Recombines scalar and aggregate units.
This process adds compilation hints to facilitate merging
similar scalar and aggregate units into shared SQL frames.
Updated units are stored in the replace cache.
"""
# Apply `Recombine` utility.
Recombine.__invoke__(self)
def replace(self, expression):
"""
Replaces the given expression with a recombined clone.
Returns a new expression node with scalar and aggregate units
recombined.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression to replace.
"""
# Check if the expression is in the cache.
if expression in self.replace_cache:
return self.replace_cache[expression]
# If not, apply the `Replace` adapter.
with translate_guard(expression):
replacement = Replace.__invoke__(expression, self)
# Store the result in the cache and return it.
self.replace_cache[expression] = replacement
return replacement
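# Illustrative sketch (an assumption, not from this module): a plausible
# driving order for the state above, inferred from the method docstrings --
# the real caller lives elsewhere in the package and `root_flow`/`expression`
# are placeholders.
#
#     state = RewritingState()
#     state.set_root(root_flow)              # initialize with the root flow
#     expression = state.rewrite(expression) # optimize for compilation
#     expression = state.unmask(expression)  # prune operations forced by mask
#     state.collect(expression)              # gather scalar/aggregate units
#     state.recombine()                      # add hints for shared frames
#     expression = state.replace(expression) # clone with recombined units
#     state.flush()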
class Recombine(Utility):
"""
Recombines scalar and aggregate units.
This utility adds compilation hints to collected scalar and aggregate
units that help the compiler to use shared frames for similar units.
`state` (:class:`RewritingState`)
The current state of the rewriting process.
"""
def __init__(self, state):
assert isinstance(state, RewritingState)
self.state = state
def __call__(self):
# Recombine scalar units.
self.recombine_scalars()
# Recombine aggregate units.
self.recombine_aggregates()
def recombine_scalars(self):
# Recombines scalar units in the collection.
# Duplicate unit nodes.
duplicates = set()
# List of unique flows of the units.
flows = []
# A mapping: flow -> units with this flow.
flow_to_units = {}
# Iterate over all collected units.
for unit in self.state.collection:
# We are only interested in scalar units.
if not isinstance(unit, ScalarUnit):
continue
# Skip duplicates.
if unit in duplicates:
continue
duplicates.add(unit)
# If the unit flow is new, add it to the list of unique flows.
flow = unit.flow
if flow not in flow_to_units:
flows.append(flow)
flow_to_units[flow] = []
# Store the unit.
flow_to_units[flow].append(unit)
# Iterate over all unique unit flows.
for flow in flows:
# Take all units with this flow.
units = flow_to_units[flow]
# Recombine the units.
self.recombine_scalar_batch(flow, units)
def recombine_aggregates(self):
# Recombine aggregate units in the collection.
# Duplicate unit nodes.
duplicates = set()
# Unique pairs of `(plural_flow, flow)` taken from aggregate units.
flow_pairs = []
# A mapping: (plural_flow, flow) -> associated aggregate units.
flow_pair_to_units = {}
# Note that we strip top filtering operations from the plural flow;
        # that's because aggregates whose plural flows differ only by
# filtering could still use a shared frame; so we need them in
# the same batch.
# Iterate over all collected units.
for unit in self.state.collection:
# We are only interested in aggregate units.
if not isinstance(unit, AggregateUnit):
continue
# Skip duplicates.
if unit in duplicates:
continue
duplicates.add(unit)
# The base flow of the unit.
flow = unit.flow
# The flow of the unit argument.
plural_flow = unit.plural_flow
# Strip top filtering operations from the plural flow.
while isinstance(plural_flow, FilteredFlow):
plural_flow = plural_flow.base
# The flow pair associated with the unit.
pair = (plural_flow, flow)
# Check if the flow pair is new.
if pair not in flow_pair_to_units:
flow_pairs.append(pair)
flow_pair_to_units[pair] = []
# Store the unit.
flow_pair_to_units[pair].append(unit)
# Iterate over all unique flow pairs.
for pair in flow_pairs:
plural_flow, flow = pair
# Aggregates associated with the pair.
units = flow_pair_to_units[pair]
# Recombine the aggregates.
self.recombine_aggregate_batch(plural_flow, flow, units)
def recombine_scalar_batch(self, flow, units):
# Recombines a batch of scalar units sharing the same unit flow.
# Nothing to recombine if there are less than 2 units.
if len(units) <= 1:
return
# Expressions associated with the units.
codes = [unit.code for unit in units]
# Recombine the unit flow and unit expressions against a blank state.
substate = self.state.spawn()
substate.collect(flow)
for code in codes:
substate.collect(code)
substate.recombine()
flow = substate.replace(flow)
codes = [substate.replace(code) for code in codes]
# Iterate over the units, generating a replacement for each.
for idx, unit in enumerate(units):
# New unit expression.
code = codes[idx]
# Expressions for companion units to be injected together with
# the selected unit.
companions = codes[:idx]+codes[idx+1:]
# Generate and memorize the replacement.
batch = unit.clone(code=code, flow=flow,
companions=companions)
self.state.memorize(unit, batch)
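    # Illustrative note (not from the original source): for three scalar units
    # u0, u1, u2 over one flow with codes c0, c1, c2, the loop above memorizes
    #
    #     u0 -> u0.clone(code=c0, flow=flow, companions=[c1, c2])
    #     u1 -> u1.clone(code=c1, flow=flow, companions=[c0, c2])
    #     u2 -> u2.clone(code=c2, flow=flow, companions=[c0, c1])
    #
    # so whichever unit gets injected first carries the other expressions
    # along into the same frame.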
def recombine_aggregate_batch(self, plural_flow, flow, units):
# Recombines a batch of aggregate units sharing the same
# unit and operand flows.
# This flag indicates that the units belong to a quotient
# flow and the unit operands belong to the complement to
# the quotient. In this case, the aggregates could reuse
# the frame that generates the quotient flow.
is_quotient = (isinstance(flow, QuotientFlow) and
isinstance(plural_flow, ComplementFlow) and
plural_flow.base == flow)
# Nothing to recombine if we don't have at least two units.
# However, continue in the case where the aggregate could be
# embedded into a quotient frame.
if len(units) <= 1 and not is_quotient:
return
# The common base flow of all units.
base_flow = flow
# Plural flows of the units may differ from each other
# since they may have extra filters attached to the common parent.
# Here we find the longest common ancestor of all plural flows.
# Candidate common ancestors, longest last.
candidate_flows = []
candidate_flow = units[0].plural_flow
candidate_flows.append(candidate_flow)
while isinstance(candidate_flow, FilteredFlow):
candidate_flow = candidate_flow.base
candidate_flows.append(candidate_flow)
candidate_flows.reverse()
# Iterate over the units reducing the number of common ancestors.
for unit in units[1:]:
# Ancestors of the selected unit, longest first.
alternate_flows = []
alternate_flow = unit.plural_flow
alternate_flows.append(alternate_flow)
while isinstance(alternate_flow, FilteredFlow):
alternate_flow = alternate_flow.base
alternate_flows.append(alternate_flow)
alternate_flows.reverse()
# Find the common prefix of `candidate_flows` and
# `alternate_flows`.
if len(alternate_flows) < len(candidate_flows):
candidate_flows = candidate_flows[:len(alternate_flows)]
for idx in range(len(candidate_flows)):
if candidate_flows[idx] != alternate_flows[idx]:
assert idx > 0
candidate_flows = candidate_flows[:idx]
break
# Take the longest of the common ancestors.
shared_flow = candidate_flows[-1]
# But when the aggregate is over a complement, ignore any shared
# filter and take the axis flow instead; that's because in this case,
# applying filters does not provide any performance benefits, but
# prevents the units from being embedded into the quotient frame.
if isinstance(plural_flow, ComplementFlow):
shared_flow = plural_flow
# Move non-shared filters from the operand flow to the operand, i.e.,
# unit(plural_flow{op}?filter) => unit(plural_flow{if(filter,op)})
# Rewritten operands.
codes = []
# Non-shared filters, to be `OR`-ed and applied to the shared flow.
filters = []
# Iterate over the given aggregates.
for unit in units:
# The original operand of the aggregate.
code = unit.code
# A list of all non-shared filters on the unit operand flow.
code_filters = []
unit_flow = unit.plural_flow
while unit_flow != shared_flow:
code_filters.append(unit_flow.filter)
unit_flow = unit_flow.base
# If there are any filters, we need to rewrite the operand.
if code_filters:
# Merge all filters using `AND`.
if len(code_filters) > 1:
code_filter = FormulaCode(AndSig(),
coerce(BooleanDomain()),
unit.flow.binding,
ops=code_filters)
else:
[code_filter] = code_filters
# Add the filter to the list of all non-shared filters.
filters.append(code_filter)
# Rewrite the operand:
# op => if(filter,op)
# FIXME: we assume `code` is a formula with an aggregate
# signature, and that the aggregate ignores `NULL` values;
# need a way to check this and abort if it's not true.
op = code.op
op = FormulaCode(IfSig(), op.domain, op.binding,
predicates=[code_filter],
consequents=[op],
alternative=None)
code = code.clone(op=op)
# Add the (possibly) rewritten operand to the list.
codes.append(code)
# Check if we can apply the non-shared filters to the shared
# flow. Technically, it is not necessary, but may improve
# performance in some cases. We only do it if every
# aggregate has some filter applied on top of the shared flow.
# Also, we don't apply the filters on top of a complement flow
# as it cannot improve the performance.
if (not isinstance(shared_flow, ComplementFlow) and
all(unit.plural_flow != shared_flow for unit in units)):
if len(filters) > 1:
filter = FormulaCode(OrSig(), coerce(BooleanDomain()),
shared_flow.binding, ops=filters)
else:
[filter] = filters
shared_flow = FilteredFlow(shared_flow, filter,
shared_flow.binding)
# Now that the content of new units is generated, recombine
# it against a blank state.
substate = self.state.spawn()
substate.collect(base_flow)
substate.collect(shared_flow)
for code in codes:
substate.collect(code)
substate.recombine()
base_flow = substate.replace(base_flow)
shared_flow = substate.replace(shared_flow)
codes = [substate.replace(code) for code in codes]
# Iterate over original units generating replacements.
for idx, unit in enumerate(units):
# The new unit expression and companions.
code = codes[idx]
# Generate and memorize the replacement.
companions = codes[:idx]+codes[idx+1:]
batch = unit.clone(code=code, plural_flow=shared_flow,
flow=base_flow, companions=companions)
self.state.memorize(unit, batch)
# The case when the aggregates could be embedded into the quotient
# frame.
if is_quotient:
base_flow = base_flow.clone(companions=codes)
self.state.memorize(flow, base_flow)
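# To illustrate the batching above: if two aggregates over the same base
# flow also share the same plural flow -- say, a count and a sum over the
# same related table -- each replacement unit is cloned with the other
# unit's expression in its `companions` list, so the compiler can
# presumably evaluate the whole batch in one shared frame instead of one
# correlated subframe per unit.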
class RewriteBase(Adapter):
"""
Applies the rewriting process to the given node.
This is a base class for all rewriting adapters, it encapsulates
common attributes and methods shared by all its subclasses.
Most rewriting adapters have the following signature:
Rewrite: (Expression, RewritingState) -> Expression
The adapters are polymorphic on the first argument.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression node to rewrite.
`state` (:class:`RewritingState`)
The current state of rewriting process.
"""
adapt(Expression)
def __init__(self, expression, state):
assert isinstance(expression, Expression)
assert isinstance(state, RewritingState)
self.expression = expression
self.state = state
def __call__(self):
# Must not be reachable.
raise NotImplementedError("the rewrite adapter is not implemented"
" for a %r node" % self.expression)
class Rewrite(RewriteBase):
"""
Rewrites the given expression node.
Returns an expression node semantically equivalent to the given node,
but optimized for compilation. May return the same node.
"""
class Unmask(RewriteBase):
"""
Unmasks an expression node.
Unmasking prunes non-axial flow nodes that are already enforced
by the current mask flow.
"""
class Collect(RewriteBase):
"""
Collects scalar and aggregate units in the given expression node.
"""
class Replace(RewriteBase):
"""
Replaces the given expression with a recombined copy.
"""
class RewriteQuery(Rewrite):
adapt(QueryExpr)
def __call__(self):
# Initialize the rewriting state.
self.state.set_root(RootFlow(None, self.expression.binding))
# Rewrite the segment, if present.
segment = None
if self.expression.segment is not None:
segment = self.expression.segment
# Rewrite: simplify expressions matching certain patterns.
segment = self.state.rewrite(segment)
# Unmask: eliminate redundant non-axial flow operations.
segment = self.state.unmask(segment)
# Collect: gather scalar and aggregate units.
self.state.collect(segment)
# Recombine: attach compilation hints to the collected units.
self.state.recombine()
# Replace: replace units with recombined copies.
segment = self.state.replace(segment)
# Clear the state.
self.state.flush()
# Clone the query node with a rewritten segment.
return self.expression.clone(segment=segment)
class RewriteSegment(Rewrite):
adapt(SegmentCode)
def __call__(self):
# Rewrite the output flow and output record.
root = self.state.rewrite(self.expression.root)
flow = self.state.rewrite(self.expression.flow)
code = self.state.rewrite(self.expression.code)
return self.expression.clone(root=root, flow=flow, code=code)
class UnmaskSegment(Unmask):
adapt(SegmentCode)
def __call__(self):
# Unmask the output record against the output flow.
code = self.state.unmask(self.expression.code,
mask=self.expression.flow)
# Unmask the flow itself.
flow = self.state.unmask(self.expression.flow,
mask=self.expression.root)
root = self.state.unmask(self.expression.root)
# Produce a clone of the segment with new flow and output columns.
return self.expression.clone(root=root, flow=flow, code=code)
class CollectSegment(Collect):
adapt(SegmentCode)
def __call__(self):
pass
class ReplaceSegment(Replace):
adapt(SegmentCode)
def __call__(self):
# Recombine the content of the segment against a blank state.
substate = self.state.spawn()
substate.collect(self.expression.root)
substate.collect(self.expression.flow)
substate.collect(self.expression.code)
substate.recombine()
root = substate.replace(self.expression.root)
flow = substate.replace(self.expression.flow)
code = substate.replace(self.expression.code)
return self.expression.clone(root=root, flow=flow, code=code)
class RewriteFlow(Rewrite):
adapt(Flow)
def __init__(self, flow, state):
# Overridden to rename the attribute.
super(RewriteFlow, self).__init__(flow, state)
self.flow = flow
def __call__(self):
# No-op for the root flow.
if self.flow.base is None:
return self.flow
# Otherwise, apply the adapter to the parent flow.
base = self.state.rewrite(self.flow.base)
return self.flow.clone(base=base)
class UnmaskFlow(Unmask):
adapt(Flow)
def __init__(self, flow, state):
# Overridden to rename the attribute.
super(UnmaskFlow, self).__init__(flow, state)
self.flow = flow
def __call__(self):
# No-op for the root flow.
if self.flow.base is None:
return self.flow
# Apply the adapter to the parent flow.
base = self.state.unmask(self.flow.base)
return self.flow.clone(base=base)
class CollectFlow(Collect):
adapt(Flow)
def __init__(self, flow, state):
# Overridden to rename the attribute.
super(CollectFlow, self).__init__(flow, state)
self.flow = flow
def __call__(self):
# No-op for the root flow.
if self.flow.base is None:
return
# Apply the adapter to the parent flow.
self.state.collect(self.flow.base)
class ReplaceFlow(Replace):
adapt(Flow)
def __init__(self, flow, state):
# Overridden to rename the attribute.
super(ReplaceFlow, self).__init__(flow, state)
self.flow = flow
def __call__(self):
# No-op for the root flow.
if self.flow.base is None:
return self.flow
# Otherwise, replace the parent flow.
base = self.state.replace(self.flow.base)
return self.flow.clone(base=base)
class RewriteQuotient(RewriteFlow):
adapt(QuotientFlow)
def __call__(self):
# Apply the adapter to all sub-nodes.
base = self.state.rewrite(self.flow.base)
seed = self.state.rewrite(self.flow.family.seed)
kernels = [self.state.rewrite(code)
for code in self.flow.family.kernels]
return self.flow.clone(base=base, seed=seed, kernels=kernels)
class UnmaskQuotient(UnmaskFlow):
adapt(QuotientFlow)
def __call__(self):
# Unmask the kernel against the seed flow.
kernels = [self.state.unmask(code, mask=self.flow.family.seed)
for code in self.flow.family.kernels]
# Verify that the kernel is not scalar. We can't do it earlier
# because unmasking may remove phantom units.
if all(not code.units for code in kernels):
raise Error("Found an empty or constant kernel")
# Unmask the seed against the quotient parent flow.
seed = self.state.unmask(self.flow.family.seed, mask=self.flow.base)
# Unmask the parent flow against the current mask.
base = self.state.unmask(self.flow.base)
return self.flow.clone(base=base, seed=seed, kernels=kernels)
class ReplaceQuotient(ReplaceFlow):
adapt(QuotientFlow)
def __call__(self):
# Replace the parent flow.
base = self.state.replace(self.flow.base)
# Create a new empty state.
substate = self.state.spawn()
# Collect/recombine/replace units in the seed and kernel expressions
# against a fresh state.
substate.collect(self.flow.seed)
for code in self.flow.kernels:
substate.collect(code)
substate.recombine()
seed = substate.replace(self.flow.seed)
kernels = [substate.replace(code)
for code in self.flow.kernels]
# Produce a recombined node.
return self.flow.clone(base=base, seed=seed, kernels=kernels)
class RewriteMoniker(RewriteFlow):
adapt_many(MonikerFlow,
ClippedFlow)
def __call__(self):
# Apply the adapter to all child nodes.
base = self.state.rewrite(self.flow.base)
seed = self.state.rewrite(self.flow.seed)
return self.flow.clone(base=base, seed=seed)
class UnmaskMoniker(UnmaskFlow):
adapt_many(MonikerFlow,
ClippedFlow)
def __call__(self):
# Unmask the seed flow against the parent flow.
seed = self.state.unmask(self.flow.seed, mask=self.flow.base)
# Unmask the parent flow against the current mask.
base = self.state.unmask(self.flow.base)
return self.flow.clone(base=base, seed=seed)
class ReplaceMoniker(Replace):
adapt_many(MonikerFlow,
ClippedFlow)
def __call__(self):
# Replace the parent flow.
base = self.state.replace(self.flow.base)
# Recombine the seed flow against a fresh state.
substate = self.state.spawn()
substate.collect(self.flow.seed)
substate.recombine()
seed = substate.replace(self.flow.seed)
# Produce a recombined flow node.
return self.flow.clone(base=base, seed=seed)
class RewriteForked(RewriteFlow):
adapt(ForkedFlow)
def __call__(self):
# Apply the adapter to all child nodes.
base = self.state.rewrite(self.flow.base)
seed = self.state.rewrite(self.flow.seed)
kernels = [self.state.rewrite(code)
for code in self.flow.kernels]
return self.flow.clone(base=base, seed=seed, kernels=kernels)
class UnmaskForked(UnmaskFlow):
adapt(ForkedFlow)
def __call__(self):
# Prune all but trailing non-axial operations from the seed flow.
seed = self.state.unmask(self.flow.seed, mask=self.flow.ground)
# Unmask the kernel against the parent flow.
kernels = [self.state.unmask(code, mask=self.flow.base)
for code in self.flow.kernels]
# Unmask the parent flow.
base = self.state.unmask(self.flow.base)
return self.flow.clone(base=base, seed=seed, kernels=kernels)
class CollectForked(Collect):
adapt(ForkedFlow)
def __call__(self):
# Collect units in the parent flow.
self.state.collect(self.flow.base)
# Ignore the seed flow as it is a duplicate of the parent flow,
# but do process the kernel expressions.
# FIXME: do we need to process the kernel?
for code in self.flow.kernels:
self.state.collect(code)
class ReplaceForked(Replace):
adapt(ForkedFlow)
def __call__(self):
# Replace the parent flow.
base = self.state.replace(self.flow.base)
# Replace the kernel.
# FIXME: where to replace the kernel? Perhaps store two copies
# of the kernel?
kernels = [self.state.replace(code) for code in self.flow.kernels]
# Recombine the seed flow against a fresh state.
substate = self.state.spawn()
substate.collect(self.flow.seed)
substate.recombine()
seed = substate.replace(self.flow.seed)
# Produce a recombined node.
return self.flow.clone(base=base, seed=seed, kernels=kernels)
class RewriteAttach(RewriteFlow):
adapt(AttachFlow)
def __call__(self):
# Rewrite the child nodes.
base = self.state.rewrite(self.flow.base)
seed = self.state.rewrite(self.flow.seed)
images = [(self.state.rewrite(lcode), self.state.rewrite(rcode))
for lcode, rcode in self.flow.images]
filter = self.flow.filter
if filter is not None:
filter = self.state.rewrite(filter)
if (isinstance(filter, LiteralCode) and
isinstance(filter.domain, BooleanDomain) and
filter.value is True):
filter = None
predicates = []
all_images = images
images = []
for lcode, rcode in all_images:
if not lcode.units:
code = FormulaCode(IsEqualSig(+1), BooleanDomain(),
self.flow.binding, lop=rcode, rop=lcode)
predicates.append(code)
else:
images.append((lcode, rcode))
if filter is not None:
if isformula(filter, AndSig):
ops = filter.ops
else:
ops = [filter]
for op in ops:
if (isformula(op, IsEqualSig) and
op.signature.polarity == +1):
if (op.lop.units and
all(self.flow.base.spans(unit.flow)
for unit in op.lop.units) and
any(not self.flow.base.spans(unit.flow)
for unit in op.rop.units)):
images.append((op.lop, op.rop))
continue
if (op.rop.units and
all(self.flow.base.spans(unit.flow)
for unit in op.rop.units) and
any(not self.flow.base.spans(unit.flow)
for unit in op.lop.units)):
images.append((op.rop, op.lop))
continue
predicates.append(op)
if len(predicates) == 0:
filter = None
elif len(predicates) == 1:
[filter] = predicates
else:
filter = FormulaCode(AndSig(), BooleanDomain(),
self.flow.binding, ops=predicates)
return self.flow.clone(base=base, seed=seed, images=images,
filter=filter)
class UnmaskAttach(UnmaskFlow):
adapt(AttachFlow)
def __call__(self):
# Unmask the parent flow.
base = self.state.unmask(self.flow.base)
# Unmask the seed flow against the parent.
seed = self.state.unmask(self.flow.seed, mask=self.flow.base)
# Unmask the parent image against the parent flow and the seed
# image against the seed flow.
images = [(self.state.unmask(lcode, mask=self.flow.base),
self.state.unmask(rcode, mask=self.flow.seed))
for lcode, rcode in self.flow.images]
filter = None
if self.flow.filter is not None:
filter = self.state.unmask(self.flow.filter, mask=self.flow.seed)
return self.flow.clone(base=base, seed=seed, images=images,
filter=filter)
class CollectAttach(Collect):
adapt(AttachFlow)
def __call__(self):
# Gather units in the parent flow and the parent images.
self.state.collect(self.flow.base)
for lcode, rcode in self.flow.images:
self.state.collect(lcode)
class ReplaceAttach(Replace):
adapt(AttachFlow)
def __call__(self):
# Replace the parent flow and parental images.
base = self.state.replace(self.flow.base)
images = [(self.state.replace(lcode), rcode)
for lcode, rcode in self.flow.images]
# Recombine the seed flow and seed images against a fresh state.
substate = self.state.spawn()
substate.collect(self.flow.seed)
for lcode, rcode in images:
substate.collect(rcode)
substate.recombine()
seed = substate.replace(self.flow.seed)
images = [(lcode, substate.replace(rcode))
for lcode, rcode in images]
return self.flow.clone(base=base, seed=seed, images=images)
class RewriteFiltered(RewriteFlow):
adapt(FilteredFlow)
def __call__(self):
# Rewrite the parent flow and the filter expression.
base = self.state.rewrite(self.flow.base)
filter = self.state.rewrite(self.flow.filter)
# Eliminate a `?true` filter.
if (isinstance(filter, LiteralCode) and
isinstance(filter.domain, BooleanDomain) and
filter.value is True):
return base
return self.flow.clone(base=base, filter=filter)
class UnmaskFiltered(UnmaskFlow):
adapt(FilteredFlow)
def __call__(self):
# If the filter is already enforced by the mask,
# remove the filter, return an unmasked parent flow.
if (self.flow.prune(self.state.mask)
== self.flow.base.prune(self.state.mask)):
return self.state.unmask(self.flow.base)
# Choose the mask to use for unmasking the filter. Use the parent
# flow unless it dominates the current mask (that is, the mask
# contains all non-axial operations of the parent flow), in which
# case use the current mask.
if self.flow.base.dominates(self.state.mask):
filter = self.state.unmask(self.flow.filter)
else:
filter = self.state.unmask(self.flow.filter,
mask=self.flow.base)
# Unmask the parent flow against the current mask.
base = self.state.unmask(self.flow.base)
return self.flow.clone(base=base, filter=filter)
class CollectFiltered(Collect):
adapt(FilteredFlow)
def __call__(self):
# Collect units in all child nodes.
self.state.collect(self.flow.base)
self.state.collect(self.flow.filter)
class ReplaceFiltered(Replace):
adapt(FilteredFlow)
def __call__(self):
# Replace all child nodes.
base = self.state.replace(self.flow.base)
filter = self.state.replace(self.flow.filter)
return self.flow.clone(base=base, filter=filter)
class RewriteOrdered(RewriteFlow):
adapt(OrderedFlow)
def __call__(self):
# Rewrite child nodes.
base = self.state.rewrite(self.flow.base)
order = [(self.state.rewrite(code), direction)
for code, direction in self.flow.order]
return self.flow.clone(base=base, order=order)
class UnmaskOrdered(UnmaskFlow):
adapt(OrderedFlow)
def __call__(self):
# If the ordering operation is already enforced by the mask,
# return the parent flow.
if (self.flow.prune(self.state.mask)
== self.flow.base.prune(self.state.mask)):
return self.state.unmask(self.flow.base)
# Choose a mask for order expressions; use the parent flow
# unless it dominates the current mask, in which case use
# the current mask.
if self.flow.base.dominates(self.state.mask):
order = [(self.state.unmask(code), direction)
for code, direction in self.flow.order]
else:
order = [(self.state.unmask(code, mask=self.flow.base),
direction)
for code, direction in self.flow.order]
# Unmask the parent flow, but only if `limit` and `offset` are
# not specified.
if self.flow.is_expanding:
base = self.state.unmask(self.flow.base)
else:
base = self.state.unmask(self.flow.base, mask=self.state.root)
return self.flow.clone(base=base, order=order)
class CollectOrdered(Collect):
adapt(OrderedFlow)
def __call__(self):
# Collect units in all child nodes.
self.state.collect(self.flow.base)
for code, direction in self.flow.order:
self.state.collect(code)
class ReplaceOrdered(Replace):
adapt(OrderedFlow)
def __call__(self):
# Replace units in all child nodes.
base = self.state.replace(self.flow.base)
order = [(self.state.replace(code), direction)
for code, direction in self.flow.order]
return self.flow.clone(base=base, order=order)
class RewriteCode(Rewrite):
adapt(Code)
def __init__(self, code, state):
# Override to change the attribute name.
super(RewriteCode, self).__init__(code, state)
self.code = code
def __call__(self):
# The default implementation is a no-op; override in subclasses
# if necessary.
return self.code
class UnmaskCode(Unmask):
adapt(Code)
def __init__(self, code, state):
# Override to change the attribute name.
super(UnmaskCode, self).__init__(code, state)
self.code = code
def __call__(self):
# The default implementation is a no-op; override in subclasses
# if necessary.
return self.code
class CollectCode(Collect):
adapt(Code)
def __init__(self, code, state):
# Override to change the attribute name.
super(CollectCode, self).__init__(code, state)
self.code = code
def __call__(self):
# Collect all units in the node.
for unit in self.code.units:
self.state.collect(unit)
class ReplaceCode(Replace):
adapt(Code)
def __init__(self, code, state):
# Override to change the attribute name.
super(ReplaceCode, self).__init__(code, state)
self.code = code
def __call__(self):
# The default implementation is a no-op; it should be overridden
# in subclasses.
return self.code
class RewriteCast(RewriteCode):
adapt(CastCode)
def __call__(self):
# Rewrite the operand of the cast.
base = self.state.rewrite(self.code.base)
return self.code.clone(base=base)
class UnmaskCast(UnmaskCode):
adapt(CastCode)
def __call__(self):
# Unmask the operand of the cast.
base = self.state.unmask(self.code.base)
return self.code.clone(base=base)
class ReplaceCast(ReplaceCode):
adapt(CastCode)
def __call__(self):
# Replace units in the operand of the cast.
base = self.state.replace(self.code.base)
return self.code.clone(base=base)
class RewriteFormula(RewriteCode):
adapt(FormulaCode)
def __call__(self):
# Delegate to an auxiliary adapter dispatched by the formula signature.
return RewriteBySignature.__invoke__(self.code, self.state)
class UnmaskFormula(UnmaskCode):
adapt(FormulaCode)
def __call__(self):
# Unmask formula arguments.
arguments = self.code.arguments.map(self.state.unmask)
return FormulaCode(self.code.signature, self.code.domain,
self.code.binding, **arguments)
class ReplaceFormula(ReplaceCode):
adapt(FormulaCode)
def __call__(self):
# Replace units in the formula arguments.
arguments = self.code.arguments.map(self.state.replace)
return FormulaCode(self.code.signature, self.code.domain,
self.code.binding, **arguments)
class RewriteBySignature(Adapter):
"""
Rewrites a formula node.
This is an auxiliary interface used by :class:`Rewrite` adapter.
It is polymorphic on the signature of the formula.
`code` (:class:`htsql.core.tr.flow.FormulaCode`)
The formula node to rewrite.
`state` (:class:`RewritingState`)
The current state of rewrite process.
"""
adapt(Signature)
@classmethod
def __dispatch__(interface, code, *args, **kwds):
# Extract the dispatch key from the arguments.
assert isinstance(code, FormulaCode)
return (type(code.signature),)
def __init__(self, code, state):
assert isinstance(code, FormulaCode)
assert isinstance(state, RewritingState)
self.code = code
self.state = state
# Extract commonly used attributes of the formula node.
self.signature = code.signature
self.domain = code.domain
self.arguments = code.arguments
def __call__(self):
# The default implementation rewrites the arguments of the formula.
# Override in subclasses to provide specific optimizations.
arguments = self.arguments.map(self.state.rewrite)
return FormulaCode(self.signature, self.domain,
self.code.binding, **arguments)
class RewriteRecord(Rewrite):
adapt_many(RecordCode,
IdentityCode)
def __call__(self):
fields = [self.state.rewrite(field)
for field in self.code.fields]
return self.code.clone(fields=fields)
class UnmaskRecord(Unmask):
adapt_many(RecordCode,
IdentityCode)
def __call__(self):
fields = [self.state.unmask(field)
for field in self.code.fields]
return self.code.clone(fields=fields)
class CollectRecord(Collect):
adapt_many(RecordCode,
IdentityCode)
def __call__(self):
for field in self.code.fields:
self.state.collect(field)
class ReplaceRecord(Replace):
adapt_many(RecordCode,
IdentityCode)
def __call__(self):
fields = [self.state.replace(field)
for field in self.code.fields]
return self.code.clone(fields=fields)
class RewriteAnnihilator(Rewrite):
adapt(AnnihilatorCode)
def __call__(self):
code = self.state.rewrite(self.code.code)
indicator = self.state.rewrite(self.code.indicator)
return self.code.clone(code=code, indicator=indicator)
class UnmaskAnnihilator(Unmask):
adapt(AnnihilatorCode)
def __call__(self):
code = self.state.unmask(self.code.code)
indicator = self.state.unmask(self.code.indicator)
if not isinstance(indicator, Unit):
return code
return self.code.clone(code=code, indicator=indicator)
class CollectAnnihilator(Collect):
adapt(AnnihilatorCode)
def __call__(self):
self.state.collect(self.code.code)
self.state.collect(self.code.indicator)
class ReplaceAnnihilator(Replace):
adapt(AnnihilatorCode)
def __call__(self):
code = self.state.replace(self.code.code)
indicator = self.state.replace(self.code.indicator)
return self.code.clone(code=code, indicator=indicator)
class RewriteUnit(RewriteCode):
adapt(Unit)
def __init__(self, unit, state):
# Overridden to rename the attribute.
super(RewriteUnit, self).__init__(unit, state)
self.unit = unit
def __call__(self):
# Apply the adapter to child nodes.
flow = self.state.rewrite(self.unit.flow)
return self.unit.clone(flow=flow)
class UnmaskUnit(UnmaskCode):
adapt(Unit)
def __init__(self, unit, state):
# Overridden to rename the attribute.
super(UnmaskUnit, self).__init__(unit, state)
self.unit = unit
def __call__(self):
# Apply the adapter to child nodes.
flow = self.state.unmask(self.unit.flow)
return self.unit.clone(flow=flow)
class CollectUnit(CollectCode):
adapt(Unit)
def __init__(self, unit, state):
# Overridden to rename the attribute.
super(CollectUnit, self).__init__(unit, state)
self.unit = unit
def __call__(self):
# Add the unit to the collection. Note that we do not descend
# into the child nodes of the unit; that is done against
# a blank rewriting state in the `Replace` implementation.
self.state.collection.append(self.unit)
class ReplaceUnit(ReplaceCode):
adapt(Unit)
def __init__(self, unit, state):
# Overridden to rename the attribute.
super(ReplaceUnit, self).__init__(unit, state)
self.unit = unit
def __call__(self):
# Recombine the content of the unit node against a blank state.
substate = self.state.spawn()
substate.collect(self.unit.flow)
substate.recombine()
flow = substate.replace(self.unit.flow)
return self.unit.clone(flow=flow)
class UnmaskColumn(UnmaskUnit):
adapt(ColumnUnit)
def __call__(self):
flow = self.state.unmask(self.unit.flow)
column = self.unit.column
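# Note: the loop below walks down through direct joins that are both
# expanding and contracting -- i.e. links that map base rows one-to-one --
# replacing the column with the matching column of the origin table and
# collapsing the flow to its base.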
while (isinstance(flow, FiberTableFlow) and flow.join.is_direct and
flow.is_expanding and flow.is_contracting):
for origin_column, target_column in zip(flow.join.origin_columns,
flow.join.target_columns):
if column is target_column:
flow = flow.base
column = origin_column
break
else:
break
return self.unit.clone(flow=flow, column=column)
class RewriteCompound(RewriteUnit):
adapt(CompoundUnit)
def __call__(self):
# Rewrite the content of the node.
code = self.state.rewrite(self.unit.code)
flow = self.state.rewrite(self.unit.flow)
return self.unit.clone(code=code, flow=flow)
class ReplaceCompound(ReplaceUnit):
adapt(CompoundUnit)
def __call__(self):
# Recombine the content of the unit node against a blank state.
substate = self.state.spawn()
substate.collect(self.unit.code)
substate.collect(self.unit.flow)
substate.recombine()
code = substate.replace(self.unit.code)
flow = substate.replace(self.unit.flow)
return self.unit.clone(code=code, flow=flow)
class UnmaskScalar(UnmaskUnit):
adapt(ScalarUnit)
def __call__(self):
# The unit is redundant if the mask is dominated by the unit flow.
if self.unit.flow.dominates(self.state.mask):
code = self.state.unmask(self.unit.code)
return code
# It is also redundant if the operand is a unit under the same
# or a dominated flow.
if (isinstance(self.unit.code, Unit) and
self.unit.flow.dominates(self.unit.code.flow)):
code = self.state.unmask(self.unit.code)
return code
# Unmask the unit expression against the unit flow.
code = self.state.unmask(self.unit.code, mask=self.unit.flow)
# Unmask the unit flow against the current mask.
flow = self.state.unmask(self.unit.flow)
return self.unit.clone(code=code, flow=flow)
class RewriteAggregate(RewriteUnit):
adapt(AggregateUnitBase)
def __call__(self):
# Rewrite the content of the node.
code = self.state.rewrite(self.unit.code)
plural_flow = self.state.rewrite(self.unit.plural_flow)
flow = self.state.rewrite(self.unit.flow)
return self.unit.clone(code=code, plural_flow=plural_flow, flow=flow)
class UnmaskAggregate(UnmaskUnit):
adapt(AggregateUnitBase)
def __call__(self):
# Unmask the argument against the plural flow.
code = self.state.unmask(self.unit.code, mask=self.unit.plural_flow)
# Unmask the plural flow against the unit flow unless it dominates
# the current mask.
if self.unit.flow.dominates(self.state.mask):
plural_flow = self.state.unmask(self.unit.plural_flow)
else:
plural_flow = self.state.unmask(self.unit.plural_flow,
mask=self.unit.flow)
# Unmask the unit flow against the current mask.
flow = self.state.unmask(self.unit.flow)
return self.unit.clone(code=code, plural_flow=plural_flow, flow=flow)
class ReplaceAggregate(ReplaceUnit):
adapt(AggregateUnitBase)
def __call__(self):
# Recombine the content of the unit node against a blank state.
substate = self.state.spawn()
substate.collect(self.unit.code)
substate.collect(self.unit.plural_flow)
substate.collect(self.unit.flow)
substate.recombine()
code = substate.replace(self.unit.code)
plural_flow = substate.replace(self.unit.plural_flow)
flow = substate.replace(self.unit.flow)
return self.unit.clone(code=code, plural_flow=plural_flow, flow=flow)
class RewriteKernel(RewriteUnit):
adapt(KernelUnit)
def __call__(self):
# At this stage, the kernel code is an element of the family kernel.
assert self.unit.code in self.unit.flow.family.kernels
index = self.unit.flow.family.kernels.index(self.unit.code)
# Rewrite the quotient flow.
flow = self.state.rewrite(self.unit.flow)
# Get the new kernel code.
code = flow.family.kernels[index]
return self.unit.clone(code=code, flow=flow)
class UnmaskKernel(UnmaskUnit):
adapt(KernelUnit)
def __call__(self):
# At this stage, the kernel code is an element of the family kernel.
assert self.unit.code in self.unit.flow.family.kernels
index = self.unit.flow.family.kernels.index(self.unit.code)
# Unmask the quotient flow.
flow = self.state.unmask(self.unit.flow)
# Get the new kernel code.
code = flow.family.kernels[index]
return self.unit.clone(code=code, flow=flow)
class ReplaceKernel(ReplaceUnit):
adapt(KernelUnit)
def __call__(self):
# At this stage, the kernel code is an element of the family kernel.
assert self.unit.code in self.unit.flow.family.kernels
index = self.unit.flow.family.kernels.index(self.unit.code)
# Recombine the quotient flow.
substate = self.state.spawn()
substate.collect(self.unit.flow)
substate.recombine()
flow = substate.replace(self.unit.flow)
# Get the new kernel code.
code = flow.family.kernels[index]
return self.unit.clone(code=code, flow=flow)
class UnmaskCovering(UnmaskUnit):
# FIXME: not used?
adapt(CoveringUnit)
def __call__(self):
# The unit expression is evaluated against the seed flow
# of the unit flow, so use the seed as the mask.
code = self.state.unmask(self.unit.code,
mask=self.unit.flow.seed)
# Unmask the unit flow.
flow = self.state.unmask(self.unit.flow)
return self.unit.clone(code=code, flow=flow)
def rewrite(expression, state=None):
"""
Rewrites the given expression node.
Returns a clone of the given node optimized for compilation.
`expression` (:class:`htsql.core.tr.flow.Expression`)
The expression node to rewrite.
`state` (:class:`RewritingState` or ``None``)
The rewriting state to use. If not set, a new rewriting state
is created.
"""
# If a state is not provided, create a new one.
if state is None:
state = RewritingState()
# Apply the `Rewrite` adapter.
with translate_guard(expression):
return Rewrite.__invoke__(expression, state)
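# A minimal usage sketch; `bind` and `encode` are hypothetical names for
# the upstream translation steps that produce the expression node and are
# not defined in this module:
#
#     expression = encode(bind(syntax))
#     expression = rewrite(expression)   # rewrite, unmask, collect,
#                                        # recombine and replace in one call
#
# `rewrite()` creates a fresh `RewritingState` unless one is passed in.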
# /IdaiKuri-1.0.0.tar.gz/IdaiKuri-1.0.0/TESTS/Test_Parser.py
from IdaiKuri.Parser import ParserTemplateEngine as TemplateEngine
import pytest
##########################################################################################################
## Default Test cases using default Regex delimiters without custom function calls
##########################################################################################################
@pytest.fixture
def DefaultRegex1():
TE = TemplateEngine();
return TE;
def test_DefaultCase1(DefaultRegex1):
print("Test cases using "
"default Regex delimiters in templates "
"without custom function calls "
"for parsing file 1")
input_dict = {};
TE = DefaultRegex1;
result_dict = TE.Root("TemplateFiles/Template_DefaultCase1.html", "GeneratedFiles/GuidoVanRossum_DefaultCase1.html");
input_dict["inputPortrait"] = "images/GuidoVanRossum.png";
input_dict["Logo"] = "images/PythonLogo.png";
input_dict["FullName"] = "Guido Van Rossum";
input_dict["Position"] = "Python's Benevolent Dictator for life";
input_dict["Quote"] = "In Python, every symbol you type is essential.";
input_dict["Author"] = "Guido van Rossum";
for x in result_dict.keys():
for y in input_dict.keys():
if(y in x):
assert(result_dict[x] == input_dict[y]);
def test_DefaultCase2(DefaultRegex1):
print("Test cases using "
"default Regex delimiters in templates "
"without custom function calls "
"for parsing file 2")
input_dict = {};
TE = DefaultRegex1;
result_dict = TE.Root("TemplateFiles/Template_DefaultCase1.html", "GeneratedFiles/LinusTorvalds_DefaultCase1.html");
input_dict["Portrait"] = "images/LinusTorvalds.png";
input_dict["Logo"] = "images/LinuxLogo.png";
input_dict["FullName"] = "Linus Torvalds";
input_dict["Position"] = "Creator of Linux and Git";
input_dict["Quote"] = "The Linux philosophy is \n'laugh in the face of danger'. \n<br> Oops. Wrong one. \n<br>'Do it yourself'. \nThat's it.";
input_dict["Author"] = "Linus Torvalds";
for x in result_dict.keys():
for y in input_dict.keys():
if(y in x):
assert(result_dict[x] == input_dict[y]);
def test_DefaultCase3(DefaultRegex1):
print("Test cases using "
"default Regex delimiters in templates "
"with custom function calls "
"for parsing file 1")
input_dict = {};
TE = DefaultRegex1;
result_dict = TE.Root("TemplateFiles/Template_DefaultCase2.html", "GeneratedFiles/GuidoVanRossum_DefaultCase2.html");
input_dict["FullName"] = "Guido Van Rossum";
input_dict["Position"] = "Python's Benevolent Dictator for life";
input_dict["Quote"] = "In Python, every symbol you type is essential.";
input_dict["Contribution"] = "Python"
for x in result_dict.keys():
for y in input_dict.keys():
if(y in x):
if("get_portrait" in x): assert(result_dict[x] == "images/GuidoVanRossum.png");
elif("get_logo" in x): assert(result_dict[x] == "images/PythonLogo.png");
else:assert(result_dict[x] == input_dict[y]);
def test_DefaultCase4(DefaultRegex1):
print("Test cases using "
"default Regex delimiters in templates "
"with custom function calls "
"for parsing file 2")
input_dict = {};
TE = DefaultRegex1;
result_dict = TE.Root("TemplateFiles/Template_DefaultCase2.html", "GeneratedFiles/LinusTorvalds_DefaultCase2.html");
input_dict["FullName"] = "Linus Torvalds";
input_dict["Position"] = "Creator of Linux and Git";
input_dict["Quote"] = "The Linux philosophy is \n'laugh in the face of danger'. \n<br> Oops. Wrong one. \n<br>'Do it yourself'. \nThat's it.";
input_dict["Contribution"] = "Linux";
for x in result_dict.keys():
for y in input_dict.keys():
if(y in x):
if("get_portrait" in x): assert(result_dict[x] == "images/LinusTorvalds.png");
elif("get_logo" in x): assert(result_dict[x] == "images/LinuxLogo.png");
else:assert(result_dict[x] == input_dict[y]);
##########################################################################################################
## Custom Test cases using custom Regex delimiters with custom function calls
##########################################################################################################
@pytest.fixture
def SpecialRegex1():
TE = TemplateEngine(("_StArT_", "_eNd_"), "_StArT_self.FUNC_CALL()_eNd_");
return TE;
@pytest.fixture
def SpecialRegex2():
TE = TemplateEngine(("<<", ">>"), "<<self.FUNC_CALL()>>");
return TE;
@pytest.fixture
def SpecialRegex3():
TE = TemplateEngine(("\\[\\[", "\\]\\]"), "[[self.FUNC_CALL()]]");
return TE;
@pytest.fixture
def SpecialRegex4():
TE = TemplateEngine(("{\\[<{\\[", "}\\]>}\\]"), "{[<{[self.FUNC_CALL()}]>}]");
return TE;
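# Note on the fixtures above: the first constructor argument appears to be
# a pair of regex-escaped start/end delimiters used in the template files,
# and the second the matching pattern for embedded function calls, with
# `FUNC_CALL` as the placeholder the engine substitutes (e.g. the
# `get_portrait` and `get_logo` calls referenced in the assertions below).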
def test_SpecialCase1(SpecialRegex1, SpecialRegex2, SpecialRegex3, SpecialRegex4):
print("Test cases using "
"custom Regex delimiters in templates "
"with custom function calls (commented/uncommented) "
"for parsing file 1")
for filex in ([1,2,3,4]):
if(filex==1):TE = SpecialRegex1;
if(filex==2):TE = SpecialRegex2;
if(filex==3):TE = SpecialRegex3;
if(filex==4):TE = SpecialRegex4;
input_dict = {};
result_dict = TE.Root(f"TemplateFiles/Template_SpecialCase{filex}.html", f"GeneratedFiles/GuidoVanRossum_SpecialCase{filex}.html");
input_dict["FullName"] = "Guido Van Rossum";
input_dict["Position"] = "Python's Benevolent Dictator for life";
input_dict["Quote"] = "In Python, every symbol you type is essential.";
input_dict["Contribution"] = "Python"
for x in result_dict.keys():
if("#" not in x):
for y in input_dict.keys():
if(y in x):
if("get_portrait" in x): assert(result_dict[x] == "images/GuidoVanRossum.png");
elif("get_logo" in x): assert(result_dict[x] == "images/PythonLogo.png");
else:assert(result_dict[x] == input_dict[y]);
else:
assert(result_dict[x] == "#Commented");
def test_SpecialCase2(SpecialRegex1, SpecialRegex2, SpecialRegex3, SpecialRegex4):
print("Test cases using "
"custom Regex delimiters in templates "
"with custom function calls (commented/uncommented) "
"for parsing file 2")
for filex in ([1,2,3,4]):
if(filex==1):TE = SpecialRegex1;
if(filex==2):TE = SpecialRegex2;
if(filex==3):TE = SpecialRegex3;
if(filex==4):TE = SpecialRegex4;
input_dict = {};
result_dict = TE.Root(f"TemplateFiles/Template_SpecialCase{filex}.html", f"GeneratedFiles/LinusTorvalds_SpecialCase{filex}.html");
input_dict["FullName"] = "Linus Torvalds";
input_dict["Position"] = "Creator of Linux and Git";
input_dict["Quote"] = "The Linux philosophy is \n'laugh in the face of danger'. \n<br> Oops. Wrong one. \n<br>'Do it yourself'. \nThat's it.";
input_dict["Contribution"] = "Linux";
for x in result_dict.keys():
if("#" not in x):
for y in input_dict.keys():
if(y in x):
if("get_portrait" in x): assert(result_dict[x] == "images/LinusTorvalds.png");
elif("get_logo" in x): assert(result_dict[x] == "images/LinuxLogo.png");
else:assert(result_dict[x] == input_dict[y]);
else:
assert(result_dict[x] == "#Commented"); | PypiClean |
# /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/TestObjects.py
import os
import xml.etree.ElementTree as ET
import re
import javalang
class TestClass(object):
def __init__(self, file_path):
self._path = os.path.realpath(file_path)
self._module = self.find_module(self._path)
self._mvn_name = self.generate_mvn_name()
self._testcases = []
self._report = None
self._id = '#'.join([os.path.basename(self.module), self.mvn_name])
with open(self._path, 'r') as src_file:
try:
self._tree = javalang.parse.parse(src_file.read())
except UnicodeDecodeError as e:
raise TestParserException('Java file parsing problem:'+'\n'+str(e))
class_decls = [class_dec for _, class_dec in self.tree.filter(javalang.tree.ClassDeclaration)]
for class_decl in class_decls:
for method in class_decl.methods:
if self.is_valid_testcase(method):
self._testcases.append(TestCase(method, class_decl, self))
@property
def mvn_name(self):
return self._mvn_name
@property
def src_path(self):
return self._path
@property
def testcases(self):
return self._testcases
@property
def module(self):
return self._module
@property
def report(self):
return self._report
@report.setter
def report(self, report):
self._report = report
@property
def tree(self):
return self._tree
@property
def id(self):
return self._id
def parse_src_path(self):
ans = self.module
ans += '\\src\\test\\java'
packages = self.name.split('.')
for p in packages:
ans += '\\' + p
return ans + '.java'
def get_report_path(self):
return self.module + '\\target\\surefire-reports\\' + 'TEST-' + self.mvn_name + '.xml'
def attach_report_to_testcase(self, testcase):
try:
testcase.report = self.report.get_testcase_report(testcase.mvn_name)
except TestParserException as e:
self.report = None
raise e
# Looks for a report and, if one is found, attaches it to self and all its testcases
def look_for_report(self):
try:
self.report = TestClassReport(self.get_report_path(), self.module)
for t in self.testcases:
self.attach_report_to_testcase(t)
except TestParserException:
pass
def clear_report(self):
self.report = None
for t in self.testcases:
t.clear_report()
def find_module(self, file_path):
parent_dir = os.path.abspath(os.path.join(file_path, os.pardir))
is_root = False
while not is_root:
if os.path.isfile(parent_dir + '//pom.xml'):
return parent_dir
else:
tmp = os.path.abspath(os.path.join(parent_dir, os.pardir))
is_root = tmp == parent_dir
parent_dir = tmp
raise TestParserException(file_path + ' is not part of a maven module')
def is_valid_testcase(self, method):
return method.name.lower() != 'setup' and method.name.lower() != 'teardown' and\
len(method.parameters)==0 and method.return_type==None
def generate_mvn_name(self):
relpath = os.path.relpath(self.src_path, self.module + '\\src\\test\\java').replace('.java', '')
if relpath.startswith('..\\'):
relpath = relpath[3:]
return relpath.replace('\\', '.')
def __repr__(self):
return str(self.src_path)
def __eq__(self, other):
if not isinstance(other, TestClass):
return False
else:
return self.id == other.id
class TestCase(object):
def __init__(self, method, class_decl, parent):
self._parent = parent
self._method = method
self._mvn_name = self.parent.mvn_name + '#' + self.method.name
self.class_decl = class_decl
self._id = self.generate_id()
self._report = None
self._start_line = self.method.position[0]
self._end_line = self.find_end_line(self._start_line)
assert self._end_line != -1
@property
def mvn_name(self):
return self._mvn_name
@property
def src_path(self):
return self.parent.src_path
@property
def id(self):
return self._id
@property
def module(self):
return self.parent.module
@property
def method(self):
return self._method
@property
def parent(self):
return self._parent
@property
def report(self):
return self._report
@report.setter
def report(self, report):
self._report = report
@property
def start_line(self):
return self._start_line
@property
def end_line(self):
return self._end_line
@property
def passed(self):
return self.report.passed
@property
def failed(self):
return self.report.failed
@property
def has_error(self):
return self.report.has_error
def clear_report(self):
self.report = None
def get_error(self):
return self.report.get_error()
def has_the_same_code_as(self, other):
    if len(self.method.body) == len(other.method.body):
        i = 0
        while i < len(self.method.body):
            if not self.method.body[i] == other.method.body[i]:
                return False
            i += 1
        return True
    else:
        return False
def generate_id(self):
ret_type = str(self.method.return_type)
if len(self.method.parameters) == 0:
parameters = '()'
else:
parameters = '(' + self.method.parameters[0].type.name
if len(self.method.parameters) > 1:
param_iter = iter(self.method.parameters)
next(param_iter)
for param in param_iter:
parameters += ', ' + param.type.name
parameters += ')'
return self.parent.src_path + '#' + self.class_decl.name + '#' + ret_type + '_' + self.method.name + parameters
def get_lines_range(self):
lower_position = self.method.position[0]
for annotation in self.method.annotations:
if annotation.position[0] < lower_position:
lower_position = annotation.position[0]
return (lower_position, self.end_line)
def contains_line(self, line):
range = self.get_lines_range()
return range[0] <= line <= range[1]
def find_end_line(self, line_num):
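# Overview: two passes over the source file. The first scans from
# `line_num` for the first '{' that opens the method body and records its
# position; the second walks the characters that follow, pushing '{' and
# popping '}' on a stack, and returns the line on which the stack empties,
# i.e. the line holding the closing brace. Returns -1 if no opening brace
# is found.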
brackets_stack = []
open_position = (-1, -1)
with open(self.src_path, 'r') as j_file:
lines = j_file.readlines()
i = 1
for line in lines:
if i < line_num:
i += 1
continue
j = 1
for letter in line:
if '{' == letter:
brackets_stack.append('{')
break
else:
j += 1
if len(brackets_stack) == 1:
open_position = (i, j)
break
i+=1
if open_position[0] == -1 or open_position[1] == -1:
return -1
i = 1
for line in lines:
if i < open_position[0]:
i += 1
continue
j = 1
for letter in line:
if i == open_position[0] and j <= open_position[1]:
j += 1
continue
if letter == '{':
brackets_stack.append('{')
if letter == '}':
brackets_stack.pop()
if len(brackets_stack) == 0:
return i
j += 1
i += 1
def __repr__(self):
return self.id
def __eq__(self, other):
if not isinstance(other, TestCase):
return False
else:
return self.id == other.id
class TestClassReport(object):
def __init__(self, xml_doc_path, modlue_path):
if not os.path.isfile(xml_doc_path):
raise TestParserException('No such report file :' + xml_doc_path)
self.xml_path = xml_doc_path
self.success_testcases = []
self.failed_testcases = []
self._testcases = []
self._time = 0.0
self.maven_multiModuleProjectDirectory = ''
self._module_path = modlue_path
tree = ET.parse(self.xml_path)
root = tree.getroot()
self._name = root.get('name')
self._src_file_path = self.parse_src_path()
for testcase in root.findall('testcase'):
m_test = TestCaseReport(testcase, self)
if m_test.passed:
self.success_testcases.append(m_test)
else:
self.failed_testcases.append(m_test)
self._testcases.append(m_test)
self._time += m_test.time
properties_root = root.find('properties')
properties = properties_root.findall('property')
for property in properties:
if property.get('name') == 'maven.multiModuleProjectDirectory':
self.maven_multiModuleProjectDirectory = property.get('value')
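# For reference, the parser above expects a Maven Surefire report roughly
# of the following shape (only the attributes actually read here are
# shown):
#
#   <testsuite name="com.example.FooTest">
#     <properties>
#       <property name="maven.multiModuleProjectDirectory" value="..."/>
#     </properties>
#     <testcase name="testBar" time="0.01"/>
#     <testcase name="testBaz" time="0.02"><failure>...</failure></testcase>
#   </testsuite>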
@property
def time(self):
return self._time
@property
def name(self):
return self._name
@property
def testcases(self):
return self._testcases
def passed(self):
return len(self.failed_testcases) == 0
@property
def module(self):
return self._module_path
@property
def src_path(self):
return self._src_file_path
# Returns True if the given test name refers to this test or to one of its testcases
def is_associated(self, test):
if test == 'test' or test == 'TEST' or test == 'Test':
return False
if test in self.name:
return True
for testcase in self.testcases:
if test in testcase.name:
return True
return False
def __repr__(self):
return str(self.name)
def parse_src_path(self):
test_name = os.path.basename(self.xml_path).replace('TEST-', '').replace('.java', '').replace('.xml', '')
test_name = test_name.replace('.', '\\')
test_name += '.java'
return self.module + '\\src\\test\\java\\' + test_name
def get_testcase_report(self, testcase_mvn_name):
ans_singelton = list(filter(lambda t: testcase_mvn_name.endswith(t.name), self.testcases))
if not len(ans_singelton) == 1:
raise TestParserException(str(len(ans_singelton)) + ' possible testcases reports for ' + testcase_mvn_name)
return ans_singelton[0]
class TestCaseReport(object):
def __init__(self, testcase, parent):
self._parent = parent
self.testcase_tag = testcase
self._name = self.parent.name + '#'+self.testcase_tag.get('name')
self._time = float(re.sub('[,]', '', self.testcase_tag.get('time')))
self._passed = False
self._failed = False
self._has_error = False
failure = self.testcase_tag.find('failure')
if not failure is None:
self._failed = True
self.error = self.testcase_tag.find('error')
if not self.error is None:
self._has_error = True
self._passed = not self._failed and not self._has_error
@property
def time(self):
return self._time
@property
def name(self):
return self._name
@property
def passed(self):
return self._passed
@property
def failed(self):
return self._failed
@property
def has_error(self):
return self._has_error
@property
def src_path(self):
return self.parent.src_path
@property
def module(self):
return self.parent.module
@property
def parent(self):
return self._parent
def get_error(self):
return self.error.text
def __repr__(self):
return str(self.name)
class TestParserException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
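# A minimal usage sketch; 'path/to/SomeTest.java' is a placeholder for a
# real JUnit source file inside a Maven module:
#
#     tc = TestClass('path/to/SomeTest.java')
#     tc.look_for_report()          # attach surefire results, if present
#     for case in tc.testcases:
#         if case.report is not None and case.failed:
#             print(case.mvn_name, case.get_error())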
.. Ciw-3.0.0/README.rst

Ciw
===
A discrete event simulation library for queueing networks
---------------------------------------------------------
.. image:: https://github.com/CiwPython/Ciw/actions/workflows/tests.yml/badge.svg
:target: https://github.com/CiwPython/Ciw/actions/workflows/tests.yml
.. image:: https://img.shields.io/pypi/v/ciw.svg
:target: https://pypi.python.org/pypi/Ciw
.. image:: https://zenodo.org/badge/47995577.svg
:target: https://zenodo.org/badge/latestdoi/47995577
.. figure:: https://github.com/CiwPython/Ciw/blob/master/docs/_static/logo_small.png?raw=true
:width: 150px
:height: 150px
:scale: 100%
:align: center
Ciw is a discrete event simulation library for open queueing networks.
Its core features include the capability to simulate networks of queues, multiple customer classes, and Type I blocking for restricted networks.
- `Read the documentation <https://ciw.readthedocs.io>`_
- `Contribution guidelines <https://github.com/CiwPython/Ciw/blob/master/CONTRIBUTING.rst>`_
- `Our great contributors <https://github.com/CiwPython/Ciw/blob/master/AUTHORS.rst>`_
Install with :code:`pip install ciw`.
Currently supported versions of Python:
- Python 3.7
- Python 3.8
- Python 3.9
- Python 3.10
- Python 3.11
Usage
-----
Import Ciw::
>>> import ciw
To define an M/M/3 queue, with λ = 0.2 and μ = 0.1::
>>> N = ciw.create_network(
... arrival_distributions=[ciw.dists.Exponential(rate=0.2)],
... service_distributions=[ciw.dists.Exponential(rate=0.1)],
... number_of_servers=[3]
... )
Now set a seed, create a Simulation object, and simulate for 1440 time units::
>>> ciw.seed(1)
>>> Q = ciw.Simulation(N)
>>> Q.simulate_until_max_time(1440)
Collect results::
>>> recs = Q.get_all_records()
Manipulate results to get useful statistics, e.g. average waiting time::
>>> waits = [r.waiting_time for r in recs]
>>> sum(waits) / len(waits)
4.2305...
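
Other statistics can be computed from the same records in plain Python.
For instance, a sketch of the mean service time, assuming each record
exposes a ``service_time`` field as Ciw's data records do::

    >>> services = [r.service_time for r in recs]
    >>> mean_service = sum(services) / len(services)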
Features
--------
A number of other features are also implemented, including:
+ `Type I blocking <https://ciw.readthedocs.io/en/latest/Tutorial-II/tutorial_vi.html>`_
+ `A large range of sampling distributions <https://ciw.readthedocs.io/en/latest/Reference/distributions.html>`_
+ `Phase-Type distributions <https://ciw.readthedocs.io/en/latest/Guides/phasetype.html>`_
+ `Time-dependent and state-dependent distributions <https://ciw.readthedocs.io/en/latest/Guides/time_dependent.html>`_
+ `Batch arrivals <https://ciw.readthedocs.io/en/latest/Guides/batching.html>`_
+ `Baulking customers <https://ciw.readthedocs.io/en/latest/Guides/baulking.html>`_
+ `Reneging customers <https://ciw.readthedocs.io/en/latest/Guides/reneging.html>`_
+ `Processor sharing <https://ciw.readthedocs.io/en/latest/Guides/processor-sharing.html>`_
+ `Multiple customer classes <https://ciw.readthedocs.io/en/latest/Tutorial-II/tutorial_vii.html>`_
+ `Priorities <https://ciw.readthedocs.io/en/latest/Guides/priority.html>`_
+ `Server priorities <https://ciw.readthedocs.io/en/latest/Guides/server_priority.html>`_
+ `Service disciplines <https://ciw.readthedocs.io/en/latest/Guides/service_disciplines.html>`_
+ `Customers changing classes <https://ciw.readthedocs.io/en/latest/Guides/dynamic_customerclasses.html>`_
+ `Server schedules <https://ciw.readthedocs.io/en/latest/Guides/server_schedule.html>`_
+ `State tracking <https://ciw.readthedocs.io/en/latest/Guides/state_trackers.html>`_
+ `Stopping the simulation after a certain amount of customers <https://ciw.readthedocs.io/en/latest/Guides/sim_numcusts.html>`_
+ `Process-based routing <https://ciw.readthedocs.io/en/latest/Guides/process_based.html>`_
+ `Deadlock detection <https://ciw.readthedocs.io/en/latest/Guides/deadlock.html>`_
# /GB2260-v2-0.2.1.tar.gz/GB2260-v2-0.2.1/gb2260_v2/data/curated/revision_201607.py
from __future__ import unicode_literals
name = '201607'
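# Note: GB/T 2260 codes are six digits -- the first two digits identify the
# province-level division, the middle two the prefecture-level division,
# and the last two the county-level division. Prefecture entries therefore
# end in '00' and province entries in '0000'; e.g. '130102' (长安区) sits
# under '130100' (石家庄市), which sits under '130000' (河北省).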
division_schema = {
'110000': '北京市',
'110100': '市辖区',
'110101': '东城区',
'110102': '西城区',
'110105': '朝阳区',
'110106': '丰台区',
'110107': '石景山区',
'110108': '海淀区',
'110109': '门头沟区',
'110111': '房山区',
'110112': '通州区',
'110113': '顺义区',
'110114': '昌平区',
'110115': '大兴区',
'110116': '怀柔区',
'110117': '平谷区',
'110118': '密云区',
'110119': '延庆区',
'120000': '天津市',
'120100': '市辖区',
'120101': '和平区',
'120102': '河东区',
'120103': '河西区',
'120104': '南开区',
'120105': '河北区',
'120106': '红桥区',
'120110': '东丽区',
'120111': '西青区',
'120112': '津南区',
'120113': '北辰区',
'120114': '武清区',
'120115': '宝坻区',
'120116': '滨海新区',
'120117': '宁河区',
'120118': '静海区',
'120119': '蓟州区',
'130000': '河北省',
'130100': '石家庄市',
'130101': '市辖区',
'130102': '长安区',
'130104': '桥西区',
'130105': '新华区',
'130107': '井陉矿区',
'130108': '裕华区',
'130109': '藁城区',
'130110': '鹿泉区',
'130111': '栾城区',
'130121': '井陉县',
'130123': '正定县',
'130125': '行唐县',
'130126': '灵寿县',
'130127': '高邑县',
'130128': '深泽县',
'130129': '赞皇县',
'130130': '无极县',
'130131': '平山县',
'130132': '元氏县',
'130133': '赵县',
'130183': '晋州市',
'130184': '新乐市',
'130200': '唐山市',
'130201': '市辖区',
'130202': '路南区',
'130203': '路北区',
'130204': '古冶区',
'130205': '开平区',
'130207': '丰南区',
'130208': '丰润区',
'130209': '曹妃甸区',
'130223': '滦县',
'130224': '滦南县',
'130225': '乐亭县',
'130227': '迁西县',
'130229': '玉田县',
'130281': '遵化市',
'130283': '迁安市',
'130300': '秦皇岛市',
'130301': '市辖区',
'130302': '海港区',
'130303': '山海关区',
'130304': '北戴河区',
'130306': '抚宁区',
'130321': '青龙满族自治县',
'130322': '昌黎县',
'130324': '卢龙县',
'130400': '邯郸市',
'130401': '市辖区',
'130402': '邯山区',
'130403': '丛台区',
'130404': '复兴区',
'130406': '峰峰矿区',
'130421': '邯郸县',
'130423': '临漳县',
'130424': '成安县',
'130425': '大名县',
'130426': '涉县',
'130427': '磁县',
'130428': '肥乡县',
'130429': '永年县',
'130430': '邱县',
'130431': '鸡泽县',
'130432': '广平县',
'130433': '馆陶县',
'130434': '魏县',
'130435': '曲周县',
'130481': '武安市',
'130500': '邢台市',
'130501': '市辖区',
'130502': '桥东区',
'130503': '桥西区',
'130521': '邢台县',
'130522': '临城县',
'130523': '内丘县',
'130524': '柏乡县',
'130525': '隆尧县',
'130526': '任县',
'130527': '南和县',
'130528': '宁晋县',
'130529': '巨鹿县',
'130530': '新河县',
'130531': '广宗县',
'130532': '平乡县',
'130533': '威县',
'130534': '清河县',
'130535': '临西县',
'130581': '南宫市',
'130582': '沙河市',
'130600': '保定市',
'130601': '市辖区',
'130602': '竞秀区',
'130606': '莲池区',
'130607': '满城区',
'130608': '清苑区',
'130609': '徐水区',
'130623': '涞水县',
'130624': '阜平县',
'130626': '定兴县',
'130627': '唐县',
'130628': '高阳县',
'130629': '容城县',
'130630': '涞源县',
'130631': '望都县',
'130632': '安新县',
'130633': '易县',
'130634': '曲阳县',
'130635': '蠡县',
'130636': '顺平县',
'130637': '博野县',
'130638': '雄县',
'130681': '涿州市',
'130683': '安国市',
'130684': '高碑店市',
'130700': '张家口市',
'130701': '市辖区',
'130702': '桥东区',
'130703': '桥西区',
'130705': '宣化区',
'130706': '下花园区',
'130708': '万全区',
'130709': '崇礼区',
'130722': '张北县',
'130723': '康保县',
'130724': '沽源县',
'130725': '尚义县',
'130726': '蔚县',
'130727': '阳原县',
'130728': '怀安县',
'130730': '怀来县',
'130731': '涿鹿县',
'130732': '赤城县',
'130800': '承德市',
'130801': '市辖区',
'130802': '双桥区',
'130803': '双滦区',
'130804': '鹰手营子矿区',
'130821': '承德县',
'130822': '兴隆县',
'130823': '平泉县',
'130824': '滦平县',
'130825': '隆化县',
'130826': '丰宁满族自治县',
'130827': '宽城满族自治县',
'130828': '围场满族蒙古族自治县',
'130900': '沧州市',
'130901': '市辖区',
'130902': '新华区',
'130903': '运河区',
'130921': '沧县',
'130922': '青县',
'130923': '东光县',
'130924': '海兴县',
'130925': '盐山县',
'130926': '肃宁县',
'130927': '南皮县',
'130928': '吴桥县',
'130929': '献县',
'130930': '孟村回族自治县',
'130981': '泊头市',
'130982': '任丘市',
'130983': '黄骅市',
'130984': '河间市',
'131000': '廊坊市',
'131001': '市辖区',
'131002': '安次区',
'131003': '广阳区',
'131022': '固安县',
'131023': '永清县',
'131024': '香河县',
'131025': '大城县',
'131026': '文安县',
'131028': '大厂回族自治县',
'131081': '霸州市',
'131082': '三河市',
'131100': '衡水市',
'131101': '市辖区',
'131102': '桃城区',
'131103': '冀州区',
'131121': '枣强县',
'131122': '武邑县',
'131123': '武强县',
'131124': '饶阳县',
'131125': '安平县',
'131126': '故城县',
'131127': '景县',
'131128': '阜城县',
'131182': '深州市',
'139000': '省直辖县级行政区划',
'139001': '定州市',
'139002': '辛集市',
'140000': '山西省',
'140100': '太原市',
'140101': '市辖区',
'140105': '小店区',
'140106': '迎泽区',
'140107': '杏花岭区',
'140108': '尖草坪区',
'140109': '万柏林区',
'140110': '晋源区',
'140121': '清徐县',
'140122': '阳曲县',
'140123': '娄烦县',
'140181': '古交市',
'140200': '大同市',
'140201': '市辖区',
'140202': '城区',
'140203': '矿区',
'140211': '南郊区',
'140212': '新荣区',
'140221': '阳高县',
'140222': '天镇县',
'140223': '广灵县',
'140224': '灵丘县',
'140225': '浑源县',
'140226': '左云县',
'140227': '大同县',
'140300': '阳泉市',
'140301': '市辖区',
'140302': '城区',
'140303': '矿区',
'140311': '郊区',
'140321': '平定县',
'140322': '盂县',
'140400': '长治市',
'140401': '市辖区',
'140402': '城区',
'140411': '郊区',
'140421': '长治县',
'140423': '襄垣县',
'140424': '屯留县',
'140425': '平顺县',
'140426': '黎城县',
'140427': '壶关县',
'140428': '长子县',
'140429': '武乡县',
'140430': '沁县',
'140431': '沁源县',
'140481': '潞城市',
'140500': '晋城市',
'140501': '市辖区',
'140502': '城区',
'140521': '沁水县',
'140522': '阳城县',
'140524': '陵川县',
'140525': '泽州县',
'140581': '高平市',
'140600': '朔州市',
'140601': '市辖区',
'140602': '朔城区',
'140603': '平鲁区',
'140621': '山阴县',
'140622': '应县',
'140623': '右玉县',
'140624': '怀仁县',
'140700': '晋中市',
'140701': '市辖区',
'140702': '榆次区',
'140721': '榆社县',
'140722': '左权县',
'140723': '和顺县',
'140724': '昔阳县',
'140725': '寿阳县',
'140726': '太谷县',
'140727': '祁县',
'140728': '平遥县',
'140729': '灵石县',
'140781': '介休市',
'140800': '运城市',
'140801': '市辖区',
'140802': '盐湖区',
'140821': '临猗县',
'140822': '万荣县',
'140823': '闻喜县',
'140824': '稷山县',
'140825': '新绛县',
'140826': '绛县',
'140827': '垣曲县',
'140828': '夏县',
'140829': '平陆县',
'140830': '芮城县',
'140881': '永济市',
'140882': '河津市',
'140900': '忻州市',
'140901': '市辖区',
'140902': '忻府区',
'140921': '定襄县',
'140922': '五台县',
'140923': '代县',
'140924': '繁峙县',
'140925': '宁武县',
'140926': '静乐县',
'140927': '神池县',
'140928': '五寨县',
'140929': '岢岚县',
'140930': '河曲县',
'140931': '保德县',
'140932': '偏关县',
'140981': '原平市',
'141000': '临汾市',
'141001': '市辖区',
'141002': '尧都区',
'141021': '曲沃县',
'141022': '翼城县',
'141023': '襄汾县',
'141024': '洪洞县',
'141025': '古县',
'141026': '安泽县',
'141027': '浮山县',
'141028': '吉县',
'141029': '乡宁县',
'141030': '大宁县',
'141031': '隰县',
'141032': '永和县',
'141033': '蒲县',
'141034': '汾西县',
'141081': '侯马市',
'141082': '霍州市',
'141100': '吕梁市',
'141101': '市辖区',
'141102': '离石区',
'141121': '文水县',
'141122': '交城县',
'141123': '兴县',
'141124': '临县',
'141125': '柳林县',
'141126': '石楼县',
'141127': '岚县',
'141128': '方山县',
'141129': '中阳县',
'141130': '交口县',
'141181': '孝义市',
'141182': '汾阳市',
'150000': '内蒙古自治区',
'150100': '呼和浩特市',
'150101': '市辖区',
'150102': '新城区',
'150103': '回民区',
'150104': '玉泉区',
'150105': '赛罕区',
'150121': '土默特左旗',
'150122': '托克托县',
'150123': '和林格尔县',
'150124': '清水河县',
'150125': '武川县',
'150200': '包头市',
'150201': '市辖区',
'150202': '东河区',
'150203': '昆都仑区',
'150204': '青山区',
'150205': '石拐区',
'150206': '白云鄂博矿区',
'150207': '九原区',
'150221': '土默特右旗',
'150222': '固阳县',
'150223': '达尔罕茂明安联合旗',
'150300': '乌海市',
'150301': '市辖区',
'150302': '海勃湾区',
'150303': '海南区',
'150304': '乌达区',
'150400': '赤峰市',
'150401': '市辖区',
'150402': '红山区',
'150403': '元宝山区',
'150404': '松山区',
'150421': '阿鲁科尔沁旗',
'150422': '巴林左旗',
'150423': '巴林右旗',
'150424': '林西县',
'150425': '克什克腾旗',
'150426': '翁牛特旗',
'150428': '喀喇沁旗',
'150429': '宁城县',
'150430': '敖汉旗',
'150500': '通辽市',
'150501': '市辖区',
'150502': '科尔沁区',
'150521': '科尔沁左翼中旗',
'150522': '科尔沁左翼后旗',
'150523': '开鲁县',
'150524': '库伦旗',
'150525': '奈曼旗',
'150526': '扎鲁特旗',
'150581': '霍林郭勒市',
'150600': '鄂尔多斯市',
'150601': '市辖区',
'150602': '东胜区',
'150603': '康巴什区',
'150621': '达拉特旗',
'150622': '准格尔旗',
'150623': '鄂托克前旗',
'150624': '鄂托克旗',
'150625': '杭锦旗',
'150626': '乌审旗',
'150627': '伊金霍洛旗',
'150700': '呼伦贝尔市',
'150701': '市辖区',
'150702': '海拉尔区',
'150703': '扎赉诺尔区',
'150721': '阿荣旗',
'150722': '莫力达瓦达斡尔族自治旗',
'150723': '鄂伦春自治旗',
'150724': '鄂温克族自治旗',
'150725': '陈巴尔虎旗',
'150726': '新巴尔虎左旗',
'150727': '新巴尔虎右旗',
'150781': '满洲里市',
'150782': '牙克石市',
'150783': '扎兰屯市',
'150784': '额尔古纳市',
'150785': '根河市',
'150800': '巴彦淖尔市',
'150801': '市辖区',
'150802': '临河区',
'150821': '五原县',
'150822': '磴口县',
'150823': '乌拉特前旗',
'150824': '乌拉特中旗',
'150825': '乌拉特后旗',
'150826': '杭锦后旗',
'150900': '乌兰察布市',
'150901': '市辖区',
'150902': '集宁区',
'150921': '卓资县',
'150922': '化德县',
'150923': '商都县',
'150924': '兴和县',
'150925': '凉城县',
'150926': '察哈尔右翼前旗',
'150927': '察哈尔右翼中旗',
'150928': '察哈尔右翼后旗',
'150929': '四子王旗',
'150981': '丰镇市',
'152200': '兴安盟',
'152201': '乌兰浩特市',
'152202': '阿尔山市',
'152221': '科尔沁右翼前旗',
'152222': '科尔沁右翼中旗',
'152223': '扎赉特旗',
'152224': '突泉县',
'152500': '锡林郭勒盟',
'152501': '二连浩特市',
'152502': '锡林浩特市',
'152522': '阿巴嘎旗',
'152523': '苏尼特左旗',
'152524': '苏尼特右旗',
'152525': '东乌珠穆沁旗',
'152526': '西乌珠穆沁旗',
'152527': '太仆寺旗',
'152528': '镶黄旗',
'152529': '正镶白旗',
'152530': '正蓝旗',
'152531': '多伦县',
'152900': '阿拉善盟',
'152921': '阿拉善左旗',
'152922': '阿拉善右旗',
'152923': '额济纳旗',
'210000': '辽宁省',
'210100': '沈阳市',
'210101': '市辖区',
'210102': '和平区',
'210103': '沈河区',
'210104': '大东区',
'210105': '皇姑区',
'210106': '铁西区',
'210111': '苏家屯区',
'210112': '浑南区',
'210113': '沈北新区',
'210114': '于洪区',
'210115': '辽中区',
'210123': '康平县',
'210124': '法库县',
'210181': '新民市',
'210200': '大连市',
'210201': '市辖区',
'210202': '中山区',
'210203': '西岗区',
'210204': '沙河口区',
'210211': '甘井子区',
'210212': '旅顺口区',
'210213': '金州区',
'210214': '普兰店区',
'210224': '长海县',
'210281': '瓦房店市',
'210283': '庄河市',
'210300': '鞍山市',
'210301': '市辖区',
'210302': '铁东区',
'210303': '铁西区',
'210304': '立山区',
'210311': '千山区',
'210321': '台安县',
'210323': '岫岩满族自治县',
'210381': '海城市',
'210400': '抚顺市',
'210401': '市辖区',
'210402': '新抚区',
'210403': '东洲区',
'210404': '望花区',
'210411': '顺城区',
'210421': '抚顺县',
'210422': '新宾满族自治县',
'210423': '清原满族自治县',
'210500': '本溪市',
'210501': '市辖区',
'210502': '平山区',
'210503': '溪湖区',
'210504': '明山区',
'210505': '南芬区',
'210521': '本溪满族自治县',
'210522': '桓仁满族自治县',
'210600': '丹东市',
'210601': '市辖区',
'210602': '元宝区',
'210603': '振兴区',
'210604': '振安区',
'210624': '宽甸满族自治县',
'210681': '东港市',
'210682': '凤城市',
'210700': '锦州市',
'210701': '市辖区',
'210702': '古塔区',
'210703': '凌河区',
'210711': '太和区',
'210726': '黑山县',
'210727': '义县',
'210781': '凌海市',
'210782': '北镇市',
'210800': '营口市',
'210801': '市辖区',
'210802': '站前区',
'210803': '西市区',
'210804': '鲅鱼圈区',
'210811': '老边区',
'210881': '盖州市',
'210882': '大石桥市',
'210900': '阜新市',
'210901': '市辖区',
'210902': '海州区',
'210903': '新邱区',
'210904': '太平区',
'210905': '清河门区',
'210911': '细河区',
'210921': '阜新蒙古族自治县',
'210922': '彰武县',
'211000': '辽阳市',
'211001': '市辖区',
'211002': '白塔区',
'211003': '文圣区',
'211004': '宏伟区',
'211005': '弓长岭区',
'211011': '太子河区',
'211021': '辽阳县',
'211081': '灯塔市',
'211100': '盘锦市',
'211101': '市辖区',
'211102': '双台子区',
'211103': '兴隆台区',
'211104': '大洼区',
'211122': '盘山县',
'211200': '铁岭市',
'211201': '市辖区',
'211202': '银州区',
'211204': '清河区',
'211221': '铁岭县',
'211223': '西丰县',
'211224': '昌图县',
'211281': '调兵山市',
'211282': '开原市',
'211300': '朝阳市',
'211301': '市辖区',
'211302': '双塔区',
'211303': '龙城区',
'211321': '朝阳县',
'211322': '建平县',
'211324': '喀喇沁左翼蒙古族自治县',
'211381': '北票市',
'211382': '凌源市',
'211400': '葫芦岛市',
'211401': '市辖区',
'211402': '连山区',
'211403': '龙港区',
'211404': '南票区',
'211421': '绥中县',
'211422': '建昌县',
'211481': '兴城市',
'220000': '吉林省',
'220100': '长春市',
'220101': '市辖区',
'220102': '南关区',
'220103': '宽城区',
'220104': '朝阳区',
'220105': '二道区',
'220106': '绿园区',
'220112': '双阳区',
'220113': '九台区',
'220122': '农安县',
'220182': '榆树市',
'220183': '德惠市',
'220200': '吉林市',
'220201': '市辖区',
'220202': '昌邑区',
'220203': '龙潭区',
'220204': '船营区',
'220211': '丰满区',
'220221': '永吉县',
'220281': '蛟河市',
'220282': '桦甸市',
'220283': '舒兰市',
'220284': '磐石市',
'220300': '四平市',
'220301': '市辖区',
'220302': '铁西区',
'220303': '铁东区',
'220322': '梨树县',
'220323': '伊通满族自治县',
'220381': '公主岭市',
'220382': '双辽市',
'220400': '辽源市',
'220401': '市辖区',
'220402': '龙山区',
'220403': '西安区',
'220421': '东丰县',
'220422': '东辽县',
'220500': '通化市',
'220501': '市辖区',
'220502': '东昌区',
'220503': '二道江区',
'220521': '通化县',
'220523': '辉南县',
'220524': '柳河县',
'220581': '梅河口市',
'220582': '集安市',
'220600': '白山市',
'220601': '市辖区',
'220602': '浑江区',
'220605': '江源区',
'220621': '抚松县',
'220622': '靖宇县',
'220623': '长白朝鲜族自治县',
'220681': '临江市',
'220700': '松原市',
'220701': '市辖区',
'220702': '宁江区',
'220721': '前郭尔罗斯蒙古族自治县',
'220722': '长岭县',
'220723': '乾安县',
'220781': '扶余市',
'220800': '白城市',
'220801': '市辖区',
'220802': '洮北区',
'220821': '镇赉县',
'220822': '通榆县',
'220881': '洮南市',
'220882': '大安市',
'222400': '延边朝鲜族自治州',
'222401': '延吉市',
'222402': '图们市',
'222403': '敦化市',
'222404': '珲春市',
'222405': '龙井市',
'222406': '和龙市',
'222424': '汪清县',
'222426': '安图县',
'230000': '黑龙江省',
'230100': '哈尔滨市',
'230101': '市辖区',
'230102': '道里区',
'230103': '南岗区',
'230104': '道外区',
'230108': '平房区',
'230109': '松北区',
'230110': '香坊区',
'230111': '呼兰区',
'230112': '阿城区',
'230113': '双城区',
'230123': '依兰县',
'230124': '方正县',
'230125': '宾县',
'230126': '巴彦县',
'230127': '木兰县',
'230128': '通河县',
'230129': '延寿县',
'230183': '尚志市',
'230184': '五常市',
'230200': '齐齐哈尔市',
'230201': '市辖区',
'230202': '龙沙区',
'230203': '建华区',
'230204': '铁锋区',
'230205': '昂昂溪区',
'230206': '富拉尔基区',
'230207': '碾子山区',
'230208': '梅里斯达斡尔族区',
'230221': '龙江县',
'230223': '依安县',
'230224': '泰来县',
'230225': '甘南县',
'230227': '富裕县',
'230229': '克山县',
'230230': '克东县',
'230231': '拜泉县',
'230281': '讷河市',
'230300': '鸡西市',
'230301': '市辖区',
'230302': '鸡冠区',
'230303': '恒山区',
'230304': '滴道区',
'230305': '梨树区',
'230306': '城子河区',
'230307': '麻山区',
'230321': '鸡东县',
'230381': '虎林市',
'230382': '密山市',
'230400': '鹤岗市',
'230401': '市辖区',
'230402': '向阳区',
'230403': '工农区',
'230404': '南山区',
'230405': '兴安区',
'230406': '东山区',
'230407': '兴山区',
'230421': '萝北县',
'230422': '绥滨县',
'230500': '双鸭山市',
'230501': '市辖区',
'230502': '尖山区',
'230503': '岭东区',
'230505': '四方台区',
'230506': '宝山区',
'230521': '集贤县',
'230522': '友谊县',
'230523': '宝清县',
'230524': '饶河县',
'230600': '大庆市',
'230601': '市辖区',
'230602': '萨尔图区',
'230603': '龙凤区',
'230604': '让胡路区',
'230605': '红岗区',
'230606': '大同区',
'230621': '肇州县',
'230622': '肇源县',
'230623': '林甸县',
'230624': '杜尔伯特蒙古族自治县',
'230700': '伊春市',
'230701': '市辖区',
'230702': '伊春区',
'230703': '南岔区',
'230704': '友好区',
'230705': '西林区',
'230706': '翠峦区',
'230707': '新青区',
'230708': '美溪区',
'230709': '金山屯区',
'230710': '五营区',
'230711': '乌马河区',
'230712': '汤旺河区',
'230713': '带岭区',
'230714': '乌伊岭区',
'230715': '红星区',
'230716': '上甘岭区',
'230722': '嘉荫县',
'230781': '铁力市',
'230800': '佳木斯市',
'230801': '市辖区',
'230803': '向阳区',
'230804': '前进区',
'230805': '东风区',
'230811': '郊区',
'230822': '桦南县',
'230826': '桦川县',
'230828': '汤原县',
'230881': '同江市',
'230882': '富锦市',
'230883': '抚远市',
'230900': '七台河市',
'230901': '市辖区',
'230902': '新兴区',
'230903': '桃山区',
'230904': '茄子河区',
'230921': '勃利县',
'231000': '牡丹江市',
'231001': '市辖区',
'231002': '东安区',
'231003': '阳明区',
'231004': '爱民区',
'231005': '西安区',
'231025': '林口县',
'231081': '绥芬河市',
'231083': '海林市',
'231084': '宁安市',
'231085': '穆棱市',
'231086': '东宁市',
'231100': '黑河市',
'231101': '市辖区',
'231102': '爱辉区',
'231121': '嫩江县',
'231123': '逊克县',
'231124': '孙吴县',
'231181': '北安市',
'231182': '五大连池市',
'231200': '绥化市',
'231201': '市辖区',
'231202': '北林区',
'231221': '望奎县',
'231222': '兰西县',
'231223': '青冈县',
'231224': '庆安县',
'231225': '明水县',
'231226': '绥棱县',
'231281': '安达市',
'231282': '肇东市',
'231283': '海伦市',
'232700': '大兴安岭地区',
'232721': '呼玛县',
'232722': '塔河县',
'232723': '漠河县',
'310000': '上海市',
'310100': '市辖区',
'310101': '黄浦区',
'310104': '徐汇区',
'310105': '长宁区',
'310106': '静安区',
'310107': '普陀区',
'310109': '虹口区',
'310110': '杨浦区',
'310112': '闵行区',
'310113': '宝山区',
'310114': '嘉定区',
'310115': '浦东新区',
'310116': '金山区',
'310117': '松江区',
'310118': '青浦区',
'310120': '奉贤区',
'310151': '崇明区',
'320000': '江苏省',
'320100': '南京市',
'320101': '市辖区',
'320102': '玄武区',
'320104': '秦淮区',
'320105': '建邺区',
'320106': '鼓楼区',
'320111': '浦口区',
'320113': '栖霞区',
'320114': '雨花台区',
'320115': '江宁区',
'320116': '六合区',
'320117': '溧水区',
'320118': '高淳区',
'320200': '无锡市',
'320201': '市辖区',
'320205': '锡山区',
'320206': '惠山区',
'320211': '滨湖区',
'320213': '梁溪区',
'320214': '新吴区',
'320281': '江阴市',
'320282': '宜兴市',
'320300': '徐州市',
'320301': '市辖区',
'320302': '鼓楼区',
'320303': '云龙区',
'320305': '贾汪区',
'320311': '泉山区',
'320312': '铜山区',
'320321': '丰县',
'320322': '沛县',
'320324': '睢宁县',
'320381': '新沂市',
'320382': '邳州市',
'320400': '常州市',
'320401': '市辖区',
'320402': '天宁区',
'320404': '钟楼区',
'320411': '新北区',
'320412': '武进区',
'320413': '金坛区',
'320481': '溧阳市',
'320500': '苏州市',
'320501': '市辖区',
'320505': '虎丘区',
'320506': '吴中区',
'320507': '相城区',
'320508': '姑苏区',
'320509': '吴江区',
'320581': '常熟市',
'320582': '张家港市',
'320583': '昆山市',
'320585': '太仓市',
'320600': '南通市',
'320601': '市辖区',
'320602': '崇川区',
'320611': '港闸区',
'320612': '通州区',
'320621': '海安县',
'320623': '如东县',
'320681': '启东市',
'320682': '如皋市',
'320684': '海门市',
'320700': '连云港市',
'320701': '市辖区',
'320703': '连云区',
'320706': '海州区',
'320707': '赣榆区',
'320722': '东海县',
'320723': '灌云县',
'320724': '灌南县',
'320800': '淮安市',
'320801': '市辖区',
'320803': '淮安区',
'320804': '淮阴区',
'320812': '清江浦区',
'320813': '洪泽区',
'320826': '涟水县',
'320830': '盱眙县',
'320831': '金湖县',
'320900': '盐城市',
'320901': '市辖区',
'320902': '亭湖区',
'320903': '盐都区',
'320904': '大丰区',
'320921': '响水县',
'320922': '滨海县',
'320923': '阜宁县',
'320924': '射阳县',
'320925': '建湖县',
'320981': '东台市',
'321000': '扬州市',
'321001': '市辖区',
'321002': '广陵区',
'321003': '邗江区',
'321012': '江都区',
'321023': '宝应县',
'321081': '仪征市',
'321084': '高邮市',
'321100': '镇江市',
'321101': '市辖区',
'321102': '京口区',
'321111': '润州区',
'321112': '丹徒区',
'321181': '丹阳市',
'321182': '扬中市',
'321183': '句容市',
'321200': '泰州市',
'321201': '市辖区',
'321202': '海陵区',
'321203': '高港区',
'321204': '姜堰区',
'321281': '兴化市',
'321282': '靖江市',
'321283': '泰兴市',
'321300': '宿迁市',
'321301': '市辖区',
'321302': '宿城区',
'321311': '宿豫区',
'321322': '沭阳县',
'321323': '泗阳县',
'321324': '泗洪县',
'330000': '浙江省',
'330100': '杭州市',
'330101': '市辖区',
'330102': '上城区',
'330103': '下城区',
'330104': '江干区',
'330105': '拱墅区',
'330106': '西湖区',
'330108': '滨江区',
'330109': '萧山区',
'330110': '余杭区',
'330111': '富阳区',
'330122': '桐庐县',
'330127': '淳安县',
'330182': '建德市',
'330185': '临安市',
'330200': '宁波市',
'330201': '市辖区',
'330203': '海曙区',
'330204': '江东区',
'330205': '江北区',
'330206': '北仑区',
'330211': '镇海区',
'330212': '鄞州区',
'330225': '象山县',
'330226': '宁海县',
'330281': '余姚市',
'330282': '慈溪市',
'330283': '奉化市',
'330300': '温州市',
'330301': '市辖区',
'330302': '鹿城区',
'330303': '龙湾区',
'330304': '瓯海区',
'330305': '洞头区',
'330324': '永嘉县',
'330326': '平阳县',
'330327': '苍南县',
'330328': '文成县',
'330329': '泰顺县',
'330381': '瑞安市',
'330382': '乐清市',
'330400': '嘉兴市',
'330401': '市辖区',
'330402': '南湖区',
'330411': '秀洲区',
'330421': '嘉善县',
'330424': '海盐县',
'330481': '海宁市',
'330482': '平湖市',
'330483': '桐乡市',
'330500': '湖州市',
'330501': '市辖区',
'330502': '吴兴区',
'330503': '南浔区',
'330521': '德清县',
'330522': '长兴县',
'330523': '安吉县',
'330600': '绍兴市',
'330601': '市辖区',
'330602': '越城区',
'330603': '柯桥区',
'330604': '上虞区',
'330624': '新昌县',
'330681': '诸暨市',
'330683': '嵊州市',
'330700': '金华市',
'330701': '市辖区',
'330702': '婺城区',
'330703': '金东区',
'330723': '武义县',
'330726': '浦江县',
'330727': '磐安县',
'330781': '兰溪市',
'330782': '义乌市',
'330783': '东阳市',
'330784': '永康市',
'330800': '衢州市',
'330801': '市辖区',
'330802': '柯城区',
'330803': '衢江区',
'330822': '常山县',
'330824': '开化县',
'330825': '龙游县',
'330881': '江山市',
'330900': '舟山市',
'330901': '市辖区',
'330902': '定海区',
'330903': '普陀区',
'330921': '岱山县',
'330922': '嵊泗县',
'331000': '台州市',
'331001': '市辖区',
'331002': '椒江区',
'331003': '黄岩区',
'331004': '路桥区',
'331021': '玉环县',
'331022': '三门县',
'331023': '天台县',
'331024': '仙居县',
'331081': '温岭市',
'331082': '临海市',
'331100': '丽水市',
'331101': '市辖区',
'331102': '莲都区',
'331121': '青田县',
'331122': '缙云县',
'331123': '遂昌县',
'331124': '松阳县',
'331125': '云和县',
'331126': '庆元县',
'331127': '景宁畲族自治县',
'331181': '龙泉市',
'340000': '安徽省',
'340100': '合肥市',
'340101': '市辖区',
'340102': '瑶海区',
'340103': '庐阳区',
'340104': '蜀山区',
'340111': '包河区',
'340121': '长丰县',
'340122': '肥东县',
'340123': '肥西县',
'340124': '庐江县',
'340181': '巢湖市',
'340200': '芜湖市',
'340201': '市辖区',
'340202': '镜湖区',
'340203': '弋江区',
'340207': '鸠江区',
'340208': '三山区',
'340221': '芜湖县',
'340222': '繁昌县',
'340223': '南陵县',
'340225': '无为县',
'340300': '蚌埠市',
'340301': '市辖区',
'340302': '龙子湖区',
'340303': '蚌山区',
'340304': '禹会区',
'340311': '淮上区',
'340321': '怀远县',
'340322': '五河县',
'340323': '固镇县',
'340400': '淮南市',
'340401': '市辖区',
'340402': '大通区',
'340403': '田家庵区',
'340404': '谢家集区',
'340405': '八公山区',
'340406': '潘集区',
'340421': '凤台县',
'340422': '寿县',
'340500': '马鞍山市',
'340501': '市辖区',
'340503': '花山区',
'340504': '雨山区',
'340506': '博望区',
'340521': '当涂县',
'340522': '含山县',
'340523': '和县',
'340600': '淮北市',
'340601': '市辖区',
'340602': '杜集区',
'340603': '相山区',
'340604': '烈山区',
'340621': '濉溪县',
'340700': '铜陵市',
'340701': '市辖区',
'340705': '铜官区',
'340706': '义安区',
'340711': '郊区',
'340722': '枞阳县',
'340800': '安庆市',
'340801': '市辖区',
'340802': '迎江区',
'340803': '大观区',
'340811': '宜秀区',
'340822': '怀宁县',
'340824': '潜山县',
'340825': '太湖县',
'340826': '宿松县',
'340827': '望江县',
'340828': '岳西县',
'340881': '桐城市',
'341000': '黄山市',
'341001': '市辖区',
'341002': '屯溪区',
'341003': '黄山区',
'341004': '徽州区',
'341021': '歙县',
'341022': '休宁县',
'341023': '黟县',
'341024': '祁门县',
'341100': '滁州市',
'341101': '市辖区',
'341102': '琅琊区',
'341103': '南谯区',
'341122': '来安县',
'341124': '全椒县',
'341125': '定远县',
'341126': '凤阳县',
'341181': '天长市',
'341182': '明光市',
'341200': '阜阳市',
'341201': '市辖区',
'341202': '颍州区',
'341203': '颍东区',
'341204': '颍泉区',
'341221': '临泉县',
'341222': '太和县',
'341225': '阜南县',
'341226': '颍上县',
'341282': '界首市',
'341300': '宿州市',
'341301': '市辖区',
'341302': '埇桥区',
'341321': '砀山县',
'341322': '萧县',
'341323': '灵璧县',
'341324': '泗县',
'341500': '六安市',
'341501': '市辖区',
'341502': '金安区',
'341503': '裕安区',
'341504': '叶集区',
'341522': '霍邱县',
'341523': '舒城县',
'341524': '金寨县',
'341525': '霍山县',
'341600': '亳州市',
'341601': '市辖区',
'341602': '谯城区',
'341621': '涡阳县',
'341622': '蒙城县',
'341623': '利辛县',
'341700': '池州市',
'341701': '市辖区',
'341702': '贵池区',
'341721': '东至县',
'341722': '石台县',
'341723': '青阳县',
'341800': '宣城市',
'341801': '市辖区',
'341802': '宣州区',
'341821': '郎溪县',
'341822': '广德县',
'341823': '泾县',
'341824': '绩溪县',
'341825': '旌德县',
'341881': '宁国市',
'350000': '福建省',
'350100': '福州市',
'350101': '市辖区',
'350102': '鼓楼区',
'350103': '台江区',
'350104': '仓山区',
'350105': '马尾区',
'350111': '晋安区',
'350121': '闽侯县',
'350122': '连江县',
'350123': '罗源县',
'350124': '闽清县',
'350125': '永泰县',
'350128': '平潭县',
'350181': '福清市',
'350182': '长乐市',
'350200': '厦门市',
'350201': '市辖区',
'350203': '思明区',
'350205': '海沧区',
'350206': '湖里区',
'350211': '集美区',
'350212': '同安区',
'350213': '翔安区',
'350300': '莆田市',
'350301': '市辖区',
'350302': '城厢区',
'350303': '涵江区',
'350304': '荔城区',
'350305': '秀屿区',
'350322': '仙游县',
'350400': '三明市',
'350401': '市辖区',
'350402': '梅列区',
'350403': '三元区',
'350421': '明溪县',
'350423': '清流县',
'350424': '宁化县',
'350425': '大田县',
'350426': '尤溪县',
'350427': '沙县',
'350428': '将乐县',
'350429': '泰宁县',
'350430': '建宁县',
'350481': '永安市',
'350500': '泉州市',
'350501': '市辖区',
'350502': '鲤城区',
'350503': '丰泽区',
'350504': '洛江区',
'350505': '泉港区',
'350521': '惠安县',
'350524': '安溪县',
'350525': '永春县',
'350526': '德化县',
'350527': '金门县',
'350581': '石狮市',
'350582': '晋江市',
'350583': '南安市',
'350600': '漳州市',
'350601': '市辖区',
'350602': '芗城区',
'350603': '龙文区',
'350622': '云霄县',
'350623': '漳浦县',
'350624': '诏安县',
'350625': '长泰县',
'350626': '东山县',
'350627': '南靖县',
'350628': '平和县',
'350629': '华安县',
'350681': '龙海市',
'350700': '南平市',
'350701': '市辖区',
'350702': '延平区',
'350703': '建阳区',
'350721': '顺昌县',
'350722': '浦城县',
'350723': '光泽县',
'350724': '松溪县',
'350725': '政和县',
'350781': '邵武市',
'350782': '武夷山市',
'350783': '建瓯市',
'350800': '龙岩市',
'350801': '市辖区',
'350802': '新罗区',
'350803': '永定区',
'350821': '长汀县',
'350823': '上杭县',
'350824': '武平县',
'350825': '连城县',
'350881': '漳平市',
'350900': '宁德市',
'350901': '市辖区',
'350902': '蕉城区',
'350921': '霞浦县',
'350922': '古田县',
'350923': '屏南县',
'350924': '寿宁县',
'350925': '周宁县',
'350926': '柘荣县',
'350981': '福安市',
'350982': '福鼎市',
'360000': '江西省',
'360100': '南昌市',
'360101': '市辖区',
'360102': '东湖区',
'360103': '西湖区',
'360104': '青云谱区',
'360105': '湾里区',
'360111': '青山湖区',
'360112': '新建区',
'360121': '南昌县',
'360123': '安义县',
'360124': '进贤县',
'360200': '景德镇市',
'360201': '市辖区',
'360202': '昌江区',
'360203': '珠山区',
'360222': '浮梁县',
'360281': '乐平市',
'360300': '萍乡市',
'360301': '市辖区',
'360302': '安源区',
'360313': '湘东区',
'360321': '莲花县',
'360322': '上栗县',
'360323': '芦溪县',
'360400': '九江市',
'360401': '市辖区',
'360402': '濂溪区',
'360403': '浔阳区',
'360421': '九江县',
'360423': '武宁县',
'360424': '修水县',
'360425': '永修县',
'360426': '德安县',
'360428': '都昌县',
'360429': '湖口县',
'360430': '彭泽县',
'360481': '瑞昌市',
'360482': '共青城市',
'360483': '庐山市',
'360500': '新余市',
'360501': '市辖区',
'360502': '渝水区',
'360521': '分宜县',
'360600': '鹰潭市',
'360601': '市辖区',
'360602': '月湖区',
'360622': '余江县',
'360681': '贵溪市',
'360700': '赣州市',
'360701': '市辖区',
'360702': '章贡区',
'360703': '南康区',
'360721': '赣县',
'360722': '信丰县',
'360723': '大余县',
'360724': '上犹县',
'360725': '崇义县',
'360726': '安远县',
'360727': '龙南县',
'360728': '定南县',
'360729': '全南县',
'360730': '宁都县',
'360731': '于都县',
'360732': '兴国县',
'360733': '会昌县',
'360734': '寻乌县',
'360735': '石城县',
'360781': '瑞金市',
'360800': '吉安市',
'360801': '市辖区',
'360802': '吉州区',
'360803': '青原区',
'360821': '吉安县',
'360822': '吉水县',
'360823': '峡江县',
'360824': '新干县',
'360825': '永丰县',
'360826': '泰和县',
'360827': '遂川县',
'360828': '万安县',
'360829': '安福县',
'360830': '永新县',
'360881': '井冈山市',
'360900': '宜春市',
'360901': '市辖区',
'360902': '袁州区',
'360921': '奉新县',
'360922': '万载县',
'360923': '上高县',
'360924': '宜丰县',
'360925': '靖安县',
'360926': '铜鼓县',
'360981': '丰城市',
'360982': '樟树市',
'360983': '高安市',
'361000': '抚州市',
'361001': '市辖区',
'361002': '临川区',
'361021': '南城县',
'361022': '黎川县',
'361023': '南丰县',
'361024': '崇仁县',
'361025': '乐安县',
'361026': '宜黄县',
'361027': '金溪县',
'361028': '资溪县',
'361029': '东乡县',
'361030': '广昌县',
'361100': '上饶市',
'361101': '市辖区',
'361102': '信州区',
'361103': '广丰区',
'361121': '上饶县',
'361123': '玉山县',
'361124': '铅山县',
'361125': '横峰县',
'361126': '弋阳县',
'361127': '余干县',
'361128': '鄱阳县',
'361129': '万年县',
'361130': '婺源县',
'361181': '德兴市',
'370000': '山东省',
'370100': '济南市',
'370101': '市辖区',
'370102': '历下区',
'370103': '市中区',
'370104': '槐荫区',
'370105': '天桥区',
'370112': '历城区',
'370113': '长清区',
'370124': '平阴县',
'370125': '济阳县',
'370126': '商河县',
'370181': '章丘市',
'370200': '青岛市',
'370201': '市辖区',
'370202': '市南区',
'370203': '市北区',
'370211': '黄岛区',
'370212': '崂山区',
'370213': '李沧区',
'370214': '城阳区',
'370281': '胶州市',
'370282': '即墨市',
'370283': '平度市',
'370285': '莱西市',
'370300': '淄博市',
'370301': '市辖区',
'370302': '淄川区',
'370303': '张店区',
'370304': '博山区',
'370305': '临淄区',
'370306': '周村区',
'370321': '桓台县',
'370322': '高青县',
'370323': '沂源县',
'370400': '枣庄市',
'370401': '市辖区',
'370402': '市中区',
'370403': '薛城区',
'370404': '峄城区',
'370405': '台儿庄区',
'370406': '山亭区',
'370481': '滕州市',
'370500': '东营市',
'370501': '市辖区',
'370502': '东营区',
'370503': '河口区',
'370505': '垦利区',
'370522': '利津县',
'370523': '广饶县',
'370600': '烟台市',
'370601': '市辖区',
'370602': '芝罘区',
'370611': '福山区',
'370612': '牟平区',
'370613': '莱山区',
'370634': '长岛县',
'370681': '龙口市',
'370682': '莱阳市',
'370683': '莱州市',
'370684': '蓬莱市',
'370685': '招远市',
'370686': '栖霞市',
'370687': '海阳市',
'370700': '潍坊市',
'370701': '市辖区',
'370702': '潍城区',
'370703': '寒亭区',
'370704': '坊子区',
'370705': '奎文区',
'370724': '临朐县',
'370725': '昌乐县',
'370781': '青州市',
'370782': '诸城市',
'370783': '寿光市',
'370784': '安丘市',
'370785': '高密市',
'370786': '昌邑市',
'370800': '济宁市',
'370801': '市辖区',
'370811': '任城区',
'370812': '兖州区',
'370826': '微山县',
'370827': '鱼台县',
'370828': '金乡县',
'370829': '嘉祥县',
'370830': '汶上县',
'370831': '泗水县',
'370832': '梁山县',
'370881': '曲阜市',
'370883': '邹城市',
'370900': '泰安市',
'370901': '市辖区',
'370902': '泰山区',
'370911': '岱岳区',
'370921': '宁阳县',
'370923': '东平县',
'370982': '新泰市',
'370983': '肥城市',
'371000': '威海市',
'371001': '市辖区',
'371002': '环翠区',
'371003': '文登区',
'371082': '荣成市',
'371083': '乳山市',
'371100': '日照市',
'371101': '市辖区',
'371102': '东港区',
'371103': '岚山区',
'371121': '五莲县',
'371122': '莒县',
'371200': '莱芜市',
'371201': '市辖区',
'371202': '莱城区',
'371203': '钢城区',
'371300': '临沂市',
'371301': '市辖区',
'371302': '兰山区',
'371311': '罗庄区',
'371312': '河东区',
'371321': '沂南县',
'371322': '郯城县',
'371323': '沂水县',
'371324': '兰陵县',
'371325': '费县',
'371326': '平邑县',
'371327': '莒南县',
'371328': '蒙阴县',
'371329': '临沭县',
'371400': '德州市',
'371401': '市辖区',
'371402': '德城区',
'371403': '陵城区',
'371422': '宁津县',
'371423': '庆云县',
'371424': '临邑县',
'371425': '齐河县',
'371426': '平原县',
'371427': '夏津县',
'371428': '武城县',
'371481': '乐陵市',
'371482': '禹城市',
'371500': '聊城市',
'371501': '市辖区',
'371502': '东昌府区',
'371521': '阳谷县',
'371522': '莘县',
'371523': '茌平县',
'371524': '东阿县',
'371525': '冠县',
'371526': '高唐县',
'371581': '临清市',
'371600': '滨州市',
'371601': '市辖区',
'371602': '滨城区',
'371603': '沾化区',
'371621': '惠民县',
'371622': '阳信县',
'371623': '无棣县',
'371625': '博兴县',
'371626': '邹平县',
'371700': '菏泽市',
'371701': '市辖区',
'371702': '牡丹区',
'371703': '定陶区',
'371721': '曹县',
'371722': '单县',
'371723': '成武县',
'371724': '巨野县',
'371725': '郓城县',
'371726': '鄄城县',
'371728': '东明县',
'410000': '河南省',
'410100': '郑州市',
'410101': '市辖区',
'410102': '中原区',
'410103': '二七区',
'410104': '管城回族区',
'410105': '金水区',
'410106': '上街区',
'410108': '惠济区',
'410122': '中牟县',
'410181': '巩义市',
'410182': '荥阳市',
'410183': '新密市',
'410184': '新郑市',
'410185': '登封市',
'410200': '开封市',
'410201': '市辖区',
'410202': '龙亭区',
'410203': '顺河回族区',
'410204': '鼓楼区',
'410205': '禹王台区',
'410211': '金明区',
'410212': '祥符区',
'410221': '杞县',
'410222': '通许县',
'410223': '尉氏县',
'410225': '兰考县',
'410300': '洛阳市',
'410301': '市辖区',
'410302': '老城区',
'410303': '西工区',
'410304': '瀍河回族区',
'410305': '涧西区',
'410306': '吉利区',
'410311': '洛龙区',
'410322': '孟津县',
'410323': '新安县',
'410324': '栾川县',
'410325': '嵩县',
'410326': '汝阳县',
'410327': '宜阳县',
'410328': '洛宁县',
'410329': '伊川县',
'410381': '偃师市',
'410400': '平顶山市',
'410401': '市辖区',
'410402': '新华区',
'410403': '卫东区',
'410404': '石龙区',
'410411': '湛河区',
'410421': '宝丰县',
'410422': '叶县',
'410423': '鲁山县',
'410425': '郏县',
'410481': '舞钢市',
'410482': '汝州市',
'410500': '安阳市',
'410501': '市辖区',
'410502': '文峰区',
'410503': '北关区',
'410505': '殷都区',
'410506': '龙安区',
'410522': '安阳县',
'410523': '汤阴县',
'410526': '滑县',
'410527': '内黄县',
'410581': '林州市',
'410600': '鹤壁市',
'410601': '市辖区',
'410602': '鹤山区',
'410603': '山城区',
'410611': '淇滨区',
'410621': '浚县',
'410622': '淇县',
'410700': '新乡市',
'410701': '市辖区',
'410702': '红旗区',
'410703': '卫滨区',
'410704': '凤泉区',
'410711': '牧野区',
'410721': '新乡县',
'410724': '获嘉县',
'410725': '原阳县',
'410726': '延津县',
'410727': '封丘县',
'410728': '长垣县',
'410781': '卫辉市',
'410782': '辉县市',
'410800': '焦作市',
'410801': '市辖区',
'410802': '解放区',
'410803': '中站区',
'410804': '马村区',
'410811': '山阳区',
'410821': '修武县',
'410822': '博爱县',
'410823': '武陟县',
'410825': '温县',
'410882': '沁阳市',
'410883': '孟州市',
'410900': '濮阳市',
'410901': '市辖区',
'410902': '华龙区',
'410922': '清丰县',
'410923': '南乐县',
'410926': '范县',
'410927': '台前县',
'410928': '濮阳县',
'411000': '许昌市',
'411001': '市辖区',
'411002': '魏都区',
'411023': '许昌县',
'411024': '鄢陵县',
'411025': '襄城县',
'411081': '禹州市',
'411082': '长葛市',
'411100': '漯河市',
'411101': '市辖区',
'411102': '源汇区',
'411103': '郾城区',
'411104': '召陵区',
'411121': '舞阳县',
'411122': '临颍县',
'411200': '三门峡市',
'411201': '市辖区',
'411202': '湖滨区',
'411203': '陕州区',
'411221': '渑池县',
'411224': '卢氏县',
'411281': '义马市',
'411282': '灵宝市',
'411300': '南阳市',
'411301': '市辖区',
'411302': '宛城区',
'411303': '卧龙区',
'411321': '南召县',
'411322': '方城县',
'411323': '西峡县',
'411324': '镇平县',
'411325': '内乡县',
'411326': '淅川县',
'411327': '社旗县',
'411328': '唐河县',
'411329': '新野县',
'411330': '桐柏县',
'411381': '邓州市',
'411400': '商丘市',
'411401': '市辖区',
'411402': '梁园区',
'411403': '睢阳区',
'411421': '民权县',
'411422': '睢县',
'411423': '宁陵县',
'411424': '柘城县',
'411425': '虞城县',
'411426': '夏邑县',
'411481': '永城市',
'411500': '信阳市',
'411501': '市辖区',
'411502': '浉河区',
'411503': '平桥区',
'411521': '罗山县',
'411522': '光山县',
'411523': '新县',
'411524': '商城县',
'411525': '固始县',
'411526': '潢川县',
'411527': '淮滨县',
'411528': '息县',
'411600': '周口市',
'411601': '市辖区',
'411602': '川汇区',
'411621': '扶沟县',
'411622': '西华县',
'411623': '商水县',
'411624': '沈丘县',
'411625': '郸城县',
'411626': '淮阳县',
'411627': '太康县',
'411628': '鹿邑县',
'411681': '项城市',
'411700': '驻马店市',
'411701': '市辖区',
'411702': '驿城区',
'411721': '西平县',
'411722': '上蔡县',
'411723': '平舆县',
'411724': '正阳县',
'411725': '确山县',
'411726': '泌阳县',
'411727': '汝南县',
'411728': '遂平县',
'411729': '新蔡县',
'419000': '省直辖县级行政区划',
'419001': '济源市',
'420000': '湖北省',
'420100': '武汉市',
'420101': '市辖区',
'420102': '江岸区',
'420103': '江汉区',
'420104': '硚口区',
'420105': '汉阳区',
'420106': '武昌区',
'420107': '青山区',
'420111': '洪山区',
'420112': '东西湖区',
'420113': '汉南区',
'420114': '蔡甸区',
'420115': '江夏区',
'420116': '黄陂区',
'420117': '新洲区',
'420200': '黄石市',
'420201': '市辖区',
'420202': '黄石港区',
'420203': '西塞山区',
'420204': '下陆区',
'420205': '铁山区',
'420222': '阳新县',
'420281': '大冶市',
'420300': '十堰市',
'420301': '市辖区',
'420302': '茅箭区',
'420303': '张湾区',
'420304': '郧阳区',
'420322': '郧西县',
'420323': '竹山县',
'420324': '竹溪县',
'420325': '房县',
'420381': '丹江口市',
'420500': '宜昌市',
'420501': '市辖区',
'420502': '西陵区',
'420503': '伍家岗区',
'420504': '点军区',
'420505': '猇亭区',
'420506': '夷陵区',
'420525': '远安县',
'420526': '兴山县',
'420527': '秭归县',
'420528': '长阳土家族自治县',
'420529': '五峰土家族自治县',
'420581': '宜都市',
'420582': '当阳市',
'420583': '枝江市',
'420600': '襄阳市',
'420601': '市辖区',
'420602': '襄城区',
'420606': '樊城区',
'420607': '襄州区',
'420624': '南漳县',
'420625': '谷城县',
'420626': '保康县',
'420682': '老河口市',
'420683': '枣阳市',
'420684': '宜城市',
'420700': '鄂州市',
'420701': '市辖区',
'420702': '梁子湖区',
'420703': '华容区',
'420704': '鄂城区',
'420800': '荆门市',
'420801': '市辖区',
'420802': '东宝区',
'420804': '掇刀区',
'420821': '京山县',
'420822': '沙洋县',
'420881': '钟祥市',
'420900': '孝感市',
'420901': '市辖区',
'420902': '孝南区',
'420921': '孝昌县',
'420922': '大悟县',
'420923': '云梦县',
'420981': '应城市',
'420982': '安陆市',
'420984': '汉川市',
'421000': '荆州市',
'421001': '市辖区',
'421002': '沙市区',
'421003': '荆州区',
'421022': '公安县',
'421023': '监利县',
'421024': '江陵县',
'421081': '石首市',
'421083': '洪湖市',
'421087': '松滋市',
'421100': '黄冈市',
'421101': '市辖区',
'421102': '黄州区',
'421121': '团风县',
'421122': '红安县',
'421123': '罗田县',
'421124': '英山县',
'421125': '浠水县',
'421126': '蕲春县',
'421127': '黄梅县',
'421181': '麻城市',
'421182': '武穴市',
'421200': '咸宁市',
'421201': '市辖区',
'421202': '咸安区',
'421221': '嘉鱼县',
'421222': '通城县',
'421223': '崇阳县',
'421224': '通山县',
'421281': '赤壁市',
'421300': '随州市',
'421301': '市辖区',
'421303': '曾都区',
'421321': '随县',
'421381': '广水市',
'422800': '恩施土家族苗族自治州',
'422801': '恩施市',
'422802': '利川市',
'422822': '建始县',
'422823': '巴东县',
'422825': '宣恩县',
'422826': '咸丰县',
'422827': '来凤县',
'422828': '鹤峰县',
'429000': '省直辖县级行政区划',
'429004': '仙桃市',
'429005': '潜江市',
'429006': '天门市',
'429021': '神农架林区',
'430000': '湖南省',
'430100': '长沙市',
'430101': '市辖区',
'430102': '芙蓉区',
'430103': '天心区',
'430104': '岳麓区',
'430105': '开福区',
'430111': '雨花区',
'430112': '望城区',
'430121': '长沙县',
'430124': '宁乡县',
'430181': '浏阳市',
'430200': '株洲市',
'430201': '市辖区',
'430202': '荷塘区',
'430203': '芦淞区',
'430204': '石峰区',
'430211': '天元区',
'430221': '株洲县',
'430223': '攸县',
'430224': '茶陵县',
'430225': '炎陵县',
'430281': '醴陵市',
'430300': '湘潭市',
'430301': '市辖区',
'430302': '雨湖区',
'430304': '岳塘区',
'430321': '湘潭县',
'430381': '湘乡市',
'430382': '韶山市',
'430400': '衡阳市',
'430401': '市辖区',
'430405': '珠晖区',
'430406': '雁峰区',
'430407': '石鼓区',
'430408': '蒸湘区',
'430412': '南岳区',
'430421': '衡阳县',
'430422': '衡南县',
'430423': '衡山县',
'430424': '衡东县',
'430426': '祁东县',
'430481': '耒阳市',
'430482': '常宁市',
'430500': '邵阳市',
'430501': '市辖区',
'430502': '双清区',
'430503': '大祥区',
'430511': '北塔区',
'430521': '邵东县',
'430522': '新邵县',
'430523': '邵阳县',
'430524': '隆回县',
'430525': '洞口县',
'430527': '绥宁县',
'430528': '新宁县',
'430529': '城步苗族自治县',
'430581': '武冈市',
'430600': '岳阳市',
'430601': '市辖区',
'430602': '岳阳楼区',
'430603': '云溪区',
'430611': '君山区',
'430621': '岳阳县',
'430623': '华容县',
'430624': '湘阴县',
'430626': '平江县',
'430681': '汨罗市',
'430682': '临湘市',
'430700': '常德市',
'430701': '市辖区',
'430702': '武陵区',
'430703': '鼎城区',
'430721': '安乡县',
'430722': '汉寿县',
'430723': '澧县',
'430724': '临澧县',
'430725': '桃源县',
'430726': '石门县',
'430781': '津市市',
'430800': '张家界市',
'430801': '市辖区',
'430802': '永定区',
'430811': '武陵源区',
'430821': '慈利县',
'430822': '桑植县',
'430900': '益阳市',
'430901': '市辖区',
'430902': '资阳区',
'430903': '赫山区',
'430921': '南县',
'430922': '桃江县',
'430923': '安化县',
'430981': '沅江市',
'431000': '郴州市',
'431001': '市辖区',
'431002': '北湖区',
'431003': '苏仙区',
'431021': '桂阳县',
'431022': '宜章县',
'431023': '永兴县',
'431024': '嘉禾县',
'431025': '临武县',
'431026': '汝城县',
'431027': '桂东县',
'431028': '安仁县',
'431081': '资兴市',
'431100': '永州市',
'431101': '市辖区',
'431102': '零陵区',
'431103': '冷水滩区',
'431121': '祁阳县',
'431122': '东安县',
'431123': '双牌县',
'431124': '道县',
'431125': '江永县',
'431126': '宁远县',
'431127': '蓝山县',
'431128': '新田县',
'431129': '江华瑶族自治县',
'431200': '怀化市',
'431201': '市辖区',
'431202': '鹤城区',
'431221': '中方县',
'431222': '沅陵县',
'431223': '辰溪县',
'431224': '溆浦县',
'431225': '会同县',
'431226': '麻阳苗族自治县',
'431227': '新晃侗族自治县',
'431228': '芷江侗族自治县',
'431229': '靖州苗族侗族自治县',
'431230': '通道侗族自治县',
'431281': '洪江市',
'431300': '娄底市',
'431301': '市辖区',
'431302': '娄星区',
'431321': '双峰县',
'431322': '新化县',
'431381': '冷水江市',
'431382': '涟源市',
'433100': '湘西土家族苗族自治州',
'433101': '吉首市',
'433122': '泸溪县',
'433123': '凤凰县',
'433124': '花垣县',
'433125': '保靖县',
'433126': '古丈县',
'433127': '永顺县',
'433130': '龙山县',
'440000': '广东省',
'440100': '广州市',
'440101': '市辖区',
'440103': '荔湾区',
'440104': '越秀区',
'440105': '海珠区',
'440106': '天河区',
'440111': '白云区',
'440112': '黄埔区',
'440113': '番禺区',
'440114': '花都区',
'440115': '南沙区',
'440117': '从化区',
'440118': '增城区',
'440200': '韶关市',
'440201': '市辖区',
'440203': '武江区',
'440204': '浈江区',
'440205': '曲江区',
'440222': '始兴县',
'440224': '仁化县',
'440229': '翁源县',
'440232': '乳源瑶族自治县',
'440233': '新丰县',
'440281': '乐昌市',
'440282': '南雄市',
'440300': '深圳市',
'440301': '市辖区',
'440303': '罗湖区',
'440304': '福田区',
'440305': '南山区',
'440306': '宝安区',
'440307': '龙岗区',
'440308': '盐田区',
'440400': '珠海市',
'440401': '市辖区',
'440402': '香洲区',
'440403': '斗门区',
'440404': '金湾区',
'440500': '汕头市',
'440501': '市辖区',
'440507': '龙湖区',
'440511': '金平区',
'440512': '濠江区',
'440513': '潮阳区',
'440514': '潮南区',
'440515': '澄海区',
'440523': '南澳县',
'440600': '佛山市',
'440601': '市辖区',
'440604': '禅城区',
'440605': '南海区',
'440606': '顺德区',
'440607': '三水区',
'440608': '高明区',
'440700': '江门市',
'440701': '市辖区',
'440703': '蓬江区',
'440704': '江海区',
'440705': '新会区',
'440781': '台山市',
'440783': '开平市',
'440784': '鹤山市',
'440785': '恩平市',
'440800': '湛江市',
'440801': '市辖区',
'440802': '赤坎区',
'440803': '霞山区',
'440804': '坡头区',
'440811': '麻章区',
'440823': '遂溪县',
'440825': '徐闻县',
'440881': '廉江市',
'440882': '雷州市',
'440883': '吴川市',
'440900': '茂名市',
'440901': '市辖区',
'440902': '茂南区',
'440904': '电白区',
'440981': '高州市',
'440982': '化州市',
'440983': '信宜市',
'441200': '肇庆市',
'441201': '市辖区',
'441202': '端州区',
'441203': '鼎湖区',
'441204': '高要区',
'441223': '广宁县',
'441224': '怀集县',
'441225': '封开县',
'441226': '德庆县',
'441284': '四会市',
'441300': '惠州市',
'441301': '市辖区',
'441302': '惠城区',
'441303': '惠阳区',
'441322': '博罗县',
'441323': '惠东县',
'441324': '龙门县',
'441400': '梅州市',
'441401': '市辖区',
'441402': '梅江区',
'441403': '梅县区',
'441422': '大埔县',
'441423': '丰顺县',
'441424': '五华县',
'441426': '平远县',
'441427': '蕉岭县',
'441481': '兴宁市',
'441500': '汕尾市',
'441501': '市辖区',
'441502': '城区',
'441521': '海丰县',
'441523': '陆河县',
'441581': '陆丰市',
'441600': '河源市',
'441601': '市辖区',
'441602': '源城区',
'441621': '紫金县',
'441622': '龙川县',
'441623': '连平县',
'441624': '和平县',
'441625': '东源县',
'441700': '阳江市',
'441701': '市辖区',
'441702': '江城区',
'441704': '阳东区',
'441721': '阳西县',
'441781': '阳春市',
'441800': '清远市',
'441801': '市辖区',
'441802': '清城区',
'441803': '清新区',
'441821': '佛冈县',
'441823': '阳山县',
'441825': '连山壮族瑶族自治县',
'441826': '连南瑶族自治县',
'441881': '英德市',
'441882': '连州市',
'441900': '东莞市',
'442000': '中山市',
'445100': '潮州市',
'445101': '市辖区',
'445102': '湘桥区',
'445103': '潮安区',
'445122': '饶平县',
'445200': '揭阳市',
'445201': '市辖区',
'445202': '榕城区',
'445203': '揭东区',
'445222': '揭西县',
'445224': '惠来县',
'445281': '普宁市',
'445300': '云浮市',
'445301': '市辖区',
'445302': '云城区',
'445303': '云安区',
'445321': '新兴县',
'445322': '郁南县',
'445381': '罗定市',
'450000': '广西壮族自治区',
'450100': '南宁市',
'450101': '市辖区',
'450102': '兴宁区',
'450103': '青秀区',
'450105': '江南区',
'450107': '西乡塘区',
'450108': '良庆区',
'450109': '邕宁区',
'450110': '武鸣区',
'450123': '隆安县',
'450124': '马山县',
'450125': '上林县',
'450126': '宾阳县',
'450127': '横县',
'450200': '柳州市',
'450201': '市辖区',
'450202': '城中区',
'450203': '鱼峰区',
'450204': '柳南区',
'450205': '柳北区',
'450206': '柳江区',
'450222': '柳城县',
'450223': '鹿寨县',
'450224': '融安县',
'450225': '融水苗族自治县',
'450226': '三江侗族自治县',
'450300': '桂林市',
'450301': '市辖区',
'450302': '秀峰区',
'450303': '叠彩区',
'450304': '象山区',
'450305': '七星区',
'450311': '雁山区',
'450312': '临桂区',
'450321': '阳朔县',
'450323': '灵川县',
'450324': '全州县',
'450325': '兴安县',
'450326': '永福县',
'450327': '灌阳县',
'450328': '龙胜各族自治县',
'450329': '资源县',
'450330': '平乐县',
'450331': '荔浦县',
'450332': '恭城瑶族自治县',
'450400': '梧州市',
'450401': '市辖区',
'450403': '万秀区',
'450405': '长洲区',
'450406': '龙圩区',
'450421': '苍梧县',
'450422': '藤县',
'450423': '蒙山县',
'450481': '岑溪市',
'450500': '北海市',
'450501': '市辖区',
'450502': '海城区',
'450503': '银海区',
'450512': '铁山港区',
'450521': '合浦县',
'450600': '防城港市',
'450601': '市辖区',
'450602': '港口区',
'450603': '防城区',
'450621': '上思县',
'450681': '东兴市',
'450700': '钦州市',
'450701': '市辖区',
'450702': '钦南区',
'450703': '钦北区',
'450721': '灵山县',
'450722': '浦北县',
'450800': '贵港市',
'450801': '市辖区',
'450802': '港北区',
'450803': '港南区',
'450804': '覃塘区',
'450821': '平南县',
'450881': '桂平市',
'450900': '玉林市',
'450901': '市辖区',
'450902': '玉州区',
'450903': '福绵区',
'450921': '容县',
'450922': '陆川县',
'450923': '博白县',
'450924': '兴业县',
'450981': '北流市',
'451000': '百色市',
'451001': '市辖区',
'451002': '右江区',
'451021': '田阳县',
'451022': '田东县',
'451023': '平果县',
'451024': '德保县',
'451026': '那坡县',
'451027': '凌云县',
'451028': '乐业县',
'451029': '田林县',
'451030': '西林县',
'451031': '隆林各族自治县',
'451081': '靖西市',
'451100': '贺州市',
'451101': '市辖区',
'451102': '八步区',
'451103': '平桂区',
'451121': '昭平县',
'451122': '钟山县',
'451123': '富川瑶族自治县',
'451200': '河池市',
'451201': '市辖区',
'451202': '金城江区',
'451221': '南丹县',
'451222': '天峨县',
'451223': '凤山县',
'451224': '东兰县',
'451225': '罗城仫佬族自治县',
'451226': '环江毛南族自治县',
'451227': '巴马瑶族自治县',
'451228': '都安瑶族自治县',
'451229': '大化瑶族自治县',
'451281': '宜州市',
'451300': '来宾市',
'451301': '市辖区',
'451302': '兴宾区',
'451321': '忻城县',
'451322': '象州县',
'451323': '武宣县',
'451324': '金秀瑶族自治县',
'451381': '合山市',
'451400': '崇左市',
'451401': '市辖区',
'451402': '江州区',
'451421': '扶绥县',
'451422': '宁明县',
'451423': '龙州县',
'451424': '大新县',
'451425': '天等县',
'451481': '凭祥市',
'460000': '海南省',
'460100': '海口市',
'460101': '市辖区',
'460105': '秀英区',
'460106': '龙华区',
'460107': '琼山区',
'460108': '美兰区',
'460200': '三亚市',
'460201': '市辖区',
'460202': '海棠区',
'460203': '吉阳区',
'460204': '天涯区',
'460205': '崖州区',
'460300': '三沙市',
'460400': '儋州市',
'469000': '省直辖县级行政区划',
'469001': '五指山市',
'469002': '琼海市',
'469005': '文昌市',
'469006': '万宁市',
'469007': '东方市',
'469021': '定安县',
'469022': '屯昌县',
'469023': '澄迈县',
'469024': '临高县',
'469025': '白沙黎族自治县',
'469026': '昌江黎族自治县',
'469027': '乐东黎族自治县',
'469028': '陵水黎族自治县',
'469029': '保亭黎族苗族自治县',
'469030': '琼中黎族苗族自治县',
'500000': '重庆市',
'500100': '市辖区',
'500101': '万州区',
'500102': '涪陵区',
'500103': '渝中区',
'500104': '大渡口区',
'500105': '江北区',
'500106': '沙坪坝区',
'500107': '九龙坡区',
'500108': '南岸区',
'500109': '北碚区',
'500110': '綦江区',
'500111': '大足区',
'500112': '渝北区',
'500113': '巴南区',
'500114': '黔江区',
'500115': '长寿区',
'500116': '江津区',
'500117': '合川区',
'500118': '永川区',
'500119': '南川区',
'500120': '璧山区',
'500151': '铜梁区',
'500152': '潼南区',
'500153': '荣昌区',
'500154': '开州区',
'500200': '县',
'500228': '梁平县',
'500229': '城口县',
'500230': '丰都县',
'500231': '垫江县',
'500232': '武隆县',
'500233': '忠县',
'500235': '云阳县',
'500236': '奉节县',
'500237': '巫山县',
'500238': '巫溪县',
'500240': '石柱土家族自治县',
'500241': '秀山土家族苗族自治县',
'500242': '酉阳土家族苗族自治县',
'500243': '彭水苗族土家族自治县',
'510000': '四川省',
'510100': '成都市',
'510101': '市辖区',
'510104': '锦江区',
'510105': '青羊区',
'510106': '金牛区',
'510107': '武侯区',
'510108': '成华区',
'510112': '龙泉驿区',
'510113': '青白江区',
'510114': '新都区',
'510115': '温江区',
'510116': '双流区',
'510121': '金堂县',
'510124': '郫县',
'510129': '大邑县',
'510131': '蒲江县',
'510132': '新津县',
'510181': '都江堰市',
'510182': '彭州市',
'510183': '邛崃市',
'510184': '崇州市',
'510185': '简阳市',
'510300': '自贡市',
'510301': '市辖区',
'510302': '自流井区',
'510303': '贡井区',
'510304': '大安区',
'510311': '沿滩区',
'510321': '荣县',
'510322': '富顺县',
'510400': '攀枝花市',
'510401': '市辖区',
'510402': '东区',
'510403': '西区',
'510411': '仁和区',
'510421': '米易县',
'510422': '盐边县',
'510500': '泸州市',
'510501': '市辖区',
'510502': '江阳区',
'510503': '纳溪区',
'510504': '龙马潭区',
'510521': '泸县',
'510522': '合江县',
'510524': '叙永县',
'510525': '古蔺县',
'510600': '德阳市',
'510601': '市辖区',
'510603': '旌阳区',
'510623': '中江县',
'510626': '罗江县',
'510681': '广汉市',
'510682': '什邡市',
'510683': '绵竹市',
'510700': '绵阳市',
'510701': '市辖区',
'510703': '涪城区',
'510704': '游仙区',
'510705': '安州区',
'510722': '三台县',
'510723': '盐亭县',
'510725': '梓潼县',
'510726': '北川羌族自治县',
'510727': '平武县',
'510781': '江油市',
'510800': '广元市',
'510801': '市辖区',
'510802': '利州区',
'510811': '昭化区',
'510812': '朝天区',
'510821': '旺苍县',
'510822': '青川县',
'510823': '剑阁县',
'510824': '苍溪县',
'510900': '遂宁市',
'510901': '市辖区',
'510903': '船山区',
'510904': '安居区',
'510921': '蓬溪县',
'510922': '射洪县',
'510923': '大英县',
'511000': '内江市',
'511001': '市辖区',
'511002': '市中区',
'511011': '东兴区',
'511024': '威远县',
'511025': '资中县',
'511028': '隆昌县',
'511100': '乐山市',
'511101': '市辖区',
'511102': '市中区',
'511111': '沙湾区',
'511112': '五通桥区',
'511113': '金口河区',
'511123': '犍为县',
'511124': '井研县',
'511126': '夹江县',
'511129': '沐川县',
'511132': '峨边彝族自治县',
'511133': '马边彝族自治县',
'511181': '峨眉山市',
'511300': '南充市',
'511301': '市辖区',
'511302': '顺庆区',
'511303': '高坪区',
'511304': '嘉陵区',
'511321': '南部县',
'511322': '营山县',
'511323': '蓬安县',
'511324': '仪陇县',
'511325': '西充县',
'511381': '阆中市',
'511400': '眉山市',
'511401': '市辖区',
'511402': '东坡区',
'511403': '彭山区',
'511421': '仁寿县',
'511423': '洪雅县',
'511424': '丹棱县',
'511425': '青神县',
'511500': '宜宾市',
'511501': '市辖区',
'511502': '翠屏区',
'511503': '南溪区',
'511521': '宜宾县',
'511523': '江安县',
'511524': '长宁县',
'511525': '高县',
'511526': '珙县',
'511527': '筠连县',
'511528': '兴文县',
'511529': '屏山县',
'511600': '广安市',
'511601': '市辖区',
'511602': '广安区',
'511603': '前锋区',
'511621': '岳池县',
'511622': '武胜县',
'511623': '邻水县',
'511681': '华蓥市',
'511700': '达州市',
'511701': '市辖区',
'511702': '通川区',
'511703': '达川区',
'511722': '宣汉县',
'511723': '开江县',
'511724': '大竹县',
'511725': '渠县',
'511781': '万源市',
'511800': '雅安市',
'511801': '市辖区',
'511802': '雨城区',
'511803': '名山区',
'511822': '荥经县',
'511823': '汉源县',
'511824': '石棉县',
'511825': '天全县',
'511826': '芦山县',
'511827': '宝兴县',
'511900': '巴中市',
'511901': '市辖区',
'511902': '巴州区',
'511903': '恩阳区',
'511921': '通江县',
'511922': '南江县',
'511923': '平昌县',
'512000': '资阳市',
'512001': '市辖区',
'512002': '雁江区',
'512021': '安岳县',
'512022': '乐至县',
'513200': '阿坝藏族羌族自治州',
'513201': '马尔康市',
'513221': '汶川县',
'513222': '理县',
'513223': '茂县',
'513224': '松潘县',
'513225': '九寨沟县',
'513226': '金川县',
'513227': '小金县',
'513228': '黑水县',
'513230': '壤塘县',
'513231': '阿坝县',
'513232': '若尔盖县',
'513233': '红原县',
'513300': '甘孜藏族自治州',
'513301': '康定市',
'513322': '泸定县',
'513323': '丹巴县',
'513324': '九龙县',
'513325': '雅江县',
'513326': '道孚县',
'513327': '炉霍县',
'513328': '甘孜县',
'513329': '新龙县',
'513330': '德格县',
'513331': '白玉县',
'513332': '石渠县',
'513333': '色达县',
'513334': '理塘县',
'513335': '巴塘县',
'513336': '乡城县',
'513337': '稻城县',
'513338': '得荣县',
'513400': '凉山彝族自治州',
'513401': '西昌市',
'513422': '木里藏族自治县',
'513423': '盐源县',
'513424': '德昌县',
'513425': '会理县',
'513426': '会东县',
'513427': '宁南县',
'513428': '普格县',
'513429': '布拖县',
'513430': '金阳县',
'513431': '昭觉县',
'513432': '喜德县',
'513433': '冕宁县',
'513434': '越西县',
'513435': '甘洛县',
'513436': '美姑县',
'513437': '雷波县',
'520000': '贵州省',
'520100': '贵阳市',
'520101': '市辖区',
'520102': '南明区',
'520103': '云岩区',
'520111': '花溪区',
'520112': '乌当区',
'520113': '白云区',
'520115': '观山湖区',
'520121': '开阳县',
'520122': '息烽县',
'520123': '修文县',
'520181': '清镇市',
'520200': '六盘水市',
'520201': '钟山区',
'520203': '六枝特区',
'520221': '水城县',
'520222': '盘县',
'520300': '遵义市',
'520301': '市辖区',
'520302': '红花岗区',
'520303': '汇川区',
'520304': '播州区',
'520322': '桐梓县',
'520323': '绥阳县',
'520324': '正安县',
'520325': '道真仡佬族苗族自治县',
'520326': '务川仡佬族苗族自治县',
'520327': '凤冈县',
'520328': '湄潭县',
'520329': '余庆县',
'520330': '习水县',
'520381': '赤水市',
'520382': '仁怀市',
'520400': '安顺市',
'520401': '市辖区',
'520402': '西秀区',
'520403': '平坝区',
'520422': '普定县',
'520423': '镇宁布依族苗族自治县',
'520424': '关岭布依族苗族自治县',
'520425': '紫云苗族布依族自治县',
'520500': '毕节市',
'520501': '市辖区',
'520502': '七星关区',
'520521': '大方县',
'520522': '黔西县',
'520523': '金沙县',
'520524': '织金县',
'520525': '纳雍县',
'520526': '威宁彝族回族苗族自治县',
'520527': '赫章县',
'520600': '铜仁市',
'520601': '市辖区',
'520602': '碧江区',
'520603': '万山区',
'520621': '江口县',
'520622': '玉屏侗族自治县',
'520623': '石阡县',
'520624': '思南县',
'520625': '印江土家族苗族自治县',
'520626': '德江县',
'520627': '沿河土家族自治县',
'520628': '松桃苗族自治县',
'522300': '黔西南布依族苗族自治州',
'522301': '兴义市',
'522322': '兴仁县',
'522323': '普安县',
'522324': '晴隆县',
'522325': '贞丰县',
'522326': '望谟县',
'522327': '册亨县',
'522328': '安龙县',
'522600': '黔东南苗族侗族自治州',
'522601': '凯里市',
'522622': '黄平县',
'522623': '施秉县',
'522624': '三穗县',
'522625': '镇远县',
'522626': '岑巩县',
'522627': '天柱县',
'522628': '锦屏县',
'522629': '剑河县',
'522630': '台江县',
'522631': '黎平县',
'522632': '榕江县',
'522633': '从江县',
'522634': '雷山县',
'522635': '麻江县',
'522636': '丹寨县',
'522700': '黔南布依族苗族自治州',
'522701': '都匀市',
'522702': '福泉市',
'522722': '荔波县',
'522723': '贵定县',
'522725': '瓮安县',
'522726': '独山县',
'522727': '平塘县',
'522728': '罗甸县',
'522729': '长顺县',
'522730': '龙里县',
'522731': '惠水县',
'522732': '三都水族自治县',
'530000': '云南省',
'530100': '昆明市',
'530101': '市辖区',
'530102': '五华区',
'530103': '盘龙区',
'530111': '官渡区',
'530112': '西山区',
'530113': '东川区',
'530114': '呈贡区',
'530122': '晋宁县',
'530124': '富民县',
'530125': '宜良县',
'530126': '石林彝族自治县',
'530127': '嵩明县',
'530128': '禄劝彝族苗族自治县',
'530129': '寻甸回族彝族自治县',
'530181': '安宁市',
'530300': '曲靖市',
'530301': '市辖区',
'530302': '麒麟区',
'530303': '沾益区',
'530321': '马龙县',
'530322': '陆良县',
'530323': '师宗县',
'530324': '罗平县',
'530325': '富源县',
'530326': '会泽县',
'530381': '宣威市',
'530400': '玉溪市',
'530401': '市辖区',
'530402': '红塔区',
'530403': '江川区',
'530422': '澄江县',
'530423': '通海县',
'530424': '华宁县',
'530425': '易门县',
'530426': '峨山彝族自治县',
'530427': '新平彝族傣族自治县',
'530428': '元江哈尼族彝族傣族自治县',
'530500': '保山市',
'530501': '市辖区',
'530502': '隆阳区',
'530521': '施甸县',
'530523': '龙陵县',
'530524': '昌宁县',
'530581': '腾冲市',
'530600': '昭通市',
'530601': '市辖区',
'530602': '昭阳区',
'530621': '鲁甸县',
'530622': '巧家县',
'530623': '盐津县',
'530624': '大关县',
'530625': '永善县',
'530626': '绥江县',
'530627': '镇雄县',
'530628': '彝良县',
'530629': '威信县',
'530630': '水富县',
'530700': '丽江市',
'530701': '市辖区',
'530702': '古城区',
'530721': '玉龙纳西族自治县',
'530722': '永胜县',
'530723': '华坪县',
'530724': '宁蒗彝族自治县',
'530800': '普洱市',
'530801': '市辖区',
'530802': '思茅区',
'530821': '宁洱哈尼族彝族自治县',
'530822': '墨江哈尼族自治县',
'530823': '景东彝族自治县',
'530824': '景谷傣族彝族自治县',
'530825': '镇沅彝族哈尼族拉祜族自治县',
'530826': '江城哈尼族彝族自治县',
'530827': '孟连傣族拉祜族佤族自治县',
'530828': '澜沧拉祜族自治县',
'530829': '西盟佤族自治县',
'530900': '临沧市',
'530901': '市辖区',
'530902': '临翔区',
'530921': '凤庆县',
'530922': '云县',
'530923': '永德县',
'530924': '镇康县',
'530925': '双江拉祜族佤族布朗族傣族自治县',
'530926': '耿马傣族佤族自治县',
'530927': '沧源佤族自治县',
'532300': '楚雄彝族自治州',
'532301': '楚雄市',
'532322': '双柏县',
'532323': '牟定县',
'532324': '南华县',
'532325': '姚安县',
'532326': '大姚县',
'532327': '永仁县',
'532328': '元谋县',
'532329': '武定县',
'532331': '禄丰县',
'532500': '红河哈尼族彝族自治州',
'532501': '个旧市',
'532502': '开远市',
'532503': '蒙自市',
'532504': '弥勒市',
'532523': '屏边苗族自治县',
'532524': '建水县',
'532525': '石屏县',
'532527': '泸西县',
'532528': '元阳县',
'532529': '红河县',
'532530': '金平苗族瑶族傣族自治县',
'532531': '绿春县',
'532532': '河口瑶族自治县',
'532600': '文山壮族苗族自治州',
'532601': '文山市',
'532622': '砚山县',
'532623': '西畴县',
'532624': '麻栗坡县',
'532625': '马关县',
'532626': '丘北县',
'532627': '广南县',
'532628': '富宁县',
'532800': '西双版纳傣族自治州',
'532801': '景洪市',
'532822': '勐海县',
'532823': '勐腊县',
'532900': '大理白族自治州',
'532901': '大理市',
'532922': '漾濞彝族自治县',
'532923': '祥云县',
'532924': '宾川县',
'532925': '弥渡县',
'532926': '南涧彝族自治县',
'532927': '巍山彝族回族自治县',
'532928': '永平县',
'532929': '云龙县',
'532930': '洱源县',
'532931': '剑川县',
'532932': '鹤庆县',
'533100': '德宏傣族景颇族自治州',
'533102': '瑞丽市',
'533103': '芒市',
'533122': '梁河县',
'533123': '盈江县',
'533124': '陇川县',
'533300': '怒江傈僳族自治州',
'533301': '泸水市',
'533323': '福贡县',
'533324': '贡山独龙族怒族自治县',
'533325': '兰坪白族普米族自治县',
'533400': '迪庆藏族自治州',
'533401': '香格里拉市',
'533422': '德钦县',
'533423': '维西傈僳族自治县',
'540000': '西藏自治区',
'540100': '拉萨市',
'540101': '市辖区',
'540102': '城关区',
'540103': '堆龙德庆区',
'540121': '林周县',
'540122': '当雄县',
'540123': '尼木县',
'540124': '曲水县',
'540126': '达孜县',
'540127': '墨竹工卡县',
'540200': '日喀则市',
'540202': '桑珠孜区',
'540221': '南木林县',
'540222': '江孜县',
'540223': '定日县',
'540224': '萨迦县',
'540225': '拉孜县',
'540226': '昂仁县',
'540227': '谢通门县',
'540228': '白朗县',
'540229': '仁布县',
'540230': '康马县',
'540231': '定结县',
'540232': '仲巴县',
'540233': '亚东县',
'540234': '吉隆县',
'540235': '聂拉木县',
'540236': '萨嘎县',
'540237': '岗巴县',
'540300': '昌都市',
'540302': '卡若区',
'540321': '江达县',
'540322': '贡觉县',
'540323': '类乌齐县',
'540324': '丁青县',
'540325': '察雅县',
'540326': '八宿县',
'540327': '左贡县',
'540328': '芒康县',
'540329': '洛隆县',
'540330': '边坝县',
'540400': '林芝市',
'540402': '巴宜区',
'540421': '工布江达县',
'540422': '米林县',
'540423': '墨脱县',
'540424': '波密县',
'540425': '察隅县',
'540426': '朗县',
'540500': '山南市',
'540501': '市辖区',
'540502': '乃东区',
'540521': '扎囊县',
'540522': '贡嘎县',
'540523': '桑日县',
'540524': '琼结县',
'540525': '曲松县',
'540526': '措美县',
'540527': '洛扎县',
'540528': '加查县',
'540529': '隆子县',
'540530': '错那县',
'540531': '浪卡子县',
'542400': '那曲地区',
'542421': '那曲县',
'542422': '嘉黎县',
'542423': '比如县',
'542424': '聂荣县',
'542425': '安多县',
'542426': '申扎县',
'542427': '索县',
'542428': '班戈县',
'542429': '巴青县',
'542430': '尼玛县',
'542431': '双湖县',
'542500': '阿里地区',
'542521': '普兰县',
'542522': '札达县',
'542523': '噶尔县',
'542524': '日土县',
'542525': '革吉县',
'542526': '改则县',
'542527': '措勤县',
'610000': '陕西省',
'610100': '西安市',
'610101': '市辖区',
'610102': '新城区',
'610103': '碑林区',
'610104': '莲湖区',
'610111': '灞桥区',
'610112': '未央区',
'610113': '雁塔区',
'610114': '阎良区',
'610115': '临潼区',
'610116': '长安区',
'610117': '高陵区',
'610122': '蓝田县',
'610124': '周至县',
'610125': '户县',
'610200': '铜川市',
'610201': '市辖区',
'610202': '王益区',
'610203': '印台区',
'610204': '耀州区',
'610222': '宜君县',
'610300': '宝鸡市',
'610301': '市辖区',
'610302': '渭滨区',
'610303': '金台区',
'610304': '陈仓区',
'610322': '凤翔县',
'610323': '岐山县',
'610324': '扶风县',
'610326': '眉县',
'610327': '陇县',
'610328': '千阳县',
'610329': '麟游县',
'610330': '凤县',
'610331': '太白县',
'610400': '咸阳市',
'610401': '市辖区',
'610402': '秦都区',
'610403': '杨陵区',
'610404': '渭城区',
'610422': '三原县',
'610423': '泾阳县',
'610424': '乾县',
'610425': '礼泉县',
'610426': '永寿县',
'610427': '彬县',
'610428': '长武县',
'610429': '旬邑县',
'610430': '淳化县',
'610431': '武功县',
'610481': '兴平市',
'610500': '渭南市',
'610501': '市辖区',
'610502': '临渭区',
'610503': '华州区',
'610522': '潼关县',
'610523': '大荔县',
'610524': '合阳县',
'610525': '澄城县',
'610526': '蒲城县',
'610527': '白水县',
'610528': '富平县',
'610581': '韩城市',
'610582': '华阴市',
'610600': '延安市',
'610601': '市辖区',
'610602': '宝塔区',
'610603': '安塞区',
'610621': '延长县',
'610622': '延川县',
'610623': '子长县',
'610625': '志丹县',
'610626': '吴起县',
'610627': '甘泉县',
'610628': '富县',
'610629': '洛川县',
'610630': '宜川县',
'610631': '黄龙县',
'610632': '黄陵县',
'610700': '汉中市',
'610701': '市辖区',
'610702': '汉台区',
'610721': '南郑县',
'610722': '城固县',
'610723': '洋县',
'610724': '西乡县',
'610725': '勉县',
'610726': '宁强县',
'610727': '略阳县',
'610728': '镇巴县',
'610729': '留坝县',
'610730': '佛坪县',
'610800': '榆林市',
'610801': '市辖区',
'610802': '榆阳区',
'610803': '横山区',
'610821': '神木县',
'610822': '府谷县',
'610824': '靖边县',
'610825': '定边县',
'610826': '绥德县',
'610827': '米脂县',
'610828': '佳县',
'610829': '吴堡县',
'610830': '清涧县',
'610831': '子洲县',
'610900': '安康市',
'610901': '市辖区',
'610902': '汉滨区',
'610921': '汉阴县',
'610922': '石泉县',
'610923': '宁陕县',
'610924': '紫阳县',
'610925': '岚皋县',
'610926': '平利县',
'610927': '镇坪县',
'610928': '旬阳县',
'610929': '白河县',
'611000': '商洛市',
'611001': '市辖区',
'611002': '商州区',
'611021': '洛南县',
'611022': '丹凤县',
'611023': '商南县',
'611024': '山阳县',
'611025': '镇安县',
'611026': '柞水县',
'620000': '甘肃省',
'620100': '兰州市',
'620101': '市辖区',
'620102': '城关区',
'620103': '七里河区',
'620104': '西固区',
'620105': '安宁区',
'620111': '红古区',
'620121': '永登县',
'620122': '皋兰县',
'620123': '榆中县',
'620200': '嘉峪关市',
'620201': '市辖区',
'620300': '金昌市',
'620301': '市辖区',
'620302': '金川区',
'620321': '永昌县',
'620400': '白银市',
'620401': '市辖区',
'620402': '白银区',
'620403': '平川区',
'620421': '靖远县',
'620422': '会宁县',
'620423': '景泰县',
'620500': '天水市',
'620501': '市辖区',
'620502': '秦州区',
'620503': '麦积区',
'620521': '清水县',
'620522': '秦安县',
'620523': '甘谷县',
'620524': '武山县',
'620525': '张家川回族自治县',
'620600': '武威市',
'620601': '市辖区',
'620602': '凉州区',
'620621': '民勤县',
'620622': '古浪县',
'620623': '天祝藏族自治县',
'620700': '张掖市',
'620701': '市辖区',
'620702': '甘州区',
'620721': '肃南裕固族自治县',
'620722': '民乐县',
'620723': '临泽县',
'620724': '高台县',
'620725': '山丹县',
'620800': '平凉市',
'620801': '市辖区',
'620802': '崆峒区',
'620821': '泾川县',
'620822': '灵台县',
'620823': '崇信县',
'620824': '华亭县',
'620825': '庄浪县',
'620826': '静宁县',
'620900': '酒泉市',
'620901': '市辖区',
'620902': '肃州区',
'620921': '金塔县',
'620922': '瓜州县',
'620923': '肃北蒙古族自治县',
'620924': '阿克塞哈萨克族自治县',
'620981': '玉门市',
'620982': '敦煌市',
'621000': '庆阳市',
'621001': '市辖区',
'621002': '西峰区',
'621021': '庆城县',
'621022': '环县',
'621023': '华池县',
'621024': '合水县',
'621025': '正宁县',
'621026': '宁县',
'621027': '镇原县',
'621100': '定西市',
'621101': '市辖区',
'621102': '安定区',
'621121': '通渭县',
'621122': '陇西县',
'621123': '渭源县',
'621124': '临洮县',
'621125': '漳县',
'621126': '岷县',
'621200': '陇南市',
'621201': '市辖区',
'621202': '武都区',
'621221': '成县',
'621222': '文县',
'621223': '宕昌县',
'621224': '康县',
'621225': '西和县',
'621226': '礼县',
'621227': '徽县',
'621228': '两当县',
'622900': '临夏回族自治州',
'622901': '临夏市',
'622921': '临夏县',
'622922': '康乐县',
'622923': '永靖县',
'622924': '广河县',
'622925': '和政县',
'622926': '东乡族自治县',
'622927': '积石山保安族东乡族撒拉族自治县',
'623000': '甘南藏族自治州',
'623001': '合作市',
'623021': '临潭县',
'623022': '卓尼县',
'623023': '舟曲县',
'623024': '迭部县',
'623025': '玛曲县',
'623026': '碌曲县',
'623027': '夏河县',
'630000': '青海省',
'630100': '西宁市',
'630101': '市辖区',
'630102': '城东区',
'630103': '城中区',
'630104': '城西区',
'630105': '城北区',
'630121': '大通回族土族自治县',
'630122': '湟中县',
'630123': '湟源县',
'630200': '海东市',
'630202': '乐都区',
'630203': '平安区',
'630222': '民和回族土族自治县',
'630223': '互助土族自治县',
'630224': '化隆回族自治县',
'630225': '循化撒拉族自治县',
'632200': '海北藏族自治州',
'632221': '门源回族自治县',
'632222': '祁连县',
'632223': '海晏县',
'632224': '刚察县',
'632300': '黄南藏族自治州',
'632321': '同仁县',
'632322': '尖扎县',
'632323': '泽库县',
'632324': '河南蒙古族自治县',
'632500': '海南藏族自治州',
'632521': '共和县',
'632522': '同德县',
'632523': '贵德县',
'632524': '兴海县',
'632525': '贵南县',
'632600': '果洛藏族自治州',
'632621': '玛沁县',
'632622': '班玛县',
'632623': '甘德县',
'632624': '达日县',
'632625': '久治县',
'632626': '玛多县',
'632700': '玉树藏族自治州',
'632701': '玉树市',
'632722': '杂多县',
'632723': '称多县',
'632724': '治多县',
'632725': '囊谦县',
'632726': '曲麻莱县',
'632800': '海西蒙古族藏族自治州',
'632801': '格尔木市',
'632802': '德令哈市',
'632821': '乌兰县',
'632822': '都兰县',
'632823': '天峻县',
'640000': '宁夏回族自治区',
'640100': '银川市',
'640101': '市辖区',
'640104': '兴庆区',
'640105': '西夏区',
'640106': '金凤区',
'640121': '永宁县',
'640122': '贺兰县',
'640181': '灵武市',
'640200': '石嘴山市',
'640201': '市辖区',
'640202': '大武口区',
'640205': '惠农区',
'640221': '平罗县',
'640300': '吴忠市',
'640301': '市辖区',
'640302': '利通区',
'640303': '红寺堡区',
'640323': '盐池县',
'640324': '同心县',
'640381': '青铜峡市',
'640400': '固原市',
'640401': '市辖区',
'640402': '原州区',
'640422': '西吉县',
'640423': '隆德县',
'640424': '泾源县',
'640425': '彭阳县',
'640500': '中卫市',
'640501': '市辖区',
'640502': '沙坡头区',
'640521': '中宁县',
'640522': '海原县',
'650000': '新疆维吾尔自治区',
'650100': '乌鲁木齐市',
'650101': '市辖区',
'650102': '天山区',
'650103': '沙依巴克区',
'650104': '新市区',
'650105': '水磨沟区',
'650106': '头屯河区',
'650107': '达坂城区',
'650109': '米东区',
'650121': '乌鲁木齐县',
'650200': '克拉玛依市',
'650201': '市辖区',
'650202': '独山子区',
'650203': '克拉玛依区',
'650204': '白碱滩区',
'650205': '乌尔禾区',
'650400': '吐鲁番市',
'650402': '高昌区',
'650421': '鄯善县',
'650422': '托克逊县',
'650500': '哈密市',
'650502': '伊州区',
'650521': '巴里坤哈萨克自治县',
'650522': '伊吾县',
'652300': '昌吉回族自治州',
'652301': '昌吉市',
'652302': '阜康市',
'652323': '呼图壁县',
'652324': '玛纳斯县',
'652325': '奇台县',
'652327': '吉木萨尔县',
'652328': '木垒哈萨克自治县',
'652700': '博尔塔拉蒙古自治州',
'652701': '博乐市',
'652702': '阿拉山口市',
'652722': '精河县',
'652723': '温泉县',
'652800': '巴音郭楞蒙古自治州',
'652801': '库尔勒市',
'652822': '轮台县',
'652823': '尉犁县',
'652824': '若羌县',
'652825': '且末县',
'652826': '焉耆回族自治县',
'652827': '和静县',
'652828': '和硕县',
'652829': '博湖县',
'652900': '阿克苏地区',
'652901': '阿克苏市',
'652922': '温宿县',
'652923': '库车县',
'652924': '沙雅县',
'652925': '新和县',
'652926': '拜城县',
'652927': '乌什县',
'652928': '阿瓦提县',
'652929': '柯坪县',
'653000': '克孜勒苏柯尔克孜自治州',
'653001': '阿图什市',
'653022': '阿克陶县',
'653023': '阿合奇县',
'653024': '乌恰县',
'653100': '喀什地区',
'653101': '喀什市',
'653121': '疏附县',
'653122': '疏勒县',
'653123': '英吉沙县',
'653124': '泽普县',
'653125': '莎车县',
'653126': '叶城县',
'653127': '麦盖提县',
'653128': '岳普湖县',
'653129': '伽师县',
'653130': '巴楚县',
'653131': '塔什库尔干塔吉克自治县',
'653200': '和田地区',
'653201': '和田市',
'653221': '和田县',
'653222': '墨玉县',
'653223': '皮山县',
'653224': '洛浦县',
'653225': '策勒县',
'653226': '于田县',
'653227': '民丰县',
'654000': '伊犁哈萨克自治州',
'654002': '伊宁市',
'654003': '奎屯市',
'654004': '霍尔果斯市',
'654021': '伊宁县',
'654022': '察布查尔锡伯自治县',
'654023': '霍城县',
'654024': '巩留县',
'654025': '新源县',
'654026': '昭苏县',
'654027': '特克斯县',
'654028': '尼勒克县',
'654200': '塔城地区',
'654201': '塔城市',
'654202': '乌苏市',
'654221': '额敏县',
'654223': '沙湾县',
'654224': '托里县',
'654225': '裕民县',
'654226': '和布克赛尔蒙古自治县',
'654300': '阿勒泰地区',
'654301': '阿勒泰市',
'654321': '布尔津县',
'654322': '富蕴县',
'654323': '福海县',
'654324': '哈巴河县',
'654325': '青河县',
'654326': '吉木乃县',
'659000': '自治区直辖县级行政区划',
'659001': '石河子市',
'659002': '阿拉尔市',
'659003': '图木舒克市',
'659004': '五家渠市',
'659006': '铁门关市',
'710000': '台湾省',
'810000': '香港特别行政区',
'820000': '澳门特别行政区',
} | PypiClean |
/JsoNomads-1.0.3.tar.gz/JsoNomads-1.0.3/README.md | # JsoNomads
JsoNomads is a module for downloading NOMADS data from NOAA and converting it to JSON.
It also provides a command-line tool.
# Installation
pip3 install jsonomads
or if you install it from source:
pip3 install .
# Usage
usage: jsonomads [-h] [-i [GRIBFILE]] [-o [JSONFILE]] [-k] [-t [DATATYPE]]
[-p] [-r [N]] [-I [N]] [--leftlon [N]] [--rightlon [N]]
[--toplat [N]] [--bottomlat [N]] [--res [RESOLUTION]]
[--tol [LEVELTYPE]] [--level [N]] [--nou] [--nov]
Download NOMADS data from NOAA and convert the grib file to JSON
optional arguments:
-h, --help show this help message and exit
-i [GRIBFILE], --input [GRIBFILE]
Input GRIB file. Download fresh file from NOAA if not
specified.
-o [JSONFILE], --output [JSONFILE]
Output JSON file
-k, --keep Keep grib file
-t [DATATYPE], --type [DATATYPE]
Data type. (Default: wind)
JSON parameters:
-p, --print Just print json data to stdout
-r [N], --round [N] Round to N decimals
-I [N], --indent [N] Indentation
Wind parameters:
Optional wind parameters
--leftlon [N] Left longitude. (Default: 0)
--rightlon [N] Right longitude. (Default: 360)
--toplat [N] Top latitude. (Default: 90)
--bottomlat [N] Bottom latitude. (Default: -90)
--res [RESOLUTION] Resolution. (Default: 1p00)
--tol [LEVELTYPE] Type of level. (Default: heightAboveGround)
--level [N] Level. (Default: 10)
--nou No U-component
--nov No V-component
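# Example
A typical invocation might look like this (the output file name and the latitude bounds are only illustrative; the flags themselves are documented above):
    jsonomads -o wind.json --res 1p00 --toplat 60 --bottomlat 30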
| PypiClean |
/KomiDL-0.7.1.tar.gz/KomiDL-0.7.1/komidl/extractors/tsumino.py | """This module contains the Tsumino extractor class"""
import re
import json
from urllib.parse import quote
import requests
from komidl.exceptions import ExtractorFailed
from .extractor import Extractor
class TsuminoEX(Extractor):
"""
An extractor for Tsumino.com
Unfortunately, this website tracks requests based on galleries and will
require the completion of captcha. As a result, the user will have to
manually visit the website every ~4 galleries.
All galleries hosted appear to be in English and use JPGs.
"""
def __init__(self):
super().__init__()
self.name = "Tsumino"
self.url = "https://www.tsumino.com"
self._PAGE_PATTERN = r"https?://(?:www\.)?tsumino\.com.*"
self._GALLERY_PATTERN = (r"https?://(?:www\.)?tsumino\.com/"
r"[Bb]ook/[Ii]nfo/[0-9]+/[^/]+/?")
self._IMG_DOMAIN = "https://www.tsumino.com/Image/Object?name="
requests.packages.urllib3.disable_warnings()
# =========================================================================
# Testing
# =========================================================================
# pylint: disable=line-too-long,unused-variable
def get_tests(self):
tests = ({"url": "http://www.tsumino.com/Book/Info/47994/ntr-kanojo-case-2-netosis-kasumi-haruno-",
"img_urls": ["https://www.tsumino.com/Image/Object?name=oJapP/7ab2WGMB0hU7/EcQ%3D%3D",
"https://www.tsumino.com/Image/Object?name=lFj0PpwnCjwMyKErxQ6n/Q%3D%3D",
"https://www.tsumino.com/Image/Object?name=cSvIijGcmYwDnEAsy/uZlw%3D%3D",
"https://www.tsumino.com/Image/Object?name=Fcag3NPrMNr426l%2B4fXzFA%3D%3D",
"https://www.tsumino.com/Image/Object?name=LzP9VveY3F0Y42syY9qw7g%3D%3D"
],
"size": 29,
"tags": {"Title": "NTR Kanojo Case. 2: NetoSis -Kasumi Haruno-",
"URL": "http://www.tsumino.com/Book/Info/47994/ntr-kanojo-case-2-netosis-kasumi-haruno-",
"Group": "Vpan's EXTASY",
"Artists": "Satou Kuuki",
"Category": "Doujinshi",
"Languages": "English",
"Tags": ["Ahegao", "Big Ass", "Blackmail",
"Bloomers", "Blowjob", "Condom",
"Cosplay", "Crossdressing",
"Crotchless / Breastless"]}},
                 )
        return tests
# =========================================================================
# Getters
# =========================================================================
@staticmethod
def _get_title(soup):
raw_title = soup.title.string
# Remove the " | Tsumino" bit
index = [pos for pos, char in enumerate(raw_title) if char == "|"][-1]
full_title = raw_title[0:index].strip()
# Note: Format goes: Romaji | English | Japanese
# Sometimes the English doesn't exist, so Romaji becomes English
romaji = full_title.split("|")[0].strip()
# Note the second value is Japanese (eng, jpn = romaji.split('/'))
english, _ = romaji.split("/")
return english.strip()
@staticmethod
def _get_gallery_id(url):
if url[-1] == "/":
return url.split("/")[-3]
return url.split("/")[-2]
def get_size(self, url, soup, args):
size_element = soup.find("div", {"class": "book-data", "id": "Pages"})
size = size_element.string.strip()
return int(size)
def _get_img_urls(self, id_):
"""A POST operation to retrieve/construct image URLs"""
referer = f"http://www.tsumino.com/Read/View/{id_}"
self._session.headers.update({"Referer": referer})
response = self._session.post("http://www.tsumino.com/Read/Load",
data={'q': id_})
if response.status_code == 404:
raise ExtractorFailed("Captcha error - visit the site and "
"complete the captcha or try again later")
data = json.loads(response.content.decode("utf-8"))
return [self._format_img_url(url) for url in data["reader_page_urls"]]
def get_gallery_urls(self, url, soup, args):
url_list = []
size = self.get_size(url, soup, args)
# Length of gallery size for zero-appending filenames
size_len = len(str(size))
id_ = self._get_gallery_id(url)
img_urls = self._get_img_urls(id_)
for img_num in range(1, size+1):
base_name = str(img_num).zfill(size_len)
filename = f"{base_name}.jpg"
url_list.append([filename, img_urls[img_num-1]])
return url_list
def get_tags(self, url, soup, args):
soup_tags = {"Title": self._get_title(soup), "URL": url}
soup_tags["Tags"] = self._get_content_tags(soup)
soup_tags["Category"] = self._get_category_tags(soup)
soup_tags["Languages"] = "English"
soup_tags["Artists"] = self._get_artist_tags(soup)
soup_tags["Group"] = self._get_group_tags(soup)
soup_tags["Parody"] = self._get_parody_tags(soup)
soup_tags["Characters"] = self._get_character_tags(soup)
return soup_tags
@staticmethod
def _get_content_tags(soup):
content = soup.find("meta", {"name": "description"})["content"]
tags = content.split(":")[-1].split(",")
return [item.strip() for item in tags]
@staticmethod
def _get_category_tags(soup):
return soup.find("a", {"data-type": "Category"})["data-define"]
@staticmethod
def _get_artist_tags(soup):
artist_list = soup.find_all("a", {"data-type": "Artist"})
return [artist["data-define"] for artist in artist_list]
@staticmethod
def _get_group_tags(soup):
if soup.find("a", {"data-type": "Group"}):
group_list = soup.find_all("a", {"data-type": "Group"})
return [group["data-define"] for group in group_list]
return []
@staticmethod
def _get_parody_tags(soup):
if soup.find("a", {"data-type": "Parody"}):
parody_tag = soup.find("a", {"data-type": "Parody"})
return parody_tag["data-define"]
return []
@staticmethod
def _get_character_tags(soup):
if soup.find("a", {"data-type": "Character"}):
char_list = soup.find_all("a", {"data-type": "Character"})
return [char["data-define"] for char in char_list]
return []
# ===============================================================================
# Misc. functions
# ===============================================================================
def _format_img_url(self, url):
clean_url = quote(url)
return f"{self._IMG_DOMAIN}{clean_url}" | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/@types/node/zlib.d.ts | declare module 'zlib' {
import * as stream from 'node:stream';
interface ZlibOptions {
/**
* @default constants.Z_NO_FLUSH
*/
flush?: number | undefined;
/**
* @default constants.Z_FINISH
*/
finishFlush?: number | undefined;
/**
* @default 16*1024
*/
chunkSize?: number | undefined;
windowBits?: number | undefined;
level?: number | undefined; // compression only
memLevel?: number | undefined; // compression only
strategy?: number | undefined; // compression only
dictionary?: NodeJS.ArrayBufferView | ArrayBuffer | undefined; // deflate/inflate only, empty dictionary by default
info?: boolean | undefined;
maxOutputLength?: number | undefined;
}
interface BrotliOptions {
/**
* @default constants.BROTLI_OPERATION_PROCESS
*/
flush?: number | undefined;
/**
* @default constants.BROTLI_OPERATION_FINISH
*/
finishFlush?: number | undefined;
/**
* @default 16*1024
*/
chunkSize?: number | undefined;
params?:
| {
/**
* Each key is a `constants.BROTLI_*` constant.
*/
[key: number]: boolean | number;
}
| undefined;
maxOutputLength?: number | undefined;
}
interface Zlib {
/** @deprecated Use bytesWritten instead. */
readonly bytesRead: number;
readonly bytesWritten: number;
shell?: boolean | string | undefined;
close(callback?: () => void): void;
flush(kind?: number, callback?: () => void): void;
flush(callback?: () => void): void;
}
interface ZlibParams {
params(level: number, strategy: number, callback: () => void): void;
}
interface ZlibReset {
reset(): void;
}
interface BrotliCompress extends stream.Transform, Zlib {}
interface BrotliDecompress extends stream.Transform, Zlib {}
interface Gzip extends stream.Transform, Zlib {}
interface Gunzip extends stream.Transform, Zlib {}
interface Deflate extends stream.Transform, Zlib, ZlibReset, ZlibParams {}
interface Inflate extends stream.Transform, Zlib, ZlibReset {}
interface DeflateRaw extends stream.Transform, Zlib, ZlibReset, ZlibParams {}
interface InflateRaw extends stream.Transform, Zlib, ZlibReset {}
interface Unzip extends stream.Transform, Zlib {}
/**
* Creates and returns a new `BrotliCompress` object.
* @since v11.7.0, v10.16.0
*/
function createBrotliCompress(options?: BrotliOptions): BrotliCompress;
/**
* Creates and returns a new `BrotliDecompress` object.
* @since v11.7.0, v10.16.0
*/
function createBrotliDecompress(options?: BrotliOptions): BrotliDecompress;
/**
* Creates and returns a new `Gzip` object.
* See `example`.
* @since v0.5.8
*/
function createGzip(options?: ZlibOptions): Gzip;
/**
* Creates and returns a new `Gunzip` object.
* @since v0.5.8
*/
function createGunzip(options?: ZlibOptions): Gunzip;
/**
* Creates and returns a new `Deflate` object.
* @since v0.5.8
*/
function createDeflate(options?: ZlibOptions): Deflate;
/**
* Creates and returns a new `Inflate` object.
* @since v0.5.8
*/
function createInflate(options?: ZlibOptions): Inflate;
/**
* Creates and returns a new `DeflateRaw` object.
*
     * An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when `windowBits` is set to 8 for raw deflate streams. zlib would automatically set `windowBits` to 9 if it was initially set to 8. Newer
* versions of zlib will throw an exception,
* so Node.js restored the original behavior of upgrading a value of 8 to 9,
* since passing `windowBits = 9` to zlib actually results in a compressed stream
* that effectively uses an 8-bit window only.
* @since v0.5.8
*/
function createDeflateRaw(options?: ZlibOptions): DeflateRaw;
/**
* Creates and returns a new `InflateRaw` object.
* @since v0.5.8
*/
function createInflateRaw(options?: ZlibOptions): InflateRaw;
/**
* Creates and returns a new `Unzip` object.
* @since v0.5.8
*/
function createUnzip(options?: ZlibOptions): Unzip;
type InputType = string | ArrayBuffer | NodeJS.ArrayBufferView;
type CompressCallback = (error: Error | null, result: Buffer) => void;
/**
* @since v11.7.0, v10.16.0
*/
function brotliCompress(buf: InputType, options: BrotliOptions, callback: CompressCallback): void;
function brotliCompress(buf: InputType, callback: CompressCallback): void;
namespace brotliCompress {
function __promisify__(buffer: InputType, options?: BrotliOptions): Promise<Buffer>;
}
/**
* Compress a chunk of data with `BrotliCompress`.
* @since v11.7.0, v10.16.0
*/
function brotliCompressSync(buf: InputType, options?: BrotliOptions): Buffer;
/**
* @since v11.7.0, v10.16.0
*/
function brotliDecompress(buf: InputType, options: BrotliOptions, callback: CompressCallback): void;
function brotliDecompress(buf: InputType, callback: CompressCallback): void;
namespace brotliDecompress {
function __promisify__(buffer: InputType, options?: BrotliOptions): Promise<Buffer>;
}
/**
* Decompress a chunk of data with `BrotliDecompress`.
* @since v11.7.0, v10.16.0
*/
function brotliDecompressSync(buf: InputType, options?: BrotliOptions): Buffer;
/**
* @since v0.6.0
*/
function deflate(buf: InputType, callback: CompressCallback): void;
function deflate(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace deflate {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Compress a chunk of data with `Deflate`.
* @since v0.11.12
*/
function deflateSync(buf: InputType, options?: ZlibOptions): Buffer;
/**
* @since v0.6.0
*/
function deflateRaw(buf: InputType, callback: CompressCallback): void;
function deflateRaw(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace deflateRaw {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Compress a chunk of data with `DeflateRaw`.
* @since v0.11.12
*/
function deflateRawSync(buf: InputType, options?: ZlibOptions): Buffer;
/**
* @since v0.6.0
*/
function gzip(buf: InputType, callback: CompressCallback): void;
function gzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace gzip {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Compress a chunk of data with `Gzip`.
* @since v0.11.12
*/
function gzipSync(buf: InputType, options?: ZlibOptions): Buffer;
/**
* @since v0.6.0
*/
function gunzip(buf: InputType, callback: CompressCallback): void;
function gunzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace gunzip {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Decompress a chunk of data with `Gunzip`.
* @since v0.11.12
*/
function gunzipSync(buf: InputType, options?: ZlibOptions): Buffer;
/**
* @since v0.6.0
*/
function inflate(buf: InputType, callback: CompressCallback): void;
function inflate(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace inflate {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Decompress a chunk of data with `Inflate`.
* @since v0.11.12
*/
function inflateSync(buf: InputType, options?: ZlibOptions): Buffer;
/**
* @since v0.6.0
*/
function inflateRaw(buf: InputType, callback: CompressCallback): void;
function inflateRaw(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace inflateRaw {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Decompress a chunk of data with `InflateRaw`.
* @since v0.11.12
*/
function inflateRawSync(buf: InputType, options?: ZlibOptions): Buffer;
/**
* @since v0.6.0
*/
function unzip(buf: InputType, callback: CompressCallback): void;
function unzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
namespace unzip {
function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
}
/**
* Decompress a chunk of data with `Unzip`.
* @since v0.11.12
*/
function unzipSync(buf: InputType, options?: ZlibOptions): Buffer;
namespace constants {
const BROTLI_DECODE: number;
const BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: number;
const BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: number;
const BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: number;
const BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: number;
const BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: number;
const BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: number;
const BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: number;
const BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: number;
const BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: number;
const BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: number;
const BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: number;
const BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: number;
const BROTLI_DECODER_ERROR_FORMAT_DISTANCE: number;
const BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: number;
const BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: number;
const BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: number;
const BROTLI_DECODER_ERROR_FORMAT_PADDING_1: number;
const BROTLI_DECODER_ERROR_FORMAT_PADDING_2: number;
const BROTLI_DECODER_ERROR_FORMAT_RESERVED: number;
const BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: number;
const BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: number;
const BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: number;
const BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: number;
const BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: number;
const BROTLI_DECODER_ERROR_UNREACHABLE: number;
const BROTLI_DECODER_NEEDS_MORE_INPUT: number;
const BROTLI_DECODER_NEEDS_MORE_OUTPUT: number;
const BROTLI_DECODER_NO_ERROR: number;
const BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: number;
const BROTLI_DECODER_PARAM_LARGE_WINDOW: number;
const BROTLI_DECODER_RESULT_ERROR: number;
const BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: number;
const BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: number;
const BROTLI_DECODER_RESULT_SUCCESS: number;
const BROTLI_DECODER_SUCCESS: number;
const BROTLI_DEFAULT_MODE: number;
const BROTLI_DEFAULT_QUALITY: number;
const BROTLI_DEFAULT_WINDOW: number;
const BROTLI_ENCODE: number;
const BROTLI_LARGE_MAX_WINDOW_BITS: number;
const BROTLI_MAX_INPUT_BLOCK_BITS: number;
const BROTLI_MAX_QUALITY: number;
const BROTLI_MAX_WINDOW_BITS: number;
const BROTLI_MIN_INPUT_BLOCK_BITS: number;
const BROTLI_MIN_QUALITY: number;
const BROTLI_MIN_WINDOW_BITS: number;
const BROTLI_MODE_FONT: number;
const BROTLI_MODE_GENERIC: number;
const BROTLI_MODE_TEXT: number;
const BROTLI_OPERATION_EMIT_METADATA: number;
const BROTLI_OPERATION_FINISH: number;
const BROTLI_OPERATION_FLUSH: number;
const BROTLI_OPERATION_PROCESS: number;
const BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: number;
const BROTLI_PARAM_LARGE_WINDOW: number;
const BROTLI_PARAM_LGBLOCK: number;
const BROTLI_PARAM_LGWIN: number;
const BROTLI_PARAM_MODE: number;
const BROTLI_PARAM_NDIRECT: number;
const BROTLI_PARAM_NPOSTFIX: number;
const BROTLI_PARAM_QUALITY: number;
const BROTLI_PARAM_SIZE_HINT: number;
const DEFLATE: number;
const DEFLATERAW: number;
const GUNZIP: number;
const GZIP: number;
const INFLATE: number;
const INFLATERAW: number;
const UNZIP: number;
// Allowed flush values.
const Z_NO_FLUSH: number;
const Z_PARTIAL_FLUSH: number;
const Z_SYNC_FLUSH: number;
const Z_FULL_FLUSH: number;
const Z_FINISH: number;
const Z_BLOCK: number;
const Z_TREES: number;
// Return codes for the compression/decompression functions.
// Negative values are errors, positive values are used for special but normal events.
const Z_OK: number;
const Z_STREAM_END: number;
const Z_NEED_DICT: number;
const Z_ERRNO: number;
const Z_STREAM_ERROR: number;
const Z_DATA_ERROR: number;
const Z_MEM_ERROR: number;
const Z_BUF_ERROR: number;
const Z_VERSION_ERROR: number;
// Compression levels.
const Z_NO_COMPRESSION: number;
const Z_BEST_SPEED: number;
const Z_BEST_COMPRESSION: number;
const Z_DEFAULT_COMPRESSION: number;
// Compression strategy.
const Z_FILTERED: number;
const Z_HUFFMAN_ONLY: number;
const Z_RLE: number;
const Z_FIXED: number;
const Z_DEFAULT_STRATEGY: number;
const Z_DEFAULT_WINDOWBITS: number;
const Z_MIN_WINDOWBITS: number;
const Z_MAX_WINDOWBITS: number;
const Z_MIN_CHUNK: number;
const Z_MAX_CHUNK: number;
const Z_DEFAULT_CHUNK: number;
const Z_MIN_MEMLEVEL: number;
const Z_MAX_MEMLEVEL: number;
const Z_DEFAULT_MEMLEVEL: number;
const Z_MIN_LEVEL: number;
const Z_MAX_LEVEL: number;
const Z_DEFAULT_LEVEL: number;
const ZLIB_VERNUM: number;
}
// Allowed flush values.
/** @deprecated Use `constants.Z_NO_FLUSH` */
const Z_NO_FLUSH: number;
/** @deprecated Use `constants.Z_PARTIAL_FLUSH` */
const Z_PARTIAL_FLUSH: number;
/** @deprecated Use `constants.Z_SYNC_FLUSH` */
const Z_SYNC_FLUSH: number;
/** @deprecated Use `constants.Z_FULL_FLUSH` */
const Z_FULL_FLUSH: number;
/** @deprecated Use `constants.Z_FINISH` */
const Z_FINISH: number;
/** @deprecated Use `constants.Z_BLOCK` */
const Z_BLOCK: number;
/** @deprecated Use `constants.Z_TREES` */
const Z_TREES: number;
// Return codes for the compression/decompression functions.
// Negative values are errors, positive values are used for special but normal events.
/** @deprecated Use `constants.Z_OK` */
const Z_OK: number;
/** @deprecated Use `constants.Z_STREAM_END` */
const Z_STREAM_END: number;
/** @deprecated Use `constants.Z_NEED_DICT` */
const Z_NEED_DICT: number;
/** @deprecated Use `constants.Z_ERRNO` */
const Z_ERRNO: number;
/** @deprecated Use `constants.Z_STREAM_ERROR` */
const Z_STREAM_ERROR: number;
/** @deprecated Use `constants.Z_DATA_ERROR` */
const Z_DATA_ERROR: number;
/** @deprecated Use `constants.Z_MEM_ERROR` */
const Z_MEM_ERROR: number;
/** @deprecated Use `constants.Z_BUF_ERROR` */
const Z_BUF_ERROR: number;
/** @deprecated Use `constants.Z_VERSION_ERROR` */
const Z_VERSION_ERROR: number;
// Compression levels.
/** @deprecated Use `constants.Z_NO_COMPRESSION` */
const Z_NO_COMPRESSION: number;
/** @deprecated Use `constants.Z_BEST_SPEED` */
const Z_BEST_SPEED: number;
/** @deprecated Use `constants.Z_BEST_COMPRESSION` */
const Z_BEST_COMPRESSION: number;
/** @deprecated Use `constants.Z_DEFAULT_COMPRESSION` */
const Z_DEFAULT_COMPRESSION: number;
// Compression strategy.
/** @deprecated Use `constants.Z_FILTERED` */
const Z_FILTERED: number;
/** @deprecated Use `constants.Z_HUFFMAN_ONLY` */
const Z_HUFFMAN_ONLY: number;
/** @deprecated Use `constants.Z_RLE` */
const Z_RLE: number;
/** @deprecated Use `constants.Z_FIXED` */
const Z_FIXED: number;
/** @deprecated Use `constants.Z_DEFAULT_STRATEGY` */
const Z_DEFAULT_STRATEGY: number;
/** @deprecated */
const Z_BINARY: number;
/** @deprecated */
const Z_TEXT: number;
/** @deprecated */
const Z_ASCII: number;
/** @deprecated */
const Z_UNKNOWN: number;
/** @deprecated */
const Z_DEFLATED: number;
}
declare module 'node:zlib' {
export * from 'zlib';
} | PypiClean |
/CheckM2-1.0.1.tar.gz/CheckM2-1.0.1/checkm2/sequenceClasses.py | import gzip
class SeqReader:
# Stolen from https://github.com/lh3/readfq/blob/master/readfq.py
def readfq(self, fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def read_nucleotide_sequences(self, nucleotide_file):
nucleotide_sequences = {}
for name, seq, _ in self.readfq(open(nucleotide_file)):
nucleotide_sequences[name] = seq
return nucleotide_sequences
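    # Illustrative usage sketch (the file name is a placeholder):
    #   seqs = SeqReader().read_nucleotide_sequences('genome.fna')
    #   # seqs maps sequence ids to nucleotide strings, e.g. {'contig_1': 'ACGT...'}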
# def check_for_proper_nucleotide_seq(self, seq_file, req_perc=0.9, max_seqs_to_read=10):
#
#
# """Check if a file contains sequences in nucleotide space.
# The check is performed by looking for the characters in
# {a,c,g,t,n,.,-} and confirming that these comprise the
# majority of a sequences. A set number of sequences are
# read and the file assumed to be not be in nucleotide space
# if none of these sequences are comprised primarily of the
# defined nucleotide set.
# Parameters
# ----------
# seq_file : str
# Name of fasta/q file to read.
# req_perc : float
# Percentage of bases in {a,c,g,t,n,.,-} before
# declaring the sequences as being in nucleotide
# space.
# max_seqs_to_read : int
# Maximum sequences to read before declaring
# sequence file to not be in nucleotide space.
# Returns
# -------
# boolean
# True is sequences are in nucleotide space, or file
# contains no sequences.
# """
#
# nucleotide_bases = {'a', 'c', 'g', 't'}
# insertion_bases = {'-', '.'}
#
# seqs = self.read_nucleotide_sequences(seq_file)
# if len(seqs) == 0:
# return True
#
# seq_count = 0
# for _seq_id, seq in seqs.items():
# seq = seq.lower()
#
# nt_bases = 0
# for c in (nucleotide_bases | {'n'} | insertion_bases):
# nt_bases += seq.count(c)
#
# if float(nt_bases) / len(seq) >= req_perc:
# return True
#
# seq_count += 1
# if seq_count == max_seqs_to_read:
# break
#
# return False
def write_fasta(self, seq, outputFile):
'''write sequences to FASTA file'''
if outputFile.endswith('.gz'):
            fout = gzip.open(outputFile, 'wt')
        else:
            fout = open(outputFile, 'w')
        for seq_id, sequence in seq.items():
            fout.write('>' + seq_id + '\n')
            fout.write(sequence + '\n')
fout.close() | PypiClean |
/Melopy-0.1.0.tar.gz/Melopy-0.1.0/melopy/melopy.py |
import wave, struct, random, math
import os, sys
from utility import *
from scales import *
# some included wave functions
# a function of frequency and tick
# each function accepts the frequency and tick,
# and returns a value from -1 to 1
sine = lambda f, t: math.sin(2 * math.pi * t * f / 44100.0)
square = lambda f, t: 0.6 * ((t % (44100 / f) >= ((44100 / f)/2)) * 2 - 1)
sawtooth = lambda f, t: (t % (44100 / f)) / (44100 / f) * 2 - 1
def triangle(f, t):
v = 2 * (t % (44100 / f)) / (44100 / f)
if t % (44100 / f) >= (44100 / (2 * f)):
v = 2 * 1 - v
v = 2 * v - 1
return v
class Melopy:
def __init__(self, title='sound', volume=20, tempo=120, octave=4):
self.title = title.lower()
self.rate = 44100
self.volume = volume
self.data = []
self.tempo = tempo
self.octave = octave
self.wave_type = sine
def add_wave(self, frequency, length, location='END'):
if location == 'END':
location = len(self.data)
elif location < 0:
location = 0
elif location * 44100 > len(self.data):
location = len(self.data) / 44100.0
# location is a time, so let's adjust
location = int(location * 44100)
for n in range(0, int(44100 * length)):
val = self.wave_type(frequency, n)
val *= self.volume / 100.0 * 32767
if location + n >= len(self.data):
self.data.append(val)
else:
current_val = self.data[location + n]
if current_val + val > 32767:
val = 32767
elif current_val + val < -32768:
val = -32768
else:
val += current_val
self.data[location + n] = val
def add_note(self, note, length, location='END'):
"""Add a note, or if a list, add a chord."""
if not isinstance(note, list):
note = [note]
if location == 'END':
location = len(self.data) / 44100.0
for item in note:
if item[-1] not in '0123456789':
item += str(self.octave)
self.add_wave(note_to_frequency(item, self.octave), length, location)
def add_melody(self, melody, length):
for note in melody:
if note[-1] not in '0123456789':
                note += str(self.octave)
self.add_wave(note_to_frequency(note), length)
def add_whole_note(self, note, location='END'):
"""Add a whole note"""
self.add_fractional_note(note, 1.0, location)
def add_half_note(self, note, location='END'):
"""Add a half note"""
self.add_fractional_note(note, 1.0 / 2, location)
def add_quarter_note(self, note, location='END'):
"""Add a quarter note"""
self.add_fractional_note(note, 1.0 / 4, location)
def add_eighth_note(self, note, location='END'):
"""Add a eigth note"""
self.add_fractional_note(note, 1.0 / 8, location)
def add_sixteenth_note(self, note, location='END'):
"""Add a sixteenth note"""
self.add_fractional_note(note, 1.0 / 16, location)
def add_fractional_note(self, note, fraction, location='END'):
"""Add a fractional note (smaller then 1/16 notes)"""
self.add_note(note, 60.0 / self.tempo * (fraction * 4), location)
def add_rest(self, length):
for i in range(int(self.rate * length)):
self.data.append(0)
def add_whole_rest(self):
self.add_fractional_rest(1.0)
def add_half_rest(self):
self.add_fractional_rest(1.0 / 2)
def add_quarter_rest(self):
self.add_fractional_rest(1.0 / 4)
def add_eighth_rest(self):
self.add_fractional_rest(1.0 / 8)
def add_sixteenth_rest(self):
self.add_fractional_rest(1.0 / 16)
def add_fractional_rest(self, fraction):
self.add_rest(60.0 / self.tempo * (fraction * 4))
def parse(self, string, location='END'):
tracks = string.split('&&&')
# special case for multiple tracks
if len(tracks) > 1:
t = len(self.data) / 44100.0
for track in tracks:
self.parse(track, t)
return
cf = 0.25 # start with a quarter note, change accordingly
in_comment = False
for i, char in enumerate(string): # divide melody into fragments
# / this is a comment /
if char == '/':
in_comment = not in_comment
if in_comment:
continue
elif char in 'ABCDEFG':
if (i+1 < len(string)) and (string[i+1] in '#b'):
# check if the next item in the array is
# a sharp or flat, make sure we include it
char += string[i+1]
self.add_fractional_note(char, cf, location)
if location != 'END':
location += (60.0 / self.tempo * (cf * 4))
elif char in map(str, range(0, 20)):
self.octave = int(char)
elif char == '+' or char == '^':
self.octave += 1
elif char == 'V' or char == 'v' or char == '-':
self.octave -= 1
elif char == '(' or char == ']':
cf /= 2
elif char == ')' or char == '[':
cf *= 2
elif char == '_':
self.add_fractional_rest(cf)
if location != 'END':
location += (60.0 / self.tempo * (cf * 4))
def parsefile(self, filename, location='END'):
fr = open(filename, 'r')
s = fr.read()
fr.close()
self.parse(s, location)
def render(self):
"""Render a playable song out to a .wav file"""
melopy_writer = wave.open(self.title + '.wav', 'w')
melopy_writer.setparams((2, 2, 44100, 0, 'NONE', 'not compressed'))
p = -1
data_frames = []
for i in range(len(self.data)):
q = 100 * i / len(self.data)
if p != q:
sys.stdout.write("\r[%s] %d%%" % (('='*int((float(i)/len(self.data)*50))+'>').ljust(50), 100 * i / len(self.data)))
sys.stdout.flush()
p = q
packed_val = struct.pack('h', int(self.data[i]))
data_frames.append(packed_val)
data_frames.append(packed_val)
        melopy_writer.writeframes(b''.join(data_frames))  # struct.pack returns bytes, so join on bytes
sys.stdout.write("\r[%s] 100%%" % ('='*50))
sys.stdout.flush()
sys.stdout.write("\nDone\n")
melopy_writer.close()
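# Illustrative usage sketch (the title and melody string are placeholders):
#   m = Melopy('demo', tempo=150)
#   m.parse('CDEFGAB')   # quarter notes in the current octave
#   m.render()           # writes demo.wav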
# Licensed under The MIT License (MIT)
# See LICENSE file for more | PypiClean |
/NREL-erad-0.0.0a0.tar.gz/NREL-erad-0.0.0a0/erad/scenarios/common.py | from shapely.geometry import MultiPolygon, Point,Polygon
from random import random,seed
from enum import IntEnum
import numpy as np
class ScenarioTypes(IntEnum):
flood_m = 0
wind_m_per_s = 1
fire_m = 2
earthquake_pga = 3
class AssetTypes(IntEnum):
substation = 0
solar_panels = 1
buried_lines = 2
wind_turbines= 3
battery_storage = 4
transmission_poles = 5
distribution_poles = 6
transmission_overhead_lines = 7
distribution_overhead_lines = 8
#substructures
#conduit_burial
@classmethod
def has_value(cls, value):
return value in cls._value2member_map_
@classmethod
def has_asset(cls, asset):
print(asset)
return asset in cls.__members__
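    # Illustrative checks: AssetTypes.has_value(0) is True (substation),
    # and AssetTypes.has_asset('substation') is True.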
def asset_list(x1=41.255, y1=-117.33, x2=41.255, y2=-117.33, samples=100):
x = np.linspace(x1, x2, samples)
y = np.linspace(y1, y2, samples)
seed(3)
asset_probabilities = {
AssetTypes.substation: 1 / 10000.0,
AssetTypes.solar_panels : 1/500,
AssetTypes.buried_lines : 1/10.0,
AssetTypes.wind_turbines : 1/5000,
AssetTypes.battery_storage :1/2000,
AssetTypes.transmission_poles: 1 / 10.0,
AssetTypes.distribution_poles : 1 / 10.0,
AssetTypes.transmission_overhead_lines : 1/10.0,
AssetTypes.distribution_overhead_lines : 1/10.0,
}
heights_ft = {
AssetTypes.substation.name : 3,
AssetTypes.solar_panels.name : 10,
AssetTypes.buried_lines.name : -3,
AssetTypes.wind_turbines.name : 25,
AssetTypes.battery_storage.name : 4,
AssetTypes.transmission_poles.name : 0,
AssetTypes.distribution_poles.name : 0,
AssetTypes.transmission_overhead_lines.name : 100,
AssetTypes.distribution_overhead_lines.name : 30,
}
assets = {
AssetTypes.substation.name : {},
AssetTypes.solar_panels.name : {},
AssetTypes.buried_lines.name : {},
AssetTypes.wind_turbines.name : {},
AssetTypes.battery_storage.name :{},
AssetTypes.transmission_poles.name : {},
AssetTypes.distribution_poles.name : {},
AssetTypes.transmission_overhead_lines.name : {},
AssetTypes.distribution_overhead_lines.name : {},
}
for asset_type, probability in asset_probabilities.items():
asset_id = 0
for x1 in x:
for y1 in y:
if random() < probability:
assets[asset_type.name][f"{asset_type.name} {asset_id}"] = {"coordinates" : (x1, y1), "heights_ft": heights_ft[asset_type.name]}
asset_id += 1
p1 = Point(x.min(), y.min())
p2 = Point(x.max(), y.min())
p3 = Point(x.max(), y.max())
p4 = Point(x.min(), y.max())
pointList = [p1, p2, p3, p4, p1]
poly = Polygon(pointList)
mypoly = MultiPolygon([poly])
return assets, mypoly | PypiClean |
/GLManager-1.1.6.tar.gz/GLManager-1.1.6/app/modules/console/socks_console.py | import typing as t
import os
from console import Console, FuncItem, COLOR_NAME
from console.formatter import create_menu_bg, create_line, Formatter
from scripts import SOCKS_PATH, CERT_PATH
from app.utilities.logger import logger
def check_screen_is_installed():
command = 'command -v screen >/dev/null 2>&1'
return os.system(command) == 0
def process_install_screen():
if check_screen_is_installed():
return
answer = input(
COLOR_NAME.YELLOW + 'Screen não está instalado. Deseja instalar? [s/N]: ' + COLOR_NAME.END
)
if answer.lower() == 's':
logger.info('Instalando screen...')
os.system('sudo apt-get install screen -y >/dev/null 2>&1')
logger.info('Screen instalado com sucesso!')
Console.pause()
class Flag:
def __init__(self, name: str, port: int = None):
self.__name = name
self.__port = port
if self.__port is None and self.__name is not None:
port = self.current_flag(self.__name)
if port:
self.__port = int(port)
@property
def name(self) -> str:
if self.__name is None:
raise ValueError('Name is not set')
return self.__name
@name.setter
def name(self, value: str):
if value is None:
raise ValueError('Name is not set')
self.__name = value
@property
def port(self) -> int:
if self.__port == -1:
raise ValueError('Port is not set')
return self.__port
@port.setter
def port(self, port: int):
if not isinstance(port, int):
raise TypeError('Port must be an integer')
if port < 0 or port > 65535:
raise ValueError('Port must be between 0 and 65535')
self.__port = port
@property
def value(self):
flag = self.name
if not flag.startswith('--'):
flag = '--' + flag
flag += ' ' + str(self.port)
return flag
@staticmethod
def current_flag(flag_name: str) -> str:
flag_name_parsed = flag_name
if '-' in flag_name:
            flag_name_parsed = '\\-'.join(flag_name.split('-'))
command = 'ps -aux | grep -i ' + flag_name_parsed + ' | grep -v grep'
output = os.popen(command).read().strip()
for line in output.split('\n'):
data = line.split(flag_name)
if len(data) > 1:
return data[1].split()[0]
return ''
class OpenVpnFlag(Flag):
def __init__(self):
super().__init__('openvpn-port')
if not self.port:
self.port = 1194
class SSHFlag(Flag):
def __init__(self):
super().__init__('ssh-port')
if not self.port:
self.port = 22
class V2rayFlag(Flag):
def __init__(self):
super().__init__('v2ray-port')
if not self.port:
self.port = 1080
class FlagUtils:
def __init__(self):
self.__openvpn_flag = OpenVpnFlag()
self.__ssh_flag = SSHFlag()
self.__v2ray_flag = V2rayFlag()
self.__flags: t.List[Flag] = [
self.__openvpn_flag,
self.__ssh_flag,
self.__v2ray_flag,
]
def set_flag(self, flag: Flag):
for f in self.__flags:
if f.name == flag.name:
f.port = flag.port
def command(self) -> str:
return '%s %s %s' % (
self.__openvpn_flag.value,
self.__ssh_flag.value,
self.__v2ray_flag.value,
)
def values(self) -> t.List[str]:
return [
self.__openvpn_flag.value,
self.__ssh_flag.value,
self.__v2ray_flag.value,
]
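    # Illustrative result, assuming no running process overrides the default ports above:
    #   FlagUtils().command() -> '--openvpn-port 1194 --ssh-port 22 --v2ray-port 1080'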
class SocksManager:
@staticmethod
def is_running(mode: str = 'http') -> bool:
cmd = 'screen -ls | grep -i "socks:[0-9]*:%s\\b"' % mode
return os.system(cmd) == 0
def start(self, mode: str = 'http', src_port: int = 80, flag_utils: FlagUtils = None):
cmd = 'screen -mdS socks:%s:%s python3 %s --port %s %s --%s' % (
src_port,
mode,
SOCKS_PATH,
src_port,
flag_utils.command(),
mode,
)
if mode == 'https':
cmd += ' --cert %s' % CERT_PATH
return os.system(cmd) == 0 and self.is_running(mode)
def stop(self, mode: str = 'http', src_port: int = 80) -> None:
cmd = 'screen -X -S socks:%s:%s quit' % (src_port, mode)
return os.system(cmd) == 0
@staticmethod
def get_running_port(mode: str = 'http') -> int:
cmd = 'screen -ls | grep -ie "socks:[0-9]*:%s\\b"' % mode
output = os.popen(cmd).read().strip()
for line in output.split('\n'):
data = line.split(':')
if len(data) > 1:
return int(data[1].split(' ')[0])
return 0
@staticmethod
def get_running_ports() -> t.List[int]:
cmd = 'screen -ls | grep socks: | awk \'{print $1}\' | awk -F: \'{print $2}\''
output = os.popen(cmd).read()
return [int(port) for port in output.split('\n') if port]
@staticmethod
def get_running_socks() -> t.Dict[int, str]:
cmd = 'screen -ls | grep socks: | awk \'{print $1}\''
output = os.popen(cmd).readlines()
socks = dict(
(int(port), mode.strip()) for port, mode in (line.split(':')[1:] for line in output)
)
return socks
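    # Illustrative return value (port numbers are examples): {8080: 'http', 8443: 'https'}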
class ConsoleMode:
def __init__(self):
self.console = Console('SELECIONE O MODO DE CONEXAO')
self.console.append_item(FuncItem('HTTP', lambda: 'http', exit_on_select=True))
self.console.append_item(FuncItem('HTTPS', lambda: 'https', exit_on_select=True))
def start(self) -> str:
self.console.show()
return self.console.item_returned
@classmethod
def get_mode(cls) -> str:
return cls().start()
class FormatterSocks(Formatter):
def __init__(self, port: int, mode: str) -> None:
super().__init__()
self.port = port
self.mode = mode
def build_menu(self, title):
menu = super().build_menu(title)
if self.port <= 0:
return menu
flag_utils = FlagUtils()
values = []
for flag in flag_utils.values():
name, port = flag.split()
name = name.replace('--', '')
name = name.split('-')[0]
values.append(name + ' ' + str(port))
for value in values:
menu += '%s <-> %s <-> %s\n' % (
COLOR_NAME.GREEN + self.mode.ljust(10) + COLOR_NAME.END,
COLOR_NAME.GREEN + str(self.port).rjust(10).ljust(15) + COLOR_NAME.END,
COLOR_NAME.GREEN + str(value).rjust(15) + COLOR_NAME.END,
)
return menu + create_line(color=COLOR_NAME.BLUE, show=False) + '\n'
class SocksActions:
@staticmethod
def start(mode: str, callback: t.Callable[[], None] = None) -> None:
print(create_menu_bg('PORTA - ' + mode.upper()))
manager = SocksManager()
ports = manager.get_running_ports()
if ports:
print(SocksActions.create_message_running_ports(ports))
while True:
try:
src_port = input(COLOR_NAME.YELLOW + 'Porta de escuta: ' + COLOR_NAME.RESET)
src_port = int(src_port)
                if src_port in manager.get_running_ports():
logger.error('Porta %s já está em uso' % src_port)
continue
break
except ValueError:
logger.error('Porta inválida!')
except KeyboardInterrupt:
return
if not manager.start(mode=mode, src_port=src_port, flag_utils=FlagUtils()):
logger.error('Falha ao iniciar proxy!')
Console.pause()
return
logger.info('Proxy iniciado com sucesso!')
Console.pause()
if callback:
callback()
@staticmethod
def stop(mode: str, port: int, callback: t.Callable[[], None] = None) -> None:
manager = SocksManager()
if not manager.stop(mode, port):
logger.error('Falha ao desligar proxy!')
Console.pause()
return
logger.info('Proxy desligado com sucesso!')
Console.pause()
if callback:
callback(mode)
@staticmethod
def change_port(mode: str, flag: Flag) -> None:
flag_utils = FlagUtils()
socks_manager = SocksManager()
current_port = flag.port
logger.info('Porta atual: %s' % current_port)
while True:
try:
new_port = input(COLOR_NAME.YELLOW + 'Nova porta: ' + COLOR_NAME.RESET)
new_port = int(new_port)
if new_port == current_port:
raise ValueError
flag.port = new_port
break
except ValueError:
logger.error('Porta inválida!')
except KeyboardInterrupt:
return
running_port = socks_manager.get_running_port(mode)
socks_manager.stop(mode, running_port)
flag_utils.set_flag(flag)
if not socks_manager.start(mode=mode, src_port=running_port, flag_utils=flag_utils):
logger.error('Falha ao iniciar proxy!')
Console.pause()
return
logger.info('Porta OpenVPN alterada com sucesso!')
Console.pause()
@staticmethod
def create_message_running_ports(running_ports: t.List[int]) -> str:
message = create_line(show=False) + '\n'
message += COLOR_NAME.YELLOW + 'Em uso: ' + COLOR_NAME.RESET
message += ', '.join(str(port) for port in running_ports)
message += '\n'
message += create_line(show=False)
return message
def socks_console_main(mode: str):
process_install_screen()
running_port = SocksManager().get_running_port(mode)
console = Console('SOCKS Manager ' + mode.upper(), formatter=FormatterSocks(running_port, mode))
if not SocksManager.is_running(mode):
console.append_item(
FuncItem(
'INICIAR',
SocksActions.start,
mode,
lambda: socks_console_main(mode),
shuld_exit=True,
)
)
console.show()
return
console.append_item(
FuncItem(
'ALTERAR PORTA OPENVPN',
SocksActions.change_port,
mode,
OpenVpnFlag(),
)
)
console.append_item(
FuncItem(
'ALTERAR PORTA SSH',
SocksActions.change_port,
mode,
SSHFlag(),
)
)
console.append_item(
FuncItem(
'ALTERAR PORTA V2RAY',
SocksActions.change_port,
mode,
V2rayFlag(),
)
)
console.append_item(
FuncItem(
'PARAR',
SocksActions.stop,
mode,
running_port,
socks_console_main,
shuld_exit=True,
)
)
console.show() | PypiClean |
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/cnes.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def cnes(path):
"""Variables from the 1997 Canadian National Election Study
These variables are from the mailback questionnaire to the 1997 Canadian
National Election Study, and are intended to tap attitude towards
“traditional values.”
A data frame with 1529 observations on the following 4 variables.
`MBSA2`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement, “We
should be more tolerant of people who choose to live according to
their own standards, even if they are very different from our own.”
`MBSA7`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement,
“Newer lifestyles are contributing to the breakdown of our society.”
`MBSA8`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement, “The
world is always changing and we should adapt our view of moral
behaviour to these changes.”
`MBSA9`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement,
“This country would have many fewer problems if there were more
emphasis on traditional family values.”
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `cnes.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1529 rows and 4 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'cnes.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/sem/CNES.csv'
maybe_download_and_extract(path, url,
save_file_name='cnes.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
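# Illustrative usage (the path is a placeholder for a local data directory):
#   x_train, metadata = cnes('~/data')
#   x_train.shape  # -> (1529, 4), per the description above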
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/api/postgres/postgres_proxy/postgres_packets/postgres_message_formats.py | from typing import BinaryIO, Sequence, Dict, Type
from mindsdb.api.mysql.mysql_proxy.classes.sql_statement_parser import SqlStatementParser
from mindsdb.api.postgres.postgres_proxy.postgres_packets.postgres_fields import PostgresField
from mindsdb.api.postgres.postgres_proxy.postgres_packets.postgres_message import PostgresMessage
from mindsdb.api.postgres.postgres_proxy.postgres_packets.postgres_message_identifiers import \
PostgresBackendMessageIdentifier, PostgresFrontendMessageIdentifier, PostgresAuthType
from mindsdb.api.postgres.postgres_proxy.postgres_packets.postgres_packets import PostgresPacketReader
from mindsdb.api.postgres.postgres_proxy.utilities import strip_null_byte
# All docstrings for Messages are taken from
# https://www.postgresql.org/docs/current/protocol-message-formats.html as of 2023-2-8 for Postgresql 15
class NoticeResponse(PostgresMessage):
"""
NoticeResponse (B)
Byte1('N')
Identifies the message as a notice.
Int32
Length of message contents in bytes, including self.
The message body consists of one or more identified fields, followed by a zero byte as a terminator. Fields can
appear in any order. For each field there is the following:
Byte1
A code identifying the field type; if zero, this is the message terminator and no string follows. The
presently defined field types are listed in Section 55.8. Since more field types might be added in future,
frontends should silently ignore fields of unrecognized type.
String
The field value. """ # noqa
def __init__(self):
self.identifier = PostgresBackendMessageIdentifier.NOTICE_RESPONSE
self.backend_capable = True
self.frontend_capable = False
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder().write(write_file=write_file)
class AuthenticationOk(PostgresMessage):
"""
AuthenticationOk (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(8)
Length of message contents in bytes, including self.
Int32(0)
Specifies that the authentication was successful. """ # noqa
def __init__(self):
self.identifier = PostgresBackendMessageIdentifier.AUTHENTICATION_REQUEST
self.backend_capable = True
self.frontend_capable = False
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_int32(0) \
.write(write_file=write_file)
class AuthenticationClearTextPassword(PostgresMessage):
"""
AuthenticationCleartextPassword (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(8)
Length of message contents in bytes, including self.
Int32(3)
Specifies that a clear-text password is required. """ # noqa
def __init__(self):
self.identifier = PostgresBackendMessageIdentifier.AUTHENTICATION_REQUEST
self.backend_capable = True
self.frontend_capable = False
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_int32(3) \
.write(write_file=write_file)
class ReadyForQuery(PostgresMessage):
"""
ReadyForQuery (B)
Byte1('Z')
Identifies the message type. ReadyForQuery is sent whenever the backend is ready for a new query cycle.
Int32(5)
Length of message contents in bytes, including self.
Byte1
Current backend transaction status indicator. Possible values are 'I' if idle (not in a transaction block);
'T' if in a transaction block; or 'E' if in a failed transaction block (queries will be rejected until block is
ended).""" # noqa
transaction_status: bytes
def __init__(self, transaction_status=None):
self.identifier = PostgresBackendMessageIdentifier.READY_FOR_QUERY
self.backend_capable = True
self.frontend_capable = False
self.transaction_status = transaction_status or b'I'
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_char(self.transaction_status) \
.write(write_file=write_file)
class CommandComplete(PostgresMessage):
"""
CommandComplete (B)
Byte1('C')
Identifies the message as a command-completed response.
Int32
Length of message contents in bytes, including self.
String
The command tag. This is usually a single word that identifies which SQL command was completed.
For an INSERT command, the tag is INSERT oid rows, where rows is the number of rows inserted. oid used to be the object ID of the inserted row if rows was 1 and the target table had OIDs, but OIDs system columns are not supported anymore; therefore oid is always 0.
For a DELETE command, the tag is DELETE rows where rows is the number of rows deleted.
For an UPDATE command, the tag is UPDATE rows where rows is the number of rows updated.
For a SELECT or CREATE TABLE AS command, the tag is SELECT rows where rows is the number of rows retrieved.
For a MOVE command, the tag is MOVE rows where rows is the number of rows the cursor's position has been changed by.
For a FETCH command, the tag is FETCH rows where rows is the number of rows that have been retrieved from the cursor.
For a COPY command, the tag is COPY rows where rows is the number of rows copied. (Note: the row count appears only in PostgreSQL 8.2 and later.) """ # noqa
tag: bytes
def __init__(self, tag: bytes):
self.identifier = PostgresBackendMessageIdentifier.COMPLETE
self.tag = tag
self.backend_capable = True
self.frontend_capable = False
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_string(self.tag) \
.write(write_file=write_file)
class BindComplete(PostgresMessage):
"""
BindComplete (B)
Byte1('2')
Identifies the message as a Bind-complete indicator.
Int32(4)
Length of message contents in bytes, including self. """
def __init__(self):
self.identifier = PostgresBackendMessageIdentifier.BIND_COMPLETE
self.backend_capable = True
self.frontend_capable = False
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.write(write_file=write_file)
class Error(PostgresMessage):
"""
ErrorResponse (B)
Byte1('E')
Identifies the message as an error.
Int32
Length of message contents in bytes, including self.
The message body consists of one or more identified fields, followed by a zero byte as a terminator. Fields can
appear in any order. For each field there is the following:
Byte1 A code identifying the field type; if zero, this is the message terminator and no string follows. The
presently defined field types are listed in Section 55.8. Since more field types might be added in future,
frontends should silently ignore fields of unrecognized type.
String
The field value. """ # noqa
severity: bytes
code: bytes
message: bytes
def __init__(self, severity: bytes, code: bytes, message: bytes):
self.identifier = PostgresBackendMessageIdentifier.ERROR
self.backend_capable = True
self.frontend_capable = False
self.severity = severity
self.code = code
self.message = message
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_char(b'S') \
.add_string(self.severity) \
.add_char(b'C') \
.add_string(self.code) \
.add_char(b'M') \
.add_string(self.message) \
.add_char(b'\x00') \
.write(write_file=write_file)
@staticmethod
def from_answer(error_code: bytes, error_message: bytes):
return Error(severity=b"ERROR", code=error_code, message=error_message)
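    # Illustrative call (the SQLSTATE code and message bytes are examples):
    #   Error.from_answer(error_code=b'42000', error_message=b'syntax error')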
class ConnectionFailure(Error):
def __init__(self, message: str = None, charset: str = "UTF-8"):
if message is None:
message = "Connection Failure occurred."
super().__init__(severity="FATAL".encode(encoding=charset), code="08006".encode(encoding=charset),
message=message.encode(encoding=charset))
class InvalidSQLStatementName(Error):
def __init__(self, message: str = None, charset: str = "UTF-8"):
if message is None:
message = "Invalid SQL Statement Name"
super().__init__(severity="FATAL".encode(encoding=charset), code="26000".encode(encoding=charset),
message=message.encode(encoding=charset))
class DataException(Error):
def __init__(self, message: str = None, charset: str = "UTF-8", code: str = "22000"):
if message is None:
message = "Data Exception"
super().__init__(severity="FATAL".encode(encoding=charset), code=code.encode(encoding=charset),
message=message.encode(encoding=charset))
class ParameterStatus(PostgresMessage):
"""
ParameterStatus (B)
Byte1('S')
Identifies the message as a run-time parameter status report.
Int32
Length of message contents in bytes, including self.
String
The name of the run-time parameter being reported.
String
The current value of the parameter. """
def __init__(self, name: bytes, value: bytes):
self.identifier = PostgresBackendMessageIdentifier.PARAMETER
self.backend_capable = True
self.frontend_capable = False
self.name = name
self.value = value
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_string(self.name) \
.add_string(self.value) \
.write(write_file=write_file)
class RowDescriptions(PostgresMessage):
"""
RowDescription (B)
Byte1('T')
Identifies the message as a row description.
Int32
Length of message contents in bytes, including self.
Int16
Specifies the number of fields in a row (can be zero).
Then, for each field, there is the following:
String
The field name.
Int32
If the field can be identified as a column of a specific table, the object ID of the table; otherwise zero.
Int16
If the field can be identified as a column of a specific table, the attribute number of the column; otherwise zero.
Int32
The object ID of the field's data type.
Int16
The data type size (see pg_type.typlen). Note that negative values denote variable-width types.
Int32
The type modifier (see pg_attribute.atttypmod). The meaning of the modifier is type-specific.
Int16
The format code being used for the field. Currently will be zero (text) or one (binary). In a
RowDescription returned from the statement variant of Describe, the format code is not yet known and will always
be zero.""" # noqa
fields: Sequence[PostgresField]
def __init__(self, fields: Sequence[PostgresField]):
self.identifier = PostgresBackendMessageIdentifier.ROW_DESCRIPTION
self.backend_capable = True
self.frontend_capable = False
self.fields = fields
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.add_int16(len(self.fields)) \
.add_fields(self.fields) \
.write(write_file=write_file)
class ParameterDescription(PostgresMessage):
"""
ParameterDescription (B)
Byte1('t')
Identifies the message as a parameter description.
Int32
Length of message contents in bytes, including self.
Int16
The number of parameters used by the statement (can be zero).
Then, for each parameter, there is the following:
Int32
Specifies the object ID of the parameter data type. """
def __init__(self, parameters: Sequence):
self.identifier = PostgresBackendMessageIdentifier.PARAMETER_DESCRIPTION
self.backend_capable = True
self.frontend_capable = False
self.num_params = len(parameters)
self.parameters = parameters
super().__init__()
def send_internal(self, write_file: BinaryIO):
packet = self.get_packet_builder().add_int16(self.num_params)
for param in self.parameters:
packet = packet.add_int32(param)
packet.write(write_file=write_file)
class DataRow(PostgresMessage):
"""
DataRow (B)
Byte1('D')
Identifies the message as a data row.
Int32
Length of message contents in bytes, including self.
Int16
The number of column values that follow (possibly zero).
Next, the following pair of fields appear for each column:
Int32
The length of the column value, in bytes (this count does not include itself). Can be zero. As a special
case, -1 indicates a NULL column value. No value bytes follow in the NULL case.
Byten
The value of the column, in the format indicated by the associated format code. n is the above length. """ # noqa
rows: Sequence[Sequence[bytes]]
num_cols: int
def __init__(self, rows: Sequence[Sequence[bytes]]):
self.identifier = PostgresBackendMessageIdentifier.DATA_ROW
self.backend_capable = True
self.frontend_capable = False
if len(rows) != 0:
self.num_cols = len(rows[0])
else:
self.num_cols = 0
self.rows = rows
super().__init__()
def send_internal(self, write_file: BinaryIO):
for row in self.rows:
self.get_packet_builder() \
.add_int16(self.num_cols) \
.add_row(row) \
.write(write_file=write_file)
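    # Illustrative construction: two rows, each with two text-format column values
    #   DataRow(rows=[[b'1', b'first'], [b'2', b'second']])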
class NegotiateProtocolVersion(PostgresMessage):
"""
NegotiateProtocolVersion (B)
Byte1('v')
Identifies the message as a protocol version negotiation message.
Int32
Length of message contents in bytes, including self.
Int32
Newest minor protocol version supported by the server for the major protocol version requested by the client.
Int32
Number of protocol options not recognized by the server.
Then, for protocol option not recognized by the server, there is the following:
String
The option name. """
def __init__(self, major_version, minor_version, option_not_recognized=None):
self.identifier = PostgresBackendMessageIdentifier.NEGOTIATE_VERSION
self.backend_capable = True
self.frontend_capable = False
self.major_version = major_version
self.minor_version = minor_version
self.option_not_recognized = option_not_recognized
super().__init__()
def send_internal(self, write_file: BinaryIO):
packet_builder = self.get_packet_builder() \
.add_int32(self.major_version) \
.add_int32(self.minor_version)
if self.option_not_recognized:
packet_builder = packet_builder.add_string(self.option_not_recognized)
packet_builder.write(write_file=write_file)
class ParseComplete(PostgresMessage):
"""
ParseComplete (B)
Byte1('1')
Identifies the message as a Parse-complete indicator.
Int32(4)
Length of message contents in bytes, including self. """
def __init__(self):
self.identifier = PostgresBackendMessageIdentifier.PARSE_COMPLETE
self.backend_capable = True
self.frontend_capable = False
super().__init__()
def send_internal(self, write_file: BinaryIO):
self.get_packet_builder() \
.write(write_file=write_file)
class Query(PostgresMessage):
"""
Query (F)
Byte1('Q')
Identifies the message as a simple query.
Int32
Length of message contents in bytes, including self.
String
The query string itself. """
sql: bytes
length: int
def __init__(self):
self.sql = b''
self.length = 0
self.identifier = PostgresFrontendMessageIdentifier.QUERY
self.backend_capable = False
self.frontend_capable = True
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
self.length = packet_reader.read_int32()
self.sql = packet_reader.read_bytes(self.length - 4)
return self
def get_parsed_sql(self, encoding=None):
if not encoding:
encoding = 'utf-8'
try:
            sql = self.sql.decode(encoding)
except Exception:
raise Exception(f'SQL contains non {encoding} values: {self.sql}')
# Remove null bytes from end of sql statement. This is important.
sql = strip_null_byte(sql)
sql = SqlStatementParser.clear_sql(sql)
return sql
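# Illustrative sketch (not part of the original module): a complete simple-Query message in
# the layout described above. Query.read() sees only the part after the 'Q' identifier byte:
# the Int32 length (which includes itself) followed by the NUL-terminated query text.
def _example_query_message(sql='select 1'):
    import struct
    body = sql.encode('utf-8') + b'\x00'                    # query string, NUL-terminated
    return b'Q' + struct.pack('!i', len(body) + 4) + body   # identifier, length, body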
class Terminate(PostgresMessage):
"""
Terminate (F)
Byte1('X')
Identifies the message as a termination.
Int32(4)
Length of message contents in bytes, including self. """
def __init__(self):
self.identifier = PostgresFrontendMessageIdentifier.TERMINATE
self.backend_capable = False
self.frontend_capable = True
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
return self
class BaseFrontendMessage(PostgresMessage):
def __init__(self):
self.backend_capable = False
self.frontend_capable = True
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
self.length = packet_reader.read_int32()
if (self.length - 4) > 0:
self.response = packet_reader.read_bytes(self.length - 4)
return self
class Parse(BaseFrontendMessage):
"""
Parse (F)
Byte1('P')
Identifies the message as a Parse command.
Int32
Length of message contents in bytes, including self.
String
The name of the destination prepared statement (an empty string selects the unnamed prepared statement).
String
The query string to be parsed.
Int16 The number of parameter data types specified (can be zero). Note that this is not an indication of the
number of parameters that might appear in the query string, only the number that the frontend wants to pre-specify
types for.
Then, for each parameter, there is the following:
Int32 Specifies the object ID of the parameter data type. Placing a zero here is equivalent to leaving the type
unspecified."""
def __init__(self):
self.identifier = PostgresFrontendMessageIdentifier.PARSE
self.name = None
self.query = None
self.num_params = None
self.parameters = []
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
self.length = packet_reader.read_int32()
self.name = packet_reader.read_string()
self.query = packet_reader.read_string()
self.num_params = packet_reader.read_int16()
for i in range(self.num_params):
self.parameters.append(packet_reader.read_int32())
return self
class Bind(BaseFrontendMessage):
"""
Bind (F)
Byte1('B')
Identifies the message as a Bind command.
Int32
Length of message contents in bytes, including self.
String
The name of the destination portal (an empty string selects the unnamed portal).
String
The name of the source prepared statement (an empty string selects the unnamed prepared statement).
Int16 The number of parameter format codes that follow (denoted C below). This can be zero to indicate that there
are no parameters or that the parameters all use the default format (text); or one, in which case the specified
format code is applied to all parameters; or it can equal the actual number of parameters.
Int16[C]
The parameter format codes. Each must presently be zero (text) or one (binary).
Int16 The number of parameter values that follow (possibly zero). This must match the number of parameters needed
by the query.
Next, the following pair of fields appear for each parameter:
Int32 The length of the parameter value, in bytes (this count does not include itself). Can be zero. As a special
case, -1 indicates a NULL parameter value. No value bytes follow in the NULL case.
Byten
The value of the parameter, in the format indicated by the associated format code. n is the above length.
After the last parameter, the following fields appear:
Int16 The number of result-column format codes that follow (denoted R below). This can be zero to indicate that
there are no result columns or that the result columns should all use the default format (text); or one,
in which case the specified format code is applied to all result columns (if any); or it can equal the actual
number of result columns of the query.
Int16[R]
The result-column format codes. Each must presently be zero (text) or one (binary). """
def __init__(self):
self.identifier = PostgresFrontendMessageIdentifier.BIND
self.length = None
self.name = None
self.statement_name = None
self.format_codes = [] # 0=text 1=binary
self.parameters = []
self.result_format_codes = []
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
self.length = packet_reader.read_int32()
self.name = packet_reader.read_string()
self.statement_name = packet_reader.read_string()
num_format_codes = packet_reader.read_int16()
for _ in range(num_format_codes):
self.format_codes.append(packet_reader.read_int16())
num_parameters = packet_reader.read_int16()
for _ in range(num_parameters):
param_length = packet_reader.read_int32()
if param_length == -1:
self.parameters.append(None)
else:
self.parameters.append(packet_reader.read_bytes(param_length))
num_result_format_codes = packet_reader.read_int16()
for _ in range(num_result_format_codes):
self.result_format_codes.append(packet_reader.read_int16())
return self
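# Illustrative sketch (not part of the original module): a minimal Bind message body in the
# order Bind.read() above walks through. The leading 'B' identifier byte and the Int32 length
# field that read() consumes first are omitted; strings are NUL-terminated as read_string()
# expects. All values are made up.
def _example_bind_body():
    import struct
    body = b'\x00'                                  # destination portal name (unnamed)
    body += b'\x00'                                 # source prepared statement name (unnamed)
    body += struct.pack('!h', 0)                    # no parameter format codes -> all text
    params = [b'42', None]                          # one text value and one NULL
    body += struct.pack('!h', len(params))          # number of parameter values
    for value in params:
        if value is None:
            body += struct.pack('!i', -1)           # -1 marks a NULL parameter
        else:
            body += struct.pack('!i', len(value)) + value
    body += struct.pack('!h', 0)                    # no result-column format codes
    return body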
class Execute(BaseFrontendMessage):
"""
Execute (F)
Byte1('E')
Identifies the message as an Execute command.
Int32
Length of message contents in bytes, including self.
String
The name of the portal to execute (an empty string selects the unnamed portal).
Int32 Maximum number of rows to return, if portal contains a query that returns rows (ignored otherwise). Zero
denotes “no limit”."""
def __init__(self):
self.identifier = PostgresFrontendMessageIdentifier.EXECUTE
self.length = None
self.name = None
self.max_rows_ret = None
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
self.length = packet_reader.read_int32()
self.name = packet_reader.read_string()
self.max_rows_ret = packet_reader.read_int32()
return self
class Sync(BaseFrontendMessage):
"""
Sync (F)
Byte1('S')
Identifies the message as a Sync command.
Int32(4)
Length of message contents in bytes, including self. """
def __init__(self):
self.identifier = PostgresFrontendMessageIdentifier.SYNC
super().__init__()
class Describe(BaseFrontendMessage):
"""
Describe (F)
Byte1('D')
Identifies the message as a Describe command.
Int32
Length of message contents in bytes, including self.
Byte1
'S' to describe a prepared statement; or 'P' to describe a portal.
String The name of the prepared statement or portal to describe (an empty string selects the unnamed prepared
statement or portal)."""
def __init__(self):
self.identifier = PostgresFrontendMessageIdentifier.DESCRIBE
self.length = None
self.describe_type = None
self.name = None
super().__init__()
def read(self, packet_reader: PostgresPacketReader):
self.length = packet_reader.read_int32()
self.describe_type = packet_reader.read_byte()
self.name = packet_reader.read_string()
return self
IMPLEMENTED_BACKEND_POSTGRES_MESSAGE_CLASSES = [
NoticeResponse, AuthenticationOk, AuthenticationClearTextPassword, ReadyForQuery, CommandComplete, Error,
RowDescriptions, DataRow, NegotiateProtocolVersion, ParameterStatus, ParseComplete, BindComplete,
ParameterDescription
]
IMPLEMENTED_FRONTEND_POSTGRES_MESSAGE_CLASSES = [
Query, Terminate, Parse, Bind, Execute, Sync, Describe
]
FE_MESSAGE_MAP: Dict[PostgresFrontendMessageIdentifier, Type[PostgresMessage]] = {
PostgresFrontendMessageIdentifier.QUERY: Query,
PostgresFrontendMessageIdentifier.TERMINATE: Terminate,
PostgresFrontendMessageIdentifier.PARSE: Parse,
PostgresFrontendMessageIdentifier.BIND: Bind,
PostgresFrontendMessageIdentifier.EXECUTE: Execute,
PostgresFrontendMessageIdentifier.SYNC: Sync,
PostgresFrontendMessageIdentifier.DESCRIBE: Describe
}
SUPPORTED_AUTH_TYPES = [PostgresAuthType.PASSWORD]
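# Illustrative sketch (not part of the original module): FE_MESSAGE_MAP above lets frontend
# messages be dispatched by their identifier. The surrounding stream handling is hypothetical
# and greatly simplified.
def _example_dispatch(identifier, packet_reader):
    message_class = FE_MESSAGE_MAP.get(identifier)
    if message_class is None:
        return None                                  # unknown or unsupported frontend message
    return message_class().read(packet_reader)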
# Below Lies Unimplemented Messages
"""
'''
AuthenticationKerberosV5 (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(8)
Length of message contents in bytes, including self.
Int32(2)
Specifies that Kerberos V5 authentication is required. '''
'''
AuthenticationMD5Password (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(12)
Length of message contents in bytes, including self.
Int32(5)
Specifies that an MD5-encrypted password is required.
Byte4
The salt to use when encrypting the password. '''
'''
AuthenticationSCMCredential (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(8)
Length of message contents in bytes, including self.
Int32(6)
Specifies that an SCM credentials message is required. '''
'''
AuthenticationGSS (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(8)
Length of message contents in bytes, including self.
Int32(7)
Specifies that GSSAPI authentication is required. '''
'''
AuthenticationGSSContinue (B)
Byte1('R')
Identifies the message as an authentication request.
Int32
Length of message contents in bytes, including self.
Int32(8)
Specifies that this message contains GSSAPI or SSPI data.
Byten
GSSAPI or SSPI authentication data. '''
'''
AuthenticationSSPI (B)
Byte1('R')
Identifies the message as an authentication request.
Int32(8)
Length of message contents in bytes, including self.
Int32(9)
Specifies that SSPI authentication is required. '''
'''
AuthenticationSASL (B)
Byte1('R')
Identifies the message as an authentication request.
Int32
Length of message contents in bytes, including self.
Int32(10)
Specifies that SASL authentication is required.
The message body is a list of SASL authentication mechanisms, in the server's order of preference. A zero byte is
required as terminator after the last authentication mechanism name. For each mechanism, there is the following:
String
Name of a SASL authentication mechanism. '''
'''
AuthenticationSASLContinue (B)
Byte1('R')
Identifies the message as an authentication request.
Int32
Length of message contents in bytes, including self.
Int32(11)
Specifies that this message contains a SASL challenge.
Byten
SASL data, specific to the SASL mechanism being used. '''
'''
AuthenticationSASLFinal (B)
Byte1('R')
Identifies the message as an authentication request.
Int32
Length of message contents in bytes, including self.
Int32(12)
Specifies that SASL authentication has completed.
Byten
SASL outcome "additional data", specific to the SASL mechanism being used. '''
'''BackendKeyData (B) Byte1('K') Identifies the message as cancellation key data. The frontend must save these values
if it wishes to be able to issue CancelRequest messages later.
Int32(12)
Length of message contents in bytes, including self.
Int32
The process ID of this backend.
Int32
The secret key of this backend. '''
'''
CancelRequest (F)
Int32(16)
Length of message contents in bytes, including self.
Int32(80877102) The cancel request code. The value is chosen to contain 1234 in the most significant 16 bits,
and 5678 in the least significant 16 bits. (To avoid confusion, this code must not be the same as any protocol
version number.)
Int32
The process ID of the target backend.
Int32
The secret key for the target backend. '''
'''
Close (F)
Byte1('C')
Identifies the message as a Close command.
Int32
Length of message contents in bytes, including self.
Byte1
'S' to close a prepared statement; or 'P' to close a portal.
String The name of the prepared statement or portal to close (an empty string selects the unnamed prepared statement
or portal).'''
'''
CloseComplete (B)
Byte1('3')
Identifies the message as a Close-complete indicator.
Int32(4)
Length of message contents in bytes, including self. '''
'''
CommandComplete (B)
Byte1('C')
Identifies the message as a command-completed response.
Int32
Length of message contents in bytes, including self.
String
The command tag. This is usually a single word that identifies which SQL command was completed.
For an INSERT command, the tag is INSERT oid rows, where rows is the number of rows inserted. oid used to be the
object ID of the inserted row if rows was 1 and the target table had OIDs, but OID system columns are not supported
anymore; therefore oid is always 0.
For a DELETE command, the tag is DELETE rows where rows is the number of rows deleted.
For an UPDATE command, the tag is UPDATE rows where rows is the number of rows updated.
For a SELECT or CREATE TABLE AS command, the tag is SELECT rows where rows is the number of rows retrieved.
For a MOVE command, the tag is MOVE rows where rows is the number of rows the cursor's position has been changed by.
For a FETCH command, the tag is FETCH rows where rows is the number of rows that have been retrieved from the cursor.
For a COPY command, the tag is COPY rows where rows is the number of rows copied. (Note: the row count appears only
in PostgreSQL 8.2 and later.)'''
'''
CopyData (F & B)
Byte1('d')
Identifies the message as COPY data.
Int32
Length of message contents in bytes, including self.
Byten Data that forms part of a COPY data stream. Messages sent from the backend will always correspond to single
data rows, but messages sent by frontends might divide the data stream arbitrarily.'''
'''
CopyDone (F & B)
Byte1('c')
Identifies the message as a COPY-complete indicator.
Int32(4)
Length of message contents in bytes, including self. '''
'''
CopyFail (F)
Byte1('f')
Identifies the message as a COPY-failure indicator.
Int32
Length of message contents in bytes, including self.
String
An error message to report as the cause of failure. '''
'''CopyInResponse (B) Byte1('G') Identifies the message as a Start Copy In response. The frontend must now send
copy-in data (if not prepared to do so, send a CopyFail message).
Int32
Length of message contents in bytes, including self.
Int8 0 indicates the overall COPY format is textual (rows separated by newlines, columns separated by separator
characters, etc.). 1 indicates the overall copy format is binary (similar to DataRow format). See COPY for more
information.
Int16
The number of columns in the data to be copied (denoted N below).
Int16[N] The format codes to be used for each column. Each must presently be zero (text) or one (binary). All must be
zero if the overall copy format is textual.'''
'''
CopyOutResponse (B)
Byte1('H')
Identifies the message as a Start Copy Out response. This message will be followed by copy-out data.
Int32
Length of message contents in bytes, including self.
Int8 0 indicates the overall COPY format is textual (rows separated by newlines, columns separated by separator
characters, etc.). 1 indicates the overall copy format is binary (similar to DataRow format). See COPY for more
information.
Int16
The number of columns in the data to be copied (denoted N below).
Int16[N] The format codes to be used for each column. Each must presently be zero (text) or one (binary). All must be
zero if the overall copy format is textual.'''
'''
CopyBothResponse (B)
Byte1('W')
Identifies the message as a Start Copy Both response. This message is used only for Streaming Replication.
Int32
Length of message contents in bytes, including self.
Int8 0 indicates the overall COPY format is textual (rows separated by newlines, columns separated by separator
characters, etc.). 1 indicates the overall copy format is binary (similar to DataRow format). See COPY for more
information.
Int16
The number of columns in the data to be copied (denoted N below).
Int16[N] The format codes to be used for each column. Each must presently be zero (text) or one (binary). All must be
zero if the overall copy format is textual.'''
'''
Describe (F)
Byte1('D')
Identifies the message as a Describe command.
Int32
Length of message contents in bytes, including self.
Byte1
'S' to describe a prepared statement; or 'P' to describe a portal.
String The name of the prepared statement or portal to describe (an empty string selects the unnamed prepared
statement or portal).'''
'''
EmptyQueryResponse (B)
Byte1('I')
Identifies the message as a response to an empty query string. (This substitutes for CommandComplete.)
Int32(4)
Length of message contents in bytes, including self. '''
'''
Execute (F)
Byte1('E')
Identifies the message as an Execute command.
Int32
Length of message contents in bytes, including self.
String
The name of the portal to execute (an empty string selects the unnamed portal).
Int32 Maximum number of rows to return, if portal contains a query that returns rows (ignored otherwise). Zero
denotes “no limit”.'''
'''
Flush (F)
Byte1('H')
Identifies the message as a Flush command.
Int32(4)
Length of message contents in bytes, including self. '''
'''
FunctionCall (F)
Byte1('F')
Identifies the message as a function call.
Int32
Length of message contents in bytes, including self.
Int32
Specifies the object ID of the function to call.
Int16 The number of argument format codes that follow (denoted C below). This can be zero to indicate that there are
no arguments or that the arguments all use the default format (text); or one, in which case the specified format code
is applied to all arguments; or it can equal the actual number of arguments.
Int16[C]
The argument format codes. Each must presently be zero (text) or one (binary).
Int16
Specifies the number of arguments being supplied to the function.
Next, the following pair of fields appear for each argument:
Int32 The length of the argument value, in bytes (this count does not include itself). Can be zero. As a special
case, -1 indicates a NULL argument value. No value bytes follow in the NULL case.
Byten
The value of the argument, in the format indicated by the associated format code. n is the above length.
After the last argument, the following field appears:
Int16
The format code for the function result. Must presently be zero (text) or one (binary). '''
'''
FunctionCallResponse (B)
Byte1('V')
Identifies the message as a function call result.
Int32
Length of message contents in bytes, including self.
Int32 The length of the function result value, in bytes (this count does not include itself). Can be zero. As a
special case, -1 indicates a NULL function result. No value bytes follow in the NULL case.
Byten
The value of the function result, in the format indicated by the associated format code. n is the above length. '''
'''
GSSENCRequest (F)
Int32(8)
Length of message contents in bytes, including self.
Int32(80877104) The GSSAPI Encryption request code. The value is chosen to contain 1234 in the most significant 16
bits, and 5680 in the least significant 16 bits. (To avoid confusion, this code must not be the same as any protocol
version number.)'''
'''GSSResponse (F) Byte1('p') Identifies the message as a GSSAPI or SSPI response. Note that this is also used for
SASL and password response messages. The exact message type can be deduced from the context.
Int32
Length of message contents in bytes, including self.
Byten
GSSAPI/SSPI specific message data. '''
'''
NoData (B)
Byte1('n')
Identifies the message as a no-data indicator.
Int32(4)
Length of message contents in bytes, including self. '''
'''
NotificationResponse (B)
Byte1('A')
Identifies the message as a notification response.
Int32
Length of message contents in bytes, including self.
Int32
The process ID of the notifying backend process.
String
The name of the channel that the notify has been raised on.
String
The “payload” string passed from the notifying process. '''
'''
Parse (F)
Byte1('P')
Identifies the message as a Parse command.
Int32
Length of message contents in bytes, including self.
String
The name of the destination prepared statement (an empty string selects the unnamed prepared statement).
String
The query string to be parsed.
Int16 The number of parameter data types specified (can be zero). Note that this is not an indication of the number
of parameters that might appear in the query string, only the number that the frontend wants to prespecify types for.
Then, for each parameter, there is the following:
Int32 Specifies the object ID of the parameter data type. Placing a zero here is equivalent to leaving the type
unspecified.'''
'''PasswordMessage (F) Byte1('p') Identifies the message as a password response. Note that this is also used for
GSSAPI, SSPI and SASL response messages. The exact message type can be deduced from the context.
Int32
Length of message contents in bytes, including self.
String
The password (encrypted, if requested). '''
'''PortalSuspended (B) Byte1('s') Identifies the message as a portal-suspended indicator. Note this only appears if
an Execute message's row-count limit was reached.
Int32(4)
Length of message contents in bytes, including self. '''
'''SASLInitialResponse (F) Byte1('p') Identifies the message as an initial SASL response. Note that this is also used
for GSSAPI, SSPI and password response messages. The exact message type is deduced from the context.
Int32
Length of message contents in bytes, including self.
String
Name of the SASL authentication mechanism that the client selected.
Int32
Length of SASL mechanism specific "Initial Client Response" that follows, or -1 if there is no Initial Response.
Byten
SASL mechanism specific "Initial Response". '''
'''SASLResponse (F) Byte1('p') Identifies the message as a SASL response. Note that this is also used for GSSAPI,
SSPI and password response messages. The exact message type can be deduced from the context.
Int32
Length of message contents in bytes, including self.
Byten
SASL mechanism specific message data. '''
'''
StartupMessage (F)
Int32
Length of message contents in bytes, including self.
Int32(196608) The protocol version number. The most significant 16 bits are the major version number (3 for the
protocol described here). The least significant 16 bits are the minor version number (0 for the protocol described
here).
The protocol version number is followed by one or more pairs of parameter name and value strings. A zero byte is
required as a terminator after the last name/value pair. Parameters can appear in any order. user is required,
others are optional. Each parameter is specified as:
String
The parameter name. Currently recognized names are:
user
The database user name to connect as. Required; there is no default.
database
The database to connect to. Defaults to the user name.
options Command-line arguments for the backend. (This is deprecated in favor of setting individual run-time
parameters.) Spaces within this string are considered to separate arguments, unless escaped with a backslash;
write \\ to represent a literal backslash.
replication Used to connect in streaming replication mode, where a small set of replication commands can be issued
instead of SQL statements. Value can be true, false, or database, and the default is false. See Section 55.4 for
details.
In addition to the above, other parameters may be listed. Parameter names beginning with _pq_. are reserved for use
as protocol extensions, while others are treated as run-time parameters to be set at backend start time. Such
settings will be applied during backend start (after parsing the command-line arguments if any) and will act as
session defaults.
String
The parameter value. '''
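An illustrative sketch (example values only, not part of the protocol documentation above) of
how a StartupMessage could be assembled; the Int32 length prefix is computed last:
    import struct
    params = b'user\x00alice\x00database\x00mydb\x00\x00'   # name/value pairs plus final terminator
    body = struct.pack('!i', 196608) + params                # protocol version 3.0
    message = struct.pack('!i', len(body) + 4) + body        # length includes itself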
""" | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/gui/kivy/uix/qrcodewidget.py | from threading import Thread
from functools import partial
import qrcode
from qrcode import exceptions
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics.texture import Texture
from kivy.properties import ObjectProperty, StringProperty, ListProperty,\
    BooleanProperty
from kivy.lang import Builder
from kivy.clock import Clock
Builder.load_string('''
<QRCodeWidget>
canvas.before:
# Draw white Rectangle
Color:
rgba: root.background_color
Rectangle:
size: self.size
pos: self.pos
canvas.after:
Color:
rgba: root.foreground_color
Rectangle:
size: self.size
pos: self.pos
Image
id: qrimage
pos_hint: {'center_x': .5, 'center_y': .5}
allow_stretch: True
size_hint: None, None
size: root.width * .9, root.height * .9
''')
class QRCodeWidget(FloatLayout):
data = StringProperty(None, allow_none=True)
background_color = ListProperty((1, 1, 1, 1))
foreground_color = ListProperty((0, 0, 0, 0))
def __init__(self, **kwargs):
super(QRCodeWidget, self).__init__(**kwargs)
self.data = None
self.qr = None
self._qrtexture = None
self.failure_cb = None
def on_data(self, instance, value):
if not (self.canvas or value):
return
try:
self.update_qr()
except qrcode.exceptions.DataOverflowError:
if self.failure_cb:
self.failure_cb()
else:
raise
def set_data(self, data, failure_cb=None):
if self.data == data:
return
self.failure_cb = failure_cb
MinSize = 210 if len(data) < 128 else 500
self.setMinimumSize((MinSize, MinSize))
self.data = data
self.qr = None
def update_qr(self):
if not self.data and self.qr:
return
L = qrcode.constants.ERROR_CORRECT_L
data = self.data
self.qr = qr = qrcode.QRCode(
version=None,
error_correction=L,
box_size=10,
border=0,
)
qr.add_data(data)
qr.make(fit=True)
self.update_texture()
def setMinimumSize(self, size):
# currently unused, do we need this?
self._texture_size = size
def _create_texture(self, k):
self._qrtexture = texture = Texture.create(size=(k,k), colorfmt='rgb')
# don't interpolate texture
texture.min_filter = 'nearest'
texture.mag_filter = 'nearest'
def update_texture(self):
if not self.qr:
return
matrix = self.qr.get_matrix()
k = len(matrix)
# create the texture
self._create_texture(k)
buff = []
bext = buff.extend
cr, cg, cb, ca = self.background_color[:]
        cr, cg, cb = int(cr * 255), int(cg * 255), int(cb * 255)  # bytes() below needs integers, not floats
for r in range(k):
for c in range(k):
bext([0, 0, 0] if matrix[k-1-r][c] else [cr, cg, cb])
# then blit the buffer
buff = bytes(buff)
# update texture
self._upd_texture(buff)
def _upd_texture(self, buff):
texture = self._qrtexture
texture.blit_buffer(buff, colorfmt='rgb', bufferfmt='ubyte')
img = self.ids.qrimage
img.anim_delay = -1
img.texture = texture
img.canvas.ask_update()
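# Illustrative usage sketch (not part of the original module): set_data() accepts an optional
# callback that is invoked instead of raising when the data does not fit into a QR code.
# The data string below is a made-up example value.
def _example_set_data(widget: QRCodeWidget):
    widget.set_data('example payment request data',
                    failure_cb=lambda: print('data too large for a QR code'))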
if __name__ == '__main__':
from kivy.app import runTouchApp
import sys
data = str(sys.argv[1:])
runTouchApp(QRCodeWidget(data=data)) | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/a11yhelp/dialogs/lang/tr.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","tr",{title:"Erişilebilirlik Talimatları",contents:"Yardım içeriği. Bu pencereyi kapatmak için ESC tuşuna basın.",legend:[{name:"Genel",items:[{name:"Düzenleyici Araç Çubuğu",legend:"Araç çubuğunda gezinmek için ${toolbarFocus} basın. TAB ve SHIFT-TAB ile önceki ve sonraki araç çubuğu grubuna taşıyın. SAĞ OK veya SOL OK ile önceki ve sonraki bir araç çubuğu düğmesini hareket ettirin. SPACE tuşuna basın veya araç çubuğu düğmesini etkinleştirmek için ENTER tuşna basın."},
{name:"Diyalog Düzenleyici",legend:"Dialog penceresi içinde, sonraki iletişim alanına gitmek için SEKME tuşuna basın, önceki alana geçmek için SHIFT + TAB tuşuna basın, pencereyi göndermek için ENTER tuşuna basın, dialog penceresini iptal etmek için ESC tuşuna basın. Birden çok sekme sayfaları olan diyalogların, sekme listesine gitmek için ALT + F10 tuşlarına basın. Sonra TAB veya SAĞ OK sonraki sekmeye taşıyın. SHIFT + TAB veya SOL OK ile önceki sekmeye geçin. Sekme sayfayı seçmek için SPACE veya ENTER tuşuna basın."},
{name:"İçerik Menü Editörü",legend:"İçerik menüsünü açmak için ${contextMenu} veya UYGULAMA TUŞU'na basın. Daha sonra SEKME veya AŞAĞI OK ile bir sonraki menü seçeneği taşıyın. SHIFT + TAB veya YUKARI OK ile önceki seçeneğe gider. Menü seçeneğini seçmek için SPACE veya ENTER tuşuna basın. Seçili seçeneğin alt menüsünü SPACE ya da ENTER veya SAĞ OK açın. Üst menü öğesini geçmek için ESC veya SOL OK ile geri dönün. ESC ile bağlam menüsünü kapatın."},{name:"Liste Kutusu Editörü",legend:"Liste kutusu içinde, bir sonraki liste öğesine SEKME VEYA AŞAĞI OK ile taşıyın. SHIFT + TAB veya YUKARI önceki liste öğesi taşıyın. Liste seçeneği seçmek için SPACE veya ENTER tuşuna basın. Liste kutusunu kapatmak için ESC tuşuna basın."},
{name:"Element Yol Çubuğu Editörü",legend:"Elementlerin yol çubuğunda gezinmek için ${ElementsPathFocus} basın. SEKME veya SAĞ OK ile sonraki element düğmesine taşıyın. SHIFT + TAB veya SOL OK önceki düğmeye hareket ettirin. Editör içindeki elementi seçmek için ENTER veya SPACE tuşuna basın."}]},{name:"Komutlar",items:[{name:"Komutu geri al",legend:"$(undo)'ya basın"},{name:"Komutu geri al",legend:"${redo} basın"},{name:" Kalın komut",legend:"${bold} basın"},{name:" İtalik komutu",legend:"${italic} basın"},
{name:" Alttan çizgi komutu",legend:"${underline} basın"},{name:" Bağlantı komutu",legend:"${link} basın"},{name:" Araç çubuğu Toplama komutu",legend:"${toolbarCollapse} basın"},{name:"Önceki komut alanına odaklan",legend:"Düzeltme imleçinden önce, en yakın uzaktaki alana erişmek için ${accessPreviousSpace} basın, örneğin: iki birleşik HR elementleri. Aynı tuş kombinasyonu tekrarıyla diğer alanlarada ulaşın."},{name:"Sonraki komut alanına odaklan",legend:"Düzeltme imleçinden sonra, en yakın uzaktaki alana erişmek için ${accessNextSpace} basın, örneğin: iki birleşik HR elementleri. Aynı tuş kombinasyonu tekrarıyla diğer alanlarada ulaşın."},
{name:"Erişilebilirlik Yardımı",legend:"${a11yHelp}'e basın"}]}]}); | PypiClean |
/FEV_KEGG-1.1.4.tar.gz/FEV_KEGG-1.1.4/FEV_KEGG/Graph/Elements.py | from builtins import str
import re
from typing import List, Iterable
from FEV_KEGG.KEGG.DataTypes import Gene
from FEV_KEGG.Util import Util
from FEV_KEGG import settings
class Element(object):
def __init__(self, uniqueID: str):
"""
Generic graph element with a `uniqueID`.
Comparable (==, !=, <, >, <=, >=) and hashable by this unique ID. Converting to a string returns the `uniqueID`.
Parameters
----------
uniqueID : str
String uniquely identifying this element among all other possible elements.
Attributes
----------
self.uniqueID : str
Unique element ID.
"""
self.uniqueID = uniqueID
def getUrl(self):
"""
Get the link to KEGG for this EC number.
Returns
-------
str
URL to KEGG.
"""
return "http://kegg.jp/dbget-bin/www_bget?" + self.uniqueID
def getRestUrl(self):
"""
Get the link to KEGG's REST-API for this EC number.
Essentially the same as :func:`getUrl`, but meant to be read by machines, therefore no eye-candy.
Returns
-------
str
URL to KEGG's REST-API
"""
return "http://rest.kegg.jp/get/" + self.uniqueID
def toHtml(self, short = False, noTd = False):
"""
Get the Element's string representation surrounded by its URL as an HTML line.
"""
if self.name is None or short is True:
if noTd is True:
return "<a target='_blank' href='" + self.getUrl() + "'>" + self.__str__() + "</a>"
else:
return "<td><a target='_blank' href='" + self.getUrl() + "'>" + self.__str__() + "</a></td><td></td>"
else:
if noTd is True:
return "<a target='_blank' href='" + self.getUrl() + "'>" + self.__str__() + "</a> (" + self.name + ")"
else:
return "<td><a target='_blank' href='" + self.getUrl() + "'>" + self.__str__() + "</a></td><td>(" + self.name + ")</td>"
def __str__(self):
if settings.printElementUrl:
return str(self.uniqueID) + ' (' + self.getUrl() + ')'
else:
return self.uniqueID
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.uniqueID == other.uniqueID
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return self.uniqueID.__hash__()
def __lt__(self, other):
return self.uniqueID < other.uniqueID
def __gt__(self, other):
return self.uniqueID > other.uniqueID
def __le__(self, other):
return self.uniqueID <= other.uniqueID
def __ge__(self, other):
return self.uniqueID >= other.uniqueID
class DrugIdError(Exception):
"""
Raised if a :class:`SubstanceID` is created from a drug ID, because only compounds and glycans are useful in our model of metabolism.
"""
pass
class SubstanceID(Element):
    REGEX_PATTERN = re.compile('^[CG][0-9]{5}$')  # compound (Cxxxxx) or glycan (Gxxxxx) IDs
def __init__(self, keggSubstanceID: 'C01102'):
"""
Represents a substrate/product of metabolism by compound/glycan ID from KEGG, eg. 'C01102' or 'G00160'.
Parameters
----------
keggSubstanceID : str
Unique ID of the compound or glycan.
Attributes
----------
self.keggCompoundID : str
Unique compound/glycan ID.
self.description : str
Descriptive chemical name of the compound/glycan. May likely be *None*. Usually a list of synonymous names.
self.name : str
Short chemical name of the compound/glycan. May likely be *None*. Is the shortest name occuring in `description`.
Raises
------
DrugIdError
Drug IDs, eg. D08603, raise a DrugIdError, because only compounds and glycans are useful in our model of metabolism. Use the synonymous Compound ID instead.
Note
----
This does not check if the compound/glycan actually exists in KEGG! You will find out eventually when trying to retrieve information about it.
See Also
--------
FEV_KEGG.Graph.SubstanceGraphs.SubstanceGraph.addSubstanceDescriptions : The function to download and add `self.description`, and `self.name`.
"""
if keggSubstanceID[0] == 'D':
raise DrugIdError('Drug IDs are not accepted, as there are usually accompanied by a synonymous Compound ID.')
if self.__class__.REGEX_PATTERN.match(keggSubstanceID) is None: # wrong format
raise ValueError('Compound/Glycan ID not formatted correctly: ' + keggSubstanceID)
Element.__init__(self, keggSubstanceID)
self.keggCompoundID = self.uniqueID
self.description = None
self.name = None
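# Illustrative usage sketch (not part of the original module): compound and glycan IDs are
# accepted, while drug IDs raise DrugIdError, as described in the class docstring. IDs are
# only checked for format, not for existence in KEGG.
def _example_substance_ids():
    compound = SubstanceID('C01102')
    glycan = SubstanceID('G00160')
    try:
        SubstanceID('D08603')
    except DrugIdError:
        pass                                   # drug IDs are rejected by design
    return compound, glycan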
class ReactionID(Element):
def __init__(self, keggReactionID: 'R01899'):
"""
Represents a reaction of metabolism by reaction ID from KEGG, eg. 'R01899'.
Parameters
----------
keggReactionID : str
Unique ID of the reaction.
Attributes
----------
self.keggReactionID : str
Unique reaction ID.
Note
----
This does not check if the reaction actually exists in KEGG! You will find out eventually when trying to retrieve information about it.
"""
Element.__init__(self, keggReactionID)
self.keggReactionID = self.uniqueID
class Enzyme(Element):
def __init__(self, organismAbbreviation: 'eco', geneName: 'b0004', ecNumberStrings: List[str], name: 'thrC' = None, description: '(RefSeq) hydrogenase 4, subunit' = None):
"""
Represents an enzyme of metabolism.
It has exactly one GeneID, which is its unique identifier.
Parameters
----------
organismAbbreviation : str
Abbreviation string of the organism this enzyme belongs to, as known to KEGG, e.g. 'eco'. Must obviously be unique and existant in KEGG.
geneName : str
Name of the gene which represents this enzyme, e.g. 'b0004'. Will be combined with `organismAbbreviation` to form the unique :class:`GeneID`. Thus, must be unique within the organism.
ecNumberStrings : List[str]
List of strings representing the EC numbers associated with this enzyme. Will be split and parsed into :class:`EcNumber` objects.
name : str, optional
Colloquial name of this enzyme, e.g. 'thrC'. This is not used for automatic identification, you may make it *None*.
description : str, optional
Full description of this enzyme from KEGG, e.g. '(RefSeq) hydrogenase 4, subunit'. This is not used for automatic identification, you may make it *None*.
Attributes
----------
self.organismAbbreviation : str
self.geneName : str
self.geneID : GeneID
self.name : str
self.ecNumbers : Set[EcNumber]
self.description : str
Raises
------
ValueError
If `organismAbbreviation` and `geneName` do not form a valid gene ID. Or if any of the EC numbers in `ecNumberStrings` is not a valid EC number.
Note
----
This does not check if the organism, gene ID, EC numbers, or anything else actually exist in KEGG! You will find out eventually when trying to retrieve information about them.
"""
# build subclasses
# GeneID
geneID = GeneID(organismAbbreviation + ':' + geneName)
# EcNumbers
ecNumbers = set()
for ecNumberString in ecNumberStrings:
ecNumber = EcNumber(ecNumberString)
ecNumbers.add(ecNumber)
# determine unique ID
Element.__init__(self, geneID.__str__())
# save object attributes
self.organismAbbreviation = organismAbbreviation
self.geneID = geneID
self.geneName = geneName
if name is not None and name.__eq__(geneName):
self.name = None
else:
self.name = name
self.ecNumbers = ecNumbers
# replace useless substrings
if description is not None:
description = description.replace('(RefSeq) ', '')
self.description = description
def getEcNumbersString(self):
"""
EC numbers associated with this enzyme as a string.
Returns
-------
str
EC numbers associated with this enzyme in a string, eg. '1.2.3.4, 2.3.4.5'
"""
strings = []
for ecNumber in self.ecNumbers:
strings.append(ecNumber.__str__())
return ', '.join(strings)
@classmethod
def fromGene(cls, gene: Gene) -> 'Enzyme':
"""
Creates an :class:`Enzyme` from a :class:`FEV_KEGG.KEGG.DataTypes.Gene`.
Parameters
----------
gene : Gene
Gene object, retrieved and parsed from KEGG GENE at some point.
Returns
-------
Enzyme
An enzyme object.
Raises
------
ValueError
If `organismAbbreviation` and `geneName` do not form a valid gene ID. Or if any of the EC numbers in `ecNumberStrings` is not a valid EC number.
"""
return cls(organismAbbreviation = gene.organismAbbreviation, geneName = gene.number, ecNumberStrings = gene.ecNumbers, name = gene.symbol, description = gene.name)
def __lt__(self, other):
# sort by EC number first
selfEcList = list(self.ecNumbers)
otherEcList = list(other.ecNumbers)
if selfEcList == otherEcList:
# then by gene ID
return self.uniqueID < other.uniqueID
else:
return selfEcList < otherEcList
def __gt__(self, other):
# sort by EC number first
selfEcList = list(self.ecNumbers)
otherEcList = list(other.ecNumbers)
if selfEcList == otherEcList:
# then by gene ID
return self.uniqueID > other.uniqueID
else:
return selfEcList > otherEcList
def __le__(self, other):
# sort by EC number first
selfEcList = list(self.ecNumbers)
otherEcList = list(other.ecNumbers)
if selfEcList == otherEcList:
# then by gene ID
return self.uniqueID <= other.uniqueID
else:
return selfEcList <= otherEcList
def __ge__(self, other):
# sort by EC number first
selfEcList = list(self.ecNumbers)
otherEcList = list(other.ecNumbers)
if selfEcList == otherEcList:
# then by gene ID
return self.uniqueID >= other.uniqueID
else:
return selfEcList >= otherEcList
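# Illustrative usage sketch (not part of the original module): constructing an Enzyme directly
# from example values; nothing is checked against KEGG at this point.
def _example_enzyme():
    enzyme = Enzyme('eco', 'b0004', ['4.2.3.1'], name='thrC')
    return enzyme.geneID, enzyme.getEcNumbersString()     # gene ID 'eco:b0004', EC numbers '4.2.3.1'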
class EnzymeComplete(Enzyme):
def __init__(self, gene: Gene):
"""
Represents an enzyme of metabolism, saving the original underlying gene description `gene` for later manual use.
The underlying gene description is usually not necessary, use the parent class to save memory space.
Parameters
----------
gene : Gene
Gene object, retrieved and parsed from KEGG GENE at some point. Will be kept in memory in the *gene* attribute.
Attributes
----------
self.gene : :class:`FEV_KEGG.KEGG.DataTypes.Gene`
Original underlying gene description.
Raises
------
ValueError
See parent class.
"""
        # argument order must match Enzyme.__init__, cf. Enzyme.fromGene
        super().__init__(gene.organismAbbreviation, gene.number, gene.ecNumbers, name=gene.symbol, description=gene.name)
self.gene = gene
class EcNumber(Element):
WILDCARD = '-'
    REGEX_PATTERN = re.compile(r'^[1-7]\.(([1-9][0-9]{0,1})|\-)\.(((?<!\-\.)([1-9][0-9]{0,1}))|\-)\.(((?<!\-\.)([1-9][0-9]{0,2}))|\-)$')
def __init__(self, ecNumberString: '4.2.3.1'):
"""
Represents an enzyme of metabolism by EC number, e.g. '4.2.3.1'.
Parameters
----------
ecNumberString : str
EC number represented as a string. Will be checked for correct formatting!
Attributes
----------
self.ecNumberString : str
E.g. '4.2.3.-'.
self.ecNumberLevels : List[str]
E.g. ['4', '2', '3', '-'].
self.ecNumberLevelsInteger : List[int]
E.g. [4, 2, 3, -1]. A wildcard is translated to -1.
self.description : str
Descriptive name of the enzymes behind this EC number. May likely be *None*. Usually a list of synonymous names.
self.name : str
Short name of the enzymes behind this EC number. May likely be *None*. Is the shortest name occuring in `description`.
self.reaction : str
IUBMB string describing the reaction formula. May likely be *None*.
Raises
------
ValueError
If EC number is not formatted correctly.
See Also
--------
FEV_KEGG.Graph.SubstanceGraphs.SubstanceEcGraph.addEcDescriptions : The function to download and add `self.description`, `self.name`, and `self.reaction`.
"""
if self.__class__.REGEX_PATTERN.match(ecNumberString) is None: # wrong format
raise ValueError('EC number not formatted correctly: ' + ecNumberString)
# determine unique ID
Element.__init__(self, ecNumberString)
# save object attributes
self.ecNumberString = self.uniqueID
self.ecNumberLevels = self.ecNumberString.split('.')
self._ecNumberLevelsInteger = [-1 if level == EcNumber.WILDCARD else int(level) for level in self.ecNumberLevels]
self.description = None
self.name = None
self.reaction = None
@classmethod
def fromArray(cls, ecNumberLevels: Iterable) -> 'EcNumber':
"""
Creates EcNumber object from single EC number levels.
Parameters
----------
ecNumberLevels : Iterable
Iterable of the EC number levels, can be int or str. For a wildcard, obviously only str is reasonable.
Raises
------
ValueError
If the resulting EC number is not formatted correctly.
"""
return cls('.'.join(ecNumberLevels))
@property
def ecNumberLevelsInteger(self) -> List[int]:
if not hasattr(self, '_ecNumberLevelsInteger'):
self._ecNumberLevelsInteger = [-1 if level == EcNumber.WILDCARD else int(level) for level in self.ecNumberLevels]
return self._ecNumberLevelsInteger
def contains(self, ecNumber: 'EcNumber') -> bool:
"""
Check whether this EC number is a superset of `ecNumber`, made possibly by the wildcard.
Parameters
----------
ecNumber : EcNumber
The EC number to compare against.
Returns
-------
bool
*True*, if the other EC number is part of the set of EC numbers defined by wildcard dashes in the levels of this EC number.
For example 1.2.3.- contains 1.2.3.1 up to 1.2.3.999, but 1.2.3.4 can only contain itself.
"""
selfLevels = self.ecNumberLevels
otherLevels = ecNumber.ecNumberLevels
for i in range(0, 4):
selfNumber = selfLevels[i]
otherNumber = otherLevels[i]
            if selfNumber != EcNumber.WILDCARD and selfNumber != otherNumber: # current level does not match AND has no wildcard '-' in this EC number
return False
return True
def matchingLevels(self, ecNumber: 'EcNumber', wildcardMatchesNumber = True) -> int:
"""
Determines the number of levels which match between this EC number and `ecNumber`.
This could act as a coarse distance measure for EC numbers.
Parameters
----------
ecNumber : EcNumber
The EC number to compare against.
wildcardMatchesNumber : bool, optional
If *True*, a wildcard acts as a sure match: '1.-.-.-'.matchingLevels('1.2.3.4') = 4.
If *False*, a wildcard only matches another wildcard.
Returns
-------
int
Number of consecutive levels that match, if any, starting with the first (leftmost).
'1.2.3.4'.matchingLevels('1.2.6.7') = 2 because the first two levels match consecutively.
'1.2.3.4'.matchingLevels('2.2.3.4') = 0 because the very first level does not match.
"""
matchingLevels = 0
selfLevels = self.ecNumberLevels
otherLevels = ecNumber.ecNumberLevels
for i in range(0, 4):
selfNumber = selfLevels[i]
otherNumber = otherLevels[i]
if wildcardMatchesNumber == True:
if selfNumber == EcNumber.WILDCARD or otherNumber == EcNumber.WILDCARD or selfNumber == otherNumber: # current level matches OR is a wildcard
matchingLevels += 1
else:
return matchingLevels
else:
if selfNumber == otherNumber: # current level matches
matchingLevels += 1
else:
return matchingLevels
return matchingLevels
def hasWildcard(self) -> bool:
"""
Whether this EC number contains a wildcard.
Returns
-------
bool
*True* if this EC number contains a wildcard (-) at any level, otherwise, returns *False*.
"""
for level in self.ecNumberLevels:
if level == EcNumber.WILDCARD:
return True
return False
@staticmethod
def removeWildcards(ecNumbers: Iterable) -> Iterable:
"""
Remove EC numbers containing wildcards from an Iterable.
Parameters
----------
ecNumbers : Iterable[EcNumber]
The EcNumber objects to check for wildcards.
Returns
-------
Iterable[EcNumber]
A new Iterable of the same type, containing only EC numbers which do **not** have a wildcard (-) anywhere. This does not deduplicate EC numbers.
"""
validECnumbers = []
for ecNumber in ecNumbers:
if not ecNumber.hasWildcard():
validECnumbers.append(ecNumber)
return ecNumbers.__class__(validECnumbers)
@staticmethod
def insertWildcards(ecNumbers: Iterable, keepLevels = 3, allowHigherWildcards = True, returnSet = True, deduplicateList = False) -> Iterable:
"""
Turns EC numbers without wildcards into EC numbers with wildcards.
Returning them in a list preserves order.
Parameters
----------
ecNumbers : Iterable
The EcNumber objects to abstract using wildcards.
keepLevels : int, optional
The first x levels of each EC number are kept intact. If `keepLevels` == 3, turns 1.2.3.4 into 1.2.3.-. Only 1, 2, 3, and 4 are allowed. EC numbers already containing wildcards are left unchanged.
allowHigherWildcards : bool, optional
If *False* and there is a wildcard in a level above 'keepLevels' (e.g. 3):, 1.2.3.4 -> 1.2.3.- and 2.3.4.- -> 2.3.4.-, but 3.4.-.- is removed completely.
returnSet : bool, optional
If *True*, returns results in a set. Takes precedence over 'deduplicateList', as sets automatically deduplicate.
deduplicateList : bool, optional
If *True*, result list is deduplicated before returning, preserving order.
Returns
-------
Iterable
Either a list or a set of abstracted EC numbers.
Raises
------
ValueError
If `keepLevels` is not one of [1, 2, 3, 4].
"""
if not keepLevels in [1, 2, 3, 4]:
raise ValueError('Can not keep ' + str(keepLevels) + ' levels, there are only 1, 2, 3, or 4.')
filtered = []
for ecNumber in ecNumbers:
levels = ecNumber.ecNumberLevels
filteredLevels = []
for i in range(0, keepLevels):
level = levels[i]
# check for higher wildcards
if allowHigherWildcards is False and level == EcNumber.WILDCARD:
filteredLevels = None
break
else:
filteredLevels.append(level)
if filteredLevels is None: # higher wildcard found but disallowed
continue
else: # pad with wildcards
for _ in range(4, keepLevels, -1):
filteredLevels.append(EcNumber.WILDCARD)
filtered.append( EcNumber.fromArray(filteredLevels) )
if returnSet is True:
return set( filtered )
if deduplicateList is True:
filtered = Util.deduplicateList(filtered, preserveOrder = True)
return filtered
def addDescription(self):
"""
Query KEGG and add further description to this EC number.
Warnings
--------
Much slower than doing :func:`addEcDescriptions` for several EC numbers in bulk!
"""
from FEV_KEGG.KEGG import Database
ecNumberIdToEcEnzyme = Database.getEcEnzymeBulk([self])
ecEnzyme = ecNumberIdToEcEnzyme.get(self.uniqueID)
if ecEnzyme is not None:
self.description = ecEnzyme.description
self.name = ecEnzyme.name
self.reaction = ecEnzyme.reaction
@staticmethod
def addEcDescriptions(ecNumbers: Iterable):
"""
Query KEGG for further descriptions and add them to each EC number in `ecNumbers`.
"""
from FEV_KEGG.KEGG import Database
ecNumberIdToEcEnzyme = Database.getEcEnzymeBulk(ecNumbers)
for ecNumber in ecNumbers:
ecEnzyme = ecNumberIdToEcEnzyme.get(ecNumber.uniqueID)
if ecEnzyme is not None:
ecNumber.description = ecEnzyme.description
ecNumber.name = ecEnzyme.name
ecNumber.reaction = ecEnzyme.reaction
def __lt__(self, other):
return self.ecNumberLevelsInteger < other.ecNumberLevelsInteger
def __gt__(self, other):
return self.ecNumberLevelsInteger > other.ecNumberLevelsInteger
def __le__(self, other):
return self.ecNumberLevelsInteger <= other.ecNumberLevelsInteger
def __ge__(self, other):
return self.ecNumberLevelsInteger >= other.ecNumberLevelsInteger
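# Illustrative usage sketch (not part of the original module), based on the examples given in
# the docstrings above.
def _example_ec_numbers():
    wildcard = EcNumber('1.2.3.-')
    specific = EcNumber('1.2.3.4')
    assert wildcard.contains(specific)                         # 1.2.3.- covers 1.2.3.1 up to 1.2.3.999
    assert specific.matchingLevels(EcNumber('1.2.6.7')) == 2   # only the first two levels match
    # Abstract to the third level: 1.2.3.4 -> 1.2.3.- and 2.3.4.5 -> 2.3.4.-
    return EcNumber.insertWildcards([specific, EcNumber('2.3.4.5')], keepLevels=3)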
class GeneID(Element):
    REGEX_PATTERN = re.compile(r'^[a-z]{3,4}:[a-zA-Z0-9_\-\.]+$')
def __init__(self, geneIDString: 'eco:b0004'):
"""
Represents am enzyme of metabolism by gene ID, e.g. 'eco:b0004'.
Parameters
----------
geneIDString : str
Gene ID represented by a string, e.g. 'eco:b0004'. Will be checked for correct formatting!
Attributes
----------
self.geneIDString : str
Raises
------
ValueError
If gene ID is not formatted correctly.
"""
# check input
if self.__class__.REGEX_PATTERN.match(geneIDString) is None: # wrong format
raise ValueError('Gene ID not formatted correctly: ' + geneIDString)
# determine unique ID
Element.__init__(self, geneIDString)
# save object attributes
self.geneIDString = self.uniqueID
@property
def organismAbbreviation(self) -> str:
"""
Returns
-------
str
'eco' from 'eco:b0004'.
"""
geneIDSplit = self.geneIDString.split(':')
organismAbbreviation = geneIDSplit[0]
return organismAbbreviation
@property
def geneName(self) -> str:
"""
Returns
-------
str
'b0004' from 'eco:b0004'.
"""
geneIDSplit = self.geneIDString.split(':')
geneName = geneIDSplit[1]
return geneName
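# Illustrative usage sketch (not part of the original module): a gene ID splits into the
# organism abbreviation and the gene name via the properties above.
def _example_gene_id():
    gene_id = GeneID('eco:b0004')
    return gene_id.organismAbbreviation, gene_id.geneName    # ('eco', 'b0004')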
class KeggOrthologyID(Element):
REGEX_PATTERN = re.compile('^K[0-9]{5}$')
def __init__(self, keggOrthologyIDString: 'K01733'):
"""
Represents an enzyme of metabolism by KEGG Orthology ID.
Parameters
----------
keggOrthologyIDString : str
String representation of a KEGG Orthology ID. Will be checked for correct formatting!
Attributes
----------
self.keggOrthologyIDString : str
Raises
------
ValueError
If KEGG Orthology ID is not formatted correctly.
"""
# check input
if self.__class__.REGEX_PATTERN.match(keggOrthologyIDString) is None: # wrong format
raise ValueError('KEGG Orthology ID not formatted correctly: ' + keggOrthologyIDString)
# determine unique ID
Element.__init__(self, keggOrthologyIDString)
# save object attributes
self.keggOrthologyIDString = self.uniqueID | PypiClean |
/Deeplodocus-0.3.0-py3-none-any.whl/deeplodocus/app/models/yolo.py | import torch
import torch.nn as nn
from deeplodocus.utils.generic_utils import get_specific_module
from deeplodocus.app.layers.empty import EmptyLayer
class YOLOv3(nn.Module):
"""
Original article: https://pjreddie.com/media/files/papers/YOLOv3.pdf
"""
def __init__(
self,
backbone=None,
num_classes=80,
skip_layers=(36, 61),
input_shape=(256, 256),
anchors=(
((116, 90), (156, 198), (373, 326)),
((30, 61), (62, 45), (59, 119)),
((10, 13), (16, 30), (22, 23))
),
normalized_anchors=False,
predict=False
):
super(YOLOv3, self).__init__()
# Default backbone arguments
if backbone is None:
            backbone = {
                "name": "Darknet53",
"module": "deeplodocus.app.models.darknet",
"kwargs": {
"num_channels": 3,
"include_top": False
}
}
# Scale anchors by image dimensions if the anchors are normalized
if normalized_anchors:
anchors = [[(a[0] * input_shape[1], a[1] * input_shape[0]) for a in anchor] for anchor in anchors]
self.predicting = predict
# Get the number of anchor boxes
num_anchors = len(anchors)
# Get and initialise the backbone module
backbone_module = get_specific_module(name=backbone["name"], module=backbone["module"], fatal=True)
self.backbone = backbone_module(**backbone["kwargs"])
# The keys to extract from the list
self.skip_layers = skip_layers
# CONVOLUTIONAL LAYERS/BLOCKS (ConvBlocks consist of 4 conv layers)
self.conv_1_1 = ConvLayer(in_channels=1024, out_channels=512, kernel_size=1, negative_slope=0.1)
self.conv_1_2 = ConvBlock(1024)
self.conv_1_3 = ConvLayer(in_channels=512, out_channels=1024, kernel_size=3, padding=1, negative_slope=0.1)
self.conv_1_4 = nn.Conv2d(in_channels=1024, out_channels=num_anchors * (num_classes + 5), kernel_size=1)
self.conv_2_1 = ConvLayer(in_channels=512, out_channels=256, kernel_size=1, negative_slope=0.1)
self.conv_2_2 = ConvLayer(in_channels=768, out_channels=256, kernel_size=1, negative_slope=0.1)
self.conv_2_3 = ConvBlock(512)
self.conv_2_4 = ConvLayer(in_channels=256, out_channels=512, kernel_size=3, padding=1, negative_slope=0.1)
self.conv_2_5 = nn.Conv2d(in_channels=512, out_channels=num_anchors * (num_classes + 5), kernel_size=1)
self.conv_3_1 = ConvLayer(in_channels=256, out_channels=128, kernel_size=1, negative_slope=0.1)
self.conv_3_2 = ConvLayer(in_channels=384, out_channels=128, kernel_size=1, negative_slope=0.1)
self.conv_3_3 = ConvBlock(256)
self.conv_3_4 = ConvLayer(in_channels=128, out_channels=256, kernel_size=3, padding=1, negative_slope=0.1)
self.conv_3_5 = nn.Conv2d(in_channels=256, out_channels=num_anchors * (num_classes + 5), kernel_size=1)
# UPSAMPLE LAYER
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
# YOLO LAYERS
self.yolo_layer_1 = YoloLayer(
num_classes=num_classes,
image_shape=input_shape,
anchors=anchors[0]
)
self.yolo_layer_2 = YoloLayer(
num_classes=num_classes,
image_shape=input_shape,
anchors=anchors[1]
)
self.yolo_layer_3 = YoloLayer(
num_classes=num_classes,
image_shape=input_shape,
anchors=anchors[2]
)
self.predict(predict)
def forward(self, x):
# BACKBONE
x = self.backbone(x) # b x 1024 x h/32 x w/32
# DETECTION ON LARGEST SCALE
x = self.conv_1_1(x) # b x 512 x h/32 x w/32
x = self.conv_1_2(x) # b x 512 x h/32 x w/32
output_1 = self.conv_1_3(x) # b x 1024 x h/32 x w/32
output_1 = self.conv_1_4(output_1) # b x 255 x h/32 x w/32
output_1 = self.yolo_layer_1(output_1) # First YOLO layer
# DETECTION ON MID SCALE
x = self.conv_2_1(x) #
        x = self.upsample(x)  # b x 256 x h/16 x w/16
x = torch.cat((x, self.backbone.skip[self.skip_layers[1]]), 1) # Concatenate x with second backbone skip layer
x = self.conv_2_2(x)
x = self.conv_2_3(x)
output_2 = self.conv_2_4(x)
output_2 = self.conv_2_5(output_2)
output_2 = self.yolo_layer_2(output_2)
# DETECTION ON SMALLEST SCALE
x = self.conv_3_1(x)
x = self.upsample(x)
x = torch.cat((x, self.backbone.skip[self.skip_layers[0]]), 1)
x = self.conv_3_2(x)
x = self.conv_3_3(x)
output_3 = self.conv_3_4(x)
output_3 = self.conv_3_5(output_3)
output_3 = self.yolo_layer_3(output_3)
# Return the concatenation of all three yolo layers
if self.predicting:
return torch.cat((output_1, output_2, output_3), 1)
else:
return output_1, output_2, output_3
def predict(self, mode=True):
"""
:param mode:
:return:
"""
if mode:
# Put model into evaluation mode
self.eval()
# Set predicting here and for all yolo layers
self.predicting = mode
self.yolo_layer_1.predicting = mode
self.yolo_layer_2.predicting = mode
self.yolo_layer_3.predicting = mode
def train(self, mode=True):
"""
Same as for a typical nn.Module, but also sets predict to False
:param mode:
:return:
"""
self.training = mode
for module in self.children():
module.train(mode)
self.predict(False)
return self
class ConvBlock(nn.Module):
def __init__(self, filters=1024, negative_slope=0.1):
super(ConvBlock, self).__init__()
self.conv_1 = ConvLayer(
in_channels=int(filters / 2),
out_channels=filters,
kernel_size=3,
padding=1,
negative_slope=negative_slope
)
self.conv_2 = ConvLayer(
in_channels=filters,
out_channels=int(filters / 2),
kernel_size=1,
negative_slope=negative_slope
)
self.conv_3 = ConvLayer(
in_channels=int(filters / 2),
out_channels=filters,
kernel_size=3,
padding=1,
negative_slope=negative_slope
)
self.conv_4 = ConvLayer(
in_channels=filters,
out_channels=int(filters / 2),
kernel_size=1,
negative_slope=negative_slope
)
def forward(self, x):
x = self.conv_1(x)
x = self.conv_2(x)
x = self.conv_3(x)
x = self.conv_4(x)
return x
class ConvLayer(nn.Module):
def __init__(
self, in_channels, out_channels,
kernel_size=3,
bias=False,
stride=1,
batch_norm=True,
padding=0,
negative_slope=0.0
):
super(ConvLayer, self).__init__()
self.conv_layer = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
bias=bias,
stride=stride,
padding=padding
)
if batch_norm:
self.norm_layer = nn.BatchNorm2d(out_channels)
else:
self.norm_layer = EmptyLayer()
if negative_slope == 0:
self.activation_layer = nn.ReLU()
else:
self.activation_layer = nn.LeakyReLU(negative_slope=negative_slope)
def forward(self, x):
x = self.conv_layer(x)
x = self.norm_layer(x)
x = self.activation_layer(x)
return x
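# Illustrative shape sketch (not part of the original module): with kernel_size=3 and padding=1
# the spatial size is preserved; ConvBlock maps filters/2 input channels back to filters/2
# output channels. The tensor sizes are example values.
def _example_conv_shapes():
    x = torch.randn(1, 512, 8, 8)
    y = ConvBlock(filters=1024)(x)                                                   # 1 x 512 x 8 x 8
    z = ConvLayer(in_channels=512, out_channels=1024, kernel_size=3, padding=1)(y)   # 1 x 1024 x 8 x 8
    return y.shape, z.shape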
class YoloLayer(nn.Module):
def __init__(self, anchors, num_classes, image_shape, num_anchors=3, predict=False):
super(YoloLayer, self).__init__()
self.num_classes = num_classes
self.num_anchors = len(anchors)
self.image_shape = image_shape # (height, width)
self.num_anchors = num_anchors
self.anchors = anchors
self.num_classes = num_classes
self.mse_loss = nn.MSELoss(size_average=True) # Coordinate loss
self.bce_loss = nn.BCELoss(size_average=True) # Objectiveness loss
self.ce_loss = nn.CrossEntropyLoss() # Class loss
self.predicting = predict
def forward(self, x):
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
batch_size, _, h, w = x.shape
# Stride should be 32, 16 or 8
stride = self.image_shape[1] / w
# Unpack predictions b x num_anchors x h x w x [num_classes + 5]
prediction = x.view(
batch_size,
self.num_anchors,
self.num_classes + 5,
h,
w
).permute(0, 1, 3, 4, 2).contiguous()
# Scaled anchor width and height and cell offsets
scaled_anchors = FloatTensor(self.anchors) / stride
cx = torch.arange(w).repeat(h, 1).view([1, 1, h, w]).type(FloatTensor)
cy = torch.arange(h).repeat(w, 1).t().view([1, 1, h, w]).type(FloatTensor)
# Get all outputs
bx = torch.sigmoid(prediction[..., 0]) + cx
by = torch.sigmoid(prediction[..., 1]) + cy
bw = scaled_anchors[:, 0].view(1, self.num_anchors, 1, 1) * torch.exp(prediction[..., 2])
bh = scaled_anchors[:, 1].view(1, self.num_anchors, 1, 1) * torch.exp(prediction[..., 3])
obj = torch.sigmoid(prediction[..., 4])
cls = torch.sigmoid(prediction[..., 5:])
# Recombine predictions after activations have been applied
prediction = torch.cat((
bx.view(*bx.shape, 1),
by.view(*by.shape, 1),
bw.view(*bw.shape, 1),
bh.view(*bh.shape, 1),
obj.view(*obj.shape, 1),
cls
), 4)
if self.predicting:
# Scale up by stride
prediction[..., 0:4] *= stride
# Return flattened predictions
return prediction.view(batch_size, -1, self.num_classes + 5)
else:
# If not in prediction mode, return predictions without offsets and with anchors
            return prediction, scaled_anchors
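# Example (illustrative sketch, not part of the original file; the anchor sizes
# are made up). Running a single YoloLayer on a dummy 13x13 feature map in
# prediction mode:
#
#   anchors = [(116, 90), (156, 198), (373, 326)]
#   layer = YoloLayer(anchors, num_classes=80, image_shape=(416, 416))
#   layer.predicting = True
#   dummy = torch.zeros(1, 3 * (80 + 5), 13, 13)  # b x num_anchors*(classes+5) x h x w
#   detections = layer(dummy)                     # -> shape (1, 3 * 13 * 13, 85)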
/Linum-0.9.12.tar.gz/Linum-0.9.12/linum/excel_renderer/calendar/calendar.py | from xlsxwriter import Workbook
from xlsxwriter.worksheet import Worksheet
from linum.context import ExcelRendererContext
from linum.excel_renderer.calendar.header.header import Header
from linum.excel_renderer.calendar.space.space_row import SpaceRow
from linum.excel_renderer.calendar.views.layer_list_view import LayerListView
from linum.helper import split_by_months
from linum.layer_list import LayerList
class Calendar:
def __init__(self, layer_list: LayerList, context: ExcelRendererContext):
self.context = context
self.layer_list = layer_list
def render(self, row: int, column: int, worksheet: Worksheet, workbook: Workbook):
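        """Render the calendar onto ``worksheet`` starting at ``(row, column)``.
        Months are laid out in blocks of ``context.months_in_row``: for each
        block a header, the layer list and a closing space row are rendered,
        and the vertical offset is advanced accordingly.
        """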
days_off = self.context.days_off
workdays = self.context.workdays
# Base styles
styles = self.context.styles
days_off_styles = styles.get_sub_style("days_off")
# Header styles
header_style = styles.get_sub_style("header")
days_off_header_style = days_off_styles.get_sub_style("header")
# Layer styles
layers_style = styles.get_sub_style("layers")
days_off_layers_style = days_off_styles.get_sub_style("layers")
# Space row styles
space_row_style = layers_style.get_sub_style("space_row").get_sub_style("_")
space_row_style.update({"bottom": 1, "bottom_color": 0xE0E0E0})
days_off_space_row_style = days_off_layers_style.get_sub_style("space_row").get_sub_style("_")
days_off_space_row_style.parents.insert(0, space_row_style)
# Splitting by periods
row_offset = 0
months = split_by_months(self.context.start, self.context.length)
for i in range(0, len(months), self.context.months_in_row):
m = months[i:i + self.context.months_in_row]
            d, _ = m[0]
            days = sum(n for _, n in m)
# Rendering header
header = Header(d, days, days_off, workdays,
header_style, days_off_header_style)
header.render(row + row_offset, column, worksheet, workbook)
# Rendering layer list
llv = LayerListView(self.layer_list, d, days,
layers_style, days_off_layers_style,
days_off, workdays)
offset = llv.render(row + row_offset + 3, column, worksheet, workbook)
# Rendering space row
sr = SpaceRow(d, days, days_off, workdays,
space_row_style, days_off_space_row_style)
sr.render(row + row_offset + 3 + offset, column, worksheet, workbook)
# Calculating offset
            row_offset = row_offset + 3 + offset + 1
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/storage/WhatWGStorageProvider.js | define(["dijit","dojo","dojox","dojo/require!dojox/storage/Provider,dojox/storage/manager"],function(_1,_2,_3){
_2.provide("dojox.storage.WhatWGStorageProvider");
_2.require("dojox.storage.Provider");
_2.require("dojox.storage.manager");
_2.declare("dojox.storage.WhatWGStorageProvider",[_3.storage.Provider],{initialized:false,_domain:null,_available:null,_statusHandler:null,_allNamespaces:null,_storageEventListener:null,initialize:function(){
if(_2.config["disableWhatWGStorage"]==true){
return;
}
this._domain=location.hostname;
this.initialized=true;
_3.storage.manager.loaded();
},isAvailable:function(){
try{
var _4=globalStorage[location.hostname];
}
catch(e){
this._available=false;
return this._available;
}
this._available=true;
return this._available;
},put:function(_5,_6,_7,_8){
if(this.isValidKey(_5)==false){
throw new Error("Invalid key given: "+_5);
}
_8=_8||this.DEFAULT_NAMESPACE;
_5=this.getFullKey(_5,_8);
this._statusHandler=_7;
if(_2.isString(_6)){
_6="string:"+_6;
}else{
_6=_2.toJson(_6);
}
var _9=_2.hitch(this,function(_a){
window.removeEventListener("storage",_9,false);
if(_7){
_7.call(null,this.SUCCESS,_5,null,_8);
}
});
window.addEventListener("storage",_9,false);
try{
var _b=globalStorage[this._domain];
_b.setItem(_5,_6);
}
catch(e){
this._statusHandler.call(null,this.FAILED,_5,e.toString(),_8);
}
},get:function(_c,_d){
if(this.isValidKey(_c)==false){
throw new Error("Invalid key given: "+_c);
}
_d=_d||this.DEFAULT_NAMESPACE;
_c=this.getFullKey(_c,_d);
var _e=globalStorage[this._domain];
var _f=_e.getItem(_c);
if(_f==null||_f==""){
return null;
}
_f=_f.value;
if(_2.isString(_f)&&(/^string:/.test(_f))){
_f=_f.substring("string:".length);
}else{
_f=_2.fromJson(_f);
}
return _f;
},getNamespaces:function(){
var _10=[this.DEFAULT_NAMESPACE];
var _11={};
var _12=globalStorage[this._domain];
var _13=/^__([^_]*)_/;
for(var i=0;i<_12.length;i++){
var _14=_12.key(i);
if(_13.test(_14)==true){
var _15=_14.match(_13)[1];
if(typeof _11[_15]=="undefined"){
_11[_15]=true;
_10.push(_15);
}
}
}
return _10;
},getKeys:function(_16){
_16=_16||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_16)==false){
throw new Error("Invalid namespace given: "+_16);
}
var _17;
if(_16==this.DEFAULT_NAMESPACE){
_17=new RegExp("^([^_]{2}.*)$");
}else{
_17=new RegExp("^__"+_16+"_(.*)$");
}
var _18=globalStorage[this._domain];
var _19=[];
for(var i=0;i<_18.length;i++){
var _1a=_18.key(i);
if(_17.test(_1a)==true){
_1a=_1a.match(_17)[1];
_19.push(_1a);
}
}
return _19;
},clear:function(_1b){
_1b=_1b||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_1b)==false){
throw new Error("Invalid namespace given: "+_1b);
}
var _1c;
if(_1b==this.DEFAULT_NAMESPACE){
_1c=new RegExp("^[^_]{2}");
}else{
_1c=new RegExp("^__"+_1b+"_");
}
var _1d=globalStorage[this._domain];
var _1e=[];
for(var i=0;i<_1d.length;i++){
if(_1c.test(_1d.key(i))==true){
_1e[_1e.length]=_1d.key(i);
}
}
_2.forEach(_1e,_2.hitch(_1d,"removeItem"));
},remove:function(key,_1f){
key=this.getFullKey(key,_1f);
var _20=globalStorage[this._domain];
_20.removeItem(key);
},isPermanent:function(){
return true;
},getMaximumSize:function(){
return this.SIZE_NO_LIMIT;
},hasSettingsUI:function(){
return false;
},showSettingsUI:function(){
throw new Error(this.declaredClass+" does not support a storage settings user-interface");
},hideSettingsUI:function(){
throw new Error(this.declaredClass+" does not support a storage settings user-interface");
},getFullKey:function(key,_21){
_21=_21||this.DEFAULT_NAMESPACE;
if(this.isValidKey(_21)==false){
throw new Error("Invalid namespace given: "+_21);
}
if(_21==this.DEFAULT_NAMESPACE){
return key;
}else{
return "__"+_21+"_"+key;
}
}});
_3.storage.manager.register("dojox.storage.WhatWGStorageProvider",new _3.storage.WhatWGStorageProvider());
});
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/docs/devel/thirdparty.rst | Localization libraries and packages
===================================
Weblate can be integrated into your framework using :doc:`/api`. This page
lists third-party libraries which integrate Weblate.
* `Weblate Translation Provider for Symfony Translation <https://github.com/m2mtech/weblate-translation-provider/>`_
* `Weblate plugin for Figma <https://www.figma.com/community/plugin/1053050985172660071/weblate-integration>`_
* `WeblateFX, a JavaFX-based Weblate desktop client <https://github.com/javierllorente/weblatefx>`_
* `JWeblate, a Java-based Weblate library <https://github.com/javierllorente/jweblate>`_
* `Spring MessageSource backed by a Weblate server <https://github.com/porscheinformatik/weblate-spring>`_
.. hint::
   Are you building a Weblate integration for your framework? Get listed here!
   Either submit a pull request with the changes directly or write us at
   [email protected] to get listed.
/Biblelator-0.0.8.tar.gz/Biblelator-0.0.8/README.md | Biblelator
==========
This file last updated: 2016-03-30 RJH
A (hackable) multi-window Bible editor written in Python3.
See http://Freely-Given.org/Software/Biblelator
This has been used daily by the developer as a production Bible editor since February 2016
as an alternative to Paratext for editing Paratext USFM Bible files (on Ubuntu Linux).
The first alpha release (v0.30) was advertised in mid-March 2016.
It is still being continuously developed with many more features to come
and many early features to be further extended and polished.
By including the Bible Organisational System (BibleOrgSys), Biblelator is able
to display most unencrypted Bibles that you have on your computer as translation
resources, and is also able to export Bibles into a large number of formats.
Some documentation can be found in the Documentation folder
including DevelopmentPrinciples.md, Installation.md, GettingStarted.md, KeyboardShortcuts.md,
and ForProgrammers.md.
/Flask_ClickHouse-1.0.1-py3-none-any.whl/flask_clickhouse/__init__.py | from clickhouse_driver import Client
from clickhouse_driver.defines import DEFAULT_PORT
class ClickHouse(object):
param_names = [
'PORT', 'DATABASE', 'USER', 'PASSWORD', 'CLIENT_NAME', 'CIPHERS',
'CONNECT_TIMEOUT', 'CA_CERTS', 'SEND_RECEIVE_TIMEOUT', 'VERIFY',
'PREFIX_COMPRESS_BLOCK_SIZE', 'COMPRESSION', 'SECURE', 'SSL_VERSION',
'SYNC_REQUEST_TIMEOUT'
]
def __init__(self, app=None, config_prefix='CLICKHOUSE'):
self.config_prefix = config_prefix
self.client = None
if app is not None:
self.init_app(app, config_prefix)
def init_app(self, app, config_prefix='CLICKHOUSE'):
"""Initialize the `app` for use with this :class:`~ClickHouse`. This is
called automatically if `app` is passed to :meth:`~ClickHouse.__init__`.
The app is configured according to the configuration variables
``PREFIX_HOST``, ``PREFIX_PORT``, ``PREFIX_DATABASE``,
``PREFIX_USER``, ``PREFIX_PASSWORD``, ``PREFIX_CLIENT_NAME``,
``PREFIX_CONNECT_TIMEOUT``, ``PREFIX_SEND_RECEIVE_TIMEOUT``,
``PREFIX_SYNC_REQUEST_TIMEOUT``, ``PREFIX_COMPRESS_BLOCK_SIZE``,
``PREFIX_COMPRESSION``, ``PREFIX_SECURE``, ``PREFIX_VERIFY``,
``PREFIX_SSL_VERSION``, ``PREFIX_CA_CERTS`` and ``PREFIX_CIPHERS``,
where "PREFIX" defaults to "CLICKHOUSE".
:param flask.Flask app: the application to configure for use with
this :class:`~ClickHouse`
:param str config_prefix: determines the set of configuration
variables used to configure this :class:`~ClickHouse`
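
        Example (a minimal sketch; assumes a reachable ClickHouse server and an
        existing ``default`` database)::

            from flask import Flask

            app = Flask(__name__)
            app.config['CLICKHOUSE_HOST'] = 'localhost'
            app.config['CLICKHOUSE_DATABASE'] = 'default'
            ch = ClickHouse(app)
            ch.execute('SELECT 1')  # attribute access is delegated to clickhouse_driver.Client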
"""
self.config_prefix = config_prefix
if 'clickhouse' not in app.extensions:
app.extensions['clickhouse'] = {}
if config_prefix in app.extensions['clickhouse']:
raise Exception('duplicate config_prefix "%s"' % config_prefix)
def key(suffix):
return '%s_%s' % (config_prefix, suffix)
app.config.setdefault(key('HOST'), 'localhost')
app.config.setdefault(key('PORT'), DEFAULT_PORT)
app.config.setdefault(key('DATABASE'), app.name)
if not isinstance(app.config[key('PORT')], int):
raise TypeError('%s_PORT must be an integer' % config_prefix)
kwargs = {}
for param in self.param_names:
value = app.config.get(key(param))
if value is not None:
kwargs[param.lower()] = value
self.client = Client(app.config[key('HOST')], **kwargs)
app.extensions['clickhouse'][config_prefix] = self
def __getattr__(self, name):
return getattr(self.client, name)
def __str__(self):
        return f'<ClickHouse: {self.host}>'
/maxbot-0.3.0b2-py3-none-any.whl/maxbot/channels/facebook.py | import hmac
import logging
from functools import cached_property
from urllib.parse import urljoin
import httpx
from ..maxml import PoolLimitSchema, Schema, TimeoutSchema, fields
logger = logging.getLogger(__name__)
class Gateway:
"""Facebook sender and verifier incoming messages."""
def __init__(self, app_secret, access_token, **kwargs):
"""Create a new class instance.
:param str app_secret: Facebook application secret
:param str access_token: Facebook access_token
:param dict kwargs: Arguments for creating HTTPX asynchronous client.
"""
self.app_secret = app_secret
self.access_token = access_token
self.httpx_client = httpx.AsyncClient(
base_url="https://graph.facebook.com/v15.0", **kwargs
)
async def send_request(self, json_data):
"""Send request to Facebook.
:param dict json_data: additional params
:return dict: response data
"""
url = f"/me/messages?access_token={self.access_token}"
response = await self.httpx_client.post(url, json=json_data)
response.raise_for_status()
return response.json()
def verify_token(self, data, headers):
"""Validate Payloads.
@See https://developers.facebook.com/docs/messenger-platform/webhooks#event-notifications
:param bytes data: request.data
:param dict headers: request.headers
        :return: HTTP status code (200, 403 or 501)
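
        Example (illustrative sketch with made-up secrets)::

            gw = Gateway(app_secret="secret", access_token="token")
            body = b'{"object": "page", "entry": []}'
            signature = "sha1=" + hmac.new(b"secret", msg=body, digestmod="sha1").hexdigest()
            assert gw.verify_token(body, {"x-hub-signature": signature}) == 200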
"""
header_signature = headers.get("x-hub-signature")
if header_signature is None:
return 403
sha_name, signature = header_signature.split("=")
if sha_name != "sha1":
return 501
mac = hmac.new(bytes(self.app_secret, "utf-8"), msg=data, digestmod="sha1").hexdigest()
if not hmac.compare_digest(mac, signature):
return 403
return 200
class _Api:
"""Simple Facebook graph api client.
* send text message
* send image message
"""
def __init__(self, gateway):
"""Create a new class instance.
:param Gateway gateway
"""
self.gateway = gateway
async def send_text(self, recipient_id, text):
"""Send text message.
@See https://developers.facebook.com/docs/messenger-platform/reference/send-api/
:param str recipient_id: Facebook recipient.id
:param str text: Message text
"""
params = {
"recipient": {"id": recipient_id},
"message": {"text": text},
"messaging_type": "RESPONSE",
}
await self.gateway.send_request(params)
async def send_image(self, recipient_id, media_url):
"""Send image message.
@See https://developers.facebook.com/docs/messenger-platform/reference/send-api/
:param str recipient_id: Facebook recipient.id
:param str media_url: Image URL
"""
params = {
"recipient": {"id": recipient_id},
"message": {
"attachment": {"type": "image", "payload": {"is_reusable": True, "url": media_url}}
},
}
await self.gateway.send_request(params)
class FacebookChannel:
"""Channel for Facebook Bots.
Set webhook for Messenger Platform:
https://developers.facebook.com/docs/messenger-platform/webhooks
You need to install additional dependencies to use this channel.
Try `pip install -U maxbot[facebook]`.
There are two channel arguments (chargs) in this channel.
dict messaging: incoming data (entry.messaging[0])
@See https://developers.facebook.com/docs/messenger-platform/reference/webhook-events
A :class:`Gateway` object represents the Facebook sender and verifier you are working with.
"""
class ConfigSchema(Schema):
"""Configuration schema for Facebook bot."""
# Facebook appsecret
# @See https://developers.facebook.com/docs/facebook-login/security/#appsecret
app_secret = fields.Str(required=True)
# Facebook access_token
# @See https://developers.facebook.com/docs/facebook-login/security/#appsecret
access_token = fields.Str(required=True)
# Default HTTP request timeouts
# @See https://www.python-httpx.org/advanced/#timeout-configuration
timeout = fields.Nested(TimeoutSchema())
# Pool limit configuration
# @See https://www.python-httpx.org/advanced/#pool-limit-configuration
limits = fields.Nested(PoolLimitSchema())
@cached_property
def gateway(self):
"""Return Facebook gateway connected to you bot.
:return Gateway:
"""
return Gateway(
self.config["app_secret"],
self.config["access_token"],
timeout=self.config.get("timeout", TimeoutSchema.DEFAULT),
limits=self.config.get("limits", PoolLimitSchema.DEFAULT),
)
@cached_property
def _api(self):
return _Api(self.gateway)
async def create_dialog(self, messaging: dict):
"""Create a dialog object from the incomming update.
:param dict messaging: an incoming update.
:return dict: a dialog with the schema :class:`~maxbot.schemas.DialogSchema`
"""
recipient_id = messaging.get("recipient", {}).get("id")
if recipient_id:
return {"channel_name": self.name, "user_id": str(recipient_id)}
return None
async def send_text(self, command: dict, dialog: dict):
"""Send an text message to the channel.
@See https://developers.facebook.com/docs/messenger-platform/reference/send-api/
:param dict command: a command with the payload :attr:`~maxbot.schemas.CommandSchema.text`.
:param dict dialog: a dialog we respond in, with the schema :class:`~maxbot.schemas.DialogSchema`
"""
await self._api.send_text(dialog["user_id"], command["text"].render())
async def send_image(self, command: dict, dialog: dict):
"""Send an image message to the channel.
@See https://developers.facebook.com/docs/messenger-platform/reference/send-api/
:param dict command: a command with the payload :attr:`~maxbot.schemas.CommandSchema.image`.
:param dict dialog: a dialog we respond in, with the schema :class:`~maxbot.schemas.DialogSchema`
"""
caption = command["image"].get("caption")
if caption:
await self._api.send_text(dialog["user_id"], caption.render())
await self._api.send_image(dialog["user_id"], command["image"]["url"])
async def receive_text(self, messaging: dict):
"""Receive an text message from the channel.
@See https://developers.facebook.com/docs/messenger-platform/reference/webhook-events/
@See https://developers.facebook.com/docs/messenger-platform/reference/webhook-events/messages
:param dict messaging: an incoming update.
:return dict: a message with the payload :class:`~maxbot.schemas.MessageSchema.text`
"""
if messaging.get("message", {}).get("text"):
return {"text": messaging["message"]["text"]}
return None
async def receive_image(self, messaging: dict):
"""Receive an image message from the channel.
@See https://developers.facebook.com/docs/messenger-platform/reference/webhook-events/
@See https://developers.facebook.com/docs/messenger-platform/reference/webhook-events/messages#payload
:param dict messaging: an incoming update.
:return dict: a message with the payload :class:`~maxbot.schemas.MessageSchema.image`
"""
if messaging.get("message", {}).get("attachments"):
images = [
a["payload"]["url"]
for a in messaging["message"]["attachments"]
if a["type"] == "image"
]
if images:
return {"image": {"url": images[0]}}
return None
def blueprint(self, callback, execute_once, public_url=None, webhook_path=None):
"""Create web application blueprint to receive incoming updates.
:param callable callback: a callback for received messages.
:param callable execute_once: Execute only for first WEB application worker.
:param string public_url: Base url to register webhook.
:param string webhook_path: An url path to receive incoming updates.
:return Blueprint: Blueprint for sanic app.
"""
from sanic import Blueprint
from sanic.response import empty
bp = Blueprint(self.name)
if webhook_path is None:
webhook_path = f"/{self.name}"
@bp.post(webhook_path)
async def webhook(request):
# @See https://developers.facebook.com/docs/messenger-platform/webhooks#event-notifications
logger.debug("%s", request.json)
http_code = self.gateway.verify_token(request.body, request.headers)
if http_code != 200:
return empty(status=http_code)
            # The request body may contain many update messages (entry objects)
for event in request.json["entry"]:
messaging = event.get("messaging")
if messaging:
# https://developers.facebook.com/docs/messenger-platform/reference/webhook-events/
# Array containing one messaging object. Note that even though this is an array,
# it will only contain one messaging object.
(messaging,) = messaging
if not messaging.get("message", {}).get("is_echo"):
await callback(messaging)
return empty()
if public_url:
webhook_url = urljoin(public_url, webhook_path)
logger.warning(
f"The {self.name} platform has no suitable api, register a webhook yourself {webhook_url}."
)
        return bp
/HTSQL-2.3.3.tar.gz/HTSQL-2.3.3/src/htsql/tweak/shell/vendor/codemirror-2.13/mode/yaml/yaml.js | CodeMirror.defineMode("yaml", function() {
var cons = ['true', 'false', 'on', 'off', 'yes', 'no'];
var keywordRegex = new RegExp("\\b(("+cons.join(")|(")+"))$", 'i');
return {
token: function(stream, state) {
var ch = stream.peek();
var esc = state.escaped;
state.escaped = false;
/* comments */
if (ch == "#") { stream.skipToEnd(); return "comment"; }
if (state.literal && stream.indentation() > state.keyCol) {
stream.skipToEnd(); return "string";
} else if (state.literal) { state.literal = false; }
if (stream.sol()) {
state.keyCol = 0;
state.pair = false;
state.pairStart = false;
/* document start */
if(stream.match(/---/)) { return "def"; }
/* document end */
if (stream.match(/\.\.\./)) { return "def"; }
/* array list item */
if (stream.match(/\s*-\s+/)) { return 'meta'; }
}
/* pairs (associative arrays) -> key */
if (!state.pair && stream.match(/^\s*([a-z0-9\._-])+(?=\s*:)/i)) {
state.pair = true;
state.keyCol = stream.indentation();
return "atom";
}
if (state.pair && stream.match(/^:\s*/)) { state.pairStart = true; return 'meta'; }
/* inline pairs/lists */
if (stream.match(/^(\{|\}|\[|\])/)) {
if (ch == '{')
state.inlinePairs++;
else if (ch == '}')
state.inlinePairs--;
else if (ch == '[')
state.inlineList++;
else
state.inlineList--;
return 'meta';
}
      /* list separator */
if (state.inlineList > 0 && !esc && ch == ',') {
stream.next();
return 'meta';
}
      /* pairs separator */
if (state.inlinePairs > 0 && !esc && ch == ',') {
state.keyCol = 0;
state.pair = false;
state.pairStart = false;
stream.next();
return 'meta';
}
/* start of value of a pair */
if (state.pairStart) {
/* block literals */
if (stream.match(/^\s*(\||\>)\s*/)) { state.literal = true; return 'meta'; };
/* references */
if (stream.match(/^\s*(\&|\*)[a-z0-9\._-]+\b/i)) { return 'variable-2'; }
/* numbers */
if (state.inlinePairs == 0 && stream.match(/^\s*-?[0-9\.\,]+\s?$/)) { return 'number'; }
if (state.inlinePairs > 0 && stream.match(/^\s*-?[0-9\.\,]+\s?(?=(,|}))/)) { return 'number'; }
/* keywords */
if (stream.match(keywordRegex)) { return 'keyword'; }
}
/* nothing found, continue */
state.pairStart = false;
state.escaped = (ch == '\\');
stream.next();
return null;
},
startState: function() {
return {
pair: false,
pairStart: false,
keyCol: 0,
inlinePairs: 0,
inlineList: 0,
literal: false,
escaped: false
};
}
};
});
CodeMirror.defineMIME("text/x-yaml", "yaml");
/ER-Evaluation-2.1.0.tar.gz/ER-Evaluation-2.1.0/er_evaluation/utils/_utils.py | import itertools
import logging
from importlib import resources
import numpy as np
import pandas as pd
from er_evaluation.data_structures import MembershipVector
def load_module_parquet(module, filename):
"""
Load parquet file from a submodule using pyarrow engine.
Args:
module (string): Path to a module, such as "er_evaluation.datasets.raw_data.rldata"
filename (string): Name of the parquet file.
Returns:
pandas DataFrame
"""
with resources.open_binary(module, filename) as f:
data = pd.read_parquet(f, engine="pyarrow")
return data
def load_module_tsv(module, filename, dtype=str):
"""
Load tsv file from a submodule.
Args:
module (string): Path to a module, such as "er_evaluation.datasets.raw_data.rldata"
filename (string): Name of the tsv file.
dtype: Data type to use to read the file. Defaults to str.
Returns:
pandas DataFrame
"""
with resources.open_text(module, filename) as f:
data = pd.read_csv(f, sep="\t", dtype=dtype)
return data
def sample_clusters(membership, weights="uniform", sample_prop=0.2, size=None, replace=True, random_state=1):
"""
Sample clusters from a membership vector.
Args:
membership (Series): Membership vector.
weights (str, optional): Probability weights to use. Should be one "uniform", "cluster_size", or a pandas Series indexed by cluster identifiers and with values corresponding to probability weights. Defaults to "uniform".
        sample_prop (float, optional): Proportion of clusters to sample. Defaults to 0.2.
        size (int, optional): Exact number of clusters to sample; overrides ``sample_prop`` if given. Defaults to None.
        replace (bool, optional): Whether or not to sample with replacement. Defaults to True.
random_state (int, optional): Random seed. Defaults to 1.
Returns:
Series: Membership vector with elements corresponding to sampled clusters.
Examples:
Load a toy dataset:
>>> from er_evaluation.datasets import load_rldata10000_disambiguations
>>> predictions, reference = load_rldata10000_disambiguations()
Sample a set of ground truth clusters uniformly at random:
>>> sample = sample_clusters(reference, weights="uniform", sample_prop=0.2)
Compute pairwise_precision on the sample:
>>> from er_evaluation.metrics import pairwise_precision
>>> pairwise_precision(predictions['name_by'], sample)
0.96
Compare to the true precision on the full data:
>>> pairwise_precision(predictions['name_by'], reference)
0.7028571428571428
The metric computed on a sample is over-optimistic (0.96 versus true precision of 0.7). Instead, use an estimator to accurately estimate pairwise precision from a sample, which returns a point estimate and its standard deviation estimate:
>>> from er_evaluation.estimators import pairwise_precision_estimator
>>> pairwise_precision_estimator(predictions['name_by'], sample, weights="uniform")
(0.7633453805063894, 0.04223296142335369)
"""
membership = MembershipVector(membership)
np.random.seed(random_state)
if size is not None:
sample_size = size
else:
sample_size = int(sample_prop * membership.nunique())
if isinstance(weights, pd.Series):
selected_clusters = np.random.choice(
weights.index,
size=sample_size,
replace=replace,
p=weights.values / np.sum(weights.values),
)
elif isinstance(weights, str):
if weights == "uniform":
selected_clusters = np.random.choice(membership.unique(), size=sample_size, replace=replace)
elif weights == "cluster_size":
selected_clusters = np.random.choice(
membership.values,
size=sample_size,
replace=replace,
)
else:
raise ValueError(
f"Invalid weights argument. Valid strings are 'uniform' or 'cluster_size', instead got {weights}"
)
else:
raise ValueError(
f"Invalid weights argument. Should be a string or a pandas Series, instead got type {type(weights)}."
)
return membership[membership.isin(selected_clusters)]
def relevant_prediction_subset(prediction, sample):
"""Return predicted clusters which intersect sampled clusters."""
prediction = MembershipVector(prediction)
sample = MembershipVector(sample)
I = prediction.index.isin(sample.index)
J = prediction.isin(prediction[I].values)
relevant_prediction = prediction[J]
if len(relevant_prediction) == 0:
logging.warning("Relevant prediction subset is empty: predicted clusters do not overlap sample clusters.")
return relevant_prediction
def expand_grid(**kwargs):
"""
Create DataFrame from all combination of elements.
Args:
kwargs: Dictionary of elements to combine. Keys become column names.
Returns:
DataFrame: DataFrame with columns corresponding to argument names and rows for each combination of argument values.
Examples:
>>> expand_grid(col1=[1,2], col2=["a", "b"])
col1 col2
0 1 a
1 1 b
2 2 a
3 2 b
>>> expand_grid(col1={1:"something", 2:"something"}, col2=["a", "b"])
col1 col2
0 1 a
1 1 b
2 2 a
3 2 b
"""
    return pd.DataFrame.from_records(itertools.product(*kwargs.values()), columns=kwargs.keys())
/MDPOW-0.8.0.tar.gz/MDPOW-0.8.0/mdpow/equil.py | import pickle
import os, errno
import shutil
import MDAnalysis as mda
try:
import gromacs.setup, gromacs.cbook
except (ImportError, OSError):
raise ImportError("Gromacs installation not found, source GMXRC?")
from gromacs.utilities import in_dir, realpath, asiterable, AttributeDict
import gromacs.utilities
from . import config
from . import forcefields
from .restart import Journalled
import logging
logger = logging.getLogger('mdpow.equil')
# ITP <-- forcefields.get_solvent_model(id).itp
# BOX <-- forcefields.get_solvent_model(id).coordinates
# TODO: change to water distance 1.2 in the future (1.0 for
# compatibility with our SAMPL5 runs)
#: minimum distance between solute and box surface (in nm)
DIST = {'water': 1.0, 'octanol': 1.5, 'cyclohexane': 1.5, 'wetoctanol': 1.5}
class Simulation(Journalled):
"""Simple MD simulation of a single compound molecule in water.
Typical use ::
S = Simulation(molecule='DRUG')
S.topology(itp='drug.itp')
S.solvate(struct='DRUG-H.pdb')
S.energy_minimize()
S.MD_relaxed()
S.MD()
    .. Note:: The OPLS/AA force field and the TIP4P water molecule are the
       defaults; changing this is possible but will require provision of
customized itp, mdp and template top files at various stages.
"""
#: Keyword arguments to pre-set some file names; they are keys in :attr:`Simulation.files`.
filekeys = ('topology', 'processed_topology', 'structure', 'solvated', 'ndx',
'energy_minimized', 'MD_relaxed', 'MD_restrained', 'MD_NPT')
topdir_default = "Equilibrium"
dirname_default = os.path.curdir
solvent_default = 'water'
#: Coordinate files of the full system in increasing order of advancement of
#: the protocol; the later the better. The values are keys into :attr:`Simulation.files`.
coordinate_structures = ('solvated', 'energy_minimized', 'MD_relaxed',
'MD_restrained', 'MD_NPT')
checkpoints = ('solvated','energy_minimized','MD_relaxed','MD_restrained','MD_NPT')
#: Check list of all methods that can be run as an independent protocol; see also
#: :meth:`Simulation.get_protocol` and :class:`restart.Journal`
protocols = ("MD_NPT", "MD_NPT_run", # *_run as dummies for the ...
"MD_relaxed", "MD_relaxed_run", # ...checkpointing logic
"MD_restrained", "MD_restrained_run",
"energy_minimize", "solvate", "topology")
#: Default Gromacs *MDP* run parameter files for the different stages.
#: (All are part of the package and are found with :func:`mdpow.config.get_template`.)
mdp_defaults = {'MD_relaxed': 'NPT_opls.mdp',
'MD_restrained': 'NPT_opls.mdp',
'MD_NPT': 'NPT_opls.mdp',
'energy_minimize': 'em_opls.mdp',
}
def __init__(self, molecule=None, **kwargs):
"""Set up Simulation instance.
The *molecule* of the compound molecule should be supplied. Existing files
(which have been generated in previous runs) can also be supplied.
:Keywords:
*molecule*
Identifier for the compound molecule. This is the same as the
entry in the ``[ molecule ]`` section of the itp file. ["DRUG"]
*filename*
If provided and *molecule* is ``None`` then load the instance from
the pickle file *filename*, which was generated with
:meth:`~mdpow.equil.Simulation.save`.
*dirname*
base directory; all other directories are created under it
*forcefield*
'OPLS-AA' or 'CHARMM' or 'AMBER'
*solvent*
'water' or 'octanol' or 'cyclohexane' or 'wetoctanol'
*solventmodel*
``None`` chooses the default (e.g, :data:`mdpow.forcefields.DEFAULT_WATER_MODEL`
for ``solvent == "water"``. Other options are the models defined in
:data:`mdpow.forcefields.GROMACS_WATER_MODELS`. At the moment, there are no
alternative parameterizations included for other solvents.
*mdp*
dict with keys corresponding to the stages ``energy_minimize``,
``MD_restrained``, ``MD_relaxed``,
``MD_NPT`` and values *mdp* file names (if no entry then the
package defaults are used)
*distance*
minimum distance between solute and closest box face
*kwargs*
advanced keywords for short-circuiting; see
:data:`mdpow.equil.Simulation.filekeys`.
"""
self.__cache = {}
filename = kwargs.pop('filename', None)
dirname = kwargs.pop('dirname', self.dirname_default)
forcefield = kwargs.pop('forcefield', 'OPLS-AA')
solvent = kwargs.pop('solvent', self.solvent_default)
# mdp files --- should get values from default runinput.cfg
# None values in the kwarg mdp dict are ignored
# self.mdp: key = stage, value = path to MDP file
# 'water' will choose the default ('tip4p'), other choices are
# 'tip3p', 'spc', 'spce', 'm24', for water; no choices
# available for 'cyclohexane' and 'octanol'
solventmodel = kwargs.pop('solventmodel', None)
mdp_kw = kwargs.pop('mdp', {})
self.mdp = dict((stage, config.get_template(fn)) for stage,fn in self.mdp_defaults.items())
self.mdp.update(dict((stage, config.get_template(fn)) for stage,fn in mdp_kw.items() if fn is not None))
if molecule is None and filename is not None:
# load from pickle file
self.load(filename)
self.filename = filename
kwargs = {} # for super
else:
self.molecule = molecule or 'DRUG'
self.dirs = AttributeDict(
basedir=realpath(dirname), # .../Equilibrium/<solvent>
includes=list(asiterable(kwargs.pop('includes',[]))) + [config.includedir],
)
# pre-set filenames: keyword == variable name
self.files = AttributeDict([(k, kwargs.pop(k, None)) for k in self.filekeys])
self.deffnm = kwargs.pop("deffnm", "md")
if self.files.topology:
# assume that a user-supplied topology lives in a 'standard' top dir
# that includes the necessary itp file(s)
self.dirs.topology = realpath(os.path.dirname(self.files.topology))
self.dirs.includes.append(self.dirs.topology)
self.forcefield = forcefield
self.solvent_type = solvent
self.solventmodel_identifier = forcefields.get_solvent_identifier(
solvent,
model=solventmodel,
forcefield=forcefield,
)
if self.solventmodel_identifier is None:
msg = "No parameters for solvent {0} and solventmodel {1} available.".format(
solvent, solventmodel)
logger.error(msg)
raise ValueError(msg)
self.solventmodel = forcefields.get_solvent_model(
self.solventmodel_identifier,
forcefield=forcefield,
)
distance = kwargs.pop('distance', None)
distance = distance if distance is not None else DIST[solvent]
self.solvent = AttributeDict(itp=self.solventmodel.itp,
box=self.solventmodel.coordinates,
distance=distance)
self.filename = filename or self.solvent_type+'.simulation'
super(Simulation, self).__init__(**kwargs)
def BASEDIR(self, *args):
return os.path.join(self.dirs.basedir, *args)
def save(self, filename=None):
"""Save instance to a pickle file.
The default filename is the name of the file that was last loaded from
or saved to.
"""
if filename is None:
if self.filename is None:
self.filename = filename or self.solvent_type+'.simulation'
logger.warning("No filename known, saving instance under name %r", self.filename)
filename = self.filename
else:
self.filename = filename
with open(filename, 'wb') as f:
pickle.dump(self, f)
logger.debug("Instance pickled to %(filename)r" % vars())
def load(self, filename=None):
"""Re-instantiate class from pickled file."""
if filename is None:
if self.filename is None:
self.filename = self.molecule.lower() + '.pickle'
logger.warning("No filename known, trying name %r", self.filename)
filename = self.filename
with open(filename, 'rb') as f:
instance = pickle.load(f)
self.__dict__.update(instance.__dict__)
logger.debug("Instance loaded from %(filename)r" % vars())
def make_paths_relative(self, prefix=os.path.curdir):
"""Hack to be able to copy directories around: prune basedir from paths.
.. Warning:: This is not guaranteed to work for all paths. In particular,
check :attr:`mdpow.equil.Simulation.dirs.includes` and adjust
manually if necessary.
"""
def assinglet(m):
if len(m) == 1:
return m[0]
elif len(m) == 0:
return None
return m
basedir = self.dirs.basedir
for key, fn in self.files.items():
try:
self.files[key] = fn.replace(basedir, prefix)
except AttributeError:
pass
for key, val in self.dirs.items():
fns = asiterable(val) # treat them all as lists
try:
self.dirs[key] = assinglet([fn.replace(basedir, prefix) for fn in fns])
except AttributeError:
pass
for key, fn in self.mdp.items():
try:
self.mdp[key] = fn.replace(basedir, prefix)
except AttributeError:
pass
logger.warning("make_paths_relative(): check/manually adjust %s.dirs.includes = %r !",
self.__class__.__name__, self.dirs.includes)
def topology(self, itp='drug.itp', prm=None, **kwargs):
"""Generate a topology for compound *molecule*.
:Keywords:
*itp*
Gromacs itp file; will be copied to topology dir and
included in topology
*prm*
Gromacs prm file; if given, will be copied to topology
dir and included in topology
*dirname*
name of the topology directory ["top"]
*kwargs*
see source for *top_template*, *topol*
"""
self.journal.start('topology')
dirname = kwargs.pop('dirname', self.BASEDIR('top'))
self.dirs.topology = realpath(dirname)
setting = forcefields.get_ff_paths(self.forcefield)
template = forcefields.get_top_template(self.solvent_type)
top_template = config.get_template(kwargs.pop('top_template', template))
topol = kwargs.pop('topol', os.path.basename(top_template))
self.top_template = top_template
itp = os.path.realpath(itp)
_itp = os.path.basename(itp)
if prm is None:
prm_kw = ''
else:
prm = os.path.realpath(prm)
_prm = os.path.basename(prm)
prm_kw = '#include "{}"'.format(_prm)
with in_dir(dirname):
shutil.copy(itp, _itp)
if prm is not None:
shutil.copy(prm, _prm)
gromacs.cbook.edit_txt(top_template,
[(r'#include +"oplsaa\.ff/forcefield\.itp"',
r'oplsaa\.ff/',
setting[0]),
(r'#include +"compound\.itp"',
r'compound\.itp',
_itp),
(r'#include +"oplsaa\.ff/tip4p\.itp"',
r'oplsaa\.ff/tip4p\.itp',
setting[0] + self.solvent.itp),
(r'#include +"oplsaa\.ff/ions_opls\.itp"',
r'oplsaa\.ff/ions_opls\.itp',
setting[1]),
(r'#include +"compound\.prm"',
r'#include +"compound\.prm"',
prm_kw),
(r'#include +"water\.itp"',
r'water\.itp',
setting[2]),
(r'Compound',
'solvent',
self.solvent_type),
(r'Compound',
'DRUG',
self.molecule),
(r'DRUG\s*1',
'DRUG',
self.molecule),
],
newname=topol)
logger.info('[%(dirname)s] Created topology %(topol)r that includes %(_itp)r', vars())
# update known files and dirs
self.files.topology = realpath(dirname, topol)
if not self.dirs.topology in self.dirs.includes:
self.dirs.includes.append(self.dirs.topology)
self.journal.completed('topology')
return {'dirname': dirname, 'topol': topol}
@staticmethod
def _setup_solvate(**kwargs):
"""Solvate structure in a single solvent box."""
return gromacs.setup.solvate(**kwargs)
def solvate(self, struct=None, **kwargs):
"""Solvate structure *struct* in a box of solvent.
The solvent is determined with the *solvent* keyword to the constructor.
:Keywords:
*struct*
pdb or gro coordinate file (if not supplied, the value is used
that was supplied to the constructor of :class:`~mdpow.equil.Simulation`)
*distance*
              minimum distance between solute and the closest box face; the default depends
on the solvent but can be set explicitly here, too.
*bt*
any box type understood by :func:`gromacs.editconf` (``-bt``):
* "triclinic" is a triclinic box,
* "cubic" is a rectangular box with all sides equal;
* "dodecahedron" represents a rhombic dodecahedron;
* "octahedron" is a truncated octahedron.
The default is "dodecahedron".
*kwargs*
All other arguments are passed on to :func:`gromacs.setup.solvate`, but
set to sensible default values. *top* and *water* are always fixed.
"""
self.journal.start('solvate')
self.dirs.solvation = realpath(kwargs.setdefault('dirname', self.BASEDIR('solvation')))
kwargs['struct'] = self._checknotempty(struct or self.files.structure, 'struct')
kwargs['top'] = self._checknotempty(self.files.topology, 'top')
kwargs['water'] = self.solvent.box
kwargs.setdefault('mainselection', '"%s"' % self.molecule) # quotes are needed for make_ndx
kwargs.setdefault('distance', self.solvent.distance)
boxtype = kwargs.pop('bt', None)
boxtype = boxtype if boxtype is not None else "dodecahedron"
if boxtype not in ("dodecahedron", "triclinic", "cubic", "octahedron"):
msg = "Invalid boxtype '{0}', not suitable for 'gmx editconf'.".format(boxtype)
logger.error(msg)
raise ValueError(msg)
kwargs['bt'] = boxtype
kwargs['includes'] = asiterable(kwargs.pop('includes',[])) + self.dirs.includes
params = self._setup_solvate(**kwargs)
self.files.structure = kwargs['struct']
self.files.solvated = params['struct']
self.files.ndx = params['ndx']
# we can also make a processed topology right now
self.processed_topology(**kwargs)
self.journal.completed('solvate')
return params
def processed_topology(self, **kwargs):
"""Create a portable topology file from the topology and the solvated system."""
if self.files.solvated is None or not os.path.exists(self.files.solvated):
self.solvate(**kwargs)
kwargs['topol'] = self.files.topology
kwargs['struct'] = self.files.solvated
kwargs['includes'] = self.dirs.includes
self.files.processed_topology = gromacs.cbook.create_portable_topology(**kwargs)
return self.files.processed_topology
def energy_minimize(self, **kwargs):
"""Energy minimize the solvated structure on the local machine.
        *kwargs* are passed to :func:`gromacs.setup.energy_minimize` but if
:meth:`~mdpow.equil.Simulation.solvate` step has been carried out
previously all the defaults should just work.
"""
self.journal.start('energy_minimize')
self.dirs.energy_minimization = realpath(kwargs.setdefault('dirname', self.BASEDIR('em')))
kwargs['top'] = self.files.topology
kwargs.setdefault('struct', self.files.solvated)
kwargs.setdefault('mdp', self.mdp['energy_minimize'])
kwargs['mainselection'] = None
kwargs['includes'] = asiterable(kwargs.pop('includes',[])) + self.dirs.includes
params = gromacs.setup.energy_minimize(**kwargs)
self.files.energy_minimized = params['struct']
self.journal.completed('energy_minimize')
return params
def _MD(self, protocol, **kwargs):
"""Basic MD driver for this Simulation. Do not call directly."""
self.journal.start(protocol)
kwargs.setdefault('dirname', self.BASEDIR(protocol))
kwargs.setdefault('deffnm', self.deffnm)
kwargs.setdefault('mdp', config.get_template('NPT_opls.mdp'))
self.dirs[protocol] = realpath(kwargs['dirname'])
setupMD = kwargs.pop('MDfunc', gromacs.setup.MD)
kwargs['top'] = self.files.topology
kwargs['includes'] = asiterable(kwargs.pop('includes',[])) + self.dirs.includes
kwargs['ndx'] = self.files.ndx
kwargs['mainselection'] = None # important for SD (use custom mdp and ndx!, gromacs.setup._MD)
self._checknotempty(kwargs['struct'], 'struct')
if not os.path.exists(kwargs['struct']):
# struct is not reliable as it depends on qscript so now we just try everything...
struct = gromacs.utilities.find_first(kwargs['struct'], suffices=['pdb', 'gro'])
if struct is None:
logger.error("Starting structure %(struct)r does not exist (yet)" % kwargs)
raise IOError(errno.ENOENT, "Starting structure not found", kwargs['struct'])
else:
logger.info("Found starting structure %r (instead of %r).", struct, kwargs['struct'])
kwargs['struct'] = struct
# now setup the whole simulation (this is typically gromacs.setup.MD() )
params = setupMD(**kwargs)
# params['struct'] is md.gro but could also be md.pdb --- depends entirely on qscript
self.files[protocol] = params['struct']
# Gromacs 4.5.x 'mdrun -c PDB' fails if it cannot find 'residuetypes.dat'
# so instead of fuffing with GMXLIB we just dump it into the directory
try:
shutil.copy(config.topfiles['residuetypes.dat'], self.dirs[protocol])
except IOError:
logger.warning("Failed to copy 'residuetypes.dat': mdrun will likely fail to write a final structure")
self.journal.completed(protocol)
return params
def MD_relaxed(self, **kwargs):
"""Short MD simulation with *timestep* = 0.1 fs to relax strain.
Energy minimization does not always remove all problems and LINCS
constraint errors occur. A very short *runtime* = 5 ps MD with very
short integration time step *dt* tends to solve these problems.
.. See Also:: :func:`gromacs.setup.MD`
:Keywords:
*struct*
starting coordinates (typically guessed)
*mdp*
MDP run parameter file for Gromacs
*qscript*
list of queuing system submission scripts; probably a
good idea to always include the default "local.sh" even
if you have your own ["local.sh"]
*qname*
name of the job as shown in the queuing system
*startdir*
**advanced uses**: path of the directory on a remote
system, which will be hard-coded into the queuing system
script(s); see :func:`gromacs.setup.MD` and
:class:`gromacs.manager.Manager`
"""
# user structure or restrained or solvated
kwargs.setdefault('struct', self.files.energy_minimized)
kwargs.setdefault('dt', 0.0001) # ps
kwargs.setdefault('runtime', 5) # ps
kwargs.setdefault('mdp', self.mdp['MD_relaxed'])
return self._MD('MD_relaxed', **kwargs)
def MD_restrained(self, **kwargs):
"""Short MD simulation with position restraints on compound.
See documentation of :func:`gromacs.setup.MD_restrained` for
details. The following keywords can not be changed: top, mdp, ndx,
mainselection
.. Note:: Position restraints are activated with ``-DPOSRES`` directives
for :func:`gromacs.grompp`. Hence this will only work if the
compound itp file does indeed contain a ``[ posres ]``
section that is protected by a ``#ifdef POSRES`` clause.
.. See Also:: :func:`gromacs.setup.MD_restrained`
:Keywords:
*struct*
starting coordinates (leave empty for inspired guess of file name)
*mdp*
MDP run parameter file for Gromacs
*qscript*
list of queuing system submission scripts; probably a
good idea to always include the default "local.sh" even
if you have your own ["local.sh"]
*qname*
name of the job as shown in the queuing system
*startdir*
**advanced uses**: path of the directory on a remote
system, which will be hard-coded into the queuing system
script(s); see :func:`gromacs.setup.MD` and
:class:`gromacs.manager.Manager`
"""
kwargs.setdefault('struct',
self._lastnotempty([self.files.energy_minimized, self.files.MD_relaxed]))
kwargs.setdefault('mdp', self.mdp['MD_restrained'])
kwargs['MDfunc'] = gromacs.setup.MD_restrained
return self._MD('MD_restrained', **kwargs)
def MD_NPT(self, **kwargs):
"""Short NPT MD simulation.
See documentation of :func:`gromacs.setup.MD` for details such
as *runtime* or specific queuing system options. The following
keywords can not be changed: *top*, *mdp*, *ndx*, *mainselection*.
.. Note:: If the system crashes (with LINCS errors), try initial
equilibration with timestep *dt* = 0.0001 ps (0.1 fs instead
of 2 fs) and *runtime* = 5 ps as done in :meth:`~Simulation.MD_relaxed`
.. See Also:: :func:`gromacs.setup.MD` and :meth:`Simulation.MD_relaxed`
:Keywords:
*struct*
starting conformation; by default, the *struct* is the last frame
from the position restraints run, or, if this file cannot be
found (e.g. because :meth:`Simulation.MD_restrained` was not run)
it falls back to the relaxed and then the solvated system.
*mdp*
MDP run parameter file for Gromacs
*runtime*
total run time in ps
*qscript*
list of queuing system scripts to prepare; available values are
in :data:`gromacs.config.templates` or you can provide your own
filename(s) in the current directory (see :mod:`gromacs.qsub` for
the format of the templates)
*qname*
name of the job as shown in the queuing system
*startdir*
**advanced uses**: path of the directory on a remote
system, which will be hard-coded into the queuing system
script(s); see :func:`gromacs.setup.MD` and
:class:`gromacs.manager.Manager`
"""
# user structure or relaxed or restrained or solvated
kwargs.setdefault('struct', self.get_last_structure())
kwargs.setdefault('t',self.get_last_checkpoint()) # Pass checkpoint file from md_relaxed
kwargs.setdefault('mdp', self.mdp['MD_NPT'])
return self._MD('MD_NPT', **kwargs)
# for convenience and compatibility
MD = MD_NPT
@staticmethod
def _checknotempty(value, name):
if value is None or value == "":
raise ValueError("Parameter %s cannot be empty." % name)
return value
@staticmethod
def _lastnotempty(l):
"""Return the last non-empty value in list *l* (or None :-p)"""
nonempty = [None] + [x for x in l if not (x is None or x == "" or x == [])]
return nonempty[-1]
def get_last_structure(self):
"""Returns the coordinates of the most advanced step in the protocol."""
return self._lastnotempty([self.files[name] for name in self.coordinate_structures])
def get_last_checkpoint(self):
"""Returns the checkpoint of the most advanced step in the protocol.
Relies on md.gro being present from previous simulation, assumes that checkpoint is then present.
"""
return self._lastnotempty([self.files[name] for name in self.checkpoints]).replace('.gro','.cpt')
class WaterSimulation(Simulation):
"""Equilibrium MD of a solute in a box of water."""
solvent_default = 'water'
dirname_default = os.path.join(Simulation.topdir_default, solvent_default)
class CyclohexaneSimulation(Simulation):
"""Equilibrium MD of a solute in a box of cyclohexane."""
solvent_default = 'cyclohexane'
dirname_default = os.path.join(Simulation.topdir_default, solvent_default)
class OctanolSimulation(Simulation):
"""Equilibrium MD of a solute in a box of octanol."""
solvent_default = 'octanol'
dirname_default = os.path.join(Simulation.topdir_default, solvent_default)
class WetOctanolSimulation(Simulation):
"""Equilibrium MD of a solute in a box of wet octanol."""
solvent_default = 'wetoctanol'
dirname_default = os.path.join(Simulation.topdir_default, solvent_default)
def _setup_solvate(self, **kwargs):
sol = gromacs.setup.solvate_sol(**kwargs)
with in_dir(self.dirs.solvation, create=False):
u = mda.Universe('solvated.gro')
octanol = u.select_atoms('resname OcOH')
n = octanol.n_residues
with in_dir(self.dirs.topology, create=False):
gromacs.cbook.edit_txt(self.files.topology,
[('OcOH 1', '1', n)])
ionkwargs = kwargs
ionkwargs['struct'] = sol['struct']
params = gromacs.setup.solvate_ion(**ionkwargs)
        return params
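# Example (illustrative sketch, not part of the original module; the file names
# are hypothetical). A typical water-phase setup mirrors the "Typical use" shown
# in the Simulation docstring:
#
#   from mdpow.equil import WaterSimulation
#   S = WaterSimulation(molecule="BNZ")
#   S.topology(itp="benzene.itp")
#   S.solvate(struct="benzene.pdb")
#   S.energy_minimize()
#   S.MD_relaxed(runtime=5)
#   S.MD(runtime=10)
#   S.save()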
/Hatta-1.6.7.tar.gz/Hatta-1.6.7/hatta/page.py |
import difflib
import mimetypes
import os
import re
import werkzeug
import werkzeug.contrib.atom
pygments = None
try:
import pygments
import pygments.formatters
import pygments.lexers
import pygments.styles
import pygments.util
except ImportError:
pass
captcha = None
try:
from recaptcha.client import captcha
except ImportError:
pass
Image = None
try:
import Image
except ImportError:
pass
import hatta.error
import hatta.parser
def check_lock(wiki, title):
_ = wiki.gettext
restricted_pages = [
'scripts.js',
'robots.txt',
]
if wiki.read_only:
raise hatta.error.ForbiddenErr(_(u"This site is read-only."))
if title in restricted_pages:
raise hatta.error.ForbiddenErr(_(u"""Can't edit this page.
It can only be edited by the site admin directly on the disk."""))
if title in wiki.index.page_links(wiki.locked_page):
raise hatta.error.ForbiddenErr(_(u"This page is locked."))
def get_page(request, title, wiki=None):
"""Creates a page object based on page's mime type"""
if wiki is None:
wiki = request.wiki
if title:
try:
page_class, mime = wiki.filename_map[title]
except KeyError:
mime = page_mime(title)
major, minor = mime.split('/', 1)
try:
page_class = wiki.mime_map[mime]
except KeyError:
try:
plus_pos = minor.find('+')
if plus_pos > 0:
minor_base = minor[plus_pos:]
else:
minor_base = ''
base_mime = '/'.join([major, minor_base])
page_class = wiki.mime_map[base_mime]
except KeyError:
try:
page_class = wiki.mime_map[major]
except KeyError:
page_class = wiki.mime_map['']
else:
page_class = WikiPageSpecial
mime = ''
return page_class(wiki, request, title, mime)
def page_mime(title):
"""
Guess page's mime type based on corresponding file name.
    Defaults to text/x-wiki for files without an extension.
>>> page_mime(u'something.txt')
'text/plain'
>>> page_mime(u'SomePage')
'text/x-wiki'
>>> page_mime(u'ąęśUnicodePage')
'text/x-wiki'
>>> page_mime(u'image.png')
'image/png'
>>> page_mime(u'style.css')
'text/css'
>>> page_mime(u'archive.tar.gz')
'archive/gzip'
"""
    addr = title.encode('utf-8') # the encoding doesn't really matter here
mime, encoding = mimetypes.guess_type(addr, strict=False)
if encoding:
mime = 'archive/%s' % encoding
if mime is None:
mime = 'text/x-wiki'
return mime
def date_html(date_time):
"""
Create HTML for a date, according to recommendation at
http://microformats.org/wiki/date
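    Example with an arbitrary timestamp:
    >>> import datetime
    >>> date_html(datetime.datetime(2010, 5, 1, 12, 30))
    '<abbr class="date" title="2010-05-01T12:30:00Z">2010-05-01 12:30</abbr>'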
"""
return date_time.strftime(
'<abbr class="date" title="%Y-%m-%dT%H:%M:%SZ">%Y-%m-%d %H:%M</abbr>')
class WikiPage(object):
"""Everything needed for rendering a page."""
def __init__(self, wiki, request, title, mime):
self.request = request
self.title = title
self.mime = mime
# for now we just use the globals from wiki object
if request:
self.get_url = request.get_url
self.get_download_url = request.get_download_url
self.wiki = wiki
self.storage = self.wiki.storage
self.index = self.wiki.index
self.config = self.wiki.config
if self.wiki.alias_page and self.wiki.alias_page in self.storage:
self.aliases = dict(
self.index.page_links_and_labels(self.wiki.alias_page))
else:
self.aliases = {}
def link_alias(self, addr):
"""Find a target address for an alias."""
try:
alias, target = addr.split(':', 1)
except ValueError:
return self.wiki.alias_page
try:
pattern = self.aliases[alias]
except KeyError:
return self.wiki.alias_page
try:
link = pattern % target
except TypeError:
link = pattern + target
return link
def wiki_link(self, addr, label=None, class_=None, image=None, lineno=0):
"""Create HTML for a wiki link."""
addr = addr.strip()
text = werkzeug.escape(label or addr)
chunk = ''
if class_ is not None:
classes = [class_]
else:
classes = []
if hatta.parser.external_link(addr):
classes.append('external')
if addr.startswith('mailto:'):
# Obfuscate e-mails a little bit.
classes.append('mail')
                text = text.replace('@', '&#64;').replace('.', '&#46;')
href = werkzeug.escape(addr,
quote=True).replace('@', '%40').replace('.', '%2E')
else:
href = werkzeug.escape(werkzeug.url_fix(addr), quote=True)
else:
if '#' in addr:
addr, chunk = addr.split('#', 1)
chunk = '#' + werkzeug.url_fix(chunk)
if addr.startswith(':'):
alias = self.link_alias(addr[1:])
href = werkzeug.escape(werkzeug.url_fix(alias) + chunk, True)
classes.append('external')
classes.append('alias')
elif addr.startswith('+'):
href = '/'.join([self.request.script_root,
'+' + werkzeug.escape(addr[1:], quote=True)])
classes.append('special')
elif addr == u'':
href = werkzeug.escape(chunk, True)
classes.append('anchor')
else:
classes.append('wiki')
href = werkzeug.escape(self.get_url(addr) + chunk, True)
if addr not in self.storage:
classes.append('nonexistent')
class_ = werkzeug.escape(' '.join(classes) or '', True)
# We need to output HTML on our own to prevent escaping of href
return '<a href="%s" class="%s" title="%s">%s</a>' % (
href, class_, werkzeug.escape(addr + chunk, True),
image or text)
def wiki_image(self, addr, alt, class_='wiki', lineno=0):
"""Create HTML for a wiki image."""
addr = addr.strip()
html = werkzeug.html
chunk = ''
if hatta.parser.external_link(addr):
return html.img(src=werkzeug.url_fix(addr), class_="external",
alt=alt)
if '#' in addr:
addr, chunk = addr.split('#', 1)
if addr == '':
return html.a(name=chunk)
elif addr.startswith(':'):
if chunk:
chunk = '#' + chunk
alias = self.link_alias(addr[1:])
href = werkzeug.url_fix(alias + chunk)
return html.img(src=href, class_="external alias", alt=alt)
elif addr in self.storage:
mime = page_mime(addr)
if mime.startswith('image/'):
return html.img(src=self.get_download_url(addr), class_=class_,
alt=alt)
else:
return html.img(href=self.get_download_url(addr), alt=alt)
else:
return html.a(html(alt), href=self.get_url(addr))
def menu(self):
"""Generate the menu items"""
_ = self.wiki.gettext
if self.wiki.menu_page in self.storage:
items = self.index.page_links_and_labels(self.wiki.menu_page)
else:
items = [
(self.wiki.front_page, self.wiki.front_page),
('+history', _(u'Recent changes')),
]
for link, label in items:
if link == self.title:
class_ = "current"
else:
class_ = None
yield self.wiki_link(link, label, class_=class_)
def template(self, template_name, **kwargs):
template = self.wiki.template_env.get_template(template_name)
edit_url = None
if self.title:
try:
check_lock(self.wiki, self.title)
edit_url = self.get_url(self.title, 'edit')
except hatta.error.ForbiddenErr:
pass
context = {
'request': self.request,
'wiki': self.wiki,
'title': self.title,
'mime': self.mime,
'url': self.get_url,
'download_url': self.get_download_url,
'config': self.config,
'page': self,
'edit_url': edit_url,
}
context.update(kwargs)
stream = template.stream(**context)
stream.enable_buffering(10)
return stream
def dependencies(self):
"""Refresh the page when any of those pages was changed."""
dependencies = set()
for title in [self.wiki.logo_page, self.wiki.menu_page]:
if title not in self.storage:
dependencies.add(werkzeug.url_quote(title))
for title in [self.wiki.menu_page]:
if title in self.storage:
rev, date, author, comment = self.storage.page_meta(title)
etag = '%s/%d-%s' % (werkzeug.url_quote(title), rev, date.isoformat())
dependencies.add(etag)
return dependencies
def get_edit_help(self):
page = get_page(self.request, self.wiki.help_page)
try:
return ''.join(page.view_content())
except hatta.error.NotFoundErr:
return ''
def render_editor(self, preview=None, captcha_error=None):
_ = self.wiki.gettext
author = self.request.get_author()
if self.title in self.storage:
comment = _(u'changed')
(rev, old_date, old_author,
old_comment) = self.storage.page_meta(self.title)
if old_author == author:
comment = old_comment
else:
comment = _(u'uploaded')
rev = -1
if captcha and self.wiki.recaptcha_public_key:
recaptcha_html = captcha.displayhtml(
self.wiki.recaptcha_public_key, error=captcha_error)
else:
recaptcha_html = None
context = {
'comment': comment,
'author': author,
'parent': rev,
'recaptcha_html': recaptcha_html,
'help': self.get_edit_help(),
}
return self.template('edit_file.html', **context)
class WikiPageSpecial(WikiPage):
"""Special pages, like recent changes, index, etc."""
class WikiPageText(WikiPage):
"""Pages of mime type text/* use this for display."""
def content_iter(self, lines):
yield '<pre>'
for line in lines:
yield werkzeug.html(line)
yield '</pre>'
def plain_text(self):
"""
Get the content of the page with all markup removed, used for
indexing.
"""
return self.storage.page_text(self.title)
def view_content(self, lines=None):
"""
Read the page content from storage or preview and return iterator.
"""
if lines is None:
lines = self.storage.page_text(self.title).splitlines(True)
return self.content_iter(lines)
def render_editor(self, preview=None, captcha_error=None):
"""Generate the HTML for the editor."""
_ = self.wiki.gettext
author = self.request.get_author()
lines = []
try:
lines = self.storage.page_text(self.title).splitlines(True)
(rev, old_date, old_author,
old_comment) = self.storage.page_meta(self.title)
comment = _(u'modified')
if old_author == author:
comment = old_comment
except hatta.error.NotFoundErr:
comment = _(u'created')
rev = -1
except hatta.error.ForbiddenErr, e:
return werkzeug.html.p(werkzeug.html(unicode(e)))
if preview:
lines = preview
comment = self.request.form.get('comment', comment)
if captcha and self.wiki.recaptcha_public_key:
recaptcha_html = captcha.displayhtml(
self.wiki.recaptcha_public_key, error=captcha_error)
else:
recaptcha_html = None
context = {
'comment': comment,
'preview': preview,
'recaptcha_html': recaptcha_html,
'help': self.get_edit_help(),
'author': author,
'parent': rev,
'lines': lines,
}
return self.template('edit_text.html', **context)
def diff_content(self, from_text, to_text, message=u''):
"""Generate the HTML markup for a diff."""
def infiniter(iterator):
"""Turn an iterator into an infinite one, padding it with None"""
for i in iterator:
yield i
while True:
yield None
diff = difflib._mdiff(from_text.split('\n'), to_text.split('\n'))
mark_re = re.compile('\0[-+^]([^\1\0]*)\1|([^\0\1])')
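# difflib._mdiff yields (from_line, to_line, has_change) tuples; each
# *_line is a (line_number, text) pair, and in-line changes in the text are
# wrapped in the control markers '\0+' (added), '\0-' (removed) or '\0^'
# (changed), each closed by '\1'. mark_re splits the text into those marked
# regions and the remaining plain characters.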
yield message
yield u'<pre class="diff">'
for old_line, new_line, changed in diff:
old_no, old_text = old_line
new_no, new_text = new_line
line_no = (new_no or old_no or 1) - 1
if changed:
yield u'<div class="change" id="line_%d">' % line_no
old_iter = infiniter(mark_re.finditer(old_text))
new_iter = infiniter(mark_re.finditer(new_text))
old = old_iter.next()
new = new_iter.next()
buff = u''
while old or new:
while old and old.group(1):
if buff:
yield werkzeug.escape(buff)
buff = u''
yield u'<del>%s</del>' % werkzeug.escape(old.group(1))
old = old_iter.next()
while new and new.group(1):
if buff:
yield werkzeug.escape(buff)
buff = u''
yield u'<ins>%s</ins>' % werkzeug.escape(new.group(1))
new = new_iter.next()
if new:
buff += new.group(2)
old = old_iter.next()
new = new_iter.next()
if buff:
yield werkzeug.escape(buff)
yield u'</div>'
else:
yield u'<div class="orig" id="line_%d">%s</div>' % (
line_no, werkzeug.escape(old_text))
yield u'</pre>'
class WikiPageColorText(WikiPageText):
"""Text pages, but displayed colorized with pygments"""
def view_content(self, lines=None):
"""Generate HTML for the content."""
if lines is None:
text = self.storage.page_text(self.title)
else:
text = ''.join(lines)
return self.highlight(text, mime=self.mime)
def highlight(self, text, mime=None, syntax=None, line_no=0):
"""Colorize the source code."""
if pygments is None:
yield werkzeug.html.pre(werkzeug.html(text))
return
formatter = pygments.formatters.HtmlFormatter()
formatter.line_no = line_no
def wrapper(source, unused_outfile):
"""Wrap each line of formatted output."""
yield 0, '<div class="highlight"><pre>'
for lineno, line in source:
yield (lineno,
werkzeug.html.span(line, id_="line_%d" %
formatter.line_no))
formatter.line_no += 1
yield 0, '</pre></div>'
formatter.wrap = wrapper
try:
if mime:
lexer = pygments.lexers.get_lexer_for_mimetype(mime)
elif syntax:
lexer = pygments.lexers.get_lexer_by_name(syntax)
else:
lexer = pygments.lexers.guess_lexer(text)
except pygments.util.ClassNotFound:
yield werkzeug.html.pre(werkzeug.html(text))
return
html = pygments.highlight(text, lexer, formatter)
yield html
class WikiPageWiki(WikiPageColorText):
"""Pages with wiki markup use this for display."""
def __init__(self, *args, **kw):
super(WikiPageWiki, self).__init__(*args, **kw)
if self.config.get_bool('wiki_words', False):
self.parser = hatta.parser.WikiWikiParser
else:
self.parser = hatta.parser.WikiParser
if self.config.get_bool('ignore_indent', False):
try:
del self.parser.block['indent']
except KeyError:
pass
def extract_links(self, text=None):
"""Extract all links from the page."""
if text is None:
try:
text = self.storage.page_text(self.title)
except hatta.error.NotFoundErr:
text = u''
return self.parser.extract_links(text)
def view_content(self, lines=None):
if lines is None:
lines = self.storage.page_text(self.title).splitlines(True)
if self.wiki.icon_page and self.wiki.icon_page in self.storage:
icons = self.index.page_links_and_labels(self.wiki.icon_page)
smilies = dict((emo, link) for (link, emo) in icons)
else:
smilies = None
content = self.parser(lines, self.wiki_link, self.wiki_image,
self.highlight, self.wiki_math, smilies)
return content
def wiki_math(self, math_text, display=False):
math_url = self.wiki.math_url
if math_url == '':
return werkzeug.escape(math_text)
elif math_url == 'mathjax':
if display:
return werkzeug.escape(u"$$\n%s\n$$" % math_text)
else:
return werkzeug.escape(u"$%s$" % math_text)
if '%s' in math_url:
url = math_url % werkzeug.url_quote(math_text)
else:
url = '%s%s' % (math_url, werkzeug.url_quote(math_text))
label = werkzeug.escape(math_text, quote=True)
return werkzeug.html.img(src=url, alt=label, class_="math")
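# For example, with math_url == 'mathjax' the input 'x^2' is emitted as the
# escaped text '$x^2$' ('$$ ... $$' when display=True); with an
# image-rendering service such as math_url = 'http://math.example.invalid/%s'
# (hypothetical URL) the same input becomes an <img class="math"> tag whose
# src points at the rendered formula.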
def dependencies(self):
dependencies = WikiPage.dependencies(self)
for title in [self.wiki.icon_page, self.wiki.alias_page]:
if title in self.storage:
rev, date, author, comment = self.storage.page_meta(title)
etag = '%s/%d-%s' % (werkzeug.url_quote(title), rev, date.isoformat())
dependencies.add(etag)
for link in self.index.page_links(self.title):
if link not in self.storage:
dependencies.add(werkzeug.url_quote(link))
return dependencies
class WikiPageFile(WikiPage):
"""Pages of all other mime types use this for display."""
def view_content(self, lines=None):
if self.title not in self.storage:
raise hatta.error.NotFoundErr()
content = ['<p>Download <a href="%s">%s</a> as <i>%s</i>.</p>' %
(self.request.get_download_url(self.title),
werkzeug.escape(self.title), self.mime)]
return content
class WikiPageImage(WikiPageFile):
"""Pages of mime type image/* use this for display."""
render_file = '128x128.png'
def view_content(self, lines=None):
if self.title not in self.storage:
raise hatta.error.NotFoundErr()
content = ['<a href="%s"><img src="%s" alt="%s"></a>'
% (self.request.get_url(self.title, 'download'),
self.request.get_url(self.title, 'render'),
werkzeug.escape(self.title))]
return content
def render_mime(self):
"""Give the filename and mime type of the rendered thumbnail."""
if not Image:
raise NotImplementedError('No Image library available')
return self.render_file, 'image/png'
def render_cache(self, cache_dir):
"""Render the thumbnail and save in the cache."""
if not Image:
raise NotImplementedError('No Image library available')
page_file = self.storage.open_page(self.title)
cache_path = os.path.join(cache_dir, self.render_file)
cache_file = open(cache_path, 'wb')
try:
im = Image.open(page_file)
im = im.convert('RGBA')
im.thumbnail((128, 128), Image.ANTIALIAS)
im.save(cache_file, 'PNG')
except IOError:
raise hatta.error.UnsupportedMediaTypeErr('Image corrupted')
finally:
cache_file.close()
return cache_path
class WikiPageCSV(WikiPageFile):
"""Display class for type text/csv."""
def content_iter(self, lines=None):
import csv
_ = self.wiki.gettext
# XXX Add preview support
csv_file = self.storage.open_page(self.title)
reader = csv.reader(csv_file)
html_title = werkzeug.escape(self.title, quote=True)
yield u'<table id="%s" class="csvfile">' % html_title
try:
for row in reader:
yield u'<tr>%s</tr>' % (u''.join(u'<td>%s</td>' % cell
for cell in row))
except csv.Error, e:
yield u'</table>'
yield werkzeug.html.p(werkzeug.html(
_(u'Error parsing CSV file %{file}s on '
u'line %{line}d: %{error}s') %
{'file': html_title, 'line': reader.line_num, 'error': e}))
finally:
csv_file.close()
yield u'</table>'
def view_content(self, lines=None):
if self.title not in self.storage:
raise hatta.error.NotFoundErr()
return self.content_iter(lines)
class WikiPageRST(WikiPageText):
"""
Display ReStructured Text.
"""
def content_iter(self, lines):
try:
from docutils.core import publish_parts
except ImportError:
return super(WikiPageRST, self).content_iter(lines)
text = ''.join(lines)
SAFE_DOCUTILS = dict(file_insertion_enabled=False, raw_enabled=False)
content = publish_parts(text, writer_name='html',
settings_overrides=SAFE_DOCUTILS)['html_body']
return [content]
class WikiPageBugs(WikiPageText):
"""
Display class for type text/x-bugs
Parse the ISSUES file in (roughly) the format used by ciss
"""
def content_iter(self, lines):
last_lines = []
in_header = False
in_bug = False
attributes = {}
title = None
for line_no, line in enumerate(lines):
if last_lines and line.startswith('----'):
title = ''.join(last_lines)
last_lines = []
in_header = True
attributes = {}
elif in_header and ':' in line:
attribute, value = line.split(':', 1)
attributes[attribute.strip()] = value.strip()
else:
if in_header:
if in_bug:
yield '</div>'
#tags = [tag.strip() for tag in
# attributes.get('tags', '').split()
# if tag.strip()]
yield '<div id="line_%d">' % (line_no)
in_bug = True
if title:
yield werkzeug.html.h2(werkzeug.html(title))
if attributes:
yield '<dl>'
for attribute, value in attributes.iteritems():
yield werkzeug.html.dt(werkzeug.html(attribute))
yield werkzeug.html.dd(werkzeug.html(value))
yield '</dl>'
in_header = False
if not line.strip():
if last_lines:
if last_lines[0][0] in ' \t':
yield werkzeug.html.pre(werkzeug.html(
''.join(last_lines)))
else:
yield werkzeug.html.p(werkzeug.html(
''.join(last_lines)))
last_lines = []
else:
last_lines.append(line)
if last_lines:
if last_lines[0][0] in ' \t':
yield werkzeug.html.pre(werkzeug.html(
''.join(last_lines)))
else:
yield werkzeug.html.p(werkzeug.html(
''.join(last_lines)))
if in_bug:
yield '</div>'
filename_map = {
'README': (WikiPageText, 'text/plain'),
'ISSUES': (WikiPageBugs, 'text/x-bugs'),
'ISSUES.txt': (WikiPageBugs, 'text/x-bugs'),
'COPYING': (WikiPageText, 'text/plain'),
'CHANGES': (WikiPageText, 'text/plain'),
'MANIFEST': (WikiPageText, 'text/plain'),
'favicon.ico': (WikiPageImage, 'image/x-icon'),
}
mime_map = {
'text': WikiPageColorText,
'application/x-javascript': WikiPageColorText,
'application/x-python': WikiPageColorText,
'text/csv': WikiPageCSV,
'text/x-rst': WikiPageRST,
'text/x-wiki': WikiPageWiki,
'image': WikiPageImage,
'': WikiPageFile,
}
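# These two maps are presumably the dispatch tables used elsewhere in hatta
# to choose a display class: first by exact page title (filename_map), then
# by full mime type, then by the major type ('text', 'image'), with
# WikiPageFile ('') as the fallback.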
mimetypes.add_type('application/x-python', '.wsgi')
mimetypes.add_type('application/x-javascript', '.js')
mimetypes.add_type('text/x-rst', '.rst') | PypiClean |
/myllm-1.2.37.tar.gz/myllm-1.2.37/myllm/main.py | import asyncio
import importlib
from typing import Any, List, Mapping, Optional
import g4f
from loguru import logger
from myllm import __version__
from myllm.config import settings
class MyLLM:
"""
MyLLM class used to initiate an LLM client
with a given model and a given provider.
Attributes:
logger (Logger): Logger
model (str): Model
enabled (bool): Enabled
commands (str): Commands
Methods:
get_myllm_info(self)
get_myllm_help(self)
talk(self, prompt = settings.llm_default_prompt)
"""
def __init__(self):
"""
Initialize the MyLLM object
Args:
None
"""
self.logger = logger
self.enabled = settings.llm_enabled
if not self.enabled:
return
self.model = settings.llm_model
self.provider = importlib.import_module(settings.llm_provider)
self.commands = settings.llm_commands
self.llm_continous = settings.llm_continous
self.chat_history = ""
# self.llm = LangLLM()
self.chain = None
async def get_myllm_info(self):
"""
Retrieves information about MyLLM including
its version and the model being used.
:return: A string containing the MyLLM version and the model.
"""
return f"ℹ️ MyLLM v{__version__}\n {self.model}\n"
async def get_myllm_help(self):
"""
Get the help message for MyLLM.
Returns:
str: The help message for the `myllm` command.
"""
return f"{self.commands}\n"
async def talk(self, prompt=settings.llm_default_prompt):
"""
Asynchronously initiates a chat with the given prompt.
Args:
prompt (str, optional): The prompt to start the chat with.
Defaults to settings.llm_default_prompt.
Returns:
g4f.ChatCompletion: An instance of the g4f.ChatCompletion class
representing the chat completion.
"""
self.logger.info(f"Starting chat with prompt: {prompt}")
return g4f.ChatCompletion.create(
model=self.model,
provider=self.provider,
messages=[{"role": "user", "content": prompt}],
)
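# Minimal usage sketch, assuming the llm_* values in myllm.config.settings
# are configured and the chosen g4f provider module is importable:
#
#   import asyncio
#   from myllm.main import MyLLM
#
#   async def demo():
#       llm = MyLLM()
#       print(await llm.get_myllm_info())
#       print(await llm.talk("Hello"))
#
#   asyncio.run(demo())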
async def chat(self, prompt, id=None):
"""
Asynchronously initiates a chat with the given prompt
and keeps the history of the chat.
Args:
prompt (str, optional): The prompt to start the chat with.
Returns:
g4f.ChatCompletion: An instance of the g4f.ChatCompletion class
"""
if self.chat_history:
prompt = (
f"{prompt}, To answer, use the following context: {self.chat_history}"
)
self.chat_history = prompt
return await self.talk(prompt)
async def continous_mode(self, prompt):
""" """
if self.llm_continous:
self.chat_history = settings.llm_continous_context
return await self.chat(prompt)
async def clear_chat_history(self):
""" """
self.chat_history = ""
async def switch_continous_mode(self):
""" """
self.llm_continous = not self.llm_continous
return f"Continous mode {'enabled' if self.llm_continous else 'disabled'}." | PypiClean |
/IsPycharmRun-1.0.tar.gz/IsPycharmRun-1.0/pylib/utils/report_results.py | import logging
import os
import re
from pylib import constants
import flakiness_dashboard_results_uploader
def _LogToFile(results, test_type, suite_name):
"""Log results to local files which can be used for aggregation later."""
log_file_path = os.path.join(constants.GetOutDirectory(), 'test_logs')
if not os.path.exists(log_file_path):
os.mkdir(log_file_path)
full_file_name = os.path.join(
log_file_path, re.sub('\W', '_', test_type).lower() + '.log')
if not os.path.exists(full_file_name):
with open(full_file_name, 'w') as log_file:
print >> log_file, '\n%s results for %s build %s:' % (
test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
os.environ.get('BUILDBOT_BUILDNUMBER'))
logging.info('Writing results to %s.' % full_file_name)
with open(full_file_name, 'a') as log_file:
shortened_suite_name = suite_name[:25] + (suite_name[25:] and '...')
print >> log_file, '%s%s' % (shortened_suite_name.ljust(30),
results.GetShortForm())
def _LogToFlakinessDashboard(results, test_type, test_package,
flakiness_server):
"""Upload results to the flakiness dashboard"""
logging.info('Upload results for test type "%s", test package "%s" to %s' %
(test_type, test_package, flakiness_server))
# TODO(frankf): Enable uploading for gtests.
if test_type != 'Instrumentation':
logging.warning('Invalid test type.')
return
try:
if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER:
assert test_package in ['ContentShellTest',
'ChromiumTestShellTest',
'AndroidWebViewTest']
dashboard_test_type = ('%s_instrumentation_tests' %
test_package.lower().rstrip('test'))
# Downstream server.
else:
dashboard_test_type = 'Chromium_Android_Instrumentation'
flakiness_dashboard_results_uploader.Upload(
results, flakiness_server, dashboard_test_type)
except Exception as e:
logging.error(e)
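# A typical invocation of LogFull below, assuming `results` is a
# TestRunResults instance:
#
#   LogFull(results, test_type='Instrumentation',
#           test_package='ContentShellTest', annotation=['Smoke'],
#           flakiness_server=constants.UPSTREAM_FLAKINESS_SERVER)
#
# This logs to stdout, to per-step files on buildbots, and to the flakiness
# dashboard when a server URL is given.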
def LogFull(results, test_type, test_package, annotation=None,
flakiness_server=None):
"""Log the tests results for the test suite.
The results will be logged three different ways:
1. Log to stdout.
2. Log to local files for aggregating multiple test steps
(on buildbots only).
3. Log to flakiness dashboard (on buildbots only).
Args:
results: An instance of TestRunResults object.
test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
test_package: Test package name (e.g. 'ipc_tests' for gtests,
'ContentShellTest' for instrumentation tests)
annotation: If instrumentation test type, this is a list of annotations
(e.g. ['Smoke', 'SmallTest']).
flakiness_server: If provided, upload the results to the flakiness dashboard
with this URL.
"""
if not results.DidRunPass():
logging.critical('*' * 80)
logging.critical('Detailed Logs')
logging.critical('*' * 80)
for line in results.GetLogs().splitlines():
logging.critical(line)
logging.critical('*' * 80)
logging.critical('Summary')
logging.critical('*' * 80)
for line in results.GetLongForm().splitlines():
logging.critical(line)
logging.critical('*' * 80)
if os.environ.get('BUILDBOT_BUILDERNAME'):
# It is possible to have multiple buildbot steps for the same
# instrumentation test package using different annotations.
if annotation and len(annotation) == 1:
suite_name = annotation[0]
else:
suite_name = test_package
_LogToFile(results, test_type, suite_name)
if flakiness_server:
_LogToFlakinessDashboard(results, test_type, test_package,
flakiness_server) | PypiClean |
/Flask-Upwork-1.0-pre1.tar.gz/Flask-Upwork-1.0-pre1/README | ============
Flask-Upwork
============
Requirements
============
* `flask`
* `python-upwork`
* `python-oauth2`
Authorization
==============
Quick start
-----------
Before you can use the Upwork APIs, you will need to obtain your pair of API keys.
Visit the `Upwork API Center documentation <http://developers.upwork.com/Authentication#authentication>`_
for full details. Please note that Flask-Upwork uses OAuth authorization and needs keys with the auth type "OAuth".
Please make sure that `SECRET_KEY`, which is required for sessions based on secure cookies, is set in `settings.py`::
SECRET_KEY = '(your random secret key)'
You need to store your pair of Upwork API keys in `settings.py`::
ODESK_KEY = '(your Upwork public key)'
ODESK_SECRET = '(your Upwork secret key)'
You can also set, in `settings.py`, the list of teams that will be allowed to authorize.
If you do not specify this option or leave the list empty, all Upwork users will be able to authorize::
ODESK_AUTH_TEAMS = ('teamname',)
Please make sure that you have registered the Upwork module in your `app.py` correctly.
Please keep in mind that `url_prefix` can be whatever you like::
from flask import Flask
from flaskext.odesk import odesk
app = Flask(__name__)
app.config.from_pyfile('settings.py')
app.register_module(odesk, url_prefix='/odesk')
Using authorization
-------------------
Use the `login_required` decorator to restrict anonymous users' access to certain parts of your website::
@app.route('/only/for/odesk/users')
@odesk.login_required
def admin():
return "Welcome, Upwork user!"
If you want to display login or logout links in the template, you can use the `url_for` function and the `odesk_is_authorized` variable::
{% if odesk_is_authorized %}
<a href="{{ url_for('odesk.logout') }}">Log out</a>
{% else %}
<a href="{{ url_for('odesk.login') }}">Upwork log in</a>
{% endif %}
To check whether the current user is authorized, you can use the `is_authorized` method::
@app.route('/test')
def test():
if odesk.is_authorized():
return "You are authorized."
else:
return "You are not authorized yet."
If needed, you can start the authorization process manually from your code::
if not odesk.is_authorized():
return odesk.login()
You can also use the `next` parameter to specify the URL to redirect to after the authorization process ends::
if not odesk.is_authorized():
return odesk.login(next='/blah/blah')
You can use the `logout` method to log the user out.
Note that, unlike `login`, this method does not return a redirect response.
It simply deletes the OAuth session, so you should return a response manually::
if odesk.is_authorized():
odesk.logout()
return redirect('/')
If you want to extend the authorization process, you can use the `after_login` decorator,
which registers a function to be called after successful authorization::
@odesk.after_login
def save_session():
# Getting current user's data. Please, see below how to use the Client.
session['user'] = odesk.get_client().hr.get_user('me')
If you have used `after_login` and saved something to the session, please
do not forget to delete it after logging out, using the `after_logout` decorator::
@odesk.after_logout
def delete_session():
if 'user' in session:
del session['user']
Using client
============
You can use the `get_access_token` method to get the current access token and access token secret,
which can be stored in a database and used to access the client later, if necessary::
if odesk.is_authorized():
access_token, access_token_secret = odesk.get_access_token()
You can use the `get_client` method to get the client::
if odesk.is_authorized():
c = odesk.get_client()
c.team.get_teamrooms()
You can also use the client even if the current user is not authorized,
as long as you have the access token and access token secret::
if not odesk.is_authorized():
c = odesk.get_client(access_token, access_token_secret)
c.team.get_teamrooms() | PypiClean |
/Fregger-0.10.7.tar.gz/Fregger-0.10.7/fregger/static/lib/swagger-oauth.js | function handleLogin(){var e=[],o=window.swaggerUiAuth.authSchemes||window.swaggerUiAuth.securityDefinitions;if(o){var i,n=o;for(i in n){var a=n[i];if("oauth2"===a.type&&a.scopes){var t;if(Array.isArray(a.scopes)){var p;for(p=0;p<a.scopes.length;p++)e.push(a.scopes[p])}else for(t in a.scopes)e.push({scope:t,description:a.scopes[t],OAuthSchemeKey:i})}}}for(window.swaggerUi.api&&window.swaggerUi.api.info&&(appName=window.swaggerUi.api.info.title),$(".api-popup-dialog").remove(),popupDialog=$(['<div class="api-popup-dialog">','<div class="api-popup-title">Select OAuth2.0 Scopes</div>','<div class="api-popup-content">',"<p>Scopes are used to grant an application different levels of access to data on behalf of the end user. Each API may declare one or more scopes.",'<a href="#">Learn how to use</a>',"</p>","<p><strong>"+appName+"</strong> API requires the following scopes. Select which ones you want to grant to Swagger UI.</p>",'<ul class="api-popup-scopes">',"</ul>",'<p class="error-msg"></p>','<div class="api-popup-actions"><button class="api-popup-authbtn api-button green" type="button">Authorize</button><button class="api-popup-cancel api-button gray" type="button">Cancel</button></div>',"</div>","</div>"].join("")),$(document.body).append(popupDialog),popup=popupDialog.find("ul.api-popup-scopes").empty(),p=0;p<e.length;p++)t=e[p],str='<li><input type="checkbox" id="scope_'+p+'" scope="'+t.scope+'"" oauthtype="'+t.OAuthSchemeKey+'"/><label for="scope_'+p+'">'+t.scope,t.description&&($.map(o,function(e,o){return o}).length>1?str+='<br/><span class="api-scope-desc">'+t.description+" ("+t.OAuthSchemeKey+")</span>":str+='<br/><span class="api-scope-desc">'+t.description+"</span>"),str+="</label></li>",popup.append(str);var r=$(window),s=r.width(),c=r.height(),l=r.scrollTop(),d=popupDialog.outerWidth(),u=popupDialog.outerHeight(),h=(c-u)/2+l,g=(s-d)/2;popupDialog.css({top:(h<0?0:h)+"px",left:(g<0?0:g)+"px"}),popupDialog.find("button.api-popup-cancel").click(function(){popupMask.hide(),popupDialog.hide(),popupDialog.empty(),popupDialog=[]}),$("button.api-popup-authbtn").unbind(),popupDialog.find("button.api-popup-authbtn").click(function(){function e(e){return e.vendorExtensions["x-tokenName"]||e.tokenName}popupMask.hide(),popupDialog.hide();var o,i=window.swaggerUi.api.authSchemes,n=window.location,a=location.pathname.substring(0,location.pathname.lastIndexOf("/")),t=n.protocol+"//"+n.host+a+"/o2c.html",p=window.oAuthRedirectUrl||t,r=null,s=[],c=popup.find("input:checked"),l=[];for(k=0;k<c.length;k++){var d=$(c[k]).attr("scope");s.indexOf(d)===-1&&s.push(d);var u=$(c[k]).attr("oauthtype");l.indexOf(u)===-1&&l.push(u)}window.enabledScopes=s;for(var h in i)if(i.hasOwnProperty(h)&&l.indexOf(h)!=-1){var g=i[h].flow;if("oauth2"!==i[h].type||!g||"implicit"!==g&&"accessCode"!==g){if("oauth2"===i[h].type&&g&&"application"===g){var w=i[h];return window.swaggerUi.tokenName=e(w)||"access_token",void clientCredentialsFlow(s,w.tokenUrl,h)}if(i[h].grantTypes){var c=i[h].grantTypes;for(var f in c)if(c.hasOwnProperty(f)&&"implicit"===f){var w=c[f];w.loginEndpoint.url;r=w.loginEndpoint.url+"?response_type=token",window.swaggerUi.tokenName=e(w)}else if(c.hasOwnProperty(f)&&"accessCode"===f){var w=c[f];w.tokenRequestEndpoint.url;r=w.tokenRequestEndpoint.url+"?response_type=code",window.swaggerUi.tokenName=e(w)}}}else{var 
w=i[h];r=w.authorizationUrl+"?response_type="+("implicit"===g?"token":"code"),window.swaggerUi.tokenName=e(w)||"access_token",window.swaggerUi.tokenUrl="accessCode"===g?w.tokenUrl:null,o=h}}redirect_uri=p,r+="&redirect_uri="+encodeURIComponent(p),r+="&realm="+encodeURIComponent(realm),r+="&client_id="+encodeURIComponent(clientId),r+="&scope="+encodeURIComponent(s.join(scopeSeparator)),r+="&state="+encodeURIComponent(o);for(var h in additionalQueryStringParams)r+="&"+h+"="+encodeURIComponent(additionalQueryStringParams[h]);window.open(r)}),popupMask.show(),popupDialog.show()}function handleLogout(){for(key in window.swaggerUi.api.clientAuthorizations.authz)window.swaggerUi.api.clientAuthorizations.remove(key);window.enabledScopes=null,$(".api-ic.ic-on").addClass("ic-off"),$(".api-ic.ic-on").removeClass("ic-on"),$(".api-ic.ic-warning").addClass("ic-error"),$(".api-ic.ic-warning").removeClass("ic-warning")}function initOAuth(e){var o=e||{},i=[];return appName=o.appName||i.push("missing appName"),popupMask=o.popupMask||$("#api-common-mask"),popupDialog=o.popupDialog||$(".api-popup-dialog"),clientId=o.clientId||i.push("missing client id"),clientSecret=o.clientSecret||null,realm=o.realm||i.push("missing realm"),scopeSeparator=o.scopeSeparator||" ",additionalQueryStringParams=o.additionalQueryStringParams||{},i.length>0?void log("auth unable initialize oauth: "+i):($("pre code").each(function(e,o){hljs.highlightBlock(o)}),$(".api-ic").unbind(),void $(".api-ic").click(function(e){$(e.target).hasClass("ic-off")?handleLogin():handleLogout()}))}function clientCredentialsFlow(e,o,i){var n={client_id:clientId,client_secret:clientSecret,scope:e.join(" "),grant_type:"client_credentials"};$.ajax({url:o,type:"POST",data:n,success:function(e,o,n){onOAuthComplete(e,i)},error:function(e,o,i){onOAuthComplete("")}})}var appName,popupMask,popupDialog,clientId,realm,redirect_uri,clientSecret,scopeSeparator,additionalQueryStringParams;window.processOAuthCode=function(e){var o=e.state,i=window.location,n=location.pathname.substring(0,location.pathname.lastIndexOf("/")),a=i.protocol+"//"+i.host+n+"/o2c.html",t=window.oAuthRedirectUrl||a,p={client_id:clientId,code:e.code,grant_type:"authorization_code",redirect_uri:t};clientSecret&&(p.client_secret=clientSecret),$.ajax({url:window.swaggerUiAuth.tokenUrl,type:"POST",data:p,success:function(e,i,n){onOAuthComplete(e,o)},error:function(e,o,i){onOAuthComplete("")}})},window.onOAuthComplete=function(e,o){if(e)if(e.error){var i=$("input[type=checkbox],.secured");i.each(function(e){i[e].checked=!1}),alert(e.error)}else{var n=e[window.swaggerUiAuth.tokenName];if(o||(o=e.state),n){var a=null;$.each($(".auth .api-ic .api_information_panel"),function(e,o){var i=o;if(i&&i.childNodes){var n=[];$.each(i.childNodes,function(e,o){var i=o.innerHTML;i&&n.push(i)});for(var t=[],p=0;p<n.length;p++){var r=n[p];window.enabledScopes&&window.enabledScopes.indexOf(r)==-1&&t.push(r)}t.length>0?(a=o.parentNode.parentNode,$(a.parentNode).find(".api-ic.ic-on").addClass("ic-off"),$(a.parentNode).find(".api-ic.ic-on").removeClass("ic-on"),$(a).find(".api-ic").addClass("ic-warning"),$(a).find(".api-ic").removeClass("ic-error")):(a=o.parentNode.parentNode,$(a.parentNode).find(".api-ic.ic-off").addClass("ic-on"),$(a.parentNode).find(".api-ic.ic-off").removeClass("ic-off"),$(a).find(".api-ic").addClass("ic-info"),$(a).find(".api-ic").removeClass("ic-warning"),$(a).find(".api-ic").removeClass("ic-error"))}}),"undefined"!=typeof 
window.swaggerUi&&(window.swaggerUi.api.clientAuthorizations.add(window.swaggerUiAuth.OAuthSchemeKey,new SwaggerClient.ApiKeyAuthorization("Authorization","Bearer "+n,"header")),window.swaggerUi.load())}}}; | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/dizoo/classic_control/cartpole/config/__init__.py | from .cartpole_a2c_config import cartpole_a2c_config, cartpole_a2c_create_config
from .cartpole_acer_config import cartpole_acer_config, cartpole_acer_create_config
from .cartpole_c51_config import cartpole_c51_config, cartpole_c51_create_config
from .cartpole_dqfd_config import cartpole_dqfd_config, cartpole_dqfd_create_config
from .cartpole_dqn_config import cartpole_dqn_config, cartpole_dqn_create_config
from .cartpole_dqn_gail_config import cartpole_dqn_gail_config, cartpole_dqn_gail_create_config
from .cartpole_gcl_config import cartpole_gcl_ppo_onpolicy_config, cartpole_gcl_ppo_onpolicy_create_config
from .cartpole_impala_config import cartpole_impala_config, cartpole_impala_create_config
from .cartpole_iqn_config import cartpole_iqn_config, cartpole_iqn_create_config
from .cartpole_ppo_offpolicy_config import cartpole_ppo_offpolicy_config, cartpole_ppo_offpolicy_create_config
from .cartpole_ppg_config import cartpole_ppg_config, cartpole_ppg_create_config
from .cartpole_ppo_config import cartpole_ppo_config, cartpole_ppo_create_config
from .cartpole_qrdqn_config import cartpole_qrdqn_config, cartpole_qrdqn_create_config
from .cartpole_r2d2_config import cartpole_r2d2_config, cartpole_r2d2_create_config
from .cartpole_rainbow_config import cartpole_rainbow_config, cartpole_rainbow_create_config
from .cartpole_sqil_config import cartpole_sqil_config, cartpole_sqil_create_config
from .cartpole_sql_config import cartpole_sql_config, cartpole_sql_create_config
from .cartpole_sqn_config import cartpole_sqn_config, cartpole_sqn_create_config
from .cartpole_trex_dqn_config import cartpole_trex_dqn_config, cartpole_trex_dqn_create_config
from .cartpole_trex_offppo_config import cartpole_trex_offppo_config, cartpole_trex_offppo_create_config
from .cartpole_trex_onppo_config import cartpole_trex_ppo_onpolicy_config, cartpole_trex_ppo_onpolicy_create_config
from .cartpole_mdqn_config import cartpole_mdqn_config, cartpole_mdqn_create_config
# from .cartpole_ppo_default_loader import cartpole_ppo_default_loader | PypiClean |
/GeoNode-3.2.0-py3-none-any.whl/geonode/people/admin.py |
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from django.views.decorators.debug import sensitive_post_parameters
from django.contrib.admin.options import IS_POPUP_VAR
from django.shortcuts import get_object_or_404
from django.utils.html import escape
from django.template.response import TemplateResponse
from django.http import HttpResponseRedirect, Http404
from django.core.exceptions import PermissionDenied
from django.forms import modelform_factory
from geonode.base.admin import set_user_and_group_layer_permission
from .models import Profile
from .forms import ProfileCreationForm, ProfileChangeForm
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class ProfileAdmin(admin.ModelAdmin):
modelform_factory(get_user_model(), fields='__all__')
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
(_('Extended profile'), {'fields': ('organization', 'profile',
'position', 'voice', 'fax',
'delivery', 'city', 'area',
'zipcode', 'country',
'keywords')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2')}
),
)
form = ProfileChangeForm
add_form = ProfileCreationForm
change_password_form = AdminPasswordChangeForm
list_display = (
'id', 'username', 'organization',
'email', 'first_name', 'last_name',
'is_staff', 'is_active')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = (
'username', 'organization', 'profile',
'first_name', 'last_name', 'email')
# readonly_fields = ("groups", )
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
actions = [set_user_and_group_layer_permission]
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(ProfileAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.utils.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(ProfileAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
return [ # '',
url(r'^(\d+)/password/$',
self.admin_site.admin_view(self.user_change_password))
] + super(ProfileAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(ProfileAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(ProfileAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
user = get_object_or_404(self.get_queryset(request), pk=id)
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
change_message = self.construct_change_message(
request,
form,
None)
self.log_change(request, user, change_message)
msg = _('Password changed successfully.')
messages.success(request, msg)
return HttpResponseRedirect('..')
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
'is_popup': IS_POPUP_VAR in request.GET,
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
return TemplateResponse(request,
self.change_user_password_template or
'admin/auth/user/change_password.html',
context) # , using=self.admin_site.name)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
mutable = request.POST._mutable
request.POST._mutable = True
request.POST['_continue'] = 1
request.POST._mutable = mutable
return super(ProfileAdmin, self).response_add(request, obj,
post_url_continue)
admin.site.register(Profile, ProfileAdmin) | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/summernote/lang/summernote-hr-HR.js | (function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else {
var a = factory();
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})(self, function() {
return /******/ (() => { // webpackBootstrap
var __webpack_exports__ = {};
(function ($) {
$.extend($.summernote.lang, {
'hr-HR': {
font: {
bold: 'Podebljano',
italic: 'Kurziv',
underline: 'Podvučeno',
clear: 'Ukloni stilove fonta',
height: 'Visina linije',
name: 'Font Family',
strikethrough: 'Precrtano',
subscript: 'Subscript',
superscript: 'Superscript',
size: 'Veličina fonta'
},
image: {
image: 'Slika',
insert: 'Ubaci sliku',
resizeFull: 'Puna veličina',
resizeHalf: 'Umanji na 50%',
resizeQuarter: 'Umanji na 25%',
floatLeft: 'Poravnaj lijevo',
floatRight: 'Poravnaj desno',
floatNone: 'Bez poravnanja',
shapeRounded: 'Shape: Rounded',
shapeCircle: 'Shape: Circle',
shapeThumbnail: 'Shape: Thumbnail',
shapeNone: 'Shape: None',
dragImageHere: 'Povuci sliku ovdje',
dropImage: 'Drop image or Text',
selectFromFiles: 'Izaberi iz datoteke',
maximumFileSize: 'Maximum file size',
maximumFileSizeError: 'Maximum file size exceeded.',
url: 'Adresa slike',
remove: 'Ukloni sliku',
original: 'Original'
},
video: {
video: 'Video',
videoLink: 'Veza na video',
insert: 'Ubaci video',
url: 'URL video',
providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion ili Youku)'
},
link: {
link: 'Veza',
insert: 'Ubaci vezu',
unlink: 'Ukloni vezu',
edit: 'Uredi',
textToDisplay: 'Tekst za prikaz',
url: 'Internet adresa',
openInNewWindow: 'Otvori u novom prozoru'
},
table: {
table: 'Tablica',
addRowAbove: 'Add row above',
addRowBelow: 'Add row below',
addColLeft: 'Add column left',
addColRight: 'Add column right',
delRow: 'Delete row',
delCol: 'Delete column',
delTable: 'Delete table'
},
hr: {
insert: 'Ubaci horizontalnu liniju'
},
style: {
style: 'Stil',
p: 'pni',
blockquote: 'Citat',
pre: 'Kôd',
h1: 'Naslov 1',
h2: 'Naslov 2',
h3: 'Naslov 3',
h4: 'Naslov 4',
h5: 'Naslov 5',
h6: 'Naslov 6'
},
lists: {
unordered: 'Obična lista',
ordered: 'Numerirana lista'
},
options: {
help: 'Pomoć',
fullscreen: 'Preko cijelog ekrana',
codeview: 'Izvorni kôd'
},
paragraph: {
paragraph: 'Paragraf',
outdent: 'Smanji uvlačenje',
indent: 'Povećaj uvlačenje',
left: 'Poravnaj lijevo',
center: 'Centrirano',
right: 'Poravnaj desno',
justify: 'Poravnaj obostrano'
},
color: {
recent: 'Posljednja boja',
more: 'Više boja',
background: 'Boja pozadine',
foreground: 'Boja teksta',
transparent: 'Prozirna',
setTransparent: 'Prozirna',
reset: 'Poništi',
resetToDefault: 'Podrazumijevana'
},
shortcut: {
shortcuts: 'Prečice s tipkovnice',
close: 'Zatvori',
textFormatting: 'Formatiranje teksta',
action: 'Akcija',
paragraphFormatting: 'Formatiranje paragrafa',
documentStyle: 'Stil dokumenta',
extraKeys: 'Dodatne kombinacije'
},
help: {
'insertParagraph': 'Insert Paragraph',
'undo': 'Undoes the last command',
'redo': 'Redoes the last command',
'tab': 'Tab',
'untab': 'Untab',
'bold': 'Set a bold style',
'italic': 'Set a italic style',
'underline': 'Set a underline style',
'strikethrough': 'Set a strikethrough style',
'removeFormat': 'Clean a style',
'justifyLeft': 'Set left align',
'justifyCenter': 'Set center align',
'justifyRight': 'Set right align',
'justifyFull': 'Set full align',
'insertUnorderedList': 'Toggle unordered list',
'insertOrderedList': 'Toggle ordered list',
'outdent': 'Outdent on current paragraph',
'indent': 'Indent on current paragraph',
'formatPara': 'Change current block\'s format as a paragraph(P tag)',
'formatH1': 'Change current block\'s format as H1',
'formatH2': 'Change current block\'s format as H2',
'formatH3': 'Change current block\'s format as H3',
'formatH4': 'Change current block\'s format as H4',
'formatH5': 'Change current block\'s format as H5',
'formatH6': 'Change current block\'s format as H6',
'insertHorizontalRule': 'Insert horizontal rule',
'linkDialog.show': 'Show Link Dialog'
},
history: {
undo: 'Poništi',
redo: 'Ponovi'
},
specialChar: {
specialChar: 'SPECIAL CHARACTERS',
select: 'Select Special characters'
}
}
});
})(jQuery);
/******/ return __webpack_exports__;
/******/ })()
;
});
//# sourceMappingURL=summernote-hr-HR.js.map | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/semver/classes/range.js | class Range {
constructor (range, options) {
options = parseOptions(options)
if (range instanceof Range) {
if (
range.loose === !!options.loose &&
range.includePrerelease === !!options.includePrerelease
) {
return range
} else {
return new Range(range.raw, options)
}
}
if (range instanceof Comparator) {
// just put it in the set and return
this.raw = range.value
this.set = [[range]]
this.format()
return this
}
this.options = options
this.loose = !!options.loose
this.includePrerelease = !!options.includePrerelease
// First reduce all whitespace as much as possible so we do not have to rely
// on potentially slow regexes like \s*. This is then stored and used for
// future error messages as well.
this.raw = range
.trim()
.split(/\s+/)
.join(' ')
// First, split on ||
this.set = this.raw
.split('||')
// map the range to a 2d array of comparators
.map(r => this.parseRange(r.trim()))
// throw out any comparator lists that are empty
// this generally means that it was not a valid range, which is allowed
// in loose mode, but will still throw if the WHOLE range is invalid.
.filter(c => c.length)
if (!this.set.length) {
throw new TypeError(`Invalid SemVer Range: ${this.raw}`)
}
// if we have any that are not the null set, throw out null sets.
if (this.set.length > 1) {
// keep the first one, in case they're all null sets
const first = this.set[0]
this.set = this.set.filter(c => !isNullSet(c[0]))
if (this.set.length === 0) {
this.set = [first]
} else if (this.set.length > 1) {
// if we have any that are *, then the range is just *
for (const c of this.set) {
if (c.length === 1 && isAny(c[0])) {
this.set = [c]
break
}
}
}
}
this.format()
}
format () {
this.range = this.set
.map((comps) => comps.join(' ').trim())
.join('||')
.trim()
return this.range
}
toString () {
return this.range
}
parseRange (range) {
// memoize range parsing for performance.
// this is a very hot path, and fully deterministic.
const memoOpts =
(this.options.includePrerelease && FLAG_INCLUDE_PRERELEASE) |
(this.options.loose && FLAG_LOOSE)
const memoKey = memoOpts + ':' + range
const cached = cache.get(memoKey)
if (cached) {
return cached
}
const loose = this.options.loose
// `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4`
const hr = loose ? re[t.HYPHENRANGELOOSE] : re[t.HYPHENRANGE]
range = range.replace(hr, hyphenReplace(this.options.includePrerelease))
debug('hyphen replace', range)
// `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5`
range = range.replace(re[t.COMPARATORTRIM], comparatorTrimReplace)
debug('comparator trim', range)
// `~ 1.2.3` => `~1.2.3`
range = range.replace(re[t.TILDETRIM], tildeTrimReplace)
debug('tilde trim', range)
// `^ 1.2.3` => `^1.2.3`
range = range.replace(re[t.CARETTRIM], caretTrimReplace)
debug('caret trim', range)
// At this point, the range is completely trimmed and
// ready to be split into comparators.
let rangeList = range
.split(' ')
.map(comp => parseComparator(comp, this.options))
.join(' ')
.split(/\s+/)
// >=0.0.0 is equivalent to *
.map(comp => replaceGTE0(comp, this.options))
if (loose) {
// in loose mode, throw out any that are not valid comparators
rangeList = rangeList.filter(comp => {
debug('loose invalid filter', comp, this.options)
return !!comp.match(re[t.COMPARATORLOOSE])
})
}
debug('range list', rangeList)
// if any comparators are the null set, then replace with JUST null set
// if more than one comparator, remove any * comparators
// also, don't include the same comparator more than once
const rangeMap = new Map()
const comparators = rangeList.map(comp => new Comparator(comp, this.options))
for (const comp of comparators) {
if (isNullSet(comp)) {
return [comp]
}
rangeMap.set(comp.value, comp)
}
if (rangeMap.size > 1 && rangeMap.has('')) {
rangeMap.delete('')
}
const result = [...rangeMap.values()]
cache.set(memoKey, result)
return result
}
intersects (range, options) {
if (!(range instanceof Range)) {
throw new TypeError('a Range is required')
}
return this.set.some((thisComparators) => {
return (
isSatisfiable(thisComparators, options) &&
range.set.some((rangeComparators) => {
return (
isSatisfiable(rangeComparators, options) &&
thisComparators.every((thisComparator) => {
return rangeComparators.every((rangeComparator) => {
return thisComparator.intersects(rangeComparator, options)
})
})
)
})
)
})
}
// if ANY of the sets match ALL of its comparators, then pass
test (version) {
if (!version) {
return false
}
if (typeof version === 'string') {
try {
version = new SemVer(version, this.options)
} catch (er) {
return false
}
}
for (let i = 0; i < this.set.length; i++) {
if (testSet(this.set[i], version, this.options)) {
return true
}
}
return false
}
}
module.exports = Range
const LRU = require('lru-cache')
const cache = new LRU({ max: 1000 })
const parseOptions = require('../internal/parse-options')
const Comparator = require('./comparator')
const debug = require('../internal/debug')
const SemVer = require('./semver')
const {
safeRe: re,
t,
comparatorTrimReplace,
tildeTrimReplace,
caretTrimReplace,
} = require('../internal/re')
const { FLAG_INCLUDE_PRERELEASE, FLAG_LOOSE } = require('../internal/constants')
const isNullSet = c => c.value === '<0.0.0-0'
const isAny = c => c.value === ''
// take a set of comparators and determine whether there
// exists a version which can satisfy it
const isSatisfiable = (comparators, options) => {
let result = true
const remainingComparators = comparators.slice()
let testComparator = remainingComparators.pop()
while (result && remainingComparators.length) {
result = remainingComparators.every((otherComparator) => {
return testComparator.intersects(otherComparator, options)
})
testComparator = remainingComparators.pop()
}
return result
}
// comprised of xranges, tildes, stars, and gtlt's at this point.
// already replaced the hyphen ranges
// turn into a set of JUST comparators.
const parseComparator = (comp, options) => {
debug('comp', comp, options)
comp = replaceCarets(comp, options)
debug('caret', comp)
comp = replaceTildes(comp, options)
debug('tildes', comp)
comp = replaceXRanges(comp, options)
debug('xrange', comp)
comp = replaceStars(comp, options)
debug('stars', comp)
return comp
}
const isX = id => !id || id.toLowerCase() === 'x' || id === '*'
// ~, ~> --> * (any, kinda silly)
// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0-0
// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0-0
// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0-0
// ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0-0
// ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0-0
// ~0.0.1 --> >=0.0.1 <0.1.0-0
const replaceTildes = (comp, options) => {
return comp
.trim()
.split(/\s+/)
.map((c) => replaceTilde(c, options))
.join(' ')
}
const replaceTilde = (comp, options) => {
const r = options.loose ? re[t.TILDELOOSE] : re[t.TILDE]
return comp.replace(r, (_, M, m, p, pr) => {
debug('tilde', comp, _, M, m, p, pr)
let ret
if (isX(M)) {
ret = ''
} else if (isX(m)) {
ret = `>=${M}.0.0 <${+M + 1}.0.0-0`
} else if (isX(p)) {
// ~1.2 == >=1.2.0 <1.3.0-0
ret = `>=${M}.${m}.0 <${M}.${+m + 1}.0-0`
} else if (pr) {
debug('replaceTilde pr', pr)
ret = `>=${M}.${m}.${p}-${pr
} <${M}.${+m + 1}.0-0`
} else {
// ~1.2.3 == >=1.2.3 <1.3.0-0
ret = `>=${M}.${m}.${p
} <${M}.${+m + 1}.0-0`
}
debug('tilde return', ret)
return ret
})
}
// ^ --> * (any, kinda silly)
// ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0-0
// ^2.0, ^2.0.x --> >=2.0.0 <3.0.0-0
// ^1.2, ^1.2.x --> >=1.2.0 <2.0.0-0
// ^1.2.3 --> >=1.2.3 <2.0.0-0
// ^1.2.0 --> >=1.2.0 <2.0.0-0
// ^0.0.1 --> >=0.0.1 <0.0.2-0
// ^0.1.0 --> >=0.1.0 <0.2.0-0
const replaceCarets = (comp, options) => {
return comp
.trim()
.split(/\s+/)
.map((c) => replaceCaret(c, options))
.join(' ')
}
const replaceCaret = (comp, options) => {
debug('caret', comp, options)
const r = options.loose ? re[t.CARETLOOSE] : re[t.CARET]
const z = options.includePrerelease ? '-0' : ''
return comp.replace(r, (_, M, m, p, pr) => {
debug('caret', comp, _, M, m, p, pr)
let ret
if (isX(M)) {
ret = ''
} else if (isX(m)) {
ret = `>=${M}.0.0${z} <${+M + 1}.0.0-0`
} else if (isX(p)) {
if (M === '0') {
ret = `>=${M}.${m}.0${z} <${M}.${+m + 1}.0-0`
} else {
ret = `>=${M}.${m}.0${z} <${+M + 1}.0.0-0`
}
} else if (pr) {
debug('replaceCaret pr', pr)
if (M === '0') {
if (m === '0') {
ret = `>=${M}.${m}.${p}-${pr
} <${M}.${m}.${+p + 1}-0`
} else {
ret = `>=${M}.${m}.${p}-${pr
} <${M}.${+m + 1}.0-0`
}
} else {
ret = `>=${M}.${m}.${p}-${pr
} <${+M + 1}.0.0-0`
}
} else {
debug('no pr')
if (M === '0') {
if (m === '0') {
ret = `>=${M}.${m}.${p
}${z} <${M}.${m}.${+p + 1}-0`
} else {
ret = `>=${M}.${m}.${p
}${z} <${M}.${+m + 1}.0-0`
}
} else {
ret = `>=${M}.${m}.${p
} <${+M + 1}.0.0-0`
}
}
debug('caret return', ret)
return ret
})
}
const replaceXRanges = (comp, options) => {
debug('replaceXRanges', comp, options)
return comp
.split(/\s+/)
.map((c) => replaceXRange(c, options))
.join(' ')
}
const replaceXRange = (comp, options) => {
comp = comp.trim()
const r = options.loose ? re[t.XRANGELOOSE] : re[t.XRANGE]
return comp.replace(r, (ret, gtlt, M, m, p, pr) => {
debug('xRange', comp, ret, gtlt, M, m, p, pr)
const xM = isX(M)
const xm = xM || isX(m)
const xp = xm || isX(p)
const anyX = xp
if (gtlt === '=' && anyX) {
gtlt = ''
}
// if we're including prereleases in the match, then we need
// to fix this to -0, the lowest possible prerelease value
pr = options.includePrerelease ? '-0' : ''
if (xM) {
if (gtlt === '>' || gtlt === '<') {
// nothing is allowed
ret = '<0.0.0-0'
} else {
// nothing is forbidden
ret = '*'
}
} else if (gtlt && anyX) {
// we know patch is an x, because we have any x at all.
// replace X with 0
if (xm) {
m = 0
}
p = 0
if (gtlt === '>') {
// >1 => >=2.0.0
// >1.2 => >=1.3.0
gtlt = '>='
if (xm) {
M = +M + 1
m = 0
p = 0
} else {
m = +m + 1
p = 0
}
} else if (gtlt === '<=') {
// <=0.7.x is actually <0.8.0, since any 0.7.x should
// pass. Similarly, <=7.x is actually <8.0.0, etc.
gtlt = '<'
if (xm) {
M = +M + 1
} else {
m = +m + 1
}
}
if (gtlt === '<') {
pr = '-0'
}
ret = `${gtlt + M}.${m}.${p}${pr}`
} else if (xm) {
ret = `>=${M}.0.0${pr} <${+M + 1}.0.0-0`
} else if (xp) {
ret = `>=${M}.${m}.0${pr
} <${M}.${+m + 1}.0-0`
}
debug('xRange return', ret)
return ret
})
}
// Because * is AND-ed with everything else in the comparator,
// and '' means "any version", just remove the *s entirely.
const replaceStars = (comp, options) => {
debug('replaceStars', comp, options)
// Looseness is ignored here. star is always as loose as it gets!
return comp
.trim()
.replace(re[t.STAR], '')
}
const replaceGTE0 = (comp, options) => {
debug('replaceGTE0', comp, options)
return comp
.trim()
.replace(re[options.includePrerelease ? t.GTE0PRE : t.GTE0], '')
}
// This function is passed to string.replace(re[t.HYPHENRANGE])
// M, m, patch, prerelease, build
// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5
// 1.2.3 - 3.4 => >=1.2.0 <3.5.0-0 Any 3.4.x will do
// 1.2 - 3.4 => >=1.2.0 <3.5.0-0
const hyphenReplace = incPr => ($0,
from, fM, fm, fp, fpr, fb,
to, tM, tm, tp, tpr, tb) => {
if (isX(fM)) {
from = ''
} else if (isX(fm)) {
from = `>=${fM}.0.0${incPr ? '-0' : ''}`
} else if (isX(fp)) {
from = `>=${fM}.${fm}.0${incPr ? '-0' : ''}`
} else if (fpr) {
from = `>=${from}`
} else {
from = `>=${from}${incPr ? '-0' : ''}`
}
if (isX(tM)) {
to = ''
} else if (isX(tm)) {
to = `<${+tM + 1}.0.0-0`
} else if (isX(tp)) {
to = `<${tM}.${+tm + 1}.0-0`
} else if (tpr) {
to = `<=${tM}.${tm}.${tp}-${tpr}`
} else if (incPr) {
to = `<${tM}.${tm}.${+tp + 1}-0`
} else {
to = `<=${to}`
}
return `${from} ${to}`.trim()
}
const testSet = (set, version, options) => {
for (let i = 0; i < set.length; i++) {
if (!set[i].test(version)) {
return false
}
}
if (version.prerelease.length && !options.includePrerelease) {
// Find the set of versions that are allowed to have prereleases
// For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0
// That should allow `1.2.3-pr.2` to pass.
// However, `1.2.4-alpha.notready` should NOT be allowed,
// even though it's within the range set by the comparators.
for (let i = 0; i < set.length; i++) {
debug(set[i].semver)
if (set[i].semver === Comparator.ANY) {
continue
}
if (set[i].semver.prerelease.length > 0) {
const allowed = set[i].semver
if (allowed.major === version.major &&
allowed.minor === version.minor &&
allowed.patch === version.patch) {
return true
}
}
}
// Version has a -pre, but it's not one of the ones we like.
return false
}
return true
} | PypiClean |
/Blackboard_LMS_CLI-1.0.9-py3-none-any.whl/bbcli/services/courses_services.py | import json
from typing import Dict, List
import requests
from datetime import date
from bbcli.utils.URL_builder import URL_builder
url_builder = URL_builder()
# Commented-out code depends on whether we want all courses or just the ones from the most recent semesters
# def list_courses(session: requests.Session, user_name: str) -> Any:
# terms = get_terms(session)
# sort_terms(terms)
# term_1 = terms[len(terms) - 1]
# term_2 = terms[len(terms) - 2]
# course_memberships = get_course_memberships(session, user_name)
# courses = get_courses_from_course_memberships(session, course_memberships)
# course_list = []
# for course in courses:
# if course['termId'] == term_1['id'] or course['termId'] == term_2['id']:
# course_list.append(course)
# else:
# break
# return course_list
def list_all_courses(session: requests.Session, user_name: str) -> List:
course_memberships = get_course_memberships(session, user_name)
course_list = get_courses_from_course_memberships(session, course_memberships)
return course_list
def list_course(session: requests.Session, course_id: str) -> Dict:
url = url_builder.base_v3().add_courses().add_id(course_id).create()
response = session.get(url)
response.raise_for_status()
return json.loads(response.text)
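# Rough usage sketch, assuming an already-authenticated requests.Session
# against the Blackboard REST API plus a valid user name and course id
# (the id below is hypothetical):
#
#   session = requests.Session()
#   # ... authenticate the session against Blackboard here ...
#   courses = list_all_courses(session, "student_username")
#   course = list_course(session, "_123_1")
#
# Both functions call response.raise_for_status(), so HTTP errors surface
# as requests.HTTPError.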
"""
HELPER FUNCTIONS
"""
# def take_start_date(elem):
# return date.fromisoformat(elem['availability']['duration']['start'].split('T')[0])
# def get_terms(session: requests.Session):
# url = url_builder.base_v1().add_terms().create()
# terms = session.get(url)
# terms.raise_for_status()
# terms = json.loads(terms.text)['results']
# return terms
# def sort_terms(terms):
# # Sort terms by start date to get the two most recent semesters to determine which courses to show
# for term in terms:
# if term['availability']['duration']['type'] != 'DateRange':
# terms.remove(term)
# terms.sort(key=take_start_date)
def get_course_memberships(session: requests.Session, user_name: str) -> List:
url = url_builder.base_v1().add_users().add_id(
id=user_name, id_type='userName').add_courses().create()
course_memberships = session.get(url)
course_memberships.raise_for_status()
course_memberships = json.loads(course_memberships.text)['results']
return course_memberships
def get_courses_from_course_memberships(session: requests.Session, course_memberships: List) -> List:
courses = []
for course in course_memberships:
url = url_builder.base_v3().add_courses().add_id(
course['courseId']).create()
response = session.get(url)
response.raise_for_status()
response = json.loads(response.text)
if response['availability']['available'] == 'Yes':
courses.append(response)
return courses | PypiClean |
/KqlmagicCustom-0.1.114.post13-py3-none-any.whl/Kqlmagic/palette.py |
import colorsys
from itertools import cycle
import re
from typing import List
from .dependencies import Dependencies
from .my_utils import is_collection
class Color(object):
def __init__(self, rgb_color, name=None, **kwargs):
self.color = rgb_color
self.name = name or rgb_color
def _repr_html_(self):
return self._to_html()
def _to_html(self):
c = f'<div style="background-color:{self.color};height:20px;width:20px;display:inline-block;"></div>'
return f'<div style="display:inline-block;padding:10px;"><div>{self.name}</div>{c}</div>'
def __repr__(self):
return self.color
class Palette(list):
def __init__(self, palette_name=None, n_colors=None, desaturation=None, rgb_palette=None, range_start=None, to_reverse=False, **kwargs):
self.name = palette_name or Palettes.get_default_pallete_name()
self.n_colors = (n_colors or Palettes.DEFAULT_N_COLORS) if rgb_palette is None else len(rgb_palette)
self.desaturation = desaturation or Palettes.DEFAULT_DESATURATION
self.kwargs = kwargs
self.range_start = range_start
parsed = self.parse(self.name)
self.name = parsed.get("name") or self.name
if rgb_palette is None:
rgb_palette = parsed.get("rgb_palette")
if rgb_palette is None:
rgb_float_pallete = self._get_color_palette(name=parsed.get("base_name"), n_colors=self.n_colors, desaturation=self.desaturation)
rgb_palette = ["rgb" + str((int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255))) for rgb in rgb_float_pallete]
if parsed.get("slice"):
rgb_palette = rgb_palette.__getitem__(parsed.get("slice"))
if parsed.get("reversed") is not None and parsed.get("reversed"):
rgb_palette = list(reversed(rgb_palette))
if to_reverse:
rgb_palette = list(reversed(rgb_palette))
super(Palette, self).__init__()
self.extend(rgb_palette)
def _to_html(self, add_details_to_name=True):
name = self.name
if self.range_start is not None:
name = f"{name}[{self.range_start}:{self.range_start + len(self)}]"
if add_details_to_name:
desaturation_details = ""
if self.desaturation is not None and self.desaturation > 0 and self.desaturation < 1.0:
desaturation_details = f", desaturation {self.desaturation}"
name = f"{name} ({self.n_colors} colors{desaturation_details})"
s_s = ""
for color in self:
s_s = f'{s_s}<div style="background-color:{color};height:20px;width:20px;display:inline-block;"></div>'
return f'<div style="display:inline-block;padding:10px;"><div>{name}</div>{s_s}</div>'
def __getitem__(self, key):
item = super(Palette, self).__getitem__(key)
if isinstance(key, slice):
range_start = min((key.start or 0), len(self)) + (self.range_start or 0)
return Palette(palette_name=self.name, desaturation=self.desaturation, rgb_palette=item, range_start=range_start, **self.kwargs)
else:
return Color(item, name=f"{self.name}[{(self.range_start or 0) + key}]")
def _repr_html_(self):
return self._to_html()
@classmethod
def parse(cls, name):
name = name.strip()
reverse = name.endswith("_r")
base_name = name[:-2] if reverse else name
rgb_palette = None
range = None
if base_name.endswith("]"):
start = base_name.rfind("[")
# slice
if start > 0:
se_parts = [value.strip() for value in base_name[start + 1: -1].split(":")]
if len(se_parts) == 2:
try:
range = slice(*[int(value) if value else None for value in se_parts])
base_name = base_name[:start]
except:
pass
# custom
if is_collection(base_name, "["):
rgb_palette = eval(base_name.lower().replace("-", "").replace("_", ""))
if not isinstance(rgb_palette, list) or len(rgb_palette) == 0:
raise ValueError("invlaid custom palette syntax, should be a comma separate list.'[\"rgb(r,g,b)\",...]'")
for rgb in rgb_palette:
if not isinstance(rgb, str) or not rgb.startswith("rgb"):
raise ValueError("invlaid custom palette syntax, each item must have a 'rgb' prefix.'[\"rgb(r,g,b)\",\"rgb(...)\",...]'")
color_list = eval(rgb[3:])
if len(color_list) != 3:
raise ValueError("invlaid custom palette syntax, each color must be composed of a list of 3 number: \"rgb(r,g,b)\"")
for color in color_list:
if not isinstance(color, int) or color < 0 or color > 255:
raise ValueError("invlaid custom palette syntax, each basic color (r,g,b) must between 0 to 255")
name = name.lower().replace("-", "").replace("_", "").replace(" ", "")
base_name = None
return {"name": name, "base_name": base_name, "rgb_palette": rgb_palette, "reversed": reverse, "slice": range}
@classmethod
def validate_palette_name(cls, name):
parsed = cls.parse(name)
if parsed.get("rgb_palette") is None and parsed.get("base_name") not in Palettes.get_all_pallete_names():
raise ValueError(
f"must be a known palette name or custom palette (see option -popup_palettes) , but a value of {name} was specified."
)
@classmethod
def validate_palette_desaturation(cls, desaturation):
if desaturation > 1 or desaturation < 0:
raise ValueError(f"must be between 0 and 1, but a value of {str(desaturation)} was specified.")
@classmethod
def validate_palette_colors(cls, n_colors):
if n_colors < 1:
raise ValueError(f"must be greater or equal than 1, but a value of {str(n_colors)} was specified.")
@classmethod
def _get_color_palette(cls, name=None, n_colors=1, desaturation=1):
if name in Palettes.DEFAULT_PALETTES:
default_palette = Palettes.DEFAULT_PALETTES[name]
pal_cycle = cycle(default_palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
rgb_float_pallete = list(map(cls._rrggbb_to_rgb, palette))
else:
mplcmap = Dependencies.get_module('matplotlib.cm')
mplcol = Dependencies.get_module('matplotlib.colors')
if name in Palettes.MATPLOTLIB_DISTINCT_PALETTES:
num = Palettes.MATPLOTLIB_DISTINCT_PALETTES[name]
vector = [idx / (num - 1) for idx in range(0,num)][:n_colors]
else:
num = int(n_colors) + 2
vector = [idx / (num - 1) for idx in range(0,num)][1:-1]
color_map = mplcmap.get_cmap(name)
palette = map(tuple, color_map(vector)[:, :3])
rgb_float_pallete = list(map(mplcol.colorConverter.to_rgb, palette))
rgb_float_pallete_desaturated = cls._desaturate_palette(rgb_float_pallete, desaturation)
return rgb_float_pallete_desaturated
@classmethod
def _rrggbb_to_rgb(cls, hex_color):
"""Convert color in hex format #rrggbb or #rgb to an RGB color."""
if isinstance(hex_color, str):
# hex color in #rrggbb format.
match = re.match(r"\A#[a-fA-F0-9]{6}\Z", hex_color)
if match:
return (tuple(int(value, 16) / 255 for value in [hex_color[1:3], hex_color[3:5], hex_color[5:7]]))
# hex color in #rgb format, shorthand for #rrggbb.
match = re.match(r"\A#[a-fA-F0-9]{3}\Z", hex_color)
if match:
return (tuple(int(value, 16) / 255 for value in [hex_color[1] * 2, hex_color[2] * 2, hex_color[3] * 2]))
return hex_color
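# For example: _rrggbb_to_rgb("#ff0000") yields (1.0, 0.0, 0.0), the shorthand "#f00"
# expands to the same value, and inputs that match neither pattern are returned
# unchanged.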
@classmethod
def _desaturate_palette(cls, reg_float_palette, desaturation):
if not 0 <= desaturation <= 1:
return [*reg_float_palette]
return [cls._desaturate_rgb(rgb, desaturation) for rgb in reg_float_palette]
@classmethod
def _desaturate_rgb(cls, rgb, desaturation):
if not 0 <= desaturation <= 1:
return [*rgb]
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
saturation *= desaturation
saturated_rgb = colorsys.hls_to_rgb(hue, lightness, saturation)
return saturated_rgb
class Palettes(list):
# mplcmap = Dependencies.get_module('matplotlib.cm')
# mplcol = Dependencies.get_module('matplotlib.colors')
DEFAULT_DESATURATION = 1.0
DEFAULT_N_COLORS = 10
DEFAULT_NAME = "tab10" # should be from BASE_PALETTE_NAMES
DEFAULT_ALT_NAME = "pastel" # should be from DEFAULT_PALETTES
# DEFAULT_PALETTES: old matplotlib default palette
DEFAULT_PALETTES = dict(
deep=["#4C72B0", "#DD8452", "#55A868", "#C44E52", "#8172B3",
"#937860", "#DA8BC3", "#8C8C8C", "#CCB974", "#64B5CD"],
deep6=["#4C72B0", "#55A868", "#C44E52",
"#8172B3", "#CCB974", "#64B5CD"],
muted=["#4878D0", "#EE854A", "#6ACC64", "#D65F5F", "#956CB4",
"#8C613C", "#DC7EC0", "#797979", "#D5BB67", "#82C6E2"],
muted6=["#4878D0", "#6ACC64", "#D65F5F",
"#956CB4", "#D5BB67", "#82C6E2"],
pastel=["#A1C9F4", "#FFB482", "#8DE5A1", "#FF9F9B", "#D0BBFF",
"#DEBB9B", "#FAB0E4", "#CFCFCF", "#FFFEA3", "#B9F2F0"],
pastel6=["#A1C9F4", "#8DE5A1", "#FF9F9B",
"#D0BBFF", "#FFFEA3", "#B9F2F0"],
bright=["#023EFF", "#FF7C00", "#1AC938", "#E8000B", "#8B2BE2",
"#9F4800", "#F14CC1", "#A3A3A3", "#FFC400", "#00D7FF"],
bright6=["#023EFF", "#1AC938", "#E8000B",
"#8B2BE2", "#FFC400", "#00D7FF"],
dark=["#001C7F", "#B1400D", "#12711C", "#8C0800", "#591E71",
"#592F0D", "#A23582", "#3C3C3C", "#B8850A", "#006374"],
dark6=["#001C7F", "#12711C", "#8C0800",
"#591E71", "#B8850A", "#006374"],
colorblind=["#0173B2", "#DE8F05", "#029E73", "#D55E00", "#CC78BC",
"#CA9161", "#FBAFE4", "#949494", "#ECE133", "#56B4E9"],
colorblind6=["#0173B2", "#029E73", "#D55E00",
"#CC78BC", "#ECE133", "#56B4E9"]
)
# matplotlib clormap + DEFAULT_PALETTES
BASE_PALETTE_NAMES = [
"deep",
"muted",
"bright",
"pastel",
"dark",
"colorblind",
"Accent",
"Blues",
"BrBG",
"BuGn",
"BuPu",
"CMRmap",
"Dark2",
"GnBu",
"Greens",
"Greys",
"OrRd",
"Oranges",
"PRGn",
"Paired",
"Pastel1",
"Pastel2",
"PiYG",
"PuBu",
"PuBuGn",
"PuOr",
"PuRd",
"Purples",
"RdBu",
"RdGy",
"RdPu",
"RdYlBu",
"RdYlGn",
"Reds",
"Set1",
"Set2",
"Set3",
"Spectral",
"Wistia",
"YlGn",
"YlGnBu",
"YlOrBr",
"YlOrRd",
"afmhot",
"autumn",
"binary",
"bone",
"brg",
"bwr",
"cividis",
"cool",
"coolwarm",
"copper",
"cubehelix",
"flag",
"gist_earth",
"gist_gray",
"gist_heat",
"gist_ncar",
"gist_rainbow",
"gist_stern",
"gist_yarg",
"gnuplot",
"gnuplot2",
"gray",
"hot",
"hsv",
"inferno",
"magma",
"nipy_spectral",
"ocean",
"pink",
"plasma",
"prism",
"rainbow",
"seismic",
"spring",
"summer",
"tab10",
"tab20",
"tab20b",
"tab20c",
"terrain",
"viridis",
"winter",
]
MATPLOTLIB_DISTINCT_PALETTES = {
"tab10": 10, "tab20": 20, "tab20b": 20, "tab20c": 20,
"Set1": 9, "Set2": 8, "Set3": 12,
"Accent": 8, "Paired": 12,
"Pastel1": 9, "Pastel2": 8, "Dark2": 8,
}
all_palette_names = []
default_palette_name = None
@classmethod
def get_all_pallete_names(cls)->List[str]:
if len(cls.all_palette_names) == 0:
mplcmap = Dependencies.get_module('matplotlib.cm', dont_throw=True)
mplcol = Dependencies.get_module('matplotlib.colors', dont_throw=True)
if mplcmap and mplcol:
cls.all_palette_names = cls.BASE_PALETTE_NAMES
else:
cls.all_palette_names = list(cls.DEFAULT_PALETTES.keys())
return cls.all_palette_names
@classmethod
def get_default_pallete_name(cls)->str:
if cls.default_palette_name is None:
if cls.DEFAULT_NAME in cls.get_all_pallete_names():
cls.default_palette_name = cls.DEFAULT_NAME
else:
cls.default_palette_name = cls.DEFAULT_ALT_NAME
return cls.default_palette_name
def __init__(self, n_colors=None, desaturation=None, palette_list=None, to_reverse=False, **kwargs):
self.n_colors = n_colors or Palettes.DEFAULT_N_COLORS
self.desaturation = desaturation or Palettes.DEFAULT_DESATURATION
self.to_reverse = to_reverse
self.kwargs = kwargs
super(Palettes, self).__init__()
self.extend(palette_list or Palettes.get_all_pallete_names())
def __getitem__(self, key):
if isinstance(key, str):
key = self.index(key)
item = super(Palettes, self).__getitem__(key)
if isinstance(key, slice):
return Palettes(palette_list=item, desaturation=self.desaturation, n_colors=self.n_colors, to_reverse=self.to_reverse, **self.kwargs)
else:
return Palette(palette_name=item, desaturation=self.desaturation, n_colors=self.n_colors, to_reverse=self.to_reverse, **self.kwargs)
def _to_html(self):
n_colors = self.n_colors
desaturation = self.desaturation
suffix = f" (desaturation {str(desaturation)})" if desaturation is not None and desaturation != 1.0 and desaturation != 0 else ""
html_str = f'<div style="text-align:center"><h1>{n_colors} colors palettes{suffix}</h1></div>'
for name in self:
for suffix in [""]: # ['', '_r']:
s = Palette(palette_name=name + suffix, n_colors=n_colors, desaturation=desaturation, **self.kwargs)
html_str += s._to_html(add_details_to_name=False)
return html_str
def _repr_html_(self):
return self._to_html()
# plotly supports these CSS colors:
""" - A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
"""
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
# cmaps = [('Perceptually Uniform Sequential', [
# 'viridis', 'plasma', 'inferno', 'magma']),
# ('Sequential', [
# 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
# 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
# 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
# ('Sequential (2)', [
# 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
# 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
# 'hot', 'afmhot', 'gist_heat', 'copper']),
# ('Diverging', [
# 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
# 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
# ('Qualitative', [
# 'Pastel1', 'Pastel2', 'Paired', 'Accent',
# 'Dark2', 'Set1', 'Set2', 'Set3',
# 'tab10', 'tab20', 'tab20b', 'tab20c']),
# ('Miscellaneous', [
# 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
# 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
# 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]

/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/modules/utilities/autowrap.rst

===============
Autowrap Module
===============
The autowrap module works very well in tandem with the Indexed classes of the
:ref:`tensor_module`. Here is a simple example that shows how to set up a binary
routine that calculates a matrix-vector product.
>>> from diofant.utilities.autowrap import autowrap
>>> A, x, y = map(IndexedBase, ['A', 'x', 'y'])
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> instruction = Eq(y[i], A[i, j]*x[j])
>>> instruction
Eq(y[i], x[j]*A[i, j])
Because the code printers treat Indexed objects with repeated indices as a
summation, the above equality instance will be translated to low-level code for
a matrix-vector product. This is how you tell Diofant to generate the code,
compile it, and wrap it as a Python function:
>>> matvec = autowrap(instruction) # doctest: +SKIP
That's it. Now let's test it with some numpy arrays. The default wrapper
backend is f2py. The wrapper function it provides is set up to accept Python
lists, which it will silently convert to numpy arrays. So we can test the
matrix-vector product like this:
>>> M = [[0, 1],
... [1, 0]]
>>> matvec(M, [2, 3]) # doctest: +SKIP
[ 3. 2.]
Implementation details
======================
The autowrap module is implemented with a backend consisting of CodeWrapper
objects. The base class ``CodeWrapper`` takes care of details about module
name, filenames and options. It also contains the driver routine, which runs
through all steps in the correct order, and takes care of setting up and
removing the temporary working directory.
The actual compilation and wrapping are done by external resources, such as the
system-installed f2py command. The Cython backend runs a distutils setup script
in a subprocess. Subclasses of ``CodeWrapper`` take care of these
backend-dependent details.
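
The backend can also be selected explicitly. The following is only a sketch; it
assumes Diofant keeps the ``backend`` keyword of the upstream SymPy
implementation:

>>> matvec_cython = autowrap(instruction, backend='cython') # doctest: +SKIP
>>> matvec_cython(M, [2, 3]) # doctest: +SKIP
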
API Reference
=============
.. automodule:: diofant.utilities.autowrap
:members:

/Django_patch-2.2.19-py3-none-any.whl/django/contrib/gis/geos/prototypes/geom.py

from ctypes import POINTER, c_char_p, c_int, c_size_t, c_ubyte
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string,
)
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
# ### ctypes factory classes ###
class BinConstructor(GEOSFuncFactory):
"Generate a prototype for binary construction (HEX, WKB) GEOS routines."
argtypes = [c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# HEX & WKB output
class BinOutput(GEOSFuncFactory):
"Generate a prototype for the routines that return a sized string."
argtypes = [GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
class GeomOutput(GEOSFuncFactory):
"For GEOS routines that return a geometry."
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
class IntFromGeom(GEOSFuncFactory):
"Argument is a geometry, return type is an integer."
argtypes = [GEOM_PTR]
restype = c_int
errcheck = staticmethod(check_minus_one)
class StringFromGeom(GEOSFuncFactory):
"Argument is a Geometry, return type is a string."
argtypes = [GEOM_PTR]
restype = geos_char_p
errcheck = staticmethod(check_string)
# ### ctypes prototypes ###
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = IntFromGeom('GEOSNormalize')
geos_type = StringFromGeom('GEOSGeomType')
geos_typeid = IntFromGeom('GEOSGeomTypeId')
get_dims = GEOSFuncFactory('GEOSGeom_getDimensions', argtypes=[GEOM_PTR], restype=c_int)
get_num_coords = IntFromGeom('GEOSGetNumCoordinates')
get_num_geoms = IntFromGeom('GEOSGetNumGeometries')
# Geometry creation factories
create_point = GeomOutput('GEOSGeom_createPoint', argtypes=[CS_PTR])
create_linestring = GeomOutput('GEOSGeom_createLineString', argtypes=[CS_PTR])
create_linearring = GeomOutput('GEOSGeom_createLinearRing', argtypes=[CS_PTR])
# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = GeomOutput('GEOSGeom_createPolygon')
create_empty_polygon = GeomOutput('GEOSGeom_createEmptyPolygon')
create_collection = GeomOutput('GEOSGeom_createCollection')
# Ring routines
get_extring = GeomOutput('GEOSGetExteriorRing', argtypes=[GEOM_PTR])
get_intring = GeomOutput('GEOSGetInteriorRingN', argtypes=[GEOM_PTR, c_int])
get_nrings = IntFromGeom('GEOSGetNumInteriorRings')
# Collection Routines
get_geomn = GeomOutput('GEOSGetGeometryN', argtypes=[GEOM_PTR, c_int])
# Cloning
geom_clone = GEOSFuncFactory('GEOSGeom_clone', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# Destruction routine.
destroy_geom = GEOSFuncFactory('GEOSGeom_destroy', argtypes=[GEOM_PTR])
# SRID routines
geos_get_srid = GEOSFuncFactory('GEOSGetSRID', argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory('GEOSSetSRID', argtypes=[GEOM_PTR, c_int])

/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20.20/lib/util/class.js

/*
* MODIFICATIONS:
* * The name of the "constructor" method was changed from "init" to "_constructor"
* * Mixin Support using https://gist.github.com/1006243
* * Modified to be a require.js module
*/
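// A minimal usage sketch (hypothetical names, not part of Aloha itself):
// var Animal = Class.extend({ _constructor: function(name){ this.name = name; } });
// var Dog = Animal.extend({ bark: function(){ return this.name + ' says woof'; } });
// new Dog('Rex').bark(); // "Rex says woof"
// Passing several objects to extend() mixes them all into the same prototype.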
define(
[],
function(){
var initializing = false, fnTest = /xyz/.test(function(){xyz;}) ? /\b_super\b/ : /.*/;
// The base Class implementation (does nothing)
// By doing that, Class is available in the global namespace.
this.Class = function(){};
// Create a new Class that inherits from this class
Class.extend = function() {
var _super = this.prototype;
// Instantiate a base class (but only create the instance,
// don't run the init constructor)
initializing = true;
var prototype = new this();
initializing = false;
// Copy the properties over onto the new prototype
for(var i = 0; i < arguments.length; i++) {
var prop = arguments[i];
for (var name in prop) {
// Check if we're overwriting an existing function
prototype[name] = typeof prop[name] == "function" &&
typeof _super[name] == "function" && fnTest.test(prop[name]) ?
(function(name, fn){
return function() {
var tmp = this._super;
// Add a new ._super() method that is the same method
// but on the super-class
this._super = _super[name];
// The method only need to be bound temporarily, so we
// remove it when we're done executing
var ret = fn.apply(this, arguments);
this._super = tmp;
return ret;
};
})(name, prop[name]) :
prop[name];
}
}
// The dummy class constructor
function Class() {
// All construction is actually done in the _constructor method
if ( !initializing && this._constructor )
this._constructor.apply(this, arguments);
}
// Populate our constructed prototype object
Class.prototype = prototype;
// Enforce the constructor to be what we expect
Class.constructor = Class;
// And make this class extendable
Class.extend = arguments.callee;
return Class;
};
return this.Class;
});

/Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/utils.py

# DeepSpeed Team
import os
from typing import List
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.ops.adam import FusedAdam
from deepspeed.utils.nvtx import instrument_w_nvtx
from deepspeed.accelerator import get_accelerator
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
parameter_parallel_size = parameter_parallel_size or data_parallel_size
logger.info("data_parallel_size: %s, parameter_parallel_size: %s", data_parallel_size, parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(data_parallel_size // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = dist.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
class ZeRORuntimeException(Exception):
pass
ZERO_SUPPORTED_OPTIMIZERS = [
torch.optim.Adam, torch.optim.AdamW, FusedAdam, DeepSpeedCPUAdam, torch.optim.Adagrad, DeepSpeedCPUAdagrad
]
# Add apex FusedAdam to supported list if apex is installed
try:
import apex
if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'):
ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam)
except ImportError:
pass
def is_zero_supported_optimizer(optimizer):
if dist.get_rank() == 0:
logger.info(f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}')
return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS
def get_lst_from_rank0(lst: List[int]) -> List[int]:
"""
NOTE: creates both communication and synchronization overhead so should be used
sparingly
"""
lst_tensor = torch.tensor(
lst if dist.get_rank() == 0 else [-1] * len(lst),
dtype=int,
# device=get_accelerator().current_device_name(),
device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])),
requires_grad=False,
)
dist.broadcast(lst_tensor, src=0, async_op=False)
return list(lst_tensor.cpu().numpy())
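# A usage sketch (hypothetical values): if rank 0 calls get_lst_from_rank0([3, 5]),
# every rank ends up with [3, 5]; non-zero ranks only pass a placeholder list of the
# same length, which is overwritten by the broadcast from rank 0.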
@instrument_w_nvtx
def assert_ints_same_as_other_ranks(ints: List[int]) -> None:
"""
NOTE: creates both communication and synchronization overhead so should be
used sparingly
takes a list of ints from each rank and ensures that they are the same
across ranks, throwing an exception if they are not.
"""
rank0_ints = get_lst_from_rank0(ints)
if ints != rank0_ints:
raise RuntimeError(f"disagreement between rank0 and rank{dist.get_rank()}: "
f"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}") | PypiClean |
/Cantera-3.0.0b1-cp311-cp311-win_amd64.whl/cantera/ctml2yaml.py |
# This file is part of Cantera. See License.txt in the top-level directory or
# at https://cantera.org/license.txt for license and copyright information.
"""Convert legacy CTML input to YAML format.
There are two main entry points to this script, `main` and `convert`. The former is
used from the command line interface and parses the arguments passed. The latter
accepts either the name of the CTML input file or a string containing the CTML
content.
"""
from pathlib import Path
import sys
import re
import argparse
import xml.etree.ElementTree as etree
from email.utils import formatdate
import warnings
import copy
from typing import Any, Dict, Union, Iterable, Optional, List, Tuple, TypedDict
from typing import TYPE_CHECKING
import numpy as np
try:
from ruamel import yaml
except ImportError:
import ruamel_yaml as yaml # type: ignore
# yaml.version_info is a tuple with the three parts of the version
yaml_version = yaml.version_info
# We choose ruamel.yaml 0.15.34 as the minimum version
# since it is the highest version available in the Ubuntu
# 18.04 repositories and seems to work. Older versions such as
# 0.13.14 on CentOS7 and 0.10.23 on Ubuntu 16.04 raise an exception
# that they are missing the RoundTripRepresenter
yaml_min_version = (0, 15, 34)
if yaml_version < yaml_min_version:
raise RuntimeError(
"The minimum supported version of ruamel.yaml is 0.15.34. If you "
"installed ruamel.yaml from your operating system's package manager, "
"please install an updated version using pip or conda."
)
if TYPE_CHECKING:
QUANTITY = Union[float, str]
RK_EOS_DICT = TypedDict(
"RK_EOS_DICT",
{"a": List[QUANTITY], "b": QUANTITY, "binary-a": Dict[str, List[QUANTITY]]},
total=False,
)
DH_BETA_MATRIX = TypedDict(
"DH_BETA_MATRIX", {"species": List[str], "beta": QUANTITY}, total=False
)
ARRHENIUS_PARAMS = Dict[str, Union[str, QUANTITY]]
EFFICIENCY_PARAMS = Dict[str, float]
LINDEMANN_PARAMS = Union[str, ARRHENIUS_PARAMS, EFFICIENCY_PARAMS]
TROE_PARAMS = Dict[str, float]
SRI_PARAMS = Dict[str, float]
COVERAGE_PARAMS = Dict[str, ARRHENIUS_PARAMS]
ARRHENIUS_TYPE = Dict[str, ARRHENIUS_PARAMS]
INTERFACE_TYPE = Dict[
str, Union[ARRHENIUS_PARAMS, bool, str, COVERAGE_PARAMS, float]
]
NESTED_LIST_OF_FLOATS = List[List[float]]
CHEBYSHEV_TYPE = Dict[str, Union[List[float], NESTED_LIST_OF_FLOATS, str]]
PLOG_TYPE = Dict[str, Union[str, List[ARRHENIUS_PARAMS]]]
CHEMACT_TYPE = Dict[
str, Union[str, ARRHENIUS_PARAMS, EFFICIENCY_PARAMS, TROE_PARAMS]
]
LINDEMANN_TYPE = Dict[str, LINDEMANN_PARAMS]
TROE_TYPE = Dict[str, Union[LINDEMANN_PARAMS, TROE_PARAMS]]
THREEBODY_TYPE = Dict[str, Union[ARRHENIUS_PARAMS, EFFICIENCY_PARAMS]]
SRI_TYPE = Dict[str, Union[LINDEMANN_PARAMS, SRI_PARAMS]]
THERMO_POLY_TYPE = Union[List[List[float]], List[float]]
HKFT_THERMO_TYPE = Union[str, QUANTITY, List[QUANTITY]]
# The last Union[str, float] here is not a QUANTITY
HMW_THERMO_TYPE = Union[
str, QUANTITY, bool, Dict[str, Union[float, List[Union[str, float]]]]
]
BlockMap = yaml.comments.CommentedMap
def FlowMap(*args, **kwargs):
"""A YAML mapping that flows onto one line."""
m = yaml.comments.CommentedMap(*args, **kwargs)
m.fa.set_flow_style()
return m
def FlowList(*args, **kwargs):
"""A YAML sequence that flows onto one line."""
lst = yaml.comments.CommentedSeq(*args, **kwargs)
lst.fa.set_flow_style()
return lst
class MissingXMLNode(LookupError):
def __init__(self, message: str = "", node: Optional[etree.Element] = None):
"""Error raised when a required node is missing in the XML tree.
:param message:
The error message to be displayed to the user.
:param node:
The XML node from which the requested node is missing.
"""
if node is not None:
node_str = etree.tostring(node).decode("utf-8")
if message:
message += ": '" + node_str + "'"
else:
message = node_str
super().__init__(message)
class MissingXMLAttribute(LookupError):
def __init__(self, message: str = "", node: Optional[etree.Element] = None):
"""Error raised when a required attribute is missing in the XML node.
:param message:
The error message to be displayed to the user.
:param node:
The XML node from which the requested attribute is missing.
"""
if node is not None:
node_str = etree.tostring(node).decode("utf-8")
if message:
message += ": '" + node_str + "'"
else:
message = node_str
super().__init__(message)
class MissingNodeText(LookupError):
def __init__(self, message: str = "", node: Optional[etree.Element] = None):
"""Error raised when the text of an XML node is missing.
:param message:
The error message to be displayed to the user.
:param node:
The XML node from which the text is missing.
"""
if node is not None:
node_str = etree.tostring(node).decode("utf-8")
if message:
message += ": '" + node_str + "'"
else:
message = node_str
super().__init__(message)
# Improved float formatting requires Numpy >= 1.14
HAS_FMT_FLT_POS = hasattr(np, "format_float_positional")
def float2string(data: float) -> str:
"""Format a float into a string.
:param data: The floating point data to be formatted.
Uses *NumPy*'s ``format_float_positional()`` and ``format_float_scientific()`` if
they are available (requires ``numpy >= 1.14``). In that case, values with
magnitude between 0.01 and 10000 are formatted using ``format_float_positional()``
and other values are formatted using ``format_float_scientific()``. If those *NumPy*
functions are not available, returns the ``repr`` of the input.
"""
if not HAS_FMT_FLT_POS:
return repr(data)
if data == 0:
return "0.0"
elif 0.01 <= abs(data) < 10000:
return np.format_float_positional(data, trim="0")
else:
return np.format_float_scientific(data, trim="0")
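# A small illustration (assuming numpy >= 1.14 is installed): float2string(0.0) gives
# "0.0", float2string(123.456) gives "123.456", and values outside the [0.01, 10000)
# magnitude window, such as 1.0e-5, fall through to format_float_scientific.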
def represent_float(self: Any, data: Any) -> Any:
"""Format floating point numbers for ruamel YAML.
:param data:
The floating point data to be formatted.
Uses `float2string` to format the floating point input to a string, then inserts
the resulting string into the YAML tree as a scalar.
"""
if data != data:
value = ".nan"
elif data == self.inf_value:
value = ".inf"
elif data == -self.inf_value:
value = "-.inf"
else:
value = float2string(data)
return self.represent_scalar("tag:yaml.org,2002:float", value)
yaml.RoundTripRepresenter.add_representer(float, represent_float)
def get_float_or_quantity(node: etree.Element) -> "QUANTITY":
"""Process an XML node into a float value or a value with units.
:param node:
The XML node with a value in the text and optionally a units attribute.
Given XML nodes like:
.. code:: XML
<E units="cal/mol">1000.0</E>
<E>1000.0</E>
this function returns, respectively::
1000.0 cal/mol
1000.0
where the first value is a string and the second is a float.
"""
value = float(clean_node_text(node))
units = node.get("units", "")
if units:
units = re.sub(r"([A-Za-z])-([A-Za-z])", r"\1*\2", units)
units = re.sub(r"([A-Za-z])([-\d])", r"\1^\2", units)
return "{} {}".format(float2string(value), units)
else:
return value
def split_species_value_string(node: etree.Element) -> Dict[str, float]:
"""Split a string of species:value pairs into a dictionary.
:param node:
An XML node whose text contains the species: value pairs
Returns a dictionary where the keys of the dictionary are species names and the
values are the number associated with each species. This is useful for things like
elemental composition, mole fraction mappings, coverage mappings, etc.
The algorithm is reimplemented from ``compositionMap::parseCompString`` in
``base/stringUtils.cpp``.
"""
text = clean_node_text(node)
pairs = FlowMap({})
start, stop, left = 0, 0, 0
# \S matches the first non-whitespace character
non_whitespace = re.compile(r"\S")
stop_re = re.compile(r"[,;\s]")
while stop < len(text):
try:
colon = text.index(":", left)
except ValueError:
break
# colon + 1 here excludes the colon itself from the search
valstart_match = non_whitespace.search(text, colon + 1)
if valstart_match is None:
break
valstart = valstart_match.start()
stop_match = stop_re.search(text, valstart)
if stop_match is None:
stop = len(text)
else:
stop = stop_match.start()
name = text[start:colon]
try:
value = float(text[valstart:stop])
except ValueError:
testname = text[start : stop - start]
if re.search(r"\s", testname) is not None:
raise
elif text[valstart:stop].find(":") != -1:
left = colon + 1
stop = 0
continue
else:
raise
pairs[name] = value
start_match = re.search(r"[^,;\s]", text[stop + 1 :])
if start_match is None:
continue
start = start_match.start() + stop + 1
left = start
return pairs
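# A usage sketch (hypothetical node text, not taken from a real CTML file): for a node
# whose text is "O2:0.21, N2:0.78, AR:0.01" this returns the flow-style mapping
# {'O2': 0.21, 'N2': 0.78, 'AR': 0.01}.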
def clean_node_text(node: etree.Element) -> str:
"""Clean the text of a node.
:param node:
An XML node with a text value.
Raises `MissingNodeText` if the node text is not present. Otherwise, replaces
newlines and tab characters with spaces, then strips the resulting string. This
turns multi-line text values into a single line that can be split on whitespace.
"""
text = node.text
if text is None:
raise MissingNodeText("The text of the node must exist", node)
return text.replace("\n", " ").replace("\t", " ").strip(" ,")
class Phase:
thermo_model_mapping = {
"IdealGas": "ideal-gas",
"Incompressible": "constant-density",
"Surface": "ideal-surface",
"Surf": "ideal-surface",
"Edge": "edge",
"Metal": "electron-cloud",
"StoichSubstance": "fixed-stoichiometry",
"PureFluid": "pure-fluid",
"LatticeSolid": "compound-lattice",
"Lattice": "lattice",
"HMW": "HMW-electrolyte",
"HMWSoln": "HMW-electrolyte",
"IdealSolidSolution": "ideal-condensed",
"IdealSolidSoln": "ideal-condensed",
"DebyeHuckel": "Debye-Huckel",
"IdealMolalSolution": "ideal-molal-solution",
"IdealMolalSoln": "ideal-molal-solution",
"IdealSolnVPSS": "ideal-solution-VPSS",
"IdealSolnGas": "ideal-solution-VPSS",
"IdealGasVPSS": "ideal-gas-VPSS",
"Margules": "Margules",
"IonsFromNeutralMolecule": "ions-from-neutral-molecule",
"IonsFromNeutral": "ions-from-neutral-molecule",
"Redlich-Kister": "Redlich-Kister",
"RedlichKister": "Redlich-Kister",
"RedlichKwongMFTP": "Redlich-Kwong",
"RedlichKwong": "Redlich-Kwong",
"MaskellSolidSolnPhase": "Maskell-solid-solution",
"MaskellSolidSoln": "Maskell-solid-solution",
"PureLiquidWater": "liquid-water-IAPWS95",
"Water": "liquid-water-IAPWS95",
"BinarySolutionTabulatedThermo": "binary-solution-tabulated",
}
kinetics_model_mapping = {
"GasKinetics": "gas",
"Interface": "surface",
"none": None,
"Edge": "edge",
"None": None,
"SolidKinetics": None,
}
transport_model_mapping = {
"Mix": "mixture-averaged",
"Multi": "multicomponent",
"None": None,
"Ion": "ionized-gas",
"Water": "water",
"none": None,
None: None,
"UnityLewis": "unity-Lewis-number",
"CK_Mix": "mixture-averaged-CK",
"CK_Multi": "multicomponent-CK",
"HighP": "high-pressure",
}
state_properties_mapping = {
"moleFractions": "X",
"massFractions": "Y",
"temperature": "T",
"pressure": "P",
"coverages": "coverages",
"soluteMolalities": "molalities",
}
pure_fluid_mapping = {
"0": "water",
"1": "nitrogen",
"2": "methane",
"3": "hydrogen",
"4": "oxygen",
"5": "HFC-134a",
"7": "carbon-dioxide",
"8": "heptane",
}
def __init__(
self,
phase: etree.Element,
species_data: Dict[str, List["Species"]],
reaction_data: Dict[str, List["Reaction"]],
):
"""Represent an XML ``phase`` node.
:param phase:
XML node containing a phase definition.
:param species_data:
Mapping of species data sources to lists of `Species` instances.
:param reaction_data:
Mapping of reaction data sources to lists of `Reaction` instances.
This class processes the XML node of a phase definition and generates a mapping
for the YAML output. The mapping is stored in the ``attribs`` instance
attribute and automatically formatted to YAML by the `~Phase.to_yaml` class
method.
"""
phase_name = phase.get("id")
if phase_name is None:
raise MissingXMLAttribute(
"The 'phase' node requires an 'id' attribute.", phase
)
self.attribs = BlockMap({"name": phase_name})
elem_text = phase.findtext("elementArray")
if elem_text is not None:
elements = elem_text.replace("\n", "").strip().split()
# This second check is necessary because self-closed tags
# have an empty text when checked with 'findtext' but
# have 'None' when 'find().text' is used
if elements:
self.attribs["elements"] = FlowList(elements)
species = []
speciesArray_nodes = phase.findall("speciesArray")
for sA_node in speciesArray_nodes:
species.append(self.get_species_array(sA_node))
species_skip = sA_node.find("skip")
if species_skip is not None:
element_skip = species_skip.get("element", "")
if element_skip == "undeclared":
self.attribs["skip-undeclared-elements"] = True
if species:
if len(species) == 1 and "species" in species[0]:
self.attribs.update(species[0])
else:
self.attribs["species"] = species
phase_thermo = phase.find("thermo")
if phase_thermo is None:
raise MissingXMLNode("The 'phase' node requires a 'thermo' node.", phase)
phase_thermo_model = phase_thermo.get("model")
if phase_thermo_model is None:
raise MissingXMLAttribute(
"The 'thermo' node requires a 'model' attribute.", phase_thermo
)
self.attribs["thermo"] = self.thermo_model_mapping[phase_thermo_model]
phases_text = phase.findtext("phaseArray")
if phases_text is not None:
adjacent_phases = phases_text.replace("\n", " ").strip().split()
if adjacent_phases:
self.attribs["adjacent-phases"] = FlowList(adjacent_phases)
if phase_thermo_model == "PureFluid":
pure_fluid_type = phase_thermo.get("fluid_type")
if pure_fluid_type is None:
raise MissingXMLAttribute(
"The 'PureFluid' model requires the 'fluid_type' attribute.",
phase_thermo,
)
self.attribs["pure-fluid-name"] = self.pure_fluid_mapping[pure_fluid_type]
elif phase_thermo_model == "HMW":
activity_coefficients = phase_thermo.find("activityCoefficients")
if activity_coefficients is None:
raise MissingXMLNode(
"The 'HMW' thermo model requires the 'activityCoefficients' node.",
phase_thermo,
)
self.attribs["activity-data"] = self.hmw_electrolyte(activity_coefficients)
elif phase_thermo_model == "DebyeHuckel":
activity_coefficients = phase_thermo.find("activityCoefficients")
if activity_coefficients is None:
raise MissingXMLNode(
"The 'DebyeHuckel' thermo model requires the "
"'activityCoefficients' node.",
phase_thermo,
)
self.attribs["activity-data"] = self.debye_huckel(
species, activity_coefficients, species_data
)
elif phase_thermo_model == "StoichSubstance":
self.move_density_to_species(species, phase_thermo, species_data)
elif phase_thermo_model == "RedlichKwongMFTP":
activity_coefficients = phase_thermo.find("activityCoefficients")
if activity_coefficients is not None:
self.move_RK_coeffs_to_species(
species, activity_coefficients, species_data
)
elif phase_thermo_model == "MaskellSolidSolnPhase":
try:
self.move_density_to_species(species, phase_thermo, species_data)
except MissingXMLNode:
pass
excess_h_node = phase_thermo.find("h_mix")
if excess_h_node is not None:
self.attribs["excess-enthalpy"] = get_float_or_quantity(excess_h_node)
product_spec_node = phase_thermo.find("product_species")
if product_spec_node is not None:
self.attribs["product-species"] = clean_node_text(product_spec_node)
elif phase_thermo_model == "IonsFromNeutralMolecule":
neutral_phase_node = phase_thermo.find("neutralMoleculePhase")
if neutral_phase_node is None:
raise MissingXMLNode(
"The 'IonsFromNeutralMolecule' phase requires the "
"'neutralMoleculePhase' node.",
phase_thermo,
)
neutral_phase_src = neutral_phase_node.get("datasrc")
if neutral_phase_src is None:
raise MissingXMLAttribute(
"The 'neutralMoleculePhase' requires the 'datasrc' attribute.",
neutral_phase_node,
)
filename, location = neutral_phase_src.split("#")
filename = str(Path(filename).with_suffix(".yaml"))
self.attribs["neutral-phase"] = "{}/{}".format(filename, location)
elif phase_thermo_model == "Redlich-Kister":
activity_coefficients = phase_thermo.find("activityCoefficients")
if activity_coefficients is None:
raise MissingXMLNode(
"The 'RedlichKister' thermo model requires the "
"'activityCoefficients' node.",
phase_thermo,
)
self.attribs["interactions"] = self.redlich_kister(activity_coefficients)
elif phase_thermo_model == "LatticeSolid":
lattice_array_node = phase_thermo.find("LatticeArray")
if lattice_array_node is None:
raise MissingXMLNode(
"The 'LatticeSolid' phase thermo requires a 'LatticeArray' node.",
phase_thermo,
)
self.lattice_nodes = [] # type: List[Phase]
for lattice_phase_node in lattice_array_node.findall("phase"):
self.lattice_nodes.append(
Phase(lattice_phase_node, species_data, reaction_data)
)
lattice_stoich_node = phase_thermo.find("LatticeStoichiometry")
if lattice_stoich_node is None:
raise MissingXMLNode(
"The 'LatticeSolid' phase thermo requires a "
"'LatticeStoichiometry' node.",
phase_thermo,
)
self.attribs["composition"] = {}
for phase_ratio in clean_node_text(lattice_stoich_node).split():
p_name, ratio = phase_ratio.rsplit(":", 1)
self.attribs["composition"][p_name.strip()] = float(ratio)
elif phase_thermo_model == "Margules":
activity_coefficients = phase_thermo.find("activityCoefficients")
if activity_coefficients is not None:
margules_interactions = self.margules(activity_coefficients)
if margules_interactions:
self.attribs["interactions"] = margules_interactions
elif phase_thermo_model == "IdealMolalSolution":
activity_coefficients = phase_thermo.find("activityCoefficients")
if activity_coefficients is not None:
ideal_molal_cutoff = self.ideal_molal_solution(activity_coefficients)
if ideal_molal_cutoff:
self.attribs["cutoff"] = ideal_molal_cutoff
for node in phase_thermo:
if node.tag == "site_density":
self.attribs["site-density"] = get_float_or_quantity(node)
elif node.tag == "density":
if self.attribs["thermo"] == "electron-cloud":
self.attribs["density"] = get_float_or_quantity(node)
elif node.tag == "tabulatedSpecies":
self.attribs["tabulated-species"] = node.get("name")
elif node.tag == "tabulatedThermo":
self.attribs["tabulated-thermo"] = self.get_tabulated_thermo(node)
transport_node = phase.find("transport")
if transport_node is not None:
transport_model = self.transport_model_mapping[transport_node.get("model")]
if transport_model is not None:
self.attribs["transport"] = transport_model
# The phase requires both a kinetics model and a set of
# reactions to include the kinetics
kinetics_node = phase.find("kinetics")
has_reactionArray = phase.find("reactionArray") is not None
if kinetics_node is not None and has_reactionArray:
kinetics_model = self.kinetics_model_mapping[kinetics_node.get("model", "")]
if kinetics_node.get("model", "").lower() == "solidkinetics":
warnings.warn(
"The SolidKinetics type is not implemented and will not be "
"included in the YAML output."
)
reactions = []
for rA_node in phase.iterfind("reactionArray"):
# If the reaction list associated with the datasrc for this
# reactionArray is missing or empty, don't do anything.
datasrc = rA_node.get("datasrc", "")
if datasrc.startswith("#") and not reaction_data.get(datasrc[1:]):
continue
reactions.append(self.get_reaction_array(rA_node, reaction_data))
# The reactions list may be empty, don't include any kinetics stuff
# if it is
if reactions and kinetics_model is not None:
self.attribs["kinetics"] = kinetics_model
# If there is one reactionArray and the datasrc was reaction_data
# (munged to just reactions) the output should be 'reactions: all',
# so we use update. Otherwise, there needs to be a list
# of mappings.
if len(reactions) == 1 and "reactions" in reactions[0]:
self.attribs.update(reactions[0])
else:
self.attribs["reactions"] = reactions
state_node = phase.find("state")
if state_node is not None:
phase_state = FlowMap()
for prop in state_node:
property_name = self.state_properties_mapping[prop.tag]
if prop.tag in [
"moleFractions",
"massFractions",
"coverages",
"soluteMolalities",
]:
composition = split_species_value_string(prop)
phase_state[property_name] = composition
else:
value = get_float_or_quantity(prop)
phase_state[property_name] = value
if phase_state:
self.attribs["state"] = phase_state
std_conc_node = phase.find("standardConc")
if std_conc_node is not None:
model = std_conc_node.get("model")
if model == "solvent_volume":
model = "solvent-molar-volume"
elif model == "molar_volume":
model = "species-molar-volume"
self.attribs["standard-concentration-basis"] = model
self.check_elements(species, species_data)
def ideal_molal_solution(
self, activity_coeffs: etree.Element
) -> Dict[str, Union[str, "QUANTITY"]]:
"""Process the cutoff data in an ``IdealMolalSolution`` phase-thermo type.
:param activity_coeffs:
XML ``activityCoefficients`` node. For the ``IdealMolalSolution`` thermo
type, this node contains information about cutoff limits for the
thermodynamic properties.
Returns a (possibly empty) dictionary to update the `Phase` attributes. The
dictionary will be empty when there are no cutoff nodes in the
``activityCoefficients`` node.
"""
cutoff = {} # type: Dict[str, Union[str, QUANTITY]]
cutoff_node = activity_coeffs.find("idealMolalSolnCutoff")
if cutoff_node is not None:
cutoff_model = cutoff_node.get("model")
if cutoff_model is not None:
cutoff["model"] = cutoff_model
for limit_node in cutoff_node:
# Remove _limit or _cutoff from the right side of the node tag
tag = limit_node.tag.rsplit("_", 1)[0]
cutoff[tag] = get_float_or_quantity(limit_node)
return cutoff
def margules(
self, activity_coeffs: etree.Element
) -> List[Dict[str, List[Union[str, "QUANTITY"]]]]:
"""Process activity coefficients for a ``Margules`` phase-thermo type.
:param activity_coeffs:
XML ``activityCoefficients`` node. For the ``Margules`` phase-thermo type
these are interaction parameters between the species in the phase.
Returns a list of interaction data values. Margules does not require the
``binaryNeutralSpeciesParameters`` node to be present. Almost a superset of the
Redlich-Kister parameters, but since the ``binaryNeutralSpeciesParameters`` are
processed in a loop, there's no advantage to re-use Redlich-Kister processing
because this function would have to go through the same nodes again.
"""
all_binary_params = activity_coeffs.findall("binaryNeutralSpeciesParameters")
interactions = []
for binary_params in all_binary_params:
species_A = binary_params.get("speciesA")
species_B = binary_params.get("speciesB")
if species_A is None or species_B is None:
raise MissingXMLAttribute(
"'binaryNeutralSpeciesParameters' node requires 'speciesA' and "
"'speciesB' attributes",
binary_params,
)
this_node = {
"species": FlowList([species_A, species_B])
} # type: Dict[str, List[Union[str, QUANTITY]]]
excess_enthalpy_node = binary_params.find("excessEnthalpy")
if excess_enthalpy_node is not None:
excess_enthalpy = clean_node_text(excess_enthalpy_node).split(",")
enthalpy_units = excess_enthalpy_node.get("units", "")
if not enthalpy_units:
this_node["excess-enthalpy"] = FlowList(map(float, excess_enthalpy))
else:
this_node["excess-enthalpy"] = FlowList(
[" ".join([e.strip(), enthalpy_units]) for e in excess_enthalpy]
)
excess_entropy_node = binary_params.find("excessEntropy")
if excess_entropy_node is not None:
excess_entropy = clean_node_text(excess_entropy_node).split(",")
entropy_units = excess_entropy_node.get("units", "")
if not entropy_units:
this_node["excess-entropy"] = FlowList(map(float, excess_entropy))
else:
this_node["excess-entropy"] = FlowList(
[" ".join([e.strip(), entropy_units]) for e in excess_entropy]
)
excessvol_enth_node = binary_params.find("excessVolume_Enthalpy")
if excessvol_enth_node is not None:
excess_vol_enthalpy = clean_node_text(excessvol_enth_node).split(",")
enthalpy_units = excessvol_enth_node.get("units", "")
if not enthalpy_units:
this_node["excess-volume-enthalpy"] = FlowList(
map(float, excess_vol_enthalpy)
)
else:
this_node["excess-volume-enthalpy"] = FlowList(
[
" ".join([e.strip(), enthalpy_units])
for e in excess_vol_enthalpy
]
)
excessvol_entr_node = binary_params.find("excessVolume_Entropy")
if excessvol_entr_node is not None:
excess_vol_entropy = clean_node_text(excessvol_entr_node).split(",")
entropy_units = excessvol_entr_node.get("units", "")
if not entropy_units:
this_node["excess-volume-entropy"] = FlowList(
map(float, excess_vol_entropy)
)
else:
this_node["excess-volume-entropy"] = FlowList(
[
" ".join([e.strip(), entropy_units])
for e in excess_vol_entropy
]
)
interactions.append(this_node)
return interactions
def redlich_kister(
self, activity_coeffs: etree.Element
) -> List[Dict[str, List[Union[str, "QUANTITY"]]]]:
"""Process activity coefficients for a Redlich-Kister phase-thermo type.
:param activity_coeffs:
XML ``activityCoefficients`` node. For the ``RedlichKister`` phase-thermo
type these are interaction parameters between the species in the phase.
Returns a list of interaction data values. The ``activityCoefficients`` node
must have a ``binaryNeutralSpeciesParameters`` child node.
"""
all_binary_params = activity_coeffs.findall("binaryNeutralSpeciesParameters")
if not all_binary_params:
raise MissingXMLNode(
"Redlich-Kister activity coefficients requires a "
"'binaryNeutralSpeciesParameters' node",
activity_coeffs,
)
interactions = []
for binary_params in all_binary_params:
species_A = binary_params.get("speciesA")
species_B = binary_params.get("speciesB")
if species_A is None or species_B is None:
raise MissingXMLAttribute(
"'binaryNeutralSpeciesParameters' node requires 'speciesA' and "
"'speciesB' attributes",
binary_params,
)
this_node = {
"species": FlowList([species_A, species_B])
} # type: Dict[str, List[Union[str, QUANTITY]]]
excess_enthalpy_node = binary_params.find("excessEnthalpy")
if excess_enthalpy_node is not None:
excess_enthalpy = clean_node_text(excess_enthalpy_node).split(",")
enthalpy_units = excess_enthalpy_node.get("units", "")
if not enthalpy_units:
this_node["excess-enthalpy"] = FlowList(map(float, excess_enthalpy))
else:
this_node["excess-enthalpy"] = FlowList(
[" ".join([e.strip(), enthalpy_units]) for e in excess_enthalpy]
)
excess_entropy_node = binary_params.find("excessEntropy")
if excess_entropy_node is not None:
excess_entropy = clean_node_text(excess_entropy_node).split(",")
entropy_units = excess_entropy_node.get("units", "")
if not entropy_units:
this_node["excess-entropy"] = FlowList(map(float, excess_entropy))
else:
this_node["excess-entropy"] = FlowList(
[" ".join([e.strip(), entropy_units]) for e in excess_entropy]
)
interactions.append(this_node)
return interactions
def check_elements(
self,
this_phase_species: List[Dict[str, Iterable[str]]],
species_data: Dict[str, List["Species"]],
) -> None:
"""Check the species elements for inclusion in the `Phase`-level specification.
:param this_phase_species:
A list of mappings of species data sources to the species names in that data
source. Passed as an argument instead of using the ``species`` key in the
instance ``attribs`` dictionary because the attribute could be a mapping or
a list of mappings, whereas this argument is always a list of mappings.
:param species_data:
Mapping of species data sources (that is, ``id`` attributes on
``speciesData`` nodes) to lists of `Species` instances.
Some species include a charge node that adds an electron to the species
composition. The `Phase`s that include these species don't necessarily include
the electron in the `Phase`-level elements list, so we need to update that to
include it if necessary.
"""
phase_elements = self.attribs.get("elements")
if phase_elements is None:
return
flat_species = {k: v for d in this_phase_species for k, v in d.items()}
for datasrc, species_names in flat_species.items():
if datasrc == "species":
datasrc = "species_data"
species = species_data.get(datasrc)
if species is None:
continue
for spec in species:
species_elements = spec.attribs.get("composition", {})
if spec.attribs["name"] not in species_names:
continue
for species_element, amount in species_elements.items():
if species_element not in phase_elements and amount > 0.0:
phase_elements.append(species_element)
def move_RK_coeffs_to_species(
self,
this_phase_species: List[Dict[str, Iterable[str]]],
activity_coeffs: etree.Element,
species_data: Dict[str, List["Species"]],
) -> None:
"""Move the Redlich-Kwong activity coefficient data from phase to species.
:param this_phase_species:
A list of mappings of species data sources to the species names in that
data source. Passed as an argument instead of using the ``species`` key
in the instance ``attribs`` because the attribute could be a mapping or
a list of mappings, whereas this argument is always a list of mappings.
:param activity_coeffs:
XML ``activityCoefficients`` node.
:param species_data:
Mapping of species data sources (that is, ``id`` attributes on
``speciesData`` nodes) to lists of `Species` instances.
The YAML format moves the specification of Redlich-Kwong binary interaction
parameters from the `Phase` node into the `Species` nodes. This modifies the
`Species` objects in-place in the ``species_data`` list.
"""
all_species_eos = {} # type: Dict[str, RK_EOS_DICT]
for pure_param in activity_coeffs.iterfind("pureFluidParameters"):
eq_of_state = BlockMap({"model": "Redlich-Kwong"})
pure_species = pure_param.get("species")
if pure_species is None:
raise MissingXMLAttribute(
"The 'pureFluidParameters' node requires a 'species' attribute",
pure_param,
)
pure_a_node = pure_param.find("a_coeff")
if pure_a_node is None:
raise MissingXMLNode(
"The 'pureFluidParameters' node requires the 'a_coeff' node.",
pure_param,
)
pure_a_units = pure_a_node.get("units")
pure_a = [float(a) for a in clean_node_text(pure_a_node).split(",")]
if pure_a_units is not None:
pure_a_units = re.sub(r"([A-Za-z])-([A-Za-z])", r"\1*\2", pure_a_units)
pure_a_units = re.sub(r"([A-Za-z])([-\d])", r"\1^\2", pure_a_units)
eq_of_state["a"] = FlowList()
eq_of_state["a"].append(
"{} {}".format(float2string(pure_a[0]), pure_a_units + "*K^0.5")
)
eq_of_state["a"].append(
"{} {}".format(float2string(pure_a[1]), pure_a_units + "/K^0.5")
)
else:
eq_of_state["a"] = FlowList(pure_a)
pure_b_node = pure_param.find("b_coeff")
if pure_b_node is None:
raise MissingXMLNode(
"The 'pureFluidParameters' node requires the 'b_coeff' node.",
pure_param,
)
eq_of_state["b"] = get_float_or_quantity(pure_b_node)
all_species_eos[pure_species] = eq_of_state
all_cross_params = activity_coeffs.findall("crossFluidParameters")
for cross_param in all_cross_params:
species_1_name = cross_param.get("species1")
species_2_name = cross_param.get("species2")
if species_1_name is None or species_2_name is None:
raise MissingXMLAttribute(
"The 'crossFluidParameters' node requires 2 species names",
cross_param,
)
species_1 = all_species_eos[species_1_name]
if "binary-a" not in species_1:
species_1["binary-a"] = {}
species_2 = all_species_eos[species_2_name]
if "binary-a" not in species_2:
species_2["binary-a"] = {}
cross_a_node = cross_param.find("a_coeff")
if cross_a_node is None:
raise MissingXMLNode(
"The 'crossFluidParameters' node requires the 'a_coeff' node",
cross_param,
)
cross_a_unit = cross_a_node.get("units")
cross_a = [float(a) for a in clean_node_text(cross_a_node).split(",")]
if cross_a_unit is not None:
cross_a_unit = re.sub(r"([A-Za-z])-([A-Za-z])", r"\1*\2", cross_a_unit)
cross_a_unit = re.sub(r"([A-Za-z])([-\d])", r"\1^\2", cross_a_unit)
cross_a_w_units = []
cross_a_w_units.append(
"{} {}".format(float2string(cross_a[0]), cross_a_unit + "*K^0.5")
)
cross_a_w_units.append(
"{} {}".format(float2string(cross_a[1]), cross_a_unit + "/K^0.5")
)
species_1["binary-a"].update(
{species_2_name: FlowList(cross_a_w_units)}
)
species_2["binary-a"].update(
{species_1_name: FlowList(cross_a_w_units)}
)
else:
species_1["binary-a"].update({species_2_name: FlowList(cross_a)})
species_2["binary-a"].update({species_1_name: FlowList(cross_a)})
for node in this_phase_species:
for datasrc, species_names in node.items():
if datasrc == "species":
datasrc = "species_data"
species = species_data.get(datasrc)
if species is None:
continue
for spec in species:
if spec.attribs["name"] in species_names:
spec.attribs["equation-of-state"] = all_species_eos[
spec.attribs["name"]
]
def move_density_to_species(
self,
this_phase_species: List[Dict[str, Iterable[str]]],
phase_thermo: etree.Element,
species_data: Dict[str, List["Species"]],
) -> None:
"""Move the phase density information into each species definition.
:param this_phase_species:
A list of mappings of species data sources to the species names in that
data source. Passed as an argument instead of using the ``species`` key
in the instance ``attribs`` because the attribute could be a mapping or
a list of mappings, whereas this argument is always a list of mappings.
:param phase_thermo:
XML ``thermo`` node.
:param species_data:
Mapping of species data sources (that is, ``id`` attributes on
``speciesData`` nodes) to lists of `Species` instances.
The YAML format moves the specification of density for ``StoichSubstance``
phase-thermo types from the `Phase` node into the `Species` nodes. This modifies
the `Species` objects in-place in the ``species_data`` list.
"""
den_node = phase_thermo.find("density")
const_prop = "density"
if den_node is None:
den_node = phase_thermo.find("molarDensity")
const_prop = "molar-density"
if den_node is None:
den_node = phase_thermo.find("molarVolume")
const_prop = "molar-volume"
if den_node is None:
raise MissingXMLNode(
"Thermo node is missing 'density', 'molarDensity', or 'molarVolume' "
"node.",
phase_thermo,
)
equation_of_state = {
"model": "constant-volume",
const_prop: get_float_or_quantity(den_node),
}
flat_species = {k: v for d in this_phase_species for k, v in d.items()}
for datasrc, species_names in flat_species.items():
if datasrc == "species":
datasrc = "species_data"
species = species_data.get(datasrc)
if species is None:
continue
for spec in species:
if (
spec.attribs["name"] in species_names
and "equation-of-state" not in spec.attribs
):
spec.attribs["equation-of-state"] = equation_of_state
def get_species_array(
self, speciesArray_node: etree.Element
) -> Dict[str, Iterable[str]]:
"""Process a list of species from a ``speciesArray`` node.
:param speciesArray_node:
An XML ``speciesArray`` node.
The ``speciesArray`` node has the data source plus a list of species to derive
from that data source. If the data source specifies an XML file, convert the
extension to ``.yaml``. If the data source ``id`` is ``species_data``, reformat
to just ``species`` for the YAML file. Otherwise, retain the ``id`` as-is.
"""
species_list = FlowList(clean_node_text(speciesArray_node).split())
datasrc = speciesArray_node.get("datasrc", "")
if datasrc == "#species_data":
new_datasrc = "species"
elif datasrc.startswith("#"):
new_datasrc = datasrc[1:]
else:
filename, location = datasrc.split("#", 1)
name = str(Path(filename).with_suffix(".yaml"))
if location == "species_data":
location = "species"
new_datasrc = "{}/{}".format(name, location)
return {new_datasrc: species_list}
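    # Illustration (hypothetical inputs, not from any real file) of how the datasrc is
    # rewritten by get_species_array:
    #   datasrc="#species_data"          -> {"species": [...names...]}
    #   datasrc="#my_block"              -> {"my_block": [...names...]}
    #   datasrc="gri30.xml#species_data" -> {"gri30.yaml/species": [...names...]}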
def get_reaction_array(
self,
reactionArray_node: etree.Element,
reaction_data: Dict[str, List["Reaction"]],
) -> Dict[str, str]:
"""Process reactions from a ``reactionArray`` node in a phase definition.
:param reactionArray_node:
An XML ``reactionArray`` node.
        The ``reactionArray`` node has the data source plus a list of reactions to derive
        from that data source. If the data source specifies an XML file, convert the
        extension to ``.yaml``. If the data source ``id`` is ``reaction_data``, reformat
        to just ``reactions`` for the YAML file. Otherwise, retain the ``id`` as-is.
"""
datasrc = reactionArray_node.get("datasrc", "")
if not datasrc:
raise MissingXMLAttribute(
"The 'reactionArray' node must include a 'datasrc' attribute.",
reactionArray_node,
)
filter_node = reactionArray_node.find("include")
if filter_node is not None:
filter_text = filter_node.get("min", "none")
if filter_text != filter_node.get("max"):
raise ValueError("Cannot handle differing reaction filter criteria")
else:
filter_text = "none"
skip_node = reactionArray_node.find("skip")
if skip_node is not None:
# "undeclared" is the only allowed option for third_bodies and species
# here, so ignore other options
if skip_node.get("third_bodies", "").lower() == "undeclared":
self.attribs["skip-undeclared-third-bodies"] = True
if skip_node.get("species", "").lower() == "undeclared":
reaction_option = "declared-species"
else:
reaction_option = "all"
else:
reaction_option = "all"
if not datasrc.startswith("#"):
if filter_text.lower() != "none":
raise ValueError(
"Filtering reactions is not allowed with an external 'datasrc'"
)
if skip_node is None:
raise MissingXMLNode(
"Must include 'skip' node for external data sources",
reactionArray_node,
)
# This code does not handle the # character in a filename
filename, location = datasrc.split("#", 1)
name = str(Path(filename).with_suffix(".yaml"))
if location == "reaction_data":
location = "reactions"
datasrc = "{}/{}".format(name, location)
else:
if filter_text.lower() != "none":
datasrc = self.filter_reaction_list(datasrc, filter_text, reaction_data)
elif datasrc == "#reaction_data":
datasrc = "reactions"
else:
datasrc = datasrc.lstrip("#")
return {datasrc: reaction_option}
def filter_reaction_list(
self, datasrc: str, filter_text: str, reaction_data: Dict[str, List["Reaction"]]
) -> str:
"""Filter the reaction_data list to only include specified reactions.
:param datasrc:
The XML source of the reaction data that is being filtered.
:param filter_text:
            The text specified in the ``include`` node selecting which reactions are
            being filtered.
:param reaction_data:
Mapping of reaction data sources (that is, ``id`` attributes on
``reactionData`` nodes) to lists of `Reaction` instances.
The YAML format does not support filtering reactions by setting options in the
`Phase` node, like the XML format does. Instead, when filters are used in XML,
the reactions should be split into separate top-level nodes in the YAML file,
which then become the data source in the YAML reactions specification. Returns
a string that should be used as the data source in the YAML file.
"""
all_reactions = reaction_data[datasrc.lstrip("#")]
hits = []
misses = []
re_pattern = re.compile(filter_text.replace("*", ".*"))
for reaction in all_reactions:
reaction_id = reaction.attribs.get("id")
if re_pattern.match(reaction_id):
hits.append(reaction)
else:
misses.append(reaction)
if not hits:
raise ValueError(
"The filter text '{}' resulted in an empty set of "
"reactions".format(filter_text)
)
else:
new_datasrc = self.attribs["name"] + "-reactions"
reaction_data[new_datasrc] = hits
# If misses is not empty, replace the old list of reactions with
# a new list where filtered out reactions are removed. If there
# are no remaining reactions, remove the entry for this datasrc
# from the dictionary
if misses:
reaction_data[datasrc] = misses
else:
del reaction_data[datasrc]
return new_datasrc
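    # Illustration (hypothetical names): with self.attribs["name"] == "gas" and a filter
    # text of "O2_*", reactions whose ids match "O2_.*" are collected under a new
    # top-level "gas-reactions" key in reaction_data, and that key is returned as the
    # datasrc; reactions that do not match stay under the original datasrc key.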
def get_tabulated_thermo(self, tab_thermo_node: etree.Element) -> Dict[str, str]:
"""Process data from the ``tabulatedThermo`` node.
:param tab_thermo_node:
The XML node with the tabulated thermodynamics data.
"""
tab_thermo = BlockMap()
enthalpy_node = tab_thermo_node.find("enthalpy")
if enthalpy_node is None:
raise MissingXMLNode(
"The 'tabulatedThermo' node must have an 'enthalpy' node.",
tab_thermo_node,
)
enthalpy_units = enthalpy_node.get("units", "").split("/")
if not enthalpy_units:
raise MissingXMLAttribute(
"The 'enthalpy' node must have a 'units' attribute.", enthalpy_node,
)
entropy_node = tab_thermo_node.find("entropy")
if entropy_node is None:
raise MissingXMLNode(
"The 'tabulatedThermo' node must have an 'entropy' node.",
tab_thermo_node,
)
entropy_units = entropy_node.get("units", "").split("/")
if not entropy_units:
raise MissingXMLAttribute(
"The 'entropy' node must have a 'units' attribute.", enthalpy_node,
)
if enthalpy_units[:2] != entropy_units[:2]:
raise ValueError("Tabulated thermo must have the same units.")
tab_thermo["units"] = FlowMap(
{"energy": entropy_units[0], "quantity": entropy_units[1]}
)
enthalpy = clean_node_text(enthalpy_node).split(",")
if len(enthalpy) != int(enthalpy_node.get("size", 0)):
raise ValueError(
"The number of entries in the enthalpy list is different from the "
"indicated size."
)
tab_thermo["enthalpy"] = FlowList(map(float, enthalpy))
entropy = clean_node_text(entropy_node).split(",")
tab_thermo["entropy"] = FlowList(map(float, entropy))
if len(entropy) != int(entropy_node.get("size", 0)):
raise ValueError(
"The number of entries in the entropy list is different from the "
"indicated size."
)
mole_fraction_node = tab_thermo_node.find("moleFraction")
if mole_fraction_node is None:
raise MissingXMLNode(
"The 'tabulatedThermo' node must have a 'moleFraction' node.",
tab_thermo_node,
)
mole_fraction = clean_node_text(mole_fraction_node).split(",")
tab_thermo["mole-fractions"] = FlowList(map(float, mole_fraction))
if len(mole_fraction) != int(mole_fraction_node.get("size", 0)):
raise ValueError(
"The number of entries in the mole_fraction list is different from the "
"indicated size."
)
return tab_thermo
def hmw_electrolyte(
self, activity_node: etree.Element
) -> Dict[str, "HMW_THERMO_TYPE"]:
"""Process the activity coefficients for an ``HMW`` phase-thermo type.
        :param activity_node:
            XML ``activityCoefficients`` node.
        The ``activityCoefficients`` node must include the ``A_Debye`` node, as well as
any interaction parameters between species.
"""
activity_data = BlockMap(
{"temperature-model": activity_node.get("TempModel", "constant")}
)
A_Debye_node = activity_node.find("A_Debye")
if A_Debye_node is None:
raise MissingXMLNode(
"The 'activityCoefficients' node must have an 'A_debye' node.",
activity_node,
)
if A_Debye_node.get("model", "").lower() == "water":
activity_data["A_Debye"] = "variable"
else:
# Assume the units are kg^0.5/gmol^0.5. Apparently,
# this is not handled in the same way as other units.
if A_Debye_node.text is None:
raise MissingNodeText(
"The 'A_Debye' node must have a text value", A_Debye_node
)
activity_data["A_Debye"] = A_Debye_node.text.strip() + " kg^0.5/gmol^0.5"
interactions = []
for inter_node in activity_node:
if inter_node.tag not in [
"binarySaltParameters",
"thetaAnion",
"psiCommonCation",
"thetaCation",
"psiCommonAnion",
"lambdaNeutral",
"zetaCation",
]:
continue
this_interaction = {"species": FlowList([i[1] for i in inter_node.items()])}
for param_node in inter_node:
data = clean_node_text(param_node).split(",")
param_name = param_node.tag.lower()
if param_name == "cphi":
param_name = "Cphi"
if len(data) == 1:
this_interaction[param_name] = float(data[0])
else:
this_interaction[param_name] = FlowList(map(float, data))
interactions.append(this_interaction)
activity_data["interactions"] = interactions
return activity_data
def debye_huckel(
self,
this_phase_species: List[Dict[str, Iterable[str]]],
activity_node: etree.Element,
species_data: Dict[str, List["Species"]],
) -> Dict[str, Union[str, "QUANTITY", bool]]:
"""Process the activity coefficients for the ``DebyeHuckel`` phase-thermo type.
:param this_phase_species:
A list of mappings of species data sources to the species names in that
data source. Passed as an argument instead of using the ``species`` key
in the instance ``attribs`` because the attribute could be a mapping or
a list of mappings, whereas this argument is always a list of mappings.
:param activity_node:
XML ``activityCoefficients`` node.
:param species_data:
Mapping of species data sources (that is, ``id`` attributes on
``speciesData`` nodes) to lists of `Species` instances.
"""
model_map = {
"dilute_limit": "dilute-limit",
"bdot_with_variable_a": "B-dot-with-variable-a",
"bdot_with_common_a": "B-dot-with-common-a",
"pitzer_with_beta_ij": "Pitzer-with-beta_ij",
"beta_ij": "beta_ij",
"": "dilute-limit",
}
activity_model = activity_node.get("model")
if activity_model is None:
raise MissingXMLAttribute(
"The 'activityCoefficients' node must have a 'model' attribute.",
activity_node,
)
activity_data = BlockMap({"model": model_map[activity_model.lower()]})
A_Debye = activity_node.findtext("A_Debye")
if A_Debye is not None:
# Assume the units are kg^0.5/gmol^0.5. Apparently,
# this is not handled in the same way as other units?
activity_data["A_Debye"] = A_Debye.strip() + " kg^0.5/gmol^0.5"
B_Debye = activity_node.findtext("B_Debye")
if B_Debye is not None:
# Assume the units are kg^0.5/gmol^0.5/m. Apparently,
# this is not handled in the same way as other units?
activity_data["B_Debye"] = B_Debye.strip() + " kg^0.5/gmol^0.5/m"
max_ionic_strength = activity_node.findtext("maxIonicStrength")
if max_ionic_strength is not None:
activity_data["max-ionic-strength"] = float(max_ionic_strength)
if activity_node.find("UseHelgesonFixedForm") is not None:
activity_data["use-Helgeson-fixed-form"] = True
B_dot_node = activity_node.find("B_dot")
if B_dot_node is not None:
activity_data["B-dot"] = get_float_or_quantity(B_dot_node)
ionic_radius_node = activity_node.find("ionicRadius")
species_ionic_radii = {} # type: Dict[str, QUANTITY]
if ionic_radius_node is not None:
default_radius = ionic_radius_node.get("default")
radius_units = ionic_radius_node.get("units")
if default_radius is not None:
if radius_units is not None:
if radius_units == "Angstroms":
radius_units = "angstrom"
default_radius += " {}".format(radius_units)
activity_data["default-ionic-radius"] = default_radius
else:
activity_data["default-ionic-radius"] = float(default_radius)
if ionic_radius_node.text is not None:
radii = clean_node_text(ionic_radius_node).split()
for r in radii:
species_name, radius = r.strip().rsplit(":", 1)
if radius_units is not None:
radius += " {}".format(radius_units)
species_ionic_radii[species_name] = radius
else:
species_ionic_radii[species_name] = float(radius)
beta_matrix_node = activity_node.find("DHBetaMatrix")
if beta_matrix_node is not None:
beta_matrix = []
beta_units = beta_matrix_node.get("units")
for beta_text in clean_node_text(beta_matrix_node).split():
# The C++ code to process this matrix from XML assumes that the species
# names in this matrix do not contain colons, so we retain that
# behavior here.
species_1, species_2, beta_value = beta_text.split(":")
beta_dict = {
"species": FlowList([species_1, species_2])
} # type: DH_BETA_MATRIX
if beta_units is not None:
beta_units = re.sub(r"([A-Za-z])-([A-Za-z])", r"\1*\2", beta_units)
beta_units = re.sub(r"([A-Za-z])([-\d])", r"\1^\2", beta_units)
beta_dict["beta"] = beta_value + " " + beta_units
else:
beta_dict["beta"] = float(beta_value)
beta_matrix.append(beta_dict)
if beta_matrix:
activity_data["beta"] = beta_matrix
ionic_strength_mods_node = activity_node.find("stoichIsMods")
is_mods = {}
if ionic_strength_mods_node is not None:
mods = clean_node_text(ionic_strength_mods_node).split()
for m in mods:
species_name, mod = m.strip().rsplit(":", 1)
is_mods[species_name] = float(mod)
electrolyte_species_type_node = activity_node.find("electrolyteSpeciesType")
etype_mods = {}
if electrolyte_species_type_node is not None:
mods = clean_node_text(electrolyte_species_type_node).split()
for m in mods:
species_name, mod = m.strip().rsplit(":", 1)
etype_mods[species_name] = mod
flat_species = {k: v for d in this_phase_species for k, v in d.items()}
for datasrc, species_names in flat_species.items():
if datasrc == "species":
datasrc = "species_data"
species = species_data.get(datasrc)
if species is None:
continue
for spec in species:
name = spec.attribs["name"]
if name not in species_names:
continue
debye_huckel = spec.attribs.get("Debye-Huckel", {})
if name in species_ionic_radii:
debye_huckel["ionic-radius"] = species_ionic_radii[name]
if name in is_mods:
if "weak-acid-charge" not in debye_huckel:
debye_huckel["weak-acid-charge"] = is_mods[name]
else:
if is_mods[name] != debye_huckel["weak-acid-charge"]:
warnings.warn(
"The stoichIsMods node was specified at the phase and "
"species level for species '{}'. The value specified "
"in the species node will be used".format(name)
)
if name in etype_mods:
etype = spec.electrolyte_species_type_mapping[etype_mods[name]]
if "electrolyte-species-type" not in debye_huckel:
debye_huckel["electrolyte-species-type"] = etype
else:
if debye_huckel["electrolyte-species-type"] != etype:
warnings.warn(
"The electrolyteSpeciesType node was specified at the "
"phase and species level for species '{}'. The value "
"specified in the species node will be "
"used".format(name)
)
if debye_huckel:
spec.attribs["Debye-Huckel"] = debye_huckel
return activity_data
@classmethod
def to_yaml(cls, representer, data):
"""Serialize the class instance to YAML format suitable for ruamel.yaml.
:param representer:
An instance of a ruamel.yaml representer type.
:param data:
An instance of this class that will be serialized.
The class instance should have an instance attribute called ``attribs`` which
is a dictionary representing the information about the instance. The dictionary
is serialized using the ``represent_dict`` method of the ``representer``.
"""
return representer.represent_dict(data.attribs)
class SpeciesThermo:
def __init__(self, thermo: etree.Element) -> None:
"""Represent the polynomial-type thermodynamic data for a `Species`.
:param thermo:
A ``species/thermo`` XML node. Must have one or more child nodes with tag
``NASA``, ``NASA9``, ``const_cp``, ``Shomate``, or ``Mu0``.
This class will process the `Species`-level thermodynamic information for the
polynomial thermo types. The pressure-dependent standard state types are
processed directly in the `Species` instance.
"""
thermo_type = thermo[0].tag
if thermo_type not in ["NASA", "NASA9", "const_cp", "Shomate", "Mu0"]:
raise TypeError("Unknown thermo model type: '{}'".format(thermo[0].tag))
func = getattr(self, thermo_type)
self.attribs = func(thermo)
def process_polynomial(
self, thermo: etree.Element, poly_type: str
) -> Tuple[List[List[float]], List[float]]:
"""Process the `Species` thermodynamic polynomial for several types.
:param thermo:
A ``species/thermo`` XML node. Must have one or more child nodes with tag
``NASA``, ``NASA9``, or ``Shomate``.
:param poly_type:
A string determining the type of polynomial. One of ``NASA``, ``NASA9``,
or ``Shomate``.
This method converts the polynomial data for the ``NASA``, ``NASA9``, and
``Shomate`` thermodynamic types into the appropriate YAML structure.
"""
temperature_ranges = set()
model_nodes = thermo.findall(poly_type)
unsorted_data = {}
for node in model_nodes:
Tmin = float(node.get("Tmin", 0))
Tmax = float(node.get("Tmax", 0))
if not Tmin or not Tmax:
raise MissingXMLAttribute(
"'Tmin' and 'Tmax' must both be specified.", node
)
temperature_ranges.add(Tmin)
temperature_ranges.add(Tmax)
float_array = node.find("floatArray")
if float_array is None:
raise MissingXMLNode(
"'{}' entry missing 'floatArray' node.".format(poly_type), node
)
unsorted_data[Tmin] = FlowList(
map(float, clean_node_text(float_array).split(","))
)
if len(temperature_ranges) != len(model_nodes) + 1:
raise ValueError(
"The midpoint temperature is not consistent between '{}' "
"entries".format(poly_type)
)
data = []
for key in sorted(unsorted_data.keys()):
data.append(unsorted_data[key])
return data, FlowList(sorted(temperature_ranges))
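    # Illustration (hypothetical values): two NASA ranges, 300-1000 K and 1000-5000 K,
    # give temperature_ranges == {300.0, 1000.0, 5000.0}. Three distinct values for two
    # nodes satisfies the len(temperature_ranges) == len(model_nodes) + 1 check, and the
    # coefficient lists are returned sorted by their Tmin.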
def Shomate(
self, thermo: etree.Element
) -> Dict[str, Union[str, "THERMO_POLY_TYPE"]]:
"""Process a Shomate `Species` thermodynamic polynomial.
:param thermo:
A ``species/thermo`` XML node. There must be one or more child nodes with
the tag ``Shomate``.
"""
thermo_attribs = BlockMap({"model": "Shomate"})
data, temperature_ranges = self.process_polynomial(thermo, "Shomate")
thermo_attribs["temperature-ranges"] = temperature_ranges
thermo_attribs["data"] = data
return thermo_attribs
def NASA(self, thermo: etree.Element) -> Dict[str, Union[str, "THERMO_POLY_TYPE"]]:
"""Process a NASA 7-coefficient thermodynamic polynomial.
:param thermo:
A ``species/thermo`` XML node. There must be one or more child nodes with
the tag ``NASA``.
"""
thermo_attribs = BlockMap({"model": "NASA7"})
data, temperature_ranges = self.process_polynomial(thermo, "NASA")
thermo_attribs["temperature-ranges"] = temperature_ranges
thermo_attribs["data"] = data
return thermo_attribs
def NASA9(self, thermo: etree.Element) -> Dict[str, Union[str, "THERMO_POLY_TYPE"]]:
"""Process a NASA 9-coefficient thermodynamic polynomial.
:param thermo:
A ``species/thermo`` XML node. There must be one or more child nodes with
the tag ``NASA9``.
"""
thermo_attribs = BlockMap({"model": "NASA9"})
data, temperature_ranges = self.process_polynomial(thermo, "NASA9")
thermo_attribs["temperature-ranges"] = temperature_ranges
thermo_attribs["data"] = data
return thermo_attribs
def const_cp(self, thermo: etree.Element) -> Dict[str, Union[str, "QUANTITY"]]:
"""Process a `Species` thermodynamic type with constant specific heat.
:param thermo:
A ``species/thermo`` XML node. There must be one child node with the tag
``const_cp``.
"""
thermo_attribs = BlockMap({"model": "constant-cp"})
const_cp_node = thermo.find("const_cp")
if const_cp_node is None:
raise MissingXMLNode(
"The 'thermo' node must contain a 'const_cp' node", thermo
)
for node in const_cp_node:
tag = node.tag
if tag == "t0":
tag = "T0"
thermo_attribs[tag] = get_float_or_quantity(node)
tmin = const_cp_node.get('Tmin')
if tmin is not None and tmin != '100.0':
thermo_attribs['T-min'] = float(tmin)
tmax = const_cp_node.get('Tmax')
if tmax is not None and tmax != '5000.0':
thermo_attribs['T-max'] = float(tmax)
return thermo_attribs
def Mu0(
self, thermo: etree.Element
) -> Dict[str, Union[str, Dict[float, Iterable]]]:
"""Process a piecewise Gibbs Free Energy thermodynamic polynomial.
:param thermo:
A ``species/thermo`` XML node. There must be one child node with the tag
``Mu0``.
"""
thermo_attribs = BlockMap({"model": "piecewise-Gibbs"})
Mu0_node = thermo.find("Mu0")
if Mu0_node is None:
raise MissingXMLNode("The 'thermo' node must contain a 'Mu0' node.", thermo)
ref_pressure = Mu0_node.get("Pref")
if ref_pressure is None:
raise MissingXMLAttribute(
"The 'Mu0' node must have a 'Pref' node.", Mu0_node
)
thermo_attribs["reference-pressure"] = float(ref_pressure)
H298_node = Mu0_node.find("H298")
if H298_node is None:
raise MissingXMLNode(
"The 'Mu0' node must contain an 'H298' node.", Mu0_node
)
thermo_attribs["h0"] = get_float_or_quantity(H298_node)
tmin = Mu0_node.get('Tmin')
if tmin is not None:
thermo_attribs['T-min'] = float(tmin)
tmax = Mu0_node.get('Tmax')
if tmax is not None:
thermo_attribs['T-max'] = float(tmax)
for float_node in Mu0_node.iterfind("floatArray"):
title = float_node.get("title")
if title == "Mu0Values":
dimensions = float_node.get("units")
if dimensions == "Dimensionless":
thermo_attribs["dimensionless"] = True
dimensions = ""
values = [] # type: Union[Iterable[float], Iterable[str]]
values = map(float, clean_node_text(float_node).split(","))
if dimensions:
values = [float2string(v) + " " + dimensions for v in values]
elif title == "Mu0Temperatures":
temperatures = map(float, clean_node_text(float_node).split(","))
thermo_attribs["data"] = dict(zip(temperatures, values))
return thermo_attribs
@classmethod
def to_yaml(cls, representer, data):
"""Serialize the class instance to YAML format suitable for ruamel.yaml.
:param representer:
An instance of a ruamel.yaml representer type.
:param data:
An instance of this class that will be serialized.
The class instance should have an instance attribute called ``attribs`` which
is a dictionary representing the information about the instance. The dictionary
is serialized using the ``represent_dict`` method of the ``representer``.
"""
return representer.represent_dict(data.attribs)
class SpeciesTransport:
species_transport_mapping = {"gas_transport": "gas"}
transport_properties_mapping = {
"LJ_welldepth": "well-depth",
"LJ_diameter": "diameter",
"polarizability": "polarizability",
"rotRelax": "rotational-relaxation",
"dipoleMoment": "dipole",
"dispersion_coefficient": "dispersion-coefficient",
"quadrupole_polarizability": "quadrupole-polarizability",
}
def __init__(self, transport: etree.Element):
"""Represent the Lennard-Jones transport properties of a species.
:param transport:
A ``species/transport`` XML node.
This class only supports one type of transport model, ``gas_transport``.
"""
self.attribs = BlockMap({})
transport_model = transport.get("model")
if transport_model not in self.species_transport_mapping:
raise TypeError(
"Unknown transport model type: '{}'".format(transport.get("model"))
)
self.attribs["model"] = self.species_transport_mapping[transport_model]
self.attribs["geometry"] = transport.findtext("string[@title='geometry']")
for prop_node in transport:
if prop_node.tag == "string":
continue
# Don't use get_float_or_units because the units of the gas_transport
# parameters are assumed to be customary units in YAML.
value = float(clean_node_text(prop_node))
name = self.transport_properties_mapping.get(prop_node.tag)
if name is None:
raise TypeError(
"Unknown transport property node: '{}'".format(prop_node.tag)
)
self.attribs[name] = value
@classmethod
def to_yaml(cls, representer, data):
"""Serialize the class instance to YAML format suitable for ruamel.yaml.
:param representer:
An instance of a ruamel.yaml representer type.
:param data:
An instance of this class that will be serialized.
The class instance should have an instance attribute called ``attribs`` which
is a dictionary representing the information about the instance. The dictionary
is serialized using the ``represent_dict`` method of the ``representer``.
"""
return representer.represent_dict(data.attribs)
class Species:
standard_state_model_mapping = {
"ideal-gas": "ideal-gas",
"constant_incompressible": "constant-volume",
"constant-incompressible": "constant-volume",
"waterPDSS": "liquid-water-IAPWS95",
"waterIAPWS": "liquid-water-IAPWS95",
"temperature_polynomial": "molar-volume-temperature-polynomial",
"density_temperature_polynomial": "density-temperature-polynomial",
}
electrolyte_species_type_mapping = {
"weakAcidAssociated": "weak-acid-associated",
"chargedSpecies": "charged-species",
"strongAcidAssociated": "strong-acid-associated",
"polarNeutral": "polar-neutral",
"nonpolarNeutral": "nonpolar-neutral",
}
def __init__(self, species_node: etree.Element):
"""Represent an XML ``species`` node.
:param species_node:
The XML node with the species information.
This class processes the XML node of a species definition and generates a
mapping for the YAML output. The mapping is stored in the ``attribs`` instance
attribute and automatically formatted to YAML by the `~Species.to_yaml` class
method.
"""
self.attribs = BlockMap()
species_name = species_node.get("name")
if species_name is None:
raise MissingXMLAttribute(
"The 'species' node must have a 'name' attribute.", species_node
)
self.attribs["name"] = species_name
atom_array = species_node.find("atomArray")
if atom_array is not None and atom_array.text is not None:
self.attribs["composition"] = split_species_value_string(atom_array)
else:
self.attribs["composition"] = {}
charge_node = species_node.find("charge")
if charge_node is not None:
charge = float(clean_node_text(charge_node))
if charge != 0.0:
self.attribs["composition"]["E"] = -1 * charge
if species_node.findtext("note") is not None:
self.attribs["note"] = species_node.findtext("note")
thermo = species_node.find("thermo")
if thermo is not None:
thermo_model = thermo.get("model", "")
# This node is not used anywhere, but we don't want it to be processed by
# the SpeciesThermo constructor or the hkft method
pseudo_species = thermo.find("pseudoSpecies")
if pseudo_species is not None:
thermo.remove(pseudo_species)
# The IonFromNeutral species thermo node does not correspond to a
# SpeciesThermo type and the IonFromNeutral model doesn't have a thermo
# node in the YAML format. Instead, the data from the XML thermo node are
# moved to the equation-of-state node in YAML
if thermo_model.lower() == "ionfromneutral":
neutral_spec_mult_node = thermo.find("neutralSpeciesMultipliers")
if neutral_spec_mult_node is None:
raise MissingXMLNode(
"'IonFromNeutral' node requires a 'neutralSpeciesMultipliers' "
"node.",
thermo,
)
species_multipliers = FlowMap({})
neutral_spec_mult = clean_node_text(neutral_spec_mult_node).split()
for spec_mult in neutral_spec_mult:
species, multiplier = spec_mult.rsplit(":", 1)
species_multipliers[species] = float(multiplier)
if species_multipliers:
self.attribs["equation-of-state"] = {
"model": "ions-from-neutral-molecule",
"multipliers": species_multipliers,
}
if thermo.find("specialSpecies") is not None:
self.attribs["equation-of-state"]["special-species"] = True
elif thermo_model.lower() == "hkft":
self.attribs["equation-of-state"] = self.hkft(species_node)
else:
if len(thermo) > 0:
self.attribs["thermo"] = SpeciesThermo(thermo)
transport = species_node.find("transport")
if transport is not None:
self.attribs["transport"] = SpeciesTransport(transport)
self.process_standard_state_node(species_node)
electrolyte = species_node.findtext("electrolyteSpeciesType")
debye_huckel = {}
if electrolyte is not None:
electrolyte = self.electrolyte_species_type_mapping[electrolyte.strip()]
debye_huckel["electrolyte-species-type"] = electrolyte
weak_acid_charge = species_node.find("stoichIsMods")
if weak_acid_charge is not None:
debye_huckel["weak-acid-charge"] = get_float_or_quantity(weak_acid_charge)
if debye_huckel:
self.attribs["Debye-Huckel"] = debye_huckel
def hkft(self, species_node: etree.Element) -> Dict[str, "HKFT_THERMO_TYPE"]:
"""Process a species with HKFT thermo type.
:param species_node:
The XML node with the species information.
Requires synthesizing data from the ``thermo`` node and the ``standardState``
node.
"""
thermo_node = species_node.find("./thermo/HKFT")
std_state_node = species_node.find("standardState")
if thermo_node is None or std_state_node is None:
raise MissingXMLNode(
"An HKFT species requires both the 'thermo' and 'standardState' nodes.",
species_node,
)
eqn_of_state = BlockMap({"model": "HKFT"})
for t_node in thermo_node:
if t_node.tag == "DH0_f_Pr_Tr":
eqn_of_state["h0"] = get_float_or_quantity(t_node)
elif t_node.tag == "DG0_f_Pr_Tr":
eqn_of_state["g0"] = get_float_or_quantity(t_node)
elif t_node.tag == "S0_Pr_Tr":
eqn_of_state["s0"] = get_float_or_quantity(t_node)
a = FlowList([])
c = FlowList([])
for tag in ["a1", "a2", "a3", "a4", "c1", "c2"]:
node = std_state_node.find(tag)
if node is None:
raise MissingXMLNode(
"The HKFT 'standardState' node requires a '{}' node.".format(tag),
std_state_node,
)
if tag.startswith("a"):
a.append(get_float_or_quantity(node))
elif tag.startswith("c"):
c.append(get_float_or_quantity(node))
eqn_of_state["a"] = a
eqn_of_state["c"] = c
omega_node = std_state_node.find("omega_Pr_Tr")
if omega_node is None:
raise MissingXMLNode(
"The HKFT 'standardState' node requires an 'omega_Pr_Tr' node.",
std_state_node,
)
eqn_of_state["omega"] = get_float_or_quantity(omega_node)
return eqn_of_state
def process_standard_state_node(self, species_node: etree.Element) -> None:
"""Process the ``standardState`` node in a species definition.
:param species_node:
The XML node with the species information.
If the model is ``IonFromNeutral`` or ``HKFT``, this function doesn't do
anything to the `Species` object. Otherwise, the model data is put into the YAML
``equation-of-state`` node.
"""
std_state = species_node.find("standardState")
if std_state is not None:
std_state_model = std_state.get("model")
if std_state_model is None:
std_state_model = "ideal-gas"
elif std_state_model.lower() in ["ionfromneutral", "hkft"]:
# If the standard state model is IonFromNeutral or HKFT, we don't
# need to do anything with it because it is processed above in the
# species __init__ function
return
eqn_of_state = {
"model": self.standard_state_model_mapping[std_state_model]
} # type: Dict[str, Union[str, QUANTITY, List[QUANTITY]]]
if std_state_model == "constant_incompressible":
molar_volume_node = std_state.find("molarVolume")
if molar_volume_node is None:
raise MissingXMLNode(
"If the standard state model is 'constant_incompressible', it "
"must include a 'molarVolume' node",
std_state,
)
eqn_of_state["molar-volume"] = get_float_or_quantity(molar_volume_node)
elif "temperature_polynomial" in std_state_model:
poly_node = std_state.find("volumeTemperaturePolynomial")
if poly_node is None:
raise MissingXMLNode(
"'{}' standard state model requires a "
"'volumeTemperaturePolynomial' node".format(std_state_model),
std_state,
)
poly_values_node = poly_node.find("floatArray")
if poly_values_node is None:
raise MissingXMLNode(
"The 'floatArray' node must be specified", std_state
)
values = clean_node_text(poly_values_node).split(",")
poly_units = poly_values_node.get("units", "")
if not poly_units:
eqn_of_state["data"] = FlowList(map(float, values))
else:
poly_units = re.sub(r"([A-Za-z])-([A-Za-z])", r"\1*\2", poly_units)
poly_units = re.sub(r"([A-Za-z])([-\d])", r"\1^\2", poly_units)
# Need to put units on each term in the polynomial because we can't
# reliably parse the units attribute string into a mass and a length
# (for example, if the units are g/L) and there's no way to specify
# YAML node-level units of volume.
data = []
for v, suffix in zip(values, ("", "/K", "/K^2", "/K^3")):
data.append("{} {}{}".format(v.strip(), poly_units, suffix))
eqn_of_state["data"] = FlowList(data)
self.attribs["equation-of-state"] = eqn_of_state
@classmethod
def to_yaml(cls, representer, data):
"""Serialize the class instance to YAML format suitable for ruamel.yaml.
:param representer:
An instance of a ruamel.yaml representer type.
:param data:
An instance of this class that will be serialized.
The class instance should have an instance attribute called ``attribs`` which
is a dictionary representing the information about the instance. The dictionary
is serialized using the ``represent_dict`` method of the ``representer``.
"""
return representer.represent_dict(data.attribs)
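# Hypothetical sketch (not part of the original converter): building a Species from a
# minimal, made-up CTML fragment, to show how the composition and NASA thermo data are
# gathered into the ``attribs`` mapping. The element names and coefficients are
# illustrative only.
def _example_species_from_xml():
    node = etree.fromstring(
        '<species name="AR">'
        "<atomArray>Ar:1</atomArray>"
        "<thermo>"
        '<NASA Tmin="300.0" Tmax="1000.0" P0="100000.0">'
        '<floatArray size="7">2.5, 0, 0, 0, 0, -745.375, 4.366</floatArray>'
        "</NASA>"
        '<NASA Tmin="1000.0" Tmax="5000.0" P0="100000.0">'
        '<floatArray size="7">2.5, 0, 0, 0, 0, -745.375, 4.366</floatArray>'
        "</NASA>"
        "</thermo>"
        "</species>"
    )
    spec = Species(node)
    # spec.attribs["name"] == "AR"; spec.attribs["thermo"] holds the two NASA7 ranges.
    return spec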
class Reaction:
def __init__(self, reaction: etree.Element, node_motz_wise: bool):
"""Represent an XML ``reaction`` node.
:param reaction:
The XML node with the reaction information.
:param node_motz_wise:
``True`` if the ``reactionData`` node that contains this ``reaction`` node
has the ``motz_wise`` attribute set to ``True``. Otherwise, ``False``. This
argument is used to adjust each reaction instead of setting the
`Phase`-level option because the reactions are processed before the phases,
so it isn't known at this point what phase these reactions will apply to.
"""
self.attribs = BlockMap({})
reaction_id = reaction.get("id", False) # type: Union[str, int, bool]
if reaction_id:
# If the reaction_id can be converted to an integer, it was likely
# added automatically, so there's no need to include it in the
# output. Including an integer-like reaction ID will generate an error
# when reading the YAML file.
try:
reaction_id = int(reaction_id)
except ValueError:
self.attribs["id"] = reaction_id
reaction_equation = reaction.findtext("equation")
if reaction_equation is None:
raise MissingNodeText(
"The 'reaction' node must have an 'equation' node.", reaction
)
# This has to replace the reaction direction symbols separately because
# species names can have [ or ] in them
self.attribs["equation"] = reaction_equation.replace("[=]", "<=>").replace(
"=]", "=>"
)
reaction_type = reaction.get("type", "arrhenius").lower()
rate_coeff = reaction.find("rateCoeff")
if rate_coeff is None:
raise MissingXMLNode(
"The 'reaction' node must have a 'rateCoeff' node.", reaction
)
if reaction_type in ["arrhenius", "elementary"]:
reaction_type = "arrhenius"
elif reaction_type in ["threebody", "three_body"]:
reaction_type = "threebody"
elif reaction_type == "falloff":
falloff_node = rate_coeff.find("falloff")
if falloff_node is None:
raise MissingXMLNode(
"Falloff reaction types must have a 'falloff' node.", rate_coeff
)
falloff_type = falloff_node.get("type")
if falloff_type not in ["Lindemann", "Troe", "SRI"]:
raise TypeError(
"Unknown falloff type '{}' for reaction id '{}'".format(
falloff_type, reaction.get("id")
)
)
else:
reaction_type = falloff_type
elif reaction_type in ["chemact", "chemically_activated"]:
falloff_node = rate_coeff.find("falloff")
if falloff_node is None:
raise MissingXMLNode(
"chemAct reaction types must have a falloff node.", rate_coeff
)
falloff_type = falloff_node.get("type")
if falloff_type != "Troe":
raise TypeError(
"Unknown activation type '{}' for reaction id '{}'".format(
falloff_type, reaction.get("id")
)
)
elif reaction_type in ["plog", "pdep_arrhenius"]:
reaction_type = "plog"
elif reaction_type == "chebyshev":
# Remove deprecated '(+M)' third body notation
self.attribs["equation"] = re.sub(r" *\( *\+ *M *\)", "",
self.attribs["equation"])
# There's only one way to spell Chebyshev, so no need to change the
# reaction_type.
elif reaction_type in [
"interface",
"edge",
"surface",
"global",
"electrochemical",
]:
reaction_type = "interface"
elif reaction_type in [
"butlervolmer_noactivitycoeffs",
"butlervolmer",
"surfaceaffinity",
]:
warnings.warn(
"Butler-Volmer parameters are not supported in the YAML "
"format. If this is an important feature to you, please see the "
"following issue and pull request on GitHub:\n"
"https://github.com/Cantera/cantera/issues/749\n"
"https://github.com/Cantera/cantera/pulls/750"
)
reaction_type = "interface"
else:
raise TypeError(
"Unknown reaction type '{}' for reaction id '{}'".format(
reaction_type, reaction.get("id")
)
)
func = getattr(self, reaction_type.lower())
self.attribs.update(func(rate_coeff))
if node_motz_wise and self.attribs.get("Motz-Wise") is None:
self.attribs["Motz-Wise"] = True
if reaction.get("negative_A", "").lower() == "yes":
self.attribs["negative-A"] = True
reactants_node = reaction.find("reactants")
if reactants_node is None:
raise MissingXMLNode(
"The 'reaction' node must have a 'reactants' node.", reaction
)
reactants = split_species_value_string(reactants_node)
orders = {}
for order_node in reaction.iterfind("order"):
species = order_node.get("species", "")
if not species:
raise MissingXMLAttribute(
"A reaction 'order' node must have a 'species' attribute",
order_node,
)
order = get_float_or_quantity(order_node)
if species not in reactants or not np.isclose(reactants[species], order):
orders[species] = order
if orders:
self.attribs["orders"] = orders
if reaction.get("negative_orders", "").lower() == "yes":
self.attribs["negative-orders"] = True
if reaction.get("nonreactant_orders", "").lower() == "yes":
self.attribs["nonreactant-orders"] = True
if reaction.get("duplicate", "").lower() == "yes":
self.attribs["duplicate"] = True
@classmethod
def to_yaml(cls, representer, data):
"""Serialize the class instance to YAML format suitable for ruamel.yaml.
:param representer:
An instance of a ruamel.yaml representer type.
:param data:
An instance of this class that will be serialized.
The class instance should have an instance attribute called ``attribs`` which
is a dictionary representing the information about the instance. The dictionary
is serialized using the ``represent_dict`` method of the ``representer``.
"""
return representer.represent_dict(data.attribs)
def sri(self, rate_coeff: etree.Element) -> "SRI_TYPE":
"""Process an SRI reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
        reaction_attribs = self.lindemann(rate_coeff)
falloff_node = rate_coeff.find("falloff")
if falloff_node is None:
raise MissingXMLNode("SRI reaction requires 'falloff' node", rate_coeff)
SRI_names = list("ABCDE")
SRI_data = FlowMap({})
for name, param in zip(SRI_names, clean_node_text(falloff_node).split()):
SRI_data[name] = float(param)
reaction_attribs["SRI"] = SRI_data
return reaction_attribs
def threebody(self, rate_coeff: etree.Element) -> "THREEBODY_TYPE":
"""Process a three-body reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
reaction_attribs = FlowMap({"type": "three-body"})
reaction_attribs["rate-constant"] = self.process_arrhenius_parameters(
rate_coeff.find("Arrhenius")
)
eff_node = rate_coeff.find("efficiencies")
if eff_node is not None:
reaction_attribs["efficiencies"] = self.process_efficiencies(eff_node)
return reaction_attribs
def lindemann(self, rate_coeff: etree.Element) -> "LINDEMANN_TYPE":
"""Process a Lindemann falloff reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
reaction_attribs = FlowMap({"type": "falloff"})
for arr_coeff in rate_coeff.iterfind("Arrhenius"):
if arr_coeff.get("name") == "k0":
reaction_attribs[
"low-P-rate-constant"
] = self.process_arrhenius_parameters(arr_coeff)
elif arr_coeff.get("name") is None:
reaction_attribs[
"high-P-rate-constant"
] = self.process_arrhenius_parameters(arr_coeff)
else:
raise TypeError("Too many 'Arrhenius' nodes")
eff_node = rate_coeff.find("efficiencies")
if eff_node is not None:
reaction_attribs["efficiencies"] = self.process_efficiencies(eff_node)
return reaction_attribs
def troe(self, rate_coeff: etree.Element) -> "TROE_TYPE":
"""Process a Troe falloff reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
# This gets the low-p and high-p rate constants and the efficiencies
reaction_attribs = self.lindemann(rate_coeff)
troe_node = rate_coeff.find("falloff")
if troe_node is None:
raise MissingXMLNode(
"Troe reaction types must include a 'falloff' node", rate_coeff
)
troe_params = clean_node_text(troe_node).split()
troe_names = ["A", "T3", "T1", "T2"]
reaction_attribs["Troe"] = FlowMap()
# zip stops when the shortest iterable is exhausted. If T2 is not present
# in the Troe parameters (that is, troe_params is three elements long), it
# will be omitted here as well.
for name, param in zip(troe_names, troe_params):
reaction_attribs["Troe"].update({name: float(param)}) # type: ignore
return reaction_attribs
def chemact(self, rate_coeff: etree.Element) -> "CHEMACT_TYPE":
"""Process a chemically activated falloff reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
reaction_attribs = FlowMap({"type": "chemically-activated"})
for arr_coeff in rate_coeff.iterfind("Arrhenius"):
if arr_coeff.get("name") == "kHigh":
reaction_attribs[
"high-P-rate-constant"
] = self.process_arrhenius_parameters(arr_coeff)
elif arr_coeff.get("name") is None:
reaction_attribs[
"low-P-rate-constant"
] = self.process_arrhenius_parameters(arr_coeff)
else:
raise TypeError("Too many 'Arrhenius' nodes")
eff_node = rate_coeff.find("efficiencies")
if eff_node is not None:
reaction_attribs["efficiencies"] = self.process_efficiencies(eff_node)
troe_node = rate_coeff.find("falloff")
if troe_node is None:
raise MissingXMLNode(
"Chemically activated reaction types must include a 'falloff' node",
rate_coeff,
)
troe_params = clean_node_text(troe_node).split()
troe_names = ["A", "T3", "T1", "T2"]
reaction_attribs["Troe"] = FlowMap()
# zip stops when the shortest iterable is exhausted. If T2 is not present
# in the Troe parameters (that is, troe_params is three elements long), it
# will be omitted here as well.
for name, param in zip(troe_names, troe_params):
reaction_attribs["Troe"].update({name: float(param)})
return reaction_attribs
def plog(self, rate_coeff: etree.Element) -> "PLOG_TYPE":
"""Process a PLOG reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
reaction_attributes = FlowMap({"type": "pressure-dependent-Arrhenius"})
rate_constants = []
for arr_coeff in rate_coeff.iterfind("Arrhenius"):
rate_constant = self.process_arrhenius_parameters(arr_coeff)
P_node = arr_coeff.find("P")
if P_node is None:
raise MissingXMLNode(
"A 'plog' reaction must have a 'P' node.", arr_coeff
)
rate_constant["P"] = get_float_or_quantity(P_node)
rate_constants.append(rate_constant)
reaction_attributes["rate-constants"] = rate_constants
return reaction_attributes
def chebyshev(self, rate_coeff: etree.Element) -> "CHEBYSHEV_TYPE":
"""Process a Chebyshev reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
reaction_attributes = FlowMap(
{
"type": "Chebyshev",
"temperature-range": FlowList(),
"pressure-range": FlowList(),
}
)
for range_tag in ["Tmin", "Tmax", "Pmin", "Pmax"]:
range_node = rate_coeff.find(range_tag)
if range_node is None:
raise MissingXMLNode(
"A Chebyshev 'reaction' node must include a '{}' "
"node".format(range_tag),
rate_coeff,
)
if range_tag.startswith("T"):
reaction_attributes["temperature-range"].append(
get_float_or_quantity(range_node)
)
elif range_tag.startswith("P"):
reaction_attributes["pressure-range"].append(
get_float_or_quantity(range_node)
)
data_node = rate_coeff.find("floatArray")
if data_node is None:
raise MissingXMLNode(
"A Chebyshev 'reaction' node must include a 'floatArray' node.",
rate_coeff,
)
n_p_values = int(data_node.get("degreeP", 0))
n_T_values = int(data_node.get("degreeT", 0))
if not n_p_values or not n_T_values:
raise MissingXMLAttribute(
"A Chebyshev 'floatArray' node is missing the 'degreeP' or 'degreeT' "
"attributes.",
data_node,
)
raw_data = [float(a) for a in clean_node_text(data_node).split(",")]
data = []
for i in range(0, len(raw_data), n_p_values):
data.append(FlowList(raw_data[i : i + n_p_values]))
if len(data) != n_T_values:
raise ValueError(
"The number of coefficients in the Chebyshev data do not match the "
"specified temperature and pressure degrees."
)
reaction_attributes["data"] = data
return reaction_attributes
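    # Illustration (hypothetical sizes): with degreeT="3" and degreeP="2", the six
    # comma-separated coefficients in 'floatArray' are reshaped row by row into three
    # FlowList rows of two pressure coefficients each.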
def interface(self, rate_coeff: etree.Element) -> "INTERFACE_TYPE":
"""Process an interface reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
This represents both interface and electrochemical reactions.
"""
arr_node = rate_coeff.find("Arrhenius")
if arr_node is None:
raise MissingXMLNode(
"An interface 'reaction' node requires an 'Arrhenius' node", rate_coeff
)
if arr_node.get("type", "").lower() == "stick":
reaction_attributes = FlowMap(
{"sticking-coefficient": self.process_arrhenius_parameters(arr_node)}
)
species = arr_node.get("species", "")
if species:
reaction_attributes["sticking-species"] = species
motz_wise = arr_node.get("motz_wise", "").lower()
if motz_wise == "true":
reaction_attributes["Motz-Wise"] = True
elif motz_wise == "false":
reaction_attributes["Motz-Wise"] = False
else:
reaction_attributes = FlowMap(
{"rate-constant": self.process_arrhenius_parameters(arr_node)}
)
cov_node = arr_node.find("coverage")
if cov_node is not None:
cov_species = cov_node.get("species")
cov_a = cov_node.find("a")
if cov_a is None:
raise MissingXMLNode(
"A 'coverage' node requires an 'a' node.", cov_node
)
cov_m = cov_node.find("m")
if cov_m is None:
raise MissingXMLNode(
"A 'coverage' node requires an 'm' node.", cov_node
)
cov_e = cov_node.find("e")
if cov_e is None:
raise MissingXMLNode(
"A 'coverage' node requires an 'e' node.", cov_node
)
reaction_attributes["coverage-dependencies"] = {
cov_species: {
"a": get_float_or_quantity(cov_a),
"m": get_float_or_quantity(cov_m),
"E": get_float_or_quantity(cov_e),
}
}
echem_node = rate_coeff.find("electrochem")
if echem_node is not None:
beta = echem_node.get("beta")
if beta is not None:
reaction_attributes["beta"] = float(beta)
if rate_coeff.get("type", "").lower() == "exchangecurrentdensity":
reaction_attributes["exchange-current-density-formulation"] = True
return reaction_attributes
def arrhenius(self, rate_coeff: etree.Element) -> "ARRHENIUS_TYPE":
"""Process a standard Arrhenius-type reaction.
:param rate_coeff:
The XML node with rate coefficient information for this reaction.
"""
return FlowMap(
{
"rate-constant": self.process_arrhenius_parameters(
rate_coeff.find("Arrhenius")
)
}
)
def process_arrhenius_parameters(
self, arr_node: Optional[etree.Element]
) -> "ARRHENIUS_PARAMS":
"""Process the parameters from an ``Arrhenius`` child of a ``rateCoeff`` node.
:param arr_node:
The XML node with the Arrhenius parameters. Must have three child nodes
with tags ``A``, ``b``, and ``E``.
"""
if arr_node is None:
raise MissingXMLNode("The 'Arrhenius' node must be present.")
A_node = arr_node.find("A")
b_node = arr_node.find("b")
E_node = arr_node.find("E")
if A_node is None or b_node is None or E_node is None:
raise MissingXMLNode(
"All of 'A', 'b', and 'E' must be specified for the 'Arrhenius' "
"parameters.",
arr_node,
)
return FlowMap(
{
"A": get_float_or_quantity(A_node),
"b": get_float_or_quantity(b_node),
"Ea": get_float_or_quantity(E_node),
}
)
def process_efficiencies(self, eff_node: etree.Element) -> "EFFICIENCY_PARAMS":
"""Process the efficiency information about a reaction.
:param eff_node:
The XML efficiency node. The text of the node must be a space-delimited
string of ``species:value`` pairs.
"""
efficiencies = [eff.rsplit(":", 1) for eff in clean_node_text(eff_node).split()]
return FlowMap({s: float(e) for s, e in efficiencies})
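# Illustration only (not part of the original script): the space-delimited
# ``species:value`` text that Reaction.process_efficiencies expects parses like this.
# The sample string is made up.
def _example_efficiency_parsing():
    text = "AR:0.83 H2O:5.0 CO2:2.4"
    efficiencies = [eff.rsplit(":", 1) for eff in text.split()]
    return {s: float(e) for s, e in efficiencies}  # {'AR': 0.83, 'H2O': 5.0, 'CO2': 2.4}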
def create_species_from_data_node(ctml_tree: etree.Element) -> Dict[str, List[Species]]:
"""Generate lists of `Species` instances mapped to the ``speciesData`` id string.
:param ctml_tree:
The root XML node of the CTML document.
The CTML document is searched for ``speciesData`` nodes that contain ``species``
child nodes. Each ``speciesData`` node must have an ``id`` attribute, which is used
as the key of the returned dictionary. The values in the dictionary are lists of
`Species` instances representing the ``species`` nodes in that ``speciesData``
node. The ``id`` attribute is also used as the top-level key in the YAML document
for that set of species, with the exception that ``species_data`` is changed to
just ``species``.
If ``speciesData`` nodes with the same ``id`` attribute are found, only the first
section with that ``id`` is put into the YAML output file.
"""
species = {} # type: Dict[str, List[Species]]
for species_data_node in ctml_tree.iterfind("speciesData"):
this_data_node_id = species_data_node.get("id", "")
if this_data_node_id in species:
warnings.warn(
"Duplicate 'speciesData' id found: '{}'. Only the first section will "
"be included in the output file.".format(this_data_node_id)
)
continue
species[this_data_node_id] = [
Species(s) for s in species_data_node.iterfind("species")
]
return species
def create_reactions_from_data_node(
ctml_tree: etree.Element,
) -> Dict[str, List[Reaction]]:
"""Generate lists of `Reaction` instances mapped to the ``reactionData`` id string.
:param ctml_tree:
The root XML node of the CTML document.
The CTML document is searched for ``reactionData`` nodes that contain ``reaction``
child nodes. Each ``reactionData`` node must have an ``id`` attribute, which is used
as the key of the returned dictionary. The values in the dictionary are lists of
`Reaction` instances representing the ``reaction`` nodes in that ``reactionData``
node. The ``id`` attribute is also used as the top-level key in the YAML document
for that set of reactions, with the exception that ``reaction_data`` is changed to
just ``reactions``.
If ``reactionData`` nodes with the same ``id`` attribute are found, only the first
section with that ``id`` is put into the YAML output file.
"""
reactions = {} # type: Dict[str, List[Reaction]]
for reactionData_node in ctml_tree.iterfind("reactionData"):
node_motz_wise = False
if reactionData_node.get("motz_wise", "").lower() == "true":
node_motz_wise = True
this_data_node_id = reactionData_node.get("id", "")
if this_data_node_id in reactions:
warnings.warn(
"Duplicate 'reactionData' id found: '{}'. Only the first section will "
"be included in the output file.".format(this_data_node_id)
)
continue
reactions[this_data_node_id] = [
Reaction(r, node_motz_wise) for r in reactionData_node.iterfind("reaction")
]
return reactions
def create_phases_from_data_node(
ctml_tree: etree.Element,
species_data: Dict[str, List[Species]],
reaction_data: Dict[str, List[Reaction]],
) -> List[Phase]:
"""Generate a list of `Phase` instances from XML ``phase`` nodes.
:param ctml_tree:
The root XML node of the CTML document.
:param species_data:
Mapping of ``speciesData`` id strings to lists of `Species` instances.
:param reaction_data:
Mapping of ``reactionData`` id strings to lists of `Reaction` instances.
The CTML document is searched for ``phase`` nodes, which are processed into `Phase`
instances. For any Lattice-type phases, the child ``phase`` nodes are un-nested
from their parent node.
"""
phases = [
Phase(node, species_data, reaction_data) for node in ctml_tree.iterfind("phase")
]
l_nodes = []
for p in phases:
if hasattr(p, "lattice_nodes"):
l_nodes.extend(copy.deepcopy(p.lattice_nodes))
del p.lattice_nodes
if l_nodes:
phases.extend(l_nodes)
return phases
def convert(
inpfile: Union[str, Path] = None,
outfile: Union[str, Path] = None,
text: str = None,
) -> None:
"""Convert an input legacy CTML file to a YAML file.
:param inpfile:
The input CTML file name. Exclusive with ``text``, only one of the two can be
specified.
:param outfile:
The output YAML file name.
:param text:
Contains a string with the CTML input file content. Exclusive with ``inpfile``,
only one of the two can be specified.
All files are assumed to be relative to the current working directory of the Python
process running this script.
"""
if inpfile is not None and text is not None:
raise ValueError("Only one of 'inpfile' or 'text' should be specified.")
elif inpfile is not None:
inpfile = Path(inpfile)
ctml_text = inpfile.read_text().lstrip()
if outfile is None:
outfile = inpfile.with_suffix(".yaml")
elif text is not None:
if outfile is None:
raise ValueError("If 'text' is passed, 'outfile' must also be passed.")
ctml_text = text.lstrip()
else:
raise ValueError("One of 'inpfile' or 'text' must be specified")
# Replace any raw ampersands in the text with an escaped ampersand. This
# substitution is necessary because ctml_writer outputs literal & characters
# from text data into the XML output. Although this doesn't cause a problem
# with the custom XML parser in Cantera, standards-compliant XML parsers
# like the Expat one included in Python can't handle the raw & character. I
# could not figure out a way to override the parsing logic such that & could
# be escaped in the data during parsing, so it has to be done manually here.
    # According to https://stackoverflow.com/a/1091953 there are 5 escaped
    # characters in XML: " (&quot;), ' (&apos;), & (&amp;), < (&lt;), and >
    # (&gt;). This code only replaces & not followed by one of the escaped
    # character codes.
    ctml_text = re.sub("&(?!amp;|quot;|apos;|lt;|gt;)", "&amp;", ctml_text)
ctml_tree = etree.fromstring(ctml_text)
species_data = create_species_from_data_node(ctml_tree)
reaction_data = create_reactions_from_data_node(ctml_tree)
phases = create_phases_from_data_node(ctml_tree, species_data, reaction_data)
# This should be done after phase processing
output_species = BlockMap({})
for species_node_id, species_list in species_data.items():
if not species_list:
continue
if species_node_id == "species_data":
species_node_id = "species"
output_species[species_node_id] = species_list
output_species.yaml_set_comment_before_after_key(species_node_id, before="\n")
output_reactions = BlockMap({})
for reaction_node_id, reaction_list in reaction_data.items():
if not reaction_list:
continue
if reaction_node_id == "reaction_data":
reaction_node_id = "reactions"
output_reactions[reaction_node_id] = reaction_list
output_reactions.yaml_set_comment_before_after_key(
reaction_node_id, before="\n"
)
output_phases = BlockMap({"phases": phases})
output_phases.yaml_set_comment_before_after_key("phases", before="\n")
emitter = yaml.YAML()
for cl in [Phase, Species, SpeciesThermo, SpeciesTransport, Reaction]:
emitter.register_class(cl)
metadata = BlockMap(
{
"generator": "ctml2yaml",
"cantera-version": "b1",
"date": formatdate(localtime=True),
}
)
if inpfile is not None:
metadata["input-files"] = FlowList([str(inpfile)])
with Path(outfile).open("w") as output_file:
emitter.dump(metadata, output_file)
emitter.dump(output_phases, output_file)
if output_species:
emitter.dump(output_species, output_file)
if output_reactions:
emitter.dump(output_reactions, output_file)
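# Hypothetical usage sketch (not part of the original script): calling `convert` from
# Python rather than the command line. The file names below are placeholders.
def _example_convert_usage():
    # Convert a CTML file; the output name defaults to the input name with '.yaml'.
    convert(inpfile="mechanism.xml")
    # Or convert CTML text directly; 'outfile' must then be given explicitly.
    convert(text=Path("mechanism.xml").read_text(), outfile="mechanism-from-text.yaml")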
def main():
"""Parse command line arguments and pass them to `convert`."""
parser = argparse.ArgumentParser(
description="Convert legacy CTML input files to YAML format",
epilog=(
"The 'output' argument is optional. If it is not given, an output "
"file with the same name as the input file is used, with the extension "
"changed to '.yaml'."
),
)
parser.add_argument("input", help="The input CTML filename. Must be specified.")
parser.add_argument("output", nargs="?", help="The output YAML filename. Optional.")
if len(sys.argv) not in [2, 3]:
if len(sys.argv) > 3:
print(
"ctml2yaml.py: error: unrecognized arguments:",
' '.join(sys.argv[3:]),
file=sys.stderr,
)
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
input_file = Path(args.input)
if args.output is None:
output_file = input_file.with_suffix(".yaml")
else:
output_file = Path(args.output)
convert(input_file, output_file)
if __name__ == "__main__":
    main()
/LibJciHitachi-0.5.2-py3-none-any.whl/JciHitachi/status.py

import base64
from . import utility as util
from .model import JciHitachiAC, JciHitachiDH, JciHitachiHE
class JciHitachiCommand: # pragma: no cover
"""Abstract class for sending job command.
Parameters
----------
gateway_mac_address : str
Gateway mac address.
"""
def __init__(self, gateway_mac_address):
self.job_info_base = bytearray.fromhex(
"d0d100003c6a9dffff03e0d4ffffffff \
00000100000000000000002000010000 \
000000000000000002000d278050f0d4 \
469dafd3605a6ebbdb130d278052f0d4 \
469dafd3605a6ebbdb13060006000000 \
0000")
self.job_info_base[32:40] = bytearray.fromhex(hex(int(gateway_mac_address))[2:])
def get_command(self, command, value):
raise NotImplementedError
def get_b64command(self, command, value):
"""A wrapper of get_command, generating base64 command.
Parameters
----------
command : str
Status name.
value : int
Status value.
Returns
-------
str
Base64 command.
"""
return base64.b64encode(self.get_command(command, value)).decode()
class JciHitachiCommandAC(JciHitachiCommand): # pragma: no cover
"""Sending job command to air conditioner.
Parameters
----------
gateway_mac_address : str
Gateway mac address.
"""
def __init__(self, gateway_mac_address):
super().__init__(gateway_mac_address)
def get_command(self, command, value):
"""Get job command.
Parameters
----------
command : str
Status name.
value : int
Status value.
Returns
-------
bytearray
Bytearray command.
"""
job_info = self.job_info_base.copy()
# Device type
job_info[77] = 1
# Command (eg. target_temp)
job_info[78] = 128 + JciHitachiAC.idx[command]
# Value (eg. 27)
job_info[80] = value
# Checksum
# Original algorithm:
# xor job_info 76~80
# Since byte 76(0x06), 77(device type), and 79(0x00) are constants,
# here is the simplified algorithm:
# command ^ value ^ 0x07 (flip last 3 bits)
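# (Worked illustration, added for clarity: byte 76 is 0x06, the AC device
# type at byte 77 is 0x01 and byte 79 is 0x00, so the folded constant is
# 0x06 ^ 0x01 ^ 0x00 = 0x07.)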
job_info[81] = job_info[78] ^ job_info[80] ^ 0x07
return job_info
class JciHitachiCommandDH(JciHitachiCommand): # pragma: no cover
"""Sending job command to dehumidifier.
Parameters
----------
gateway_mac_address : str
Gateway mac address.
"""
def __init__(self, gateway_mac_address):
super().__init__(gateway_mac_address)
def get_command(self, command, value):
"""Get job command.
Parameters
----------
command : str
Status name.
value : int
Status value.
Returns
-------
bytearray
Bytearray command.
"""
job_info = self.job_info_base.copy()
# Device type
job_info[77] = 4
# Command (eg. target_temp)
job_info[78] = 128 + JciHitachiDH.idx[command]
# Value (eg. 27)
job_info[80] = value
# Checksum
# Original algorithm:
# xor job_info 76~80
# Since byte 76(0x06), 77(device type), and 79(0x00) are constants,
# here is the simplified algorithm:
# command ^ value ^ 0x02
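# (Worked illustration, added for clarity: byte 76 is 0x06, the DH device
# type at byte 77 is 0x04 and byte 79 is 0x00, so the folded constant is
# 0x06 ^ 0x04 ^ 0x00 = 0x02.)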
job_info[81] = job_info[78] ^ job_info[80] ^ 0x02
return job_info
class JciHitachiCommandHE(JciHitachiCommand): # pragma: no cover
"""Sending job command to heat exchanger.
Parameters
----------
gateway_mac_address : str
Gateway mac address.
"""
def __init__(self, gateway_mac_address):
super().__init__(gateway_mac_address)
def get_command(self, command, value):
"""Get job command.
Parameters
----------
command : str
Status name.
value : int
Status value.
Returns
-------
bytearray
Bytearray command.
"""
job_info = self.job_info_base.copy()
# Device type
job_info[77] = 14
# Command (eg. target_temp)
job_info[78] = 128 + JciHitachiHE.idx[command]
# Value (eg. 27)
job_info[80] = value
# Checksum
# Original algorithm:
# xor job_info 76~80
# Since byte 76(0x06), 77(device type), and 79(0x00) are constants,
# here is the simplified algorithm:
# command ^ value ^ 0x08
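# (Worked illustration, added for clarity: byte 76 is 0x06, the HE device
# type at byte 77 is 0x0e (14) and byte 79 is 0x00, so the folded constant is
# 0x06 ^ 0x0e ^ 0x00 = 0x08.)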
job_info[81] = job_info[78] ^ job_info[80] ^ 0x08
return job_info
class JciHitachiStatusInterpreter: # pragma: no cover
"""Interpreting received status code.
Parameters
----------
code : str
status code.
"""
def __init__(self, code):
self.base64_bytes = base64.standard_b64decode(code)
def _decode_status_number(self):
if 6 < self.base64_bytes[0] and (self.base64_bytes[1], self.base64_bytes[2]) == (0, 8):
return int((self.base64_bytes[0] - 4) / 3)
else:
return 0
def _decode_support_number(self):
if 9 < self.base64_bytes[0]:
return int((self.base64_bytes[0] - 26) / 3)
else:
return 0
def _decode_single_status(self, max_func_number, while_counter):
stat_idx = while_counter * 3 + 3
if stat_idx + 3 <= self.base64_bytes[0] - 1:
status_bytes = bytearray(4)
status_bytes[0] = (self.base64_bytes[stat_idx] & 0x80) != 0
status_bytes[1] = self.base64_bytes[stat_idx] & 0xffff7fff
status_bytes[2:4] = self.base64_bytes[stat_idx + 1: stat_idx + 3]
output = int.from_bytes(status_bytes, byteorder='little')
return output
else:
output = util.bin_concat(0xff, max_func_number)
output = (output << 16) & 0xffff0000 | max_func_number
return output
def _decode_single_support(self, max_func_number, while_counter, init):
stat_idx = while_counter * 3 + init
if stat_idx + 3 <= self.base64_bytes[0] - 1:
status_bytes = bytearray(4)
status_bytes[0] = (self.base64_bytes[stat_idx] & 0x80) != 0
status_bytes[1] = self.base64_bytes[stat_idx] & 0xffff7fff
status_bytes[2:4] = self.base64_bytes[stat_idx + 1: stat_idx + 3]
output = int.from_bytes(status_bytes, byteorder='little')
else:
output = util.bin_concat(0xff, max_func_number)
output = (output << 16) & 0xffff0000 | max_func_number
return output
def _get_strs(self, start_idx, num_strs):
strs = []
idx = start_idx
while len(strs) < num_strs:
char = self.base64_bytes[idx]
if char == 0:
strs.append(self.base64_bytes[start_idx:idx].decode())
idx += 1
start_idx = idx
else:
idx += 1
return idx, strs
def decode_status(self):
"""Decode all status codes of a peripheral.
Returns
-------
dict
Decoded status.
"""
table = {}
num_idx = self._decode_status_number()
for i in range(num_idx):
ret = self._decode_single_status(num_idx, i)
idx = util.cast_bytes(ret >> 8, 1)
table[idx] = ret >> 0x18 + (ret >> 0x10 * 0x100)
return table
def decode_support(self):
"""Decode all support codes of a peripheral.
Returns
-------
dict
Decoded support.
"""
init, (brand, model) = self._get_strs(8, 2)
table = {
'brand': brand,
'model': model
}
num_idx = self._decode_support_number()
for i in range(num_idx):
ret = self._decode_single_support(num_idx, i, init)
idx = util.cast_bytes(ret >> 8, 1)
if idx >= 128:
idx = idx - 128
# Save the raw value; the extraction procedure is performed in the model.
table[idx] = ret
return table | PypiClean |
/dirtrav-1.0.0.tar.gz/dirtrav-1.0.0/src/flask/logging.py | import logging
import sys
import typing as t
from werkzeug.local import LocalProxy
from .globals import request
if t.TYPE_CHECKING: # pragma: no cover
from .app import Flask
@LocalProxy
def wsgi_errors_stream() -> t.TextIO:
"""Find the most appropriate error stream for the application. If a request
is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.
If you configure your own :class:`logging.StreamHandler`, you may want to
use this for the stream. If you are using file or dict configuration and
can't import this directly, you can refer to it as
``ext://flask.logging.wsgi_errors_stream``.
"""
return request.environ["wsgi.errors"] if request else sys.stderr
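# Illustrative sketch (not part of this module): pointing dict-based logging
# configuration at this stream, as the docstring above describes. The handler
# name and root-logger settings below are assumptions, not Flask defaults:
#
#     from logging.config import dictConfig
#     dictConfig({
#         "version": 1,
#         "handlers": {
#             "wsgi": {
#                 "class": "logging.StreamHandler",
#                 "stream": "ext://flask.logging.wsgi_errors_stream",
#             }
#         },
#         "root": {"level": "INFO", "handlers": ["wsgi"]},
#     })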
def has_level_handler(logger: logging.Logger) -> bool:
"""Check if there is a handler in the logging chain that will handle the
given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent # type: ignore
return False
#: Log messages to :func:`~flask.logging.wsgi_errors_stream` with the format
#: ``[%(asctime)s] %(levelname)s in %(module)s: %(message)s``.
default_handler = logging.StreamHandler(wsgi_errors_stream) # type: ignore
default_handler.setFormatter(
logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
)
def create_logger(app: "Flask") -> logging.Logger:
"""Get the Flask app's logger and configure it if needed.
The logger name will be the same as
:attr:`app.import_name <flask.Flask.name>`.
When :attr:`~flask.Flask.debug` is enabled, set the logger level to
:data:`logging.DEBUG` if it is not set.
If there is no handler for the logger's effective level, add a
:class:`~logging.StreamHandler` for
:func:`~flask.logging.wsgi_errors_stream` with a basic format.
"""
logger = logging.getLogger(app.name)
if app.debug and not logger.level:
logger.setLevel(logging.DEBUG)
if not has_level_handler(logger):
logger.addHandler(default_handler)
return logger | PypiClean |
/2b2t-0.3.0.tar.gz/2b2t-0.3.0/bbtt/coord/__main__.py | import enum
from typing import Tuple
USAGE = """\
i2w <x> <z>: convert coordinate in 8K radar image to the corresponding coordinate in 2b2t world
image2world <x> <z>: convert coordinate in 8K radar image to the corresponding coordinate in 2b2t world
w2i <x> <z>: convert coordinate in 2b2t world to the corresponding coordinate in 8K radar image
world2image <x> <z>: convert coordinate in 2b2t world to the corresponding coordinate in 8K radar image
q (or quit, exit): exit program
h (or help): show this help menu\
"""
class RadarImageType(enum.Enum):
RADAR_4K = (3840, 2160, 8)
RADAR_8K = (7680, 4320, 4)
def world_to_image(loc, image_type=RadarImageType.RADAR_8K) -> Tuple[int, int]:
"""
Given a coordinate in the 2b2t overworld, return the corresponding pixel coordinate in the radar image.
"""
x, z = loc
off_x, off_z, chunks_per_pixel = image_type.value[0] // 2, image_type.value[1] // 2, image_type.value[2]
return off_x + x // 16 // chunks_per_pixel, off_z + z // 16 // chunks_per_pixel
def image_to_world(loc, image_type=RadarImageType.RADAR_8K) -> Tuple[int, int]:
"""
Given a pixel position in the radar image, return the center coordinate of the corresponding range in the 2b2t overworld.
"""
x, z = loc
off_x, off_z, chunks_per_pixel = image_type.value[0] // 2, image_type.value[1] // 2, image_type.value[2]
x, z = x - off_x, z - off_z
return int((x + 0.5) * 16 * chunks_per_pixel), int((z + 0.5) * 16 * chunks_per_pixel)
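# Worked example (illustrative): with the default RADAR_8K image
# (7680x4320, 4 chunks per pixel) each pixel covers a 64x64-block area, so
#   image_to_world((3840, 2160)) -> (32, 32)      # centre pixel -> near spawn
#   world_to_image((0, 0))       -> (3840, 2160)  # spawn -> centre pixel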
def main():
""" REPL """
while True:
try:
inp = input('> ').strip().split(' ') or None
cmd = inp[0] if len(inp) > 0 else None
if cmd == 'i2w' or cmd == 'image2world':
world_x, world_y = image_to_world((int(inp[1]), int(inp[2])))
print(f'World: ({world_x}, {world_y})')
print(f'Nether: ({world_x // 8}, {world_y // 8})')
elif cmd == 'w2i' or cmd == 'world2image':
print(world_to_image((int(inp[1]), int(inp[2]))))
elif cmd == 'q' or cmd == 'quit' or cmd == 'exit':
break
elif cmd == 'h' or cmd == 'help':
print(USAGE)
elif not cmd:
pass
else:
print('Invalid command. Run \'help\' or \'h\' for usage description.')
except (ValueError, IndexError):
print('Invalid command. Type `help` or `h` for help.')
except KeyboardInterrupt:
print()
pass # Ignore Ctrl-C event
if __name__ == '__main__':
main() | PypiClean |
/IsPycharmRun-1.0.tar.gz/IsPycharmRun-1.0/poco/utils/simplerpc/jsonrpc/jsonrpc2.py | import json
from . import six
from .base import JSONRPCBaseRequest, JSONRPCBaseResponse
from .exceptions import JSONRPCError, JSONRPCInvalidRequestException
class JSONRPC20Request(JSONRPCBaseRequest):
""" A rpc call is represented by sending a Request object to a Server.
:param str method: A String containing the name of the method to be
invoked. Method names that begin with the word rpc followed by a
period character (U+002E or ASCII 46) are reserved for rpc-internal
methods and extensions and MUST NOT be used for anything else.
:param params: A Structured value that holds the parameter values to be
used during the invocation of the method. This member MAY be omitted.
:type params: iterable or dict
:param _id: An identifier established by the Client that MUST contain a
String, Number, or NULL value if included. If it is not included it is
assumed to be a notification. The value SHOULD normally not be Null
[1] and Numbers SHOULD NOT contain fractional parts [2].
:type _id: str or int or None
:param bool is_notification: Whether request is notification or not. If
value is True, _id is not included to request. It allows to create
requests with id = null.
The Server MUST reply with the same value in the Response object if
included. This member is used to correlate the context between the two
objects.
[1] The use of Null as a value for the id member in a Request object is
discouraged, because this specification uses a value of Null for Responses
with an unknown id. Also, because JSON-RPC 1.0 uses an id value of Null
for Notifications this could cause confusion in handling.
[2] Fractional parts may be problematic, since many decimal fractions
cannot be represented exactly as binary fractions.
"""
JSONRPC_VERSION = "2.0"
REQUIRED_FIELDS = set(["jsonrpc", "method"])
POSSIBLE_FIELDS = set(["jsonrpc", "method", "params", "id"])
@property
def data(self):
data = dict(
(k, v) for k, v in self._data.items()
if not (k == "id" and self.is_notification)
)
data["jsonrpc"] = self.JSONRPC_VERSION
return data
@data.setter
def data(self, value):
if not isinstance(value, dict):
raise ValueError("data should be dict")
self._data = value
@property
def method(self):
return self._data.get("method")
@method.setter
def method(self, value):
if not isinstance(value, six.string_types):
raise ValueError("Method should be string")
if value.startswith("rpc."):
raise ValueError(
"Method names that begin with the word rpc followed by a " +
"period character (U+002E or ASCII 46) are reserved for " +
"rpc-internal methods and extensions and MUST NOT be used " +
"for anything else.")
self._data["method"] = str(value)
@property
def params(self):
return self._data.get("params")
@params.setter
def params(self, value):
if value is not None and not isinstance(value, (list, tuple, dict)):
raise ValueError("Incorrect params {0}".format(value))
value = list(value) if isinstance(value, tuple) else value
if value is not None:
self._data["params"] = value
@property
def _id(self):
return self._data.get("id")
@_id.setter
def _id(self, value):
if value is not None and \
not isinstance(value, six.string_types + six.integer_types):
raise ValueError("id should be string or integer")
self._data["id"] = value
@classmethod
def from_json(cls, json_str):
data = cls.deserialize(json_str)
is_batch = isinstance(data, list)
data = data if is_batch else [data]
if not data:
raise JSONRPCInvalidRequestException("[] value is not accepted")
if not all(isinstance(d, dict) for d in data):
raise JSONRPCInvalidRequestException(
"Each request should be an object (dict)")
result = []
for d in data:
if not cls.REQUIRED_FIELDS <= set(d.keys()) <= cls.POSSIBLE_FIELDS:
extra = set(d.keys()) - cls.POSSIBLE_FIELDS
missed = cls.REQUIRED_FIELDS - set(d.keys())
msg = "Invalid request. Extra fields: {0}, Missed fields: {1}"
raise JSONRPCInvalidRequestException(msg.format(extra, missed))
try:
result.append(JSONRPC20Request(
method=d["method"], params=d.get("params"),
_id=d.get("id"), is_notification="id" not in d,
))
except ValueError as e:
raise JSONRPCInvalidRequestException(str(e))
return JSONRPC20BatchRequest(*result) if is_batch else result[0]
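# Usage sketch (illustrative, relying only on the from_json classmethod and the
# method/params/data members defined above):
#
#     req = JSONRPC20Request.from_json(
#         '{"jsonrpc": "2.0", "method": "echo", "params": ["hi"], "id": 1}')
#     req.method   # -> "echo"
#     req.params   # -> ["hi"]
#     req.data     # -> {"method": "echo", "params": ["hi"], "id": 1, "jsonrpc": "2.0"}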
class JSONRPC20BatchRequest(object):
""" Batch JSON-RPC 2.0 Request.
:param JSONRPC20Request *requests: requests
"""
JSONRPC_VERSION = "2.0"
def __init__(self, *requests):
self.requests = requests
@classmethod
def from_json(cls, json_str):
return JSONRPC20Request.from_json(json_str)
@property
def json(self):
return json.dumps([r.data for r in self.requests])
def __iter__(self):
return iter(self.requests)
class JSONRPC20Response(JSONRPCBaseResponse):
""" JSON-RPC response object to JSONRPC20Request.
When a rpc call is made, the Server MUST reply with a Response, except for
in the case of Notifications. The Response is expressed as a single JSON
Object, with the following members:
:param str jsonrpc: A String specifying the version of the JSON-RPC
protocol. MUST be exactly "2.0".
:param result: This member is REQUIRED on success.
This member MUST NOT exist if there was an error invoking the method.
The value of this member is determined by the method invoked on the
Server.
:param dict error: This member is REQUIRED on error.
This member MUST NOT exist if there was no error triggered during
invocation. The value for this member MUST be an Object.
:param id: This member is REQUIRED.
It MUST be the same as the value of the id member in the Request
Object. If there was an error in detecting the id in the Request
object (e.g. Parse error/Invalid Request), it MUST be Null.
:type id: str or int or None
Either the result member or error member MUST be included, but both
members MUST NOT be included.
"""
JSONRPC_VERSION = "2.0"
@property
def data(self):
data = dict((k, v) for k, v in self._data.items())
data["jsonrpc"] = self.JSONRPC_VERSION
return data
@data.setter
def data(self, value):
if not isinstance(value, dict):
raise ValueError("data should be dict")
self._data = value
@property
def result(self):
return self._data.get("result")
@result.setter
def result(self, value):
if self.error:
raise ValueError("Either result or error should be used")
self._data["result"] = value
@property
def error(self):
return self._data.get("error")
@error.setter
def error(self, value):
self._data.pop('value', None)
if value:
self._data["error"] = value
# Test error
JSONRPCError(**value)
@property
def _id(self):
return self._data.get("id")
@_id.setter
def _id(self, value):
if value is not None and \
not isinstance(value, six.string_types + six.integer_types):
raise ValueError("id should be string or integer")
self._data["id"] = value
class JSONRPC20BatchResponse(object):
JSONRPC_VERSION = "2.0"
def __init__(self, *responses):
self.responses = responses
@property
def data(self):
return [r.data for r in self.responses]
@property
def json(self):
return json.dumps(self.data)
def __iter__(self):
return iter(self.responses) | PypiClean |
/DevGossip-Nosakhare-0.1.0.tar.gz/DevGossip-Nosakhare-0.1.0/README.md | # DevGossip App
This is a console application that allows software developers from different tech companies or tech spaces to converge and share anything: gists such as gossip about bosses and colleagues, pop culture, and even their personal lives and relationships. It is a realtime chat application for casual discussions.
There are 8 functions in the main Python file that make up the functionality of this app:
1. homepage:
2. signup:
3. verify_username:
4. login:
5. select_chatroom:
6. connection_manager:
7. server_response:
8. get_userinput:
# Requirements
1. python 3.x
2. pip
# Prerequisites
1. Set up Pusher:
If you don't already have a Pusher account, create a free account at https://pusher.com/signup.
Log in to your account dashboard and create an app. Save your app credentials (app_id, app_key, app_secret and app_cluster).
2. Install the virtualenv package. This helps manage environments and avoids conflicting library versions among different projects:
pip install virtualenv
# Set Up
1. Create a virtual environment for the project.
2. Clone the project repository into a folder on your computer.
3. cd into the project folder.
4. Install the updated version of Pysher from GitHub:
pip install git+https://github.com/nlsdfnbch/Pysher.git
5. Install the dependencies listed in requirements.txt:
pip install -r requirements.txt
6. Head to the project and fill in your Pusher app credentials in the .env file:
PUSHER_APP_ID = XXX_APP_ID
PUSHER_APP_KEY = XXX_APP_KEY
PUSHER_APP_SECRET = XXX_APP_SECRET
PUSHER_APP_CLUSTER = XXX_APP_CLUSTER
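A minimal sketch of how these credentials are typically loaded and used to initialise the Pusher client (assuming the python-dotenv and pusher packages; the actual code in main.py may differ):
    from dotenv import load_dotenv
    import os, pusher
    load_dotenv()
    pusher_client = pusher.Pusher(
        app_id=os.getenv("PUSHER_APP_ID"),
        key=os.getenv("PUSHER_APP_KEY"),
        secret=os.getenv("PUSHER_APP_SECRET"),
        cluster=os.getenv("PUSHER_APP_CLUSTER"),
    )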
# Run the app
Run python main.py in the command line/terminal | PypiClean |