/CodeTalker-1.1.tar.gz/CodeTalker-1.1/codetalker/pgm/translator.py
from tokens import Token
import types
import inspect
import copy
from nodes import AstNode
from errors import CodeTalkerException
class TranslatorException(CodeTalkerException):
pass
class Translator:
def __init__(self, grammar, **defaults):
self.grammar = grammar
self.register = {}
self.scope = True
if not defaults:
self.scope = False
self.defaults = defaults
def translates(self, what):
def meta(func):
self.register[what] = func
def beta(node, scope=None):
if node is None:
return None
if self.scope:
return func(node, scope)
else:
return func(node)
return beta
return meta
def translate(self, tree, scope=None):
if tree is None:
return None
if tree.__class__ not in self.register:
if isinstance(tree, Token):
return tree.value
raise TranslatorException('no rule to translate %s' % tree.__class__.__name__)
if self.scope:
return self.register[tree.__class__](tree, scope)
else:
return self.register[tree.__class__](tree)
def from_string(self, text, **args):
# assert text == str(self.grammar.process(text))
tree = self.grammar.get_ast(text)
'''
ptree = self.grammar.process(text)
if ptree is None:
return None
tree = self.grammar.to_ast(ptree)
'''
return self.from_ast(tree, **args)
def from_ast(self, tree, **args):
if self.scope:
if self.defaults.keys() == ['scope']:
scope = self.defaults['scope']
for k, v in args.items():
setattr(scope, k, v)
else:
stuff = copy.deepcopy(self.defaults)
stuff.update(args)
Scope = type('Scope', (), {})
scope = Scope()
for k,v in stuff.iteritems():
setattr(scope, k, v)
return self.translate(tree, scope)
elif args:
raise Exception('no scope -- cannot define variables: %s' % (args,))
else:
return self.translate(tree)
# vim: et sw=4 sts=4
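# --- Illustrative usage sketch (not part of the original module) ---
# ``my_grammar`` and ``MyNode`` below are hypothetical placeholders: a real
# grammar object comes from codetalker.pgm and MyNode would be an AstNode
# subclass produced by it.  Handlers registered with ``translates`` receive
# (node, scope) when defaults were passed to the constructor, else just (node).
#
#     translator = Translator(my_grammar, indent=0)   # non-empty defaults -> scoped mode
#
#     @translator.translates(MyNode)
#     def handle_my_node(node, scope):
#         return ''.join(translator.translate(child, scope) for child in node.children)
#
#     result = translator.from_string(source_text, indent=4)  # overrides the default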
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/_items_/input_form/input_form.py
import Famcy
import json
class input_form(Famcy.FamcyCard):
"""
This is a category of card
that groups all submittable
blocks together.
"""
def __init__(self, layout_mode=Famcy.FamcyLayoutMode.recommend, **kwargs):
super(input_form, self).__init__(layout_mode=layout_mode, **kwargs)
self.configs["method"] = "post"
self.init_block()
def init_block(self):
self.body = Famcy.form()
self.body["id"] = self.id
self.body["method"] = self.configs["method"]
self.body["action"] = self.action
self.body["onsubmit"] = "return false;"
script = Famcy.script()
script["src"] = "/static/js/input_form_submit.js"
self.body.addStaticScript(script)
def set_submit_action(self, layout):
for widget, _, _, _, _ in layout.content:
if widget.clickable:
if type(widget).__name__ == "inputBtn":
widget.body.children[3]["onclick"] = "input_form_main_btn_submit(this, %s, '%s', '%s', '%s');" % (json.dumps(self.loader), self.id, str(self.submission_obj_key), str(widget.submission_obj_key))
else:
widget.body["onclick"] = "input_form_main_btn_submit(this, %s, '%s', '%s', '%s');" % (json.dumps(self.loader), self.id, str(self.submission_obj_key), str(widget.submission_obj_key))
else:
if type(widget).__name__ == "FSection":
self.set_submit_action(widget.layout)
for _ in layout.fixedContent:
widget = _[0]
if widget.clickable:
if type(widget).__name__ == "inputBtn":
widget.body.children[3]["onclick"] = "input_form_main_btn_submit(this, %s, '%s', '%s', '%s');" % (json.dumps(self.loader), self.id, str(self.submission_obj_key), str(widget.submission_obj_key))
else:
widget.body["onclick"] = "input_form_main_btn_submit(this, %s, '%s', '%s', '%s');" % (json.dumps(self.loader), self.id, str(self.submission_obj_key), str(widget.submission_obj_key))
else:
if type(widget).__name__ == "FSection":
self.set_submit_action(widget.layout)
def render_inner(self):
header_script, self.body = self.layout.render()
if header_script not in self.header_script:
self.header_script += header_script
self.set_submit_action(self.layout)
return self.body
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/names.py
def split_name_script(decoded):
# This case happens if a script was malformed and couldn't be decoded by
# transaction.get_address_from_output_script.
if decoded is None:
return {"name_op": None, "address_scriptPubKey": decoded}
# name_register TxOuts look like:
# NAME_REGISTER (name) (value) 2DROP DROP (Bitcoin TxOut)
match = [ OP_NAME_REGISTER, OPPushDataGeneric, OPPushDataGeneric, opcodes.OP_2DROP, opcodes.OP_DROP ]
if match_decoded(decoded[:len(match)], match):
return {"name_op": {"op": OP_NAME_REGISTER, "name": decoded[1][1], "value": decoded[2][1]}, "address_scriptPubKey": decoded[len(match):]}
# name_update TxOuts look like:
# NAME_UPDATE (name) (value) 2DROP DROP (Bitcoin TxOut)
match = [ OP_NAME_UPDATE, OPPushDataGeneric, OPPushDataGeneric, opcodes.OP_2DROP, opcodes.OP_DROP ]
if match_decoded(decoded[:len(match)], match):
return {"name_op": {"op": OP_NAME_UPDATE, "name": decoded[1][1], "value": decoded[2][1]}, "address_scriptPubKey": decoded[len(match):]}
return {"name_op": None, "address_scriptPubKey": decoded}
def get_name_op_from_output_script(_bytes):
try:
decoded = [x for x in script_GetOp(_bytes)]
except MalformedBitcoinScript:
decoded = None
# Extract the name script if one is present.
return split_name_script(decoded)["name_op"]
def name_op_to_script(name_op):
if name_op is None:
script = ''
elif name_op["op"] == OP_NAME_REGISTER:
validate_update_length(name_op)
script = '51' # OP_NAME_REGISTER
script += push_script(bh2u(name_op["name"]))
script += push_script(bh2u(name_op["value"]))
script += '6d' # OP_2DROP
script += '75' # OP_DROP
elif name_op["op"] == OP_NAME_UPDATE:
validate_update_length(name_op)
script = '52' # OP_NAME_UPDATE
script += push_script(bh2u(name_op["name"]))
script += push_script(bh2u(name_op["value"]))
script += '6d' # OP_2DROP
script += '75' # OP_DROP
else:
raise BitcoinException('unknown name op: {}'.format(name_op))
return script
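# Illustrative output shape of name_op_to_script (example only): for
#   {"op": OP_NAME_REGISTER, "name": b"p/alice", "value": b'{"a": 1}'}
# the returned hex string is '51' + push(name) + push(value) + '6d' + '75',
# i.e. the name prefix that precedes an ordinary scriptPubKey in the output.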
def validate_update_length(name_op):
validate_anyupdate_length(name_op)
def validate_anyupdate_length(name_op):
validate_identifier_length(name_op["name"])
validate_value_length(name_op["value"])
def validate_identifier_length(identifier):
identifier_length_limit = 256
identifier_length = len(identifier)
if identifier_length > identifier_length_limit:
raise BitcoinException('identifier length {} exceeds limit of {}'.format(identifier_length, identifier_length_limit))
# TODO: Xaya has more validation rules, which we should at some point
# implement here as well.
def validate_value_length(value):
# Special case: This is also called when we build the "fake name script"
# that ElectrumX indexes on. In this case, the value is empty. That is
# not valid for Xaya, but we need to accept it here.
if len(value) == 0:
return
value_length_limit = 2048
value_length = len(value)
if value_length > value_length_limit:
raise BitcoinException('value length {} exceeds limit of {}'.format(value_length, value_length_limit))
import json
try:
parsed = json.loads(value)
if not isinstance (parsed, dict):
raise BitcoinException(f"Value is not a JSON object: {value}")
except json.decoder.JSONDecodeError:
raise BitcoinException(f"Value is invalid JSON: {value}")
def name_identifier_to_scripthash(identifier_bytes):
name_op = {"op": OP_NAME_UPDATE, "name": identifier_bytes, "value": bytes([])}
script = name_op_to_script(name_op)
script += '6a' # OP_RETURN
return script_to_scripthash(script)
def format_name_identifier(identifier_bytes):
try:
identifier = identifier_bytes.decode("utf-8")
except UnicodeDecodeError:
return format_name_identifier_unknown_hex(identifier_bytes)
if identifier.startswith("p/"):
return format_name_identifier_player(identifier)
if identifier.startswith("g/"):
return format_name_identifier_game(identifier)
return format_name_identifier_unknown(identifier)
def format_name_identifier_player(identifier):
label = identifier[len("p/"):]
return f"Player: {label}"
def format_name_identifier_game(identifier):
label = identifier[len("g/"):]
return f"Game: {label}"
def format_name_identifier_unknown(identifier):
# Check for non-printable characters, and print ASCII if none are found.
if identifier.isprintable():
return 'Non-standard name "' + identifier + '"'
return format_name_identifier_unknown_hex(identifier.encode("ascii"))
def format_name_identifier_unknown_hex(identifier_bytes):
return "Non-standard hex name " + bh2u(identifier_bytes)
def format_name_value(identifier_bytes):
try:
identifier = identifier_bytes.decode("ascii")
except UnicodeDecodeError:
return format_name_value_hex(identifier_bytes)
if not identifier.isprintable():
return format_name_value_hex(identifier_bytes)
return "JSON " + identifier
def format_name_value_hex(identifier_bytes):
return "Hex " + bh2u(identifier_bytes)
def format_name_op(name_op):
if name_op is None:
return ''
if "name" in name_op:
formatted_name = "Name = " + format_name_identifier(name_op["name"])
if "value" in name_op:
formatted_value = "Data = " + format_name_value(name_op["value"])
if name_op["op"] == OP_NAME_REGISTER:
return "\tRegistration\n\t\t" + formatted_name + "\n\t\t" + formatted_value
if name_op["op"] == OP_NAME_UPDATE:
return "\tUpdate\n\t\t" + formatted_name + "\n\t\t" + formatted_value
def get_default_name_tx_label(wallet, tx):
for idx, o in enumerate(tx.outputs()):
name_op = o.name_op
if name_op is not None:
# TODO: Handle multiple atomic name ops.
name_input_is_mine, name_output_is_mine, name_value_is_unchanged = get_wallet_name_delta(wallet, tx)
if not name_input_is_mine and not name_output_is_mine:
return None
if name_op["op"] == OP_NAME_REGISTER:
return "Registration: " + format_name_identifier(name_op["name"])
if name_input_is_mine and not name_output_is_mine:
return "Transfer (Outgoing): " + format_name_identifier(name_op["name"])
if not name_input_is_mine and name_output_is_mine:
return "Transfer (Incoming): " + format_name_identifier(name_op["name"])
if name_op["op"] == OP_NAME_UPDATE:
return "Update: " + format_name_identifier(name_op["name"])
return None
def get_wallet_name_delta(wallet, tx):
name_input_is_mine = False
name_output_is_mine = False
name_input_value = None
name_output_value = None
for txin in tx.inputs():
addr = wallet.get_txin_address(txin)
if wallet.is_mine(addr):
prev_tx = wallet.db.transactions.get(txin['prevout_hash'])
if prev_tx.outputs()[txin['prevout_n']].name_op is not None:
name_input_is_mine = True
if 'value' in prev_tx.outputs()[txin['prevout_n']].name_op:
name_input_value = prev_tx.outputs()[txin['prevout_n']].name_op['value']
for o in tx.outputs():
if o.name_op is not None and wallet.is_mine(o.address):
name_output_is_mine = True
if 'value' in o.name_op:
name_output_value = o.name_op['value']
name_value_is_unchanged = name_input_value == name_output_value
return name_input_is_mine, name_output_is_mine, name_value_is_unchanged
def get_wallet_name_count(wallet, network):
confirmed_count = 0
pending_count = 0
utxos = wallet.get_utxos()
for _, x in enumerate(utxos):
txid = x.get('prevout_hash')
vout = x.get('prevout_n')
name_op = wallet.db.transactions[txid].outputs()[vout].name_op
if name_op is None:
continue
height = x.get('height')
if height <= 0:
# Transaction isn't mined yet
if name_op['op'] == OP_NAME_REGISTER:
# Registration is pending
pending_count += 1
continue
else:
# name_update is pending
# TODO: we shouldn't consider it confirmed if it's an incoming
# or outgoing transfer.
confirmed_count += 1
continue
if 'name' in name_op:
# name_anyupdate is mined (not expired)
confirmed_count += 1
continue
else:
# name_new is mined
pending_count += 1
continue
return confirmed_count, pending_count
import binascii
from datetime import datetime, timedelta
import os
import re
from .bitcoin import push_script, script_to_scripthash
from .crypto import hash_160
from .transaction import MalformedBitcoinScript, match_decoded, opcodes, OPPushDataGeneric, script_GetOp, Transaction
from .util import bh2u, BitcoinException
OP_NAME_REGISTER = opcodes.OP_1
OP_NAME_UPDATE = opcodes.OP_2
/Hikka_Pyro_New-2.0.103-py3-none-any.whl/hikkapyro/methods/chats/restrict_chat_member.py
from datetime import datetime
from typing import Union
import hikkapyro
from hikkapyro import raw, utils
from hikkapyro import types
class RestrictChatMember:
async def restrict_chat_member(
self: "hikkapyro.Client",
chat_id: Union[int, str],
user_id: Union[int, str],
permissions: "types.ChatPermissions",
until_date: datetime = utils.zero_datetime()
) -> "types.Chat":
"""Restrict a user in a supergroup.
You must be an administrator in the supergroup for this to work and must have the appropriate admin rights.
Pass True for all permissions to lift restrictions from a user.
.. include:: /_includes/usable-by/users-bots.rst
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target user.
For a contact that exists in your Telegram address book you can use his phone number (str).
permissions (:obj:`~pyrogram.types.ChatPermissions`):
New user permissions.
until_date (:py:obj:`~datetime.datetime`, *optional*):
Date when the user will be unbanned.
If user is banned for more than 366 days or less than 30 seconds from the current time they are
considered to be banned forever. Defaults to epoch (ban forever).
Returns:
:obj:`~pyrogram.types.Chat`: On success, a chat object is returned.
Example:
.. code-block:: python
from datetime import datetime, timedelta
from pyrogram.types import ChatPermissions
# Completely restrict chat member (mute) forever
await app.restrict_chat_member(chat_id, user_id, ChatPermissions())
# Chat member muted for 24h
await app.restrict_chat_member(chat_id, user_id, ChatPermissions(),
datetime.now() + timedelta(days=1))
# Chat member can only send text messages
await app.restrict_chat_member(chat_id, user_id,
ChatPermissions(can_send_messages=True))
"""
r = await self.invoke(
raw.functions.channels.EditBanned(
channel=await self.resolve_peer(chat_id),
participant=await self.resolve_peer(user_id),
banned_rights=raw.types.ChatBannedRights(
until_date=utils.datetime_to_timestamp(until_date),
send_messages=not permissions.can_send_messages,
send_media=not permissions.can_send_media_messages,
send_stickers=not permissions.can_send_other_messages,
send_gifs=not permissions.can_send_other_messages,
send_games=not permissions.can_send_other_messages,
send_inline=not permissions.can_send_other_messages,
embed_links=not permissions.can_add_web_page_previews,
send_polls=not permissions.can_send_polls,
change_info=not permissions.can_change_info,
invite_users=not permissions.can_invite_users,
pin_messages=not permissions.can_pin_messages,
)
)
)
return types.Chat._parse_chat(self, r.chats[0])
/ChatAudio-2023.4.25.9.51.6-py3-none-any.whl/chatllm/qa.py
from langchain.chains import RetrievalQA
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
# ME
from meutils.pipe import *
from chatllm.chatllm import ChatLLM
RetrievalQA.return_source_documents = True
class QA(object):
def __init__(self, chatllm: ChatLLM, faiss_ann: FAISS = None, document_prompt: PromptTemplate = None):
"""
:param chatllm:
"""
self.chatllm = chatllm
self.faiss_ann = faiss_ann
self.document_prompt = document_prompt if document_prompt else self.default_document_prompt
@property
def default_document_prompt(self) -> PromptTemplate:
prompt_template = """
基于以下已知信息,简洁和专业的来回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
已知内容:
{context}
问题:
{question}
""".strip()
return PromptTemplate(template=prompt_template, input_variables=["context", "question"])
def get_knowledge_based_answer(self, query, max_turns=3, top_k=4, **kwargs):
assert self.faiss_ann
# Set chatllm parameters; will the history be stored?
self.chatllm.set_chat_kwargs(**kwargs)
self.chatllm.max_turns = max_turns
llm_chain = RetrievalQA.from_llm(
llm=self.chatllm,
retriever=self.faiss_ann.as_retriever(search_kwargs={"k": top_k}), # todo: avoid re-instantiating the retriever each time
prompt=self.document_prompt
)
# llm_chain.combine_documents_chain.document_prompt = PromptTemplate(
# input_variables=["page_content"], template="{page_content}"
# )
# Official default; should we override it?
# document_prompt = PromptTemplate(
# input_variables=["page_content"], template="Context:\n{page_content}"
# )
result = llm_chain({"query": query})
return result
def get_llm_answer(self, query, max_turns=3, **kwargs): # duplicated code
self.chatllm.set_chat_kwargs(**kwargs)
self.chatllm.max_turns = max_turns
return self.chatllm._call(query)
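# --- Illustrative usage sketch (not part of the original module) ---
# ``my_chatllm`` and ``my_faiss_index`` are hypothetical objects built elsewhere
# (a chatllm.ChatLLM instance and a langchain FAISS vector store).
#
#     qa = QA(my_chatllm, faiss_ann=my_faiss_index)
#     result = qa.get_knowledge_based_answer("What does the document say?", top_k=4)
#     print(result["result"], result["source_documents"])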
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/addon/colorpicker/color_picker_application.py
from StringIO import StringIO
from math import pi
from datetime import datetime as Date
from muntjac.addon.colorpicker.color import Color
from muntjac.application import Application
from muntjac.ui.check_box import CheckBox
from muntjac.ui.window import Window
from muntjac.ui.embedded import Embedded
from muntjac.ui.panel import Panel
from muntjac.ui.vertical_layout import VerticalLayout
from muntjac.ui.horizontal_layout import HorizontalLayout
from muntjac.ui.grid_layout import GridLayout
from muntjac.ui.button import IClickListener
from muntjac.ui.alignment import Alignment
from muntjac.terminal.stream_resource import IStreamSource, StreamResource
from muntjac.addon.colorpicker.color_picker \
import ColorPicker, ButtonStyle, IColorChangeListener
from muntjac.addon.canvas.canvas import Canvas
class ColorPickerApplication(Application, IColorChangeListener):
"""Testing application for the ColorPicker.
@author: John Ahlroos / ITMill Oy Ltd 2010
@author: Richard Lincoln
"""
_VERSION = '1.1.2'
def __init__(self):
super(ColorPickerApplication, self).__init__()
# The foreground color.
self._foregroundColor = Color.BLACK # The currently selected
# The background color.
self._backgroundColor = Color.WHITE # The currently selected
# The display box where the image is rendered.
self._display = None
self._mainLayout = None
self._colorpicker1 = None
self._colorpicker2 = None
self._colorpicker3 = None
self._colorpicker4 = None
self._colorpicker5 = None
self._colorpicker6 = None
self._rgbVisible = True
self._hsvVisible = True
self._swaVisible = True
self._historyVisible = True
self._txtfieldVisible = True
self._rgbBox = CheckBox('RGB tab visible')
self._hsvBox = CheckBox('HSV tab visible')
self._swaBox = CheckBox('Swatches tab visible')
self._hisBox = CheckBox('History visible')
self._txtBox = CheckBox('CSS field visible')
def setPopupVisibilities(self):
self._rgbBox.setEnabled(not (self._rgbVisible
and not self._hsvVisible and not self._swaVisible))
self._hsvBox.setEnabled(not (not self._rgbVisible
and self._hsvVisible and not self._swaVisible))
self._swaBox.setEnabled(not (not self._rgbVisible
and not self._hsvVisible and self._swaVisible))
self._colorpicker1.setRGBVisibility(self._rgbVisible)
self._colorpicker2.setRGBVisibility(self._rgbVisible)
self._colorpicker3.setRGBVisibility(self._rgbVisible)
self._colorpicker4.setRGBVisibility(self._rgbVisible)
self._colorpicker5.setRGBVisibility(self._rgbVisible)
self._colorpicker6.setRGBVisibility(self._rgbVisible)
self._colorpicker1.setHSVVisibility(self._hsvVisible)
self._colorpicker2.setHSVVisibility(self._hsvVisible)
self._colorpicker3.setHSVVisibility(self._hsvVisible)
self._colorpicker4.setHSVVisibility(self._hsvVisible)
self._colorpicker5.setHSVVisibility(self._hsvVisible)
self._colorpicker6.setHSVVisibility(self._hsvVisible)
self._colorpicker1.setSwatchesVisibility(self._swaVisible)
self._colorpicker2.setSwatchesVisibility(self._swaVisible)
self._colorpicker3.setSwatchesVisibility(self._swaVisible)
self._colorpicker4.setSwatchesVisibility(self._swaVisible)
self._colorpicker5.setSwatchesVisibility(self._swaVisible)
self._colorpicker6.setSwatchesVisibility(self._swaVisible)
self._colorpicker1.setHistoryVisibility(self._historyVisible)
self._colorpicker2.setHistoryVisibility(self._historyVisible)
self._colorpicker3.setHistoryVisibility(self._historyVisible)
self._colorpicker4.setHistoryVisibility(self._historyVisible)
self._colorpicker5.setHistoryVisibility(self._historyVisible)
self._colorpicker6.setHistoryVisibility(self._historyVisible)
self._colorpicker1.setTextfieldVisibility(self._txtfieldVisible)
self._colorpicker2.setTextfieldVisibility(self._txtfieldVisible)
self._colorpicker3.setTextfieldVisibility(self._txtfieldVisible)
self._colorpicker4.setTextfieldVisibility(self._txtfieldVisible)
self._colorpicker5.setTextfieldVisibility(self._txtfieldVisible)
self._colorpicker6.setTextfieldVisibility(self._txtfieldVisible)
def init(self):
# This is called whenever a colorpicker popup is closed
main = Window()
main.setWidth('1000px')
self.setMainWindow(main)
# Create an instance of the preview and add it to the window
# self._display = Embedded('Color preview')
self._display = Canvas()
self._display.setWidth('270px')
self._display.setHeight('270px')
# Add the foreground and background colorpickers to a layout
self._mainLayout = mainLayout = HorizontalLayout()
mainLayout.setMargin(True)
mainLayout.setSpacing(True)
main.setContent(mainLayout)
layout = VerticalLayout()
layout.setWidth('450px')
layout.setSpacing(True)
optPanel = Panel('Customize the color picker popup window',
GridLayout(3, 2))
optPanel.getContent().setSizeFull()
optPanel.getContent().setMargin(True)
optPanel.getContent().setSpacing(True)
self._rgbBox.addListener(RgbClickListener(self), IClickListener)
self._rgbBox.setValue(self._rgbVisible)
self._rgbBox.setImmediate(True)
optPanel.getContent().addComponent(self._rgbBox)
self._hsvBox.addListener(HsvClickListener(self), IClickListener)
self._hsvBox.setValue(self._hsvVisible)
self._hsvBox.setImmediate(True)
optPanel.getContent().addComponent(self._hsvBox)
self._swaBox.addListener(SwaClickListener(self), IClickListener)
self._swaBox.setValue(self._swaVisible)
self._swaBox.setImmediate(True)
optPanel.getContent().addComponent(self._swaBox)
self._hisBox.addListener(HisClickListener(self), IClickListener)
self._hisBox.setValue(self._historyVisible)
self._hisBox.setImmediate(True)
optPanel.getContent().addComponent(self._hisBox)
self._txtBox.addListener(TxtClickListener(self), IClickListener)
self._txtBox.setValue(self._txtfieldVisible)
self._txtBox.setImmediate(True)
optPanel.getContent().addComponent(self._txtBox)
layout.addComponent(optPanel)
panel1 = Panel(
'Button like colorpicker with current color and CSS code',
HorizontalLayout())
panel1.getContent().setSizeFull()
panel1.getContent().setMargin(True)
self._colorpicker1 = ColorPicker('Foreground', self._foregroundColor)
self._colorpicker1.setWidth('100px')
self._colorpicker1.addListener(self)
panel1.addComponent(self._colorpicker1)
panel1.getContent().setComponentAlignment(self._colorpicker1,
Alignment.MIDDLE_CENTER)
self._colorpicker2 = ColorPicker('Background', self._backgroundColor)
self._colorpicker2.addListener(self)
self._colorpicker2.setWidth('100px')
panel1.addComponent(self._colorpicker2)
panel1.getContent().setComponentAlignment(self._colorpicker2,
Alignment.MIDDLE_CENTER)
layout.addComponent(panel1)
panel2 = Panel(
'Button like colorpicker with current color and custom caption',
HorizontalLayout())
panel2.getContent().setSizeFull()
panel2.getContent().setMargin(True)
self._colorpicker3 = ColorPicker('Foreground', self._foregroundColor)
self._colorpicker3.addListener(self)
self._colorpicker3.setWidth('120px')
self._colorpicker3.setButtonCaption('Foreground')
panel2.addComponent(self._colorpicker3)
panel2.getContent().setComponentAlignment(self._colorpicker3,
Alignment.MIDDLE_CENTER)
self._colorpicker4 = ColorPicker('Background', self._backgroundColor)
self._colorpicker4.addListener(self)
self._colorpicker4.setWidth('120px')
self._colorpicker4.setButtonCaption('Background')
panel2.addComponent(self._colorpicker4)
panel2.getContent().setComponentAlignment(self._colorpicker4,
Alignment.MIDDLE_CENTER)
layout.addComponent(panel2)
panel3 = Panel(
'Color area color picker with caption',
HorizontalLayout())
panel3.getContent().setSizeFull()
panel3.getContent().setMargin(True)
self._colorpicker5 = ColorPicker('Foreground', self._foregroundColor)
self._colorpicker5.setCaption('Foreground')
self._colorpicker5.addListener(self)
self._colorpicker5.setButtonStyle(ButtonStyle.BUTTON_AREA)
panel3.addComponent(self._colorpicker5)
panel3.getContent().setComponentAlignment(self._colorpicker5,
Alignment.MIDDLE_CENTER)
self._colorpicker6 = ColorPicker('Background', self._backgroundColor)
self._colorpicker6.setCaption('Background')
self._colorpicker6.addListener(self)
self._colorpicker6.setButtonStyle(ButtonStyle.BUTTON_AREA)
panel3.addComponent(self._colorpicker6)
panel3.getContent().setComponentAlignment(self._colorpicker6,
Alignment.MIDDLE_CENTER)
layout.addComponent(panel3)
mainLayout.addComponent(layout)
mainLayout.addComponent(self._display)
self.updateDisplay(self._foregroundColor, self._backgroundColor)
def updateDisplay(self, fg, bg):
"""Update display.
@param fg:
the foreground color
@param bg:
the background color
"""
# imagesource = MyImageSource(fg, bg)
# now = Date.now()
# frmt = '%H%M%S'
# imageresource = StreamResource(imagesource,
# 'myimage' + now.strftime(frmt) + '.png', self)
# imageresource.setCacheTime(0)
# self._display.setSource(imageresource)
canvas = self._display
canvas.saveContext()
canvas.clear()
canvas.setFillStyle(str(self._backgroundColor))
canvas.fillRect(0, 0, 270, 270)
canvas.saveContext()
canvas.setFillStyle(str(self._foregroundColor))
canvas.arc(135, 135, 100, 0, 2 * pi, True)
canvas.fill()
canvas.restoreContext()
canvas.restoreContext()
def colorChanged(self, event):
if ((event.getSource() == self._colorpicker1)
or (event.getSource() == self._colorpicker3)
or (event.getSource() == self._colorpicker5)):
self._foregroundColor = event.getColor()
if event.getSource() != self._colorpicker1:
self._colorpicker1.setColor(event.getColor())
if event.getSource() != self._colorpicker3:
self._colorpicker3.setColor(event.getColor())
if event.getSource() != self._colorpicker5:
self._colorpicker5.setColor(event.getColor())
elif ((event.getSource() == self._colorpicker2)
or (event.getSource() == self._colorpicker4)
or (event.getSource() == self._colorpicker6)):
self._backgroundColor = event.getColor()
if event.getSource() != self._colorpicker2:
self._colorpicker2.setColor(event.getColor())
if event.getSource() != self._colorpicker4:
self._colorpicker4.setColor(event.getColor())
if event.getSource() != self._colorpicker6:
self._colorpicker6.setColor(event.getColor())
else:
return
oldDisplay = self._display
self._display = Canvas()
self._display.setWidth('270px')
self._display.setHeight('270px')
self.updateDisplay(self._foregroundColor, self._backgroundColor)
self._mainLayout.replaceComponent(oldDisplay, self._display)
def getVersion(self):
return self._VERSION
class _ColorClickListener(IClickListener):
def __init__(self, app):
self._app = app
class RgbClickListener(_ColorClickListener):
def buttonClick(self, event):
self._app._rgbVisible = event.getButton().getValue()
self._app.setPopupVisibilities()
class HsvClickListener(_ColorClickListener):
def buttonClick(self, event):
self._app._hsvVisible = event.getButton().getValue()
self._app.setPopupVisibilities()
class SwaClickListener(_ColorClickListener):
def buttonClick(self, event):
self._app._swaVisible = event.getButton().getValue()
self._app.setPopupVisibilities()
class HisClickListener(_ColorClickListener):
def buttonClick(self, event):
self._app._historyVisible = event.getButton().getValue()
self._app.setPopupVisibilities()
class TxtClickListener(_ColorClickListener):
def buttonClick(self, event):
self._app._txtfieldVisible = event.getButton().getValue()
self._app.setPopupVisibilities()
class MyImageSource(IStreamSource):
"""This class is used to represent the preview of the color selection."""
def __init__(self, fg, bg):
"""Instantiates a new my image source.
@param fg:
the foreground color
@param bg:
the background color
"""
self._imagebuffer = None
self._fgColor = fg
self._bgColor = bg
def getStream(self):
from PIL import Image, ImageDraw # PIL dependency
# Create an image and draw something on it.
image = Image.new("RGB", (270, 270))
drawable = ImageDraw.Draw(image)
drawable.rectangle([0, 0, 270, 270], fill=str(Color.BLUE))
drawable.rectangle([1, 1, 268, 268], fill=str(self._bgColor))
drawable.ellipse([25, 25, 245, 245], fill=str(self._fgColor))
drawable.text((50, 100),
'r=' + str(self._fgColor.getRed()) +
',g=' + str(self._fgColor.getGreen()) +
',b=' + str(self._fgColor.getBlue()), fill=str(Color.BLACK))
drawable.text((5, 15),
'r=' + str(self._bgColor.getRed()) +
',g=' + str(self._bgColor.getGreen()) +
',b=' + str(self._bgColor.getBlue()), fill=str(Color.BLACK))
del drawable
try:
# Write the image to a buffer.
self._imagebuffer = StringIO()
image.save(self._imagebuffer, 'PNG')
return self._imagebuffer
except IOError:
return None
if __name__ == '__main__':
from muntjac.main import muntjac
muntjac(ColorPickerApplication, nogui=True, forever=True, debug=True)
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/tag_read.py
import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
def lazy_import():
from firefly_iii_client.model.object_link import ObjectLink
from firefly_iii_client.model.tag_model import TagModel
globals()['ObjectLink'] = ObjectLink
globals()['TagModel'] = TagModel
class TagRead(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'attributes': (TagModel,), # noqa: E501
'id': (str,), # noqa: E501
'links': (ObjectLink,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'attributes': 'attributes', # noqa: E501
'id': 'id', # noqa: E501
'links': 'links', # noqa: E501
'type': 'type', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, attributes, id, links, type, *args, **kwargs): # noqa: E501
"""TagRead - a model defined in OpenAPI
Args:
attributes (TagModel):
id (str):
links (ObjectLink):
type (str): Immutable value
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attributes = attributes
self.id = id
self.links = links
self.type = type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, attributes, id, links, type, *args, **kwargs): # noqa: E501
"""TagRead - a model defined in OpenAPI
Args:
attributes (TagModel):
id (str):
links (ObjectLink):
type (str): Immutable value
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attributes = attributes
self.id = id
self.links = links
self.type = type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/Colr-0.9.1.tar.gz/Colr-0.9.1/colr/__init__.py
from .base import (
__version__,
ChainedBase,
get_codes,
strip_codes,
)
from .colr import ( # noqa
Colr,
InvalidArg,
InvalidColr,
InvalidFormatArg,
InvalidFormatColr,
InvalidEscapeCode,
InvalidRgbEscapeCode,
InvalidStyle,
auto_disable,
closing_code,
codeformat,
color,
disable,
disabled,
enable,
extbackformat,
extforeformat,
format_back,
format_fore,
get_all_names,
get_code_num,
get_known_codes,
get_known_name,
get_terminal_size,
name_data,
parse_colr_arg,
rgbbackformat,
rgbforeformat,
)
from .codes import (
code_nums,
code_nums_reverse,
codes,
codes_reverse,
)
from .controls import ( # noqa
Control,
EraseMethod,
)
from .colrcontrol import (
ColrControl,
)
from .progress import (
AnimatedProgress,
ProgressBar,
ProgressTimedOut,
StaticProgress,
WriterProcess,
)
from .progress_frames import (
Bars,
BarSet,
Frames,
FrameSet,
)
try:
from .colr_docopt import ( # noqa
docopt,
docopt_file,
docopt_version,
)
has_docopt = True
except ImportError:
has_docopt = False
from .trans import (
ColorCode,
fix_hex,
hex2rgb,
hex2term,
hex2term_map,
hex2termhex,
rgb2hex,
rgb2term,
rgb2termhex,
term2hex,
term2hex_map,
term2rgb
)
from .preset import (
Preset,
)
__all__ = [
# base classes/functions made available.
'__version__',
'ChainedBase',
'get_codes',
'strip_codes',
# colr classes/functions made available.
'InvalidArg',
'InvalidColr',
'InvalidFormatArg',
'InvalidFormatColr',
'InvalidEscapeCode',
'InvalidRgbEscapeCode',
'InvalidStyle',
'auto_disable',
'closing_code',
'code_nums',
'code_nums_reverse',
'codeformat',
'codes',
'codes_reverse',
'color',
'Colr',
'disable',
'disabled',
'enable',
'extbackformat',
'extforeformat',
'format_back',
'format_fore',
'get_all_names',
'get_code_num',
'get_known_codes',
'get_known_name',
'get_terminal_size',
'name_data',
'parse_colr_arg',
'rgbbackformat',
'rgbforeformat',
# controls functions/classes made available.
'Control',
'EraseMethod',
# colrcontrol classes made available.
'ColrControl',
# progress functions/classes made available.
'AnimatedProgress',
'ProgressBar',
'ProgressTimedOut',
'StaticProgress',
'WriterProcess',
# progress frame classes made available.
'Bars',
'BarSet',
'Frames',
'FrameSet',
# trans functions made available.
'ColorCode',
'fix_hex',
'hex2rgb',
'hex2term',
'hex2term_map',
'hex2termhex',
'rgb2hex',
'rgb2term',
'rgb2termhex',
'term2hex',
'term2hex_map',
'term2rgb',
# Preset stuff made available.
'Preset',
]
if has_docopt:
__all__.append('docopt')
__all__.append('docopt_version')
__all__.append('docopt_file')
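# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of the re-exported helpers; exact escape output depends on
# the terminal, and the colour/style names used here are assumptions.
#
#     from colr import Colr, color, auto_disable
#     auto_disable()                       # strip codes when stdout is not a tty
#     print(color('hello', fore='red'))
#     print(Colr('world', fore='blue', style='bright'))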
/DI_engine-0.4.9-py3-none-any.whl/dizoo/classic_control/cartpole/entry/cartpole_c51_main.py
import os
import gym
from tensorboardX import SummaryWriter
from easydict import EasyDict
from ding.config import compile_config
from ding.worker import BaseLearner, SampleSerialCollector, InteractionSerialEvaluator, AdvancedReplayBuffer
from ding.envs import BaseEnvManager, DingEnvWrapper
from ding.policy import C51Policy
from ding.model import C51DQN
from ding.utils import set_pkg_seed
from ding.rl_utils import get_epsilon_greedy_fn
from dizoo.classic_control.cartpole.config.cartpole_c51_config import cartpole_c51_config
# Get the env class in DI-engine form
def wrapped_cartpole_env():
return DingEnvWrapper(
gym.make('CartPole-v0'),
EasyDict(env_wrapper='default'),
)
def main(cfg, seed=0):
cfg = compile_config(
cfg,
BaseEnvManager,
C51Policy,
BaseLearner,
SampleSerialCollector,
InteractionSerialEvaluator,
AdvancedReplayBuffer,
save_cfg=True
)
collector_env_num, evaluator_env_num = cfg.env.collector_env_num, cfg.env.evaluator_env_num
collector_env = BaseEnvManager(env_fn=[wrapped_cartpole_env for _ in range(collector_env_num)], cfg=cfg.env.manager)
evaluator_env = BaseEnvManager(env_fn=[wrapped_cartpole_env for _ in range(evaluator_env_num)], cfg=cfg.env.manager)
# Set random seed for all package and instance
collector_env.seed(seed)
evaluator_env.seed(seed, dynamic_seed=False)
set_pkg_seed(seed, use_cuda=cfg.policy.cuda)
# Set up RL Policy
model = C51DQN(**cfg.policy.model)
policy = C51Policy(cfg.policy, model=model)
# Set up collection, training and evaluation utilities
tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
collector = SampleSerialCollector(
cfg.policy.collect.collector, collector_env, policy.collect_mode, tb_logger, exp_name=cfg.exp_name
)
evaluator = InteractionSerialEvaluator(
cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
)
replay_buffer = AdvancedReplayBuffer(cfg.policy.other.replay_buffer, tb_logger, exp_name=cfg.exp_name)
# Set up other modules, etc. epsilon greedy
eps_cfg = cfg.policy.other.eps
epsilon_greedy = get_epsilon_greedy_fn(eps_cfg.start, eps_cfg.end, eps_cfg.decay, eps_cfg.type)
# Training & Evaluation loop
while True:
# Evaluating at the beginning and with specific frequency
if evaluator.should_eval(learner.train_iter):
stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
if stop:
break
# Update other modules
eps = epsilon_greedy(collector.envstep)
# Sampling data from environments
new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs={'eps': eps})
replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
# Training
for i in range(cfg.policy.learn.update_per_collect):
train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
if train_data is None:
break
learner.train(train_data, collector.envstep)
if __name__ == "__main__":
main(cartpole_c51_config)
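# --- Illustrative note (not part of the original script) ---
# Running this file directly (e.g. ``python cartpole_c51_main.py``) builds the
# collector/evaluator env managers, then loops: evaluate -> collect with an
# epsilon-greedy schedule -> push to the replay buffer -> train for
# ``update_per_collect`` steps, until the evaluator reports the stop condition.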
/Indian_Speech_Lib-1.0.7.tar.gz/Indian_Speech_Lib-1.0.7/Indian_Speech_Lib/Automatic_Transcripts/combine_script.py
import sys
import os
import json
import shutil
class Time(object):
mins = 0
sec = 0
def __init__(self,mins,sec):
self.mins = mins
self.sec = sec
def changeMins(self,mins):
self.mins = mins
def changeSecs(self,sec):
self.sec = sec
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def integrate_transcripts(audio_file_name):
'''
Given the audio file name, this function will make one file (MainTranscript.txt) by concatenating all the chunks of this audio file
'''
out_path = os.path.join(os.getcwd(), 'Out_Chunks')
#now combine them into a single file
transcripts = os.listdir(out_path)
#first create ordered transcript on the basis of chunk numbers
#print(transcripts)
for i in range(0, len(transcripts)):
number = int(transcripts[i][9:-4])
transcripts[i] = number
transcripts.sort()
#print(transcripts)
F = open('MainTranscript.txt','w')
F.write(audio_file_name)
F.write('\n\n\n')
cur_time = Time(0,0)
ite = 0
for transcript in transcripts:
name = 'out_chunk' + str(transcript) + '.txt'
f = open(os.path.join(out_path, name), 'r')
if(cur_time.sec >= 60):
cur_time.changeSecs(cur_time.sec - 60)
cur_time.changeMins(cur_time.mins + 1)
string1 = "{0:0>2}".format(cur_time.mins)
string2 = "{0:0>2}".format(cur_time.sec)
if(ite<2):
cur_time.changeSecs(cur_time.sec+14)
else:
cur_time.changeSecs(cur_time.sec+13)
if(cur_time.sec >= 60):
cur_time.changeSecs(cur_time.sec - 60)
cur_time.changeMins(cur_time.mins + 1)
string3 = "{0:0>2}".format(cur_time.mins)
string4 = "{0:0>2}".format(cur_time.sec)
data = json.load(f)
if(len(data)!=0):
F.write(string1 + '.' + string2 + ' - ' + string3 + '.' + string4 + ' sec - ')
if(isinstance(data, str)):
F.write(data[u"results"][0][u"alternatives"][0][u"transcript"])
else:
value = data["results"][0]["alternatives"][0]["transcript"]
value = value.encode('utf-8')
F.write(value)
F.write('\n\n\n')
cur_time.changeSecs(cur_time.sec + 1)
ite = ite + 1
F.close()
shutil.rmtree(out_path)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
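# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the current working directory contains an ``Out_Chunks`` folder with
# per-chunk JSON transcripts named out_chunk0.txt, out_chunk1.txt, ... as
# produced by the package's transcription step.
#
#     integrate_transcripts("lecture_recording.wav")
#     # writes MainTranscript.txt with timestamped text and removes Out_Chunks/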
/BananaPY-1.1.1.tar.gz/BananaPY-1.1.1/bananapy/Client.py
import aiohttp
class Error(Exception):
"""
Error that is caused when the client returns a status code other than 200 (success).
"""
pass
class Client:
"""
Main Client of BananAPI.
Params:
token (str): The BananAPI token.
"""
def __init__(self, token, session=None):
self.session = session or aiohttp.ClientSession()
self.token = token
self.base_url = "https://bananapi.ml/api/"
async def _get(self, endpoint, params):
"""
Private function to request from the API.
This should not be called directly.
"""
headers = { "Authorization": self.token }
res = await self.session.get(self.base_url + endpoint, headers=headers, params=params)
msg = ""
if res.status != 200:
try:
resp = await res.json()
msg = resp.get("message", "No message found.")
except:
msg = await res.text()
raise Error("{}: {}".format(res.status, msg))
else:
return res
"""
IMAGE-TEXT ENDPOINTS
Params:
text (str): Text to use. Check https://bananapi.ml/docs for limits. If you exceed limits, the API will return 400.
"""
async def abandon(self, text):
res = await self._get("abandon", { "text": text })
res = await res.read()
return res
async def alert(self, text):
res = await self._get("alert", { "text": text })
res = await res.read()
return res
async def autism(self, text):
res = await self._get("autism", { "text": text })
res = await res.read()
return res
async def disabled(self, text):
res = await self._get("disabled", { "text": text })
res = await res.read()
return res
async def headache(self, text):
res = await self._get("headache", { "text": text })
res = await res.read()
return res
async def humansgood(self, text):
res = await self._get("humansgood", { "text": text })
res = await res.read()
return res
async def hurt(self, text):
res = await self._get("hurt", { "text": text })
res = await res.read()
return res
async def legends(self, text):
res = await self._get("legends", { "text": text })
res = await res.read()
return res
async def note(self, text):
res = await self._get("note", { "text": text })
res = await res.read()
return res
async def scroll(self, text):
res = await self._get("scroll", { "text": text })
res = await res.read()
return res
async def sleeptight(self, text):
res = await self._get("sleeptight", { "text": text })
res = await res.read()
return res
async def stayawake(self, text):
res = await self._get("stayawake", { "text": text })
res = await res.read()
return res
async def trumptweet(self, text):
res = await self._get("trumptweet", { "text": text })
res = await res.read()
return res
"""
IMAGE-IMAGE ENDPOINTS
"""
async def peek(self, url):
res = await self._get("peek", { "url": url })
res = await res.read()
return res
async def retarded(self, url):
res = await self._get("retarded", { "url": url })
res = await res.read()
return res
async def spit(self, firstImage, secondImage):
res = await self._get("spit", { "firstImage": firstImage, "secondImage": secondImage })
res = await res.read()
return res
"""
TEXT ENDPOINTS
"""
async def eightball(self, question):
res = await self._get("8ball", { "question": question })
res = await res.json()
return res
async def hash(self, text):
res = await self._get("hash", { "text": text })
res = await res.json()
return res
async def jsify(self, text):
res = await self._get("jsify", { "text": text })
res = await res.json()
return res
async def mock(self, text):
res = await self._get("mock", { "text": text })
res = await res.json()
return res
async def reverse(self, text):
res = await self._get("reverse", { "text": text })
res = await res.json()
return res
async def star(self, text):
res = await self._get("star", { "text": text })
res = await res.json()
return res
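# --- Illustrative usage sketch (not part of the original module) ---
# The token below is a placeholder; text endpoints return parsed JSON while
# image endpoints return raw image bytes.
#
#     import asyncio
#
#     async def demo():
#         client = Client("YOUR_BANANAPI_TOKEN")
#         answer = await client.eightball("Will it work?")   # JSON response
#         image = await client.alert("Read the docs!")       # raw image bytes
#         with open("alert_example.png", "wb") as fp:
#             fp.write(image)
#
#     asyncio.get_event_loop().run_until_complete(demo())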
/EMO_AI-0.0.5-py3-none-any.whl/EMO_AI/model_api.py
__all__ = ['mish', 'Mish', 'EmoModel', 'label2int', 'get_model', 'load_tokenizer', 'setup_tokenizer', 'print_emotion',
'get_output', 'get_model_exp']
# Cell
# necessary evil
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
# Cell
# from https://github.com/digantamisra98/Mish/blob/b5f006660ac0b4c46e2c6958ad0301d7f9c59651/Mish/Torch/mish.py
@torch.jit.script
def mish(input):
return input * torch.tanh(F.softplus(input))
class Mish(nn.Module):
def forward(self, input):
return mish(input)
# Cell
class EmoModel(nn.Module):
def __init__(self, base_model, n_classes, base_model_output_size=768, dropout=0.05):
super().__init__()
self.base_model = base_model
self.classifier = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(base_model_output_size, base_model_output_size),
Mish(),
nn.Dropout(dropout),
nn.Linear(base_model_output_size, n_classes)
)
for layer in self.classifier:
if isinstance(layer, nn.Linear):
layer.weight.data.normal_(mean=0.0, std=0.02)
if layer.bias is not None:
layer.bias.data.zero_()
def forward(self, input_, *args):
X, attention_mask = input_
hidden_states = self.base_model(X, attention_mask=attention_mask)
# customize here
# use the <s> representation
return self.classifier(hidden_states[0][:, 0, :])
# Cell
label2int = {
"sadness": 0,
"joy": 1,
"love": 2,
"anger": 3,
"fear": 4,
"surprise": 5
}
# Cell
def get_model(PATH=".pt", pretrained=True, inference_only=True, lr=0.0001, default_model=None):
if PATH[-3:] != ".pt" and PATH[-4:] != ".pth":
print("Unable to load pretrained model")
return None
# show warning message when it's inference only but lr has been changed
if inference_only == True and lr != 0.0001:
print("Warning: the loaded model is for inference only, so there's no optimizer for the changed learning rate")
# model = EmoModel(AutoModelWithLMHead.from_pretrained("distilroberta-base").base_model, len(emotions))
# emotions = ["sadness", "joy", "love", "anger", "fear", "surprise"], len(label2int) would work as well
model = None
if default_model is None:
from transformers import AutoModelWithLMHead
model = EmoModel(AutoModelWithLMHead.from_pretrained("distilroberta-base").base_model, 6)
else:
# set model to user-defined model
model = default_model
# if you want to train it from scratch
if pretrained == False:
return model
checkpoint = torch.load(PATH)
if inference_only:
# model would not be subscriptable
model.load_state_dict(checkpoint)
model.eval()
else:
# lr: learning rate, adjustable
from transformers import AdamW
optimizer = AdamW(model.parameters(), lr=lr)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.train()
return model
# Cell
# save the pretrained token
def load_tokenizer():
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('distilroberta-base')
# -p, --parent, no error if it exists, make parent directory as necessary
import os
os.system("mkdir -p tokenizer")
tokenizer.save_pretrained("tokenizer")
def setup_tokenizer():
import os
# if there's no previous file/record
# should we check if there are missing files given that it's previously downloaded?
if not os.path.isdir("tokenizer"):
load_tokenizer()
else: # content of previously downloaded files is not complete
checklist = ['merges.txt', 'special_tokens_map.json', 'tokenizer.json', 'tokenizer_config.json', 'vocab.json']
# check existing files
# existing_files = os.walk(os.path.join(os.getcwd(), "tokenizer"))
existing_files = list(os.walk("tokenizer"))[0][2]
# os.walk() won't messed up the order of the searched files,
# so, we can just use "==" operator
if existing_files != checklist:
# clean the previously downloaded ones
os.system("rm -rf tokenizer")
# and, re-download it
load_tokenizer()
# Cell
def print_emotion(output, print_emo=True):
# output = model.forward(input)
import torch
idx = torch.argmax(output, dim=1)
from .model_api import label2int
for key in label2int:
if label2int[key] == idx:
if print_emo == True:
print("Emotion: %s" % key)
break
return key
# Cell
# mainly about not re-writing with torch.no_grad(), model.eval() again
def get_output(text, model, tokenizer=None, return_tensor=False, print_emo=False):
# we should add try/Except error handling for "model" argument
# , but i consider it to be ugly
from .data_process import convert_text_to_tensor
import torch
with torch.no_grad():
model.eval()
# we have control flow in convert_text_to_tensor()
out = model(convert_text_to_tensor(text, tokenizer))
# put it right here to enable "print_emo" argument
emo_label = print_emotion(out, print_emo=print_emo)
if return_tensor == True:
return out
# else, return emotion label (a string)
return emo_label
# Cell
def get_model_exp(PATH=".pt", pretrained=True, inference_only=True, lr=0.0001, custom_model=None, custom_optimizer=None):
def valid_pth(P):
# .pt
# length >=3 and .pt: true
# .pth
# length >=4 and .pth: true
N = len(P)
if N >= 3 and PATH.endswith(".pt"):
return True
if N >= 4 and PATH.endswith(".pth"):
return True
return False
# init globally inside this function
model = None
# if no customized model (user defined model) -> load default model arch.
if custom_model is None:
from transformers import AutoModelWithLMHead
model = EmoModel(AutoModelWithLMHead.from_pretrained("distilroberta-base").base_model, 6)
else:
model = custom_model
if not pretrained:
if inference_only:
model.eval()
return model
if not valid_pth(PATH):
print("model path not valid, return None")
return None
if inference_only:
model.load_state_dict(PATH)
model.eval()
else:
# if it's customized model -> pretrained -> not inference only-> check if optimizer is None
import torch
checkpoint = torch.load(PATH)
if custom_optimizer:
optimizer = custom_optimizer
else:
from transformers import AdamW
optimizer = AdamW(model.parameters(), lr=lr)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.train()
return model
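# --- Illustrative usage sketch (not part of the original module) ---
# The checkpoint path is a placeholder; with ``pretrained=False`` the model is
# randomly initialised, which is enough to exercise the API end to end.
#
#     setup_tokenizer()                    # caches the tokenizer files under ./tokenizer
#     model = get_model(pretrained=False)  # or get_model("emotion.pt") with a real checkpoint
#     label = get_output("I am so happy today", model, print_emo=True)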
/CodeKitLang-0.4.tar.gz/CodeKitLang-0.4/codekitlang/compiler.py
import collections
import logging
import os
import re
def _(s):
return s
Fragment = collections.namedtuple(
'Fragment',
(
'pos', # fpos of fragment start
'line', # line number of fragment
'column', # column number of fragment
'command', # NOOP, STOR, LOAD, JUMP
'args',
),
)
NEW_LINE_RE = re.compile(r'\r?\n', re.MULTILINE)
SPECIAL_COMMENT_RE = re.compile(
r'(?P<wrapper><!--\s*(?:'
r'@(?:(?i)(?:import|include))\s+(?P<filenames>.*?)'
r'|'
r'[@$](?P<variable>[a-zA-Z][^\s:=]*?)\s*(?:[\s:=]\s*(?P<value>.*?))?'
r')-->)',
re.DOTALL | re.LOCALE | re.MULTILINE | re.UNICODE
)
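# Illustrative comments matched by SPECIAL_COMMENT_RE (examples only):
#   <!-- @import header.kit -->       -> 'filenames' group, resolved and inlined
#   <!-- @include a.kit, b.kit -->    -> several files, split on ',' by the parser
#   <!-- @page_title: My Page -->     -> variable 'page_title' with a value
#   <!-- $page_title -->              -> variable reference with no value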
default_logger = logging.getLogger(__name__)
def get_file_content(filepath, encoding_hints=None):
"""
@type filepath: str
@type encoding_hints: [str, ...]
@rtype: (str, str)
"""
with open(filepath, 'rb') as fp:
b = fp.read()
# TODO: encoding detection is not implemented yet
return 'utf-8', unicode(b, encoding='utf-8', errors='replace')
class CompileError(Exception):
def to_message(self):
return _('Compile Error: unknown error')
class CyclicInclusionError(CompileError):
def __init__(self, filepath, stack):
self.filepath = filepath
self.stack = stack
super(CyclicInclusionError, self).__init__(filepath, stack)
def to_message(self):
msg = _('Compile Error: file "{}" is included already from {}')
msg = msg.format(
self.filepath,
_(' from ').join(['"{}"'.format(s) for s in reversed(self.stack)])
)
return msg
class FileNotFoundError(CompileError):
def __init__(self, filename):
self.filename = filename
super(FileNotFoundError, self).__init__(filename)
def to_message(self):
        s = _('Compile Error: file "{}" was not found').format(self.filename)
return s
class UnknownEncodingError(CompileError):
pass
class VariableNotFoundError(CompileError):
def __init__(self, filepath, fragment):
self.filepath = filepath
self.fragment = fragment
super(VariableNotFoundError, self).__init__(filepath, fragment)
def to_message(self):
        s = _('Compile Error: variable "{}" was not found at "{}:{}:{}"')
s = s.format(self.fragment.args, self.filepath, self.fragment.line,
self.fragment.column)
return s
class Compiler(object):
NEW_LINE_RE = NEW_LINE_RE
SPECIAL_COMMENT_RE = SPECIAL_COMMENT_RE
logger = default_logger
def __init__(self, framework_paths=None, logger=None,
missing_file_behavior=None, missing_variable_behavior=None):
"""
@param framework_paths: [str, ...]
@param logger: logging.Logger
@param missing_file_behavior: 'ignore', 'logonly' or 'exception'
(default: 'logonly')
        @param missing_variable_behavior: 'ignore', 'logonly' or 'exception'
(default: 'ignore')
"""
if framework_paths is None:
self.framework_paths = tuple()
elif isinstance(framework_paths, tuple):
self.framework_paths = framework_paths
elif isinstance(framework_paths, basestring):
self.framework_paths = (framework_paths,)
else:
self.framework_paths = tuple(framework_paths)
if logger is not None:
self.logger = logger
if missing_file_behavior is None:
missing_file_behavior = 'logonly'
self.missing_file_behavior = missing_file_behavior
if missing_variable_behavior is None:
missing_variable_behavior = 'ignore'
self.missing_variable_behavior = missing_variable_behavior
self.parsed_caches = dict()
def resolve_path(self, filename, base_path):
"""
@type filename: str
@type base_path: str
@rtype: str
"""
_, ext = os.path.splitext(filename)
if not ext:
filename += '.kit'
ext = '.kit'
if ext == '.kit':
prefixes = ('', '_')
paths = (base_path,) + self.framework_paths
else:
prefixes = ('',)
paths = (base_path,)
for prefix in prefixes:
for path in paths:
filepath = os.path.realpath(os.path.join(path, filename))
basename = os.path.basename(filename)
if prefix and not basename.startswith(prefix):
filepath = os.path.join(
os.path.dirname(filepath),
prefix + os.path.basename(filename)
)
if os.path.exists(filepath):
self.logger.debug('Using %s for %s', filepath, filename)
return filepath
return None
def normalize_path(self, filepath=None, filename=None, basepath=None):
if filepath:
filepath = os.path.realpath(filepath)
elif filename and basepath:
filepath = self.resolve_path(filename, basepath)
else:
pass # TODO: handle assert
return filepath
def get_new_signature(self, filepath):
"""
@param filepath: `realpath`ed full path of file
@type filepath: str
@return: tuple of inode number, mtime and size
@rtye: (int, int, int) or None
"""
cached_signature = None
if filepath in self.parsed_caches:
cache = self.parsed_caches[filepath]
cached_signature = cache['signature']
stat = os.stat(filepath)
signature = stat.st_ino, stat.st_mtime, stat.st_size
if cached_signature and signature == cached_signature:
signature = None
return signature
def parse_str(self, s):
"""
@type s: str
@rtype: [(int, int, int, str, str), ...]
"""
parsed = []
pos = 0
line = 1
column = 1
for m in self.SPECIAL_COMMENT_RE.finditer(s):
if m.start('wrapper') > pos:
subs = s[pos:m.start('wrapper')]
parsed.append(Fragment(pos, line, column, 'NOOP', subs))
pos = m.start('wrapper')
subs = self.NEW_LINE_RE.split(s[:pos])
line = len(subs)
column = len(subs[-1]) + 1
if m.group('filenames'):
for filename in m.group('filenames').split(','):
filename = filename.strip().strip('\'"')
parsed.append(Fragment(pos, line, column, 'JUMP',
filename))
elif m.group('value'):
value = m.group('value').strip()
parsed.append(Fragment(pos, line, column, 'STOR',
(m.group('variable'), value)))
else: # m.group('variable')
parsed.append(Fragment(pos, line, column, 'LOAD',
m.group('variable')))
pos = m.end('wrapper')
subs = self.NEW_LINE_RE.split(s[:pos])
line = len(subs)
column = len(subs[-1]) + 1
parsed.append(Fragment(pos, line, column, 'NOOP', s[pos:]))
return parsed
def parse_file(self, filepath=None, filename=None, basepath=None):
filepath = self.normalize_path(filepath, filename, basepath)
if filepath is None or not os.path.exists(filepath):
ex = FileNotFoundError(filepath)
if self.missing_file_behavior == 'exception':
raise ex
if self.missing_file_behavior == 'logonly':
self.logger.warn(ex.to_message())
return None
signature = self.get_new_signature(filepath)
if signature:
_, ext = os.path.splitext(filepath)
encoding, s = get_file_content(filepath)
if ext == '.kit':
data = self.parse_str(s)
else:
data = [Fragment(0, 1, 1, 'NOOP', s)]
self.parsed_caches[filepath] = dict(
signature=signature,
encoding=encoding,
data=data,
)
for i in range(len(data)):
fragment = data[i]
if fragment.command == 'JUMP':
subfilepath = self.parse_file(
filename=fragment.args,
basepath=os.path.dirname(filepath)
)
data[i] = Fragment(fragment.pos,
fragment.line,
fragment.column,
'JUMP',
subfilepath)
return filepath
def generate_to_list(self, filepath, context=None, stack=None):
filepath = os.path.realpath(filepath)
if context is None:
context = dict()
if stack is None:
stack = tuple()
if filepath in stack:
raise CyclicInclusionError(filepath, stack)
compiled = []
if filepath not in self.parsed_caches:
filepath = self.parse_file(filepath=filepath)
cache = self.parsed_caches.get(filepath, {})
for fragment in cache.get('data', []):
if fragment.command == 'NOOP':
compiled.append(fragment.args)
elif fragment.command == 'STOR':
context[fragment.args[0]] = fragment.args[1]
elif fragment.command == 'LOAD':
if fragment.args not in context:
ex = VariableNotFoundError(filepath, fragment)
if self.missing_variable_behavior == 'exception':
raise ex
elif self.missing_variable_behavior == 'logonly':
self.logger.warn(ex.to_message())
compiled.append(context.get(fragment.args, ''))
elif fragment.command == 'JUMP':
compiled.extend(
self.generate_to_list(fragment.args, context.copy(),
stack + (filepath,))
)
return compiled
def generate_to_str(self, filepath):
return ''.join(self.generate_to_list(filepath))
def generate_to_file(self, dest, src):
dest = os.path.realpath(dest)
d = os.path.dirname(dest)
if not os.path.exists(d):
os.makedirs(d)
s = self.generate_to_str(src)
# TODO: not implemented encoding detection yet
s = s.encode('utf-8')
with open(dest, 'wb') as fp:
fp.write(s)
return | PypiClean |
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/autocomplete_piggy_balance_array.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
def lazy_import():
from firefly_iii_client.model.autocomplete_piggy_balance import AutocompletePiggyBalance
globals()['AutocompletePiggyBalance'] = AutocompletePiggyBalance
class AutocompletePiggyBalanceArray(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'value': ([AutocompletePiggyBalance],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""AutocompletePiggyBalanceArray - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([AutocompletePiggyBalance]): # noqa: E501
Keyword Args:
value ([AutocompletePiggyBalance]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""AutocompletePiggyBalanceArray - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([AutocompletePiggyBalance]): # noqa: E501
Keyword Args:
value ([AutocompletePiggyBalance]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
        return self
/MutPy-Pynguin-0.7.1.tar.gz/MutPy-Pynguin-0.7.1/mutpy/test_runners/base.py
import sys
from abc import abstractmethod
from collections import namedtuple
from mutpy import utils, coverage
class BaseTestSuite:
@abstractmethod
def add_tests(self, test_module, target_test):
pass
@abstractmethod
def skip_test(self, test):
pass
@abstractmethod
def run(self):
pass
@abstractmethod
def run_with_coverage(self, coverage_injector=None):
pass
@abstractmethod
def __iter__(self):
pass
class BaseTest:
@abstractmethod
def __repr__(self):
pass
class CoverageTestResult:
def __init__(self, *args, coverage_injector=None, **kwargs):
super().__init__(*args, **kwargs)
self.coverage_injector = coverage_injector
self.always_covered_nodes = coverage_injector.covered_nodes.copy()
self.test_covered_nodes = {}
def start_measure_coverage(self):
self.covered_nodes = self.coverage_injector.covered_nodes.copy()
self.coverage_injector.covered_nodes.clear()
def stop_measure_coverage(self, test):
self.test_covered_nodes[repr(test)] = self.coverage_injector.covered_nodes.copy() | self.always_covered_nodes
self.coverage_injector.covered_nodes.update(self.covered_nodes)
SerializableMutationTestResult = namedtuple(
'SerializableMutationTestResult', [
'is_incompetent',
'is_survived',
'killer',
'exception_traceback',
'exception',
'tests_run',
]
)
class MutationTestResult:
def __init__(self, *args, coverage_injector=None, **kwargs):
super(MutationTestResult, self).__init__(*args, **kwargs)
self.coverage_injector = coverage_injector
self.passed = []
self.failed = []
self.type_error = None
self.skipped = []
def was_successful(self):
return len(self.failed) == 0 and not self.is_incompetent()
def is_incompetent(self):
return bool(self.type_error)
def is_survived(self):
return self.was_successful()
def _get_killer(self):
if self.failed:
return self.failed[0]
def get_killer(self):
killer = self._get_killer()
if killer:
return killer.name
def get_exception_traceback(self):
killer = self._get_killer()
if killer:
return killer.long_message
def get_exception(self):
return self.type_error
def tests_run(self):
return len(self.passed) + len(self.failed)
def tests_skipped(self):
return len(self.skipped)
def serialize(self):
return SerializableMutationTestResult(
self.is_incompetent(),
self.is_survived(),
str(self.get_killer()),
str(self.get_exception_traceback()),
self.get_exception(),
self.tests_run() - self.tests_skipped(),
)
def set_type_error(self, err):
self.type_error = err
def add_passed(self, name):
self.passed.append(TestInfo(name))
def add_skipped(self, name):
self.skipped.append(TestInfo(name))
def add_failed(self, name, short_message, long_message):
self.failed.append(TestFailure(name, short_message, long_message))
class TestInfo:
def __init__(self, name):
self.name = name
class TestFailure(TestInfo):
def __init__(self, name, short_message, long_message):
super().__init__(name)
self.short_message = short_message
self.long_message = long_message
class BaseTestRunner:
test_suite_cls = None
def __init__(self, test_loader, timeout_factor, stdout_manager, mutate_covered):
self.test_loader = test_loader
self.timeout_factor = timeout_factor
self.stdout_manager = stdout_manager
self.mutate_covered = mutate_covered
self.init_modules = self.find_init_modules()
def create_empty_test_suite(self):
return self.test_suite_cls()
def create_test_suite(self, mutant_module):
if not issubclass(self.test_suite_cls, BaseTestSuite):
raise ValueError('{0} is not a subclass of {1}'.format(self.test_suite_cls, BaseTestSuite))
suite = self.create_empty_test_suite()
injector = utils.ModuleInjector(mutant_module)
for test_module, target_test in self.test_loader.load():
injector.inject_to(test_module)
suite.add_tests(test_module, target_test)
importer = utils.InjectImporter(mutant_module)
importer.install()
return suite
@utils.TimeRegister
def run_tests_with_mutant(self, total_duration, mutant_module, mutations, coverage_result):
suite = self.create_test_suite(mutant_module)
if coverage_result:
self.mark_not_covered_tests_as_skip(mutations, coverage_result, suite)
timer = utils.Timer()
result = self.run_mutation_test_runner(suite, total_duration)
timer.stop()
return result, timer.duration
def run_mutation_test_runner(self, suite, total_duration):
live_time = self.timeout_factor * (total_duration if total_duration > 1 else 1)
test_runner_class = utils.get_mutation_test_runner_class()
test_runner = test_runner_class(suite=suite)
with self.stdout_manager:
test_runner.start()
result = test_runner.get_result(live_time)
test_runner.terminate()
return result
def inject_coverage(self, target_ast, target_module):
if not self.mutate_covered:
return None, None
coverage_injector = coverage.CoverageInjector()
coverage_module = coverage_injector.inject(target_ast, target_module.__name__)
suite = self.create_test_suite(coverage_module)
with self.stdout_manager:
coverage_result = suite.run_with_coverage(coverage_injector=coverage_injector)
return coverage_injector, coverage_result
def run_test(self, test_module, target_test):
suite = self.create_empty_test_suite()
suite.add_tests(test_module, target_test)
timer = utils.Timer()
with self.stdout_manager:
result = suite.run()
return result, timer.stop()
def find_init_modules(self):
test_runner_class = utils.get_mutation_test_runner_class()
test_runner = test_runner_class(suite=self.create_empty_test_suite())
test_runner.start()
test_runner.terminate()
return list(sys.modules.keys())
def remove_loaded_modules(self):
for module in list(sys.modules.keys()):
if module not in self.init_modules:
del sys.modules[module]
def mark_not_covered_tests_as_skip(self, mutations, coverage_result, suite):
mutated_nodes = {mutation.node.marker for mutation in mutations}
for test in suite:
test_id = repr(test)
if test_id in coverage_result.test_covered_nodes and mutated_nodes.isdisjoint(
coverage_result.test_covered_nodes[test_id]):
                suite.skip_test(test)
/AwesomeTkinter-2021.11.8-py3-none-any.whl/awesometkinter/bidirender.py
import os
import platform
import tkinter as tk
import re
from bidi.algorithm import get_display
if not __package__:
__package__ = 'awesometkinter'
from .menu import RightClickMenu
UNSHAPED = 0
ISOLATED = 1
INITIAL = 2
MEDIAL = 3
FINAL = 4
operating_system = platform.system() # current operating system ('Windows', 'Linux', 'Darwin')
shapes_table = (
('\u0621', '\uFE80', '', '', ''), # (ء, ﺀ, , , ),
('\u0622', '\uFE81', '', '', '\uFE82'), # (آ, ﺁ, , , ﺂ),
('\u0623', '\uFE83', '', '', '\uFE84'), # (أ, ﺃ, , , ﺄ),
('\u0624', '\uFE85', '', '', '\uFE86'), # (ؤ, ﺅ, , , ﺆ),
('\u0625', '\uFE87', '', '', '\uFE88'), # (إ, ﺇ, , , ﺈ),
('\u0626', '\uFE89', '\uFE8B', '\uFE8C', '\uFE8A'), # (ئ, ﺉ, ﺋ, ﺌ, ﺊ),
('\u0627', '\uFE8D', '', '', '\uFE8E'), # (ا, ﺍ, , , ﺎ),
('\u0628', '\uFE8F', '\uFE91', '\uFE92', '\uFE90'), # (ب, ﺏ, ﺑ, ﺒ, ﺐ),
('\u0629', '\uFE93', '', '', '\uFE94'), # (ة, ﺓ, , , ﺔ),
('\u062A', '\uFE95', '\uFE97', '\uFE98', '\uFE96'), # (ت, ﺕ, ﺗ, ﺘ, ﺖ),
('\u062B', '\uFE99', '\uFE9B', '\uFE9C', '\uFE9A'), # (ث, ﺙ, ﺛ, ﺜ, ﺚ),
('\u062C', '\uFE9D', '\uFE9F', '\uFEA0', '\uFE9E'), # (ج, ﺝ, ﺟ, ﺠ, ﺞ),
('\u062D', '\uFEA1', '\uFEA3', '\uFEA4', '\uFEA2'), # (ح, ﺡ, ﺣ, ﺤ, ﺢ),
('\u062E', '\uFEA5', '\uFEA7', '\uFEA8', '\uFEA6'), # (خ, ﺥ, ﺧ, ﺨ, ﺦ),
('\u062F', '\uFEA9', '', '', '\uFEAA'), # (د, ﺩ, , , ﺪ),
('\u0630', '\uFEAB', '', '', '\uFEAC'), # (ذ, ﺫ, , , ﺬ),
('\u0631', '\uFEAD', '', '', '\uFEAE'), # (ر, ﺭ, , , ﺮ),
('\u0632', '\uFEAF', '', '', '\uFEB0'), # (ز, ﺯ, , , ﺰ),
('\u0633', '\uFEB1', '\uFEB3', '\uFEB4', '\uFEB2'), # (س, ﺱ, ﺳ, ﺴ, ﺲ),
('\u0634', '\uFEB5', '\uFEB7', '\uFEB8', '\uFEB6'), # (ش, ﺵ, ﺷ, ﺸ, ﺶ),
('\u0635', '\uFEB9', '\uFEBB', '\uFEBC', '\uFEBA'), # (ص, ﺹ, ﺻ, ﺼ, ﺺ),
('\u0636', '\uFEBD', '\uFEBF', '\uFEC0', '\uFEBE'), # (ض, ﺽ, ﺿ, ﻀ, ﺾ),
('\u0637', '\uFEC1', '\uFEC3', '\uFEC4', '\uFEC2'), # (ط, ﻁ, ﻃ, ﻄ, ﻂ),
('\u0638', '\uFEC5', '\uFEC7', '\uFEC8', '\uFEC6'), # (ظ, ﻅ, ﻇ, ﻈ, ﻆ),
('\u0639', '\uFEC9', '\uFECB', '\uFECC', '\uFECA'), # (ع, ﻉ, ﻋ, ﻌ, ﻊ),
('\u063A', '\uFECD', '\uFECF', '\uFED0', '\uFECE'), # (غ, ﻍ, ﻏ, ﻐ, ﻎ),
('\u0640', '\u0640', '\u0640', '\u0640', '\u0640'), # (ـ, ـ, ـ, ـ, ـ), Arabic Tatweel
('\u0641', '\uFED1', '\uFED3', '\uFED4', '\uFED2'), # (ف, ﻑ, ﻓ, ﻔ, ﻒ),
('\u0642', '\uFED5', '\uFED7', '\uFED8', '\uFED6'), # (ق, ﻕ, ﻗ, ﻘ, ﻖ),
('\u0643', '\uFED9', '\uFEDB', '\uFEDC', '\uFEDA'), # (ك, ﻙ, ﻛ, ﻜ, ﻚ),
('\u0644', '\uFEDD', '\uFEDF', '\uFEE0', '\uFEDE'), # (ل, ﻝ, ﻟ, ﻠ, ﻞ),
('\u0645', '\uFEE1', '\uFEE3', '\uFEE4', '\uFEE2'), # (م, ﻡ, ﻣ, ﻤ, ﻢ),
('\u0646', '\uFEE5', '\uFEE7', '\uFEE8', '\uFEE6'), # (ن, ﻥ, ﻧ, ﻨ, ﻦ),
('\u0647', '\uFEE9', '\uFEEB', '\uFEEC', '\uFEEA'), # (ه, ﻩ, ﻫ, ﻬ, ﻪ),
('\u0648', '\uFEED', '', '', '\uFEEE'), # (و, ﻭ, , , ﻮ),
# ('\u0649', '\uFEEF', '\uFBE8', '\uFBE9', '\uFEF0'), # (ى, ﻯ, ﯨ, ﯩ, ﻰ),
('\u0649', '\uFEEF', '', '', '\uFEF0'), # (ى, ﻯ, , , ﻰ),
('\u064A', '\uFEF1', '\uFEF3', '\uFEF4', '\uFEF2'), # (ي, ﻱ, ﻳ, ﻴ, ﻲ),
('\u0671', '\uFB50', '', '', '\uFB51'), # (ٱ, ﭐ, , , ﭑ),
('\u0677', '\uFBDD', '', '', ''), # (ٷ, ﯝ, , , ),
('\u0679', '\uFB66', '\uFB68', '\uFB69', '\uFB67'), # (ٹ, ﭦ, ﭨ, ﭩ, ﭧ),
('\u067A', '\uFB5E', '\uFB60', '\uFB61', '\uFB5F'), # (ٺ, ﭞ, ﭠ, ﭡ, ﭟ),
('\u067B', '\uFB52', '\uFB54', '\uFB55', '\uFB53'), # (ٻ, ﭒ, ﭔ, ﭕ, ﭓ),
('\u067E', '\uFB56', '\uFB58', '\uFB59', '\uFB57'), # (پ, ﭖ, ﭘ, ﭙ, ﭗ),
('\u067F', '\uFB62', '\uFB64', '\uFB65', '\uFB63'), # (ٿ, ﭢ, ﭤ, ﭥ, ﭣ),
('\u0680', '\uFB5A', '\uFB5C', '\uFB5D', '\uFB5B'), # (ڀ, ﭚ, ﭜ, ﭝ, ﭛ),
('\u0683', '\uFB76', '\uFB78', '\uFB79', '\uFB77'), # (ڃ, ﭶ, ﭸ, ﭹ, ﭷ),
('\u0684', '\uFB72', '\uFB74', '\uFB75', '\uFB73'), # (ڄ, ﭲ, ﭴ, ﭵ, ﭳ),
('\u0686', '\uFB7A', '\uFB7C', '\uFB7D', '\uFB7B'), # (چ, ﭺ, ﭼ, ﭽ, ﭻ),
('\u0687', '\uFB7E', '\uFB80', '\uFB81', '\uFB7F'), # (ڇ, ﭾ, ﮀ, ﮁ, ﭿ),
('\u0688', '\uFB88', '', '', '\uFB89'), # (ڈ, ﮈ, , , ﮉ),
('\u068C', '\uFB84', '', '', '\uFB85'), # (ڌ, ﮄ, , , ﮅ),
('\u068D', '\uFB82', '', '', '\uFB83'), # (ڍ, ﮂ, , , ﮃ),
('\u068E', '\uFB86', '', '', '\uFB87'), # (ڎ, ﮆ, , , ﮇ),
('\u0691', '\uFB8C', '', '', '\uFB8D'), # (ڑ, ﮌ, , , ﮍ),
('\u0698', '\uFB8A', '', '', '\uFB8B'), # (ژ, ﮊ, , , ﮋ),
('\u06A4', '\uFB6A', '\uFB6C', '\uFB6D', '\uFB6B'), # (ڤ, ﭪ, ﭬ, ﭭ, ﭫ),
('\u06A6', '\uFB6E', '\uFB70', '\uFB71', '\uFB6F'), # (ڦ, ﭮ, ﭰ, ﭱ, ﭯ),
('\u06A9', '\uFB8E', '\uFB90', '\uFB91', '\uFB8F'), # (ک, ﮎ, ﮐ, ﮑ, ﮏ),
('\u06AD', '\uFBD3', '\uFBD5', '\uFBD6', '\uFBD4'), # (ڭ, ﯓ, ﯕ, ﯖ, ﯔ),
('\u06AF', '\uFB92', '\uFB94', '\uFB95', '\uFB93'), # (گ, ﮒ, ﮔ, ﮕ, ﮓ),
('\u06B1', '\uFB9A', '\uFB9C', '\uFB9D', '\uFB9B'), # (ڱ, ﮚ, ﮜ, ﮝ, ﮛ),
('\u06B3', '\uFB96', '\uFB98', '\uFB99', '\uFB97'), # (ڳ, ﮖ, ﮘ, ﮙ, ﮗ),
('\u06BA', '\uFB9E', '', '', '\uFB9F'), # (ں, ﮞ, , , ﮟ),
('\u06BB', '\uFBA0', '\uFBA2', '\uFBA3', '\uFBA1'), # (ڻ, ﮠ, ﮢ, ﮣ, ﮡ),
('\u06BE', '\uFBAA', '\uFBAC', '\uFBAD', '\uFBAB'), # (ھ, ﮪ, ﮬ, ﮭ, ﮫ),
('\u06C0', '\uFBA4', '', '', '\uFBA5'), # (ۀ, ﮤ, , , ﮥ),
('\u06C1', '\uFBA6', '\uFBA8', '\uFBA9', '\uFBA7'), # (ہ, ﮦ, ﮨ, ﮩ, ﮧ),
('\u06C5', '\uFBE0', '', '', '\uFBE1'), # (ۅ, ﯠ, , , ﯡ),
('\u06C6', '\uFBD9', '', '', '\uFBDA'), # (ۆ, ﯙ, , , ﯚ),
('\u06C7', '\uFBD7', '', '', '\uFBD8'), # (ۇ, ﯗ, , , ﯘ),
('\u06C8', '\uFBDB', '', '', '\uFBDC'), # (ۈ, ﯛ, , , ﯜ),
('\u06C9', '\uFBE2', '', '', '\uFBE3'), # (ۉ, ﯢ, , , ﯣ),
('\u06CB', '\uFBDE', '', '', '\uFBDF'), # (ۋ, ﯞ, , , ﯟ),
('\u06CC', '\uFBFC', '\uFBFE', '\uFBFF', '\uFBFD'), # (ی, ﯼ, ﯾ, ﯿ, ﯽ),
('\u06D0', '\uFBE4', '\uFBE6', '\uFBE7', '\uFBE5'), # (ې, ﯤ, ﯦ, ﯧ, ﯥ),
('\u06D2', '\uFBAE', '', '', '\uFBAF'), # (ے, ﮮ, , , ﮯ),
('\u06D3', '\uFBB0', '', '', '\uFBB1'), # (ۓ, ﮰ, , , ﮱ),
('\uFEFB', '\uFEFB', '', '', '\uFEFC'), # (ﻻ, ﻻ, , , ﻼ),
('\uFEF7', '\uFEF7', '', '', '\uFEF8'), # (ﻷ, ﻷ, , , ﻸ),
('\uFEF5', '\uFEF5', '', '', '\uFEF6'), # (ﻵ, ﻵ, , , ﻶ),
('\uFEF9', '\uFEF9', '', '', '\uFEFA'), # (ﻹ, ﻹ, , , ﻺ),
)
unshaped_to_isolated = {x[UNSHAPED]: x[ISOLATED] for x in shapes_table}
mandatory_liga_table = {
('\uFEDF', '\uFE82'): '\uFEF5', # ['ﻟ', 'ﺂ', 'ﻵ']
('\uFEDF', '\uFE84'): '\uFEF7', # ['ﻟ', 'ﺄ', 'ﻷ']
('\uFEDF', '\uFE88'): '\uFEF9', # ['ﻟ', 'ﺈ', 'ﻹ']
('\uFEDF', '\uFE8E'): '\uFEFB', # ['ﻟ', 'ﺎ', 'ﻻ']
('\uFEE0', '\uFE82'): '\uFEF6', # ['ﻠ', 'ﺂ', 'ﻶ']
('\uFEE0', '\uFE84'): '\uFEF8', # ['ﻠ', 'ﺄ', 'ﻸ']
('\uFEE0', '\uFE88'): '\uFEFA', # ['ﻠ', 'ﺈ', 'ﻺ']
('\uFEE0', '\uFE8E'): '\uFEFC', # ['ﻠ', 'ﺎ', 'ﻼ']
}
# lam = '\u0644'
lamalif_to_alif = {
'\uFEF5': '\u0622', # [ 'آ', 'ﻵ']
'\uFEF7': '\u0623', # ['ﺃ', 'ﻷ']
'\uFEF9': '\u0625', # [ 'ﺇ', 'ﻹ']
'\uFEFB': '\u0627', # ['ﺍ', 'ﻻ']
}
HARAKAT_RE = re.compile(
'['
'\u0610-\u061a'
'\u064b-\u065f'
'\u0670'
'\u06d6-\u06dc'
'\u06df-\u06e8'
'\u06ea-\u06ed'
'\u08d4-\u08e1'
'\u08d4-\u08ed'
'\u08e3-\u08ff'
']',
re.UNICODE | re.X
)
ARABIC_RE = re.compile(
'['
'\u0600-\u060A'
'\u060C-\u06FF'
'\u0750-\u077F'
'\u08A0-\u08FF'
'\u206C-\u206D'
'\uFB50-\uFD3D'
'\uFD50-\uFDFB'
'\uFE70-\uFEFC'
']',
re.UNICODE | re.X
)
NUMBERS_RE = re.compile(
'['
'\u0660-\u0669' # indic numbers
'\u0030-\u0039' # arabic numbers
']',
re.UNICODE | re.X)
NEUTRAL_RE = re.compile(
'['
'\u0000-\u0040'
'\u005B-\u0060'
'\u007B-\u007F'
']',
re.UNICODE | re.X)
def remove_harakat(text):
result = [c for c in text if not HARAKAT_RE.match(c)]
# print(HARAKAT_RE.match(c))
return ''.join(result)
def do_ligation(text):
result = []
    for i, c in enumerate(text):
        # guard against i == 0, where text[i - 1] would wrap around to the last character
        shape = mandatory_liga_table.get((c, text[i - 1]), None) if i > 0 else None
if shape:
result.pop()
result.append(shape)
else:
result.append(c)
return ''.join(result)
def do_shaping(text):
def get_shapes(c):
# get all different letter shapes
if c is None:
return {}
key = c
match = [v for v in shapes_table if key in v]
if match:
match = match[UNSHAPED]
return {ISOLATED: match[ISOLATED], INITIAL: match[INITIAL], MEDIAL: match[MEDIAL], FINAL: match[FINAL]}
else:
return {}
def get_shape(c, right_char, left_char):
"""get a proper letter shape
Args:
c: current letter
right_char: letter before
left_char: letter after
"""
c_shapes = get_shapes(c)
if c_shapes and c_shapes.get(FINAL):
# letter is arabic
right_char_shapes = get_shapes(right_char)
left_char_shapes = get_shapes(left_char)
position = MEDIAL if right_char_shapes.get(MEDIAL) else INITIAL
alternative = {MEDIAL: FINAL, INITIAL: ISOLATED}
if not isarabic(left_char):
position = alternative[position]
elif not left_char_shapes.get(FINAL):
position = ISOLATED
c = c_shapes.get(position) or c_shapes.get(alternative[position])
return c
t = []
for i in range(len(text) - 1, -1, -1):
c = text[i]
right_char = text[i + 1] if i < len(text) - 1 else None
left_char = text[i - 1] if i > 0 else None
t.insert(0, get_shape(c, right_char, left_char))
return ''.join(t)
def workaround_for_windows_auto_bidi(text):
"""workaround to disable windows auto-bidi
we should pass only ISOLATED form of arabic letters
since unshaped letters trigger windows bidi engine
"""
# todo: should find away to disable windows bidi completely
# convert all unshaped letters to isolated to bypass windows auto-bidi
text = ''.join([unshaped_to_isolated.get(c, c) for c in text])
# remove arabic TATWEEL letter '\u0640', it has no isolated form
text = text.replace('\u0640', '')
return text
def reshaper(text):
text = do_shaping(text)
text = do_ligation(text)
text = remove_harakat(text)
if operating_system == 'Windows':
text = workaround_for_windows_auto_bidi(text)
return text
def render_bidi_text(text):
text = get_display(text)
text = reshaper(text)
return text
def derender_bidi_text(text):
# convert visual text to logical
# get unshaped characters
unshaped_text = []
for c in text:
match = [item[0] for item in shapes_table if c in item]
if match:
c = match[0]
# lam-alif decomposition
if c in lamalif_to_alif:
alif = lamalif_to_alif[c]
lam = '\u0644'
unshaped_text.append(alif)
c = lam
unshaped_text.append(c)
# reverse text order to its original state
text = get_display(''.join(unshaped_text))
return text
def split_path(path):
"""
split path into individual parts
Args:
path(str): string representation of a path, e.g: '/home/Desktop'
Return:
list of splitted path
credit: https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s16.html
"""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def render_bidi_path(path):
"""
render bidi words in path string
Args:
path(str): string representation of a path, e.g: '/home/Desktop'
Return:
(str) rendered path
"""
parts = split_path(path)
parts = [render_bidi_text(x) for x in parts]
return os.path.join(*parts)
def derender_bidi_path(path):
"""
reverse of render_bidi_path
"""
parts = split_path(path)
parts = [derender_bidi_text(x) for x in parts]
return os.path.join(*parts)
def render_text(text, ispath=False):
"""
render bidi text
Args:
text(str): input text that contains a bidi words e.g: English words mixed with Arabic words
ispath(bool): whether the text argument is path or not, e.g: '/usr/bin/etc'
Returns:
(str): rendered text
"""
if ispath:
return render_bidi_path(text)
else:
return render_bidi_text(text)
def derender_text(text, ispath=False):
if ispath:
return derender_bidi_path(text)
else:
return derender_bidi_text(text)
def isarabic(c):
if isinstance(c, str):
match = ARABIC_RE.match(c)
return match
return False
def is_neutral(c):
if isinstance(c, str):
match = NEUTRAL_RE.match(c)
return match
return False
def handle_entry(event, widget):
try:
if widget.focus_get() != widget: # sometimes it raise an exception
return
except:
return
def move_cursor_to_left():
# control direction
current_index = widget.index(tk.INSERT)
new_index = current_index - 1 if current_index >= 1 else 0
widget.icursor(new_index)
c = event.char
index = widget.index('insert')
if not (c or event.keysym in ('BackSpace', 'Delete') or isarabic(c) or is_neutral(c)):
return
if NUMBERS_RE.match(event.char):
return
if isarabic(c):
widget.RTL = True
move_cursor_to_left()
# handle backspace
elif event.keysym in ('BackSpace', 'Delete'):
try:
widget.delete("sel.first", "sel.last")
except:
if widget.RTL and event.keysym == 'BackSpace' or not widget.RTL and event.keysym == 'Delete':
widget.delete(index)
elif index > 0:
widget.delete(index - 1)
elif is_neutral(c) and widget.RTL:
move_cursor_to_left()
else:
widget.RTL = False
if widget.last_text == widget._get():
return
text = widget._get()
index = widget.index('insert')
widget.delete(0, "end")
text = reshaper(text)
widget.insert(0, text)
widget.icursor(index)
widget.last_text = widget._get()
def add_bidi_support_for_entry(widget):
"""add arabic support for an entry widget"""
def handledeletion(event):
handle_entry(event, widget)
return 'break'
widget.RTL = False
widget.last_text = ''
widget.bind("<BackSpace>", handledeletion)
widget.bind("<Delete>", handledeletion)
widget._get = widget.get
widget.get = lambda: derender_bidi_text(widget._get())
def set_text(text):
widget.delete(0, "end")
widget.insert(0, render_bidi_text(text))
widget.set = set_text
widget.bind_all('<KeyPress>', lambda event: handle_entry(event, widget), add='+')
def add_bidi_support_for_label(widget):
"""add arabic support for an entry widget"""
def get_text():
return derender_bidi_text(widget['text'])
def set_text(text):
widget['text'] = render_bidi_text(text)
widget.get = get_text
widget.set = set_text
def add_bidi_support(widget, render_copy_paste=True, copy_paste_menu=False, ispath=False):
"""add bidi support for tkinter widget """
if widget.winfo_class() == 'Label':
add_bidi_support_for_label(widget)
elif widget.winfo_class() == 'Entry':
add_bidi_support_for_entry(widget)
if render_copy_paste:
override_copy_paste(widget, ispath=ispath, copy_paste_menu=copy_paste_menu)
def override_copy_paste(widget, copyrender=derender_text, pasterender=render_text, ispath=False, copy_paste_menu=False):
def copy(value):
"""copy clipboard value
Args:
value (str): value to be copied to clipboard
"""
try:
widget.clipboard_clear()
widget.clipboard_append(str(value))
except:
pass
def paste():
"""get clipboard value"""
try:
value = widget.clipboard_get()
except:
value = ''
return value
def copy_callback(*args):
try:
selected_text = widget.selection_get()
derendered_text = copyrender(selected_text, ispath=ispath)
copy(derendered_text)
except:
pass
return 'break'
def paste_callback(*args):
try:
widget.delete("sel.first", "sel.last")
except:
pass
try:
text = paste()
rendered_text = pasterender(text, ispath=ispath)
widget.insert(tk.INSERT, rendered_text)
except:
pass
return 'break'
# bind
widget.bind("<<Copy>>", copy_callback)
widget.bind("<<Paste>>", paste_callback)
# reference copy paste
widget.copy_callback = copy_callback
widget.paste_callback = paste_callback
# right click menu
def rcm_handler(option):
if option.lower() == 'copy':
copy_callback()
else:
paste_callback()
if copy_paste_menu:
widget.rcm = RightClickMenu(widget, ['copy', 'paste'], callback=rcm_handler)
if __name__ == '__main__':
root = tk.Tk()
txt = 'السلام عليكم'
# text display incorrectly on linux
dummyvar = tk.StringVar()
dummyvar.set(txt)
tk.Label(root, textvariable=dummyvar, font='any 20').pack()
# uncomment below to set a rendered text to first label
dummyvar.set(render_bidi_text(txt))
entry = tk.Entry(root, font='any 20', justify='right')
entry.pack()
lbl = tk.Label(root, font='any 20')
lbl.pack()
# adding bidi support for widgets
add_bidi_support(lbl)
add_bidi_support(entry)
# we can use set() and get() methods to set and get text on a widget
entry.set(txt)
lbl.set('هذا كتاب adventure شيق')
    root.mainloop()
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/_editor/plugins/FullScreen.js
define("dijit/_editor/plugins/FullScreen",["dojo/aspect","dojo/_base/declare","dojo/dom-class","dojo/dom-geometry","dojo/dom-style","dojo/_base/event","dojo/i18n","dojo/keys","dojo/_base/lang","dojo/on","dojo/_base/sniff","dojo/_base/window","dojo/window","../../focus","../_Plugin","../../form/ToggleButton","../../registry","dojo/i18n!../nls/commands"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,on,_a,_b,_c,_d,_e,_f,_10){
var _11=_2("dijit._editor.plugins.FullScreen",_e,{zIndex:500,_origState:null,_origiFrameState:null,_resizeHandle:null,isFullscreen:false,toggle:function(){
this.button.set("checked",!this.button.get("checked"));
},_initButton:function(){
var _12=_7.getLocalization("dijit._editor","commands"),_13=this.editor;
this.button=new _f({label:_12["fullScreen"],dir:_13.dir,lang:_13.lang,showLabel:false,iconClass:this.iconClassPrefix+" "+this.iconClassPrefix+"FullScreen",tabIndex:"-1",onChange:_9.hitch(this,"_setFullScreen")});
},setEditor:function(_14){
this.editor=_14;
this._initButton();
this.editor.addKeyHandler(_8.F11,true,true,_9.hitch(this,function(e){
this.toggle();
_6.stop(e);
setTimeout(_9.hitch(this,function(){
this.editor.focus();
}),250);
return true;
}));
this.connect(this.editor.domNode,"onkeydown","_containFocus");
},_containFocus:function(e){
if(this.isFullscreen){
var ed=this.editor;
if(!ed.isTabIndent&&ed._fullscreen_oldOnKeyDown&&e.keyCode===_8.TAB){
var f=_d.curNode;
var avn=this._getAltViewNode();
if(f==ed.iframe||(avn&&f===avn)){
setTimeout(_9.hitch(this,function(){
ed.toolbar.focus();
}),10);
}else{
if(avn&&_5.get(ed.iframe,"display")==="none"){
setTimeout(_9.hitch(this,function(){
_d.focus(avn);
}),10);
}else{
setTimeout(_9.hitch(this,function(){
ed.focus();
}),10);
}
}
_6.stop(e);
}else{
if(ed._fullscreen_oldOnKeyDown){
ed._fullscreen_oldOnKeyDown(e);
}
}
}
},_resizeEditor:function(){
var vp=_c.getBox();
_4.setMarginBox(this.editor.domNode,{w:vp.w,h:vp.h});
var _15=this.editor.getHeaderHeight();
var _16=this.editor.getFooterHeight();
var _17=_4.getPadBorderExtents(this.editor.domNode);
var _18=_4.getPadBorderExtents(this.editor.iframe.parentNode);
var _19=_4.getMarginExtents(this.editor.iframe.parentNode);
var _1a=vp.h-(_15+_17.h+_16);
_4.setMarginBox(this.editor.iframe.parentNode,{h:_1a,w:vp.w});
_4.setMarginBox(this.editor.iframe,{h:_1a-(_18.h+_19.h)});
},_getAltViewNode:function(){
},_setFullScreen:function(_1b){
var vp=_c.getBox();
var ed=this.editor;
var _1c=_b.body();
var _1d=ed.domNode.parentNode;
this.isFullscreen=_1b;
if(_1b){
while(_1d&&_1d!==_b.body()){
_3.add(_1d,"dijitForceStatic");
_1d=_1d.parentNode;
}
this._editorResizeHolder=this.editor.resize;
ed.resize=function(){
};
ed._fullscreen_oldOnKeyDown=ed.onKeyDown;
ed.onKeyDown=_9.hitch(this,this._containFocus);
this._origState={};
this._origiFrameState={};
var _1e=ed.domNode,_1f=_1e&&_1e.style||{};
this._origState={width:_1f.width||"",height:_1f.height||"",top:_5.get(_1e,"top")||"",left:_5.get(_1e,"left")||"",position:_5.get(_1e,"position")||"static",marginBox:_4.getMarginBox(ed.domNode)};
var _20=ed.iframe,_21=_20&&_20.style||{};
var bc=_5.get(ed.iframe,"backgroundColor");
this._origiFrameState={backgroundColor:bc||"transparent",width:_21.width||"auto",height:_21.height||"auto",zIndex:_21.zIndex||""};
_5.set(ed.domNode,{position:"absolute",top:"0px",left:"0px",zIndex:this.zIndex,width:vp.w+"px",height:vp.h+"px"});
_5.set(ed.iframe,{height:"100%",width:"100%",zIndex:this.zIndex,backgroundColor:bc!=="transparent"&&bc!=="rgba(0, 0, 0, 0)"?bc:"white"});
_5.set(ed.iframe.parentNode,{height:"95%",width:"100%"});
if(_1c.style&&_1c.style.overflow){
this._oldOverflow=_5.get(_1c,"overflow");
}else{
this._oldOverflow="";
}
if(_a("ie")&&!_a("quirks")){
if(_1c.parentNode&&_1c.parentNode.style&&_1c.parentNode.style.overflow){
this._oldBodyParentOverflow=_1c.parentNode.style.overflow;
}else{
try{
this._oldBodyParentOverflow=_5.get(_1c.parentNode,"overflow");
}
catch(e){
this._oldBodyParentOverflow="scroll";
}
}
_5.set(_1c.parentNode,"overflow","hidden");
}
_5.set(_1c,"overflow","hidden");
var _22=function(){
var vp=_c.getBox();
if("_prevW" in this&&"_prevH" in this){
if(vp.w===this._prevW&&vp.h===this._prevH){
return;
}
}else{
this._prevW=vp.w;
this._prevH=vp.h;
}
if(this._resizer){
clearTimeout(this._resizer);
delete this._resizer;
}
this._resizer=setTimeout(_9.hitch(this,function(){
delete this._resizer;
this._resizeEditor();
}),10);
};
this._resizeHandle=on(window,"resize",_9.hitch(this,_22));
this._resizeHandle2=_1.after(ed,"onResize",_9.hitch(this,function(){
if(this._resizer){
clearTimeout(this._resizer);
delete this._resizer;
}
this._resizer=setTimeout(_9.hitch(this,function(){
delete this._resizer;
this._resizeEditor();
}),10);
}));
this._resizeEditor();
var dn=this.editor.toolbar.domNode;
setTimeout(function(){
_c.scrollIntoView(dn);
},250);
}else{
if(this._resizeHandle){
this._resizeHandle.remove();
this._resizeHandle=null;
}
if(this._resizeHandle2){
this._resizeHandle2.remove();
this._resizeHandle2=null;
}
if(this._rst){
clearTimeout(this._rst);
this._rst=null;
}
while(_1d&&_1d!==_b.body()){
_3.remove(_1d,"dijitForceStatic");
_1d=_1d.parentNode;
}
if(this._editorResizeHolder){
this.editor.resize=this._editorResizeHolder;
}
if(!this._origState&&!this._origiFrameState){
return;
}
if(ed._fullscreen_oldOnKeyDown){
ed.onKeyDown=ed._fullscreen_oldOnKeyDown;
delete ed._fullscreen_oldOnKeyDown;
}
var _23=this;
setTimeout(function(){
var mb=_23._origState.marginBox;
var oh=_23._origState.height;
if(_a("ie")&&!_a("quirks")){
_1c.parentNode.style.overflow=_23._oldBodyParentOverflow;
delete _23._oldBodyParentOverflow;
}
_5.set(_1c,"overflow",_23._oldOverflow);
delete _23._oldOverflow;
_5.set(ed.domNode,_23._origState);
_5.set(ed.iframe.parentNode,{height:"",width:""});
_5.set(ed.iframe,_23._origiFrameState);
delete _23._origState;
delete _23._origiFrameState;
var _24=_10.getEnclosingWidget(ed.domNode.parentNode);
if(_24&&_24.resize){
_24.resize();
}else{
if(!oh||oh.indexOf("%")<0){
setTimeout(_9.hitch(this,function(){
ed.resize({h:mb.h});
}),0);
}
}
_c.scrollIntoView(_23.editor.toolbar.domNode);
},100);
}
},updateState:function(){
this.button.set("disabled",this.get("disabled"));
},destroy:function(){
if(this._resizeHandle){
this._resizeHandle.remove();
this._resizeHandle=null;
}
if(this._resizeHandle2){
this._resizeHandle2.remove();
this._resizeHandle2=null;
}
if(this._resizer){
clearTimeout(this._resizer);
this._resizer=null;
}
this.inherited(arguments);
}});
_e.registry["fullScreen"]=_e.registry["fullscreen"]=function(_25){
return new _11({zIndex:("zIndex" in _25)?_25.zIndex:500});
};
return _11;
});
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/typescript/lib/lib.es2015.symbol.wellknown.d.ts |
/// <reference lib="es2015.symbol" />
interface SymbolConstructor {
/**
* A method that determines if a constructor object recognizes an object as one of the
* constructor’s instances. Called by the semantics of the instanceof operator.
*/
readonly hasInstance: unique symbol;
/**
* A Boolean value that if true indicates that an object should flatten to its array elements
* by Array.prototype.concat.
*/
readonly isConcatSpreadable: unique symbol;
/**
* A regular expression method that matches the regular expression against a string. Called
* by the String.prototype.match method.
*/
readonly match: unique symbol;
/**
* A regular expression method that replaces matched substrings of a string. Called by the
* String.prototype.replace method.
*/
readonly replace: unique symbol;
/**
* A regular expression method that returns the index within a string that matches the
* regular expression. Called by the String.prototype.search method.
*/
readonly search: unique symbol;
/**
* A function valued property that is the constructor function that is used to create
* derived objects.
*/
readonly species: unique symbol;
/**
* A regular expression method that splits a string at the indices that match the regular
* expression. Called by the String.prototype.split method.
*/
readonly split: unique symbol;
/**
* A method that converts an object to a corresponding primitive value.
* Called by the ToPrimitive abstract operation.
*/
readonly toPrimitive: unique symbol;
/**
* A String value that is used in the creation of the default string description of an object.
* Called by the built-in method Object.prototype.toString.
*/
readonly toStringTag: unique symbol;
/**
* An Object whose own property names are property names that are excluded from the 'with'
* environment bindings of the associated objects.
*/
readonly unscopables: unique symbol;
}
interface Symbol {
/**
* Converts a Symbol object to a symbol.
*/
[Symbol.toPrimitive](hint: string): symbol;
readonly [Symbol.toStringTag]: string;
}
interface Array<T> {
/**
* Returns an object whose properties have the value 'true'
* when they will be absent when used in a 'with' statement.
*/
[Symbol.unscopables](): {
copyWithin: boolean;
entries: boolean;
fill: boolean;
find: boolean;
findIndex: boolean;
keys: boolean;
values: boolean;
};
}
interface Date {
/**
* Converts a Date object to a string.
*/
[Symbol.toPrimitive](hint: "default"): string;
/**
* Converts a Date object to a string.
*/
[Symbol.toPrimitive](hint: "string"): string;
/**
* Converts a Date object to a number.
*/
[Symbol.toPrimitive](hint: "number"): number;
/**
* Converts a Date object to a string or number.
*
* @param hint The strings "number", "string", or "default" to specify what primitive to return.
*
* @throws {TypeError} If 'hint' was given something other than "number", "string", or "default".
* @returns A number if 'hint' was "number", a string if 'hint' was "string" or "default".
*/
[Symbol.toPrimitive](hint: string): string | number;
}
interface Map<K, V> {
readonly [Symbol.toStringTag]: string;
}
interface WeakMap<K extends object, V> {
readonly [Symbol.toStringTag]: string;
}
interface Set<T> {
readonly [Symbol.toStringTag]: string;
}
interface WeakSet<T extends object> {
readonly [Symbol.toStringTag]: string;
}
interface JSON {
readonly [Symbol.toStringTag]: string;
}
interface Function {
/**
* Determines whether the given value inherits from this function if this function was used
* as a constructor function.
*
* A constructor function can control which objects are recognized as its instances by
* 'instanceof' by overriding this method.
*/
[Symbol.hasInstance](value: any): boolean;
}
interface GeneratorFunction {
readonly [Symbol.toStringTag]: string;
}
interface Math {
readonly [Symbol.toStringTag]: string;
}
interface Promise<T> {
readonly [Symbol.toStringTag]: string;
}
interface PromiseConstructor {
readonly [Symbol.species]: PromiseConstructor;
}
interface RegExp {
/**
* Matches a string with this regular expression, and returns an array containing the results of
* that search.
* @param string A string to search within.
*/
[Symbol.match](string: string): RegExpMatchArray | null;
/**
* Replaces text in a string, using this regular expression.
* @param string A String object or string literal whose contents matching against
* this regular expression will be replaced
* @param replaceValue A String object or string literal containing the text to replace for every
* successful match of this regular expression.
*/
[Symbol.replace](string: string, replaceValue: string): string;
/**
* Replaces text in a string, using this regular expression.
* @param string A String object or string literal whose contents matching against
* this regular expression will be replaced
* @param replacer A function that returns the replacement text.
*/
[Symbol.replace](string: string, replacer: (substring: string, ...args: any[]) => string): string;
/**
* Finds the position beginning first substring match in a regular expression search
* using this regular expression.
*
* @param string The string to search within.
*/
[Symbol.search](string: string): number;
/**
* Returns an array of substrings that were delimited by strings in the original input that
* match against this regular expression.
*
* If the regular expression contains capturing parentheses, then each time this
* regular expression matches, the results (including any undefined results) of the
* capturing parentheses are spliced.
*
* @param string string value to split
* @param limit if not undefined, the output array is truncated so that it contains no more
* than 'limit' elements.
*/
[Symbol.split](string: string, limit?: number): string[];
}
interface RegExpConstructor {
readonly [Symbol.species]: RegExpConstructor;
}
interface String {
/**
* Matches a string or an object that supports being matched against, and returns an array
* containing the results of that search, or null if no matches are found.
* @param matcher An object that supports being matched against.
*/
match(matcher: { [Symbol.match](string: string): RegExpMatchArray | null; }): RegExpMatchArray | null;
/**
* Passes a string and {@linkcode replaceValue} to the `[Symbol.replace]` method on {@linkcode searchValue}. This method is expected to implement its own replacement algorithm.
* @param searchValue An object that supports searching for and replacing matches within a string.
* @param replaceValue The replacement text.
*/
replace(searchValue: { [Symbol.replace](string: string, replaceValue: string): string; }, replaceValue: string): string;
/**
* Replaces text in a string, using an object that supports replacement within a string.
* @param searchValue A object can search for and replace matches within a string.
* @param replacer A function that returns the replacement text.
*/
replace(searchValue: { [Symbol.replace](string: string, replacer: (substring: string, ...args: any[]) => string): string; }, replacer: (substring: string, ...args: any[]) => string): string;
/**
* Finds the first substring match in a regular expression search.
* @param searcher An object which supports searching within a string.
*/
search(searcher: { [Symbol.search](string: string): number; }): number;
/**
* Split a string into substrings using the specified separator and return them as an array.
* @param splitter An object that can split a string.
* @param limit A value used to limit the number of elements returned in the array.
*/
split(splitter: { [Symbol.split](string: string, limit?: number): string[]; }, limit?: number): string[];
}
interface ArrayBuffer {
readonly [Symbol.toStringTag]: string;
}
interface DataView {
readonly [Symbol.toStringTag]: string;
}
interface Int8Array {
readonly [Symbol.toStringTag]: "Int8Array";
}
interface Uint8Array {
readonly [Symbol.toStringTag]: "Uint8Array";
}
interface Uint8ClampedArray {
readonly [Symbol.toStringTag]: "Uint8ClampedArray";
}
interface Int16Array {
readonly [Symbol.toStringTag]: "Int16Array";
}
interface Uint16Array {
readonly [Symbol.toStringTag]: "Uint16Array";
}
interface Int32Array {
readonly [Symbol.toStringTag]: "Int32Array";
}
interface Uint32Array {
readonly [Symbol.toStringTag]: "Uint32Array";
}
interface Float32Array {
readonly [Symbol.toStringTag]: "Float32Array";
}
interface Float64Array {
readonly [Symbol.toStringTag]: "Float64Array";
}
interface ArrayConstructor {
readonly [Symbol.species]: ArrayConstructor;
}
interface MapConstructor {
readonly [Symbol.species]: MapConstructor;
}
interface SetConstructor {
readonly [Symbol.species]: SetConstructor;
}
interface ArrayBufferConstructor {
readonly [Symbol.species]: ArrayBufferConstructor;
}
/FrameDynamics-0.1.8.tar.gz/FrameDynamics-0.1.8/README.md
# FrameDynamics
FrameDynamics is a python package that provides numerical simulations for the
field of pulse sequence development in magnetic resonance.
A coupling Hamiltonian is modulated in the toggling or interaction frame
according to the specified pulse sequence and offset frequencies.
The trajectory of the time-dependent Hamiltonian can be plotted or used
to calculate the zeroth order average Hamiltonian (higher order terms might be
available in following versions of FrameDynamics).
Theoretical background can be found in the publication (coming soon...).
## Installation
The python package can be installed via PyPI:
```
pip install FrameDynamics
```
## Simulations
Two examples shall be given: the WAHUHA sequence and a heteronuclear echo consisting of a shaped pulse and a hard 180° pulse.
More examples can be found in the FrameDynamics GitHub repository ([link](https://github.com/jdhaller/FrameDynamics/tree/main/examples)).
### Example #1: WAHUHA sequence
Initialize frame:
```Python
from FrameDynamics.Frame import Frame
frame = Frame(["I", "J"])
```
Specify the interaction:
```Python
interaction = frame.set_interaction("I", "J", "Dstrong")
```
Define the pulse sequence:
```Python
tau = 5 * 10**(-5)
frame.delay(tau)
frame.pulse(["I", "J"], degree=90, amplitude=10**(5), phase=0)
frame.delay(tau)
frame.pulse(["I", "J"], degree=90, amplitude=10**(5), phase=3)
frame.delay(2*tau)
frame.pulse(["I", "J"], degree=90, amplitude=10**(5), phase=1)
frame.delay(tau)
frame.pulse(["I", "J"], degree=90, amplitude=10**(5), phase=2)
frame.delay(tau)
```
Start the simulations and plot trajectories without using multiprocessing (default: `MP=False`).
```Python
frame.start(traject=True)
frame.plot_traject(interaction, save="WAHUHA.png")
```
### Example #2: Reburp pulse
Load the Frame and Block classes. The Block class is used to align different blocks
of the pulse sequence (e.g. the Reburp pulse and the 180° hard pulse in a heteronuclear
echo).
```Python
import numpy as np
from FrameDynamics.Frame import Frame
from FrameDynamics.Block import Block
frame = Frame(["I", "S"])
```
Specify the interaction:
```Python
interaction = frame.set_interaction("I", "S", "Jweak")
```
Specify offset frequencies:
```Python
off = 5000
offsetsI = np.linspace(-off, off, 61)
offsetsS = np.linspace(-off, off, 61)
frame.set_offset("I", offsetsI)
frame.set_offset("S", offsetsS)
```
Load pulse shape to array:
```Python
Reburp = frame.load_shape("Reburp.1000")
```
**After** the interaction and offsets are set for the Frame object, one can now
initialize the Block class for each block. The frame-object has to be passed to the Block() class:
```Python
block1 = Block(frame, ["I"])
block2 = Block(frame, ["S"])
```
Define a Reburp pulse on `"I"` and a hard pulse on `"S"` in the first two lines.
Then center-align both block-elements (Reburp and hard pulse) within the frame-object.
```Python
block1.shape(["I"], Reburp, length=1000*10**(-6), amplitude=6264.8, phase=1)
block2.pulse(["S"], degree=180, amplitude=10000, phase=1)
frame.align(block1, block2, alignment="center")
```
Start the simulations using multiprocessing (`MP=True`).
If using multiprocessing on Windows, the scope has to be resolved (`if __name__ == "__main__"`). Note that plotting and data retrieval have to be done in the same scope.
```Python
if __name__ == "__main__":
frame.start(MP=True, traject=True)
# Create offset-dependent 2D graph of the zeroth order average
# Hamiltonian (H0) that is plotted against both offsets
frame.plot_H0_2D(interaction, zlim=1)
# Create offset-dependent 1D graph of H0 where offset of spin "S"
# is fixed to specified value (offset=0.)
frame.plot_H0_1D(interaction, "S", offset=0.)
# Plot trajectories for specified interaction and operators
# (the given operators are default values)
frame.plot_traject(interaction, operators=["x1","y1","z1","xx","yy","zz"])
# Retrieve trajectories and the resulting average Hamiltonian.
# Dictionaries are returned for specified offsets and operators.
time, traject = frame.get_traject(interaction, offsets={"I": 0, "S": 300}, operators=["1z", "zz"])
average_Hamiltonian = frame.get_results(interaction, operators=["zz"])
```
| PypiClean |
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/docs/devel/translations.rst
Managing translations
=====================
.. _adding-translation:
Adding new translations
-----------------------
New strings can be made available for translation when they appear in the base file,
called :guilabel:`Template for new translations` (see :ref:`component`).
If your file format doesn't require such a file, as is the case with most monolingual
translation flows, you can start with empty files.
New languages can be added right away when requested by a user in Weblate, or a
notification will be sent to project admins for approval and manual addition.
This can be done using :ref:`component-new_lang` in :ref:`component`.
.. note::
If you add a language file in connected remote repository, respective
translation will be added to the component when Weblate updates local repository.
More info on the repository update settings can be found on the :ref:`update-vcs`.
.. seealso::
:ref:`component-new_lang`,
:ref:`new-translations`
.. _removing-translation:
Removing existing translations
------------------------------
Languages, components, or the projects they are in, can be removed (deleted from Weblate
and remote repository if used) from the menu :guilabel:`Manage` ↓ :guilabel:`Removal`
of each project, component, or language.
Initiating the :guilabel:`Removal` action shows the list of components to be removed.
You have to enter the object's `slug` to confirm the removal. The `slug` is the
project's, language's, or component's pathname as it can be seen in the URL.
If you want to remove just some specific strings, there are following ways:
- Manually in the source file. They will be removed from the
translation project as well upon Weblate's repository update.
.. versionadded:: 4.5
- In Weblate’s UI via button :guilabel:`Tools` ↓ :guilabel:`Remove` while editing the string.
  This behaves differently depending on the file format, see :ref:`component-manage_units`.
.. note::
If you delete a language file in connected remote repository, respective
translation will be removed from the component when Weblate updates local repository.
More info on the repository update settings can be found on the :ref:`update-vcs`.
.. _variants:
String variants
---------------
Variants are useful to group several strings together so that translators can
see all variants of the string at one place.
.. hint::
Abbreviations (shortened forms, contractions) are a good example of variants.
Automated key based variants
++++++++++++++++++++++++++++
.. versionadded:: 3.11
You can define regular expression to group the strings based on the key of
monolingual translations in the :ref:`component`:
.. image:: /screenshots/variants-settings.png
In case the :guilabel:`Key` matches the expression, the matching part is
removed to generate the root key of the variant. Then all the strings with the same
root key become part of a single variant group, also including the string with
the key exactly matching the root key.
The following table lists some usage examples:
+---------------------------+-------------------------------+-----------------------------------------------+
| Use case | Regular expression variant | Matched translation keys |
+===========================+===============================+===============================================+
| Suffix identification | ``(Short|Min)$`` | ``monthShort``, ``monthMin``, ``month`` |
+---------------------------+-------------------------------+-----------------------------------------------+
| Inline identification | ``#[SML]`` | ``dial#S.key``, ``dial#M.key``, ``dial.key`` |
+---------------------------+-------------------------------+-----------------------------------------------+
Manual variants
+++++++++++++++
.. versionadded:: 4.5
You can manually link specific strings using ``variant:SOURCE`` flag. This can
be useful for bilingual translations which do not have keys to group strings
automatically, or to group strings whose keys do not match, but
should be considered together when translating.
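For example, to group an abbreviation with its spelled-out form, the abbreviated
string can be given a flag referencing the source text of the full string
(the strings used here are only illustrative)::

    variant:Password

With this flag set on a string such as ``Pwd``, both strings are then shown
together as variants of ``Password`` when translating.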
The additional variant for a string can also be added using the :guilabel:`Tools` while translating
(when :ref:`component-manage_units` is turned on):
.. image:: /screenshots/glossary-tools.png
.. note::
   The variant source string has to be at most 768 characters long. This is a
   technical limitation due to compatibility with the MySQL database.
.. seealso::
:ref:`custom-checks`,
:ref:`glossary-variants`
Variants while translating
++++++++++++++++++++++++++
The variant is later grouped when translating:
.. image:: /screenshots/variants-translate.png
.. _labels:
String labels
-------------
Split component translation strings into categories by text and colour in the project configuration.
.. image:: /screenshots/labels.png
.. hint::
Labels can be assigned to units in :ref:`additional` by bulk editing, or using the :ref:`addon-weblate.flags.bulk` add-on.
| PypiClean |
/MatchZoo-test-1.0.tar.gz/MatchZoo-test-1.0/matchzoo/utils/early_stopping.py |
import typing
import torch
import numpy as np
class EarlyStopping:
"""
EarlyStopping stops training if no improvement after a given patience.
    :param patience: Number of events to wait if no improvement and then
stop the training.
:param should_decrease: The way to judge the best so far.
:param key: Key of metric to be compared.
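    :Example:
        A minimal usage sketch (``model``, ``validate`` and ``num_epochs`` are
        placeholder names, and ``result`` only needs to be indexable by ``key``)::

            stopper = EarlyStopping(patience=3, key='ndcg')
            for _ in range(num_epochs):
                stopper.update({'ndcg': validate(model)})
                if stopper.should_stop_early:
                    break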
"""
def __init__(
self,
patience: typing.Optional[int] = None,
should_decrease: bool = None,
key: typing.Any = None
):
"""Early stopping Constructor."""
self._patience = patience
self._key = key
self._best_so_far = 0
self._epochs_with_no_improvement = 0
self._is_best_so_far = False
self._early_stop = False
def state_dict(self) -> typing.Dict[str, typing.Any]:
"""A `Trainer` can use this to serialize the state."""
return {
'patience': self._patience,
'best_so_far': self._best_so_far,
'is_best_so_far': self._is_best_so_far,
'epochs_with_no_improvement': self._epochs_with_no_improvement,
}
def load_state_dict(
self,
state_dict: typing.Dict[str, typing.Any]
) -> None:
"""Hydrate a early stopping from a serialized state."""
self._patience = state_dict["patience"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = \
state_dict["epochs_with_no_improvement"]
def update(self, result: list):
"""Call function."""
score = result[self._key]
if score > self._best_so_far:
self._best_so_far = score
self._is_best_so_far = True
self._epochs_with_no_improvement = 0
else:
self._is_best_so_far = False
self._epochs_with_no_improvement += 1
@property
    def best_so_far(self) -> float:
        """Returns the best score so far."""
return self._best_so_far
@property
def is_best_so_far(self) -> bool:
"""Returns true if it is the best so far."""
return self._is_best_so_far
@property
def should_stop_early(self) -> bool:
"""Returns true if improvement has stopped for long enough."""
if not self._patience:
return False
else:
            return self._epochs_with_no_improvement >= self._patience
| PypiClean |
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/svgicons/management/commands/create_svgicons.py |
from __future__ import unicode_literals
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from cubane.svgicons import get_svgicons_filename
from cubane.svgicons import get_combined_svg
from cubane.lib.resources import get_resources
from cubane.lib.resources import get_resource_targets
from cubane.lib.resources import load_resource_version_identifier
import os
import codecs
class Command(BaseCommand):
"""
Generate svg icon sheet files - Management command.
"""
args = ''
help = 'Generate svg icon sheet files'
def add_arguments(self, parser): # pragma: no cover
parser.add_argument(
'--identifier', action='store', dest='identifier',
default=None,
help='Nominates the version identifier for which SVG Icons are generated. Defaults to the "current" version identifier generated by the last deployment.',
)
def handle(self, *args, **options):
"""
Run command.
"""
self.verbose('Processing svgicons...Please Wait...')
identifier = options.get('identifier')
if not identifier:
identifier = load_resource_version_identifier()
for target in get_resource_targets():
# build filename
filename = get_svgicons_filename(target, identifier)
# build icon sheet
resources = get_resources(target, 'svg')
resources_with_style = get_resources(target, 'svg', 'with-style')
if len(resources) > 0 or len(resources_with_style) > 0:
self.verbose('> %s' % filename)
svg = get_combined_svg(resources, resources_with_style)
# folder exists?
if not os.path.isdir(settings.STATIC_ROOT):
os.makedirs(settings.STATIC_ROOT)
# write file
path = os.path.join(settings.STATIC_ROOT, filename)
with codecs.open(path, 'w', encoding='utf-8') as f:
f.write(svg)
self.verbose('Complete.')
def verbose(self, msg): # pragma: no cover
"""
        Print given verbose message or keep quiet if we are under test.
"""
if not settings.TEST:
            print msg
| PypiClean |
/DB_helper-1.1.0.tar.gz/DB_helper-1.1.0/DB_helper/__init__.py |
import sqlite3
def create_db():
'''
    If you do not have a database file, create one here.
    The function returns the file name.
'''
with open('data.db','w'):
pass
return 'data.db'
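# Usage sketch (table and column names below are only illustrative):
#   from DB_helper import create_db, DataBase
#   db = DataBase(create_db())
#   db.create_table("users", "name TEXT, age INTEGER")
#   db.insert_data("users", ("Alice", "30"))
#   db.update_data("users", "age", "name", "31", "Alice")
#   rows = db.get_data("users")  # -> [(rowid, name, age), ...]
#   db.delete_data("users", "name", "Alice")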
def _get_connection(file_name):
'''
    Connect to the database and return the connection
'''
return sqlite3.connect(file_name)
class DataBase:
def __init__(self, file_name: str):
'''
        file_name - name of the database file; it can be obtained
        from create_db() or you can pass your own file name
'''
self.file_name = file_name
def create_table(self, table_name: str, columns: str):
'''
        Create a table
        table_name - table name
        columns - column definitions in the form "{NAME} TYPE (sql), {NAME} TYPE (sql)"
'''
conn = _get_connection(self.file_name)
cursor = conn.cursor()
cursor.execute(f"""CREATE TABLE {table_name}
({columns})
""")
conn.close()
def insert_data(self, table_name: str, data: tuple):
'''
        Insert data into a table
        table_name - table name
        data - tuple of values
'''
conn = _get_connection(self.file_name)
cursor = conn.cursor()
count = len(data)
values = '?,' * count
values = values[:-1]
cursor.execute(f"""INSERT INTO {table_name}
VALUES ({values})""", data
)
conn.commit()
conn.close()
def update_data(self, table_name: str, set_name: str, where_name: str, set_value: str, where_value: str):
'''
        Update data in a table
        table_name - table name
        set_name - column to change
        where_name - column to filter on
        set_value - new value
        where_value - value of the filter column
'''
conn = _get_connection(self.file_name)
cursor = conn.cursor()
sql = f"""
UPDATE {table_name}
SET {set_name} = ?
WHERE {where_name} = ?
"""
cursor.execute(sql, (set_value, where_value,))
conn.commit()
conn.close()
def delete_data(self, table_name: str, where_name: str, value: str):
'''
        Delete data
        table_name - table name
        where_name - column to filter on
        value - value of the filter column
'''
conn = _get_connection(self.file_name)
cursor = conn.cursor()
sql = f"DELETE FROM {table_name} WHERE {where_name} = ?"
cursor.execute(sql, (value,))
conn.commit()
conn.close()
def get_data(self, table_name: str):
'''
        Retrieve data
        table_name - table name
        returns a list of rows
'''
conn = _get_connection(self.file_name)
cursor = conn.cursor()
rows = []
for row in cursor.execute(f"SELECT rowid, * FROM {table_name} ORDER BY rowid"):
rows.append(row)
conn.close()
        return rows
| PypiClean |
/ChemSpiPy-2.0.0.tar.gz/ChemSpiPy-2.0.0/README.rst |
ChemSpiPy
=========
.. image:: https://img.shields.io/pypi/v/ChemSpiPy.svg?style=flat
:target: https://pypi.python.org/pypi/ChemSpiPy
.. image:: https://img.shields.io/pypi/l/ChemSpiPy.svg?style=flat
:target: https://github.com/mcs07/ChemSpiPy/blob/master/LICENSE
.. image:: https://img.shields.io/travis/mcs07/ChemSpiPy/master.svg?style=flat
:target: https://travis-ci.org/mcs07/ChemSpiPy
.. image:: https://img.shields.io/coveralls/mcs07/ChemSpiPy/master.svg?style=flat
:target: https://coveralls.io/r/mcs07/ChemSpiPy?branch=master
ChemSpiPy provides a way to interact with ChemSpider in Python. It allows chemical searches, chemical file downloads,
depiction and retrieval of chemical properties::
>>> from chemspipy import ChemSpider
>>> cs = ChemSpider('<YOUR-API-KEY>')
>>> c1 = cs.get_compound(236) # Specify compound by ChemSpider ID
>>> c2 = cs.search('benzene') # Search using name, SMILES, InChI, InChIKey, etc.
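Compound properties can then be read as attributes of the returned objects
(the attribute names below are assumptions; check the API documentation linked
below for the exhaustive list)::

    >>> c1.molecular_formula
    >>> c1.smiles
    >>> c1.common_name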
Installation
------------
Install ChemSpiPy using conda::
conda install -c conda-forge chemspipy
or using pip::
pip install chemspipy
Alternatively, try one of the other `installation options`_.
Documentation
-------------
Full documentation is available at https://chemspipy.readthedocs.io/en/stable/.
The `general documentation for the ChemSpider API`_ is also a useful resource.
Contribute
----------
- Feature ideas and bug reports are welcome on the `Issue Tracker`_.
- Fork the `source code`_ on GitHub, make changes and file a pull request.
License
-------
ChemSpiPy is licensed under the `MIT license`_.
This project was originally forked from `ChemSpiPy by Cameron Neylon`_, which has been released into the public domain.
.. _`installation options`: https://chemspipy.readthedocs.io/en/stable/guide/install.html
.. _`source code`: https://github.com/mcs07/ChemSpiPy
.. _`Issue Tracker`: https://github.com/mcs07/ChemSpiPy/issues
.. _`MIT license`: https://github.com/mcs07/ChemSpiPy/blob/master/LICENSE
.. _`ChemSpiPy by Cameron Neylon`: https://github.com/cameronneylon/ChemSpiPy
.. _`general documentation for the ChemSpider API`: https://developer.rsc.org/compounds-v1/apis
| PypiClean |
/BigJob2-0.54.post73.tar.gz/BigJob2-0.54.post73/util/bigjob_usage.py |
# <markdowncell>
# # Generating BigJob Usage Statistics out of Redis entries
# Read `cus` and `pilots` from Redis
# <codecell>
import pandas as pd
import matplotlib.pyplot as plt
import os, sys
import archive
import datetime
import ast
# <codecell>
# Attempt to restore old data frame
cus_df = None
pilot_df = None
if os.path.exists("cus.df") and os.path.exists("pilot.df"):
cus_df = pd.load("cus.df") #pd.read_csv("cus.csv", index_col=0, parse_dates=False, date_parser=)
pilot_df = pd.load("pilot.df") #pd.read_csv("pilot.csv", index_col=0, parse_dates=False, date_parser=), dat
max_cus_date = cus_df.index.max()
max_pilots_date = pilot_df.index.max()
print "Restored data frames until %s"%max_cus_date
# <codecell>
# Download new data
# Redis Service to connect to:
# redis://[email protected]:6379
# redis://localhost
rd = archive.RedisDownloader("redis://[email protected]:6379")
#rd = archive.RedisDownloader("redis://localhost:6379")
pilots = rd.get_pilots()
cus = rd.get_cus()
# <markdowncell>
# ## Compute Units Executed per Day
# <codecell>
# make sure only new entries are loaded into data frame
max_cus_date = None
try:
max_cus_date = cus_df.index.max()
except:
pass
timestamp_index = []
cus_new = []
for i in cus:
if max_cus_date == None or datetime.datetime.utcfromtimestamp(float(i["start_time"]))>max_cus_date:
# print "add " + str(datetime.datetime.utcfromtimestamp(float(i["start_time"])))
timestamp_index.append(datetime.datetime.utcfromtimestamp(float(i["start_time"])))
cus_new.append(i)
#print cus_new
if len(cus_new) > 0:
cus_df_new = pd.DataFrame(cus_new, index=timestamp_index, columns=['Executable', 'NumberOfProcesses', "SPMDVariation", "start_time", "end_queue_time", "start_staging_time", "end_time"])
try:
cus_df = pd.concat([cus_df, cus_df_new])
except:
cus_df = cus_df_new
# <codecell>
cus_df_h = cus_df["Executable"].resample("D", how="count")
cus_df_h.plot(color='k', alpha=0.7)
plt.ylabel("Number of CUs Executed")
plt.xlabel("Day")
plt.savefig("number_cus_per_day.pdf", format="pdf", bbox_inches='tight', pad_inches=0.1)
# <markdowncell>
# ## Compute Unit Types
#
# How many sequential versus parallel (MPI) CUs are executed?
# <codecell>
spmd = cus_df["SPMDVariation"].astype("object")
spmd[spmd.isnull()]="single"
spmd.value_counts().plot(kind="bar", color='k', alpha=0.7)
plt.ylabel("Number of CUs")
plt.ylabel("CU SPMD Variation")
plt.savefig("cu_type.pdf", format="pdf", bbox_inches='tight', pad_inches=0.1)
# <codecell>
cus_df["Executable"].value_counts().plot(kind="bar", color='k', alpha=0.7)
plt.ylabel("Number CUs")
plt.xlabel("CU Executable")
plt.savefig("cu_executable.pdf", format="pdf", bbox_inches='tight', pad_inches=0.1)
# <markdowncell>
# ## CU Runtime Distribution
# <codecell>
runtimes = cus_df.apply(lambda row: float(row["end_time"]) - float(row["end_queue_time"]), axis=1)
runtimes.hist(bins=50)
plt.ylabel("Number of Events")
plt.xlabel("CU Runtime (in sec)")
plt.savefig("cu_runtime.pdf", format="pdf", bbox_inches='tight', pad_inches=0.1)
runtimes.describe()
# <markdowncell>
# ## Pilots Executed per Day
# Extract pilot desciptions out of Redis entries
# <codecell>
print "Number of Pilots: %d Number CUs: %d Executed since: %s"%(len(pilots), len(cus), str(cus_df.index.min()))
# <codecell>
pilots = [i for i in pilots if i.has_key("start_time")]
max_pilot_date = None
try:
    max_pilot_date = pilot_df.index.max()
except:
pass
timestamp_index = []
pilot_new = []
for i in pilots:
if max_pilot_date == None or datetime.datetime.utcfromtimestamp(float(i["start_time"]))>max_pilot_date:
timestamp_index.append(datetime.datetime.utcfromtimestamp(float(i["start_time"])))
pilot_new.append(ast.literal_eval(i["description"]))
#print cus_new
if len(pilot_new) > 0:
pilot_df_new = pd.DataFrame(pilot_new, index=timestamp_index, columns=['service_url', "number_of_processes"])
try:
pilot_df = pd.concat([pilot_df, pilot_df_new])
except:
pilot_df = pilot_df_new
# <codecell>
pilot_df_h = pilot_df['service_url'].resample("D", how="count")
pilot_df_h.plot(kind="line", color='k', alpha=0.7)
plt.ylabel("Number of Pilots")
plt.xlabel("Day")
plt.savefig("number_pilots.pdf", format="pdf", bbox_inches='tight', pad_inches=0.1)
# <markdowncell>
# ## Store Dataframes for later usage
# <codecell>
cus_df.save("cus.df")
pilot_df.save("pilot.df")
date_string = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
cus_df.to_csv("cus-"+date_string+".csv", index_label="Date")
pilot_df.to_csv("pilot-"+date_string+".csv", index_label="Date")
| PypiClean |
/CRIkit2-0.4.4.tar.gz/CRIkit2-0.4.4/crikit/ui/dialog_AnscombeParams.py |
import sys as _sys
import os as _os
import numpy as _np
# Generic imports for QT-based programs
from PyQt5.QtWidgets import (QApplication as _QApplication,
QDialog as _QDialog)
# Import from Designer-based GUI
from crikit.ui.qt_CalcAnscombeParameters import Ui_Dialog
# Generic imports for MPL-incorporation
import matplotlib as _mpl
_mpl.use('Qt5Agg')
_mpl.rcParams['font.family'] = 'sans-serif'
_mpl.rcParams['font.size'] = 10
from crikit.preprocess.standardize import calc_anscombe_parameters
class DialogCalcAnscombeParams(_QDialog):
"""
Methods
--------
References
----------
"""
def __init__(self, parent=None, dark_array=None, rep_array=None, axis=None, rng=None, dark_sub=None):
super(DialogCalcAnscombeParams, self).__init__(parent) ### EDIT ###
self.ui = Ui_Dialog() ### EDIT ###
self.ui.setupUi(self) ### EDIT ###
if (dark_array is None) or (rep_array is None) or (axis is None):
raise ValueError('dark_array, rep_array, and axis must have inputs')
self.dark = dark_array
self.rep = rep_array
self.rng = rng
self.axis = axis
self.ui.spinBoxAxis.setValue(self.axis)
self.all_rng = self.ui.checkBoxAllFreqRange.isChecked()
self.n_dark = dark_array.shape[axis]
self.use_n_dark = 1*self.n_dark
self.ui.spinBoxNSpectraDark.setMaximum(self.n_dark)
self.ui.spinBoxNSpectraDark.setValue(self.n_dark)
self.ui.spinBoxSkipNDark.setValue(0)
self.ui.spinBoxSkipNDark.setMaximum(self.n_dark-2)
self.n_rep = rep_array.shape[axis]
self.use_n_rep = 1*self.n_rep
self.ui.spinBoxNSpectraRep.setMaximum(self.n_rep)
self.ui.spinBoxNSpectraRep.setValue(self.n_rep)
self.ui.spinBoxSkipNRep.setValue(0)
self.ui.spinBoxSkipNRep.setMaximum(self.n_dark-2)
if dark_sub is not None:
self.dark_sub = dark_sub
else:
self.dark_sub = False
self.ui.checkBoxDarkSub.setChecked(self.dark_sub)
self.updateInputValues()
self.updateOutputValues()
self.ui.checkBoxAllFreqRange.stateChanged.connect(self.updateOutputValues)
self.ui.checkBoxDarkSub.stateChanged.connect(self.updateOutputValues)
self.ui.spinBoxNSpectraDark.editingFinished.connect(self.updateOutputValues)
self.ui.spinBoxNSpectraRep.editingFinished.connect(self.updateOutputValues)
self.ui.spinBoxAxis.editingFinished.connect(self.updateOutputValues)
self.ui.spinBoxSkipNDark.editingFinished.connect(self.updateOutputValues)
self.ui.spinBoxSkipNRep.editingFinished.connect(self.updateOutputValues)
self.ui.pushButtonOk.clicked.connect(self.accept)
self.ui.pushButtonCancel.clicked.connect(self.reject)
self.ui.pushButtonOk.setFocus(False)
def updateInputValues(self):
self.axis = self.ui.spinBoxAxis.value()
self.all_rng = self.ui.checkBoxAllFreqRange.isChecked()
self.skip_dark = self.ui.spinBoxSkipNDark.value()
self.ui.spinBoxNSpectraDark.setMaximum(self.n_dark-self.skip_dark)
if self.ui.spinBoxNSpectraDark.value() > self.n_dark-self.skip_dark:
self.ui.spinBoxNSpectraDark.setValue(self.n_dark-self.skip_dark)
self.skip_rep = self.ui.spinBoxSkipNRep.value()
self.ui.spinBoxNSpectraRep.setMaximum(self.n_rep-self.skip_rep)
if self.ui.spinBoxNSpectraRep.value() > self.n_rep-self.skip_rep:
self.ui.spinBoxNSpectraRep.setValue(self.n_rep-self.skip_rep)
self.use_n_dark = self.ui.spinBoxNSpectraDark.value()
self.use_n_rep = self.ui.spinBoxNSpectraRep.value()
self.dark_sub = self.ui.checkBoxDarkSub.isChecked()
def updateOutputValues(self):
self.updateInputValues()
# NOTE: rng is dealt with in calc_anscombe_parameters; thus, full
# spectral range is passed
if self.axis == 0:
slicer_dark = (slice(self.skip_dark, self.use_n_dark + self.skip_dark), slice(None))
slicer_rep = (slice(self.skip_rep, self.use_n_rep + self.skip_rep), slice(None))
else:
slicer_dark = (slice(None), slice(self.skip_dark, self.use_n_dark + self.skip_dark))
slicer_rep = (slice(None), slice(self.skip_rep, self.use_n_rep + self.skip_rep))
if self.all_rng:
values = calc_anscombe_parameters(self.dark[slicer_dark], self.rep[slicer_rep], self.axis, None, self.dark_sub)
else:
values = calc_anscombe_parameters(self.dark[slicer_dark], self.rep[slicer_rep], self.axis, self.rng, self.dark_sub)
self.ui.spinBoxGMean.setValue(values['g_mean'].mean())
self.ui.spinBoxGStdDev.setValue(values['g_std'].mean())
self.ui.spinBoxAlphaMean.setValue(values['alpha'].mean())
self.ui.spinBoxAlphaWMean.setValue(values['weighted_mean_alpha'])
self.values = values
@staticmethod
def dialogCalcAnscombeParams(parent=None, dark_array=None, rep_array=None, axis=None, rng=None, dark_sub=None):
"""
Calculate Anscombe Parameters
Parameters
----------
None : None
Returns
----------
"""
dialog = DialogCalcAnscombeParams(parent=parent, dark_array=dark_array, rep_array=rep_array, axis=axis, rng=rng, dark_sub=dark_sub)
result = dialog.exec_()
if result == 1:
ret = dialog.values
return ret
else:
return None
if __name__ == '__main__':
app = _QApplication(_sys.argv)
app.setStyle('Cleanlooks')
n_spectra = 1000 # number of indep. spectra
n_lambda = 901 # number of wavelengths in each spectrum
f = _np.linspace(0,4000,n_lambda) # Frequency (au)
y = 40e2*_np.exp(-f**2/(2*350**2)) + 50e1*_np.exp(-(f-2900)**2/(2*250**2)) # signal
g_mean = 100
g_std = 25
p_alpha = 10
y_array = _np.dot(_np.ones((n_spectra,1)),y[None,:])
y_noisy = p_alpha*_np.random.poisson(y_array) + g_std*_np.random.randn(*y_array.shape) + g_mean
dark = g_std*_np.random.randn(*y_array.shape) + g_mean
out = DialogCalcAnscombeParams.dialogCalcAnscombeParams(dark_array=dark, rep_array=y_noisy, axis=0)
print('Returns: {}'.format(out))
    _sys.exit()
| PypiClean |
/MetaSBT-0.1.2.tar.gz/MetaSBT-0.1.2/metasbt/modules/index.py |
__author__ = "Fabio Cumbo ([email protected])"
__version__ = "0.1.0"
__date__ = "Apr 27, 2023"
import argparse as ap
import errno
import hashlib
import math
import multiprocessing as mp
import os
import shutil
import sys
import time
from functools import partial
from logging import Logger
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import numpy # type: ignore
import tqdm # type: ignore
# Local modules are not available when the main controller
# tries to load them for accessing their variables
try:
# Load utility functions
from utils import ( # type: ignore # isort: skip
bfaction,
build_sh,
checkm,
dereplicate_genomes,
estimate_bf_size,
filter_checkm_tables,
get_file_info,
howdesbt,
init_logger,
load_input_table,
load_manifest,
number,
optimal_k,
println,
)
except Exception:
pass
# Define the module name
TOOL_ID = "index"
# Define the list of dependencies
DEPENDENCIES = [
"checkm",
"howdesbt",
"kitsune",
"ntcard",
"wget",
]
# Define the list of input files and folders
FILES_AND_FOLDERS = [
"--db-dir", # Database folder path
"--input-list", # File with a list of paths to the input genomes
"--log", # Path to the log file
"--tmp-dir", # Temporary folder path
]
# Define the url to the NCBI taxdump
TAXDUMP_URL = "https://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump.tar.gz"
# Define the url to the NCBI GenBank Assembly Summary
# https://ftp.ncbi.nlm.nih.gov/genomes/README_assembly_summary.txt
# https://www.ncbi.nlm.nih.gov/assembly/help/
ASSEMBLY_SUMMARY_URL = "https://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_genbank.txt"
# Consider a genome as a reference if it contains one of the following tags
# under the excluded_from_refseq column in the NCBI GenBank Assembly Summary table
# https://www.ncbi.nlm.nih.gov/assembly/help/anomnotrefseq/
REFERENCE_TAGS = [
"derived from single cell",
"derived from surveillance project",
"assembly from type material",
"assembly from synonym type material",
"assembly designated as neotype",
"assembly designated as reftype",
"assembly from pathotype material",
"assembly from proxytype material",
"missing strain identifier",
"genus undefined",
"from large multi-isolate project"
]
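# Example invocation (a sketch; paths and parameter values are only illustrative,
# and the module is normally launched through the main MetaSBT controller):
#   python index.py --db-dir ~/metasbt_db --input-list genomes.tsv \
#                   --extension fna.gz --kmer-len 21 --estimate-filter-size \
#                   --tmp-dir ~/metasbt_tmp --nproc 4 --cleanup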
def read_params():
"""
Read and test input arguments
:return: The ArgumentParser object
"""
p = ap.ArgumentParser(
prog=TOOL_ID,
description=(
"Build a database with a set of genomes indexed with HowDeSBT. "
"Genomes are provided as inputs or automatically downloaded from NCBI GenBank"
),
formatter_class=ap.ArgumentDefaultsHelpFormatter,
)
# General arguments
general_group = p.add_argument_group("General arguments")
general_group.add_argument(
"--cluster-prefix",
type=str,
default="MSBT",
dest="cluster_prefix",
help="Prefix of clusters numerical identifiers",
)
general_group.add_argument(
"--cleanup",
action="store_true",
default=False,
help="Remove temporary data at the end of the pipeline",
)
general_group.add_argument(
"--db-dir",
type=os.path.abspath,
required=True,
dest="db_dir",
help="This is the database directory with the taxonomically organised sequence bloom trees",
)
general_group.add_argument(
"--extension",
type=str,
required=True,
choices=["fa", "fa.gz", "fasta", "fasta.gz", "fna", "fna.gz"],
help=(
"Specify the input genome files extension. "
"All the input genomes must have the same file extension before running this module"
),
)
general_group.add_argument(
"--flat-structure",
action="store_true",
default=False,
dest="flat_structure",
help=(
"Organize genomes without any taxonomic organization. "
"This will lead to the creation of a single sequence bloom tree"
),
)
general_group.add_argument(
"--input-list",
type=os.path.abspath,
required=True,
dest="input_list",
help=(
"Path to the input table with a list of genome file paths and an optional column with their taxonomic labels. "
"Please note that the input genome files must all have the same extension and can be Gzip compressed (e.g.: *.fna.gz)"
),
)
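    # A row of the --input-list table looks like the following (tab-separated;
    # the taxonomic label is optional, and the path and label here are only illustrative):
    #   /path/to/genome1.fna.gz<TAB>k__Bacteria|p__Proteobacteria|...|s__Escherichia_coli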
general_group.add_argument(
"--kmer-len",
type=number(int, minv=4),
dest="kmer_len",
help="This is the length of the kmers used for building bloom filters",
)
general_group.add_argument(
"--limit-estimation-number",
type=number(int, minv=1),
dest="limit_estimation_number",
help=(
"Limit the number of genomes per group to be considered as input for kitsune and ntCard. "
"Must be used in conjunction with --estimate-kmer-size and/or --estimate-filter-size. "
"It overrides --limit-estimation-percentage in case of a number > 0"
),
)
general_group.add_argument(
"--limit-estimation-percentage",
type=number(float, minv=sys.float_info.min, maxv=100.0),
default=100.0,
dest="limit_estimation_percentage",
help=(
"Percentage on the total number of genomes per group to be considered as input for kitsune and ntCard. "
"Must be used in conjunction with --estimate-kmer-size and/or --estimate-filter-size"
),
)
general_group.add_argument(
"--log",
type=os.path.abspath,
help="Path to the log file. Used to keep track of messages and errors printed on the stdout and stderr"
)
general_group.add_argument(
"--nproc",
type=number(int, minv=1, maxv=os.cpu_count()),
default=1,
help="This argument refers to the number of processors used for parallelizing the pipeline when possible",
)
general_group.add_argument(
"--parallel",
type=number(int, minv=1, maxv=os.cpu_count()),
default=1,
help="Maximum number of processors to process each NCBI tax ID in parallel",
)
general_group.add_argument(
"--tmp-dir",
type=os.path.abspath,
required=True,
dest="tmp_dir",
help="Path to the folder for storing temporary data",
)
general_group.add_argument(
"--verbose",
action="store_true",
default=False,
help="Print messages and errors on the stdout"
)
general_group.add_argument(
"-v",
"--version",
action="version",
version='"{}" version {} ({})'.format(TOOL_ID, __version__, __date__),
help='Print the "{}" version and exit'.format(TOOL_ID),
)
# Group of arguments for CheckM
qc_group = p.add_argument_group("CheckM: Quality control")
qc_group.add_argument(
"--completeness",
type=number(float, minv=0.0, maxv=100.0),
default=0.0,
help="Input genomes must have a minimum completeness percentage before being processed and added to the database",
)
qc_group.add_argument(
"--contamination",
type=number(float, minv=0.0, maxv=100.0),
default=100.0,
help="Input genomes must have a maximum contamination percentage before being processed and added to the database",
)
qc_group.add_argument(
"--pplacer-threads",
type=number(int, minv=1, maxv=os.cpu_count()),
default=1,
dest="pplacer_threads",
help="Maximum number of threads for pplacer. This is required to maximise the CheckM performances",
)
# Group of arguments for the dereplication
dereplication_group = p.add_argument_group("Dereplication of genomes")
dereplication_group.add_argument(
"--dereplicate",
action="store_true",
default=False,
help="Enable the dereplication of genomes",
)
dereplication_group.add_argument(
"--similarity",
type=number(float, minv=0.0, maxv=1.0),
default=1.0,
help=(
"Dereplicate genomes if they have a theta distance greather than this threshold. "
"This is used exclusively in conjunction with the --dereplicate argument"
),
)
# Group of arguments for estimating the bloom filter size
filter_size_group = p.add_argument_group("Bloom filter size")
filter_size_group.add_argument(
"--estimate-filter-size",
action="store_true",
default=False,
dest="estimate_filter_size",
help="Automatically estimate the best bloom filter size with ntCard",
)
filter_size_group.add_argument(
"--filter-size",
type=number(int, minv=10000),
dest="filter_size",
help="This is the size of the bloom filters",
)
filter_size_group.add_argument(
"--increase-filter-size",
type=number(float, minv=0.0, maxv=100.0),
default=0.0,
dest="increase_filter_size",
help=(
"Increase the estimated filter size by the specified percentage. "
"This is used in conjunction with the --estimate-filter-size argument only. "
"It is highly recommended to increase the filter size by a good percentage in case you are planning to update the index with new genomes"
),
)
filter_size_group.add_argument(
"--min-kmer-occurrences",
type=number(int, minv=1),
default=2,
dest="min_kmer_occurrences",
help=(
"Minimum number of occurrences of kmers to be considered for estimating the bloom filter size "
"and for building the bloom filter files"
),
)
# Group of arguments for estimating the optimal kmer size
kitsune_group = p.add_argument_group("Kitsune: Estimation of the optimal kmer size")
kitsune_group.add_argument(
"--closely-related",
action="store_true",
default=False,
dest="closely_related",
help="For closesly related genomes use this flag",
)
kitsune_group.add_argument(
"--estimate-kmer-size",
action="store_true",
default=False,
dest="estimate_kmer_size",
help="Automatically estimate the optimal kmer size with kitsune",
)
kitsune_group.add_argument(
"--jellyfish-threads",
type=number(int, minv=1, maxv=os.cpu_count()),
default=1,
dest="jellyfish_threads",
help="Maximum number of threads for Jellyfish. This is required to maximise the kitsune performances",
)
kitsune_group.add_argument(
"--limit-kmer-size",
type=number(int, minv=4),
default=32,
dest="limit_kmer_size",
help="Limit the estimation of the optimal kmer size with kitsune to this value at most",
)
return p.parse_args()
def quality_control(
genomes: list,
tax_id: str,
tmp_dir: str,
input_extension: str = "fna.gz",
completeness: float = 0.0,
contamination: float = 100.0,
nproc: int = 1,
pplacer_threads: int = 1,
) -> Tuple[List[str], List[str]]:
"""
Quality-control genomes with CheckM
:param genomes: List of genome file paths
:param tax_id: NCBI tax ID
:param tmp_dir: Path to the temporary folder
:param input_extension: File extension of the input files in genomes
:param completeness: Completeness threshold
:param contamination: Contamination threshold
:param nproc: Make CheckM parallel
:param pplacer_threads: Max number of threads for pplacer
:return: List of genome file paths for genomes that passed the quality-control
in addition to a list with the CheckM output table paths
"""
# Define the CheckM temporary folder
checkm_tmp_dir = os.path.join(tmp_dir, "checkm", tax_id)
os.makedirs(checkm_tmp_dir, exist_ok=True)
# Run CheckM on the current set of genomes
checkm_tables = checkm(
genomes,
checkm_tmp_dir,
file_extension=input_extension,
nproc=nproc,
pplacer_threads=pplacer_threads,
)
# Filter genomes according to the input --completeness and --contamination thresholds
genome_ids = filter_checkm_tables(checkm_tables, completeness=completeness, contamination=contamination)
# Rebuild the genome file paths
genome_paths = [os.path.join(tmp_dir, "genomes", tax_id, "{}.{}".format(genome_id, input_extension)) for genome_id in genome_ids]
return genome_paths, checkm_tables
def organize_data(
genomes: list,
db_dir: str,
tax_label: str,
tax_id: str,
cluster_id: int = 1,
cluster_prefix: str = "MSBT",
metadata: Optional[List[Dict[str, str]]] = None,
checkm_tables: Optional[list] = None,
flat_structure: bool = False,
) -> List[str]:
"""
Organize genome files
:param genomes: List with path to the genome files
:param db_dir: Path to the database folder
:param tax_label: Full taxonomic label
:param tax_id: NCBI tax ID
    :param cluster_id: Numerical cluster ID
:param cluster_prefix: Cluster prefix
:param metadata: List of dictionaries with genomes information
:param checkm_tables: List with paths to the CheckM output tables
:param flat_structure: Organize genomes in the same folder without any taxonomic organization
:return: List with genome file paths
"""
genomes_paths = list()
# In case at least one genome survived both the quality control and dereplication steps
# Define the taxonomy folder in database
tax_dir = os.path.join(db_dir, tax_label.replace("|", os.sep)) if not flat_structure else db_dir
genomes_dir = os.path.join(tax_dir, "genomes")
os.makedirs(genomes_dir, exist_ok=True)
if not flat_structure:
# Create the strains folder
genomes_dir = os.path.join(tax_dir, "strains", "genomes")
os.makedirs(genomes_dir, exist_ok=True)
references_path = "references.txt" if not flat_structure else "genomes_{}.txt".format(tax_id)
with open(os.path.join(tax_dir, references_path), "w+") as refsfile:
# Move the processed genomes to the taxonomy folder
for genome_path in genomes:
# Get the genome name from file path
_, genome_name, _, _ = get_file_info(genome_path)
# Move the genome file into the species folder
shutil.move(genome_path, genomes_dir)
# Also take track of the genome names in the references.txt file
refsfile.write("{}\n".format(genome_name))
# Add the genome to the full list of genomes in database
genomes_paths.append(os.path.join(genomes_dir, os.path.basename(genome_path)))
# Create the metadata table in the taxonomy folder
with open(os.path.join(tax_dir, "metadata.tsv"), "w+") as metafile:
if not flat_structure:
metafile.write("# Cluster ID: {}{}\n".format(cluster_prefix, cluster_id))
if checkm_tables:
# Also merge the CheckM output tables and move the result to the taxonomy folder
checkm_path = "checkm.tsv" if not flat_structure else "checkm_{}.tsv".format(tax_id)
with open(os.path.join(tax_dir, checkm_path), "w+") as table:
header = True
for table_path in checkm_tables:
with open(table_path) as partial_table:
line_count = 0
for line in partial_table:
line = line.strip()
if line:
if line_count == 0:
if header:
table.write("{}\n".format(line))
header = False
else:
table.write("{}\n".format(line))
line_count += 1
return genomes_paths
def process_input_genomes(
genomes_list: list,
taxonomic_label: str,
cluster_id: int,
db_dir: str,
tmp_dir: str,
kmer_len: int,
input_extension: str = "fna.gz",
cluster_prefix: str = "MSBT",
nproc: int = 1,
pplacer_threads: int = 1,
completeness: float = 0.0,
contamination: float = 100.0,
dereplicate: bool = False,
similarity: float = 1.0,
flat_structure: bool = False,
logger: Optional[Logger] = None,
verbose: bool = False,
) -> List[str]:
"""
Process a provided list of input genomes
Organize, quality-control, and dereplicate genomes
:param genomes_list: List of input genome file paths
:param taxonomic_label: Taxonomic label of the input genomes
    :param cluster_id: Numerical cluster ID
:param db_dir: Path to the database root folder
:param tmp_dir: Path to the temporary folder
:param kmer_len: Length of the kmers
:param input_extension: File extension of the input files in genomes_list
:param cluster_prefix: Cluster prefix
:param nproc: Make the process parallel when possible
:param pplacer_threads: Maximum number of threads to make pplacer parallel with CheckM
:param completeness: Threshold on the CheckM completeness
:param contamination: Threshold on the CheckM contamination
:param dereplicate: Enable the dereplication step to get rid of replicated genomes
:param similarity: Get rid of genomes according to this threshold in case the dereplication step is enabled
:param flat_structure: Do not taxonomically organize genomes
:param logger: Logger object
:param verbose: Print messages on screen
:return: The list of paths to the genome files
"""
# Define a partial println function to avoid specifying logger and verbose
# every time the println function is invoked
printline = partial(println, logger=logger, verbose=verbose)
if verbose:
printline("Processing {}".format(taxonomic_label))
# Create a temporary folder to store genomes
tax_id = hashlib.md5(taxonomic_label.encode("utf-8")).hexdigest()
tmp_genomes_dir = os.path.join(tmp_dir, "genomes", tax_id)
os.makedirs(tmp_genomes_dir, exist_ok=True)
# Iterate over the list of input genome file paths
genomes = list()
for genome_path in genomes_list:
# Symlink input genomes into the temporary folder
os.symlink(genome_path, os.path.join(tmp_genomes_dir, os.path.basename(genome_path)))
genomes.append(os.path.join(tmp_genomes_dir, os.path.basename(genome_path)))
# Take track of the paths to the CheckM output tables
checkm_tables: List[str] = list()
# Quality control
if completeness > 0.0 or contamination < 100.0:
before_qc = len(genomes)
genomes, checkm_tables = quality_control(
genomes,
tax_id,
tmp_dir,
input_extension=input_extension,
completeness=completeness,
contamination=contamination,
nproc=nproc,
pplacer_threads=pplacer_threads,
)
if before_qc > len(genomes) and verbose:
printline("Quality control: excluding {}/{} genomes".format(before_qc - len(genomes), before_qc))
# Dereplication
if len(genomes) > 1 and dereplicate:
before_dereplication = len(genomes)
genomes = dereplicate_genomes(
genomes,
tax_id,
tmp_dir,
kmer_len,
filter_size=None,
nproc=nproc,
similarity=similarity,
)
if before_dereplication > len(genomes) and verbose:
printline(
"Dereplication: excluding {}/{} genomes".format(
before_dereplication - len(genomes), before_dereplication
)
)
# Check whether no genomes survived the quality control and the dereplication steps
if not genomes:
if verbose:
printline("No more genomes available for the NCBI tax ID {}".format(tax_id))
return genomes
# Organize genome files
genomes_paths = organize_data(
genomes,
db_dir,
taxonomic_label,
tax_id,
cluster_id=cluster_id,
cluster_prefix=cluster_prefix,
metadata=None,
checkm_tables=checkm_tables,
flat_structure=flat_structure,
)
return genomes_paths
def get_sublist(genomes, limit_number=None, limit_percentage=100.0, flat_structure=False) -> List[str]:
"""
Given a list of genomes, define a sublist with a limited number of genomes taken randomly
:param genomes: Input list of paths to the genomes files
:param limit_number: Limit the number of elements in the resulting list to this number
If defined, it overrides the percentage
:param limit_percentage: Limit the size of the resulting list to this percentage computed
on the total number of elements in the input list
:param flat_structure: Genomes organization
:return: The sublist of genomes
"""
if not genomes or limit_number == 0 or limit_percentage == 0.0:
return list()
# Always use the same seed for reproducibility
rng = numpy.random.default_rng(0)
# Number of genomes to be considered for estimating the optimal kmer size with kitsune
select_at_most = 0
is_percentage = False
if isinstance(limit_number, int):
# Select a specific number of genomes at most
select_at_most = limit_number
else:
# Remember that the number of genomes will be selected based on a percentage
is_percentage = True
if is_percentage and limit_percentage == 100.0:
# There is no need for subsampling here
return genomes
genomes_sub = list()
if flat_structure:
if select_at_most == 0:
# Select genomes as a percentage of the total number of genomes
select_at_most = int(math.ceil(len(genomes) * limit_percentage / 100.0))
# Subsampling genomes as input for kitsune
rng.shuffle(genomes)
genomes_sub = genomes[:select_at_most]
else:
# Genomes must be taxonomically reorganized
species2genomes = dict()
for genome_path in genomes:
# Get the genomes folder
dirpath, _, _, _ = get_file_info(genome_path)
# Retrieve the species id
# Genomes are located under the "genomes" folder, under the species directory
species = dirpath.split(os.sep)[-2]
# Group genomes according to their species
if species not in species2genomes:
species2genomes[species] = list()
species2genomes[species].append(genome_path)
for species in species2genomes:
species_genomes = species2genomes[species]
# Cluster specific maximum number of genomes to be considered for kitsune
select_at_most_in_species = select_at_most
if select_at_most_in_species == 0:
# Select genomes as a percentage of the total number of genomes
select_at_most_in_species = int(math.ceil(len(species_genomes) * limit_percentage / 100.0))
# Subsampling genomes as input for kitsune
rng.shuffle(species_genomes)
genomes_sub.extend(species_genomes[:select_at_most_in_species])
return genomes_sub
def estimate_bf_size_and_howdesbt(
strains_dir: str,
tmp_dir: str,
extension: str = "fna.gz",
kmer_len: int = 21,
min_kmer_occurrences: int = 2,
prefix: str = "genomes",
limit_number: Optional[int] = None,
limit_percentage: float = 100.0,
increase_filter_size: float = 0.0,
nproc: int = 1,
) -> Tuple[str, List[str]]:
"""
Wrapper around ntCard and HowDeSBT for building strains SBTs with species-specific bloom filter sizes
The structure is always flat here
:param strains_dir: Path to the strains folder
:param tmp_dir: Path to the temporary folder
:param extension: Input file extension
:param kmer_len: Kmer size
:param min_kmer_occurrences: Minimum number of occurrences of kmers for estimating the bloom filter size and for building bloom filter files
:param prefix: Prefix of the ntCard output histogram file
:param limit_number: Maximum number of genomes as input for ntCard
:param limit_percentage: Maximum number of genomes as input for ntCard (in percentage)
:param increase_filter_size: Increase the estimated bloom filter size by the specified percentage
:param nproc: Make ntCard and HowDeSBT parallel
:return: Strains folder path and list of selected representative genomes
"""
genomes_dir = os.path.join(strains_dir, "genomes")
if not os.path.isdir(genomes_dir):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), genomes_dir)
os.makedirs(tmp_dir, exist_ok=True)
genomes_paths = [str(path) for path in Path(genomes_dir).glob("*.{}".format(extension))]
# Define a subset of genomes
genomes_paths_sub = get_sublist(
genomes_paths,
limit_number=limit_number,
limit_percentage=limit_percentage,
flat_structure=True
)
# Estimate the bloom filter size
filter_size = estimate_bf_size(
genomes_paths_sub,
kmer_len=kmer_len,
min_occurrences=min_kmer_occurrences,
prefix=prefix,
tmp_dir=tmp_dir,
nproc=nproc,
)
# Increment the estimated bloom filter size
increment = int(math.ceil(filter_size * increase_filter_size / 100.0))
filter_size += increment
# Run HowDeSBT
howdesbt(
strains_dir,
extension=extension,
kmer_len=kmer_len,
min_occurrences=min_kmer_occurrences,
filter_size=filter_size,
nproc=nproc,
flat_structure=True,
)
# Define the manifest file
with open(os.path.join(strains_dir, "manifest.txt"), "w+") as manifest:
manifest.write("--min-kmer-occurrences {}\n".format(min_kmer_occurrences))
manifest.write("--kmer-len {}\n".format(kmer_len))
manifest.write("--filter-size {}\n".format(filter_size))
# Select the representative genomes
selected_genomes = list()
if len(genomes_paths) <= 3:
# 3 is the maximum number of selected species
# as it is also the minimum number of genomes for computing boundaries
selected_genomes = [
get_file_info(genome_path, check_supported=False, check_exists=False)[1] for genome_path in genomes_paths
]
else:
# Get the bloom filters file paths
bf_filepaths = [str(path) for path in Path(os.path.join(strains_dir, "filters")).glob("*.bf")]
# Compute the theta distance between genomes
bfdistance_theta = bfaction(
bf_filepaths,
os.path.join(tmp_dir, "howdesbt"),
kmer_len,
min_occurrences=min_kmer_occurrences,
filter_size=filter_size,
nproc=nproc,
action="bfdistance",
mode="theta"
)
# Sum the distances to get the final score
bfdistance_sums = {genome: sum(bfdistance_theta[genome].values()) for genome in bfdistance_theta}
# Sort genomes according to the sum of their distances with all the other genomes
sorted_genomes = sorted(bfdistance_sums, key=lambda genome: bfdistance_sums[genome])
# First and last genomes are those that minimize and maximize the distance with all the other genomes
selected_genomes.append(sorted_genomes[0])
selected_genomes.append(sorted_genomes[-1])
# Also select a genome in the middles of min and max distances
selected_genomes.append(sorted_genomes[math.ceil(int(len(sorted_genomes) / 2))])
return strains_dir, selected_genomes
def index(
db_dir: str,
input_list: str,
tmp_dir: str,
input_extension: str = "fna.gz",
cluster_prefix: str = "MSBT",
kmer_len: Optional[int] = None,
filter_size: Optional[int] = None,
flat_structure: bool = False,
estimate_filter_size: bool = True,
increase_filter_size: float = 0.0,
min_kmer_occurrences: int = 2,
estimate_kmer_size: bool = True,
limit_estimation_number: Optional[int] = None,
limit_estimation_percentage: float = 100.0,
limit_kmer_size: int = 32,
completeness: float = 0.0,
contamination: float = 100.0,
dereplicate: bool = False,
similarity: float = 1.0,
closely_related: bool = False,
logger: Optional[Logger] = None,
verbose: bool = False,
nproc: int = 1,
pplacer_threads: int = 1,
jellyfish_threads: int = 1,
parallel: int = 1,
) -> None:
"""
Build the database baseline
:param db_dir: Path to the database root folder
:param input_list: Path to the file with a list of input genome paths
:param tmp_dir: Path to the temporary folder
:param input_extension: File extension of the input files whose paths are defined into the input_list file
:param cluster_prefix: Prefix of clusters numerical identifiers
:param kmer_len: Length of the kmers
:param filter_size: Size of the bloom filters
:param flat_structure: Do not taxonomically organize genomes
:param estimate_filter_size: Run ntCard to estimate the most appropriate bloom filter size
:param increase_filter_size: Increase the estimated bloom filter size by the specified percentage
:param min_kmer_occurrences: Minimum number of occurrences of kmers for estimating the bloom filter size and for building bloom filter files
:param estimate_kmer_size: Run kitsune to estimate the best kmer size
:param limit_estimation_number: Number of genomes per group as input for kitsune and ntCard
:param limit_estimation_percentage: Percentage on the total number of genomes per group as input for kitsune and ntCard
:param limit_kmer_size: Maximum kmer size for kitsune kopt
:param completeness: Threshold on the CheckM completeness
:param contamination: Threshold on the CheckM contamination
:param dereplicate: Enable the dereplication step to get rid of replicated genomes
    :param closely_related: For closely related genomes use this flag
:param similarity: Get rid of genomes according to this threshold in case the dereplication step is enabled
:param logger: Logger object
:param verbose: Print messages on screen
:param nproc: Make the process parallel when possible
:param pplacer_threads: Maximum number of threads to make pplacer parallel with CheckM
:param jellyfish_threads: Maximum number of threads to make Jellyfish parallel with kitsune
:param parallel: Maximum number of processors to process each NCBI tax ID in parallel
"""
# Define a partial println function to avoid specifying logger and verbose
# every time the println function is invoked
printline = partial(println, logger=logger, verbose=verbose)
# Load the list of input genomes and eventually their taxonomic labels
taxonomy2genomes = load_input_table(input_list, input_extension=input_extension)
# Force flat structure in case of genomes with no taxonomic label
if "NA" in taxonomy2genomes:
flat_structure = True
# Build a partial function around process_input_genomes
process_partial = partial(
process_input_genomes,
db_dir=db_dir,
tmp_dir=tmp_dir,
kmer_len=kmer_len,
input_extension=input_extension,
cluster_prefix=cluster_prefix,
nproc=nproc,
pplacer_threads=pplacer_threads,
completeness=completeness,
contamination=contamination,
dereplicate=dereplicate,
similarity=similarity,
flat_structure=flat_structure,
logger=logger,
verbose=False,
)
printline("Processing clusters")
# Define a cluster counter
# This is also the size of the progress bar
clusters_counter = len(taxonomy2genomes)
# Take track of all the genomes paths
genomes_paths = list()
with mp.Pool(processes=parallel) as pool, tqdm.tqdm(total=clusters_counter, disable=(not verbose)) as pbar:
# Wrapper around the update function of tqdm
def progress(*args):
pbar.update()
# Process input genomes
jobs = [
pool.apply_async(
process_partial,
args=(taxonomy2genomes[taxonomy], taxonomy, pos + 1),
callback=progress,
)
for pos, taxonomy in enumerate(taxonomy2genomes)
]
# Get results from jobs
for job in jobs:
genomes_paths.extend(job.get())
if not genomes_paths:
raise Exception("No input genomes found")
printline("Processing {} genomes".format(len(genomes_paths)))
# Define the manifest file path
# This is used in case the --filter-size and/or --kmer-len must be estimated
manifest_filepath = os.path.join(db_dir, "manifest.txt")
# Add cluster counter
with open(manifest_filepath, "a+") as manifest:
manifest.write("--clusters-counter {}\n".format(clusters_counter))
# Limited set of genomes in case of --estimate-kmer-size and/or --estimate-filter-size
# The set is limited to the number of genomes specified with --limit-estimation-number or limit_estimation_percentage
genomes_paths_sub = list()
# Check whether the kmer size must be estimated
if estimate_kmer_size and not kmer_len:
printline("Estimating the best k-mer size. Please be patient, this may take a while")
# Define a subset of genomes
genomes_paths_sub = get_sublist(
genomes_paths,
limit_number=limit_estimation_number,
limit_percentage=limit_estimation_percentage,
flat_structure=flat_structure
)
if len(genomes_paths_sub) < 2:
raise Exception("Not enough genomes for estimating the optimal kmer size with kitsune!")
# Estimate a kmer size
kmer_len = optimal_k(
genomes_paths_sub,
limit_kmer_size,
os.path.join(tmp_dir, "kitsune"),
closely_related=closely_related,
nproc=nproc,
threads=jellyfish_threads
)
        # Update the manifest file with the --kmer-len
with open(manifest_filepath, "a+") as manifest:
manifest.write("--kmer-len {}\n".format(kmer_len))
# Check whether the bloom filter size must be estimated
if estimate_filter_size and not filter_size:
printline("Estimating the bloom filter size")
# Use the precomputed subset of genomes in case it has already been
# defined because of the --estimate-kmer-size
if not genomes_paths_sub:
# Define a subset of genomes
genomes_paths_sub = get_sublist(
genomes_paths,
limit_number=limit_estimation_number,
limit_percentage=limit_estimation_percentage,
flat_structure=flat_structure
)
# Estimate the bloom filter size
filter_size = estimate_bf_size(
genomes_paths_sub,
kmer_len=kmer_len,
min_occurrences=min_kmer_occurrences,
prefix="genomes",
tmp_dir=tmp_dir,
nproc=nproc,
)
# Increment the estimated bloom filter size
increment = int(math.ceil(filter_size * increase_filter_size / 100.0))
filter_size += increment
# Update the manifest file with the --filter-size
with open(manifest_filepath, "a+") as manifest:
manifest.write("--filter-size {}\n".format(filter_size))
if filter_size and kmer_len:
# Retrieve the current working directory
current_folder = os.getcwd()
printline("Indexing genomes")
# Define a partial function around the howdesbt wrapper
howdesbt_partial = partial(
howdesbt,
extension=input_extension,
kmer_len=kmer_len,
min_occurrences=min_kmer_occurrences,
filter_size=filter_size,
nproc=nproc,
flat_structure=flat_structure,
)
if not flat_structure:
# Iterate over all the taxonomic levels from the species up to the superkingdom
for level in [
"species",
"genus",
"family",
"order",
"class",
"phylum",
"kingdom",
]:
folders = [str(path) for path in Path(db_dir).glob("**/{}__*".format(level[0])) if os.path.isdir(str(path))]
printline("Running HowDeSBT at the {} level ({} clusters)".format(level, len(folders)))
if level == "species":
estimate_bf_size_and_howdesbt_partial = partial(
estimate_bf_size_and_howdesbt,
extension=input_extension,
kmer_len=kmer_len,
min_kmer_occurrences=min_kmer_occurrences,
prefix="genomes",
limit_number=limit_estimation_number,
limit_percentage=limit_estimation_percentage,
increase_filter_size=increase_filter_size,
nproc=nproc,
)
# Establish a species-specific bloom filter size
# Run howdesbt in flat mode
# Search for genomes that minimize and maximize the genetic distance versus all the other genomes in the same cluster
# Use only these genomes to build the species tree
with mp.Pool(processes=parallel) as strains_pool, tqdm.tqdm(total=len(folders), disable=(not verbose)) as pbar:
# Wrapper around the update function of tqdm
def progress(*args):
pbar.update()
# Process strains
jobs = [
strains_pool.apply_async(
estimate_bf_size_and_howdesbt_partial,
args=(
os.path.join(species_dir, "strains"),
os.path.join(species_dir, "strains", "tmp"),
),
callback=progress
)
for species_dir in folders
]
for job in jobs:
strains_dir, selected_genomes = job.get()
species_dir = os.sep.join(strains_dir.split(os.sep)[:-1])
# Populate the genomes folder at the species level with the selected genomes
for genome in selected_genomes:
os.symlink(
os.path.join(strains_dir, "genomes", "{}.{}".format(genome, input_extension)),
os.path.join(species_dir, "genomes", "{}.{}".format(genome, input_extension))
)
with mp.Pool(processes=parallel) as pool, tqdm.tqdm(total=len(folders), disable=(not verbose)) as pbar:
# Wrapper around the update function of tqdm
def progress(*args):
pbar.update()
# Process clusters under a specific taxonomic level
jobs = [
pool.apply_async(howdesbt_partial, args=(level_dir,), callback=progress)
for level_dir in folders
]
for job in jobs:
job.get()
# Also run HowDeSBT on the database folder to build
# the bloom filter representation of the superkingdom
if not flat_structure:
printline("Building the database root bloom filter with HowDeSBT")
howdesbt_partial(db_dir)
if flat_structure:
# Merge all the genomes_*.txt into a single file
gen = Path(db_dir).glob("genomes_*.txt")
with open(os.path.join(db_dir, "genomes.txt"), "w+") as genomes_file:
for filepath in gen:
with open(str(filepath)) as file:
for line in file:
line = line.strip()
genomes_file.write("{}\n".format(line))
# Get rid of the partial genomes file
os.unlink(str(filepath))
# The howdesbt function automatically set the current working directory to
# the index folder of the taxonomic labels
# Come back to the original folder
os.chdir(current_folder)
def main() -> None:
# Load command line parameters
args = read_params()
# Create the database folder
os.makedirs(args.db_dir, exist_ok=True)
# Also create the temporary folder
# Do not raise an exception in case it already exists
os.makedirs(args.tmp_dir, exist_ok=True)
# Initialise the logger
logger = init_logger(filepath=args.log, toolid=TOOL_ID, verbose=args.verbose)
# Skip the bloom filter size estimation if the filter size is passed as input
if not args.filter_size and not args.estimate_filter_size:
raise Exception(
(
"Please specify a bloom filter size with the --filter-size option or "
"use the --estimate-filter-size flag to automatically estimate the best bloom filter size with ntCard"
)
)
# Skip the kmer size estimation if the kmer length is passed as input
if not args.kmer_len and not args.estimate_kmer_size:
raise Exception(
(
"Please specify a kmer size with the --kmer-len option or "
"use the --estimate-kmer-size flag to automatically estimate the optimal kmer size with kitsune"
)
)
superkingdoms = set()
if not args.flat_structure:
try:
with open(args.input_list) as inlist:
for line in inlist:
line = line.strip()
if line:
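                        # The second tab-separated field holds the taxonomy string (e.g. k__Bacteria|p__...);
                        # keep only the superkingdom name from its first level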
superkingdoms.add(line.split("\t")[1].split("|")[0].split("__")[-1])
except Exception as ex:
raise Exception("Input file is not correctly formatted: {}".format(args.input_list)).with_traceback(
ex.__traceback__
)
if superkingdoms:
for superkingdom in superkingdoms:
if os.path.isdir(os.path.join(args.db_dir, "k__{}".format(superkingdom))):
raise Exception(
(
"An indexed version of the {} superkingdom already exists in the database!\n"
"Please use the update module to add new genomes"
).format(superkingdom)
)
# Define the database manifest file
manifest_filepath = os.path.join(args.db_dir, "manifest.txt")
if os.path.isfile(manifest_filepath):
        # Load the manifest and check --kmer-len, --filter-size, and --min-kmer-occurrences against it
manifest = load_manifest(manifest_filepath)
if "kmer_len" in manifest:
if not args.kmer_len:
args.kmer_len = manifest["kmer_len"]
elif args.kmer_len != manifest["kmer_len"]:
raise ValueError("The kmer length is not compatible with the specified database")
if "filter_size" in manifest:
if not args.filter_size:
args.filter_size = manifest["filter_size"]
elif args.filter_size != manifest["filter_size"]:
raise ValueError("The bloom filter size is not compatible with the specified database")
if "min_kmer_occurrences" in manifest:
if not args.min_kmer_occurrences:
args.min_kmer_occurrences = manifest["min_kmer_occurrences"]
elif args.min_kmer_occurrences != manifest["min_kmer_occurrences"]:
raise ValueError("The minimum number of occurrences of kmers is not compatible with the specified database")
else:
# Initialize manifest file
with open(manifest_filepath, "w+") as manifest:
if args.kmer_len:
manifest.write("--kmer-len {}\n".format(args.kmer_len))
if args.filter_size:
manifest.write("--filter-size {}\n".format(args.filter_size))
manifest.write("--min-kmer-occurrences {}\n".format(args.min_kmer_occurrences))
# Build a sh script with the command line used to launch the index module
build_sh(sys.argv, TOOL_ID, args.db_dir)
t0 = time.time()
index(
args.db_dir,
args.input_list,
args.tmp_dir,
input_extension=args.extension,
cluster_prefix=args.cluster_prefix,
kmer_len=args.kmer_len,
filter_size=args.filter_size,
flat_structure=args.flat_structure,
estimate_filter_size=args.estimate_filter_size,
increase_filter_size=args.increase_filter_size,
min_kmer_occurrences=args.min_kmer_occurrences,
estimate_kmer_size=args.estimate_kmer_size,
limit_estimation_number=args.limit_estimation_number,
limit_estimation_percentage=args.limit_estimation_percentage,
limit_kmer_size=args.limit_kmer_size,
completeness=args.completeness,
contamination=args.contamination,
dereplicate=args.dereplicate,
similarity=args.similarity,
closely_related=args.closely_related,
logger=logger,
verbose=args.verbose,
nproc=args.nproc,
pplacer_threads=args.pplacer_threads,
jellyfish_threads=args.jellyfish_threads,
parallel=args.parallel,
)
if args.cleanup:
# Remove the temporary folder
println(
"Cleaning up temporary space",
logger=logger,
verbose=args.verbose,
)
shutil.rmtree(args.tmp_dir, ignore_errors=True)
t1 = time.time()
println(
"Total elapsed time {}s".format(int(t1 - t0)),
logger=logger,
verbose=args.verbose,
)
if __name__ == "__main__":
    main()
/Mopidy-MusicBox-Darkclient-1.1.tar.gz/Mopidy-MusicBox-Darkclient-1.1/README.rst |

Mopidy-MusicBox-Darkclient
*****************************
Mopidy MusicBox Webclient (MMW) is a frontend extension and JavaScript-based web client especially
written for Mopidy.

Darkclient is a dark theme for MMW. It can be used as a standalone Mopidy plugin without the
upstream MMW extension installed.
Features
========
- Responsive design that works equally well on desktop and mobile browsers.
- Browse content made available by Mopidy backend extensions.
- Add one or more tracks or entire albums to the queue.
- Search for tracks, albums, or artists.
- Shows detailed track and album information in playlists and queue with album covers.
- Artwork and Like extensions
- Support for all of the Mopidy playback controls (consume mode, repeat, shuffle, etc.)
- Fullscreen mode.
- Multilanguage support
.. image:: https://github.com/stffart/mopidy-musicbox-darkclient/raw/develop/screenshots/overview.png
   :height: 425 px
   :width: 762 px
   :align: center
Dependencies
============
- MMW has been tested on the major browsers (Chrome, IE, Firefox, Safari, iOS). It *may* also work on other browsers
that support websockets, cookies, and JavaScript.
- ``Mopidy`` >= 3.0.0. An extensible music server that plays music from local disk, Spotify, SoundCloud, Google
Play Music, and more.
Installation
============
Install by running::

    sudo python3 -m pip install Mopidy-MusicBox-Darkclient
Or, if available, install the Debian/Ubuntu package from
`apt.mopidy.com <https://apt.mopidy.com/>`_.
Configuration
=============
MMW is shipped with default settings that should work straight out of the box for most users::

    [musicbox_darkclient]
    enabled = true
    musicbox = false
    websocket_host =
    websocket_port =
    on_track_click = PLAY_ALL
    locale = en
The following configuration values are available should you wish to customize your installation
further (an example override is shown after the list):
- ``musicbox_darkclient/enabled``: If the MMW extension should be enabled or not. Defaults to ``true``.
- ``musicbox_darkclient/musicbox``: Set this to ``true`` if you are connecting to a Mopidy instance running on a
Pi Musicbox. Expands the MMW user interface to include system control/configuration functionality.
- ``musicbox_darkclient/websocket_host``: Optional setting to specify the target host for Mopidy websocket connections.
- ``musicbox_darkclient/websocket_port``: Optional setting to specify the target port for Mopidy websocket connections.
- ``musicbox_darkclient/on_track_click``: The action performed when clicking on a track. Valid options are:
``PLAY_ALL`` (default), ``PLAY_NOW``, ``PLAY_NEXT``, ``ADD_THIS_BOTTOM``, ``ADD_ALL_BOTTOM``, and ``DYNAMIC`` (repeats last action).
- ``musicbox_darkclient/locale``: Optional setting to specify user interface language.
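
For example, to point the client at a Mopidy instance running on another machine and switch the
interface language (assuming the corresponding translation is available), you would override only
the relevant keys; the host below is a placeholder for your own server::

    [musicbox_darkclient]
    websocket_host = 192.168.1.50
    websocket_port = 6680
    locale = ru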
Usage
=====
Enter the address of the Mopidy server that you are connecting to in your browser (e.g. http://localhost:6680/musicbox_darkclient).
Credits
=======
- Original author: `Wouter van Wijk <https://github.com/woutervanwijk>`__
- `Contributors <https://github.com/pimusicbox/mopidy-musicbox-webclient/graphs/contributors>`_
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/edimage.py | __author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: edimage.py 54209 2008-06-14 04:57:51Z CJP $"
__revision__ = "$Revision: 54209 $"
#-----------------------------------------------------------------------------#
from extern.embeddedimage import PyEmbeddedImage
catalog = {}
index = []
splashwarn = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAASwAAADICAYAAABS39xVAAAABHNCSVQICAgIfAhkiAAAIABJ"
"REFUeJzsvWmMbdl1HvatPZzpTjW/sedm020O4iDJkqAxkOAEdhQDAZIfTmwEzoAkCPwjiIIg"
"P5IYcaAYsGRBkKzYlixLESHHlKGBhCKZEtUUx261mqTIZotkT+zhdfcbq+pO55w95Mdea99T"
"jyKLMoWnfkptoPBe3Tr33nP22nsN3/rW2gTgN3E2zsbZOBt3wVB/3jdwNs7G2Tgb3+g4U1hn"
"42ycjbtmnCmss3E2zsZdM84U1tk4G2fjrhlnCutsnI2zcdeMM4V1Ns7G2bhrxpnCOhtn42zc"
"NcOcdsHrL7/8HX3bonMOighGawBAjBE+BJRlCe8cYoww1sL1PWxZgpRCDAGkFIL36NsWRARb"
"logxAjHCFgWs1vAhoOt7+L6HMgZaa4QQ8vvlehCBiNLvQP6/sRYEIISAEAJ836Pve/gYURgD"
"W5ZQSsF7D8QIpXW+P/msGAKU1gjewzsHbS36tkXVNOkaImi+l77rELyHrSoE56C0xnw+R2lt"
"vh8ACN6nefIeEUBZlnB9jxACyrqG6zoQEUgpaGPS9w+emwAs53NYa6G0Bojg+x7O+ywHeRal"
"VHp+59LrxkAbgxgjgvcAANd1af75uddtC02ECMBoDSKC0hrKGATv070R4Uz+Z/K/U/K/9OCD"
"n/ymFJZMZsE3icG/IkAiytcHniAKIV2jNUxRADGiXa+h+h6aBdu1LaiqQAC0MVnIXd+DiGCK"
"AiEEKKUAohMLTL5TKQXECB8jAk8cAFhrUcrkhgDP96OMScL1Pi0YpWDLEt1qBe8cTFFAaY0Y"
"Aoqqgut7FDzJXdvm62OMaYPxgil4cfS8OOQefQhpsfPCNEWByPcynLu8KWNMr8eIONignjcG"
"EUENXo8hnNzEg2fzfQ9TFOk9SiEagxgCurZNcx0CtNbpe/ke5DMRIyJviDP5n8n/Tsn/tHHq"
"VYoIkT9MtLXcpFIqaeGBtjcsQNHO8lAggrUWpFSazBBgtEbXdTBsVYuyRO892tUK3nsQWwvw"
"58V0E/nz8wQPhIXBBCpj4HiRiSXD4BmUMSiKAs77bIV7tnrGWnTrdVoAvOjy9/LGIaWwXq2g"
"WanHEKB4wYYYYZRKz0wE79zGavHnGJ6P9IhpU4oXAaSNZozJi1DmmJRKr/EGlcUushgu4Lw5"
"lEpWWr4LQMHfFQZKIM8T/34m/zP530n5nzZOVVgRAMliYG0+1KiOrWG+fjhRSsGLxSBKmjWE"
"7CKLJfMsSK01rNagukbbtvDObb6PJx4sDJkwDP4eeZKDXCv3xPcytCSKF1nXdVBaw3sP731y"
"h51Du1ohxggtC4oIZG2yXn2fBWTZ/bdFkeaCF5YPIbnQ7OoDyXsAu/qy4OV+vPeA92kz8IYY"
"hhfDIfMtMrn9dXCIkDcKbxbF9yJWPnssg41/YrEqdSb/M/nfUfmfNk73w4YuuFIZpb/dPfSD"
"hxm6q67vodlKeH64sq7z30Qgfd/DOYeiKGC0RuBJFVyB2EJIbCwCKzgG7kX7i+uvNQhAWdfo"
"uw6u79MitTZPTOSYfxhKAMkyy/d553Lc3vV9jsvFUhtr00Jnq2e0TjgA/+66DqYooNm6yPfI"
"M2W8RhaOPBuHE3KdWDeZ2zwvty2W4QIUdx4cUkhoFBnryYsvRgQOq7Ks+Ufk/Wchf+ccKSJU"
"o1HGmrTW8Uz+//+Q/zey/08b31DgGAc3Jq6xaHYRvriVsqjCQGvKZBFRxhDESgnAKZPqlEoL"
"hjYAIMTFF5dcLCvwVZgBSSzMVsvwZ8iDijUT17esa6yXS2gGGru2RVlVqNk9dewyO3kGsYr8"
"fX3bJiyAn4vKMoctngHW4Fx27aNz2SuQhS33FWOEEs8BAPF3npDFIAQCkDZLjAkj0Tp5Gre5"
"+IrxFSAtDM3zqHmxKH6PGn62bB6t/8zkz14NaaXgQkAgiiEEUkTxTP5/8eX/jez/08aptAbR"
"lPlf1vriPor7HWNMVpMtlmQOFFtEsZbipgLI4OUQTA0hwLF7r9g6GGNgJOMhC5+F2vd9miye"
"SGNMjpV936PjidHW5s87YcW8R1nX4gFAEcF1HY6Pj9O9DYQh2EGevEGGRj5PEWW8Yd11STgy"
"XwPBk1JQbEU9z5dnCy7POdwg8vm3/wwXPHi+xbJqDj8UUZYBYkx/s/bkffNzaH4mURJ/JvKP"
"kaCUssYoY4wiIm2KQldVpSndhAop8jiT/19E+f8p9v9p41QPSyZOXFIv2ng4aVpD80LiGcgT"
"13ddnhzvHPrVCtqY5AoOXMe+6/IikActrMViuYQuSxBRchljhNEaRin0Yin5PgO75pq/W2md"
"3FC1AV01g6BDADHGiLosobXGernMQmydQ7tcYjydQhuDVdsiOJczGpK5iQMhiIsuFrpdr+EG"
"1iYDxbwxlQCaIrQQkgchi0VAXAl1brPItiwRvM8/2RsBEIlgtUbnPRx/TsQmk9SLlRuEFrcD"
"2t+0/PuerLVktKZIpGKMSmlNJlnuCKVC33UhOBeFInAm/79A8v9T7v/TBuGUfljXrlz5jqE7"
"HGPMKL+ilOlYrdcAEpdDrBtiTDfKEyGTslouM1Cpjcn8jKEbK24jEaEwBo5jbwKy0P1QkByv"
"A4lrIsCiYnc+hyRaJ9ecEjDrnMv8HKIEkMr14O/y3qcMBr+vZ3BWJtoWBVaLRRa4xOsxRjRV"
"heVqla19UVUIIaBvWxRVlZ9B8JAslIFVlfsGEu4h1loyU33XJbC6LDMuEbzP4YZ3DmVZAkQ4"
"unUrP1fZNJgfH0MLVnGbtc5WmBXFv438DSsqaK0QoyKtleudGo0bRaTQrtfRud6H3vneex+9"
"j4gxnsn/L4b8/232/+WHHvrmeFgS0xulsGrbTHYrmN/hnUNVFCmF3HXoOU2pjAEBWC+X2dqR"
"SmleAf36vkdRFBvLMrA+kunwMcJy9kisBAGZlzO0ljm+RspexBBgWLtHnkRZpIoIhbUIMWUw"
"HIN+VVEgKIW26+CdS9kf57Bmzo6iTTbDOYf1cpm5O+1yiXa5RGEtTFGg9x62KEBAIhbWdZov"
"JiMaYzLQKFZvaFVzpmbgLisiBAwyRYPnF4svCxaMU/Tew7M1Favmug4xBOiiyJ8ji1X+lQ3y"
"p5G/63vSCTgnrRT1fa80oLTRqndezbZG6ur1uTo6XuPcbhN8RARRIIAiUcyhy5n870r5f7P7"
"/7RxKoYlLvq675P1UgpFVUFpnYGzGBNpLzD2AADBufz3zPjlLIG4/DT4HsnKAMiclTCwpAAQ"
"kDS1LDJxX0Eb/orgDDlL0/cJD1GbVK/gEs57WH4+WeirtkVg6ygWRX6IEq/Eap1dYiHmec5C"
"FUyKg9wrz8loNAIBKMoSjtP1w2cfPn+IMXkVvGAkyyX3cHsINHThhylxsdqRsQXZaGKFc8pe"
"sm/xJKAr1v0blT+IiJKiUgSoSKS1tRpEOkbore2pfvIzz6u/9d/+FP03P/Jz9NobN1XfdeSc"
"VyAQKUVQis7kf3fK/89q/3+9cTpxlN1iyYQAyDciv6/bFgrJ1RR324cAw66lZ+xhmMaU2P0E"
"Ec0YaNqQ7EAEzSFB5PeDF5cxBkZrrNilHApG7k3AwxhCBmZloTnnkmvLLrNnK0tK5RRrXZaY"
"z+cn7teFkN3ywBY0hIBuvU5EQF4M8i8AkLWoyxI+RhgitEPBySKKEQEbjIJY+BJ+aK2hGS8g"
"DkuEfAfgxAaQBSPzWlQVIs97u1xu8IdBOJRT54xFyGvfiPy7riOlNRljSGlNSilFRBRJKWuU"
"qppCrZZL9X/8+Afp//6VT2K+6tF3HT73+S/j2979AIVoqCosKVKktY6Szj+T/90h/z/L/X/a"
"OPUKTQSHDbYUvIdngExbm1K2SsEWRbJ0srCAzGPx4qLyyKCj39REgYUl1kaEWWidFgk/dAbq"
"RBgcMwsmIYIL3oMSjpKtEZAYvzKxYiX6rkM9GqGpa6zaNrvKbd+japq8SQBsSjxYmOvlElXT"
"pDKOrsucHKtUivM5W9P2Pawx6HyqQRuCwcPnUcZAMY7iQ8iYjZLFymGJ8JHEIgZeVABO/Cs1"
"ZgKEEpBCK8Z+xFuRlLaPXBYSY84wOf6OLP++T3hSUVBIYVcC1q1VREpFgup6r2aVVtoo+sjH"
"vkj/60/8Dr34yjF2t2bY2rF49bU5Pvbki3TxfKO2ZzvBaK1Kq6NSCfWIMUZSCobDQgnxSBQQ"
"h43CJA+DdQcgk06lvCfLX+u/UPI/sadu+xH5iwcpoXAQBR5TPaBSA37VYO8T0R3f/6eNUxWW"
"ZyVBwAlSnixcxfiCFG4CSLVDHM/KTQytjiw6wibkzBkHtqoyYVljDwUcEukMRLDyGgvUSAaH"
"syY5pKANQ1fuU7g3Simsl0u4okBZluidQ79e5xqogGTxxIYJ6CvWwUkKl59FvkMbk65jjyDE"
"lOFywsURN1xwG75H4fgQC1EWoJcMlTyLUidif8+LMCsn3rCRX9dao2katBz2CCAqi1RCniGe"
"EXixU9yUf+j0LMS1aGSMUaYoVCRSFKNSitRk2qgvfvlV+vn3P0X/6oN/jNGowWjrPiyCQfA9"
"RtMGv/NEix/8zmtU1xM1BoIpS+gYEZSKw/txfY/g3AmPyTuHnhUDEcHxfAugK/IRWdPguTKz"
"+m6WP++rYXbRhQDZ/oJ/icKOrDxrriIIMULxdYGNlLwvyz4EeKXu6P4/bZwOuq9WsGWZ2Mes"
"PW+va7qdmzGMq4k26VzJyAjXw4j2Zwwi8ubJWp6FIFoa/F7wNa7v4YCT1f2cmcgu70DbDxnD"
"YAWnrd0UgzqHNcftZV2f8D5OVKknySKGgKKu0a1WeQMMuSWK3WHvPaqiwLrrYLVGy5tEFkDG"
"DWLM9XIyL3GwkEIIIO+z6zw/PKQbL72kFCsf1/e5rktquLQxOXskVq5r28wBuv07ZKOkFcoZ"
"I2vz8wfvCUQwhSVrCzJVRbYoSFe1qse1msxGqnU9/eTPPU7v/8Cn8dwVYDy5gGUoURiLUVNh"
"3QYU9QJufRPPvniLpuNbynqvVzdvAl0XfNuGvutiVsTOZXBacBcAUEoDhBPepjYmP1uMAcFt"
"MnHQGrOLF8NkZycKz+uulX/cMN+F36SZRwYgkztzaEqbzhau72FV4odZVliS9RODJUrUc/eF"
"O7X/TxunKixpodF5n6q2yxKhKCQjBMOov+JsjLSSiDGiappcHJqti/cnmLBDljBviKx8An9n"
"ZEGK+ypcmqHGlolznCkBkL0tN9D2Ur8k17SrVW7f4Z1DFAsAoBSOTNxkTnoOpbRSMGWJlrMm"
"EqtrY3JmKYTUfiPEmMBc7+G0RlkUaNmtlufIwoonU9xSUS/WR+lNdf1zjz1mn/jRH20OJlPY"
"w0NE50CKQPGka4/bDVfM+mjz9zi4bvB/AoGMhi4rmMkYxWyGcmsb1d4uyr09VPsHMAfncOnR"
"BwG1g9/41HP4qV96Ak8/vwb0DOfOTXDPxR10DvjB77oXz710hI88dYi6HGMVPT76mSUePf+K"
"uv6CV/jKC3b15S9j8fzzWL/+Ovr5Asjha+DnphMPEvl3Gj4Lv55CS6Ss2/4+Xlut8Lb/8r9Y"
"ffvf/ttt5I1/N8pflIH3PjPotdYwvLdy6AygW68TRcGYjNERpexhhgycg4+DUh5ssKU7vf9P"
"G6cqLNe2MGWZ+vD0fb4ZW5YwtCGIZfaxMfDsCscQUFZVztA453I1t1YKq9UKFffnCdikLEVZ"
"ycQBybIWnIJ1XZf7BwHIFfbDUgcpp/B9n3oPMZckfx57HjK6tk28mxASKbFt4ZRCU1WZ6ax5"
"ggVDscagqSqsaAOwxhixXi7hvcd4OkXX9ykcYEERETyHZ1KWIKAk2NIQLyJhfYt775xD6HuU"
"xkAXBUpj4n1K462XLuH83/27QNciesdb9WsN0US3a7XBiJtLSSmQ0SBTQNUVVFNDNSOoUfqx"
"swnGezN88vOv4p/+5MfxKx96BaPRCJcunMfBwXlMx4SibFA1W/jKG8Bzr3R48CLhpTdG0LbD"
"zfkahz3hvsuXsH/fPSje8x7Q4RHC8TFC2yYFAg51459ww6LD4uBXAhAJ7K9ATca49sEP4ulP"
"fxqNNtnTiHep/KuyzPCHcw4qpFpK8VykQFtwtSFlAjFmmCB3mFAK5ZB1zoa/KMs/l/3/9cbp"
"GJb3UAOuyAmwl+PqPBmyzkV5sDWQh9QDj4cokQLlvcLMHX5GXpMxZrZwtjAhoHcut+jI+JRY"
"w4G77dxXF7gKLmCMSYxfIqy7DlVRYLlYoLAWWiksVys0dZ0WtfcwDKBqa+G8zxZWsI/gfUp1"
"9z2W8znq0Qg9c15sWcIxrmatBaxNz601AnDi/nIafpC61loDPjGUhXG8VVUYXbuG+stfxuQ/"
"+GHA2OSN5LgOoIEn8icN8USy6wX2WCnV5pHWiNaCCotYlgi2RDVtQE2BWzeP8SM//Rje9+vP"
"oOsCHn14F9vbB+i9wa054ait8dq1DkVxDfvbEUftFnZHhIcvtejdFK4nfO6FJe6/h4DpAcYX"
"DYregboW6B0Q/EZZ/UkK6+sNDotWH/oQFs8+h1lRwvA0CNfrrpQ/f2cIIfGa4gBsD5si5CCR"
"hChcY1BydNLzOiKlcu8qYLPHRTH8ee3/rzVOzxJy/Bm8P+EBRb55DMIrsTwZ6CRCwR0JhxZG"
"AMoMUsrDyg0PXHAJCaVAMwN0A8BZDSfS+6/+nAEwnZUeZ0aIeTQAciMxzVkNudfDw0OMp1MU"
"WuP4+DiRApndO5lMsFyt0Epzt6JAYQw6ziD1bOGG4LEpS1it0Xv/VQs112uFTabPhZCATbHQ"
"nAHTRClDowjtv/k3cB95LFnoP2lfn4gBv8YgAhUWVFZAXYPGI9BsBpptQe3vARcvIhycx+6j"
"9+Ha6wv8yodfwI/97CdwvGwx2znAg5cnOFpqvPRGAJFD6xWsJdx/PqL1JTpXYFYeYdVV+EsP"
"1vjUZ1eYHxuMtMO3P/gaipevw16/Bvvii6Dnn0d8/XWExTwpLu+B4BE9h4e3hYZf/Ts/NAGh"
"d8kjUAoUmfIA3oB3ofwFk5TWNZI4AJBJoxnYln0hmBoD7MM9KviWXC+wSWC86Y7v/68zTudh"
"sdsoNySTSCEkgJfdQhpM2LBIsmMtrolOcEMA5Ja4cqNDoG64t5TWMDIhEtJJZsJvQFVRTJkJ"
"LZ91O7jJ/1dKYbVYJMvHGNdyuURZFOgkzOR7Wc7nKKoKk8kEt27dQlFVaFerRBFgDEOA777r"
"UJQlptMpOn5mwRuUtWjbFkvn0NQ11l2XFTGATVlGSFwWO8Aa5Hk1z7sMIgU9qtPrxpyQe966"
"vKfz1o4nryGtQWUBqhvQbAqazkBbM9DODmh3Dzh3gPKeiygv7uMDH30BP/kvHscf/tGLmG3v"
"473fchEeEzx/ZY0QIrSJMNrh0uwGQlBwbQPSBxjXEQ/fW+ILzwf85kcdHrk34MHzGsu5wfNX"
"O5y/p0I/naLY2YE6OgK6Ls1/t0Z0AeRDUlrBD+7/dmV123MhgqoayntQ3+cHj4xD3a3yv53C"
"MCzfyQmnrkvJAuJODhyuyndJ58/8nUTZk5XXhvSJO7H/TxunN/DjLxatLWPYQTFfO7gZeYC+"
"bZOLrFSykoO/hRAQBviTxPASx+eU/UCY8m1ZualBtTq/PhRAGCwGYeLGGKFjTBlFtaluFwuT"
"M2UxZcikKVvftlgvl9ja2kqlFlWVQEXmo3i+H0LqIiDtTUinTozDRen7HusBbyjfO1F2jYmt"
"kh4sBoAJe7KgJK/KiQlCBLTNqej0tzwdkJfSe4FICspqoKpAozHUbAu0uwO1twvs7iLu7WN8"
"7wWoc7t45uVj/L0f+QB+6/dfwLgCvuUdD6MZbeH5V1u07SE6V2Br3OIvnfsSKrNCXSpMm4De"
"G3iM8fzRW/Hk0/sg9HjnW0ocbCs8+3yL164Cz+yu8bYLI4zKEvV4DDWegJpDxNUK8B4Uk7Lh"
"4AHwA+M0fKjb12+ihW8Y2DFluiiEXJR7t8lfDUKrTE4VL0miECBjS1KWpLROYWtM3VANG3Np"
"5yMeUmQ8LCqV9/kd2/+njG+op7vEuGH4hbd5LcN/iQjR+696AMtV94G9IcmsiFs6ePI8eYYt"
"g0yshIjEGntYa5UV1fDeBy40iIDApDm36ZHdr9epFqzvYbXGsm0zv8dYi57vRVK/12/exO72"
"NtbMlB4+NxGha9vEiVGpoVtZ1ymdzcDlqGngiwKL+RxW2sKymy4KV8JWP1jUw/B3g9vJDz8z"
"KE0l85FuXwKbbBoBWkMVNnlV0yloewu0tws6OIe4swva28XsvvN4+ajHz/3sk/i/3vck6rLH"
"pYvn8eB953Dj5hyvvngDvbfYGS9xz/bLuGf3CJNxja3pDkb1GIUpoShg3a1xef5FfP61OV68"
"OoFrCzz1OYd7LxhsjzQ++2zA972jxXQ0xmg8hp5NQEdj0HIJcFlKUsJ+8ySe8a0hDwP01Zgd"
"xU2YHBO3DJyNvhvlL+HfMLMmYVsQ+ITXyDCxpNhjFAWYQz0OkeXz/ABW+XPZ/19nnI5hMecD"
"SM3SCIAZTNQJ7TgA/iIvDMMupQ+banm5jlQithE2zevz56pEVhyCfGL5JAiQWHuIa2USnnyG"
"Sk3TMptXXGeJ6/WmH3XhPbS1KMImlRxCgCnLtKhZqAUR3rh2Dfu7u5ktvW5bFMbAFEUqP+j7"
"BM4qlcFXseadc4gAqqbZtPZQCtH7zNAXIUpzs+ECk8zPMMRNskj/Zk/LFgnGGSYEFYEUh451"
"BWpGKQTc3QXt7QF7+6D9PYwvHcCPRvinv/Z5/Nz7H8czz97E3sFFPHL/Nuoi4PFnjjCrHUrj"
"8dZzb+De/UMcbJeYjC5hUtcYj8fYrmvUiqAi0HqPo+0O53Zv4Nc+1eHmfILLBwWid/jj51d4"
"7WqPD/3hHH/nhyZYuwblZII4GgFVBayWadMI/i7PG4EYOMMG2qzBnAjlzBvrtDTNie3dA3e1"
"/GUND0/oCaxQY4z5NB/Xbw7KkH0m/CcJ0YL3iIKJiZLj1/+89v/XGqeHhGLZBpqZiHIRqmUe"
"COFkXZA2BhobjTvsOilaNw5cdXlQABvy2W2bMvhN3x9TFCnDI9k/VnDCgs9cE+8zzkDYxMku"
"BKi+R+89mqbJfJBuvUZRlug4s5NbktR1ZvY6ziYdHx+j7XvMZjNszWZo2xZHR0fQWmM0GmX3"
"3HBXgMDWROgWVdNAsj8EwLFFlQyWzGluWicHGgguwJst2ybezEPQncwgPNQaZC1QlqCmAo0n"
"Cava3QXOHYD29zG+dA6YjPCJz7+KH/2ZX8cfPv06ujjBD33PI7g+r/HKGyt0XYum0Ngf38RD"
"567h0p7B9ngfu80EO1YjdD1KRJzTCmMiUPDojcZ2Oca4bvCf/tBN/PonjvCRzxCs0eh7j+A7"
"fPqZQyx+8CKaokBoRtB1DZQlYG2y2PJMgkMBICSjJUorZTxZS2f8gNfXYF17Xid3o/wFwJbO"
"B0O8Vtj7QneQxJVSibHuWOmA93NGCcKGOJrnSbJ/d3j/f71xegM/jl8ds4FtWaLnljPL1Qpt"
"36MZAGcAcgkFcQhm2erIhCxXq5xpGWYMhrVq4v4CKWa3VbVhM6vEK1l3XWIkM84mcTyQygPE"
"ElXsdkt8TpSya13foyyKzCHpGADtui5ldWLMvX7kuTop2WBF2XCL3RAjtmYzlFWFmzduZP6X"
"CEGySnJfxlqsl8t0WstgoYShl8gKe2hFM87xNYQ7YDMAiImaUJUgW6R/R2NgPAKNUgZQ7eyg"
"395Fcek8Rhd28NyrN/C///jj+O3HPgOlazz8wCVs7ZzDS29cR9+vcf2oxLmZw3vvfwEHs4id"
"2Q526wn2rcZ2iLDzBa7dugkQUNsCIyLoGOC1Rq0UyqKEVvv49771Jl545Sa+8HyPGAmVXuO9"
"by2xXq0RVYGgCNAWZDSiNoA27F1H9hj9JvFJEh4yZpU1d56G/EsGnO9m+QsQLgaLjbNwpZRK"
"2Ur5btf3qKxF3/epFQxzwHK7ZL5uzSRTOaOQ+D13cv+fNk69wrCXMhqNQJS6ME4mE8znc+zt"
"7MBxJsD3qdm8nDoSmBQnNypV77PZLH+meFIC7EsIJ6drxBCSqy1C59SqNAkLIXUqzATSts3N"
"/5bLZT7YUvCHIXiv2XLGGBMbmHsYgTEIaa1rjUHbtqlvj/coGGiVwwZWqxVG4zH6tsUb16+j"
"Lgrs7e6iY2vaD4p2hcBqubslgFzy4MXdthaerahYVwF9RbgZLwhfg6fAODxIgYyBqmtgOoOa"
"TUGzKTCdgSYTYGsb2NnG9n3ncX3l8BM/+wTe96tP4uqNNe6/7xIunZ/i+pHC8y9ewevHMzyw"
"dwPf/dbXcf/eMbanW9iZbGG/qrGHiFnfo14u4Y6P8PrVq/BNAzWZwGgNDUCzoTEmgKoCttzH"
"f/7DGv/8176CdevwzocI3/ZIDVp00NaD2hYI3ONb69SrPYe8G8wj66SIxNk6MQ8bT0u8zhyq"
"EEEzgfKuk7+EXBxpiGKSVjdyrRzt5ZlnJtnKTIsQJ4FJqgghdyGVvvZpWu/c/j9tfENZQiGb"
"rTkrsXQulTX0fS5Mbvse5Bxq7pUzzPpJZ0LvXGomxoWs+TQNdhclnUq0aVLWjEYpGxJCPqxy"
"3XUAu6MxBMxXK4xGowScrtdpkyiuleLJyZkWdj27EE6ceosY0a1WWVjBeywHRz0RbdqP5FIO"
"Zh3LQQQ1M/Ffv3oV5/b304LXm2r0W4eH2JrNcPPWLVR1nTouunRAgSbu7z0ANomtE8SjChvW"
"M2kNpXIecJAB5FDI2nRI5XgM2t6G2tsD7e2DdncQpzPorSnKvS2oSYP3/+6X8BP//OP4wvNH"
"GE/28O/+wAN47grhuVfWWKw8mqrEdzz0Ii5vH+LyfolRcxkXplPsFQbbMWK8WqFcLqEXcyyP"
"j9EfHgKc3YPSiVoQAlSfsMSpMSBTgPZ38Xd+WGG1vAkbPEYosO06VGu0EMmuAAAgAElEQVQP"
"fTwH1olwCK0BazaKJ+NYw9AQAEXmaQ3ToBhcieyd9iHAMpZ0t8lf6kWHe0fC19vJ1/JsCgAZ"
"g8ilNiFs+m3FyG2nB5Uk7WqFbr1G1TR3dP+fNr4hpvuy71PrjeUSBbvQvu95c9CJ03JlCLjp"
"Q4AH0NR1bhPj+h4lt+3IoZ/66mZpsgsD4w+5zoktjLE2g50iBMtKzXuPsiiSSzsQoqLBySo8"
"ur6Hcg7loGkbkPpyt7woPQP3cny5F6GHVH4gKfHFYgGlFN64dg0729sIAAoGWieTCQBgPJnk"
"tiTiAUZWnpnARwTD/bqHWVMBRE9gDUCShSKgKKBGTcKmZlPQ1hbU3j7o3D7o3Hn0s22MDrZR"
"zGo8/rkr+N9++nfx2c89i3K0g+98zwPQ5Q7+4OmrqMsIFwu868EjHIxew8GMsDPdx+54hoOm"
"wo42mHiPul3DrlYwqxXiugW6Dr7rEuCtNcia9P+QOFS671EpORxBoZqNcKwi4nqNcQyY9g7V"
"eg3Vdwmr4gQBImf+WDsTh363s7ASED9c/OKJbWgvxqQWLrkY/C6TfyY/y5pmD3SI92amPFLk"
"seIOEbIH5O9SSO36HuvVKpFnB9HHnd7/p41v6FzCGAKOj48xnkyyi7tq29S4n8M3Uiqf3Nq1"
"LYJKdVg+JKZunizWrIgxa1pZSJkFO7j5GGNeEB1T/EklXsl8sUinpPAhliI81/co2AqWEj46"
"lw7nDIkqIf2+1eC+JFSUfkERqX6s4x7WTV0jsnAF5/Bh06N7Pp+nlLVLvYwOj44wnU5xfHyc"
"BGptxgma0ShliooCq7bdVMWb1Kdb5iF3ljAGFDZV8SKbvDG1BpUVFNMT1O4uaHsHtLMD7O+D"
"zp+D2t/D9oUdfOmlI/zUz3wUH/zQZ6CLBt/3PW/Dcm1x88jh1ZeuYjyZ4vzkdTxwcAXTYo6t"
"yTb2JzMcNA12C4uZVmj6HsW6hV2tQOsW1PcIiACp5FVpk3Aza1OygwuL4T1016EMAYoUCu8w"
"VQRYCxsCCh1QEEDBQ/Wpdi5KrR0/s/iVyZnceJkb2D2x4uW4d7kWEbnxHeKmQ8HdJv/N1oxZ"
"aeTfeS/FGNH3PdrVKhdsS7mROAUhhKyQC8bbAntA3nv4GFFzFHOn9v9p4xs6l7CqaywXi8zp"
"WCyXqfd1CIiDqvN+IHjEiLbv8031PBmWSWTDm80ZAtowhsVllT5IXirKkVx4VRQoYjyxgXNs"
"TISirlNFedtmz0yxOy34Q+RJKwWI5NYZpihS47J+0xmyKgr0XQfLhaBFUcA7h8IY+BBwxIvT"
"9316fwhYrlaJk+McSrNp8xK8x6rr4JzDZDqFtTZzUobkRbGu4EwSzKZ7o7jbubxkNGZFtQc6"
"2Ic6dw60t4e4tYPi/D6qC7tYKIsf/4Un8P984I/w2i2Dhx84h+lkgtbVeOHVBTw07rvUYL/6"
"Ei5tH2I6arC/fR/OsaLaJoVRDKjbFrbtoNo1qG0R+z7djtZQ1iYP1qQwjqzlXkusWrwH9S4x"
"pZWGUYRQlkDTQGkLbXT62+EtRG2ggge6HiH4TObMyum20HBDeUnUhijzx4OY1pC7dHKYctfJ"
"fzCEQwWk7iSigF1I5yFaa2HipmdXx99RMhBuZA+FDYFUPEeBUe7k/j9tnB4SOoc2RkynU1y/"
"cQNVVWE6m23+zmCbKBcwaziw5ZFDHDO4lvEFyqUGw/Dv9vRzUVX54IBSgHbnoLzP3UQLxhza"
"9Ro1nyrccgZms1gpnSzC1qNnULRgDGLY/VCKM01RZExMYnbN/BY9WDyKKFvS8XgMrTWOl0s0"
"dX2CbHf1jTdw8eJFzI+PYYsinbiyXKIejVAohRWzh/Ww3UncHEowJAqKRTKTCfT2NtTuDvTB"
"AdT5C1AXL4IuXEA3nWL7nn2sSONXPvYq/tkvfxzPv9rjXX95HzvnG8xm2/DO4eUr1/HAffuo"
"43PY0V/EzqzB3vZFHMy2cFAV2FYa4xBQ9x0K56D7HtR2qUBZwFIyid9VeqCwiedlbeKCIYLI"
"JQycCJEApS2oKBLPqiwQihKuKNDMRmgXLcLVOoWR6xVouUwe2sCDGFIWuGQww3fyMiDhYcjK"
"TXoy9c7BAHel/MUTCT7VIopRV1rD8/0LFULKeww2nlfHe6DrezSjEbxz6PoeNiRqx/DI+Du9"
"/08bpzfw63sUAJbrNWp2iY1SWHFNUuZYxEERMpiIZy0i0tlsxKCeZEIUZ1CATS9o6eUtgDsR"
"5cp0Ulw8ylo+xIh126K0Fh0zlVUIuWWHgKvapNM8QhxUi3sPRZQXjID0kbEWOTJpzSCsZwpF"
"xW1zC16shrOQXddhvVqh4dDV9X0ORcHAf7taYWs2S2HDZIIVYx0A0v9N6pYp3QOEa6U4XS3x"
"PvN2iJSKdjQiv7cPe+E89ME+1MVLUJcvgS5dBvb3sHNxC5946gp+6deexGNPXMFbHjqP++61"
"+NQzBS5d3MZkVqFqPFRh0B4/i3tnT2H/4F5c3t3HuckYu2WBaQQa71CyotJ9D/Q9yDPDm0mJ"
"iewUQMElZcW4SvawGHeKxCz9qgSNx8B4jFg3aLbGMDtjPPXpV3Df+S1YCnDzOejwCDieA20H"
"GnCxBlTYE8oqDtZ9viIAiXWKLH/ECGvt3Sl/9qqGvpYkY1zfY9V1sEqhHo/R8jmLopBNUeRS"
"Gde2+Zgta1JH1Mi0iJxBv8P7/7RxOtOdtbRbrVA1Dawx6H06LbfnrIFoyhhjzhhI75vJbJb7"
"7+SfQRyu+AHF/ZbPyinbmOqahF/iQupXFAEUanPqR991GzDU++S6O5f7bNuiwHK1QmFMCgGM"
"SdaBP7PgrGLJRa1lXSchcmhqxFrpTdsMSTsLbwVAZgfLsUfCMi7YYiqkpoGGwcvgfcYsEBOv"
"xzJIK8+c69uI4L2nEAJZa6meTInuuwfm8mWoi5cQL19C9cC9sAd7uLZc4f/88Q/jgx95GUYB"
"Dz70MK7ctLhyU+PbvmULF89v48OP30BTW1zcVdhqX8Du3iXcd3AO90wn2LUWEwBl38P0PXTX"
"gbhrAqVJ36ShhWgeY8oMFgWiMYjGpuxeREoIgDk6WoOqGhiN4CdTjM/v4JmvXMP//D/+Mj7+"
"xBfxX/2t78N//Te+BRg1sJMx6GgEWq8BDodPAO0DsmwUx2sYKoo3FkImkJZFkfGgu07+4oUN"
"WOnCYRKqQ10U2VALGD6sR5TSGm1t6h/f97kTqTT6M6wo7/T+/+YVltZYdR1q5nv0XZcYs22b"
"aoPUpqdQy4dG1lWVFkrc9Mu57ejy3DYDSN7SdDKBZQ+oB6CsxXqxSBPnPYqyRLtapZ7kq1VS"
"YuLGy+fwIlRIRlWyFiL0kgl4ADLXJpP2IvNz+B5kM8qicCGkDUOEAptTWSomrgpWImFL8B6d"
"c9C8WGTBibcYnMt9yOvRCMvFAhrA0dERqqZJpxeXZU40KGuJP4cIUMZa1FszZR99FOryPVD3"
"XML0wXvw2sLjsQ99Dv/o55/CqDGYznbR+glevqpx76UG7333Ft776B5+/SNHeOByhYMdjTdu"
"rGGqA+xtR+xvbWG3LDALAVXvYJxLRbiRe45rA+i4cV8G0RlCBDkPMjbhWcYw0x4gxyG/0iCj"
"E3vdWoz3ZnjfB57C//D3fgl1ZVHYiF94/xP46999L7a0xng8RjmdAut18uwGvb4yMsZh4aBQ"
"chNhkE94VuCEAF+07vuEid4t8jcm467DH+Dk4SBDvqEpikz4PDEG79fGpCwse2iSoczK2Jg7"
"vv+/3jhdYVkL1XWJSMcgo/A1uvUazXiMxfFxag5W11gsFqmHdVVhtV5ja2sLQW9awQjpbZip"
"KIsC8/k8N0eryxLaGIwmEzjnMitZW4vlYpHIfF068VawBOcT2VDc0aG3BiCfVutDQF1VMEAG"
"9wXs1ww6GmNQaI1+kKXRDCQG8CEWjF945zJJsLQWpizTBuAFJq593koCltPm6PTlfI56PIbr"
"utQjfL1OBwaUpbTbpeA949qatDGKjKF6d0+Ft70du4/cj2XR4Bd/+3P40Mefw+N/dBMwU5hm"
"H0cLg3e9fYa2DXj4vgP89e+/B194scWiW8C1CoXpsVxF2POPoKhfQtU0qK1F2Xcw3kMR9zDS"
"GqR0DruGYVjWDTHNC9m0CaJl4F2wKwBQiZsFzbQMrfD5P34RN48dvvXdj+DWAjg6WuB3Pvks"
"/up7LkAXJcx4BL0cA4KZZSL7AO+McUAO3XCzok8F06QDSFGuabMqnS94N8i/Z/BaQHvBmG6v"
"hwWYiMpeofInqz8GN5G+n+9fJk1aSYnSlgMv7uT+P22cjmGt12g41Mqkza7DqGlSecxqtTl9"
"BOnAyK5tsVqvUVdVehghkgE57rXWolAq12xpInTOYXd3N58Zd/PGDYzGY0QkTowPAdPZLB3P"
"zVp9wYz2pmlSXM5uqiwI+M0RQ9raFLPzdc57bG9vY3F8nEPQXCSN1CJEOjqKyZbz4uSYpJLj"
"emlj49oWfTx5gMDQBZZFm1tsOJfY04tFyjKFkLCJGGUOiYyhGCORUoq0VgFQpVJq5+K+jjtj"
"/P4zV/FP3vdBfPZLR1isa+zsX0AzmuG737OLm8eEv/Luy7C2woVz2/Ah4rc++ixm4xpqtoeH"
"LnRA/wY+/pRFERs8en9AryvAJuUkSoDU5gi1E3h33OBGFCLIuURa1RqkbfKwgNw9Qco2oFKf"
"b7+a49L5XTx0/yVAFbh4oHDjMOC3P/YavuvtUyhVohmNoMeJPhF7l7AyAIHThUQDRhanCgeU"
"2nRzIeTTpCVrFWN808u/ZOMt2eEYY+6/rlQ6Ak0iiXyiDitUUWYSrg05XrkxgHiEnEzo+x6W"
"M6BAwu/u5P4/bZyqsKqmwXq5xLrrMG6aRCBbrTbxNYPX3qfm8yQuML9/qN0ja36hJ0AEz+5j"
"U1W4evUqRrwISCksFwsAgOPUsyx4ab4v59KBKBenDosrJVWbFwoLS1GqJ1xx2GnZaqwWCyzX"
"a4zHY4zZw8ukvZi4O8KOFgxCahOl9Q3CphXOarHIR5fJIhZmct68PM+LxQIxRtSjUQIqlaJV"
"21JZVcpaqyKg2rbX00mlQF498ZkX6Rf/9eP4yOOv4Gg9wqWLB3j72/exahVeuqrw4SdbjMcj"
"XLy0wktXFvj45w/x8KUF/vIjF/GWy4Q/eLrFk08voELEG7cMfudGhXe/ZY33PDBFhxK2USBS"
"gFaA85l8efvYhIQB5PqUHeQ0fPKwCHBJRlFRagDDQP2twyXuOT/Cwf4Mz77cw9gCpGu88IbH"
"7z15A//+t5/HwtfQkzFM14Jcx1Y6R4ZAz4RS0EZ5UmRP0COCkvenNUixZ8NK800tfyKslkvY"
"qko91zlDLokpOdZMuFEZAtE6JQ4kQynfGzedRkVxyIlBPgQUMZ04bVghSkuYO7n/TxunKqz5"
"8TGqssTOeIzj42M45zAejdKXMbbUdR2MMSibBovj4/TwxmC5WmXmuWh5WxQoOEYWMLPrexit"
"0XYdZtNp7tET2zaDfkJU69drNJMJNLvgvfeZY+K9hzWbtrGSCYo8Wa7vc2mOUB7E45ofH6cM"
"Blup5XKZ4+2cneHFJaGlZ8sKwT544mVRBsbYNr2reDOpzdHkwbmcBpZ2IfPjY0wmEzjvs2cV"
"QtRak97e3VZPPfUF+ulf+Ch99PEvQ9ld1M0e3v3wNrpe4+XXljhuR9BG452PlHj92hpP/NEK"
"IQY01QzvefsBKrPG7z7e4vkrCn3foO+A6djjlVcLfPyzN/CWSyNU1TaqSkETQFqB+tT0LsZh"
"R1dg6MVQCKmrJyssMjrjI4nOMCAH8hFd3kfs75RYdQpdKNH2WyACVm6Bzz63xve+aw1rRxiN"
"J9Ad0ymcB2LY1DTLfUjfK/G8onAePGJMTQ9lLRS2ABX2TS1/6UQS2euS+4vs0cSQWORyBDxJ"
"CDtQEEprQG2aAOaQjPdjwYdPaFa8CoMwEYklcCf3/2njVIU1mc1weHiYOiNYi4o1t/zeMqXA"
"MUlte2sLi+USy9Uqu425YJetQERKs/Yu1XLVnC4mAEs+4y20LUZcR9hyStlxdqLjLMtysciY"
"13g8xlrSozzhxlpYrfMpJdlFF7DPD9phsEAdu7aaKGdZAE4bGwPFm7JnUDJiY0X0AHiNIZ0E"
"IpX1YfC34eIXEh0A1KMR1lz+sFgsaDweI6ZT3ykgquC8+qmf/Jfqp37+EyAzxlseugerdY/x"
"eIRX3ujgo8HBdsCD545xc7WFzzxD2Ntp0NQejzywhRdfC3jh5TmOl4RXb1QACD50KC2w0zgc"
"zGrcWE/wyrU5mgtjNHUNzVgPdQnDIOmnPlBc2d0JAeiLpKR0wrHIcMvmgZcbYgBIgZRGCMD+"
"dgUyY7TO4J5zQBcrXDqw+KPnb+GpL8/xA28foaUGZtJBt2uQ61Ohc9jQRjd6i0AkrzPni5Bb"
"+mrOmoUYcmPAN6P858fHGI3HOWyTziOWMTLiz2p5vwz7et2eiTuRfVObTr5DSoJ0XJUQWPpw"
"lU1zR/f/aeP0Y77YFWzX63SYJWviqigwXy4xbppMIej71LHRWAvVtui7LqWMJRsxcKHlSCBr"
"LVYy6RLXx1SMOV8sMKprGO/hGCwPzuU0qdyHcw7Hx8dYdx0m0+lXuZaCQTgpedA6lxHICb5F"
"VYGIUu9ua3OpxXgyyQtQhC8hgAdQsjs8XLiyQPJJvdgcOjnENkSYMnpmSUv7ka7ryBQFRaVo"
"1DT0o//oX9H/8vd/Fe94z1sQ0OArr64wGRl4dwNvOdfjvv0VSt0BpHCxdnh47xKu03tQ2hU+"
"+8cEHxSuz0foQ42yABADzu+nUGO/GaH3I7z+Wovf+tQNXP5rxzjSJcq6yG1+ybnbWhMPKQYc"
"dnUdyFpErVJGMXtYPoP3uYjXaBAIijS2pwbKFFiGHVzYWeOVqwakGnz0M3N8y0MzlNUE5XgM"
"3fWA84DnHu9Zxqw7aZi2TDcXiRjD0llOtiwTteVNKn/LfeXBeA8xruT6HoG9s8IYtEAuj5Hu"
"nQLOF1WFfpAlHIZnJ7iNMUJjc1q16/vMaYwx3vH9//XGqQpLyF3W2sQw5w+W8Ms5B88dFYP3"
"aJ1LBaKDJmhinYRlrgaC67ruRL+qzGcJIWVqui53SOwlM6jT8dgrbFjJAFAYg/nxMVbsAnd9"
"j54XzYiPahJy2mI+R+8calm4zqGqKjSTSeZvNU2zqYKXlK9SuQTI9z1WbQurdcrmcCaybprc"
"vlmeJTiHajSC4xBEGM3y+zCTJKcHh5h4LUk99PHgYBu6aXDl9QUunLd454MKD1zosFW3KEyB"
"wk5RFyXKwiKQhg9H+PQLn8PvP30fdBER9QSjUY/9WcTRusb9l2ooFTEbKTz9zBwqLtD1AZ/8"
"gsG7Hj7E9717jGUwMEViXsvpNeT9BtweKofgE3vdMGnSqORpqYQXgZWVihGBCFAahY547mUH"
"pQwO5xaqDFi5CbYmS9zCCJ//SosvfGWO2aNjNPUIduqhXQ/qO8D1yVOSTTjcmFEoD6zAfAC0"
"Slk+7zFfrRCAN6385T2yJ6y18KJw2PhSUaBsmpQBd5sDhKVjqefvABL04UPIDfeCT+RpUdS5"
"n5x4Way87vT+P22c3g9L63xCSMVdF4FU0qAGhLOeewqJ5hY3t5SeQ6tVVhZDQWp2JzMQxyGG"
"5u9dLhYpFGThxLBpXyuEt65tE+dFpeOvJU09qesE8klNGBIRVhuDyM/RO4eKS3xanmAAJ6wh"
"EaEuSxweHmbCnCkKxLZFxc8nYH/JFlIssWAGxhg45tQ4zj4pvTkzDoPFmP5JFEilVETw8fr1"
"o/hD3/2W8D/9dz+gb928ifsvqkhuAaw8jcstbFUFJtai4RAuFhZLexGzt8/x5TeO8MI1g9GI"
"4PsG73i4wWu3gO0thU8/fYyD2QKtL1HpDlFv46g1+MAnr+PRBw5R7FSorIVmpUWisCRFx+QC"
"kuyUbZNXpTSgTKI4MCUgdWxINIIQASoUJrXCx379NXzu2Q6PPDjCjXnE7vYIh4fplOVlH/D+"
"3zvCOx9ZYl7soByPoPoOqu0Qu+6El5WnT2hiksYkStdpk8tqNHsWb2b5C8dJQP+CW81ICxpF"
"6VDWfPJMPHmCjrid4TYlIR6uY4Ne2M1J6vm9fP0d3/+n6aPTLlBaYzGfo26adCQSx7iSJRCC"
"Wddz50Egx8Nl0+DWzZupET+zbzOBjF+TmikKIT9I51xq+qY1dnd2AACrts1dG13XQXPBqBDr"
"NFsh4b7IhJ0osJQsD5f4yNHZOQvEC02a5ccYcyX+uutQVBW69Rq2KBBcKprt2BUeq8SULqoK"
"br1Oi4DpE+KI986lolm21LdX3scQhgdKRiIiH0JUREERESlNf+Ovvh03blylw+NlPL56A/Xq"
"dX0Aj0nXolmtUPikuGMzQjubYTbdxt/83jn+5cev4/x5A1PNsOpK9Os5Xnv1Ju4/UJivFEAW"
"t5YTBE+4sN/i8y8U+NTTh9j69gnKokxpaFMmJrv3Ob0PDKgPMYBsAWUTw11Zm0pzRGHxz7r3"
"aMYFyET841/+DP7Jr7yAS+e2ceVwAhdKXD8ucGsxQVkuURqLq4fAb37yEP/RvzPBAhXMeJwa"
"/K3XqcYw+A0H64TnJ+Eh37PR+dQcH+ObXf7JaA0UWcLoYj66q+M9qDnr7Zzb4Ip8rQDeooxk"
"/xFRqqccUA6kS4OE+dJZ4U7v/683Tsew+h4lpzbr0Qh922ZATrJxhTGIoky47sl5j5Io86oE"
"4D5x6otocU4pS42U7jZntc15sobM3J5j7FHT5GLNGGP2oLzWqa1G16HijEa2rGHTYF9pDcXu"
"tLSaGWaF2tUqufrc8UGwj46zl0Tp9Nps2diiIsaT9VXYuLtKaxS8EKUUQqwbBgLj1yLze6LS"
"OhTWUlM3Mcz2aLJF8RhE8Utf1HtGYxICbNencElpYDZFSYCqSjywO8H3v7PHY0+vUFbX8Pmv"
"9HjHgx2ianBjXuLGvGYgnNCUBmNLmC/H+NXfv4p3vfUIVTVBXZYw2uTThxEiA/AQIlTynsoS"
"pqmhqgpKiqCVRowJeA4xYrZV4yuvXMeP/exH8cHHrsDUF3BjOYIuDIqywNWbAaUlnNvxuHqr"
"xHw5xe/9wXX8lbfdwiPnD9BWNXTTQDUN0LaAk97jfDtxo7jSxiXAp8JcU/IhESEdLvFmlr94"
"R5D3x5jZ50DKDkZh5tNtxcQsF3kW2WtDpXB75lBItIrvR/CsO7n/TxunH/PFrmPBh02WzMQt"
"uBK9Xa9TfB24Olsl9q7lPkiZsMYWxLN1lhtvua+QlAUIf8VL2pOP9waSxl+tVohIXSEBoLQW"
"LoTMRQG3n62ZSKqJsAZSVksplDwp0gJXrCbxs7Zc3lCWJdZdh9F4jA6DI454sQ1b6BZsdWQh"
"5cZsbL2NtSmMYAFrY3LRaQwnD6SM4omwwrJao+u6GIFQFgUwGpHVGqqqIl59Va+uXEFtDUof"
"krLyIbV3cT2UsZiMRuiqPbz7AcIHP3YNV/0SW43Bazca9CjRhwrWBDy4P0fvAl56o0HXO5zb"
"6fHCyzV+51M38Df/2gzHrkJR2MR8F+A8SuYLyZkJEaquUE4mMONxWojGwHNrmaoyAAX8xm89"
"ib//j5/Ac6/02Nk9j62tGbStcLQkxEiwBqjKgDeOG+xObiH4AvMF4X2/eRX//X8ygdUVirqG"
"HTWpY0QQj09OVYqZiZ+wMwUiQDUNyjJxmpDoIvHNLH+lUn+ubr1O/bK4pUu7Wm04iTGewIXk"
"M8EKLDKOJUpJDk+NIWRKgjzXkOAp5TR3ev9/0wqr41hfJuXo8BDNaISjw0OU1qKQeiLv0Xuf"
"PBiktOWaewEZY1CVZUp/clYGQHZlpV6rdw5FjKmfELuPhA1JD9amODqEXP/V9n0SqPf5lFxp"
"5mesRcfaPS82Y3K3RaN15roILibWodY6NSCLqbPpsm1Tdwdm/q9Xqw0gWlUnwVSxgAy8yuk9"
"UqQtgivKEo5T69JSVha6pIKJCMbaELwnANEYQ0VZorA2YLmg8MorUFUKYRA4e1cWIK1SL/ej"
"KSbjCcLWCP/h963xMx9wUL4FdSWapsPB1ONgy+FwWUFRwM4s4LWrDWZVjXvPzfH/Pq7xjkeu"
"4b1vH2PpbMIpFIE012XGTXdYioCOAaPtbdjJJIHKMcLqZD0/9enn8GP/7BN44jMvo41TXL58"
"Hy5f2MayJbxyrcOosXDBYW+2gPcKC29xvCpAWKPDNn7vqWv4zne8jh9892XYosJ4MoHxKZyI"
"4bYTw5UGFVagHWiXcFAzGhFiJK01FUWBNsk/vlnlb7jcJ/hhpwqe+hAyPhiwoTRIgbPU09Jt"
"r0uEoa3NuFgUkN9v+scDyAev3qn9/00rLCBl33qXuiiWRYEl9/pZrlawkgUYxN1R8AHnsLW1"
"lTCDgdttpBVtCCjrOnVhcC6RE+VzwL2LXCo4nTNpTQqYlVIo+L3GWti0ieFDQMsgPAGp86ja"
"HPMd2dPKJ6XEDUcrxohKiHRaA5yuRUyEt47b2cQQMgC5HJyqIiRAz1hF5IVGQD4dWCmFno3A"
"cL5ylkawNrZuMYQohwvExB6FTeB17BeL4G7eQGxGgEm1flHrBGGs18B8AXV0hGprhn7c4Fsf"
"HuOB88d49rVbGFUau6Meq26GWwuL3ekaL16dYG98jKPa4KgtcWHvPFS5wKe/fIiH7rmJ0lQo"
"jYHCxqWPIJCSbBxBEzBmhaWUwmRc4OhoiX/w07+JX/zXn8XNZYHZ9gW88/59+Kjx8pU5ls7C"
"2gLrXsOaHldulGiKFuNijnVLWDuNEDR2p8BvPHYV73zLFvRoinI0hg4ByoUB3SJy99XIveAt"
"ojbQiGiWW7Db2wpEWmtNyphAfR+1tYjexzep/PPhEgKeQxQiY15Dr0gymcJ8F88mUykGSmFI"
"OPXO5SygZgdB8b6/k/v/m1ZYgvIX3DVBaosCKw+ZiGGzvK7vc8x1RBIAACAASURBVObBcaim"
"FJ8byO6u1mlzrebzrDDkRA+J/4WPFbxP2pzTqFprlExKE9hSWs+4roNSCiN2y4cLQt6rgNT2"
"ljldhnGK3jmMJpN8bc8V6aBUnZ7dYQb2bVnCcjpZW4t2uQQRJfIrA64S3go/xhQFOi436roO"
"BTcVlHkcFmwDiVYSvIcPIdZNkyrj1+vU4imk/kXR9cyOVLm9CroecbUCHR/DHB2hHk8w3R7j"
"P/7eJf7h+49RU4HD+Q6qeo2Xru5he9xif3KEzhXQpoAyO/C0xt5Wj4/8YYv7Dq7hB79rhpWz"
"sCbNYdTcSxxMkOSQZLI1w87eFopC48O//zn8g5/5PXzqszcx297D2x49h7Iscf1mi1sLgo8F"
"xk2PvfoKZvZVTKslQAWutffi2WuXEKNCXXRoqg4maHzppTl+47FX8J/98ARLlLDjEahjmoNP"
"B4JSDKmGsa5ATYNY1dBaYewcyksXlSlL063XqaDemGCJYrdavdnlD5H/mj2h3F6GcTA5Hk+b"
"1I9MMDLhg0mXCAAZn5IwUhuTS48kstGcWLiT+/+bVlhSLqC0Tr2plULHjfjFdfUclgmoVzGP"
"BCy8pq5zvdHx8THGUu3NDdLEVRblIv2r4Fwmtwk4Z6tqA2ry5Eo8LNlDJRuIPSrJAoFSgWXk"
"UgANoF0u0+dKkSlX198eTyv+fCllCCGg4w6n3WqVF5B3LvF3Bm6+504TluvBpKeQ4eSAZDe/"
"qoiVCGoQzopCl1YjOX/PP7mODpy6blvQfA51eIRiMkFVV3j08gTf9bYOH/7MAju7DTQIB5Ob"
"uHlsQHqMdzy6g++ZRHz2mSNcuXIDpIGLF/fw9Iu38LaHr0OfL1FaA6UNiNLhATAGURH63qOs"
"CzTjCl96/gr+4T/7FH77sS/C0wgPPfQW7O2McetoiTeuO8zXFjuTHiN7HW89eBG7kw6TyRhN"
"vcXW+SouvrbA41/eh40Rq46w6hX2ZoT3//aLeNvD2/j+d1xG5wvougItq2Shk2sOqmtgNktH"
"mU0n2Luwiy2l4CbbqlUwAYiaKHZtGxUR2bKMd438OYEkBGOtdTpoom1hjcmnSYUYUbAyExKn"
"1Xw6DjsExOB/BuqRcGFRdHd6/3/TCsuH1LdZ2K2iBbvVKgNluYdO5MJP4XywZ+Qdn0hChJ2d"
"HRwfHsIxZiBZC0nZghg45QkHu5Ka8QWxRpnfwpOfMy3yXqSMRT4dmVKdFBj7Ehd22FJEcUjp"
"/j/q3izIsiwr0/v23me+k/v1ITyGjIwcKwdqoqCgqqhuxFQmGkyYJDPJWg964En9qDfJTDJD"
"kslM6gce6JbUtEmgbiQVjZqWgBZQdAFFQUENSVZmZWZVzlNkRmREuIf7Hc+4tx72cI9HAZ5m"
"1RZkHDMP9/Dx3LPPXmetf/3r/10ZmuS5DWiuRS3ZtJ/9hTbaSjQDlKsVrdanjCWVW1BvboCw"
"w6KNV7M0xg6bui6Rf92m95p8O9xfHw++9qVsrYAehIlgY2w3ryoRiwXRfE42HJJGY376kwXP"
"vrpA6Nu8cyPlYK+hGG7z4JUJb19bc+vmHINkurPDE1c6vvXtQ7727Rn37d7iZ6djlnXq3FPE"
"ZoBXwHiac/XqIf/sX/wFv/avvsnVW5L777vMpfNbrEvNG1eXVI1EKsN92zd5cPca9+8vGYz2"
"2Znusj2ZkOc5kYooq4r7Lx+xMznmt/5MIiWM0prbJx1V1XB8MrdqBkbaQerISTJjrCzzcATb"
"U5L7LlANx/zm11/l2W+/xdvXDuU/+PmfFJcv7uu6aaWS0hgwkVJCxrG5J9bfZTtRkoSZPCEl"
"scvmlosFWZoyHI38EL3VUnelqB/z8a42voSL4xjlHvK+yrnr+/+M430RR310T/Lccl+ElTcV"
"QoS6XUkZNIICYNx1QTKicyL9SkoyJ8JXjEahW6Kk3NT1vb/vyWv+aRUCln/z8se+XevTTreh"
"S9ed8UL/sNERgg22ULvWtf/etm3D3+rL1fj2s7jj4kopSbIMWdes12s6Y8hcWuxlbTwW0Tgs"
"pOs668QbbVxEfPtbuTKjc21lL/1stGY1n5M4UmxIsLTByB7b275QaBpYLRGzOcl4TJEXXNoZ"
"86MfX/GFvzQ8fkXSkPPkQ5J3rl8jiwXLKufKhYxWw58+dZVIaC5fHPPajTVvXbtFfLkgyzMb"
"G6QgSyXadPz6b36FX/rVr/Lcyyu2t7f4zCcvsSw17x1WHJ20tCazgWrvXc5vN+xOJ0x3H2Rv"
"d8rO1hajoiCLE5SAqm2ZLXbY3roJXOfzv3/CsTZsD2v+vZ8ec2lHsV6s2bIdP5Bu7MdYnXgx"
"KFBbE+rBmP/xV/6A//mf/SHrNmN965jJZCz+wX/6IyKOU5lnqdZai85NUdwT679YEPe6dl5R"
"InYgets0G/lhR8cwEByk+1WHx5rMHa/Hn7fHle/m/v8b49FZ39Bnv3ZNQ9tZWVmNJXjGvQym"
"z6PwWYv/zHK9pnSkuziKqIUbL/BcDNdJCT8vpXULjnpqiKY3XOrS8r6Coa+Z/UVSQgRrJuNS"
"+P5r8tPyIRh2HbXr8viFTiA8xei6kNUYY22UurJkOBpZb0RjSLKMzOEQUinaurZgbZYFMmvs"
"1FM9kBvR44mxGcr2oGjbOLMA1wENpUWS0GCojX2TaKtJZZwprdZQN8j1GjE7Qd4ekAwGJPk2"
"P/6JKd++umQ+nxPnipdenvPqexP+g5/cYTiQfOErCzI1RyYjnriS8M6Ngm+8cBuhb/Kf/YeH"
"aJFy/zgD0fGn33iD/+F/+RpPfettomTCk08+ymSYcf3mgvlasFxLLu+tubT1Old25+zsTJnu"
"nGN3d4+96TZb4zHDPCNVCiUsY77ThkGWcrxI+KlPpUTqXd6+PuOjD0SMB0OGUWQdwasqgLpe"
"BllKZzSbxhwt1zz/8jVkssP9F88x3y35k2+8y8/91Ltid++cUEqKNI6FkVJoranr2twr618M"
"BtYrMMtCddC5+1e4bpzXnfd7KgD7YqPJFbIcD6A7ysSp/XGX9/9fd5wdsHwJ5WtmZfWm/Xyf"
"F66/k8Ph1RR9rV1kmR1mdl2MNM8tb0NZ+QuvUeXTXowJc1A+NfYGqv5z2gUl6YOQ6zb0SaRL"
"JxMSu86H6C1O08O/8E8dT3BzmaU/h+CUq62XneeY5HluSapVReQY0uVyGcQGOyktl6eqgtxz"
"6W4y3zXyKbh/Xf3DlwNrh7UZbaV2BsMh67rh3eUS2pYiju2TWlmFBGKvlhAjlgvUakXSNNbz"
"TwjG0xGffdLwa//mJoOthAsPT7jYaX79C0t+4JE5B9sFFw52kUrx1HMz3ny7ZG9ieP3mmOde"
"PuSHPp7zZ3+54Etfvcpv/O7LHM0jHnv4EYaDlHVpeOnNOU2nGGclD126yUMHtzm/mzGZPszu"
"7i7j8ZjhsCBNMhrTcbJcOhZ9C51xXSqFkoIkG/PZHxTMT4as1jWZVERCsFiuaFqbQerFGtPY"
"hkuKtaYaNTVrI2kZkeYr0iSiajKOjte8e+0GaZaLPE1lrJQWQgijtfFZ1Qd5/VdlSTEYULms"
"zQXaoElfFAV111G785KOymOMCTOFkcvg6rI8RSb1AL7RmsYpMdzV/f9vI2C1tfVji/wJLBaB"
"KlCWZchwItMbugSE60T4lDqN46DiqJRCudTQE0bpBSPTC1T9OSjj+TJCIO/gbXj5j9gFvDhJ"
"rHqAu+F8RqYcaO0zq8aP97jPdW1L47qNaVEESV3ZNMGeO3Y3QeVSWgm07ubwppSeTFeu18HH"
"DiECsXAwGFC50qOp64BLhXOMIlIhgmmBb1kXrlvVjUe898QTdIMhkdFO5VPZdr5UCOVMTSOF"
"SlLSomAoBIOmJW3hyUdG/J3DE24uG9brmsPDFZ98LOJgf5vJeMi3Xu147e0l61Lx8cdi5osx"
"126s+ZUvwJ996y2eeaXl7Wsr9nYu8cTjYw6P19y63XLjqCaKEx7cfYdH9t/j/K5ksn2e7e1d"
"JqOBBYLLNYvZCcdlSbde06zXtKsVXVli2o4kicmLgnQwQKQJTWfQ6zVquaBerzlalxwu5nQn"
"M5rjY7qVDdxxljO6cJ6tBx/iwLSsdw+oGsmwSFi3GUWhaSrBsy8di/Pn90XdtCJWjUDY1vy9"
"sP55UVCXpcXFHJ8qShJkZ+dmFw7kz4qC2r0OYDMn6YJtf2QnZJFsOpVaW6OKu73/v6eA1fkh"
"UcdiBTYAIi6ddFlRUDP0F8R/TxSFafXlfA5AU9cWuHVf82S2IKWqdYi8yr0ZetHbtUV9Bhg4"
"JcJqDbWdZep6QT8Pgvq/UzveSZFl4eL7jk7tOkWxoxD4dF261q1vHSMEMVC6oBg5jKFpmuCl"
"mGQZqZSn2MHLxYI0SZg73Ew4IBZ383gg07fXjTH2qeye4P0b6IH77uOhhx5mcvkSpnKdFhe0"
"hJRW4VPYoCXimKgokHmOTlIWbcUnn9D8439xle3dfR65coH7DiLeuBHxh08tkGgevliBTLh6"
"PaNaQ5Joqkrw1Zdj2q7jsUfPMczh6nsnnMwNWsPDB3Pu23qXy/st0+k+093znNvbYWsypshz"
"UimQXk65tkPMuq4xzpSVpnXNPmt8i5R0bUc7HNCNR9ZpuqoQ5RpWa8x6jWkqRGeQSUI83SY5"
"f55iOuWaVCQxdGLAdBxzNM+IYsnRcclqMRfVdi2KLBWxUkIIIdq6Nh/09feUir7Uku6s9rwn"
"mequo3Z0BLDgPlqHsrPP9fKvqc8Bw72eu73/v+eAJYQItHyw3JHMuSr7J00QKetlMT6t9NFa"
"usXP89wOTioV0t36DmxK+MDkampPlAtBzV61QHgzZjOXBZyaGvdehr5M9MzaSKlTowOV44v4"
"CXmMoSxLS3aT1glExXEoEWCjZ5S4p23jBl2zPCdKkhBEO/d9Qlj+iR/lCOMIsmeQaTZcFf8z"
"ouvCQG5fBiRRir0s40BJzj/yKOnDj4D03VIRjEu9FIwREoNN46um5dbxCY8lhh988oQ/+uaa"
"tKj4468vOVkseeKhjHPTiDdvJDRVSdkkTMc1qzJn3WTEWUNiSjo0r1xd0umc3fEtzg2u8ujB"
"ETvTMdPdBzk4d479/T2m0ynj0ZA8z0niOEwvWCkU1+F0NA0/j+ebJ1obtO4w2qrIWgEG1/V1"
"AznCbEZatBBoKTGR4a2Xr7OuBUmSsCozdieG+TLj2lFD1y2pqoa2a9HGCOzfFVEUmXth/ZWb"
"7PAVh98jQthALx2QnTgsqYLA1fKYU13XFkOTloDtH9q+LPYjPHdr/3/vAUu6MRbX4fBt7LKu"
"UULQdF3Qs/GyLXEvygfwrsfk9UBicNZ17VnZy5j6ZZsSgtZhCOGJ4tJtqaxwftT/G1pbCzBc"
"KQhWRdI9KT2O5dNd32UU0lk6OWAxdSqnGGO7iO6J5iUxtDsfT62oy5KhGxCN0zTMbUmlQjtZ"
"RVHoGvkAKoyxnm9K0fgnpVJWc95hH75c0FqTDQbuZ6W9GZuG5R9/ifpb3woGpkhl2e++RFTK"
"kinTBJHnmCyjiBSlTvjpT0/5g6/f5unnD5nuwg89kfHKe4quXSO0YlhINB03jicYWjoadzMb"
"DmcNcZrx6Oib3Dd5l/N7OaOtB9jd3eVgf5/dnR22tiYUaUoqJaqqEMulVdRoGmuQ2tSYusHU"
"NaaxJgemqqwiaNNC29ivVxXUFaaqMVUJlf2YxmZptJ01aN3bhfsuEz/+CO1Ksi5bBkXO+Qvb"
"zGczxltTTpY3WC1LmroWuuuEctIz3oHpA73+RbGhPkAISnVdW5zJ7RHglOtUnqY0YsN17FyW"
"40H8ruuCDhZChDnBu7n/v+eAVZYleVHYRXAn3Tr5YrCuOh6o9hPsuuvsMLKP0mx0omNXb3eu"
"do+cTrsPTt5IwpsC+DS0KstTCo4+lRVSkrqyrnKaO4lrTQuXsuMCl8/EfAno8THtblJcOekB"
"RT8IGysVPNOqsgRjB139hRbCSs3macpitSLrDcO2oXu1Od/IpdGNI/7FSRK6Ld7qSGtNYwx5"
"mlK1bXgqF1lGWVWWfd1YN2QpLKlWzBf2hoojhIqsprpSQa6YSCFaKxGjtCbNMkYqYm8y4T/5"
"yRO+8I1jtqYZL7yqaZAcz8c8ef+C2TpjXqZorEKoEZI46ejamP3JnEvRF7gwbdg7d4mDg4tM"
"xkOGg4KD/T3LrcoyYqWshHBjteG9TI3pWkzbQlMj6tpaeVW9t7LC1BWirFyAKq20jAtaoqox"
"tf0/WkNR2LemRSpBZzqqBvbPbTPIFSo6x2pdUTfw3lEnzl+sqJsOqbpgwPNBX//K7S2vnCCE"
"CFZbHoQXQrBaLsmczHWepqzKEt22DAaDMIurlKJcLq2YpVM/8eoUg+GQ2cnJXd3/33PAyrKM"
"rmnIBgMaR0IDWC2XJO5i9zV2QpprLBlutV5baZLYCX35ut11EUJnQDjyplIhrVUOk2ocHnXn"
"4dvGfjDa1+dCSpqypHY3kYpjG4QcLuAP35pGWB5IZ0zQKpJCoNwF7rSmapowrGmw0srG2Da2"
"kJI0TanqmpEDR2P3N0Ng9ek+NghLp3QpIOjOe9zDuI/BjmZUZblRinQCcVGWOakXjWlbTFsj"
"4sQRb72RqDhtNGosN4aug9qW0FmSMEgKPvPRbf702Zu8+OoRW9NzbE+WyBiO5inXjgcoKYgi"
"Q9sqdsYlio7ZUiLr2+xPl5y78CGuXL6Pc/v7aN2hBAyShDyOSYTDrIRBSuFkjO0coscpvaoC"
"vYzQqAgRdaA7TGwhArSblzOWpa3N5rUCiKJADIaYQYFOU4ysqdqIm7c1J8s1IkopVxVtnXKy"
"WFHXlei6DmHsL9JaYxwA/4Fd/x5zfr1cBva9v7+1KyE976l0FYjfMytHWs0c4VWnaTBnQakg"
"99LU9V3f/2cdZypmBbat6wb4ujlNksAIlq5eNsaEFDBJU+I0pcjzYKPtf5d/A2vbDXYos23b"
"YG66Xi5ZV5WVXHX6WMAGjGyakFb6WttzOjqXRQ2KgtKlyf4p4H+HH+tRSpEkCUma2u6Vb/F6"
"ANJ1e/yW8K1cv/h+gZaLBYvlMphflqsVx8fH1sDBZW2RUrZz1FjB/cxhOZ2xypee8OeJgF5d"
"NXedKhXHQdq5XC4x3SZTEa125ZPNWEzTYtrGkh9b//nOfo/LVGRVEbctuZFsFRN+9tMJwlTE"
"zLh5LFmXHW/dzBB0SGmzhA5JkRqqJiZSHSftOYrxfZzf3+bg3Dl2trds8K9qlNbWjLVtEW3r"
"3G7c5e13sE2g6IdNbTymhcN0jMEYTfii/z+eBGwQSmLiGJIEkSRopXj7RuXGtwSLaojQaxpd"
"kMZwNDPorqLTRvgn/D2x/k1DuVyiu458MCDwmFwm50vO/kCycsHEy0G3TUNZlizmc6IoYjIe"
"27lGny2pjQb93dz/Zx1nOz9HG6mMTutgBxQ5uQgPLPaDBmz4W/25LI8R1E4Dx9f43oixrb3n"
"nKuxtUY1G6kY32qld+NUZRmY8KazThxtZ0cCQvrtLkifw+Xft227GboUtvXqHXDbxkrA+uzO"
"d3T864uiiHK1YuSMCrKioHIdj7ZtGYzHNC49bpWyhLuuQ7vujLdJrxweEpQ5/fXTltUcub8r"
"HaNY+YzE8jVc1tS6ssO4jMMZRLhYYBAQMm73SSlRUpCnOSkxn3h8j08/8QZfe2lNFzdUbcbu"
"9oLGFJRtagHluOPaccHB+Jiq1CyE4Km3H+H7P1wSxylZmln1y7q2+FJa21IU6c4DF3DcJQ/q"
"VYTP2c/7SyFOf65vjooItl52PCdGZinkGeQ5lRa8+MZtyjZnNxbOvCEhSzXlKmGxXtI1JXVd"
"h3Z7nCQhCH1Q1z+QQYWwYzhZZoFxhxX5zqI/10gpNFa7S0lJ7UwfVus1aRwzm80wWjMYDoOB"
"i9ePN27/3K39f9ZxtkSylGgpGY1GoQZWDvFX0UY9QTnAWvrtYE63TZUboOxcJBVRRCSsCH4Y"
"omxbG4CiiMSlov0g48mjob3qQDzPShZSkiQJqWsUtHUdDFfv7KScUoT0Tx+lwjCpBHSWYYyh"
"lhtHaY99Ga2tKUYcs3LGkh58lVFEmmWhc+nnw6I0tU8mVwr4Gcn+hvByJrBhHtetlfhYLhZB"
"sSJ2Wkrozg55OwwOlDMO3exy09pIEazdBdDUtqMoFUoqRlFMo3J+6lM7/PGzSzK1QJiIIpao"
"WPPeTJEmgkHWEcmGo1lOKlrKOuOl2wXPv1Hz4JWSVltJn05KG0TbxhpASNc+d3+eEEg3nzuV"
"dfnzNBuvweAejTmVgSGwzYUshTyHwQCKHFLFXzx7xM4kRkWC/VHN8bJgXCy5dTvjeKHouppO"
"G4QUIlJKJA5a+ECvf7PxMvBcrOPjY/I0JclzC4Goja0dLoPzJZfPhgxspkLc96/Wa2onC11k"
"mbUzu4v7/6zjzIC1WizIh0OqtrV1uruQifMHVK4DInolV9t1SJcpJXlOW1XhqZLmOWme2+6d"
"I73125naWAG1Pj+kaq2wnyeAGuxQthIi2HWtnQtK4gB7Iza8ED+Q6f9W21hfOeO6K/7veMKg"
"X1QVRcGbTydJkPj13Rnf7fCYgcfH6vU6tKExJoCVHpPzr9N3W8Jr950U9/9gQuADt7+R3XvP"
"N6N1paGw5qZh33scy9jMxIh+lmIDPMpiColSDJOUhy5u83M/suDffOMGgyTi5Xf2uHyg2R2t"
"2Js0VF3C4SxhXStm7YDt0RHbWccffEPw8ceOeSjfsuTJOLZyIk0HonO4lfDMBRtkTNAGDefk"
"jxCPPP7W/3zvP8aCQnbwOc8RoxGMRjAa8Mp7DddvrihG+5ysByzbjHHRcrLMgTVKNKzLRmjd"
"CaPt2pZlKYzW5l5Y//6MbeoAbm9lp6S1IWuahixJEFJSrlaAVURtO+st6NUctNYhQTDGdjTn"
"jhJ0N/f/Wcf7wrD8RWgdkzbxwmZgRfTEZmwmpIKuru5nSZ0D3bp+QHJtVOFap4PhECWtyJnX"
"a/csXOkWUPnUWVjZDN11JHHMwImBrcsygPH9NL4vauado6W0lkVlXVvRPyEC/QGXNs/nc2az"
"mR2vkDIMtfobIHFibp44GCWJZfC6IBgra0vWak3VttQOp/DaX34+zTuSKMfriZLE6iFhQd5i"
"MAhZoQ+sFsdqra55a0tD09rum2g7i2e5r9O2DvPS9v+OVkBVEbUtOZJhNuLHP7nF9lBQlzOG"
"yZKThWGUrjiax7x6bcjtZYySkCeG6VgjoiFXbwl+7yszVqtjtCEAt3TuHJomkERx5aJuGgue"
"u6hkeinWncmWCWmZ/6rLrKIIkeUwHCImW4jplGa8Rb63xRe/fhUVpzQmp9EpnYk4XOR0nWSY"
"1czXkrbt0J2mbVsROsX3yPr7vVa5bMcfscOX2tYSpys3a+mzHOVw3uFwGGg82uFTSZaFDnug"
"INzF/X/W8X6cdVitViwXC44ODxmNRoAV9vPpnNeTilwtqs1GoH7tB46lpOyB58vlkrqqNoC4"
"i7h98T4vHzudTqmbJvx87WeXeqVcYKsDuUvl/eGZy967zT/xjEvfoygijeMQwDxz2NfeWVFY"
"eQwpabqOVVmGdF93HdVqFTI36W6cNM/DojSdlY/t3Gv0zGmtNbP5nMY9gTyW1rVt0PHGYR1C"
"SpbLZSAyhmFw7agBXWuDlcezWidFGz7XoxGEANe6oFUhm5rEdGQy4sL+Lp/74YSTWckoOUbq"
"incPFa+/l6M7p3Ipoe5SyjZhsY547KFt3r4lee2tW7RtY01UhcAYbf9+0+NR1Z5P5ZjtHnfz"
"DSPwTNJTGVXA5DAYYTErkWeI0cgGq90d2N0jPrfLrVbw1WevcbScEMcRRRoKTwZZRdsZlGhp"
"O/tL3bypuCfWvwf2CyGCoornSLVtz1DVlWp9oLvpnMW8g1+UlIG2sV6tQgfPf//d3P9nHe8L"
"w0IIsjSl05qbh4dB+tTzQsCmqbWr1/2TZVWWjIZDK3TvTsaXe34Y2bvHyijacLBS63nWdtas"
"MXFBJVIqqBnWbYtyT0P/Qr2yoT98TZ044FRrHTSno9g6zvoMSykVwHaf7gcbbxfItNa0Tqfa"
"Bw1fdvrF9d0lhG3fex5KCMb+JnMEwt3dXWYnJ0ghSJ1Tr+jX9qJfD20s0AN47TMmlx0EsFoI"
"rByrwqI+7l9jN71RvkITCCURZYVKUnIVszQpn/34Dl9//i1euramizRxC9PxkpaMdZ2ilCSS"
"LS0TfuJTCZKYL35lxf/xuyf8lz8/oDEDe54Ga1Huszt7cq4/4EpCj1WxaQKeArRM783elIg0"
"BhI77J1nsLUF+/vICweMruzyD3/5Gf7yJTg4yEgTA9IwLw1dp4llxWItGKY1sRoZaxgkNpQa"
"FwA+sOvvmk8ed3InsBnr6T2s57MZsWOwG9jwD4VgPB5TtS3lakVVVeRsnHR8Ceex37u1/886"
"zp4l7LqA8/hSzE9gqzjm1uEheZ6TZplNNd1C+lksL0TvxzE8fyVOU/tUq+ugpIhbWONulKjH"
"Cdnb2eHW4aH1gmsam0VpHcA6z6mKHKCZuCec56n4i+EX0xsAAIG5XLoyMomiwEj2N2MwgXRj"
"Dv3ux52tWv93POaGf8L1Oi79mcjCsaPrqrKYhTF2Ot8tqj/6uJ5x+EigNrSd3fzGgDIhCPQx"
"Lf+xPTMHcBrsvKGyTPhIKYZxQpuN+Zkf2eYXfrVia3uGNILtoqQ2ms4oYgWpWvFjPzTmuVcE"
"335pxaIa0QrNl5+6xd/9RErZJQwjY6+X1puSlDuwKHonCKfrQQ9mCSxtIYkRsVMUiBNEmsJw"
"gJ5OYf8cowfP84//1Uv8yv/zBgfndtge1szqgkWZcm5rybrsuHYY8cDONR65VCPiAWkaGcBU"
"Vn3UPgg/6OvvPhekYHygcRwo474vjuOgg+X3Xtd1dEIEt5o0y1CuxNRtG/wZvaPQ3dz/Zx1n"
"0xrURsEgdgCbMsaChXWNdDiSDx5pmhIVRViYxlEMoiiyL8jFjgAAIABJREFUYJxwUsdRZNUZ"
"XV0uhAjkOD+47Dt4jbvJhZRBCN/X0F4uWCqFqGs7QV5VyKqicdlS5FLwrttYFwkhyAcD5vM5"
"eZZtiIDu9/mU/866Ovwt9/o8SOnf/FPOfrPsbUK7CYxrBde1tTUvioI8TQMT2WeJ4Rx67N+Q"
"+kNQs/BdQktrAIPafJ3T3Tf/segvvcBiSlIhojUqjsiEYCBjnnxwix/7+Lt885WbxGnM829O"
"eeBCy954ycVdw9Wbkt//8zWTYczF82MeOIh46ls1X/rLYz76yJxsO6M2ELtztoKCLeg7U39/"
"xv1Pmc1XpLe8l8g0C7LMZCkUA8x4TH5hn2465r/935/llz7/ElcujkBFHK8KlDLsj2ecLBSx"
"rNnN3uETD7zD/s4FUxQDnaWpkVIaYw0/jAfTP6jrL6W0jST3f7DBSff4W31eWesgEQ+DeEWI"
"xo3rGClZOg6Yx87auqZcr4N4wN3a/2cd78v5uW/4OJvPiePYjgy4TMeTzeq6DguTRhHFcBgu"
"aO3qV+lOvn+hgaALLaU8pa/ugbzC6fw0VWWF07QO1kG+c9N2HcpYy+skTUkdaFp5xq7WdK6D"
"UlcVSZJQFIWdl4oiIjfqEEUR2s1j+fkvv9idG5JWLs32T99OW50kZTb8F7Q+ler6JyoudfYY"
"XOeaAHmaWo9FB6hng0HgpvSf3qHscCWCxamcDngonQTgraF8p3BzCDccDQKhDcjK+vdFEZFU"
"ZGnCKBnwM58e8MzLJ+jqhMlgwJVL25zfVXz1eU2UjDg4UDx4QfDSay3ffr1Ba8NTrxR89blD"
"/t1PD1l3MalyBEQDQjuwX5s7XJp75+fKRoTARApEYln8UlpOV2wdaUxRMNjfJtrf4uVbNf/F"
"f/Nl/uhr13jg8h7T7RFHi5y2hTRuqGuBbtZc3Hqexy7cZu/cZXNw4ZLZ29k2o9FIp2lq0Nqo"
"SBGpD/b6Cxew/PkFsmv/6w4u8TOPwgc4F7iUEGilKJuGLI5tRVJVrMvSTntACFZ3c/+fdbwv"
"52fYqCBGruMhpCSNY9YuZVRxjKnrMGulJpNA2gRIkoTMtYY7t+FihxvV3UaHxxinJOpSaOOI"
"dgBZkhBFEbOTk8D70I5tq6KIzOkQ+fLQGEOeZVazxz31lJQuqbAXN3IaPf4JWa5WpE5RoHak"
"uL6kch8v83wY5QNH/6bplQD+d3ujAvzXtGY2m7G1tQXSaitFrpXedJ01zIyi0BH1N5turc69"
"7rrNqI0vCVUvM2HzsT3MHeWhjWImUoimsVmWKlEqIlWSXEU8eGGHn/iBFb/79YqPPbpCMuT3"
"vtIw2Zrwo58c8/Rzt3jltTnXbymOTobsD0sun6/5vb9Y8pFHjonP5eQysWUWJgDwtB3eotls"
"TtgeUtjh7cSJEgrsJo9jyDJMmtFlGVsXdri+avn8b73EL/7zF+i05KOP71EMt/nO2xEHW0uU"
"TCgrwSi6yUfue53zu4rdc4+aSxcvmP39vW57e9pFSmlhjNbGmHK5Mh/09fdNKeErD3c+p77e"
"trTu87ETC9RdF8oyT5SNhKBsmqDllbvmgm43BjB3c/+fdbwveRlvg7WuKvI0pes61k4fp3Aa"
"P57xGjsyWdM01HrjQBtJK1urhB2O9Axfz/6N45jYBay165ogRNC1ns/noUOxM50ym82smJrD"
"Cqr1GtzTLnYdRN/p8KWmF13zIwC115N2qbWKIlIIg6F+zMBjDr6741OCrrMStP539EsDIaz8"
"SX8kCNjIgWC5OEWec3x8zKAoKPLcjml0nR2GdfLOfSa+kHa8QrWtpQNo3QPdNyVhWD8HuNt/"
"IozwPswbHGRTYtYYZYenVRKTRRGRyPncp6dcPTzk+ZdOaFXKdDphOT/huRcW5JHmeD7giQda"
"nn4pomwL9qcLnv7ONv/XF+b8539/yKyZEBlDYox1gWkdoVS7gOrBdyksoI6CWCKSHIQr/9LE"
"8qyKgnR7RDwu+I0/eI5f/LXnePbFYx69MkamO4g458ZRzSitma8TMnnMY7tvcnG6ZH93n53d"
"A3Ph4nmzvbXVjUYjnWeZTuJYqygyxhiTpukHfv0FoLqNuoHnbhntTFP1xo/Qd8bvLF0xdtav"
"cY2QyL3uyrHgPdTiu4B3a/+fdZxtQuFS4yhJiFzrVbvugH9qtA40MxCkYBJXzwN2ENnYQVEP"
"agOcHB+HIOTdSnwKnqSptZn3AGCe2/NxUXswHJJ0XXDcbTyrvW3DoKXuOuqmsd5qvQ6ir6vr"
"tiVydfyqLGmc9LCnTniAX/SCn+4sZiQdR6yPY3hft8BDURv9b7A1vd8EcZIQxTEn7gnbNg03"
"Dw8p8pyBwwA8d8dnnf5n/YaRSmI6RxvQHXQuFAlbDvbDkce0wONbtnsoXOkopLBBpJZQVkg3"
"pjJMU7rJNj/w2JwvPdPw4JWa7UHJYql5+e0xP/KxlNoobh3X7E00N2/nvHO8S5TO+OoLK77x"
"wk0++5GcVYstl5xwn6kbSzZ05xiAf2GDFh5QzzJIM3SeMdjdQhQpz7x6nf/6v/p9/vyZ94jj"
"hE9+7CG0abh1Irh13LA1bDFG8+DOy1yazjnYK9jZfpTdPGV6bt9Mzx20gyzVKoq0EEJnSWLW"
"VWXulfUXgPF4r9sfntvUVz3wEk2+M+lpGL7zV7m95oNd5/5O5MjYlZNyvpv7/8x4dNY3+FZv"
"uVqROiKbFILavUdsHC8KV8+mDtTz9bUPel6KuHUSE7u7u2htB0cTt4Cdxwo6a5Lq6/7w1Os9"
"nXx7N3G8qsVyGYA7f5PlbhA0imOWqxWxUtapRCkGaUpVltaaPE1Di7Z2Ed/fbF6HSCoVps19"
"YMUttncR8cCkEJaFHrkmgpTSmiv42SvXSZlub7N0fm+j8Zi2qlgsFqR5TiRl6FhpCMQ9n2W1"
"db0ZzWl9903aiOQ1DwEhekB86L6b3ueM5ziAqJ1jckSUpuRJSh0P+OSH9/l7b17n6Zeu8fqy"
"4+NPbNNFY64fVhTxguMOlnVKoxWGlofvyyiyA559/ZAHLx5x/2hCgnWJlsYg2g7d2mthZa+t"
"Hj1JYmcBiwGmGNBlGXI0ZLg34Y3rx/xPv/wVPv87z7FqMh64/xIPXhrx0lsN85UijQ1F2rJb"
"3OTRc9fY30nY3brE3nDMbixRszmD/X29vbfbyqYxcRzrpmlMq7XJ09TcK+svIyuf7K22gI1o"
"nguOYbg/3mhygZNxdnvSkz2llEGZFCFCEhC7fXI39//3HLDK5TIMhPoXGVqQrvujtSaJNg6z"
"Ko5ZzedkDiT3AaQPtLV1TWMMSZoyGA6t0aPvZrg6f7VYMByNrMRG01A5EfwkTa3YfpZRRxHz"
"2YxiMGA4HFpNrLYNEsja1coAuQPp67pGaE3rh0CXS6I4DjIdoW53C96/KTtth41DG7nbWEDh"
"rkXn/qaf1JdaB+zs1OFurjDY3XWhBK7Wa9o4JnY3a+LKkRr7xJJxTJSkVJ1GdE52xcFCKJ9d"
"bWgCxvRKPwDU6dk90SsUHZZFkhCnGYPBgHY84bMfO+Zbry25eGDIcoVub/D6dYGWOUeLgrrW"
"JBFc2B/w4Ycirl074Q++CrvD2+x+JidRlk8XUn9tLB9MSkySILJsMws4GiFGQ8Z7W8ybll/6"
"/Ff5lf/7aV58o+KhBy7x+ME269Lw1HfWKKmJpGRveIsru4dc3m3ZmuyzN95iL47Z7loGt2fM"
"rl1DXbyoI2O6tusom0YbMOt7bP39PkmL4hRdp0++9PstWNM73MmrioaGQdNglMJIeWp/+p9r"
"6vqu7v+zjjMDVmD9epDX/eHIXxBHYKvb1ioPio19j9E6RM1TSgvaysLqrrM4kuOC9JUT1ssl"
"aZaxmM9txyJJSJPELlpVBR5LnKbsTKdh4j11YxKep+Wdaz1r3ncTB0URsrbG3YB9kNAAjdYb"
"lchoo+HdeKY9GzUL5X53J8Qpz7qu6+zkuyt/lHtSG1ff+/LXs37jXlANXao4RkAgKyZZZtP4"
"tsEYDbqFTm1OnH425fQQIjCd/X/4OgIROomudDEKIWqrTbWKkak1TU2TlEfu3+FzP1zzh08v"
"uHUMWo5Q8YBZlSHQ/NCTiscfSHjh9ZoX3pBcf+c9Lp/Peerlho8/eky6t0eapqg0RVU1GG3x"
"qTyHokAMB+h8QLQ9Id/bZmHg17/4PP/080/z7ItHpMWEv/PpR1iXLTcOS24caYYFnBve5sH9"
"99jbEuxPh2wPdjiXp0yNYbJcUMzmiKMjjt+5SvPgg7pcrbo8SYRRytyT6+/A679pg/erkTAu"
"AwHE9ziXp0Y0rtRryjIQqft77W7t/7OOMwNW7PCfTmuradQDGFvfdXCBQUURdVkG2dTgoeYA"
"TL+QQYJCWL5LnCR2yHowsJ0H5+PWVFUwkdSdNY5IsBiUnzxfLRasjGFra4tOWinYrCjshdbW"
"yqsPhBpjiNzF8gvru4pl09inojs3f2N5DWqv5tA41rzHFnDnpFx67Al5xhha1yLG38i+JvNP"
"uNa64uo4tpmfu0F9MPWKqEtHwIuUIs0ykiSxT/9OWxyr7Xrguqvw2g3nytAFBYcwaGzsx2Kz"
"v9znassqkBKR2PS9iBRFOuRjDw/40tevs6gFrRpSdS0X9xd86mMDplsRoOjaOccLwZMf2ue9"
"6+/w1AsD/vWfl1z42RKpMsvT8dlLliFGQ8xwSJ0P2L6wQ6UU/9/XXue//+Wv89Zb7xBlUy5f"
"eZgr5zNu3Drm6u2UpjZc3FnxwO51ruyX7GxtMR1OmKYZu6ZjvFwyXCxIj49Rx7dpjm7TvHed"
"bjYzbv2N6IHR99L6Z0VhG0lJEigQAfvzQcG9Lo+99buUAXh3wcO4ElX5TNc93P3evJv7/6zj"
"7AyrTwdw3QOfLnZdR+pUDzOXLnpzxbKy7rbebcMLkOE7Ff5FKmWVDYuCqiypwArvuVrfj9QY"
"LAgZJUmYTO9c2RcpxeHt20zGY/I0ZenmoYQQpMnG1t3fYMqdn/96FMc0dW0VFV1bVxongxtF"
"YcC6a1viLAs4WdO2bq5OBjVKrTWtKwGC0au7kftjP31eTeOeXN4Kqu8AVLqU2muMe+qFlSFh"
"0yXUncXZ+8EnwhLae1kX9AiloV+4eY8A2YIRNZQSsVwiXGlSJClXLm7zqY/O+dXfa0nyJZ/4"
"voS6WfPm9TFVNePFNzWXzsX8nU/kvPmO4aQ+4LGHl7w7O+HpV475zON7LMkYju0JqWGBGU9I"
"d7bIhgV//M3X+T9/5zv86y9+mzgZ88DDj7sbX/Hym8ecrFOKpOT77r/Bo+dPmG6NmE7uZ68o"
"2AEmVcngZE52ckx0coy6fQzzOfXshOb4mLixw8alGxi+V9e/jSKLc8Vx0GKHDTUgdCeN2Yxx"
"sWkA4M9DbqRmJBtqUddYpQZ5l/f/WceZActf9DAU6qfAXYnWOdq956p40LE/Y+UxKd9+FWBd"
"PVzbNHbT7dIBlJ7CECkVOFLr1Qqh1Ebky/0+31FZr9fcPj5ma2uLLMtYLZfEWUZbVfbGEiKY"
"UeSDgcUsksTiV+4pUbdtYOPK3gJFSWJr9a4L+tNCCNuihyDNDBuCHBBYykrY2a1OiNPll/ue"
"ptcZrbF0C+maDf4m9efnX4d9YrpRF925krAnL+MahcZzMFu4E9MK5aJwbPjWfca/GFFjVmuE"
"siXRYCSpVcqP/8A2f/7sNaIsI0slaQLPv7omftjwuc9MuXar4yvP1CgZ8fhDBbKreOYF+PU/"
"FFzen/HA7jbDrW3SNMWkBdFkwHNvHPK//cs/5rf+8CoRFQ9eucTe/jnevVkSseLt64JVU/Dh"
"S1d5eP+Ig92ErfEl9odjduOIadsyWC3Jjo+JDw9Rx8cwm8F8jlmXUK4xTYPuWtutMoZsOLwn"
"19//fKc1qStV26bZYGR+zzngHTYzhJtvcbm3e+3GmNAlxGzkju/2/j/rOJsL37u4teNNdFqT"
"iI1NUOzFxByDt6kqy8foOpIkCWmwj/j+ovoUe7lYhMgbOwmMwPlQisgFqthNmEspretKZ3V3"
"PBkvjawCZJymjEYjDo+OrBpjbHW5E2k5WNV6Td22rMuSPMtAiCDyD7bFKt35rlYriqKwAc7Z"
"G+HATCklVdMgmo3qYuxmplRsFU+bsrRP2Ciy/CM4NfsVuQ6UX0ipVJgn8+fi2+O4j4W7ySpH"
"iA1qDOBZVxtlg8hACyYCuh4vK7AjVKCTCmV1qTzKZWoQotww4FXEoMiZDob8/c+N+eI3a159"
"/RZb2xM++5GG1mT80VM1b17r+MiHcsYDQSI0f/aNmNF4C91V/JPfvs3PfOaEhy5FRBLm5ZLf"
"/tJT/OlT15Fmxfm9Ibt7DwOaN9++zqwcYEzGfdMjnrjwHnvThO2tA6aDLc5lMVNjGFUl+XxO"
"fHyCun2EuH0bZnPEaoWprDGrx3DAGpOWVUWl9T25/p59vpzPkePxKRD8zn3bJ74KtTF58ZlV"
"3WwUfX1W2LatLd965eXd2v9nHe9r+NmT1aS7wP6Pe3ut1XrNYDAgUoqybW3k1Rvd9P5IgOd8"
"eAa71laa1WsDCSFCZ8EvrhKWkZ74FNNhC/1WrlcfBWwHQikmkwknsxmpS5u92J8xhixJTn0/"
"YFU86/pU+qykZOV0j7YmE/s7jKF055p5wf62tWNB7kb0AbV1Tyx/LQEibRUXwCoxZklC4/A2"
"IdykvmuBx66D1LnfU7dtGJVI4pja2C6h0V0vVG06gnaXdojQOOxRyj2Tgf69LjDCBz9h5wyV"
"FchTaUIaKYZpwmOXx3z5mevU64YHntxGCvjmizXFIOHn//1dvvLMkm+/VvPeTc3f+8yUG4cr"
"3nrrFjfnUz7/RxWz2dvsjGrKqub4pCTJd9kedWwP4aW31whTUrYDtvITnrj4Hg8ctAxG59jb"
"2mY3S5kimNQ1g7IkWcxRt2+jjk8wsxOYz6FcQ9vacSBs8DZm0/7PosiqlN6D69950qZzTvbB"
"MkxCuI5j13VWrM8PNncdVdMQax2cpGVvP3ssSghr7JqkKYvZ7K7u/7OOMwNWmudBtrhuW5S7"
"sFme09Q1ZV3baOoGIJMso1qv7XwftjWcio3xo++6eECQ1qp8+tTRk0Q9e9YYQ7lckua5JaTl"
"OWkUsVwuw3yTn3L3voV1WYK7iFtbWyxmM/v0aK2vnJQyyHjUjhsSRRGr5dJ2Il36XDuMIs8y"
"ajcM6s/b1/r+iKMotHxDd0aI4NlW1/UpX0RPVl0sFiR5TrVahZ+TSpFnGYvaWlcFfXtheT6R"
"u1Zd27qZsm7TJRQ+ixJ/RRsdTCQQ3UYqOcgPCxu9/M/0J2WoG1iXiNRapmcqYrso+JEPD6ia"
"klffOObtQ81HHp/yfY8ofv33jjiaC4pM8HM/sc+168dcvVby0EPnefOdJSdHN/noE7u8816N"
"Mku2pttsjRXXbrTcODwmjgXrSvHh86/y4PmS/b1tJuNzHIwKplIyrmuK5ZJsuSBerpDLJWI2"
"w8xmiNXaGm147MburtA1VUrae9rZrd+L6x/3Am2nrZlp67K8JMsCTcLAqQd853hhwnXyuq6z"
"Wu8u0wmZn7S8Kt22d33/n3WcXRL26tEsscaSRZ6zXi5RSjFwL6hx0Rwh7BPJGIqiCM7LnngX"
"uXnA1g2Rap/munTSc0k8ftA1DflwaPkmrjw8Oj5murXFcrkEaVVIA33BAXhNVbF0gW4ymdjp"
"cpfK13VN2uOIpI61W7ha2ge/1NXmQHjvVVA7x3j27Vvf0bkT1PTzZk3bkrjSwj9pPZGuWyzC"
"oCrCKqFWTROegsp1lwQbCWncExRtu4SBe2WZCe7DO7KtwMVSoTyy3+sE9FQfne+l57XY0Bzi"
"hDSKieOUT3xowjOvVDz9zTX/zqemRKnmT772HoNsxI//8DnyeMWrb97mK89pPv2RC9w6XvPa"
"1YZL+1OSpAGpGY8LBpnitTcPOZwpJqlhb3CDJz90yM50zM72BfaGE/ZixbhrGc7npPMF0WJO"
"tFgg1mtMaQUB6eku0Q/Xmy6DLYOcbbyXjLnX1t8ri/hRmL7PQbVeBy6UPxfP7/J0Cd/di+ON"
"c3RfV95nRJ7Jfzf3/1nH2aM5ccwwiljM55YNawzr9To8OTxYJpViPpuROoBSRZHt1glBEkWB"
"NetdPeD0sGMguPU6KEFudrUKC1K3LTvb2yxXK6I0JZKSk9nMpqtJYv3jPG8GS3zDDV4ClK7F"
"6stPf5F16yy/HA8GJ1Xjb4wgY9ubK4vdXFXQRfJApwu4UWxVTOuyDM4qWmvbWXFC/4vFwmJ0"
"TYMSFg33zsFeAsS30HHgrbcji6OIWmuE7jAuw+p50Hx3hqU2JaAxnKYz0DlKg3CAvUXhjREI"
"02JkZeWI4wQZxwwjRS0LfvT7c24sNMv5bb75TMv+uW0un5McH99mhubG7RaIWawMlw4yFqsx"
"tw+Pqao1Fw5GvPPujLKsmC9rHtm9zn07t7lvP2Jrcpm9yYTdNGXatYzmK7L5nGh2YgPVYgll"
"aQ1Y2w5jNMJoN5a0ef2hM2pZHBsCZF0jouieXH9PZvX7KVAUXDDrj+gIIajrmspNeZwqMdl0"
"/0T/vSsBzd/G/j/jeF/Dz13XMRqPA5mzruvwNGi1psgy5vM5k8mExXzOcDRi4cYNlBBWcN8F"
"CB+QtNZ0jnOCMYEI6kcWPLHO4wtRmloxe605OTkJ7Nv1asXW1hZaa27dusWwKOwTj40Vfde2"
"xG5mKQSuug5+aa2Tlenc06PTOmBcret8JEkSPOQ6rYndAjdS0ti75VSZ4GVmkygiGQ7tzW4M"
"VVWxXi6DHXrmgqx/D7arVK7XFIOBtVPqrAVS07Z0wg7VCixugnYifmoD3G5oCr11/K4P7BF+"
"phflLOCMnTcUAiMM1ICqEPEKmcREacJgUHD/uTGFeo8X31L82A9uc1xFvPDKMZcPJHmW0nVw"
"sJtT5JJbtyuOj1eMxzlxqnj97QU3b83Zzd7hh6+8xaX9mPFkn93xNvtpyrYxjBZzivmcZDZD"
"nZwgFkvEemVdoTunotAv/zCnAxXYrqqLw0pFthxzD7Z7cf29KmjVNEFs0ttwJc78ws8Y+mwK"
"s/FT7LTVcc+z7LswppAsuDd5l/f/Wcf7YrpHURRam+DccN0Fil3nYZDnVOs1QzcPVRSF5a8Y"
"69DbF6fvKzXinhw+hQ0loYvanplbuhQ0imPKqiIWgtnxMcPRiHK9ZlAU7O7u0mlNuVpZh163"
"aDKKWK3XFG6AGjbpbaQUpkeuA6sB75VLO1d69blgRlhmcNu2YQjUQLC4B0KZcOPwMNzwUm18"
"3Jq2hcjKPo/HY05OTkIwXa7XjNyYUeTmrqIoQrq5R+Vek/TX0ji1hh7hqp9p9aB1+7/oNChv"
"OtddjMwG3+KOFMxgAfiyhDRF1TVpnjNMC77/sYyjKuL27RlPv6r5/g/vU67X3Lq2ZjDZ4Yce"
"y3nxlWMOb93k3KRiPB7w9Hdq7t96m8sXXmQ6bNnf2WNnPGUvT5kazXgxp5gvSGYzopMTxNzi"
"U6IqrdyyNo6IuXl1m1M1pz4S4ZpsjjTLMELck+sf+QDj9mfgYLlg0C9LfQLgYZPOdfp80AtU"
"B2NOUyB6VIa7uf/POt4XDytLEpZucrtcreykeY8Ri7twfiDZp69KqdDi9JHUXxgVx6FsC3pD"
"PgK7C+g5IQirDooQLFzauVqtyNKUcrWyNtoBu7DdnNV6fUrUXkgrZyEdcAk2Q1ksFrZV6/gg"
"vvNRl2XQefcL6gdc+1ydum2DxI3uXXDdWXfqIk2tjrUnDgpBXhSUVRXa39oY0jg+9dQNrjjG"
"hKe5yHNws5Kmdf512rWftd7UevjykJAxWW6DAaEsmdSvvNgEL/vjjsDlvii8lLIytuxsO0Tb"
"ITqNAoZFyqWDEe/+/hHDIuVznxljpODFWcql+0a89EbN4eFVlquWv/vxjC8/k1AuXuFDW6+z"
"m73D7nSHva0d9vOCXQzj5ZJisSCdzYhOZsj5DLFcWC5V09hskk2gsqnTJhoFKzNX3vpMwUc0"
"bTY4yb26/l522ZjNYHZoQDkVUG+26s1efcCpmoY0ju28nysT+3skaMK7/ec7oXdt/59xnE1r"
"aFuWrruWRhGtGxJdzGZBHXSxWFiziCShyDJuLBbkECRh2ralriowhtQZPfq0NUrTTfvTpdG4"
"KOy7ekFCo2kYjkb2RshzKjfCM3fUhfF4bOcRtVNwkNKmr3UdjFm1tIxkL+rnp/LbO9jCbdva"
"lNt3MxzO0QjbNo7S1FqCu8DR+bZs76I3XYd0dAr/BNWAKEtyLzbozrWGQNYzxgSjzJULUHGa"
"ntJnAmcQYtzN62SHrVOvDnZIBhfD3Eb2wcq0IBwTftNNs5vcl4T2Ux74ctiGGwkRaYrKMtIi"
"o24laSLY2hry/CsLHrws+fijKbdnNZG+yZULkrZTPPfiMY8Mv8kkuspkoNjdeoCDPGNXSbbL"
"NYPFgvRkRjzzgWqJWK2grp3N/abbZzub9BoJftf513say/LJosdvVosFIoruyfX3ILgfTzuV"
"UfmyzmUv3u7OBzCtNauyJNM6NBX6A9x9+SKjrQHs3dz/Zx3vS16ma6yvWeeeCMHYoW1ZLpcU"
"ec5qvSYfDDg5OWE4GLAuS0RZbqKttCx2P+Lg26it65T0sasAdLoL6tPF4HYMpybq09gK7S8W"
"CwaDQXCzTZUiT1PW2JnDNMsQQlC5Vq92iyKkJMtz+1SVTvlRKdA6zC8G77Qosq7GLq33Drta"
"u5kzB+qKOCZ2C+7b254tLYSgKsvgZtJpTZrnrBYLjLRtd2/BlKcpjRuMTbLMAsbuqe7uKpt1"
"SINx7B6LSTn0RjhMy7PeERvdrM6WiZYF7z7uWhsIXKmIB2HjGLIcMRoitiawvYUZDGmV4OkX"
"F0Rxyp987QaPPDAmT4Z85akjVuuax64ojo5r6uNvsRs9x8Vtyc5kj/1izJ4wbFcVo3JNPl8Q"
"z05QszliPof1ylrdeyZ/qO3EpuQLAvW+iPUlhn9q/HETAAAgAElEQVQ97tt634Fbf6WsQsG9"
"uP6mV77BxjH6Tg5XAN+jyHYkgSLPKcuSsq4ZDofguoC1C7yRcjZ4UoYmxd3c/99zwPJ8CeP4"
"F0Agp3XaynoslktbwzqW+Xw+D26zUkpwnQ6f9SjX1sRdENhYcnnKvq/N++3OznVnjLHOHcJh"
"C15GJlLK8rNiqxtdNg2pW3RvBlmXJdoY8qKwN5prT+uus1SHsqQYDlnN58FaDAiMZ9+FqV3K"
"XnfWxFUphXQgZrlehye37GEKUkpSl/X5VvpisWA8mZDFMZ1z+qndnFtfoE0qFUh32m0Euz7a"
"Ba2esYMAo52VlraBywLxrmwUTs9ddJt93XW9ja4tbC8lQthgJYocRkPE1jZMd2jHE7YvTPjN"
"L77Er/3OOzx0eUwcbdN0EX/5rWso0VHWBcc3XiMqX+DxvRnT7QP2xlvsK8m0qRnPF+SzGcls"
"jlrMkfM5YrmEsoKmsU47/nX2Y9MdHYV+yWevyZ03MS5rsJ59dV2TxnFQDLjn1t/d9z4ba5zs"
"Ur+7/td14BrHB0udhpyQ0noguPPUWoeA7YPj3dz/Zx3vO8PyEi3+aVSu16H2D+qD2jramMGA"
"uqpo6toqh7rg4uty5S5Gn7fST2v7Eq/9ye7+4RcR2KS7btN6c0aplJ2il5LYSdXkWYZuGtar"
"Vci40jynXK3CPd+UJZkLaP4JAlC5etyXZb4u9zNYPqVO3Ll5YqoPkh73iNzTpC5LxpMJbVWx"
"cuWELweAwAkCm1FKx6eJkoTWMam1dgxloTdlH7hSyYSa0KI5/ia2AL3HCgP/CoNQxr23CqCk"
"KWIwQIzHyJ0d2t0dsnO7DM9N+F//5fP8wj/6Gp/5xC5H85huVfPK67e5/9KI3eGSovpzzseH"
"nL80YWfyGOeymB3dMZkvKU6OSRw7Xc4XsFqiy/WGS7WpScEzXO9se5q/4r37WsgmXT7WaY1m"
"w0RfrdfBJOFeXH/jzFhSd25+f/gSL0g5O8A7ctmkf9j1wXr/8/7NuK83VXXK1fpu7f+/6Tgb"
"dHcL4/kmopfCtm1LtVjQOmDes4VVHFMMh9RlaXEmV+93nZWI6YyxhqbGBBfmOwE3IeyG8eVh"
"f0Ewp+UxkHJjne0unmfeek5LnqYwHHL79m3Go1GYigf71ImSZMNhiTZSsoPBwNbeTourqioi"
"Y1vYbV1bImBr7ZWU78Y4ENaDov3XtK4qlJQM05TM3QyeZ9NnAgtXGqyd0mPArtyQq9MqEjer"
"mlQIchW5tFqAkKGU8zrp9v/Kfqxc+S2V+1jZ8Rul7AhOntuNJQRRmhAPhyTTKWJ7yvTSAbrI"
"+If/9Mv8o3/+TfLhAW9eE1bSKq340HlJs3iOdP4dHryUszM5YJoWbDdripMj4tmc6vZt6pNj"
"zMkMs1hgytJa2HedxeTuvHfv5Gi8j8Pc8XFlNDeahtHhkfhQHGPu4fUP2vJsHtw+6/F7w89O"
"+jJVKjuDKqRE+kTAUwzcvsRsAjrGAgzR39L+/+uO9+Wak7qyzBMuu7alGAysHEaakuY5TdOE"
"sku69DFKEqrl0j41lCLJMpRnvjqgUvQvqM+merXsX5mB9boR3jfOd0n8DeKfFNrNXZWNtf3e"
"39vj1uFheCp4NnLszFY907f1ZWaSEMcxq8ZqxCeuk7T23Q+zUWcUDtDHbDgvxWBgMQPHBu6z"
"k417+uWDAevlMshrxK67IoQgd2NKXmYkdx2erm2ZPv54V//H/1EppKKTEi1ASgfKCpuVGETY"
"hL4LG94QiMiObWis/buKYqIsJSqGQo4GQk4mMpruiGg4ENP9LfHauyfiF/67/5cvf/0qly9N"
"qXXCu7cMF/Yatgc1o+477OwcMd25xN50x0yHAzNJUlMoqaO6NmaxMO18brqypKsqaFqED1Ji"
"s7E3a+3vZfsNvoPlqQRSKvCZpLCjNyDQ2g5y665DqQiFYbpei0s/+qPtYrkkc1n5vbj+PsMO"
"MuGumgikTxeEvmvv9LIof3E95BI52obPAn3Q+9vY/3/TIYDf/Zu+4Z3XXvvhruuCdXYfzc+K"
"guV8TuwGQj2d32NFYaP0bkIviOaF9byWdHhSSHnqht3cuH3QYqPn7hfA6/SUbng1iePQxWib"
"hiRNQ0pa5Dnv3bxJkWWBEewJqp6Nm+a5rbPduQhhp9OXs1mYCVut14HPJYQIk/T+fPubQQg7"
"1N06Xo9SitwNr8aOpuFvmGIwCDdh6xjX/tr51Fu6MteTaT1oqeLYGmq6my12jGR/3fy5eQyi"
"z4kJ1zKOpVJKRkmikixTSRSpKI7Fb/z2X4hf/CdfEFffq9k/uIxw3daqG/DE+Ws8cO6Y/Z0R"
"u3uXzLn9Hb2zu6snW9vdcDzSeVHoWGvdta1p69r8ba5/mqbhoXWvrr9S6hSZ+t/m+gNB1ytz"
"mefd2v8Xrlz5i+9a/N5xNtNdWvF8fwFD17DrKFcrJpNJ+N7OWDNFo+2QMu7i+sgbudRWOD4U"
"rgb2h3QL4Vur4UW6p2r4nBDfBdBJpQInxT/d/EUBgqGkH985t7fHcrXazC26tDrY3rsJ9/Vq"
"xc50Clh2vL9ZddeROKa0Ngbl/oaXuzGubAhPK3djZM66yUvPJk5ZNXLnX9Z14Mf4823rOgij"
"RXG8aTaUpTWg0BojZfCqCxtZa1tquWvouzFA+LnOWAVWAUJIKZRSMlFKyiRRBlSeSnX92v/f"
"3pcHV3Hd6X693FVX+4YALWgDIVYBYsesZjO7sQ2243gZ25PEdubVJM9TU5l6NZVUZqYmmZfn"
"LJPEsZMJGGICAWOD2cUiFoEkhASShSS0S2hf73673x/dfdR9tdzWgpCgvypV6d7bffrsfc7v"
"9zvf10z9759+iZy8e3ByZiyYk4i7pR1wuRmkxliRPLEeIUEGBAXF8eHhUXxoSCAXEhzssfhZ"
"OL1ex1FuN+/u6OAhBiDyHg/Fc4LGl1PGzCFn66S82t/bNCDFnkkeQIlEj+V5eABwLhecPA+P"
"xMjpEbX4TCa4PZ4nov0lQRISmCmfgMTJg0CqQ6luOY54kxmvl4Q0OUuSEKM5/n1BFR+WTnSt"
"StzSbp4nVBwPGxvhb7HAIMq9mwwGQW3ZaiWnxj0eD4kF0TOMsCQW99hdnZ3E3sDxPHi3SNPB"
"iPYWsbLllU4qlhbOWknfMzodDGKHcDkcRJJIxzDo6OhASEgIWJpGZ2cnbN3dCAoKQrfVKhxQ"
"ldkJdAYDuru7YRJliax2uxBxzDCwcRwgumXddjsRLpCil3kI21NpQpA6iOTdlBwDjOj+Bs/D"
"KosBMsuMqE6nIAdO3OK0IH3uFtPQ6/XgpS0wLbBRgGEAj0fgr5I9T9quSNsGmuPAiG9MiqJE"
"tgYePE2DMRhommUZP4uJOfJlNvWjfz8MN6fDpKjJcLk9yL7bgElhPFKinYif6ILJL4yPiJjA"
"h4eG8qGhIZzZaPTojUbOoNdzNEXxvMfD0zTN64xGXs+yAiVwH+0PCMdoKFqwp0ntT/HyqH0B"
"tPi91P4e0SBtCggg7e92OuGmhEDhTrv9iWt/yZBO+K24HvIAoiso266SVZT0P91zppYX65Cw"
"N0iTk1gfozX+fUHVCkuaMaWlps5gIG8HkilOkNhmxGWp2WSCXewIjLTSESubA8iKSfJySMFj"
"0lEIqVNKjSfc0kOeL30nr1jO7SarM4ZlYRaNmICgmONyOsFJB6PdbtQ3NCAsNBScXo/Ojg74"
"BwQQg6dRPCnPSm8UlwtmPz/4WSwkfybxjJjU2Xiv/AJQCmCI19EsC3g8gkwTwwjxMBQFW3c3"
"2UZQfbz5pHv1ACmv1EbyOvLIJikAimBA8SKSH8n2h576pHiKokwmPfX1+Xzqze//BmHBBvgH"
"xaKmkQdLO5CWBKTGehAabISfZRI/ITKcCw0N4fwtFi4wIJBjdSxnNhh4t8fD0TTNeziOh/hG"
"92jtPzbbHyCrQcmOR9M0uFEe/76g2ksoxUQZWIEXR35YkTUYwHEcdHq9oBACgVWBoijxwClD"
"jKRSRLC0IpIq1uNyCZLa4hEAl+i2ZcU3rVSp0ol4qXGcbjeMotHO7RLYH1kx3kPyuHR2dSEw"
"MJB0XolC2aDTCVHyRiMCAgOFYwoUBXt3N2lAAOTtyYlbKj0rsCnyEGWQxPgbMyvwAnVbrQKT"
"JN+jByd1HJplhXNcPE8MmHq9Hk4ploYWWB/1siBZ6W3r4QQ5JaPZDEYMBpRiZaT2kTqbtITn"
"5V4fTw8tieSCd4kBicTtzjDgOB4OhxMTwsy8yaij2rpoOLgWxE/2w4LpOkSF0jBbwvjg4FAu"
"PCyEDwkO8fhbLJzRaOB1ej1HAzzH8zxF0zxF0zxLUUKQptb+Y7b9SewWRSkmwtEe/77gOw6L"
"ZYl4o46m4eI4EsAJ9JyZkmZK6fS3NMtL/ONkW4c+JH8gBoV6PMJxBsk4Ktvfg6LAezwkeI0V"
"bQl6ADa7HUa9nnh6OI8QzCedT/K3WAQSMvFNzJhMcLpcxAPjFJkbJMoNvckEu/i2AwTaGqPZ"
"LNgoGAZWu52IW5iMRjg9Ahm/5OUx6vVwOJ3Qi+e19Dqd8FbihIPZjNhZpDSlYDrJ7sGKHj3p"
"qJJTzDsjXtfd2Qm9wUBCOCiaJgyW8npWgOqJW5Mbks0Wi0CAqNPxbrebcjgcvMFo5KzdNi56"
"YgD1g79fTZ3OyENKUiQSJ/E8eJa3BATx4eFhfEhQMGex+HFGo5HzM5s5lmF4HuBpmubF9uUl"
"vnCt/cd8+wvbaLcbenEFJQm2jub49wVVgaNOux16sdO47HYSWk8KBcGrYDIY4OCFGCiDwSAc"
"gRD3vVKMCABiKJQHrklMDDqGgdvjIft0ctZJms1F4yjP84QJkRUriDQ8x8HpEuSDJKOudDqd"
"1ethd7nAQwgeNBmN0LMsidJlWRZutxsmPz84xahnGj1vBo4TzjjyeoGrmgNgEDu7XozVcbsE"
"mXOPOHCkRpK2GJLtwCa+haTfJIpcAIRtlRdjfYgcks0mHM4VjcJSKIckGMDL6ljoET0ubYh2"
"KuItgmAoljxYBpOJ97jdPOfxcCzLejo6rFj3zHRMTwyF3doGq53jLX5mLjAggA8KCuTMJhNn"
"MBo5HcvyDE3zRr2ed7jdcLtcvNb+46/99SYTeVlYLBZYbTZBYXoUx78v+J6wxAoBJfDieMSl"
"seQ2Nfr5wWmzCVqA4hKXoihCTCbJH3loWiE9JNknpOWry+UCbDbCP8SIRjppWSwVTrIxeFwu"
"QWFXdCcT6gy251wVw7Iwi8cdurq6oBMPSgcEBMAsuq0pcQnM6gTFXYnKleM46AwGwhVvFxtP"
"2ma4XC7oxMEBhhGknagetRUSjSxGMCsMnuLAkuwVkquY43vOiEmePuKe1uuJaIbLKdDSSkGF"
"UicgnkCGIZ1I+l4O0qnF+nS6XNALy3EeFMXTNM15hPAA3qgzcmGhwZTD4ge9XsczNM0Z9XrO"
"bDbzFEXxJoOBd7lcPGsw8E7RrqK1//hsf0pMh2FZ2BwOOBwO+InHjkZr/PuCKnoZKThT6jxy"
"Inqe4+Dv7w+H2w0Dy0LPssKSWVJelvbM8sqSz7S0QPIldUopcA0AKby0/HS7XISCQ+qMAAin"
"O0XTsIvLZ73RKBwYFbng/fz80NbeLhgDrVYEBASgU6xgjuNAcaJYpOQWpoRIe7v4JpMqF+g5"
"wAqKAkvTsIlBflYx9oTV64VtickExuWCy+kkCsCsOBAlSGWlWRasuKqQKHAhDkaKomCz24Wz"
"bOLy22m3E3klyuMROoC8Y8qW/QpIb1lZeaQBKg4SzuPxwOlweCiOQ3BQIIICA0i5pcFF0TTh"
"4g6wWIT21+kAnU5r//Hd/kKslBjPNtrj3xd8hpZKvOc8L7iQyaFb0T7gtNuFpSNFwSG+Hcxi"
"ZyGRseKeVR55K++IOpaFn9kMo8kEg9FIgs0km4HcHSwVXBo88kA2TurMMld/t8gv7RR/AwTP"
"jU1crkqxSXbpyIuYZ3I8Q7yH0engtAkiDPLB4hI7l8vhELYNHg85FsSJ9gCdXhA20DEMyZvL"
"6YTNbodDYpIUtyk8L0YA0zKCNp6HSeQiomiaHDeSB+N5B1ZKniCJa0hubKXESYGmaSGoUq8n"
"b3YpCpsRAy+19tfafzTb3xd8TlgeWWGlBpceLH1mxH0/LZvNzX5+QoXyPMkw0BNRCwjbTVZc"
"CrtkdguaFs5RWQICSFiFNPtLDe52u+EU9/ms+IbweHok7KWK1ukFySGaEqhaadHGQDOMEOlM"
"03A4nTCKbz0pwM1ltwuzv1fHISyoEOJGnOLbVWKCZBhG8JDQgiKLrbsbnMcDncEg8HSLS2AA"
"0LOscHBWrCNp5cDQtMBQSQsacJJdhKJp2EQWSqkTyjmEpPNh5E+sb8mASzMMaSNpC2USPTNG"
"kZ/JKW6TrN3dxKujtb/W/qPZ/gPBtwiFuCSVaFKdoidEcoFK8RMULYiU8jwvRABLBRMzLe2n"
"pX056RysQAND3qCULIhNNOoBAC2+Zd0eD/Gs6MVzdRI7pE50D0v5dtrtxEWuN5kIEylFieRh"
"DENc5/LO7hDpQXieF9RBxA5i8feHQ1wqdzkcJDhPSkvi4dKJ7mLpVLpLPLEvvVVohoHD5RI8"
"YmLDeTiuZzByHHQ8D4fLJfB7iXXOsoI6MDlGInZCed1JHYGiKHCUcG5NOvIhiWBKB1l5CF4c"
"hqLQbbXC7nAIAZlUjzyVlK7W/lr7j0b7+4LPCSt9+fKA6MmThQcA5G3Ta28sK5yUGYiZo6R7"
"pXtkBYE8He89uFQw8bny7+SQ54cE7/WdOeWz+knP+/nSZwo90bje6RNDJtUjR44+viN7efTU"
"pTx9yH5HP+Xi0Uf5+mkPkg/pjStLW8q3tzyV9L23p0lrf639H2X7h4hHoAaCKl3C7Tt2qOHW"
"0qBBg4Yh4+yZMz6t7j4nIu+ZV4MGDRoeBdTMNT4nrP6Wfho0aNAwkhiRCUtbX2nQoGE0oGZx"
"5HuFNSJZ0aBBg4aBoWZxpBnTNWjQMCagZkuoisBvpNDS1ASHeEbJ6XBAp9NhSmKiqnvramrQ"
"3taGxORkVYckH1eaGgaH5sZGOJ1O0icYlkVCUtLjztZThbEyDtTs5oY8YTU3NiroTX2BYRhk"
"XryIe/n55AT9pOhovKliwrp75w4Of/YZACB2yhS89s47Q8v0I05Tw+Bx4fRpfFNYSI6GREZF"
"IeGDDx5zrp4ejLdx4FtItZ/v9/3hD2hva4PZzw+ToqMBAPeLisjvSdOmAQBqqqpg7e5GYFAQ"
"PvjwQ0yOicGXR44MKpPlZWXk/8ryckLiPxw8ijQ1DB7Pv/wycm/exPHDhx93Vp5KjKVxoMbo"
"7psieYDf/AMC8HfvvQeLvz9ampoUE9b6555DSFgYujo78dtf/IJ8HyAjrVeLBYsXo6SoCJ0d"
"HVixZs2IVOijSHMkcL+oCAf/9CcAQEJyMva+/voT9by+EKwiwlnDo8FYGgcjY8MaYNZ7dvNm"
"WPz9B7zd4u+PZzdvxvlTp3w+qj9EREbigw8/HNHZ/1GkORKQDtkC6ug2xtvzNIwtjNVx0B+G"
"bMOaM38+ps+cqera1Nmz0dzU1O/vHe3tKLt/H06HA1GTJyM6Npb8ZrfZYLVa4RSN9S6XC4nJ"
"yYr73S4XHpSWoqW5GQzDICwiAv4BAeju6kJMXFyv5z2KNOWora5GfW0t7DYb9AYDwiMjERMX"
"53PJ21Bfj/a2NvLZ6XSiob4eABAYHAyDeCDVYbejrKQE7a2tYFgWERMm9Jm+rzKofZ4Eh90O"
"a3c3cZy4XS4kJCeju6sLpcXFMBiNSE5JUeRDbV4HwmDSGEzdq23jwZbB4XCgubGx1/cBgYGw"
"+PujuamJ0CkDgt1Omix8PWuwbTBQGSMiI32OA0DoFxVlZWhtbgYPIDAoCHEJCTDKONj7y5fd"
"ZkNJcTG6OzsREh6OxKSk/gVTR2JL2N/h0GfWrvV5qwSaprFy3bo+f8u6ehWnv/pKcVp7+erV"
"WPXsswCA7KwsZGZkwC6yEVIUhR/99Kfk2qqKChzatw82qxUr1qyBn8WCC6dPo7qiAulLlvQ5"
"uTyKNAFhwjly8CAa6uuxZuNGBAUH4+svvkB3VxeiY2PxyptvQieSofWFw599BodMp6++tpYY"
"RDds3YopiYm4df06zp48iaDgYKxYswbZ16/j5LFjiIyKwouvvoogcXulpgxqnifHvfx8nPv6"
"a1hFkQaGYbBzzx4c+/xzwpKwct06rFizBgBU53UgqE1jsHWvto2HUgZbdzdOffklqsrLyXeT"
"Y2KwdtMmWPz9UVFWhkvnz6ND9MztevllMAyj6lmDaQNfZayqqBhwHEjlP3/qFDiOw9qNG8Ew"
"DI4fPgy3y4Vlq1Zh2apVoCiqz3y9+NprOHLgAEkfAJJTUvDit77V92SvYkvIAHhloAsC/P0T"
"Fi5c6HPqs1mtyLp6lXxOX7IEJrO513Utzc3Iv30bAIRZfsIE7HjxRUycPBlFBQUAhM60YNEi"
"6PR6xMTFIXHaNGRfFwRhKYoikyXPcfjkN79BV2cn4pOS8NzOnYiaNAmz09JQV1MDmmEwdfr0"
"Xnl4FGkCwKH9+1FbXQ0AWLR8OaalpqK7qwvVFRXoaG+HxWLBpJiYfutwweLFCAgKwr38fACC"
"1+bN730PCxYvRnBICG7fuoUvjxyBx+PBzpdeQnJKCsIjI5GTlSU8p7ISaenpqsvg63neiJo0"
"CfFJSci+cUOoK56H0WhE0rRpKC8tFb4DMGfePNV5BYC21lbk5eQAEEwI8xctAoBBpTGYuldb"
"P4N5vhxGkwlz0tJwv6gIXZ2dAIC09HTMSksj9cjzgkrR3jfeAMuyqp+ltg1mz53rs4zrNm3q"
"dxwAwmR14uhRuN1urN6wAQuXLkXUpEkwm80oLChAeWkpeJ7HlISEPvPF8zx2vPgiZs6di7t5"
"efB4PGhuakJcQgKCgoN71VtpaSn/oLy8tN8BgsccOBoxYQLWbNiAwKAgzJwzBwFBQQCEDtUk"
"W1IH91E4ALBarejs6AAgeDuyrl6FzWoFTdPYtH07wsLD+332o0gzdsoUAILai9Qg8km7sY9t"
"glq43W6cOXGCfJY8syGhoeS7mqoqtDQ1DasMviDvaAzD4LmdOxEbH0+W80nTpg0qryNRXmBw"
"da+mfoZbBoqmsfSZZ8jnW9evK8KA8nNz8Yy46xjss9S0gdo+0N84cLlcOPf11+SzfKsYL4uT"
"y7x4Ed1dXb3yRTMMtr/wAkLDwzE5JkZxT6NodvDGiHgJHyW8jXwG2ZJdDaGX2WyGxd8fXZ2d"
"8Ljd+PqLL3Dqyy8RGxeH+YsXY4msw6jFcNJc9eyzWLZyJWiaRsPDh7h49ixuZ2eT34dj1K6p"
"rITNaiWf/7p/P2HZlKO9vR1xISEjXi8DISYuDu//8Iewdndj4uTJqCgrU53XkLCwPtMcTHlD"
"wsIGVfdq2ngkyjBtxgwEhYSgraUFXZ2duJuXh1lpaaiqqIDRaCQT02DL2he820AiHBxqH6ip"
"rFTY2eTONf+AAPI/5/Gg4sGDXvZsClCwoepl9lDPMMbBsLyEjxsUTWP7iy/i0L59pHJ5jkN5"
"WRnKy8qweMUKrNu0adTSdNjtuHrpEvJycuBxu7F6/XrMmjsXl8+fB9B7gh4MrLIODQDbXnih"
"zzeSwWh8JPXiC0HBweQNO5i89ofBpjGYuldTP5O9tu5DKQNN01i0bBm+/uILAMDVS5cwKy0N"
"N65cUUwYI1FfgLINhtsHbF4KNgpBWJEPXqI9tqtQuxkpjOuzhG0tLQgJCcF7P/gBNmzZgimJ"
"iQoPxPXLl2GXvSUeZZqcx4M//va3uCwaU195803MXbBAMVAYdugLWvlbDRBEDMx+fr3+GIZ5"
"JPXyqPI6EmkMtu7V1I+3c2QoZQAEb7okrtBQX4+crCy0NDcjcerUEa0vbwy3D3jnSS4jLxdC"
"BQSv4WjB94Q1hgn8qquqcOLoUZj9/JC+dClefestvP/DHxKDsaSdNhpp1tbU4GFdHQCBezsy"
"KopcL4FVMWHJ36wu2XOiJk2Cn8VCPt+9c6fXvQV5eairqRlUGfp73nAwmLyORBqDrXs19RMe"
"ETHsMgCAXq8nTgQA+OroUSxevnzIZVWL4Y6NiZMnE5syIDhG+vrf7OdH7IejgRFbYbW3tys+"
"NzY09HmdfFDIXeoAFKoZ8tlfctcCQkVLrlOWZVFSXIyLZ8+S3wODghAWEQFAMF72F9g60mnK"
"B4Tb7UZOVhYqy8tRKHo+ASHezBfk3rn62lqUl5aiq7MTTocDG7duJb9lnD2LW9evo7OjA+1t"
"bbh07hwyTp9GQGDgoMrQ3/NsXtsUCfJ64ziOnAuVg2EY1XkFlH3CbrMJMlODSGOwda+mfgKD"
"ggZVhoGQvmQJ2VJZLBakzpo1rPpS0wZq+0B/44CmaWzZuZOsyu7JJlFSrxSFzTt2kAPTvfIl"
"++yWrdAc/azs1CyNhh3W0Nbaij9//DGuX7miMGzezcvD3Tt3kDR1KlkSZ168iHNff01mdmt3"
"N+4XFSEtPR3/8/vfo172BikuKoJer0dHezsOf/aZYgK7nZ2N8AkTwDIM6mtq0PjwIe7k5KCj"
"vR13cnNxLz8fCUlJ2LlnT6/gR0CIJxrpNC3+/mhva0N9ba2Q/8JCNDc2Yu3GjSguLITb7cbD"
"ujrk3rqF1Fmz+kxDSqezowN1NTXgOA55OTnIy8nBxMmTMS01FRMnT0ZdTQ26u7pwv6gI1y9f"
"xo3MTJjNZjz/8ss9+VBZhoGeF+pl4L2Xn4/DBw4oOtzt7Gx0d3UpvEAAEB4ZqSqv1y9fxpkT"
"J8ik5bDbUXD7NpJTUhAdF6cqjcHWvd1mU1U/asvgC3qDAS3NzXhYV4cFixcjoY/gTLXPUtsG"
"avpAaXFxv+MgNCwMIaGhiIuPx8O6OhTdvYvu7m6UlZQg8+JFREZFYcdLL5Ezw33lqyAvD9NS"
"U3H+1Cnk5+aSFW9leTm6OzvJvRLKysr4B0tMTvAAABxqSURBVA8eDBjWQAE4OdAFkyZOfPa9"
"998fk7YuSQnYaDSipakJzc3NoGka4RERQzqzOBJpNjU2oqOtDQGBgeRtZrfbUV9TA4qiEDlx"
"oiJCuD+0tbaio70dRpMJYeHhCo8LIFD1tIlR6hGRkYqBM5Qy+HrecDBQXkcyDbV1P5T6GW4Z"
"3G43nA4HDAaDT1vmSNTXSI+NtpYWtLS0gAIQEhb2SOxWZ06f5s6dP396oGvG9YSlQYOGJwen"
"T53izl+4MOCE5XsiGsNhDRo0aHi6oK2cNGjQMCagiVBo0KBh3GBEJqyxG4WlQYOGpw3allCD"
"Bg1jAmNONWc0cOTgQRSI9DUStuzahbkLFjymHGnQoEENxv2Etf+TT/CgpKTXuSWD0YjWlhaB"
"QTE8HCkzZ2LBokVgdTrsfOklzJw7Fwc+/ZTco6YixhP2ffwxHpSVKQJ1A4OCoNfr0dLSApZl"
"ER4RgekzZ2L+4sWqjgQ9yfjs009RVlKiYAAJDAqC0WQix0yCQ0KQOHUqFi9f3iePm4axgSEz"
"jo4GXn7jDTQ+fIjf/Nd/ke+Wr16NtPR0OBwOHN6/HyXFxaiurMT9oiK8+uaboGi6T5rXJwmv"
"vPUWGh4+xH/3US/dXV346/79qHjwgNTLK2K9PAnIycpSqC79y7/9m8979r7+OhobGvCbn/+c"
"fCfVl8vlwrHPP8e9/HzU19aisKAAb7//PnTjRKdyKPUxnuHbS/iY47DCIyP7/N5gMGDV+vXk"
"c3lpKSpFStrHnefRQEQ/9eJnsWCD7FzaA1m9PM0IFyPfvaHT6bBu82byubmxEWX3749WtjQM"
"Er69hGN4O+U9mckFFZ5meA9O+el6Db1hkTElABg0w4eG0cO4Nm54M3j6qzwjxfM88rKzkX3j"
"Bhx2O7q6uhAUHIx5Cxdi3sKFimtbW1pw9eJFPCgtBcuyaG5qQkBgIFJmzMDKtWt7SXvfvXMH"
"t65dQ31dHSwWC/QGA/wDAtDR3o7V69cTHqTB5GGwkJ+SBwCLF7eRmmefP3UKVy5cUNy3duNG"
"tDQ3o7CgAC6XC3Hx8Vi/ZUuvQ9KAcAA5+8YN1NfWwuznB2t3NyZMnIi09HQFJ37GmTO4dO6c"
"4t4tu3aB4zhcyciAzWrFhi1bMGf+fPz+o48Im4CE33/0EQDg7957b5C11IMakQseEPjYpUPE"
"NzIzcfbECQXz55ZduxAZFYX9n3yiYLTwduzYrFZkZmSg6N49OOx2uN1ucjBYp9cjLDwcQcHB"
"2P2KwD2gtt94l11tfahtj8cKFTsj32wNAQGqRCgeJeQUGckpKYiaNAmAsN3Jz80FILAtPrtp"
"E7HV9HcPABz9/HNcPn8eRpMJ73zwAYKCg3Hr2jXcLyqCy+kkHba1uRm/++gjVFVUIGriRLzx"
"ne+go60ND0pKUFVRAavViuSUFJLulYwMnPjb39De1oZ5Cxdi7+uvIy09HXarFbdv3cLMuXMJ"
"xa3aPAylXkqKiwmnklQv8sPMap49JTERsfHxyJPRDDscDixcuhQBgYEou39fmLzy8zF73jwF"
"4d1XR4/izFdfoaOtDd9+9108s2YN4hMTcU48td/e1kYGSVxCAkLDwhRUMO1tbWhvbUV9bS08"
"Hg8qy8ux9JlnMG/hQuj1ehQXFpJr/9c//7PqCb6v+qqrqcGxQ4dgs1oxYeJE7H7lFQSLXOqT"
"Y2KQOHUqcrKyFPclp6RgxuzZuHHlSp/173Q68Ydf/QrFhYWwWa349jvvYFpqKhHaAM/jje98"
"hwhYDKbfyKG2PgbTHo8TZaWlPtkaxm2ke2V5Ob76298ACOyIL7z6qoLGtT/k5+aSSW5aaioY"
"hkG8TM7q2uXLhLy/rraWvBWrKioAQDHxeROtXbt0qeeDuJWmKAoLlizBgsWLh5SHwaK2uhqn"
"jh8HAISFh2PP668r2AEG8+y4+HhF2nMXLMCUxEQsX72akLZ1dXbiqqzcednZRIVl2owZxNYW"
"GRWFZJFO5PatW7h96xa5J3X2bMVzEqdOxcy5c4dUfrW4kpGBX/3nf+LjX/4SzY2NSJkxA9te"
"eEHRvoBAZNcXBmIrKPnmG4UuYWRUlEItieM4VD54QD6r7TdDwVDaYyzD55ZwrFmwrly4gOtX"
"rgA8j4jISCxetgxzFiyAfgC9PzluXrtG/pdoYE0iXxcgbJeqKiowfeZMTJ0+HSvWrEFzUxOm"
"paYCAFEIAXpzWcvDL7KuXkVNVRVSZ89GyowZ2Lht25DyoBaXL1zA2ZMnSZ6mTp+O3S+/3GsS"
"H6lnx8bHo0IcdPcLC7F248Ze6UvMnxIiJkwgK6nsGzcwZ/58AL2dJFKIwcTJk9Hw8CFWy5wr"
"I4VlK1cKqxi7HZkZGcjMyEBhQQGmz5yJbS+8MCwvofe9bre7VxnllDFq+81QMJT2GMsY02EN"
"fWHZqlV9asGpRb1IpQsIogDSlkehBCJ2IIZhsHLdOtRUVeFefj4yzpwZkCdq/qJFyMzIIJ9r"
"qqpQU1WF0199hTnz5mHTtm1gdbpB5UEtlq9ahcryctwRtx3FhYWoqqzsRV87Us+WD7jWlhby"
"f8PDh+R/b94vOQ3ww36knuTXvvW97/nMx3BhNBqxZsMGPKytRUlxMe7l58MSEIANW7YMOc3E"
"5GSkzJhBJoPysjLQsgkrNj5esXpV22+GgpFqj7GCca2aM1wsXr58wCV3S3Mz/nbwIGqqqmDx"
"98erb72FqooKRdyLHKvXr4fZbMa1y5eJgCYAgOdx+9YtmMzmXkolvvIwGGzesQP1tbVoqK8H"
"z/M4cuAA3n7/fUXHfBTPlr/S1PYWeoz1q/ikJJQUFwMACm7fHtaERdE0dr/yCr65dw9fHjmC"
"r7/4AjRFYXJMDKalpiJ9yRJFXNxQ+o3qvKi8bqy1R38Ydyus4SI8IoKQ+fviWD/6l7+gpqoK"
"gMDLHR4ZSWxZfeH//vSneP+HP8Si5ctRX1uL4sJC5GRlEZuQFN8zmDwMBjqdDrv27MHvP/oI"
"brcbnR0dOPqXv2DvG2+QLclIPVvunZILs0ZMmEDqzJuzX35Pf7ahwYLn+RGJu5Nvnb29rENB"
"SXExjh8+jIjISHzr7bcHvFZtv1ED7/oY7fZ41Hjq2BrmzJtH/i8V36hy1FRVkUmpVubulo63"
"DLRd6uzowJWMDFAUhahJk/DM2rV487vfJb9LK53B5GGwCI+MxJoNG3rSv39fETYwnGfL3fty"
"BZfZsjTlKzaJY11Cg2zbsWDJkgHL0R+87UPebv2hQt7WE70M7/KjOi5xMpOLKvSFS2fPwtrd"
"jbbWVlRXVqKpoUH4a2xU2EEB9f2mL/iqj0fdHiMJNXPNmPcSekuB11ZX+wxm9dZbk3vc5i1a"
"RMjv62trcfXiRZLew/p6nDh6lAg0yOOLrmdm4vypUwoXNyB4YeQrlYvnzuHqpUtkYpOCWWmZ"
"dPlg8tAfvFWJHspsU+lLlyI6Lq4nT2fPEi/QcJ6dl51NBB3uf/MNAEFxOF02KGalpZHOX1hQ"
"QAbFw/p6fHPvHgBgyYoVSJkxg9zjrdDjLSwqx6SYGIWZ4uSxY7hx5YpPMU9vr6vctlNcVIR8"
"8cC8wWjEs889p7hWHgN1Lz8fzU1NOPPVV4pr5F5BAJgiel7bWlvxya9/jV///OfC389+hp/9"
"+Mf47NNPFXlW02/6gq/6GEp7PC6omWuGrZrzKPHpb36D86dPKyaoupoa3Lx2DRGRkX0GLJYW"
"F+OzTz5RLH8ryspIrAlFUUidNQt+FgtsVivu5efj1vXrqCgrQ2dHBzZt306kr2Li4lBbXY3u"
"ri6id7d19250dnSgtbkZHM+job4egUFBiIyKwqVz57Dt+edRWlyMy+fPI+fmTeRlZyMhKQnb"
"du9GjDiJDCYPfeEPv/oVLpw5o9iu11ZXI+vqVSRPmwY/iwXRMTHIycoidffNvXvIy85GWno6"
"ZqWlqX62PHaps6MDWVev4tb16zCbzUhbuBBbnn++l0E4aepUTJg4EdauLly9dAn5ubnIvXkT"
"sVOmYOPWrZgn0+krLizEgT/+USFVVV5Who5+YoNMZjP8AwJQV1MDl8sFp9MJ/4AAxCcn9ys0"
"+tf9+3H25ElFBHtNVRVu37qF7Bs3UHD7NsIjIjArLQ3bdu8mAhYSYuLi0FBXh/b2dmHV1NKC"
"5atX4+bVq+Sa6spKlBYXk+DRuClToNPrUV1ZqTh0LaGluRk2mw3JKSmq+01fUFMfg2mPxwk1"
"cViaCIWGAfGvH35I/n9u585heWifFuRkZeHc11/DZrUiISmJ2BA5jweFBQU4fOAAACHM4J0P"
"PnjMuR07OH36NHfeh2rOuD6ao0HDWMTXx4+T1VxSSgoxgtMMg2TZqlHurNCgbkuoqeZo6Bfe"
"BlxvY7GGvhEr28Ll5+YqbHRS1HlgcLDCOaJBHbQVloY+cfXSJVy9eFHx3YUzZ3D3zh28/MYb"
"ikBTDUrsfvVV3LhyBfeLitDc1IT/9x//geCQEPA8Lxj1N29GWno69D4cK08b1HgJn7o4LA3q"
"sGTFCixZseJxZ2NcQq/XY/nq1Vi+evXjzsq4grYl1KBBwxMFzfunQYOGMYERCRzVtoQaNGgY"
"DajZyz2xRvd9H3+MspISxXex8fF4zce5Lg1DR0FeHo6IMUZy9FXv0sFgQIjvGgsEchrGPp7Y"
"LeErb72Fbbt3K7/UVouPFKkzZ+K7//iPmJWWpvyhj3o/fvgwuru60N3V1S/7hQYN3nhiV1gA"
"MHPuXBw7dOhxZ2PU0NnRgbycHJQWF6OlqQk0TcPj8cBgNCIuPp4wTjwqUDSN0LAwbNy6lfBy"
"9Xut5szRMAT4Zhwdx6uSgcj2niS43W5cPHsWNzIzMS01FSvXrUN0TAyhTOnu6kJxYSH+un8/"
"kqdPx+r16x/phGHwIorrC5t37MCXR46Aoihs3rGj1+/yI0GxU6bgtXfeGdE8ahifeKJXWE8D"
"bFYrPvv0U7S2tGDvt7+NuISEXtf4WSyYu2ABZs6Zg2OHDuHsyZNDJoQbKUxLTSW00xo0qMWY"
"F1LV0D/cbjc++/RTNDc14bW33+5zspKD1emw/YUXUFpcrOCz0qBhTEDFXPPUrbDqampw9sQJ"
"VFZUwGg0YvrMmVi9fj3Zxhz9/PNe9hfJy5Vx5gwuX7ig0EPsywPmcrmQfeMGCvPz0fDwIViW"
"VZ7DoygEihqKH8i2PqX37+P6pUuoqa6Gx+1GYFAQQsPD4bDbMW3GDKR7kaxlnD6Nmqoq7Nqz"
"h9imnE4nLp07h/zcXHAch6nTp6OyvBxNDQ2YFB2NN7/7XSxevhw3r13D1uef71U/rc3NuHr5"
"Mkq/+QYejwc2mw1h4eGYOXcu0hcvVijwAAKtzeXz51Hx4AE4jwfBoaG9xA7keFhXh9/+4he9"
"vpck1n/Rh9R6dWUl+f6DDz/EX/fvx738fMU1sfHx2LJzJ059+SUelJYiMDAQu199lajENDc1"
"4ea1a3hQUgKb1QqHwwGWZREeGYm58+crSAjVoLmpCVmZmSi7fx82mw0URcFoNCI+KQnpS5cq"
"qI+Gkt+h9qHxDG+d0b7gkw/L32JJWLho0bhdZsn5nNxuN+pqauBnsaCxoQFOhwO11dUoLy3F"
"nHnzQNE0pqWmIjwyUtHBgoKCMGf+fMQlJCA+MVEhiST9JsHa3Y1Pfv1r5OfmoqO9HWs3bMDu"
"V14BTdMoL+2h+tm1dy82yRRR7uTk4NC+fWhtaYHBaMT3fvADLFq+HDVVVbh75w4Sp07FpOho"
"cn1HezsOHzyIqEmTiDR9R3s7Pv3v/0bxvXtYt2kTdu3di7t37pDnLn3mGUyKiYHZzw8ZZ89i"
"4dKliroqLCjA/3z8MWoqK7F+yxZsf/FF8DyPgrw8lN2/j6rycsyYM4fYBu/k5ODAn/6EpoYG"
"REZF4e/eew/pS5eiqqIC1ZWVPXUUHEzqyOLvj4TkZNzJzVV00GfWrgUALFq2DIuWLVO0W0xc"
"HN75/vexaNkyAEDKjBkwGI1K6mCRm+xBaSncLhdsVius3d2YPmsWigsL8T+/+x2qKypA0zS+"
"/+GHmD1vHq5fuYLW5mZ8c+8e9Ho9omNj++lFShTcvo39f/gDqisrMSUhAa+/+y4WLVuGqspK"
"5GVnIycrC8HBwWTiHmx+h9qHxjtKSkr48vLyYeoSPkGGaz+LBd96+23s2rsXz8kMvTVVVciV"
"TUIDsS9OlunL9YVL58+jScYGKrn45arAAJDnpQN34cwZRT7Nfn6gaRrPbt7c54rl9q1b4Dwe"
"wk/FcRw+//Of0dzYiITkZCKm6efnB0DY2k+fNYuk3+nF597U2IgjBw/C43bDz2IhKw75yqO8"
"rIyo7NTX1uKLv/6VTDobt22DyWwGTdNYuW7dgHU0OSamFw3xYEDRNBZ6rTYpmsbze/fCI6Mu"
"ltg7s2/cIPTOVqsVNMMIpIsTJpBr5XJYA6GupgZHP/+cpPfsc8+BYVkwLEvsgh63G0cPHSLb"
"7sHmd6h9aLxDjfnJN6f7IOWmxjJMJhOplJlz5hCedkB4a0oYqOJ8VWqTF3WxRDdsFicOCd6C"
"AHIK34b6ehw5eBD1tbWgabpPZZtS8W0dK8pF5efmEl5y+fWS1NSUhAQizeV2uXqxc2ZmZJDB"
"I2cclct5AT088FcyMsgAM/v5KUQM1GhEDteD6623GBgYCIZlsXz1augNBgQGB2PFmjUAgDnz"
"58NoMgEUhdmyGDF5+0uUxL6QKSt3UEgIAsRtGSCIq0r1xXMcMmVsF4PJ71D70HjHiLA1PEkr"
"LDkYlkVIWJiC43okEB0bq1j6d7S3IygkpBe3lFxtGRCED+TbqILbt1Fw+zYiJkzAM2vX9lr1"
"NYqc5NLkcluSlKcoYnyvrqwkA3HGnDnk3uampl7S53JBirqaGoUtSR6mIIkvyLcmQcHBvSvi"
"MeGZtWvJ9lJCyowZSJkxAxzHgeM4FBcVoTA/H9WimgygPnynQqbYbOlDHMI/IIBIdVWUlQ0p"
"v0PtQ08Dnmp6Gbmw5Ei9rZatXIm21laydcq6dg3rNm1SbDniEhJ6qZRs3rEDf/74416dsqG+"
"Hof27cOWXbsUWwK73Q6apslKRVpd+fv7kxXO3bw8AIIgrHzCu19UhOSUFMVz5M+dMHGiQrWl"
"L9hkAgosO/Z9N3U1Nbh+5Qq+uXsXeoMBazZsQHNjo+IloQZyMj7vVROgVLHxFtdQi6H2ofGO"
"ETlL+OROV0rZKrlc+3DAsCy27d6NRcuW4dTx48i9eRP5ubmgaRpTEhMxY/ZszJ43r9eWKDIq"
"Cn//D/+A7Bs3cPfOHbKCknDp/HnFhKXT6eByOuF2u4UJQ3yxSFs9juOI4yA6Npaskrq7ulBw"
"+za+/e67ivTNfn5kZdDW2uqznGazmXitbD4Uax437uTk4OihQwDPg2YYvP3uuwgJDVU4T9RC"
"Xk99iUvI5b+8t9NqMdQ+9DTA95ZwNHLxmCC3G3krk1A0PWT7XUFeHs6ePAmX04kf/Mu/qOpY"
"//5//g++/0//hBVr1mDFmjVobmrChVOnyKTjLVMVGhaG+tpatDQ1IWLCBCQkJ6Po7l20tbbi"
"1vXrqCwvJyEILpcLHrcb9XV1OHnsGNZv2dJL6y4mLo48q7urC+VlZQo5dQCw22zIy87GwmXL"
"EB0bi6K7dwGAaO0NpJ83HAykBakGF06fJhP6hKgohISG+rzn3p07+PJvf4PZzw8vvfYa4V+f"
"kpiI/NxcAH1TRsvbKT4pach5Hkofehow9tfyIwiXTOapo71doSe4aPlyxbVBwcFobW4GAIV8"
"uEJKvB8cO3SIGLB/9uMfwywT4gRFITg4GItXrCDadQDgsNtx7uRJbNq+HYAwIW19/nkyiXgL"
"FkxJSEB9bS3K7t9HxIQJ2LR9O9wuF8ofPEDmxYvYsGULVq1bh6+PH0dleTl+9pOfIDo2Fpt3"
"7EBUHx66xStWoLCggNhyvjh0CM+//DIxpjc1NODIwYOIED1rCxYvJhMWz3E4cfQodrz0EliW"
"xf2iIp915AsGoxEOUV9SzYpvIMgnEZvVKpSR53utDDmOIxPDiWPHYLfZYLfZkHHmDJ7fuxeA"
"sF27e+cOOI8HrS0taG9rQ2BQkJDPlhbyLJZlsWQAPUFfGEofehrw1GwJWZZFbXU1KsvLER0b"
"q4jzWbtpU68V1qy5c8k1zU1NuHHlCkLCwnq5vxsbGnqtLmanpRHBVZvV2suW0dzYiJL79/HS"
"t76lsCXdun4dDMNg5bPPwmAwEHociqKwev16RRrzFi7E9cxM3Lx2DelLl8Li74+9b7zRq9x7"
"vv1tVfUzKToam3fswFdHj4LnOLS1tuLjX/4SfhYLWJZFe1sbkqZNI+f+piQmYvGKFbh26RIA"
"wRtZ+q//CqPJ1Gs12NTYCGt3N/FycRyHTq+Jv7y0VBGpn5icjLt37gAQJpyDf/oTzH5+JNjV"
"26vX0dEBj8fTpzZhdFwcMYC3trTgT7/7XZ+r54wzZ5A4dSpi4uKURnjZ/+GRkdjx4os49vnn"
"cLvdOHPiBHa88AJ4AGdPCop5Or0eu/bsUbxkBpNfYOh9aDxDTVjDmBZSHQlcOncOG7ZuxYat"
"W+FyuXDt0iVkZmSgvq4OMVOmYPP27QpXt4To2Fg4HQ60NDfD43ajsbEROp0O6597TiHO4HK5"
"kJWZCf+AALJySU5JQWBQEBobGgY0vDY2NGC+KGJ58exZ7NyzB00NDTh/6hSuXryIort3EZ+Y"
"iC27dvXaXpjMZnjcbhQXFoKmaRLeMBxETZqElNRUuN1u2O12spUMCQ3Fmg0bsGbDBsUAS0hK"
"QlBwMDra22Gz2cCyLKJjY7Ft927k3rzZU0dOJ65fuQKHw4GEpCT86mc/66XonZeTA4qiSDli"
"p0xBc2Mj2tvbQdE0dDodEpOTMSkmBtcuX8ahffsUW0W7zYacrCwkTZtGYs8kxCcmoqmxEe1t"
"bWBYFsEhIVi3aRPS0tPxsK4OXV1d4AF0dXTAz98f0bGxCAwKwoPSUgQEBmLzzp2KNCMiIzF9"
"1ixwHg+qKipw+cIFZGVmwul0YuacOdjx0kuKIN/B5hcYWh8a7ygdCSHViVFRz77/wQfjdgNt"
"s1phki+nHzEelJTg+JEjaGtpgd5gwA9+9CNiS3I6ncgSJe8BwUD+zz/5yZCfxXEcjhw4gHsF"
"BVj/3HO9Ite9UVNVhTs5Odj4BEVHP4kYzT40lnD61Cnu/IULAwqpjtuJSC1Gc7ICgCMHD6Kt"
"pQUAEB4RoTh7p9frFbay4Upl0TSNXXv2YNW6dTh78iT++NvforiwUCH77nK5UF5aisMHDuDk"
"sWO9ziNqGHsYzT403uDbS6ixNQwKimW/aDSWQ27fkZ9BHCoomsby1asxZ/585N68iUvnzuHQ"
"vn3QibFYHo8Hk2NikJaejukzZ2rtOQ4w2n1oPOGpMbqPFpatWoUzX30FQDCMXjh9GivWrAHD"
"MLBZrTh1/DgAIfJ86cqVI/Zc/4AAEhLBc1wPg4DsOJKG8YHH1YfGA554G9bjwIOSEuTeuoXa"
"6mrhkDFFwWKxgKIoTIyOxpz585/KYxUa1ONp7ENqbFjalvARYEpi4lMXH6NhZKH1ob6hrZw0"
"aNAwbqBNWBo0aBgTGBE+LA0aNGgYDYyMVL0GDRo0jAZUUFlpE5YGDRrGDXyGNQAY+pFzDRo0"
"aFAJk8kEm812caBr1ExYGjRo0DAmoG0JNWjQMG6gTVgaNGgYN9AmLA0aNIwbaBOWBg0axg20"
"CUuDBg3jBtqEpUGDhnGD/w8mANBHiJ4GhAAAAABJRU5ErkJggg==")
index.append('splashwarn')
catalog['splashwarn'] = splashwarn | PypiClean |
/Meraki_Auto_Sync-1.190-py3-none-any.whl/autosync/mnetutils/sync.py | import asyncio
import threading
from random import randrange
from meraki.exceptions import AsyncAPIError
from autosync import lib, const, model
def set_sync(org_id: str, net_id: str, product: str, is_golden: bool):
"""
    Select the configuration target for a sync run.
    Args:
        org_id: Organization ID that owns the network
        net_id: ID of the network being synced
        product: Meraki product being synced (e.g. switch, wireless)
        is_golden: If True, target the golden (template) network instead of the organization network
    Returns:
        A single-element list containing the network object to act on
"""
golden_tag = const.appcfg.tag_golden
if is_golden:
action = [model.golden_nets[golden_tag].networks[golden_tag]]
else:
action = [
model.meraki_nets[org_id].networks[net_id]]
return action
async def sync(sdk: object, org_id: str, net_id: str, product: str,
is_golden: bool):
"""
    Performs a full sync of the _config object that is passed to the function
    using the Meraki dashboard SDK
    Args:
        sdk(object): Meraki dashboard SDK object
        org_id(str): Organization ID
        net_id(str): Current Network ID
        product(str): Meraki product being synced, e.g. switch, wireless
        is_golden(bool): Sync of the golden network
    Returns:
        Nothing. Updates the _config object that is passed in.
"""
if const.appcfg.debug:
print(f'Current Thread Name:{threading.currentThread().name} '
f'Thread net_id:{threading.currentThread().native_id}')
_maction = getattr(sdk, product)
net = set_sync(org_id, net_id, product, is_golden)
tasks = net[0].dashboard[product].settings()
#o_func = getattr(net, f"functions[{product}]['get']")
o_func = net[0].functions[product]['get']
net_name = net[0].name
for task in tasks:
waiting = randrange(0, 2)
await asyncio.sleep(waiting)
if const.appcfg.debug:
print(
f'\t {lib.bc.OKGREEN}Network:{net_name}'
                f'{lib.bc.OKBLUE} Requesting Config Object {product} - {task} '
                f'in Organization {threading.currentThread().name} with '
f'thread :{threading.currentThread().native_id} {lib.bc.Default}'
)
if task in o_func:
try:
#await eval(f'_config.Get_{setting}(sdk, net_id,_appcfg)')
action = getattr(net[0].dashboard[product],f'Get_{task}')
await action(sdk,net[0].net_id,net_name)
except AsyncAPIError as apie:
print(
f'\t {lib.bc.FAIL} Error Running Setting {task} '
f'{lib.bc.WARNING}Error Message: {str(apie)}{lib.bc.Default}'
)
except Exception as error:
print(f'{lib.bc.FAIL}Network: {net_name} '
f'{lib.bc.WARNING}Error with Module: {str(error)} '
f'{lib.bc.Default}'
                      f'Running Override Function {task}')
else:
try:
action = getattr(_maction, f'get{task}')
value = await action(net[0].net_id)
dashboard = [net[0].dashboard[product]]
setattr(dashboard[0],task,
value)
except AsyncAPIError as apie:
print(
f'\t {lib.bc.FAIL} Error Running Setting {task} '
f'{lib.bc.WARNING}Error Message: {str(apie)}{lib.bc.Default}'
)
except Exception as error:
print(f'{lib.bc.FAIL}Network: {net_name} '
f'{lib.bc.WARNING}Error with Module: {str(error)}'
f'{lib.bc.Default}') | PypiClean |
/flask_more-0.2.1.tar.gz/flask_more-0.2.1/docs/api.md | # @api
Flask-More does most of its work through the `@api` decorator, which leaves your existing route views untouched. It validates request data, parses the request body automatically, and lets you describe each endpoint of your API in more detail.
## Validation
```python
from flask import Flask
from flask_more import More, api
from pydantic import BaseModel
from models import User
app = Flask(__name__)
More(app)
class UserSchema(BaseModel):
name: str
age: int
@app.post('/users')
@api
def add_user(user: UserSchema):
new_user = user.dict()
    User.create(**new_user)
return new_user
```
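If the request body fails `UserSchema` validation, the view body never runs and an error response is returned instead. The exact status code and error payload depend on the Flask-More version, so the sketch below (using Flask's test client, with made-up field values) is illustrative only.

```python
# Illustrative sketch -- the error shape and exact status codes are assumptions.
with app.test_client() as client:
    ok = client.post("/users", json={"name": "Ada", "age": 36})
    print(ok.status_code)    # success: body parsed into a validated UserSchema
    bad = client.post("/users", json={"name": "Ada", "age": "not-a-number"})
    print(bad.status_code)   # validation error (4xx) returned before the view runs
```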
## OpenAPI
```python
from flask import Flask
from flask_more import More, api
from pydantic import BaseModel
from models import User
app = Flask(__name__)
More(app)
class UserSchema(BaseModel):
name: str
age: int
@app.get('/users')
@api(
tags=["users"],
summary="get all users",
description="get all or query users",
)
def get_users(start: int = 0, limit: int = 10):
pass
@app.get('/others')
@api(tags=["others"])
def others():
pass
```
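Here `start` and `limit` are plain typed parameters with defaults, so (assuming Flask-More follows the FastAPI-style convention these examples suggest) they are filled from the query string and documented as query parameters. A minimal, illustrative call:

```python
# Sketch: query-string values map onto the typed view arguments.
with app.test_client() as client:
    resp = client.get("/users?start=20&limit=5")  # get_users(start=20, limit=5)
    print(resp.status_code)
```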
| PypiClean |
/Flask_Simple_Serializer-1.1.3-py3-none-any.whl/flask_simple_serializer/serializers.py | import six
from werkzeug.datastructures import MultiDict
from collections import OrderedDict
from wtforms.form import Form
from wtforms_alchemy import ModelForm
def serializer_factory(base=Form):
class BaseSerializer(base):
def __init__(self, data_dict=None, model_instance=None, **kwargs):
            # Suppose the data is already a Python dict
self._validate_called = None
self.model_instance = model_instance
self.formdata = MultiDict({})
if data_dict:
if not isinstance(data_dict, dict):
raise ValueError("Data must be a Dict instance")
self.formdata = MultiDict(data_dict)
super(BaseSerializer, self).__init__(formdata=self.formdata, **kwargs)
def is_valid(self):
self._validate_called = True
return super(BaseSerializer, self).validate()
@property
def data(self):
if not self._validate_called:
msg = 'You must call `.is_valid()` before accessing `.data`.'
raise AssertionError(msg)
return super(BaseSerializer, self).data
def validate(self):
raise NotImplementedError
@property
def errors(self):
if not self._validate_called:
msg = 'You must call `.is_valid()` before accessing `.errors`.'
raise AssertionError(msg)
return super(BaseSerializer, self).errors
return BaseSerializer
BaseSerializer = serializer_factory()
ModelBaseSerilizer = serializer_factory(base=ModelForm)
class SerializerMetaclass(type):
"""
This metaclass sets a dictionary named `_declared_fields` on the class.
Any instances of `Field` included as attributes on either the class
    or on any of its superclasses will be included in the
`_declared_fields` dictionary.
"""
@classmethod
def _get_declared_fields(cls, bases, attrs):
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(attrs.items())
if isinstance(obj, dict)]
fields.sort(key=lambda x: x[1]._creation_counter)
# If this class is subclassing another Serializer, add that Serializer's
# fields. Note that we loop over the bases in *reverse*. This is necessary
# in order to maintain the correct order of fields.
for base in reversed(bases):
if hasattr(base, '_declared_fields'):
fields = list(base._declared_fields.items()) + fields
return OrderedDict(fields)
def __new__(cls, name, bases, attrs):
attrs['_declared_fields'] = cls._get_declared_fields(bases, attrs)
return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class Serializer(BaseSerializer):
pass
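# Illustrative usage sketch (not part of this module; the field definition and
# input values below are assumptions):
#
#     from wtforms import StringField
#     from wtforms.validators import DataRequired
#
#     class UserSerializer(Serializer):
#         name = StringField(validators=[DataRequired()])
#
#     s = UserSerializer(data_dict={"name": "Ada"})
#     if s.is_valid():          # must be called before accessing .data or .errors
#         print(s.data)
#     else:
#         print(s.errors)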
class ModelSerializer(ModelBaseSerilizer):
pass | PypiClean |
/Clearmatch-1.0.0-py3-none-any.whl/clearmatch/clearmatch.py |
from matplotlib.pyplot import bar, show, suptitle
import numpy as np
import pandas as pd
records_dict = {}
names = ["Missing", "Nonmissing"]
missing_count = [0, 0]
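# Illustrative usage sketch (file names and column layout are assumptions, not
# part of the package):
#
#     hosts = pd.read_csv("hosts.csv")    # column 0 holds the values to match
#     lookup = pd.read_csv("lookup.csv")  # column 0 holds keys, later columns hold values
#     cm = ClearMatch(0, hosts, 0, lookup, [1, 2])
#     cm.create_lookup()
#     result = cm.join()                  # adds a 'Match' column to the host DataFrame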
class ClearMatch:
def __init__(self, host_col, host_data, key_col, key_data, value_cols):
"""A constructor for the ClearMatch class
        Parameters: host_col, the index of the column to be matched to; host_data, a DataFrame that contains a column
        to be matched to; key_col, the index of the column to be used as the linking key; key_data, a DataFrame that
        contains the key_col; value_cols, a list of indices within the key_data to be matched with the host_data
Note: host_data and key_data may or may not come from separate DataFrames"""
# Various statements to enforce parameter types
if not isinstance(host_col, int):
raise TypeError("the host_col parameter must be an integer")
if not isinstance(host_data, pd.DataFrame):
raise TypeError("the host must be a Pandas DataFrame object")
if not isinstance(key_col, int):
raise TypeError("the key_col parameter must be an integer")
if not isinstance(key_data, pd.DataFrame):
raise TypeError("the key must be a Pandas DataFrame object")
if not isinstance(value_cols, list):
raise TypeError("the value_cols parameter must be a list that corresponds to the key_data")
self.host_df = host_data # To return after joining or replacing data
self.host_col = host_col
self.host_data = pd.DataFrame(host_data.iloc[:, self.host_col])
self.key_col = key_col
self.key_data = pd.DataFrame(key_data.iloc[:, self.key_col])
self.value_data = key_data.iloc[:, -key_col:]
self.hcol = self.host_data.columns[self.host_col] # The name of the host_column to use in the join method
def create_lookup(self):
"""Creates a dictionary with records in the key parameter as keys and corresponding rows in the values
parameter as values"""
# noinspection PyTypeChecker
key_tuple = tuple([i for sublist in self.key_data.values.tolist() for i in sublist])
values_tuple = tuple(self.value_data.values.tolist())
index = 0
for element in key_tuple:
records_dict[str(element)] = values_tuple[index]
index += 1
return records_dict
def replace(self):
"""Checks host values in the dictionary and replaces them with their associated keys or NaN is no key is
found """
for key in records_dict:
for record in self.host_data.iloc[:, 0]:
if record in records_dict[key]:
self.host_df.replace(record, str(key), inplace=True) # Replaces the element with the correct key
missing_count[1] += 1 # Useful so we can see statistics on missingness later
else:
                    self.host_df.replace(record, np.NaN, inplace=True)
missing_count[0] = (self.host_data.iloc[:, 0].size - missing_count[1])
def join(self):
"""Adds a column of keys that correspond to host values or inserts NaNs if no match exists"""
self.host_df['Match'] = np.NaN # New column for matches
for key in records_dict:
for record in self.host_data.iloc[:, self.host_col]:
if record in records_dict[key]:
n = self.host_data[self.host_data[self.hcol] == record].index[0] # Stores the index
self.host_df.loc[n, 'Match'] = key # Replaces the value at index n with the key
missing_count[1] += 1
missing_count[0] = (self.host_data.iloc[:, 0].size - missing_count[1])
return self.host_df
def summary(self):
"""Returns basic information about the data and its missingness"""
if missing_count[0] == 0 or missing_count[1] == 0:
raise TypeError("the replace or join methods must be called before calculating summary information")
print("Data Types:")
print(self.host_df.dtypes)
print("Number of records:", self.host_data.iloc[:, 0].size)
print("Number of matches:", missing_count[1])
print("Number of missing records:", missing_count[0])
print("Percentage of missing records:", (missing_count[0] / missing_count[1]) * 100)
return self.host_data.dtypes, self.host_data.iloc[:, 0].size, missing_count[1], missing_count[0], \
(missing_count[0] / missing_count[1]) * 100
def partition(self, col):
"""Creates DataFrames based on unique values in a given column in host_data"""
df_names = {}
for k, v in self.host_df.groupby(str(col)):
df_names[k] = v
return df_names
@staticmethod
def plot():
"""Creates a bar plot of missing vs. non-missing values"""
bar(names, missing_count)
suptitle('Missingness')
show() | PypiClean |
/Marl-Factory-Grid-0.1.2.tar.gz/Marl-Factory-Grid-0.1.2/marl_factory_grid/environment/actions.py | import abc
from typing import Union
from marl_factory_grid.environment import rewards as r, constants as c
from marl_factory_grid.utils.helpers import MOVEMAP
from marl_factory_grid.utils.results import ActionResult
class Action(abc.ABC):
@property
def name(self):
return self._identifier
@abc.abstractmethod
def __init__(self, identifier: str):
self._identifier = identifier
@abc.abstractmethod
def do(self, entity, state) -> Union[None, ActionResult]:
return
def __repr__(self):
return f'Action[{self._identifier}]'
class Noop(Action):
def __init__(self):
super().__init__(c.NOOP)
def do(self, entity, *_) -> Union[None, ActionResult]:
return ActionResult(identifier=self._identifier, validity=c.VALID,
reward=r.NOOP, entity=entity)
class Move(Action, abc.ABC):
@abc.abstractmethod
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do(self, entity, env):
new_pos = self._calc_new_pos(entity.pos)
if next_tile := env[c.FLOOR].by_pos(new_pos):
# noinspection PyUnresolvedReferences
valid = entity.move(next_tile)
else:
valid = c.NOT_VALID
reward = r.MOVEMENTS_VALID if valid else r.MOVEMENTS_FAIL
return ActionResult(entity=entity, identifier=self._identifier, validity=valid, reward=reward)
def _calc_new_pos(self, pos):
x_diff, y_diff = MOVEMAP[self._identifier]
return pos[0] + x_diff, pos[1] + y_diff
class North(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.NORTH, *args, **kwargs)
class NorthEast(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.NORTHEAST, *args, **kwargs)
class East(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.EAST, *args, **kwargs)
class SouthEast(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.SOUTHEAST, *args, **kwargs)
class South(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.SOUTH, *args, **kwargs)
class SouthWest(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.SOUTHWEST, *args, **kwargs)
class West(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.WEST, *args, **kwargs)
class NorthWest(Move):
def __init__(self, *args, **kwargs):
super().__init__(c.NORTHWEST, *args, **kwargs)
Move4 = [North, East, South, West]
# noinspection PyTypeChecker
Move8 = Move4 + [NorthEast, SouthEast, SouthWest, NorthWest]
ALL_BASEACTIONS = Move8 + [Noop] | PypiClean |
/AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/publish_microservice/publish_microservice.py | import Algorithmia
from Algorithmia.errors import AlgorithmException
import shutil #serializing models
import urllib.parse #input data
from git import Git, Repo, remote
import os
import pickle
from IPython.display import YouTubeVideo
from IPython.core.magic import register_line_cell_magic
import urllib.request, json
from dxc.ai.global_variables import globals_file
from dxc.ai.logging import microservice_logging
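# Illustrative shape of the `microservice_design` dict consumed below. The keys
# are taken from how publish_microservice uses them; the values are placeholders.
#
#     microservice_design = {
#         "microservice_name": "my_model",
#         "microservice_description": "predicts something useful",
#         "execution_environment_username": "my_algorithmia_user",
#         "api_namespace": "my_algorithmia_user/my_model",
#         "model_path": "data://my_algorithmia_user/my_models",
#         "api_key": "<algorithmia-api-key>",
#         "environment": "default",
#     }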
def publish_microservice(microservice_design, trained_model, verbose = False):
#Capture microservice_design in log
microservice_logging.microservice_design_log(microservice_design)
# create a connection to algorithmia
client=Algorithmia.client(microservice_design["api_key"])
api = client.algo(microservice_design["execution_environment_username"] + "/" + microservice_design["microservice_name"])
##Defining the environment for Algorithmia
try:
if microservice_design["environment"].lower() == 'default':
run_environment = "python38"
else:
run_environment = microservice_design["environment"]
except:
run_environment = "python38"
# create the algorithm if it doesn't exist
try:
api.create(
details = {
"summary": microservice_design["microservice_description"],
"label": microservice_design["microservice_name"],
"tagline": microservice_design["microservice_description"]
},
settings = {
"source_visibility": "closed",
"package_set": run_environment,
"license": "apl",
"network_access": "full",
"pipeline_enabled": True
}
)
except Exception as error:
print(error)
# create data collection if it doesn't exist
if not client.dir(microservice_design["model_path"]).exists():
client.dir(microservice_design["model_path"]).create()
# define a local work directory
local_dir = microservice_design["microservice_name"]
# delete local directory if it already exists
if os.path.exists(local_dir):
shutil.rmtree(local_dir)
# create local work directory
os.makedirs(local_dir)
# serialize the model locally
local_model = "{}/{}".format(local_dir, "mdl")
# open a file in a specified location
file = open(local_model, 'wb')
# dump information to that file
pickle.dump(trained_model, file)
# close the file
file.close()
# upload our model file to our data collection
api_model = "{}/{}".format(microservice_design["model_path"], microservice_design["microservice_name"])
client.file(api_model).putFile(local_model)
if globals_file.run_experiment_encoder_used:
encode_model = 'encode_file.pkl'
encode_output = open(encode_model, 'wb')
pickle.dump(globals_file.run_experiment_encoder, encode_output)
encode_output.close()
encode_folder = microservice_design["microservice_name"] + '_encoder'
encode_path = "{}/{}".format(microservice_design["model_path"], encode_folder)
client.file(encode_path).putFile(encode_model)
if globals_file.run_experiment_target_encoder_used:
target_encode_model = 'target_encode_file.pkl'
target_encode_output = open(target_encode_model, 'wb')
pickle.dump(globals_file.run_experiment_target_encoder, target_encode_output)
target_encode_output.close()
target_encode_folder = microservice_design["microservice_name"] + '_target_encoder'
target_encode_path = "{}/{}".format(microservice_design["model_path"], target_encode_folder)
client.file(target_encode_path).putFile(target_encode_model)
# encode API key, so we can use it in the git URL
encoded_api_key = urllib.parse.quote_plus(microservice_design["api_key"])
algo_repo = "https://{}:{}@git.algorithmia.com/git/{}/{}.git".format(
microservice_design["execution_environment_username"],
encoded_api_key,
microservice_design["execution_environment_username"],
microservice_design["microservice_name"]
)
class Progress(remote.RemoteProgress):
if verbose == False:
def line_dropped(self, line):
pass
def update(self, *args):
pass
else:
def line_dropped(self, line):
print(line)
def update(self, *args):
print(self._cur_line)
p = Progress()
try:
Repo.clone_from(algo_repo, "{}/{}".format(local_dir, microservice_design["microservice_name"]), progress=p)
cloned_repo = Repo("{}/{}".format(local_dir, microservice_design["microservice_name"]))
except Exception as error:
print("here")
print(error)
api_script_path = "{}/{}/src/{}.py".format(local_dir, microservice_design["microservice_name"], microservice_design["microservice_name"])
dependency_file_path = "{}/{}/{}".format(local_dir, microservice_design["microservice_name"], "requirements.txt")
# defines the source for the microservice
results = "{'results':prediction}"
file_path = "'" + api_model + "'"
if globals_file.run_experiment_encoder_used:
encodefile_path = "'" + encode_path + "'"
if globals_file.run_experiment_target_encoder_used:
target_encodefile_path = "'" + target_encode_path + "'"
    ##Don't change the structure of the docstring below
    ##this is the source code needed for the microservice
src_code_content = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tprediction = trained_model.predict(pd.DataFrame(input,index = [0]))
\tprediction = json.dumps(prediction, default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
    ## source code for the generalized model
src_code_generalized = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tprediction = trained_model.predict(pd.DataFrame(input, index = [0]))
\tprediction = json.dumps(prediction, default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
    ## source code for the generalized model with a feature encoder
src_code_generalized_encode = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def load_encode():
# Get file by name
# Open file and load encoder
\tencodefile_path = {encodefile_path}
\tencode_path = client.file(encodefile_path).getFile().name
# Open file and load encoder
\twith open(encode_path, 'rb') as f:
\t\tencoder = pickle.load(f)
\t\treturn encoder
encode = load_encode()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
    ## source code for the generalized model with a target encoder
src_code_generalized_target_encode = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def load_target_encode():
# Get file by name
# Open file and load target encoder
\ttarget_encodefile_path = {target_encodefile_path}
\ttarget_encode_path = client.file(target_encodefile_path).getFile().name
# Open file and load target encoder
\twith open(target_encode_path, 'rb') as f:
\t\ttarget_encoder = pickle.load(f)
\t\treturn target_encoder
target_encode = load_target_encode()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\ttry:
\t\tprediction = target_encode.inverse_transform(prediction)
\t\tprediction = prediction[0]
\texcept:
\t\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
    ## source code for the generalized model with feature and target encoders
src_code_generalized_both_encode = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def load_encode():
# Get file by name
# Open file and load encoder
\tencodefile_path = {encodefile_path}
\tencode_path = client.file(encodefile_path).getFile().name
# Open file and load encoder
\twith open(encode_path, 'rb') as f:
\t\tencoder = pickle.load(f)
\t\treturn encoder
encode = load_encode()
def load_target_encode():
# Get file by name
# Open file and load target encoder
\ttarget_encodefile_path = {target_encodefile_path}
\ttarget_encode_path = client.file(target_encodefile_path).getFile().name
# Open file and load target encoder
\twith open(target_encode_path, 'rb') as f:
\t\ttarget_encoder = pickle.load(f)
\t\treturn target_encoder
target_encode = load_target_encode()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\ttry:
\t\tprediction = target_encode.inverse_transform(prediction)
\t\tprediction = prediction[0]
\texcept:
\t\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
if globals_file.run_experiment_used:
src_code_content = src_code_generalized
if globals_file.run_experiment_encoder_used and not globals_file.run_experiment_target_encoder_used:
src_code_content = src_code_generalized_encode
if globals_file.run_experiment_target_encoder_used and not globals_file.run_experiment_encoder_used:
src_code_content = src_code_generalized_target_encode
if globals_file.run_experiment_encoder_used and globals_file.run_experiment_target_encoder_used:
src_code_content = src_code_generalized_both_encode
splitted=src_code_content.split('\n')
##writes the source into the local, cloned GitHub repository
with open(api_script_path, "w") as f:
for line in splitted:
if line.strip()=="file_path = {file_path}":
line="\tfile_path = {}".format(file_path)
if line.strip()=="encodefile_path = {encodefile_path}":
line="\tencodefile_path = {}".format(encodefile_path)
if line.strip()=="target_encodefile_path = {target_encodefile_path}":
line="\ttarget_encodefile_path = {}".format(target_encodefile_path)
if line.strip()=="return {results}":
line="\treturn {}".format(results)
f.write(line + '\n')
    ##Don't change the structure of the string below
    ##these are the requirements needed for the microservice
requirements_file_content="""algorithmia
pandas
numpy
feature-engine"""
post_split=requirements_file_content.split('\n')
#writes the requirements file into the local, cloned GitHub repository.
with open(dependency_file_path, "w") as f:
for line in post_split:
line = line.lstrip()
f.write(line + '\n')
# Publish the microservice
files = ["src/{}.py".format(microservice_design["microservice_name"]), "requirements.txt"]
cloned_repo.index.add(files)
cloned_repo.index.commit("Add algorithm files")
origin = cloned_repo.remote(name='origin')
p = Progress()
origin.push(progress=p)
# publish/deploy our algorithm
#client.algo(microservice_design["api_namespace"]).publish()
# api.publish(
# settings = {
# "algorithm_callability": "private"
# },
# version_info = {
# "release_notes": "Publishing Microservice",
# "version_type": "revision"
# },
# details = {
# "label": microservice_design["microservice_name"]
# }
# )
api.publish(
details = {
"label": microservice_design["microservice_name"]
}
)
# code generates the api endpoint for the newly published microservice
latest_version = client.algo(microservice_design["api_namespace"]).info().version_info.semantic_version
api_url = "https://api.algorithmia.com/v1/algo/{}/{}".format(microservice_design["api_namespace"], latest_version)
return api_url | PypiClean |
/EOmaps-7.0-py3-none-any.whl/eomaps/scripts/open.py | import sys
import os
import click
try:
# make sure qt5 is used as backend
import matplotlib
matplotlib.use("qt5agg")
except Exception:
click.echo("... unable to activate PyQt5 backend... defaulting to 'tkinter'")
def _identify_crs(crs):
from eomaps import Maps
if crs == "web":
crs = "google_mercator"
    # if crs can be identified as an integer, return it
try:
return int(crs)
except ValueError:
pass
if crs.startswith("Maps.CRS"):
x = getattr(Maps.CRS, crs[9:])
if callable(x):
return x()
else:
return x
else:
import inspect
options = [
key
for key, val in Maps.CRS.__dict__.items()
if (
not key.startswith("_")
and (inspect.isclass(val) and (issubclass(val, Maps.CRS.CRS)))
or (isinstance(val, Maps.CRS.CRS))
)
]
[
options.remove(i)
for i in ("epsg", "CRS", "Geocentric", "Geodetic", "Projection")
if i in options
]
query = [i.lower() for i in options]
try:
idx = query.index(crs.lower())
x = getattr(Maps.CRS, options[idx])
except Exception:
from difflib import get_close_matches
matches = get_close_matches(crs, query, 3, cutoff=0.3)
if len(matches) == 1:
txt = f"did you mean '{options[query.index(matches[0])]}' ?"
elif len(matches) > 1:
txt = f"did you mean {[options[query.index(i)] for i in matches]} ?"
else:
txt = ""
click.echo(f"EOmaps: unable to identify the crs: '{crs}'... {txt}")
return None
if callable(x):
return x()
else:
return x
@click.command()
@click.option(
"--crs",
type=str,
default=None,
help=(
"The projection of the map."
"\n\n\b\n"
"- integer (4326,3857 ...epsg code)"
"\b\n"
"- string (web, equi7_eu ...Maps.CRS name)"
"\n\b\n"
"The default is 'web' (e.g. Web Mercator Projection)."
"\n\n\b\n"
),
)
@click.option(
"--file",
type=str,
default="",
help=(
"Path to a file that should be plotted. "
"\n\n\b\n"
"Supported filetypes: csv, GeoTIFF, NetCDF, Shapefile, GeoJson, ... "
),
)
@click.option(
"--ne",
type=click.Choice(
[
"coastline",
"ocean",
"land",
"countries",
"urban_areas",
"lakes",
"rivers_lake_centerlines",
],
case_sensitive=False,
),
default=[],
multiple=True,
help=("Add one (or multiple) NaturalEarth features to the map."),
)
@click.option(
"--wms",
type=click.Choice(
[
"osm",
"google_satellite",
"google_roadmap",
"s2_cloudless" "landcover",
"topo",
"terrain_light",
"basemap",
"basemap_light",
"s1_vv",
],
case_sensitive=False,
),
default=None,
multiple=False,
help=("Add one (or multiple) WebMap services to the map."),
)
@click.option(
"--location",
type=str,
default=None,
multiple=False,
help=("Query OpenStreetMap for a location and set the map extent accordingly."),
)
@click.option(
"--loglevel",
type=str,
default=None,
multiple=False,
help=("Set the log level. (info, warning, error, debug"),
)
def cli(crs=None, file=None, ne=None, wms=None, location=None, loglevel=None):
"""
Command line interface for EOmaps.
Keyboard-shortcuts for the map:
\b
"w" : open the companion widget
\b
"ctrl + c" : export to clipboard
\b
"f" : fullscreen
\b
"""
from eomaps import Maps
Maps.config(use_interactive_mode=False, log_level=loglevel)
if crs is None and wms is not None:
crs = "google_mercator"
elif crs is None:
crs = "google_mercator"
usecrs = _identify_crs(crs)
if usecrs is None:
return
m = Maps(crs=usecrs)
if location is not None:
m.set_extent_to_location(location)
for ne_feature in ne:
try:
getattr(m.add_feature.preset, ne_feature.lower())()
except Exception:
click.echo(f"EOmaps: Unable to add NaturalEarth feature: {ne_feature}")
if wms is not None:
if wms in ["osm"]:
m.add_wms.OpenStreetMap.add_layer.default()
elif wms in ["google_roadmap"]:
m.add_wms.GOOGLE.add_layer.roadmap_standard()
elif wms in ["google_satellite"]:
m.add_wms.GOOGLE.add_layer.satellite()
elif wms in ["s2_cloudless"]:
m.add_wms.S2_cloudless.add_layer.s2cloudless_3857()
elif wms in ["landcover"]:
m.add_wms.ESA_WorldCover.add_layer.WORLDCOVER_2020_MAP()
elif wms in ["topo"]:
m.add_wms.GEBCO.add_layer.GEBCO_LATEST()
elif wms in ["terrain_light"]:
m.add_wms.S2_cloudless.add_layer.terrain_light_3857()
elif wms in ["basemap"]:
m.add_wms.DLR_basemaps.add_layer.basemap()
elif wms in ["basemap_light"]:
m.add_wms.DLR_basemaps.add_layer.litemap()
elif wms in ["s1_vv"]:
m.add_wms.S1GBM.add_layer.vv()
if len(file) > 0:
m._init_companion_widget()
m._companion_widget.show()
m._companion_widget.tabs.tab_open.new_file_tab(file)
def on_close(*args, **kwargs):
try:
            # TODO check why ordinary exits cause the following qt errors
# see https://stackoverflow.com/a/13723190/9703451 for os._exit
# "QGuiApplication::font(): no QGuiApplication instance
# and no application font set."
sys.exit(0)
except SystemExit:
os._exit(0)
else:
os._exit(0)
m.BM.canvas.mpl_connect("close_event", on_close)
m.show() | PypiClean |
/GP_Framework_BYU_HCMI-0.0.10.tar.gz/GP_Framework_BYU_HCMI-0.0.10/gp_framework/phenotype/blackjack.py | from enum import Enum, auto
from typing import List
import random
import struct
from gp_framework.bytegenotype import ByteGenotype
from gp_framework.phenotype.phenotype import PhenotypeConverter
class Card(Enum):
ace = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
nine = auto()
ten = auto()
jack = auto()
queen = auto()
king = auto()
value_dict = {Card.two: 2, Card.three: 3, Card.four: 4, Card.five: 5, Card.six: 6, Card.seven: 7, Card.eight: 8,
Card.nine: 9, Card.ten: 10, Card.jack: 10, Card.queen: 10, Card.king: 10}
class Deck:
def __init__(self):
"""
gives the instance a shuffled list of Cards
"""
self._cards = [Card.ace, Card.two, Card.three, Card.four, Card.five, Card.six, Card.seven, Card.eight,
Card.nine, Card.ten, Card.jack, Card.queen, Card.king] * 4
random.shuffle(self._cards)
def take_random(self) -> Card:
"""
:return: a card from deck or None if the deck is empty
"""
if len(self._cards) == 0:
return None
card = self._cards[-1]
del self._cards[-1]
return card
def __len__(self):
return len(self._cards)
class PlayerStack:
def __init__(self, starting_cards: List[Card]):
self._cards = starting_cards
def __len__(self):
return len(self._cards)
def add_card(self, card: Card):
self._cards.append(card)
@staticmethod
def _normalize_score(score) -> int:
if score > 21:
return -1
return score
@staticmethod
def _calc_possible_ace_scores(num_aces) -> List[int]:
if num_aces == 0:
return []
possible_scores = [1, 11]
num_aces -= 1
for i in range(num_aces):
            possible_scores *= 2 # duplicate the list so each running total can branch into ace=1 and ace=11
possible_scores.sort() # sort the list so that 1 doesn't always add up with 1, 11 with 11, etc
for j in range(0, len(possible_scores), 2):
possible_scores[j] += 1
possible_scores[j + 1] += 11
# remove duplicates and obvious bad choices
possible_scores = set(possible_scores)
        possible_scores = [score for score in possible_scores if score <= 21]
return possible_scores
@staticmethod
def _maximize_score(num_aces, preliminary_score) -> int:
# if there are no aces, return the normalized preliminary score
if num_aces == 0:
return PlayerStack._normalize_score(preliminary_score)
possible_scores = PlayerStack._calc_possible_ace_scores(num_aces)
for i in range(len(possible_scores)):
possible_scores[i] = PlayerStack._normalize_score(possible_scores[i] + preliminary_score)
return max(possible_scores)
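    # Worked example (illustrative): with num_aces=1 and preliminary_score=7 the
    # candidate totals are 8 (ace as 1) and 18 (ace as 11); the largest total that
    # does not bust is kept, so _maximize_score returns 18. With preliminary_score=15
    # the candidates are 16 and 26; 26 busts (normalized to -1), so 16 is returned.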
@property
def score(self) -> int:
num_aces = 0
preliminary_score = 0
for card in self._cards:
if card == Card.ace:
num_aces += 1
else:
preliminary_score += value_dict[card]
return PlayerStack._maximize_score(num_aces, preliminary_score)
class PlayerParameters:
"""
Everything that a Genotype decides about a player.
"""
    def __init__(self, score_weight: float, house_score_weight: float, pgw_weight: float, money_weight: float):
self.score_weight = score_weight
self.house_score_weight = house_score_weight
self.pgw_weight = pgw_weight
self.money_weight = money_weight
@staticmethod
def number_of_parameters() -> int:
return 4
@staticmethod
def from_list(parameter_list: List[float]):
"""
:param parameter_list: the values for each parameter in PlayerParameters
:return: a PlayerParameters object
"""
score_weight = parameter_list[0]
house_score_weight = parameter_list[1]
pgw_weight = parameter_list[2]
money_weight = parameter_list[3]
return PlayerParameters(score_weight, house_score_weight, pgw_weight, money_weight)
class Player:
"""
Plays a game of Black Jack
"""
_HIT_THRESHOLD = 1.0
def __init__(self, parameters: PlayerParameters):
self._parameters = parameters
self._cards = None
self.reset_cards()
self._money = 100
self._games_played = 0
self._games_won = 0
def hit(self, house_score: int) -> bool:
hs_pressure = house_score * self._parameters.house_score_weight
score_pressure = self._cards.score * self._parameters.score_weight
return hs_pressure + score_pressure > Player._HIT_THRESHOLD
def make_bet(self) -> int:
"""
automatically decreases the Player's money
:return: the player's starting bet
"""
if self._games_played == 0: # avoid div by zero errors
self._money -= 10
return 10
float_bet = min(float(self._money),
self._money * self._parameters.money_weight + self.perc_games_won * self._parameters.pgw_weight)
# converting to an integer like this can result in an OverflowError
try:
bet = int(float_bet)
except OverflowError:
if float_bet > 0:
bet = self._money
else:
bet = 0
bet = max(10, bet)
self._money -= bet
return bet
def receive_card(self, card: Card):
self._cards.add_card(card)
def reset_cards(self):
self._cards = PlayerStack([])
def get_game_results(self, won: bool, winnings: int):
self._games_played += 1
if won:
self._games_won += 1
self._money += winnings
@property
def score(self):
return self._cards.score
@property
def money(self):
return self._money
@property
def perc_games_won(self):
return self._games_won / self._games_played
class Dealer:
"""
Acts as the house
"""
def __init__(self):
self._deck = Deck()
hidden_card = self._deck.take_random()
shown_card = self._deck.take_random()
self._hidden_stack = PlayerStack([hidden_card, shown_card])
self._shown_stack = PlayerStack([shown_card])
def give_card(self) -> Card:
return self._deck.take_random()
def give_self_card(self) -> bool:
"""
:return: whether or not the Dealer gave itself a new card
"""
if self._hidden_stack.score >= 17:
return False
new_card = self._deck.take_random()
if new_card is None:
return False
self._hidden_stack.add_card(new_card)
        self._shown_stack.add_card(new_card)
        return True
@property
def known_score(self):
return self._shown_stack.score
@property
def hidden_score(self):
return self._hidden_stack.score
class BlackJackTable:
def __init__(self, players: List[Player]):
"""
Important! Do NOT change the ordering of the list of players. It will mess up the mapping of Genotype to Money.
This really should be fixed.
"""
self._players = players
self._payout = 3 / 2
def _play_round(self):
# set up
dealer = Dealer()
bets = {}
for player in self._players:
player.reset_cards()
bets[player] = player.make_bet()
# print("A bet of", bets[player], "has been placed.")
player.receive_card(dealer.give_card())
player.receive_card(dealer.give_card())
# play
cards_given = 0
for player in self._players:
while player.hit(dealer.known_score) and player.score > 0:
new_card = dealer.give_card()
"""if new_card is None:
print("Warning! Deck is empty.")
else:
cards_given += 1
print("Given {} cards".format(cards_given))"""
player.receive_card(new_card)
# print("Player done.\n")
# payout
for player in self._players:
won = False
winnings = 0
player_score = player.score
dealer_score = dealer.hidden_score
# print("Player score: {} vs Dealer score {}".format(player_score, dealer_score))
if player.score >= dealer.hidden_score:
won = True
winnings = bets[player] * self._payout
# print("Paying out", winnings)
player.get_game_results(won, winnings)
def play_rounds(self, num_rounds):
for i in range(num_rounds):
self._play_round()
@property
def players(self) -> List[Player]:
return self._players
class PlayerConverter(PhenotypeConverter):
def convert(self, genotype: ByteGenotype) -> Player:
"""
        Unpack the genotype into PlayerParameters and construct a Player from them.
        :param genotype: The Genotype to convert
        :return: A Player built from the decoded parameters
"""
parameters: List[float] = []
# Each parameter will consume 4 bytes.
for i in range(PlayerParameters.number_of_parameters()):
[parameter] = struct.unpack('f', bytes(genotype[i:i + 4]))
parameters.append(parameter)
return Player(PlayerParameters.from_list(parameters)) | PypiClean |
/CheckM2-1.0.1.tar.gz/CheckM2-1.0.1/checkm2/predictQuality.py | from checkm2 import modelProcessing
from checkm2 import metadata
from checkm2 import prodigal
from checkm2 import diamond
from checkm2.defaultValues import DefaultValues
from checkm2.versionControl import VersionControl
from checkm2 import keggData
from checkm2 import modelPostprocessing
from checkm2 import fileManager
import os
import multiprocessing as mp
import numpy as np
import shutil
import sys
import logging
import pandas as pd
import tarfile
# Suppress unnecessary tensorflow warnings:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').setLevel(logging.FATAL)
class Predictor():
def __init__(self, bin_folder, outdir, bin_extension='.fna', threads=1, lowmem=False, tempDBloc=None):
self.bin_folder = bin_folder
self.bin_extension = bin_extension
self.bin_files = self.__setup_bins()
self.output_folder = outdir
self.prodigal_folder = os.path.join(self.output_folder, DefaultValues.PRODIGAL_FOLDER_NAME)
fileManager.make_sure_path_exists(self.prodigal_folder)
self.lowmem = lowmem
if self.lowmem:
logging.info('Running in low-memory mode.')
self.total_threads = threads
logging.debug('Verifying internal checksums for all models, scalers and reference data.')
#if VersionControl().checksum_version_validate() is False:
# logging.error('Could not verify internal model checksums. Please re-download CheckM2.')
# sys.exit(1)
logging.debug('Verifying DIAMOND DB installation path.')
if tempDBloc is not None:
self.diamond_path = tempDBloc
else:
self.diamond_path = fileManager.DiamondDB().get_DB_location()
if self.diamond_path == None or self.diamond_path == '' or self.diamond_path == 'Not Set':
logging.error("Please download and install the CheckM2 database first (see 'checkm2 database -h')")
sys.exit(1)
fileManager.check_if_file_exists(self.diamond_path)
def __setup_bins(self):
bin_files = []
if self.bin_folder is not None:
all_files = os.listdir(self.bin_folder)
for f in all_files:
if f.endswith(self.bin_extension):
binFile = os.path.join(self.bin_folder, f)
if os.stat(binFile).st_size == 0:
logging.warning("Skipping bin {} as it has a size of 0 bytes.".format(f))
elif tarfile.is_tarfile(binFile):
logging.warning('Skipping bin {} as tar archives are not supported.'.format(binFile))
else:
bin_files.append(binFile)
if not bin_files:
logging.error("No bins found. Check the extension (-x) used to identify bins.")
sys.exit(1)
return sorted(bin_files)
def prediction_wf(self, genes_supplied=False, mode='auto', debug_cos=False,
dumpvectors=False, stdout=False, resume=False, remove_intermediates=False, ttable=None):
#make sure models can be loaded without problems
modelProc = modelProcessing.modelProcessor(self.total_threads)
#make sure diamond is set up and ready to go
diamond_search = diamond.DiamondRunner(self.total_threads, self.output_folder, self.lowmem, self.diamond_path)
''' 1: Call genes and automatically determine coding table'''
if resume:
logging.info('Re-using protein files from output directory: {}'.format(self.prodigal_folder,))
prodigal_files = [os.path.join(self.prodigal_folder, bin_file) for bin_file in os.listdir(self.prodigal_folder)]
elif not genes_supplied:
used_ttables, coding_density, \
N50, avg_gene_len, \
total_bases, cds_count, \
GC = self.__run_prodigal(ttable)
prodigal_files, used_ttables = fileManager.verify_prodigal_output(self.prodigal_folder, used_ttables, self.bin_extension)
else:
logging.info('Using user-supplied protein files.')
prodigal_files = []
for bin in self.bin_files:
shutil.copyfile(bin, os.path.join(self.prodigal_folder, os.path.splitext(os.path.basename(bin))[0]))
prodigal_files.append(bin)
''' 2: Calculate genome metadata from protein files'''
metadata_df = self.__calculate_metadata(prodigal_files)
metadata_df = pd.concat(metadata_df.values())
metadata_df.reset_index(drop=True, inplace=True)
# make sure metadata is arranged correctly
metadata_order = keggData.KeggCalculator().return_proper_order('Metadata')
metadata_order.insert(0, 'Name')
metadata_df = metadata_df[metadata_order]
''' 3: Determine all KEGG annotations of input genomes using DIAMOND blastp'''
if resume:
logging.info("Reusing DIAMOND output from output directory: {}".format(diamond_search.diamond_out))
diamond_out = [x for x in os.listdir(diamond_search.diamond_out) if x.startswith('DIAMOND_RESULTS')]
if len(diamond_out) == 0:
logging.error("No DIAMOND outputs have been found in {}. Resuming is not possible.".format(diamond_search.diamond_out))
exit(1)
else:
diamond_out = diamond_search.run(prodigal_files)
### MOVED
logging.info('Processing DIAMOND output')
# concatenate all results even if only one
results = pd.concat([pd.read_csv(os.path.join(diamond_search.diamond_out, entry), sep='\t', usecols=[0, 1],
names=['header', 'annotation']) for entry in diamond_out])
if len(results) < 1:
logging.error('No DIAMOND annotation was generated. Exiting')
sys.exit(1)
# Split columns into usable series
results[['GenomeName', 'ProteinID']] = results['header'].str.split(diamond_search.separator, 1, expand=True)
results[['Ref100_hit', 'Kegg_annotation']] = results['annotation'].str.split('~', 1, expand=True)
''' Get a list of default KO id's from data
Available categories are the keys in DefaultValues.feature_ordering
Here, returns an ordered set of KEGG ID's and sets to 0
'''
KeggCalc = keggData.KeggCalculator()
defaultKOs = KeggCalc.return_default_values_from_category('KO_Genes')
# Remove from results any KOs we're not currently using
results = results[results['Kegg_annotation'].isin(defaultKOs.keys())]
# Update counts per genome
full_name_list = metadata_df['Name'].values
#kegg_genome_list = []
annot_dict = dict(
zip(sorted(results['GenomeName'].unique()), [x for _, x in results.groupby(results['GenomeName'])]))
logging.info('Predicting completeness and contamination using ML models.')
names, final_comps, final_conts, models_chosen, csm_arrays, \
general_results_comp, specific_results_comp = [], [], [], [], [], [], []
chunk_counter = 0
for i in range(0, len(full_name_list), DefaultValues.KO_FEATURE_VECTOR_CHUNK):
sublist = full_name_list[i:i + DefaultValues.KO_FEATURE_VECTOR_CHUNK]
chunk_counter += 1
parsed_diamond_results, ko_list_length = diamond_search.process_diamond_output(defaultKOs, annot_dict, sublist)
parsed_diamond_results.sort_values(by='Name', inplace=True)
sub_metadata = metadata_df[metadata_df['Name'].isin(sublist)]
sub_metadata.sort_values(by='Name', inplace=True)
parsed_diamond_results.sort_values(by='Name', inplace=True)
parsed_diamond_results.reset_index(drop=True, inplace=True)
sub_metadata.reset_index(drop=True, inplace=True)
names.append(parsed_diamond_results['Name'].values)
# delete duplicate 'name' column and merge
del parsed_diamond_results['Name']
feature_vectors = pd.concat([sub_metadata[sub_metadata['Name'].isin(sublist)], parsed_diamond_results], axis=1)
#print(feature_vectors.shape)
''' 4: Call general model & specific models and derive predictions'''
            vector_array = feature_vectors.iloc[:, 1:].values.astype(float)
general_result_comp, general_result_cont = modelProc.run_prediction_general(vector_array)
specific_model_vector_len = (ko_list_length + len(
metadata_order)) - 1 # -1 = without name TODO a bit ugly - maybe just calculate length on setup somewhere
# also retrieve scaled data for CSM calculations
specific_result_comp, scaled_features = modelProc.run_prediction_specific(vector_array, specific_model_vector_len)
final_conts.append(general_result_cont)
general_results_comp.append(general_result_comp)
specific_results_comp.append(specific_result_comp)
''' 5: Determine any substantially complete genomes similar to reference genomes and fine-tune predictions'''
if not mode == 'specific' or not mode == 'general':
#logging.info('Using cosine simlarity to reference data to select appropriate predictor model.')
postProcessor = modelPostprocessing.modelProcessor(self.total_threads)
final_comp, final_cont, model_chosen, csm_array = postProcessor.calculate_general_specific_ratio(
vector_array[:, 20],
scaled_features,
general_result_comp,
general_result_cont,
specific_result_comp)
final_comps.append(final_comp)
models_chosen.append(model_chosen)
csm_arrays.append(csm_array)
if dumpvectors:
dumpfile = os.path.join(self.output_folder, f'feature_vectors_{chunk_counter}.pkl')
feature_vectors.to_pickle(dumpfile, protocol=4)
logging.info('Parsing all results and constructing final output table.')
#flatten lists
names = [item for sublist in names for item in sublist]
final_comps = [item for sublist in final_comps for item in sublist]
final_conts = [item for sublist in final_conts for item in sublist]
models_chosen = [item for sublist in models_chosen for item in sublist]
csm_arrays = [item for sublist in csm_arrays for item in sublist]
general_results_comp = [item for sublist in general_results_comp for item in sublist]
specific_results_comp = [item for sublist in specific_results_comp for item in sublist]
final_results = pd.DataFrame({'Name':names})
if mode == 'both':
final_results['Completeness_General'] = np.round(general_results_comp, 2)
final_results['Contamination'] = np.round(final_conts, 2)
final_results['Completeness_Specific'] = np.round(specific_results_comp, 2)
final_results['Completeness_Model_Used'] = models_chosen
elif mode == 'auto':
final_results['Completeness'] = np.round(final_comps, 2)
final_results['Contamination'] = np.round(final_conts, 2)
final_results['Completeness_Model_Used'] = models_chosen
elif mode == 'general':
final_results['Completeness_General'] = np.round(general_results_comp, 2)
final_results['Contamination'] = np.round(final_conts, 2)
elif mode == 'specific':
final_results['Completeness_Specific'] = np.round(specific_results_comp, 2)
final_results['Contamination'] = np.round(final_conts, 2)
else:
logging.error('Programming error in model choice')
sys.exit(1)
if not genes_supplied and not resume:
final_results['Translation_Table_Used'] = final_results['Name'].apply(lambda x: used_ttables[x])
final_results['Coding_Density'] = final_results['Name'].apply(lambda x: np.round(coding_density[x], 3))
final_results['Contig_N50'] = final_results['Name'].apply(lambda x: int(N50[x]))
final_results['Average_Gene_Length'] = final_results['Name'].apply(lambda x: avg_gene_len[x])
final_results['Genome_Size'] = final_results['Name'].apply(lambda x: total_bases[x])
final_results['GC_Content'] = final_results['Name'].apply(lambda x: np.round(GC[x], 2))
final_results['Total_Coding_Sequences'] = final_results['Name'].apply(lambda x: cds_count[x])
if debug_cos is True:
final_results['Cosine_Similarity'] = np.round(csm_arrays, 2)
#Flag any substantial divergences in completeness predictions
additional_notes = self.__flag_divergent_predictions(general=general_results_comp, specific=specific_results_comp)
final_results['Additional_Notes'] = additional_notes
final_file = os.path.join(self.output_folder, 'quality_report.tsv')
final_results.to_csv(final_file, sep='\t', index=False)
if stdout:
print(final_results.to_string(index=False, float_format=lambda x: '%.2f' % x))
if remove_intermediates:
shutil.rmtree(self.prodigal_folder)
shutil.rmtree(diamond_search.diamond_out)
logging.info('CheckM2 finished successfully.')
def __flag_divergent_predictions(self, general, specific, threshold=DefaultValues.MODEL_DIVERGENCE_WARNING_THRESHOLD):
compare = pd.DataFrame({'General':general, 'Specific':specific})
compare['Difference'] = compare.apply(lambda row: abs(row['General'] - row['Specific']), axis=1)
compare['Additional_Notes'] = compare.apply(lambda row: 'None' if row['Specific'] < 50 or row['Difference'] < threshold else \
'Low confidence prediction - substantial ({}%) disagreement between completeness prediction models'.format(int(row['Difference'])), axis=1)
return compare['Additional_Notes'].values
def __set_up_prodigal_thread(self, queue_in, queue_out, ttable, used_ttable, coding_density,
N50, avg_gene_len, total_bases, cds_count, GC):
while True:
bin = queue_in.get(block=True, timeout=None)
if bin == None:
break
prodigal_thread = prodigal.ProdigalRunner(self.prodigal_folder, bin)
binname, selected_coding_table, c_density, \
v_N50, v_avg_gene_len, v_total_bases, v_cds_count, v_GC = prodigal_thread.run(bin, ttable)
used_ttable[binname] = selected_coding_table
coding_density[binname] = c_density
N50[binname] = v_N50
avg_gene_len[binname] = v_avg_gene_len
total_bases[binname] = v_total_bases
GC[binname] = v_GC
cds_count[binname] = v_cds_count
queue_out.put((bin, selected_coding_table, coding_density, N50, avg_gene_len, total_bases, cds_count, GC))
def __reportProgress(self, total_bins, queueIn):
"""Report number of processed bins."""
processed = 0
while True:
bin, selected_coding_table, coding_density, N50, \
avg_gene_len, total_bases, cds_count, GC = queueIn.get(block=True, timeout=None)
if bin == None:
if logging.root.level == logging.INFO or logging.root.level == logging.DEBUG:
sys.stdout.write('\n')
sys.stdout.flush()
break
processed += 1
if logging.root.level == logging.INFO or logging.root.level == logging.DEBUG:
statusStr = ' Finished processing %d of %d (%.2f%%) bins.' % (
processed, total_bins, float(processed) * 100 / total_bins)
sys.stdout.write('\r{}'.format(statusStr))
sys.stdout.flush()
def __run_prodigal(self, ttable):
self.threads_per_bin = max(1, int(self.total_threads / len(self.bin_files)))
logging.info("Calling genes in {} bins with {} threads:".format(len(self.bin_files), self.total_threads))
# process each bin in parallel
workerQueue = mp.Queue()
writerQueue = mp.Queue()
for bin in self.bin_files:
workerQueue.put(bin)
for _ in range(self.total_threads):
workerQueue.put(None)
used_ttables = mp.Manager().dict()
coding_density = mp.Manager().dict()
N50 = mp.Manager().dict()
avg_gene_len = mp.Manager().dict()
total_bases = mp.Manager().dict()
cds_count = mp.Manager().dict()
GC = mp.Manager().dict()
try:
calcProc = []
for _ in range(self.total_threads):
calcProc.append(
mp.Process(target=self.__set_up_prodigal_thread, args=(workerQueue, writerQueue, ttable,
used_ttables, coding_density,
N50, avg_gene_len,
total_bases, cds_count, GC)))
writeProc = mp.Process(target=self.__reportProgress, args=(len(self.bin_files), writerQueue))
writeProc.start()
for p in calcProc:
p.start()
for p in calcProc:
p.join()
writerQueue.put((None, None, None, None, None, None, None, None))
writeProc.join()
except:
# make sure all processes are terminated
for p in calcProc:
p.terminate()
writeProc.terminate()
return used_ttables, coding_density, N50, avg_gene_len, total_bases, cds_count, GC
def __calculate_metadata(self, faa_files):
self.threads_per_bin = max(1, int(self.total_threads / len(faa_files)))
logging.info("Calculating metadata for {} bins with {} threads:".format(len(faa_files), self.total_threads))
# process each bin in parallel
workerQueue = mp.Queue()
writerQueue = mp.Queue()
for faa in faa_files:
workerQueue.put(faa)
for _ in range(self.total_threads):
workerQueue.put(None)
metadata_dict = mp.Manager().dict()
try:
calcProc = []
for _ in range(self.total_threads):
calcProc.append(
mp.Process(target=self.__set_up_metadata_thread, args=(workerQueue, writerQueue, metadata_dict)))
writeProc = mp.Process(target=self.__report_progress_metadata, args=(len(faa_files), writerQueue))
writeProc.start()
for p in calcProc:
p.start()
for p in calcProc:
p.join()
writerQueue.put((None, None))
writeProc.join()
except:
# make sure all processes are terminated
for p in calcProc:
p.terminate()
writeProc.terminate()
# metadata_dict = process into df (metadata_dict)
return metadata_dict
def __set_up_metadata_thread(self, queue_in, queue_out, metadata_dict):
while True:
bin = queue_in.get(block=True, timeout=None)
if bin == None:
break
metadata_thread = metadata.MetadataCalculator(bin)
name1, cdscount_series = metadata_thread.calculate_CDS()
name2, aalength_series = metadata_thread.calculate_amino_acid_length()
name3, aa_list, aa_counts = metadata_thread.calculate_amino_acid_counts()
if name1 == name2 == name3:
meta_thread_df = pd.DataFrame(
{'Name': [name1], 'CDS': [cdscount_series], 'AALength': [aalength_series]})
for idx, aa in enumerate(aa_list):
meta_thread_df[aa] = aa_counts[idx]
else:
logging.error('Inconsistent name information in metadata calculation. Exiting.')
sys.exit(1)
metadata_dict[bin] = meta_thread_df
queue_out.put(bin)
def __report_progress_metadata(self, total_bins, queueIn):
"""Report number of processed bins."""
processed = 0
while True:
bin = queueIn.get(block=True, timeout=None)
if bin[0] == None:
if logging.root.level == logging.INFO or logging.root.level == logging.DEBUG:
sys.stdout.write('\n')
sys.stdout.flush()
break
processed += 1
if logging.root.level == logging.INFO or logging.root.level == logging.DEBUG:
statusStr = ' Finished processing %d of %d (%.2f%%) bin metadata.' % (
processed, total_bins, float(processed) * 100 / total_bins)
sys.stdout.write('\r{}'.format(statusStr))
sys.stdout.flush() | PypiClean |
/KindleComicConverter_headless-5.5.2-py3-none-any.whl/kindlecomicconverter/shared.py |
import os
from hashlib import md5
from html.parser import HTMLParser
from distutils.version import StrictVersion
from re import split
from traceback import format_tb
class HTMLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def error(self, message):
pass
def getImageFileName(imgfile):
name, ext = os.path.splitext(imgfile)
ext = ext.lower()
if (name.startswith('.') and len(name) == 1) or ext not in ['.png', '.jpg', '.jpeg', '.gif', '.webp']:
return None
return [name, ext]
def walkSort(dirnames, filenames):
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in split('([0-9]+)', key)]
dirnames.sort(key=lambda name: alphanum_key(name.lower()))
filenames.sort(key=lambda name: alphanum_key(name.lower()))
return dirnames, filenames
def walkLevel(some_dir, level=1):
some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
dirs, files = walkSort(dirs, files)
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
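# Illustrative usage of walkLevel (not part of the original module): with
# level=0 only the top directory itself is yielded and subdirectories are pruned.
#
#   for root, dirs, files in walkLevel('/path/to/comics', level=0):
#       print(root, files)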
def md5Checksum(fpath):
with open(fpath, 'rb') as fh:
m = md5()
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
return m.hexdigest()
def sanitizeTrace(traceback):
return ''.join(format_tb(traceback))\
.replace('C:/projects/kcc/', '')\
.replace('c:/projects/kcc/', '')\
.replace('C:/python37-x64/', '')\
.replace('c:/python37-x64/', '')\
.replace('C:\\projects\\kcc\\', '')\
.replace('c:\\projects\\kcc\\', '')\
.replace('C:\\python37-x64\\', '')\
.replace('c:\\python37-x64\\', '')
# noinspection PyUnresolvedReferences
def dependencyCheck(level):
missing = []
if level > 1:
try:
from psutil import __version__ as psutilVersion
if StrictVersion('5.0.0') > StrictVersion(psutilVersion):
missing.append('psutil 5.0.0+')
except ImportError:
missing.append('psutil 5.0.0+')
try:
from slugify import __version__ as slugifyVersion
if StrictVersion('1.2.1') > StrictVersion(slugifyVersion):
missing.append('python-slugify 1.2.1+')
except ImportError:
missing.append('python-slugify 1.2.1+')
try:
from PIL import __version__ as pillowVersion
if StrictVersion('5.2.0') > StrictVersion(pillowVersion):
missing.append('Pillow 5.2.0+')
except ImportError:
missing.append('Pillow 5.2.0+')
if len(missing) > 0:
print('ERROR: ' + ', '.join(missing) + ' is not installed!')
exit(1) | PypiClean |
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/tailwindcss/src/util/normalizeScreens.js | * @typedef {object} Screen
* @property {string} name
* @property {boolean} not
* @property {ScreenValue[]} values
*/
/**
* A function that normalizes the various forms that the screens object can be
* provided in.
*
* Input(s):
* - ['100px', '200px'] // Raw strings
* - { sm: '100px', md: '200px' } // Object with string values
* - { sm: { min: '100px' }, md: { max: '100px' } } // Object with object values
* - { sm: [{ min: '100px' }, { max: '200px' }] } // Object with object array (multiple values)
*
* Output(s):
* - [{ name: 'sm', values: [{ min: '100px', max: '200px' }] }] // List of objects, that contains multiple values
*
* @returns {Screen[]}
*/
export function normalizeScreens(screens, root = true) {
if (Array.isArray(screens)) {
return screens.map((screen) => {
if (root && Array.isArray(screen)) {
throw new Error('The tuple syntax is not supported for `screens`.')
}
if (typeof screen === 'string') {
return { name: screen.toString(), not: false, values: [{ min: screen, max: undefined }] }
}
let [name, options] = screen
name = name.toString()
if (typeof options === 'string') {
return { name, not: false, values: [{ min: options, max: undefined }] }
}
if (Array.isArray(options)) {
return { name, not: false, values: options.map((option) => resolveValue(option)) }
}
return { name, not: false, values: [resolveValue(options)] }
})
}
return normalizeScreens(Object.entries(screens ?? {}), false)
}
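// Illustrative example (not part of the original file) of the normalization
// described in the comment above; output key order is approximate:
//
//   normalizeScreens({ sm: '640px', md: { min: '768px', max: '1023px' } })
//   // => [
//   //   { name: 'sm', not: false, values: [{ min: '640px', max: undefined }] },
//   //   { name: 'md', not: false, values: [{ min: '768px', max: '1023px', raw: undefined }] },
//   // ]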
/**
* @param {Screen} screen
* @returns {{result: false, reason: string} | {result: true, reason: null}}
*/
export function isScreenSortable(screen) {
if (screen.values.length !== 1) {
return { result: false, reason: 'multiple-values' }
} else if (screen.values[0].raw !== undefined) {
return { result: false, reason: 'raw-values' }
} else if (screen.values[0].min !== undefined && screen.values[0].max !== undefined) {
return { result: false, reason: 'min-and-max' }
}
return { result: true, reason: null }
}
/**
* @param {'min' | 'max'} type
* @param {Screen | 'string'} a
* @param {Screen | 'string'} z
* @returns {number}
*/
export function compareScreens(type, a, z) {
let aScreen = toScreen(a, type)
let zScreen = toScreen(z, type)
let aSorting = isScreenSortable(aScreen)
let bSorting = isScreenSortable(zScreen)
// These cases should never happen and indicate a bug in Tailwind CSS itself
if (aSorting.reason === 'multiple-values' || bSorting.reason === 'multiple-values') {
throw new Error(
'Attempted to sort a screen with multiple values. This should never happen. Please open a bug report.'
)
} else if (aSorting.reason === 'raw-values' || bSorting.reason === 'raw-values') {
throw new Error(
'Attempted to sort a screen with raw values. This should never happen. Please open a bug report.'
)
} else if (aSorting.reason === 'min-and-max' || bSorting.reason === 'min-and-max') {
throw new Error(
'Attempted to sort a screen with both min and max values. This should never happen. Please open a bug report.'
)
}
// Let the sorting begin
let { min: aMin, max: aMax } = aScreen.values[0]
let { min: zMin, max: zMax } = zScreen.values[0]
// Negating screens flip their behavior. Basically `not min-width` is `max-width`
if (a.not) [aMin, aMax] = [aMax, aMin]
if (z.not) [zMin, zMax] = [zMax, zMin]
aMin = aMin === undefined ? aMin : parseFloat(aMin)
aMax = aMax === undefined ? aMax : parseFloat(aMax)
zMin = zMin === undefined ? zMin : parseFloat(zMin)
zMax = zMax === undefined ? zMax : parseFloat(zMax)
let [aValue, zValue] = type === 'min' ? [aMin, zMin] : [zMax, aMax]
return aValue - zValue
}
/**
*
* @param {PartialScreen> | string} value
* @param {'min' | 'max'} type
* @returns {Screen}
*/
export function toScreen(value, type) {
if (typeof value === 'object') {
return value
}
return {
name: 'arbitrary-screen',
values: [{ [type]: value }],
}
}
function resolveValue({ 'min-width': _minWidth, min = _minWidth, max, raw } = {}) {
return { min, max, raw }
} | PypiClean |
/Django-ArrayAccum-1.6.1.tar.gz/Django-ArrayAccum-1.6.1/django/db/models/aggregates.py | from django.db.models.constants import LOOKUP_SEP
def refs_aggregate(lookup_parts, aggregates):
"""
A little helper method to check if the lookup_parts contains references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for match.
"""
for i in range(len(lookup_parts) + 1):
if LOOKUP_SEP.join(lookup_parts[0:i]) in aggregates:
return True
return False
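# Illustrative example (not from the original source): with
# aggregates = {'price__max': ...} and lookup_parts = ['price', 'max', 'gt'],
# the prefix 'price__max' matches at i=2, so refs_aggregate() returns True.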
class Aggregate(object):
"""
Default Aggregate definition.
"""
def __init__(self, lookup, **extra):
"""Instantiate a new aggregate.
* lookup is the field on which the aggregate operates.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* name, the identifier for this aggregate function.
"""
self.lookup = lookup
self.extra = extra
def _default_alias(self):
return '%s__%s' % (self.lookup, self.name.lower())
default_alias = property(_default_alias)
def add_to_query(self, query, alias, col, source, is_summary):
"""Add the aggregate to the nominated query.
This method is used to convert the generic Aggregate definition into a
backend-specific definition.
* query is the backend-specific query instance to which the aggregate
is to be added.
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* is_summary is a boolean that is set True if the aggregate is a
summary value rather than an annotation.
"""
klass = getattr(query.aggregates_module, self.name)
aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
query.aggregates[alias] = aggregate
class Avg(Aggregate):
name = 'Avg'
class Count(Aggregate):
name = 'Count'
class Max(Aggregate):
name = 'Max'
class Min(Aggregate):
name = 'Min'
class StdDev(Aggregate):
name = 'StdDev'
class Sum(Aggregate):
name = 'Sum'
class Variance(Aggregate):
name = 'Variance'
class ArrayAccum(Aggregate): #added by Django-ArrayAccum
name = 'ArrayAccum' | PypiClean |
/Byond_API-0.2.2.tar.gz/Byond_API-0.2.2/README.md | # Byond-API
A simple and convenient extension for working with servers of the game Space Station 13 that run BayStation-based builds.

Supported builds: `bay`, `paradise`
## Examples
```
from Byond_API import ByondAPI
servers = ByondAPI()
servers.add_server("ss220", "bay", ("game.ss220.space", 7725))
server_info = servers.get_server_info("ss220")
server_revision = servers.get_server_revision("ss220")
server_manifest = servers.get_server_manifest("ss220")
```
## Procs
```
def add_server(name: str, build: str, data: tuple) -> None:
def get_server_revision(server:str=None) -> Info:
def get_server_info(server:str=None) -> Info:
def get_server_manifest(server:str=None) -> Info:
```
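A rough polling sketch built from the procs above (server name and address are placeholders; the attributes read from `info` are listed in the Info object section below):
```
from Byond_API import ByondAPI
import time

servers = ByondAPI()
servers.add_server("myserver", "bay", ("example.com", 7725))

while True:
    info = servers.get_server_info("myserver")
    print(info.players_num, info.round_duration)
    time.sleep(60)
```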
## Info object
```
Vars:
Type - Type of Info object (Info, Revision, Manifest)
raw_data - Raw dict of data
for Info:
version
mode
can_respawn
can_enter
can_vote
ai
host
players_num
station_time
round_duration
round_time
map
ticker_state
admins_num
players
admins
active_players
for Revision:
gameid
dm_version
dm_build
dd_verion
dd_build
revision
git_branch
date
for Manifest:
manifest
``` | PypiClean |
/DeDist-0.1.tar.gz/DeDist-0.1/dedist/dedist.py | import numpy as np
from scipy.stats import mvn
from multiprocessing import Pool
def multi_fun(inputs):
    ''' Function to apply mvnun in parallel
Parameters
----------
inputs : list
[0], array, lower bounds
[1], array, upper bounds
[2], array, means
[3], array, covariance matrix
Returns
-------
array
probabilities for each stimulus
'''
low = inputs[0]
upp = inputs[1]
mean = inputs[2]
cov = inputs[3]
p,e = mvn.mvnun(low,upp,mean,cov)
return p
def get_means(fun, theta, par, x, x_):
''' find means for multivariate normal describing error landscape
Parameters
----------
fun : function
Function to be used. Assumed to be of form
fun(x,x_,par)
where x and x_ are described below, and par are the basic
model parameters
theta : array/float
the real stimulus value
par : array
model parameters
x : array
preferred values of neurons
x_ : array
actual values to be tried to decode
full_return : binary
if False, only returns decoding distribution. If true, also returns
sampled errors, calculated mean, and covariance
Returns
-------
array
for each stimulus in x_, what the mean error will be
'''
# find real population response
f = fun(x,theta,par)
# find possible function values
Fs = fun(x,x_.reshape(x_.shape+(1,)),par)
# first, find the means
means = np.sum( (f-Fs)**2,axis=1 )
return means
def get_cov(fun,theta,par,sigma,x,x_):
''' find covariance matrix for multivariate normal describing error
landscape
Parameters
----------
fun : function
Function to be used. Assumed to be of form
fun(x,x_,par)
where x and x_ are described below, and par are the basic
model parameters
theta : array/float
the real stimulus value
par : array
model parameters
sigma : float
sigma^2 is the variance of the gaussian noise
x : array
preferred values of neurons
    x_ : array
        actual values to be tried to decode
    Returns
    -------
    array
        the covariance matrix of the error landscape for the stimuli in x_
    '''
# find dimensionality of multivar Gaussian
ns = len(x_)
# find real population response
f = fun(x,theta,par)
# find possible function values
Fs = fun(x,x_.reshape(x_.shape+(1,)),par)
# find the covariances
cov = np.zeros((ns,ns))
cov = 4*sigma**2*np.sum(Fs*Fs[:,None],axis=2)
return cov
def sample_E(fun,theta,par,sigma,x,x_,n,full_return=False):
''' Samples n errors from a multivariate gaussian distribution.
Parameters
----------
fun : function
Function to be used. Assumed to be of form
fun(x,x_,par)
where x and x_ are described below, and par are the basic
model parameters
theta : array/float
the real stimulus value
par : array
model parameters
sigma : float
sigma^2 is the variance of the gaussian noise
x : array
preferred values of neurons
x_ : array
actual values to be tried to decode
n : int
number of realizations to sample
full_return : binary
if False, only returns decoding distribution. If true, also returns
sampled errors, calculated mean, and covariance
Returns
-------
    array
        the decoded stimulus for each realization, i.e. the value in x_ with
        the smallest sampled error
if full_return:
array
The sampled error profiles
array
The means for the multivariate normal
array
The covariance matrix for the multivariate normal
'''
# find dimensionality of multivar Gaussian
ns = len(x_)
# find real population response
f = fun(x,theta,par)
# find possible function values
Fs = fun(x,x_.reshape(x_.shape+(1,)),par)
# first, find the means
means = np.sum( (f-Fs)**2,axis=1 )
# now the covariances
cov = np.zeros((ns,ns))
cov = 4*sigma**2*np.sum(Fs*Fs[:,None],axis=2)
# now do a set of realizations
print 'Sampling from distribution'
Errors = np.random.multivariate_normal(means,cov,size=n)
sol_th = x_[Errors.argmin(axis=1)]
print 'Done'
# return values
if full_return:
return sol_th,Errors,means,cov
else:
return sol_th
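# Illustrative usage of sample_E (not part of the original module); assumes a
# Gaussian tuning curve of the form fun(x, x_, par) that broadcasts over x_:
#
#   import numpy as np
#   def gauss_tuning(x, x_, par):
#       return par[0] * np.exp(-(x - x_) ** 2 / (2. * par[1] ** 2))
#   x = np.linspace(-1, 1, 50)    # preferred stimuli of the neurons
#   x_ = np.linspace(-1, 1, 101)  # candidate decoded stimuli
#   decoded = sample_E(gauss_tuning, 0., [1., 0.2], 0.1, x, x_, n=1000)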
def est_p(fun,theta,par,sigma,x,x_,full_return=False,lowmem=False,verbose=True):
    ''' For each stimulus in x_, estimates the probability that it gives the
    smallest error. It does this by finding the multivariate normal for the error
    at each x_, with the error at each other x_' subtracted.
Parameters
----------
fun : function
Function to be used. Assumed to be of form
fun(x,x_,par)
where x and x_ are described below, and par are the basic
model parameters
theta : array/float
the real stimulus value
par : array
model parameters
sigma : float
sigma^2 is the variance of the gaussian noise
x : array
preferred values of neurons
x_ : array
actual values to be tried to decode
full_return : binary,optional
if False, only returns decoding distribution. If true, also returns
the calculated means and covariance for each stimulus in x_.
Default False
lowmem : bool, optional
        Whether to use lower memory mode (useful if calculating big
covariance matrices). Will not be able to use full_return!
Default False
verbose : bool, optional
Whether to print progress or not
Returns
-------
array
for each stimulus in x_, the probability that this has the smallest
error
if full_return:
array
The full set of means. Dimensions as (len(x_)-1,len(x_)), such that
means[:,i] describes the full set of means for the error differences
with stimulus i.
array
The full set of covariances. Dimensions as (len(x_)-1,len(x_)-1,
len(x_)). Thus covs[:,:,i] describes the relevant covariance matrix
for stimulus i.
'''
# find dimensionality of multivar Gaussian
ns = len(x_)
# set integration bounds
low = -np.ones(len(x_)-1)*1e50
upp = np.zeros(len(x_)-1)
# find real population response
f = fun(x,theta,par)
# make multidimensional version of x_ so less need for for loops
# a + b.reshape(b.shape+(1,)) gives all possible combinations between
# a and b
x_mult = x_.reshape(x_.shape+(1,))
# first, find all required function differences
diffs = (fun(x,x_mult,par)[:,None]-fun(x,x_mult,par))
diffs_sq = (fun(x,x_mult,par)[:,None]**2-fun(x,x_mult,par)**2)
# then, find the means
if verbose: print 'finding means'
means = np.zeros((ns-1,ns)) # sum((f-f')**2)
# loop over all to be generated means
for i in range(ns):
if verbose: print '\r'+str(i),
# loop over all stimuli, except when i=j
means[:i,i] = np.sum( diffs_sq[i,:i] - 2*f*diffs[i,:i],axis=1 )
means[i:,i] = np.sum( diffs_sq[i,i+1:] - 2*f*diffs[i,i+1:],axis=1 )
if verbose: print ''
# If low memory, only calculate one covariance matrix at a time
if lowmem:
if verbose:
print 'Low memory mode.'
print 'Finding p[x] of ' + str(ns) + ':'
        p = np.zeros(ns)
        for i in range(ns):
if verbose: print '\r'+str(i),
# find current covariance
cov = np.zeros((ns-1,ns-1))
cov[:i,:i] = 4*sigma**2*np.sum(diffs[i,:i][:,None]
*diffs[i,:i],axis=2)
cov[:i,i:] = 4*sigma**2*np.sum(diffs[i,:i][:,None]
*diffs[i,i+1:],axis=2)
cov[i:,:i] = 4*sigma**2*np.sum(diffs[i,i+1:][:,None]
*diffs[i,:i],axis=2)
cov[i:,i:] = 4*sigma**2*np.sum(diffs[i,i+1:][:,None]
*diffs[i,i+1:],axis=2)
# find p
p[i],e = mvn.mvnun(low,upp,means[:,i],cov)
return p
# now for the tough one, the covariances
if verbose:
print 'finding covariances, ',
print 'doing set x of ' + str(ns) + ':'
# loop over coveriances to find
covs = np.zeros((ns-1,ns-1,ns))
for i in range(ns):
if verbose: print '\r'+str(i),
covs[:i,:i,i] = 4*sigma**2*np.sum(diffs[i,:i][:,None]
*diffs[i,:i],axis=2)
covs[:i,i:,i] = 4*sigma**2*np.sum(diffs[i,:i][:,None]
*diffs[i,i+1:],axis=2)
covs[i:,:i,i] = 4*sigma**2*np.sum(diffs[i,i+1:][:,None]
*diffs[i,:i],axis=2)
covs[i:,i:,i] = 4*sigma**2*np.sum(diffs[i,i+1:][:,None]
*diffs[i,i+1:],axis=2)
if verbose: print ''
# calculate the cumulative distribution for each of the calculated covs
if verbose: print 'Calculating cumulative distributions'
# calculate probabilities
pool = Pool(None) # to use less than max processes, change 'None' to number
inputs = [[low,upp,means[:,i],covs[:,:,i]] for i in range(ns)]
p = pool.map(multi_fun,inputs)
pool.close()
if full_return:
return p, means, covs
else:
return p
def est_p_cor(fun,theta,par,cov,x,x_,full_return=False,lowmem=False,verbose=True):
    ''' For each stimulus in x_, estimates the probability that it gives the
    smallest error. It does this by finding the multivariate normal for the error
    at each x_, with the error at each other x_' subtracted.
This function does the same as est_p(), but for correlated noise. If you
have uncorrelated noise, use est_p() as it is faster.
Parameters
----------
fun : function
Function to be used. Assumed to be of form
fun(x,x_,par)
where x and x_ are described below, and par are the basic
model parameters
theta : array/float
the real stimulus value
par : array
model parameters
cov : array
The covariance matrix for the noise
x : array
preferred values of neurons
x_ : array
actual values to be tried to decode
full_return : binary,optional
if False, only returns decoding distribution. If true, also returns
the calculated means and covariance for each stimulus in x_.
Default False
lowmem : bool, optional
        Whether to use lower memory mode (useful if calculating big
covariance matrices). Will not be able to use full_return!
Default False
verbose : bool, optional
Whether to print progress or not
Returns
-------
array
for each stimulus in x_, the probability that this has the smallest
error
if full_return:
array
The full set of means. Dimensions as (len(x_)-1,len(x_)), such that
means[:,i] describes the full set of means for the error differences
with stimulus i.
array
The full set of covariances. Dimensions as (len(x_)-1,len(x_)-1,
len(x_)). Thus covs[:,:,i] describes the relevant covariance matrix
for stimulus i.
'''
# find dimensionality of multivar Gaussian
ns = len(x_)
# get inverse covariance
cov_i = np.linalg.inv(cov)
# set integration bounds
low = -np.ones(len(x_)-1)*1e50
upp = np.zeros(len(x_)-1)
# find real population response
f = fun(x,theta,par)
# make multidimensional version of x_ so less need for for loops
# a + b.reshape(b.shape+(1,)) gives all possible combinations between
# a and b
x_mult = x_.reshape(x_.shape+(1,))
# first, find all required function differences
diffs_true = f - fun(x,x_mult,par)
Lmeans = np.array([np.dot(np.dot(diffs_true[a, :], cov_i),
diffs_true[a, :]) for a in range(ns)])
diffs = (fun(x,x_mult,par)[:,None]-fun(x,x_mult,par))
# then, find the means
means = np.zeros((ns-1, ns))
if verbose: print 'finding means'
for m in range(ns):
if verbose: print '\r'+str(m),
means[:m, m] = Lmeans[m] - Lmeans[:m]
means[m:, m] = Lmeans[m] - Lmeans[m+1:]
if verbose: print ''
# now for the covariances
if verbose:
print 'finding covariances, ',
print 'doing set x of ' + str(ns) + ':'
# loop over coveriances to find
covs = np.zeros((ns-1, ns-1, ns))
for m in range(ns):
if verbose: print '\r'+str(m),
for a in range(m):
for b in range(m):
covs[a, b, m] = np.dot(np.dot(diffs[m, a, :], cov_i),
diffs[m, b, :])
for b in range(m+1, ns):
covs[a, b-1, m] = np.dot(np.dot(diffs[m, a, :], cov_i),
diffs[m, b, :])
for a in range(m+1, ns):
for b in range(m):
covs[a-1, b, m] = np.dot(np.dot(diffs[m, a, :], cov_i),
diffs[m, b, :])
for b in range(m+1, ns):
covs[a-1, b-1, m] = np.dot(np.dot(diffs[m, a, :], cov_i),
diffs[m, b, :])
if verbose: print ''
# calculate the cumulative distribution for each of the calculated covs
if verbose: print 'Calculating cumulative distributions'
# calculate probabilities
pool = Pool(None) # to use less than max processes, change 'None' to number
inputs = [[low, upp, means[:, i], 4*covs[:, :, i]] for i in range(ns)]
p = pool.map(multi_fun, inputs)
pool.close()
if full_return:
return p, means, covs
else:
return p
def calc_crb(dfun,sigma,par,x,x_,db=0,b=0):
''' Estimates the optimally possible decoding distribution from the
cramer-rao bound, assuming a neurons response is r_i = f_i + n_i,
where n_i drawn from a normal dist with mean 0 and variance sigma.
Only works for 1D systems.
Parameters
----------
dfun : function
The derivative of the normal function. Should be of furm
dfun(x,x_,par)
sigma : float
The variance in the noise
par : array
Parameters of the model
x : array
The prefered stimulus values
x_ : array
The stimulus values at which to evaluate the Fisher information
db : array, optional
The derivative of the bias, if any
b : array, optional
The bias, if any
Returns
-------
array
The cramer-rao bound at each stimulus value in x_
'''
# find population derivatives
df = dfun(x,x_[:,None],par)
# find the Fisher information at each stimulus value
I = np.sum( df**2 , axis=1 )/sigma**2
    # find the CRB
crb = (1+db)**2/I
return crb | PypiClean |
/MaiConverter-0.14.5-py3-none-any.whl/maiconverter/converter/simaitomaima2.py | from typing import List, Tuple, Optional
from ..maima2 import (
MaiMa2,
BPM,
HoldNote as Ma2HoldNote,
TouchHoldNote as Ma2TouchHoldNote,
SlideNote as Ma2SlideNote,
)
from ..simai import (
SimaiChart,
pattern_to_int,
TapNote,
HoldNote,
SlideNote,
TouchHoldNote,
TouchTapNote,
)
from ..event import SimaiNote, NoteType
def simai_to_ma2(simai: SimaiChart, fes_mode: bool = False) -> MaiMa2:
ma2 = MaiMa2(fes_mode=fes_mode)
for bpm in simai.bpms:
measure = 0.0 if bpm.measure <= 1.0 else bpm.measure
ma2.set_bpm(measure, bpm.bpm)
ma2.set_meter(0.0, 4, 4)
convert_notes(ma2, simai.notes)
if len(ma2.bpms) != 1:
fix_durations(ma2)
return ma2
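# Illustrative usage (not part of the original module); assumes a SimaiChart
# instance has already been parsed elsewhere in the package:
#
#   ma2 = simai_to_ma2(simai_chart, fes_mode=False)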
def convert_notes(ma2: MaiMa2, simai_notes: List[SimaiNote]) -> None:
for simai_note in simai_notes:
note_type = simai_note.note_type
if isinstance(simai_note, TapNote):
is_break = note_type in [NoteType.break_tap, NoteType.break_star]
is_ex = note_type in [NoteType.ex_tap, NoteType.ex_star]
is_star = note_type in [
NoteType.star,
NoteType.break_star,
NoteType.ex_star,
]
ma2.add_tap(
measure=simai_note.measure,
position=simai_note.position,
is_break=is_break,
is_star=is_star,
is_ex=is_ex,
)
elif isinstance(simai_note, HoldNote):
is_ex = note_type == NoteType.ex_hold
ma2.add_hold(
measure=simai_note.measure,
position=simai_note.position,
duration=simai_note.duration,
is_ex=is_ex,
)
elif isinstance(simai_note, SlideNote):
# Ma2 slide durations does not include the delay
# like in simai
pattern = pattern_to_int(simai_note)
ma2.add_slide(
measure=simai_note.measure,
start_position=simai_note.position,
end_position=simai_note.end_position,
duration=simai_note.duration,
pattern=pattern,
delay=simai_note.delay,
)
elif isinstance(simai_note, TouchTapNote):
ma2.add_touch_tap(
measure=simai_note.measure,
position=simai_note.position,
region=simai_note.region,
is_firework=simai_note.is_firework,
)
elif isinstance(simai_note, TouchHoldNote):
ma2.add_touch_hold(
measure=simai_note.measure,
position=simai_note.position,
region=simai_note.region,
duration=simai_note.duration,
is_firework=simai_note.is_firework,
)
else:
print(f"Warning: Unknown note type {note_type}")
def fix_durations(ma2: MaiMa2):
"""Simai note durations (slide delay, slide duration, hold note duration)
disregards bpm changes midway, unlike ma2. So we'll have to compensate for those.
"""
def bpm_changes(start: float, duration: float) -> List[BPM]:
result: List[BPM] = []
for bpm in ma2.bpms:
if start < bpm.measure < start + duration:
result.append(bpm)
return result
def compensate_duration(
start: float, duration: float, base_bpm: float, changes: List[BPM]
) -> float:
new_duration = 0
note_start = start
for bpm in changes:
new_duration += (
ma2.get_bpm(bpm.measure - 0.0001)
* (bpm.measure - note_start)
/ base_bpm
)
note_start = bpm.measure
if note_start < start + duration:
new_duration += (
ma2.get_bpm(note_start + 0.0001)
* (start + duration - note_start)
/ base_bpm
)
return new_duration
for note in ma2.notes:
if isinstance(note, (Ma2HoldNote, Ma2TouchHoldNote, Ma2SlideNote)):
bpms = bpm_changes(note.measure, note.duration)
if len(bpms) != 0:
note.duration = compensate_duration(
note.measure, note.duration, ma2.get_bpm(note.measure), bpms
)
if isinstance(note, Ma2SlideNote):
bpms = bpm_changes(note.measure, note.delay)
if len(bpms) != 0:
note.delay = compensate_duration(
note.measure, note.delay, ma2.get_bpm(note.measure), bpms
) | PypiClean |
/Ammonia-0.0.16.tar.gz/Ammonia-0.0.16/ammonia/mq.py | from kombu import Consumer, Producer, Connection, Exchange, Queue
from ammonia import settings
# ---------------------------------- task mq ---------------------------------- #
class TaskConnection(Connection):
hostname = settings.TASK_URL
class TaskExchange(Exchange):
def __init__(self, name=None, channel=None, *args, **kwargs):
super(TaskExchange, self).__init__(name=name, channel=channel, *args, **kwargs)
class TaskQueue(Queue):
def __init__(self, name=None, routing_key=None, exchange=None, channel=None, *args, **kwargs):
super(TaskQueue, self).__init__(
name=name, exchange=exchange, routing_key=routing_key,
channel=channel, *args, **kwargs
)
task_exchange = TaskExchange(name=settings.TASK_EXCHANGE_NAME)
task_queues = [
TaskQueue(name=settings.HIGH_TASK_QUEUE_NAME, routing_key=settings.HIGH_TASK_ROUTING_KEY, exchange=task_exchange),
TaskQueue(name=settings.MID_TASK_QUEUE_NAME, routing_key=settings.MID_TASK_ROUTING_KEY, exchange=task_exchange),
TaskQueue(name=settings.LOW_TASK_QUEUE_NAME, routing_key=settings.LOW_TASK_ROUTING_KEY, exchange=task_exchange),
]
class TaskConsumer(Consumer):
def __init__(self, channel=None, queues=None, *args, **kwargs):
super(TaskConsumer, self).__init__(channel=channel, queues=queues, *args, **kwargs)
class TaskProducer(Producer):
def __init__(self, routing_key='', channel=None, exchange=None, *args, **kwargs):
super(TaskProducer, self).__init__(routing_key=routing_key, channel=channel,
exchange=exchange, *args, **kwargs)
def publish_task(self, message, routing_key, exchange, declare):
super(TaskProducer, self).publish(body=message, routing_key=routing_key, exchange=exchange, declare=declare,
serializer='pickle')
# ---------------------------------- backend mq ---------------------------------- #
class BackendConnection(Connection):
hostname = settings.BACKEND_URL
class BackendExchange(Exchange):
def __init__(self, channel=None, *args, **kwargs):
        # The default arguments durable=True and auto_delete=False guarantee persistence
super(BackendExchange, self).__init__(channel=channel, *args, **kwargs)
class BackendQueue(Queue):
def __init__(self, routing_key="", exchange=None, channel=None, *args, **kwargs):
        # The default arguments durable=True and auto_delete=False guarantee persistence, and the queue is removed once consumed
super(BackendQueue, self).__init__(
            exchange=exchange, routing_key=routing_key,
channel=channel, *args, **kwargs
)
class BackendConsumer(Consumer):
def __init__(self, routing_key, channel=None, callbacks=None, *args, **kwargs):
queues = [BackendQueue(routing_key=routing_key, channel=channel)]
super(BackendConsumer, self).__init__(channel=channel, queues=queues, no_ack=False,
callbacks=callbacks, *args, **kwargs)
class BackendProducer(Producer):
def __init__(self, routing_key="", channel=None, exchange=None, *args, **kwargs):
super(BackendProducer, self).__init__(routing_key=routing_key, channel=channel,
exchange=exchange, *args, **kwargs)
def publish_task(self, message):
super(BackendProducer, self).publish(body=message, serializer="pickle") | PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/modules/combinatorics/permutations.rst | .. _combinatorics-permutations:
Permutations
============
.. module:: diofant.combinatorics.permutations
.. autoclass:: Permutation
:members:
.. autoclass:: Cycle
:members:
.. _combinatorics-generators:
.. autofunction:: _af_parity
Generators
----------
.. module:: diofant.combinatorics.generators
.. automethod:: diofant.combinatorics.generators.symmetric
.. automethod:: diofant.combinatorics.generators.cyclic
.. automethod:: diofant.combinatorics.generators.alternating
.. automethod:: diofant.combinatorics.generators.dihedral
| PypiClean |
/CGOL-0.9.5.tar.gz/CGOL-0.9.5/README.md | # CGOL · [](https://pypi.org/project/CGOL/) [](https://github.com/INeido/CGOL/releases)  
A Conway's Game of Life implementation using numpy and pygame.

## Description
This project has no particular aim. It is a purely personal project and barely maintained.
It is a CLI based [Conway's Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life) implementation using numpy for fast calculations and pygame for an interactive simulation.
No Hashlife or Quicklife algorithm support (yet).
---
Rules of Conway's Game of Life
1. Any live cell with two or three live neighbors survives.
2. Any dead cell with three live neighbors becomes a live cell.
3. All other live cells die in the next generation. Similarly, all other dead cells stay dead.
## Installing
Install using pip
```bash
pip install cgol
```
Manually install using CLI
```bash
git clone https://github.com/INeido/CGOL
pip install -e CGOL/.
```
## Usage
Here are some examples.
Start a simulation with the default setting but with a custom seed.
```bash
cgol -se 42
```

Change grid size, cell size and fade color.
```bash
cgol -cf 130 20 0 -cs 8 -gh 90 -gw 160
```

Change the color to white on black without fade.
```bash
cgol -fa False -ca 255 255 255
```

Draw with the mouse to birth or kill cells.

## Arguments
```
usage: CGOL [-h] [-rw RW] [-rh RH] [-ca CA [CA ...]] [-cd CD [CD ...]] [-cf CF [CF ...]] [-cb CB [CB ...]] [-cs CS] [-gw GW] [-gh GH] [-ti TI] [-se SE]
[-ps [PS]] [-po [PO]] [-fr FR] [-fd FD] [-to [TO]] [-fa [FA]]
Conway's Game of Life
options:
-h, --help show this help message and exit
-rw RW Width of the Game.
-rh RH Height of the Game.
-ca CA [CA ...] Color for alive cells. 'R G B'
-cd CD [CD ...] Color for dead cells. 'R G B'
-cf CF [CF ...] Color to fade dead cells to. 'R G B'
-cb CB [CB ...] Color for dead cells. 'R G B'
-cs CS Size of a cell in pixel.
-gw GW Width of the World.
-gh GH Height of the World.
-ti TI Number of times the game shall update in a second (FPS).
-se SE Seed value used to create World.
-ps [PS] Game pauses on a stalemate.
-po [PO] Game pauses when only oscillators remain.
-fr FR Value by which a cell should decrease every generation.
-fd FD Value a cell should have after death.
-to [TO] Enables toroidal space (Cells wrap around edges).
-fa [FA] Enables fade effect.
```
| Argument | Description | Default Value |
| ------ | ------ | ------ |
| -rh | Height of the Game. | 720 |
| -rw | Width of the Game. | 1280 |
| -ca | Colour for alive cells. 'R G B' | 255, 144, 0 |
| -cd | Colour for dead cells. 'R G B' | 0, 0, 0 |
| -cf | Colour to fade dead cells to. 'R G B' | 0, 0, 0 |
| -cb | Colour of background. 'R G B' | 16, 16, 16 |
| -cs | Size of a cell in pixel | 8 |
| -gh | Height of the World. | 90 |
| -gw | Width of the World. | 160 |
| -ti | Number of times the game shall update in a second (FPS). | 60 |
| -se | Seed value used to create World. | -1 |
| -ps | Game pauses on a stalemate. | False |
| -po | Game pauses when only oscillators remain. | False |
| -fr | Value by which a cell should decrease every generation. | 0.01 |
| -fd | Value a cell should have after death. | 0.5 |
| -to | Enables toroidal space (Cells wrap around edges). | True |
| -fa | Enables fade effect. | True |
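The arguments above can be combined freely. For example, a slower simulation on a larger grid with smaller cells might be started like this (an illustrative command, not from the original docs):
```bash
cgol -se 7 -ti 10 -cs 4 -gw 200 -gh 100
```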
## Controls
| Button | Description |
| ------ | ------ |
| ESC | Closes game. |
| RETURN | Pauses game. |
| Left Click | Births cell. |
| Right Click | Kills cell. |
| Middle Click | Drags screen. |
| Middle Scroll | Zoom in and out. |
| R | Reset game. |
| F | Fill with random cells. |
| A | Fill with alive cells. |
| D | Fill with dead cells. |
| K | Kill alive cells. |
| L | Load last saved game. |
| S | Save current game. |
| C | Center view. |
| P | Save screenshot. |
| I | Toggle Insert Mode. |
| Left Click | (Insert Mode) Place loaded pattern. |
| Middle Scroll | (Insert Mode) Rotate loaded pattern. |
| 1 | Load `1.rle`. |
| 2 | Load `2.rle`. |
| 3 | Load `3.rle`. |
| Right Arrow | Forward one generation. |
| + | Extend grid by one cell in every direction. |
| - | Reduce grid by one cell in every direction. |
| PypiClean |
/Dpowers-0.1.5rc1.tar.gz/Dpowers-0.1.5rc1/docs/index.rst | Welcome to Dpowers' documentation!
===================================
Source code:
`<https://github.com/dp0s/Dpowers>`_
Introduction
************
.. include:: intro.rst
Requirements
*************
- python 3.6 or later
- Currently only tested on apt based Linux systems (Debian, Ubuntu, Linux Mint).
.. toctree::
:maxdepth: 2
:caption: Preperation
preperation
.. toctree::
:maxdepth: 2
:caption: Quickstart
quickstart
.. toctree::
:maxdepth: 2
:glob:
:caption: Reference (still incomplete)
reference/*
Indices and tables
******************
* :ref:`genindex`
| PypiClean |
/Neodroid-0.4.9-py36-none-any.whl/neodroid/messaging/fbs/FBSModels/FReaction.py |
# namespace: Reaction
import flatbuffers
class FReaction(object):
__slots__ = ["_tab"]
@classmethod
def GetRootAsFReaction(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = FReaction()
x.Init(buf, n + offset)
return x
# FReaction
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# FReaction
def EnvironmentName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# FReaction
def Parameters(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = o + self._tab.Pos
from .FReactionParameters import FReactionParameters
obj = FReactionParameters()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FReaction
def Motions(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .FMotion import FMotion
obj = FMotion()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FReaction
def MotionsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FReaction
def Displayables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .FDisplayable import FDisplayable
obj = FDisplayable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FReaction
def DisplayablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FReaction
def Unobservables(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .FUnobservables import FUnobservables
obj = FUnobservables()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FReaction
def Configurations(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .FConfiguration import FConfiguration
obj = FConfiguration()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FReaction
def ConfigurationsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FReaction
def SerialisedMessage(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def FReactionStart(builder):
builder.StartObject(7)
def FReactionAddEnvironmentName(builder, environmentName):
builder.PrependUOffsetTRelativeSlot(
0, flatbuffers.number_types.UOffsetTFlags.py_type(environmentName), 0
)
def FReactionAddParameters(builder, parameters):
builder.PrependStructSlot(
1, flatbuffers.number_types.UOffsetTFlags.py_type(parameters), 0
)
def FReactionAddMotions(builder, motions):
builder.PrependUOffsetTRelativeSlot(
2, flatbuffers.number_types.UOffsetTFlags.py_type(motions), 0
)
def FReactionStartMotionsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def FReactionAddDisplayables(builder, displayables):
builder.PrependUOffsetTRelativeSlot(
3, flatbuffers.number_types.UOffsetTFlags.py_type(displayables), 0
)
def FReactionStartDisplayablesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def FReactionAddUnobservables(builder, unobservables):
builder.PrependUOffsetTRelativeSlot(
4, flatbuffers.number_types.UOffsetTFlags.py_type(unobservables), 0
)
def FReactionAddConfigurations(builder, configurations):
builder.PrependUOffsetTRelativeSlot(
5, flatbuffers.number_types.UOffsetTFlags.py_type(configurations), 0
)
def FReactionStartConfigurationsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def FReactionAddSerialisedMessage(builder, serialisedMessage):
builder.PrependUOffsetTRelativeSlot(
6, flatbuffers.number_types.UOffsetTFlags.py_type(serialisedMessage), 0
)
def FReactionEnd(builder):
return builder.EndObject() | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/MenuItem.js | require({cache:{"url:dijit/templates/MenuItem.html":"<tr class=\"dijitReset dijitMenuItem\" data-dojo-attach-point=\"focusNode\" role=\"menuitem\" tabIndex=\"-1\"\n\t\tdata-dojo-attach-event=\"onmouseenter:_onHover,onmouseleave:_onUnhover,ondijitclick:_onClick\">\n\t<td class=\"dijitReset dijitMenuItemIconCell\" role=\"presentation\">\n\t\t<img src=\"${_blankGif}\" alt=\"\" class=\"dijitIcon dijitMenuItemIcon\" data-dojo-attach-point=\"iconNode\"/>\n\t</td>\n\t<td class=\"dijitReset dijitMenuItemLabel\" colspan=\"2\" data-dojo-attach-point=\"containerNode\"></td>\n\t<td class=\"dijitReset dijitMenuItemAccelKey\" style=\"display: none\" data-dojo-attach-point=\"accelKeyNode\"></td>\n\t<td class=\"dijitReset dijitMenuArrowCell\" role=\"presentation\">\n\t\t<div data-dojo-attach-point=\"arrowWrapper\" style=\"visibility: hidden\">\n\t\t\t<img src=\"${_blankGif}\" alt=\"\" class=\"dijitMenuExpand\"/>\n\t\t\t<span class=\"dijitMenuExpandA11y\">+</span>\n\t\t</div>\n\t</td>\n</tr>\n"}});
define("dijit/MenuItem",["dojo/_base/declare","dojo/dom","dojo/dom-attr","dojo/dom-class","dojo/_base/event","dojo/_base/kernel","dojo/_base/sniff","./_Widget","./_TemplatedMixin","./_Contained","./_CssStateMixin","dojo/text!./templates/MenuItem.html"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b,_c){
return _1("dijit.MenuItem",[_8,_9,_a,_b],{templateString:_c,baseClass:"dijitMenuItem",label:"",_setLabelAttr:{node:"containerNode",type:"innerHTML"},iconClass:"dijitNoIcon",_setIconClassAttr:{node:"iconNode",type:"class"},accelKey:"",disabled:false,_fillContent:function(_d){
if(_d&&!("label" in this.params)){
this.set("label",_d.innerHTML);
}
},buildRendering:function(){
this.inherited(arguments);
var _e=this.id+"_text";
_3.set(this.containerNode,"id",_e);
if(this.accelKeyNode){
_3.set(this.accelKeyNode,"id",this.id+"_accel");
_e+=" "+this.id+"_accel";
}
this.domNode.setAttribute("aria-labelledby",_e);
_2.setSelectable(this.domNode,false);
},_onHover:function(){
this.getParent().onItemHover(this);
},_onUnhover:function(){
this.getParent().onItemUnhover(this);
this._set("hovering",false);
},_onClick:function(_f){
this.getParent().onItemClick(this,_f);
_5.stop(_f);
},onClick:function(){
},focus:function(){
try{
if(_7("ie")==8){
this.containerNode.focus();
}
this.focusNode.focus();
}
catch(e){
}
},_onFocus:function(){
this._setSelected(true);
this.getParent()._onItemFocus(this);
this.inherited(arguments);
},_setSelected:function(_10){
_4.toggle(this.domNode,"dijitMenuItemSelected",_10);
},setLabel:function(_11){
_6.deprecated("dijit.MenuItem.setLabel() is deprecated. Use set('label', ...) instead.","","2.0");
this.set("label",_11);
},setDisabled:function(_12){
_6.deprecated("dijit.Menu.setDisabled() is deprecated. Use set('disabled', bool) instead.","","2.0");
this.set("disabled",_12);
},_setDisabledAttr:function(_13){
this.focusNode.setAttribute("aria-disabled",_13?"true":"false");
this._set("disabled",_13);
},_setAccelKeyAttr:function(_14){
this.accelKeyNode.style.display=_14?"":"none";
this.accelKeyNode.innerHTML=_14;
_3.set(this.containerNode,"colSpan",_14?"1":"2");
this._set("accelKey",_14);
}});
}); | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/io/OAuth.js | if(!dojo._hasResource["dojox.io.OAuth"]){
dojo._hasResource["dojox.io.OAuth"]=true;
dojo.provide("dojox.io.OAuth");
dojo.require("dojox.encoding.digests.SHA1");
dojox.io.OAuth=new (function(){
var _1=this.encode=function(s){
if(!s){
return "";
}
return encodeURIComponent(s).replace(/\!/g,"%21").replace(/\*/g,"%2A").replace(/\'/g,"%27").replace(/\(/g,"%28").replace(/\)/g,"%29");
};
var _2=this.decode=function(_3){
var a=[],_4=_3.split("&");
for(var i=0,l=_4.length;i<l;i++){
var _5=_4[i];
if(_4[i]==""){
continue;
}
if(_4[i].indexOf("=")>-1){
var _6=_4[i].split("=");
a.push([decodeURIComponent(_6[0]),decodeURIComponent(_6[1])]);
}else{
a.push([decodeURIComponent(_4[i]),null]);
}
}
return a;
};
function _7(_8){
var _9=["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"],_a=/^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/,_b=_a.exec(_8),_c={},i=_9.length;
while(i--){
_c[_9[i]]=_b[i]||"";
}
var p=_c.protocol.toLowerCase(),a=_c.authority.toLowerCase(),b=(p=="http"&&_c.port==80)||(p=="https"&&_c.port==443);
if(b){
if(a.lastIndexOf(":")>-1){
a=a.substring(0,a.lastIndexOf(":"));
}
}
var _d=_c.path||"/";
_c.url=p+"://"+a+_d;
return _c;
};
var _e="0123456789ABCDEFGHIJKLMNOPQRSTUVWXTZabcdefghiklmnopqrstuvwxyz";
function _f(_10){
var s="",tl=_e.length;
for(var i=0;i<_10;i++){
s+=_e.charAt(Math.floor(Math.random()*tl));
}
return s;
};
function _11(){
return Math.floor(new Date().valueOf()/1000)-2;
};
function _12(_13,key,_14){
if(_14&&_14!="PLAINTEXT"&&_14!="HMAC-SHA1"){
throw new Error("dojox.io.OAuth: the only supported signature encodings are PLAINTEXT and HMAC-SHA1.");
}
if(_14=="PLAINTEXT"){
return key;
}else{
return dojox.encoding.digests.SHA1._hmac(_13,key);
}
};
function key(_15){
return _1(_15.consumer.secret)+"&"+(_15.token&&_15.token.secret?_1(_15.token.secret):"");
};
function _16(_17,oaa){
var o={oauth_consumer_key:oaa.consumer.key,oauth_nonce:_f(16),oauth_signature_method:oaa.sig_method||"HMAC-SHA1",oauth_timestamp:_11(),oauth_version:"1.0"};
if(oaa.token){
o.oauth_token=oaa.token.key;
}
_17.content=dojo.mixin(_17.content||{},o);
};
function _18(_19){
var _1a=[{}],_1b;
if(_19.form){
if(!_19.content){
_19.content={};
}
var _1c=dojo.byId(_19.form);
var _1d=_1c.getAttributeNode("action");
_19.url=_19.url||(_1d?_1d.value:null);
_1b=dojo.formToObject(_1c);
delete _19.form;
}
if(_1b){
_1a.push(_1b);
}
if(_19.content){
_1a.push(_19.content);
}
var map=_7(_19.url);
if(map.query){
var tmp=dojo.queryToObject(map.query);
for(var p in tmp){
tmp[p]=encodeURIComponent(tmp[p]);
}
_1a.push(tmp);
}
_19._url=map.url;
var a=[];
for(var i=0,l=_1a.length;i<l;i++){
var _1e=_1a[i];
for(var p in _1e){
if(dojo.isArray(_1e[p])){
for(var j=0,jl=_1e.length;j<jl;j++){
a.push([p,_1e[j]]);
}
}else{
a.push([p,_1e[p]]);
}
}
}
_19._parameters=a;
return _19;
};
function _1f(_20,_21,oaa){
_16(_21,oaa);
_18(_21);
var a=_21._parameters;
a.sort(function(a,b){
if(a[0]>b[0]){
return 1;
}
if(a[0]<b[0]){
return -1;
}
if(a[1]>b[1]){
return 1;
}
if(a[1]<b[1]){
return -1;
}
return 0;
});
var s=dojo.map(a,function(_22){
return _1(_22[0])+"="+_1(_22[1]||"");
}).join("&");
var _23=_20.toUpperCase()+"&"+_1(_21._url)+"&"+_1(s);
return _23;
};
function _24(_25,_26,oaa){
var k=key(oaa),_27=_1f(_25,_26,oaa),s=_12(_27,k,oaa.sig_method||"HMAC-SHA1");
_26.content["oauth_signature"]=s;
return _26;
};
this.sign=function(_28,_29,oaa){
return _24(_28,_29,oaa);
};
this.xhr=function(_2a,_2b,oaa,_2c){
_24(_2a,_2b,oaa);
return dojo.xhr(_2a,_2b,_2c);
};
this.xhrGet=function(_2d,oaa){
return this.xhr("GET",_2d,oaa);
};
this.xhrPost=this.xhrRawPost=function(_2e,oaa){
return this.xhr("POST",_2e,oaa,true);
};
this.xhrPut=this.xhrRawPut=function(_2f,oaa){
return this.xhr("PUT",_2f,oaa,true);
};
this.xhrDelete=function(_30,oaa){
return this.xhr("DELETE",_30,oaa);
};
})();
} | PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/contrib/gis/gdal/prototypes/raster.py | from ctypes import POINTER, c_bool, c_char_p, c_double, c_int, c_void_p
from functools import partial
from djmodels.contrib.gis.gdal.libgdal import GDAL_VERSION, std_call
from djmodels.contrib.gis.gdal.prototypes.generation import (
chararray_output, const_string_output, double_output, int_output,
void_output, voidptr_output,
)
# For more detail about c function names and definitions see
# http://gdal.org/gdal_8h.html
# http://gdal.org/gdalwarper_8h.html
# http://www.gdal.org/gdal__utils_8h.html
# Prepare partial functions that use cpl error codes
void_output = partial(void_output, cpl=True)
const_string_output = partial(const_string_output, cpl=True)
double_output = partial(double_output, cpl=True)
# Raster Driver Routines
register_all = void_output(std_call('GDALAllRegister'), [], errcheck=False)
get_driver = voidptr_output(std_call('GDALGetDriver'), [c_int])
get_driver_by_name = voidptr_output(std_call('GDALGetDriverByName'), [c_char_p], errcheck=False)
get_driver_count = int_output(std_call('GDALGetDriverCount'), [])
get_driver_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
# Raster Data Source Routines
create_ds = voidptr_output(std_call('GDALCreate'), [c_void_p, c_char_p, c_int, c_int, c_int, c_int, c_void_p])
open_ds = voidptr_output(std_call('GDALOpen'), [c_char_p, c_int])
close_ds = void_output(std_call('GDALClose'), [c_void_p], errcheck=False)
flush_ds = int_output(std_call('GDALFlushCache'), [c_void_p])
copy_ds = voidptr_output(
std_call('GDALCreateCopy'),
[c_void_p, c_char_p, c_void_p, c_int, POINTER(c_char_p), c_void_p, c_void_p]
)
add_band_ds = void_output(std_call('GDALAddBand'), [c_void_p, c_int])
get_ds_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_ds_driver = voidptr_output(std_call('GDALGetDatasetDriver'), [c_void_p])
get_ds_xsize = int_output(std_call('GDALGetRasterXSize'), [c_void_p])
get_ds_ysize = int_output(std_call('GDALGetRasterYSize'), [c_void_p])
get_ds_raster_count = int_output(std_call('GDALGetRasterCount'), [c_void_p])
get_ds_raster_band = voidptr_output(std_call('GDALGetRasterBand'), [c_void_p, c_int])
get_ds_projection_ref = const_string_output(std_call('GDALGetProjectionRef'), [c_void_p])
set_ds_projection_ref = void_output(std_call('GDALSetProjection'), [c_void_p, c_char_p])
get_ds_geotransform = void_output(std_call('GDALGetGeoTransform'), [c_void_p, POINTER(c_double * 6)], errcheck=False)
set_ds_geotransform = void_output(std_call('GDALSetGeoTransform'), [c_void_p, POINTER(c_double * 6)])
get_ds_metadata = chararray_output(std_call('GDALGetMetadata'), [c_void_p, c_char_p], errcheck=False)
set_ds_metadata = void_output(std_call('GDALSetMetadata'), [c_void_p, POINTER(c_char_p), c_char_p])
get_ds_metadata_domain_list = chararray_output(std_call('GDALGetMetadataDomainList'), [c_void_p], errcheck=False)
get_ds_metadata_item = const_string_output(std_call('GDALGetMetadataItem'), [c_void_p, c_char_p, c_char_p])
set_ds_metadata_item = const_string_output(std_call('GDALSetMetadataItem'), [c_void_p, c_char_p, c_char_p, c_char_p])
free_dsl = void_output(std_call('CSLDestroy'), [POINTER(c_char_p)], errcheck=False)
if GDAL_VERSION >= (2, 1):
get_ds_info = const_string_output(std_call('GDALInfo'), [c_void_p, c_void_p])
else:
get_ds_info = None
# Raster Band Routines
band_io = void_output(
std_call('GDALRasterIO'),
[c_void_p, c_int, c_int, c_int, c_int, c_int, c_void_p, c_int, c_int, c_int, c_int, c_int]
)
get_band_xsize = int_output(std_call('GDALGetRasterBandXSize'), [c_void_p])
get_band_ysize = int_output(std_call('GDALGetRasterBandYSize'), [c_void_p])
get_band_index = int_output(std_call('GDALGetBandNumber'), [c_void_p])
get_band_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_band_ds = voidptr_output(std_call('GDALGetBandDataset'), [c_void_p])
get_band_datatype = int_output(std_call('GDALGetRasterDataType'), [c_void_p])
get_band_color_interp = int_output(std_call('GDALGetRasterColorInterpretation'), [c_void_p])
get_band_nodata_value = double_output(std_call('GDALGetRasterNoDataValue'), [c_void_p, POINTER(c_int)])
set_band_nodata_value = void_output(std_call('GDALSetRasterNoDataValue'), [c_void_p, c_double])
if GDAL_VERSION >= (2, 1):
delete_band_nodata_value = void_output(std_call('GDALDeleteRasterNoDataValue'), [c_void_p])
else:
delete_band_nodata_value = None
get_band_statistics = void_output(
std_call('GDALGetRasterStatistics'),
[
c_void_p, c_int, c_int, POINTER(c_double), POINTER(c_double),
POINTER(c_double), POINTER(c_double), c_void_p, c_void_p,
],
)
compute_band_statistics = void_output(
std_call('GDALComputeRasterStatistics'),
[c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_void_p, c_void_p],
)
# Reprojection routine
reproject_image = void_output(
std_call('GDALReprojectImage'),
[c_void_p, c_char_p, c_void_p, c_char_p, c_int, c_double, c_double, c_void_p, c_void_p, c_void_p]
)
auto_create_warped_vrt = voidptr_output(
std_call('GDALAutoCreateWarpedVRT'),
[c_void_p, c_char_p, c_char_p, c_int, c_double, c_void_p]
)
# Create VSI gdal raster files from in-memory buffers.
# http://gdal.org/cpl__vsi_8h.html
create_vsi_file_from_mem_buffer = voidptr_output(std_call('VSIFileFromMemBuffer'), [c_char_p, c_void_p, c_int, c_int])
get_mem_buffer_from_vsi_file = voidptr_output(std_call('VSIGetMemFileBuffer'), [c_char_p, POINTER(c_int), c_bool])
unlink_vsi_file = int_output(std_call('VSIUnlink'), [c_char_p]) | PypiClean |
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/model/managers/detectors/ThorcamManager.py | import numpy as np
from imswitch.imcommon.model import initLogger
from .DetectorManager import DetectorManager, DetectorAction, DetectorNumberParameter, DetectorListParameter
class ThorcamManager(DetectorManager):
""" DetectorManager that deals with TheImagingSource cameras and the
parameters for frame extraction from them.
Manager properties:
- ``cameraListIndex`` -- the camera's index in the Allied Vision camera list (list
indexing starts at 0); set this string to an invalid value, e.g. the
string "mock" to load a mocker
- ``av`` -- dictionary of Allied Vision camera properties
"""
def __init__(self, detectorInfo, name, **_lowLevelManagers):
self.__logger = initLogger(self, instanceName=name)
cameraId = detectorInfo.managerProperties['cameraListIndex']
self._camera = self._getGXObj(cameraId )
for propertyName, propertyValue in detectorInfo.managerProperties['gxipycam'].items():
self._camera.setPropertyValue(propertyName, propertyValue)
fullShape = (self._camera.SensorWidth,
self._camera.SensorHeight)
model = self._camera.model
self._running = False
self._adjustingParameters = False
# Prepare parameters
parameters = {
'exposure': DetectorNumberParameter(group='Misc', value=100, valueUnits='ms',
editable=True),
'image_width': DetectorNumberParameter(group='Misc', value=fullShape[0], valueUnits='arb.u.',
editable=False),
'image_height': DetectorNumberParameter(group='Misc', value=fullShape[1], valueUnits='arb.u.',
editable=False)
}
# Prepare actions
actions = {
'More properties': DetectorAction(group='Misc',
func=self._camera.openPropertiesGUI)
}
super().__init__(detectorInfo, name, fullShape=fullShape, supportedBinnings=[1],
model=model, parameters=parameters, actions=actions, croppable=False)
def getLatestFrame(self, is_save=False):
if is_save:
return self._camera.getLastChunk()
else:
return self._camera.getLast()
def setParameter(self, name, value):
"""Sets a parameter value and returns the value.
If the parameter doesn't exist, i.e. the parameters field doesn't
contain a key with the specified parameter name, an error will be
raised."""
super().setParameter(name, value)
if name not in self._DetectorManager__parameters:
raise AttributeError(f'Non-existent parameter "{name}" specified')
value = self._camera.setPropertyValue(name, value)
return value
def getParameter(self, name):
"""Gets a parameter value and returns the value.
If the parameter doesn't exist, i.e. the parameters field doesn't
contain a key with the specified parameter name, an error will be
raised."""
if name not in self._parameters:
raise AttributeError(f'Non-existent parameter "{name}" specified')
value = self._camera.getPropertyValue(name)
return value
def getChunk(self):
try:
return self._camera.getLastChunk()
except:
return None
def flushBuffers(self):
pass
def startAcquisition(self):
pass
def stopAcquisition(self):
pass
def stopAcquisitionForROIChange(self):
pass
def finalize(self) -> None:
super().finalize()
self.__logger.debug('Safely disconnecting the camera...')
self._camera.close()
@property
def pixelSizeUm(self):
return [1, 1, 1]
def crop(self, hpos, vpos, hsize, vsize):
pass
def _performSafeCameraAction(self, function):
""" This method is used to change those camera properties that need
the camera to be idle to be able to be adjusted.
"""
self._adjustingParameters = True
wasrunning = self._running
self.stopAcquisitionForROIChange()
function()
if wasrunning:
self.startAcquisition()
self._adjustingParameters = False
def openPropertiesDialog(self):
self._camera.openPropertiesGUI()
def _getGXObj(self, cameraId, binning=1):
try:
from imswitch.imcontrol.model.interfaces.thorcamera import ThorCamera
self.__logger.debug(f'Trying to initialize ThorCamera {cameraId}')
camera = ThorCamera(cameraNo=cameraId, binning=binning)
except Exception as e:
self.__logger.debug(e)
self.__logger.warning(f'Failed to initialize ThorCamera {cameraId}, loading TIS mocker')
from imswitch.imcontrol.model.interfaces.tiscamera_mock import MockCameraTIS
camera = MockCameraTIS()
self.__logger.info(f'Initialized camera, model: {camera.model}')
return camera
def closeEvent(self):
self._camera.close()
# Copyright (C) ImSwitch developers 2021
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
/Fileseq-1.15.2.tar.gz/Fileseq-1.15.2/src/fileseq/constants.py | from __future__ import absolute_import
import re
# The max frame count of a FrameSet before a MaxSizeException
# exception is raised
MAX_FRAME_SIZE = 10000000
class _PadStyle(object):
def __init__(self, name):
self.__name = name
def __hash__(self):
return hash(str(self))
def __repr__(self):
return '<PAD_STYLE: {}>'.format(self.__name)
def __str__(self):
return self.__name
def __eq__(self, other):
if not isinstance(other, _PadStyle):
return False
return str(self) == str(other)
PAD_STYLE_HASH1 = _PadStyle("HASH1")
PAD_STYLE_HASH4 = _PadStyle("HASH4")
PAD_STYLE_DEFAULT = PAD_STYLE_HASH4
PAD_MAP = {
"#": {PAD_STYLE_HASH1: 1, PAD_STYLE_HASH4: 4},
"@": {PAD_STYLE_HASH1: 1, PAD_STYLE_HASH4: 1}
}
# Map width back to preferred padding character. Every pad style must have an
# entry for width 1, which will also be the default pad character
REVERSE_PAD_MAP = {
PAD_STYLE_HASH1: {1: "#"},
PAD_STYLE_HASH4: {1: "@", 4: "#"}
}
# Component of SPLIT_PATTERN (c.f FRANGE_PATTERN).
_FRANGE_PART = r"""
-?\d+ # start frame
(?: # optional range
- # range delimiter
-?\d+ # end frame
(?: # optional stepping
[:xy] # step format
-?\d+ # step value
)?
)?
"""
_FRANGE_PART = re.compile(_FRANGE_PART, re.X).pattern
# Regular expression for matching a file sequence string.
# Example: /film/shot/renders/bilbo_bty.1-100#.exr
# Example: /film/shot/renders/[email protected]
# Example: /film/shot/renders/bilbo_bty.1-100@@@@#.exr
# Example: /film/shot/renders/bilbo_bty.1-100%04d.exr
# Example: /film/shot/renders/bilbo_bty.1-100$F4.exr
# Example: /film/shot/renders/bilbo_bty.1-100<UDIM>.exr
SPLIT_PATTERN = r"""
((?:{0}(?:,{0})*)?) # range
( # pad format
[{1}]+ # pad map characters
|
%\d*d # printf syntax pad format
|
\$F\d* # Houdini syntax pad format
|
<UDIM>|%\(UDIM\)d # UDIM Syntax pad format
)
""".format(_FRANGE_PART, ''.join(PAD_MAP))
SPLIT_RE = re.compile(SPLIT_PATTERN, re.X)
# Component of SPLIT_SUB_PATTERN (c.f FRANGE_PATTERN).
# If both start and stop are present either both or neither should have
# a fractional component to avoid ambiguity when basename ends in \d\.
_FRANGE_SUB_PARTS = [
_FRANGE_PART,
r"""
(?:
-?\d+ # start frame
(?: # optional range
- # range delimiter
-?\d+ # end frame
(?: # optional stepping
x # step format
-?\d+\.\d+ # step value
)?
)?
)
""",r"""
(?:
-?\d+\.\d+ # start frame
(?: # optional range
- # range delimiter
-?\d+\.\d+ # end frame
(?: # optional stepping
x # step format
-?\d+(?:\.\d+)? # step value
)?
)?
)
"""]
_FRANGE_SUB_PARTS = [
re.compile(part, re.X).pattern for part in _FRANGE_SUB_PARTS
]
# Regular expression for matching a file sequence string allowing subframes.
# Example: /film/shot/renders/bilbo_bty.1-100#.#.exr
# Example: /film/shot/renders/bilbo_bty.1.5-2x0.1#.#.exr
SPLIT_SUB_PATTERN = r"""
( # range
(?:
(?:{1}(?:,{1})*)
|
(?:{2}(?:,{2})*)
|
(?:{3}(?:,{3})*)
)?
)
( # pad format
[{0}]+(?:\.[{0}]+)? # pad map characters
|
%\d*d # printf syntax pad format
|
\$F\d* # Houdini syntax pad format
|
<UDIM>|%\(UDIM\)d # UDIM Syntax pad format
)
""".format(''.join(PAD_MAP), *_FRANGE_SUB_PARTS)
SPLIT_SUB_RE = re.compile(SPLIT_SUB_PATTERN, re.X)
# Regular expression pattern for matching padding against a printf syntax
# padding string E.g. %04d
PRINTF_SYNTAX_PADDING_PATTERN = r"\A%(\d*)d\Z"
PRINTF_SYNTAX_PADDING_RE = re.compile(PRINTF_SYNTAX_PADDING_PATTERN)
# Regular expression pattern for matching padding against houdini syntax
HOUDINI_SYNTAX_PADDING_PATTERN = r"\A\$F(\d*)\Z"
HOUDINI_SYNTAX_PADDING_RE = re.compile(HOUDINI_SYNTAX_PADDING_PATTERN)
# Legal patterns for UDIM style padding
UDIM_PADDING_PATTERNS = ['<UDIM>', '%(UDIM)d']
# Regular expression pattern for matching file names on disk.
DISK_PATTERN = r"""
\A
((?:.*[/\\])?) # dirname
(.*?) # basename
(-?\d+)? # frame
( # ext
(?:\.\w*[a-zA-Z]\w?)* # optional leading alnum ext prefix (.foo.1bar)
(?:\.[^.]+)? # ext suffix
)
\Z
"""
DISK_RE = re.compile(DISK_PATTERN, re.X)
# Regular expression pattern for matching file names on disk allowing subframes.
DISK_SUB_PATTERN = r"""
\A
((?:.*[/\\])?) # dirname
(.*?) # basename
(-?\d+(?:\.\d+)?)? # frame
( # ext
(?:\.\w*[a-zA-Z]\w?)* # optional leading alnum ext prefix (.foo.1bar)
(?:\.[^.]+)? # ext suffix
)
\Z
"""
DISK_SUB_RE = re.compile(DISK_SUB_PATTERN, re.X)
# Regular expression pattern for matching frame set strings.
# Examples: '1.0' or '1.0-100.0', '1.0-100.0x0.5', '1-100x0.25',
# '1,2', etc.
FRANGE_PATTERN = r"""
\A
(-?\d+(?:\.\d+)?) # start frame
(?: # optional range
- # range delimiter
(-?\d+(?:\.\d+)?) # end frame
(?: # optional stepping
([:xy]) # step format
(-?\d+(?:\.\d+)?) # step value
)?
)?
\Z
"""
FRANGE_RE = re.compile(FRANGE_PATTERN, re.X)
# Regular expression for padding a frame range.
PAD_PATTERN = r"""
(-?)(\d+(?:\.\d+)?) # start frame
(?: # optional range
(-) # range delimiter
(-?)(\d+(?:\.\d+)?) # end frame
(?: # optional stepping
([:xy]) # step format
(\d+(?:\.\d+)?) # step value
)?
)?
"""
PAD_RE = re.compile(PAD_PATTERN, re.X) | PypiClean |
/AyDictionary-0.0.4.tar.gz/AyDictionary-0.0.4/README.md | ## AyDictionary: A Dictionary Module for Python
<!-- [](https://travis-ci.org/geekpradd/AyDictionary) -->
[](https://pypi.python.org/pypi/AyDictionary/)
[](https://pypi.python.org/pypi/AyDictionary/)
<!-- [](https://pypi.python.org/pypi/AyDictionary/) -->
AyDictionary is modified version of [PyDictionary](https://github.com/geekpradd/PyDictionary)
AyDictionary is a dictionary module for Python 3 to get meanings, synonyms and antonyms of words. It uses WordNet for getting meanings, Google for translations, and synonym.com for getting synonyms and antonyms.
This module uses Python Requests, BeautifulSoup4 and click as dependencies
### Installation
Installation is very simple through pip (or easy_install)
For pip
```
pip install AyDictionary
```
For Easy_Install
```
easy_install AyDictionary
```
### Usage
AyDictionary can be utilised in two ways: either by creating a dictionary instance which takes words as arguments on each call, or by creating a dictionary instance with a fixed set of words.
For example,
```python
from AyDictionary import AyDictionary
dictionary = AyDictionary()
```
This will create a local instance of the AyDictionary class, and it can now be used to get meanings, translations etc.
```python
print (dictionary.meaning("indentation"))
```
This will return a dictionary containing the meanings of the word.
For example the above code will return:
```
{'Noun': ['a concave cut into a surface or edge (as in a coastline',
'the formation of small pits in a surface as a consequence of corrosion',
'the space left between the margin and the start of an indented line',
'the act of cutting into an edge with toothlike notches or angular incisions']}
```
The dictionary keys are the different types of the word. If a word is both a verb and a noun then there will be 2 keys: 'Noun' and 'Verb'.
Each key refers to a list containing the meanings
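For example, an individual sense can be read by indexing the returned dictionary (a short illustrative snippet; the `"Noun"` key is taken from the output above):
```python
meanings = dictionary.meaning("indentation")
# "Noun" is one of the part-of-speech keys returned for this word
print(meanings["Noun"][0])
```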
For Synonyms,
```python
print (dictionary.synonym("Life"))
```
This will return a list containing the Synonyms of the word.
For Antonyms,
```python
print (dictionary.antonym("Life"))
```
This will return a list containing the Antonyms of the word.
<!-- For Translations,
```python
print (dictionary.translate("Range",'es'))
```
This will return the Translation of the word "Range" in Spanish. For Language codes consult Google Translate. The return value is string in Python 3 and unicode in Python 2 -->
Alternatively, you can pass a fixed set of words to the AyDictionary instance. This is useful if you just want to get the meanings of some words quickly, without any further development.
Example:
```python
from AyDictionary import AyDictionary
dictionary=AyDictionary("hotel","ambush","nonchalant","perceptive")
'There can be any number of words in the Instance'
print(dictionary.printMeanings()) '''This prints the meanings of all the words'''
print(dictionary.getMeanings()) '''This will return meanings as dictionaries'''
print (dictionary.getSynonyms())
```
<!-- print (dictionary.translateTo("hi")) '''This will translate all words to Hindi''' -->
Similarly Synonyms and Antonyms can also be printed onto the screen.
### About
Current Version: 0.0.4
Created By Ayush Agrawal 2023.
The original library is [PyDictionary](https://github.com/geekpradd/PyDictionary) ⭐
/ArseinTest-4.8.8.tar.gz/ArseinTest-4.8.8/arsein/Getheader.py | import aiohttp
import asyncio
from arsein.Encoder import encoderjson
from arsein.PostData import method_Rubika,httpfiles,_download_with_server
from json import loads
from pathlib import Path
from arsein.Clien import clien
class Upload:
def __init__(self, Sh_account:str):
self.Auth = Sh_account
self.enc = encoderjson(Sh_account)
self.methodUpload = method_Rubika(Sh_account)
def requestSendFile(self,file):
while 1:
try:
return self.methodUpload.methodsRubika("json",methode ="requestSendFile",indata = {"file_name": str(file.split("/")[-1]),"mime": file.split(".")[-1],"size": Path(file).stat().st_size},wn = clien.web).get("data")
break
except:
continue
def uploadFile(self, file):
while 1:
try:
for tr in range(1):
if not ("http" or "https") in file:
REQUES = self.requestSendFile(file)
bytef = open(file,"rb").read()
hash_send = REQUES["access_hash_send"]
file_id = REQUES["id"]
url = REQUES["upload_url"]
header = {
'auth':self.Auth,
'Host':url.replace("https://","").replace("/UploadFile.ashx",""),
'chunk-size':str(Path(file).stat().st_size),
'file-id':str(file_id),
'access-hash-send':hash_send,
"content-type": "application/octet-stream",
"content-length": str(Path(file).stat().st_size),
"accept-encoding": "gzip",
"user-agent": "okhttp/3.12.1"
}
if len(bytef) <= 131072:
header["part-number"], header["total-part"] = "1","1"
while True:
try:
#loop = asyncio.get_event_loop()
j = self.methodUpload.methodsRubika(types = "file",server = url,podata = bytef,header = header)
j = loads(j)['data']['access_hash_rec']
break
except:
continue
return [REQUES, j]
else:
t = round(len(bytef) / 131072 + 1)
for i in range(1,t+1):
if i != t:
k = i - 1
k = k * 131072
while True:
try:
header["chunk-size"], header["part-number"], header["total-part"] = "131072", str(i),str(t)
#loop = asyncio.get_event_loop()
o = self.methodUpload.methodsRubika(types = "file",server = url,podata = bytef[k:k + 131072],header = header)
o = loads(o)['data']
break
except:
continue
else:
k = i - 1
k = k * 131072
while True:
try:
header["chunk-size"], header["part-number"], header["total-part"] = str(len(bytef[k:])), str(i),str(t)
#loop = asyncio.get_event_loop()
p = self.methodUpload.methodsRubika(types = "file",server = url,podata = bytef[k:],header = header)
p = loads(p)['data']['access_hash_rec']
break
except:
continue
return [REQUES, p]
else:
loop = asyncio.get_event_loop()
while 1:
try:
REQUES = self.methodUpload.methodsRubika("json",methode ="requestSendFile",indata = {"file_name": str(file.split("/")[-1]),"mime": file.split(".")[-1],"size": len(loop.run_until_complete(_download_with_server(server = file)))},wn = clien.web).get("data")
break
except:
continue
hash_send = REQUES["access_hash_send"]
file_id = REQUES["id"]
url = REQUES["upload_url"]
loop = asyncio.get_event_loop()
bytef = loop.run_until_complete(_download_with_server(server = file))
header = {
'auth':self.Auth,
'Host':url.replace("https://","").replace("/UploadFile.ashx",""),
'chunk-size':str(len(loop.run_until_complete(_download_with_server(server = file)))),
'file-id':str(file_id),
'access-hash-send':hash_send,
"content-type": "application/octet-stream",
"content-length": str(len(loop.run_until_complete(_download_with_server(server = file)))),
"accept-encoding": "gzip",
"user-agent": "okhttp/3.12.1"
}
if len(bytef) <= 131072:
header["part-number"], header["total-part"] = "1","1"
while True:
try:
#loop = asyncio.get_event_loop()
j = self.methodUpload.methodsRubika(types = "file",server = url,podata = bytef,header = header)
j = loads(j)['data']['access_hash_rec']
break
except:
continue
return [REQUES, j]
else:
t = round(len(bytef) / 131072 + 1)
for i in range(1,t+1):
if i != t:
k = i - 1
k = k * 131072
while True:
try:
header["chunk-size"], header["part-number"], header["total-part"] = "131072", str(i),str(t)
#loop = asyncio.get_event_loop()
o = self.methodUpload.methodsRubika(types = "file",server = url,podata = bytef[k:k + 131072],header = header)
o = loads(o)['data']
break
except:
continue
else:
k = i - 1
k = k * 131072
while True:
try:
header["chunk-size"], header["part-number"], header["total-part"] = str(len(bytef[k:])), str(i),str(t)
#loop = asyncio.get_event_loop()
p = self.methodUpload.methodsRubika(types = "file",server = url,podata = bytef[k:],header = header)
p = loads(p)['data']['access_hash_rec']
break
except:
continue
return [REQUES, p]
break
except:
continue | PypiClean |
/MokaPlayer-0.8.5.7.tar.gz/MokaPlayer-0.8.5.7/mokaplayer/core/playlists/__init__.py | import enum
import peewee
from mokaplayer.core.database import Song
class AbstractPlaylist:
""" Abstract class for a playlist
"""
class OrderBy(enum.Enum):
""" Enum for the different way to order a playlist
"""
DEFAULT = enum.auto()
ARTIST = enum.auto()
ALBUM = enum.auto()
TITLE = enum.auto()
YEAR = enum.auto()
LENGTH = enum.auto()
ADDED = enum.auto()
PLAYED = enum.auto()
@property
def name(self):
""" Return the name of the playlist """
def collections(self, order, desc):
""" Return a list of songs in the specified order"""
def get_orderby_fields(self, order, desc):
""" Return a list of fields for a order by query
"""
fields = []
if order == self.OrderBy.ALBUM:
fields = [peewee.fn.strip_articles(Song.Album),
Song.Discnumber, Song.Tracknumber]
elif order == self.OrderBy.YEAR:
fields = [Song.Year, peewee.fn.strip_articles(Song.Album),
Song.Discnumber, Song.Tracknumber]
elif order == self.OrderBy.ADDED:
fields = [-Song.Added, peewee.fn.strip_articles(Song.AlbumArtist), Song.Year,
peewee.fn.strip_articles(Song.Album), Song.Discnumber, Song.Tracknumber]
elif order == self.OrderBy.TITLE:
fields = [Song.Title]
elif order == self.OrderBy.LENGTH:
fields = [Song.Length]
elif order == self.OrderBy.PLAYED:
fields = [-Song.Played]
else:
fields = [peewee.fn.strip_articles(Song.AlbumArtist), Song.Year,
peewee.fn.strip_articles(Song.Album), Song.Discnumber, Song.Tracknumber]
if desc and fields[0]._ordering == 'DESC':
fields[0] = fields[0].asc()
elif desc:
fields[0] = -fields[0]
return fields
from .songs_playlist import SongsPlaylist
from .m3u_playlist import M3UPlaylist
from .mostplayed_playlist import MostPlayedPlaylist
from .rarelyplayed_playlist import RarelyPlayedPlaylist
from .recentlyadded_playlist import RecentlyAddedPlaylist
from .recentlyplayed_playlist import RecentlyPlayedPlaylist
from .upnext_playlist import UpNextPlaylist
from .albums_playlist import AlbumsPlaylist
from .album_playlist import AlbumPlaylist
from .artists_playlist import ArtistsPlaylist
from .artist_playlist import ArtistPlaylist | PypiClean |
/AstroCabTools-1.5.1.tar.gz/AstroCabTools-1.5.1/astrocabtools/mrs_subviz/src/viewers/canvas_interaction/centroidAreaSelectionCanvas/panOnClick.py | import numpy
import weakref
from pubsub import pub
import matplotlib.pyplot as _plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from .zoomOnWheel import ZoomOnWheel
class PanOnClick(ZoomOnWheel):
"""Class providing pan & zoom interaction to a matplotlib Figure.
Left button for pan and zoom on wheel.
"""
def __init__(self, figure=None, scale_factor=1.1):
"""Initializer
:param Figure figure: The matplotlib figure to attach the behavior to.
:param float scale_factor: The scale factor to apply on wheel event.
"""
super(PanOnClick, self).__init__(figure, scale_factor=1.1)
self._add_connection_pan('button_press_event', self._on_mouse_press)
self._add_connection_pan('button_release_event', self._on_mouse_release)
self._add_connection_pan('motion_notify_event', self._on_mouse_motion)
self._pressed_button = None # To store active button
self._axes = None # To store x and y axes concerned by interaction
self._event = None # To store reference event during interaction
def _pan_update_limits(self, ax, axis_id, event, last_event):
"""Compute limits with applied pan."""
if axis_id == 0:
lim = ax.get_xlim()
else:
lim = ax.get_ylim()
#transData makes it possible to transform data values into display values (pixels on the screen)
#inverted() allows transforming a display point back into a data point
#Because of this, for every pan made we can transform every value of the image to
#the ones that will fit on the screen, based on the initial (x, y) and the final (x, y):
#the initial x, y correspond to the button_press_event values, and the final ones correspond to the button_release_event
#Each time the motion_notify_event occurs, the previous values are saved and the image is updated
pixel_to_data = ax.transData.inverted()
data = pixel_to_data.transform_point((event.x, event.y))
last_data = pixel_to_data.transform_point((last_event.x, last_event.y))
#Obtain the delta and apply it to update the limits of the figure in the plot
delta = data[axis_id] - last_data[axis_id]
new_lim = lim[0] - delta, lim[1] - delta
return new_lim
def _pan(self, event):
"""Execute function based on the name of it"""
if event.name == 'button_press_event': # begin pan
self._event = event
elif event.name == 'button_release_event': # end pan
self._event = None
elif event.name == 'motion_notify_event': # pan
if self._event is None:
return
if event.x != self._event.x:
for ax in self._axes[0]:
xlim = self._pan_update_limits(ax, 0, event, self._event)
ax.set_xlim(xlim)
if event.y != self._event.y:
for ax in self._axes[1]:
ylim = self._pan_update_limits(ax, 1, event, self._event)
ax.set_ylim(ylim)
if event.x != self._event.x or event.y != self._event.y:
self._draw()
self._event = event
def _on_mouse_press(self, event):
"""Set axes values based on point selected"""
if self._pressed_button is not None:
return # Discard event if a button is already pressed
x_axes = set()
y_axes = set()
for ax in self.figure.axes:
#Similar to checking event.inaxes == axis
x_axes, y_axes = self._axes_to_update(event)
if ax.contains(event)[0]:
x_axes.add(ax)
y_axes.add(ax)
self._axes = x_axes, y_axes
self._pressed_button = event.button
if self._pressed_button == 1: # pan
self._pan(event)
def _on_mouse_release(self, event):
if self._pressed_button == 1: # pan
self.redraw_rectangle_without_interaction()
self.redraw_ellipse_without_interaction()
self._pan(event)
pub.sendMessage('emit_data')
self._pressed_button = None
def _on_mouse_motion(self, event):
if self._pressed_button == 1: # pan
self._pan(event) | PypiClean |
/MolScribe-1.1.1.tar.gz/MolScribe-1.1.1/README.md | # MolScribe
This is the repository for MolScribe, an image-to-graph model that translates a molecular image to its chemical
structure. Try our [demo](https://huggingface.co/spaces/yujieq/MolScribe) on HuggingFace!

If you use MolScribe in your research, please cite our [paper](https://pubs.acs.org/doi/10.1021/acs.jcim.2c01480).
```
@article{
MolScribe,
title = {{MolScribe}: Robust Molecular Structure Recognition with Image-to-Graph Generation},
author = {Yujie Qian and Jiang Guo and Zhengkai Tu and Zhening Li and Connor W. Coley and Regina Barzilay},
journal = {Journal of Chemical Information and Modeling},
publisher = {American Chemical Society ({ACS})},
doi = {10.1021/acs.jcim.2c01480},
year = 2023,
}
```
## Quick Start
Run the following command to install the package and its dependencies:
```
git clone [email protected]:thomas0809/MolScribe.git
cd MolScribe
python setup.py install
```
Download the MolScribe checkpoint from [HuggingFace Hub](https://huggingface.co/yujieq/MolScribe/tree/main)
and predict molecular structures:
```python
import torch
from molscribe import MolScribe
from huggingface_hub import hf_hub_download
ckpt_path = hf_hub_download('yujieq/MolScribe', 'swin_base_char_aux_1m.pth')
model = MolScribe(ckpt_path, device=torch.device('cpu'))
output = model.predict_image_file('assets/example.png', compute_confidence=True, get_atoms_bonds=True)
```
The output is a dictionary, with the following format
```
{
'smiles': 'Fc1ccc(-c2cc(-c3ccccc3)n(-c3ccccc3)c2)cc1',
'molfile': '***',
'confidence': 0.9175,
'atoms': [{'atom_symbol': '[Ph]', 'x': 0.5714, 'y': 0.9523, 'confidence': 0.9127}, ... ],
'bonds': [{'bond_type': 'single', 'endpoint_atoms': [0, 1], 'confidence': 0.9999}, ... ]
}
```
Please refer to [`molscribe/interface.py`](molscribe/interface.py) for details and other available APIs.
For development or reproducing the experiments, please follow the instructions below.
## Experiments
### Requirements
Install the required packages
```
pip install -r requirements.txt
```
### Data
For training or evaluation, please download the corresponding datasets to `data/`.
Training data:
| Datasets | Description |
|-------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|
| USPTO <br> [Download](https://www.dropbox.com/s/3podz99nuwagudy/uspto_mol.zip?dl=0) | Downloaded from [USPTO, Grant Red Book](https://bulkdata.uspto.gov/). |
| PubChem <br> [Download](https://www.dropbox.com/s/mxvm5i8139y5cvk/pubchem.zip?dl=0) | Molecules are downloaded from [PubChem](https://ftp.ncbi.nlm.nih.gov/pubchem/Compound/), and images are dynamically rendered during training. |
Benchmarks:
| Category | Datasets | Description |
|--------------------------------------------------------------------------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Synthetic <br> [Download](https://huggingface.co/yujieq/MolScribe/blob/main/synthetic.zip) | Indigo <br> ChemDraw | Images are rendered by Indigo and ChemDraw. |
| Realistic <br> [Download](https://huggingface.co/yujieq/MolScribe/blob/main/real.zip) | CLEF <br> UOB <br> USPTO <br> Staker <br> ACS | CLEF, UOB, and USPTO are downloaded from https://github.com/Kohulan/OCSR_Review. <br/> Staker is downloaded from https://drive.google.com/drive/folders/16OjPwQ7bQ486VhdX4DWpfYzRsTGgJkSu. <br> ACS is a new dataset collected by ourself. |
| Perturbed <br> [Download](https://huggingface.co/yujieq/MolScribe/blob/main/perturb.zip) | CLEF <br> UOB <br> USPTO <br> Staker | Downloaded from https://github.com/bayer-science-for-a-better-life/Img2Mol/ |
### Model
Our model checkpoints can be downloaded from [Dropbox](https://www.dropbox.com/sh/91u508kf48cotv4/AACQden2waMXIqLwYSi8zO37a?dl=0)
or [HuggingFace Hub](https://huggingface.co/yujieq/MolScribe/tree/main).
Model architecture:
- Encoder: [Swin Transformer](https://github.com/microsoft/Swin-Transformer), Swin-B.
- Decoder: Transformer, 6 layers, hidden_size=256, attn_heads=8.
- Input size: 384x384
Download the model checkpoint to reproduce our experiments:
```
mkdir -p ckpts
wget -P ckpts https://huggingface.co/yujieq/MolScribe/resolve/main/swin_base_char_aux_1m680k.pth
```
### Prediction
```
python predict.py --model_path ckpts/swin_base_char_aux_1m680k.pth --image_path assets/example.png
```
MolScribe prediction interface is in [`molscribe/interface.py`](molscribe/interface.py).
See python script [`predict.py`](predict.py) or jupyter notebook [`notebook/predict.ipynb`](notebook/predict.ipynb)
for example usage.
### Evaluate MolScribe
```
bash scripts/eval_uspto_joint_chartok_1m680k.sh
```
The script uses one GPU and batch size of 64 by default. If more GPUs are available, update `NUM_GPUS_PER_NODE` and
`BATCH_SIZE` for faster evaluation.
### Train MolScribe
```
bash scripts/train_uspto_joint_chartok_1m680k.sh
```
The script uses four GPUs and batch size of 256 by default. It takes about one day to train the model with four A100 GPUs.
During training, we use a modified code of [Indigo](https://github.com/epam/Indigo) (included in `molscribe/indigo/`).
### Evaluation Script
We implement a standalone evaluation script [`evaluate.py`](evaluate.py). Example usage:
```
python evaluate.py \
--gold_file data/real/acs.csv \
--pred_file output/uspto/swin_base_char_aux_1m680k/prediction_acs.csv \
--pred_field post_SMILES
```
The prediction should be saved in a csv file, with columns `image_id` for the index (must match the gold file),
and `SMILES` for predicted SMILES. If prediction has a different column name, specify it with `--pred_field`.
The result contains three scores:
- canon_smiles: our main metric, exact matching accuracy.
- graph: graph exact matching accuracy, ignoring tetrahedral chirality.
- chiral: exact matching accuracy on chiral molecules.
| PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/tr46/index.js | "use strict";
var punycode = require("punycode");
var mappingTable = require("./lib/mappingTable.json");
var PROCESSING_OPTIONS = {
TRANSITIONAL: 0,
NONTRANSITIONAL: 1
};
function normalize(str) { // fix bug in v8
return str.split('\u0000').map(function (s) { return s.normalize('NFC'); }).join('\u0000');
}
function findStatus(val) {
var start = 0;
var end = mappingTable.length - 1;
while (start <= end) {
var mid = Math.floor((start + end) / 2);
var target = mappingTable[mid];
if (target[0][0] <= val && target[0][1] >= val) {
return target;
} else if (target[0][0] > val) {
end = mid - 1;
} else {
start = mid + 1;
}
}
return null;
}
var regexAstralSymbols = /[\uD800-\uDBFF][\uDC00-\uDFFF]/g;
function countSymbols(string) {
return string
// replace every surrogate pair with a BMP symbol
.replace(regexAstralSymbols, '_')
// then get the length
.length;
}
function mapChars(domain_name, useSTD3, processing_option) {
var hasError = false;
var processed = "";
var len = countSymbols(domain_name);
for (var i = 0; i < len; ++i) {
var codePoint = domain_name.codePointAt(i);
var status = findStatus(codePoint);
switch (status[1]) {
case "disallowed":
hasError = true;
processed += String.fromCodePoint(codePoint);
break;
case "ignored":
break;
case "mapped":
processed += String.fromCodePoint.apply(String, status[2]);
break;
case "deviation":
if (processing_option === PROCESSING_OPTIONS.TRANSITIONAL) {
processed += String.fromCodePoint.apply(String, status[2]);
} else {
processed += String.fromCodePoint(codePoint);
}
break;
case "valid":
processed += String.fromCodePoint(codePoint);
break;
case "disallowed_STD3_mapped":
if (useSTD3) {
hasError = true;
processed += String.fromCodePoint(codePoint);
} else {
processed += String.fromCodePoint.apply(String, status[2]);
}
break;
case "disallowed_STD3_valid":
if (useSTD3) {
hasError = true;
}
processed += String.fromCodePoint(codePoint);
break;
}
}
return {
string: processed,
error: hasError
};
}
var combiningMarksRegex = /[\u0300-\u036F\u0483-\u0489\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u0610-\u061A\u064B-\u065F\u0670\u06D6-\u06DC\u06DF-\u06E4\u06E7\u06E8\u06EA-\u06ED\u0711\u0730-\u074A\u07A6-\u07B0\u07EB-\u07F3\u0816-\u0819\u081B-\u0823\u0825-\u0827\u0829-\u082D\u0859-\u085B\u08E4-\u0903\u093A-\u093C\u093E-\u094F\u0951-\u0957\u0962\u0963\u0981-\u0983\u09BC\u09BE-\u09C4\u09C7\u09C8\u09CB-\u09CD\u09D7\u09E2\u09E3\u0A01-\u0A03\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A70\u0A71\u0A75\u0A81-\u0A83\u0ABC\u0ABE-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AE2\u0AE3\u0B01-\u0B03\u0B3C\u0B3E-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B62\u0B63\u0B82\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD7\u0C00-\u0C03\u0C3E-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C62\u0C63\u0C81-\u0C83\u0CBC\u0CBE-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CE2\u0CE3\u0D01-\u0D03\u0D3E-\u0D44\u0D46-\u0D48\u0D4A-\u0D4D\u0D57\u0D62\u0D63\u0D82\u0D83\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DF2\u0DF3\u0E31\u0E34-\u0E3A\u0E47-\u0E4E\u0EB1\u0EB4-\u0EB9\u0EBB\u0EBC\u0EC8-\u0ECD\u0F18\u0F19\u0F35\u0F37\u0F39\u0F3E\u0F3F\u0F71-\u0F84\u0F86\u0F87\u0F8D-\u0F97\u0F99-\u0FBC\u0FC6\u102B-\u103E\u1056-\u1059\u105E-\u1060\u1062-\u1064\u1067-\u106D\u1071-\u1074\u1082-\u108D\u108F\u109A-\u109D\u135D-\u135F\u1712-\u1714\u1732-\u1734\u1752\u1753\u1772\u1773\u17B4-\u17D3\u17DD\u180B-\u180D\u18A9\u1920-\u192B\u1930-\u193B\u19B0-\u19C0\u19C8\u19C9\u1A17-\u1A1B\u1A55-\u1A5E\u1A60-\u1A7C\u1A7F\u1AB0-\u1ABE\u1B00-\u1B04\u1B34-\u1B44\u1B6B-\u1B73\u1B80-\u1B82\u1BA1-\u1BAD\u1BE6-\u1BF3\u1C24-\u1C37\u1CD0-\u1CD2\u1CD4-\u1CE8\u1CED\u1CF2-\u1CF4\u1CF8\u1CF9\u1DC0-\u1DF5\u1DFC-\u1DFF\u20D0-\u20F0\u2CEF-\u2CF1\u2D7F\u2DE0-\u2DFF\u302A-\u302F\u3099\u309A\uA66F-\uA672\uA674-\uA67D\uA69F\uA6F0\uA6F1\uA802\uA806\uA80B\uA823-\uA827\uA880\uA881\uA8B4-\uA8C4\uA8E0-\uA8F1\uA926-\uA92D\uA947-\uA953\uA980-\uA983\uA9B3-\uA9C0\uA9E5\uAA29-\uAA36\uAA43\uAA4C\uAA4D\uAA7B-\uAA7D\uAAB0\uAAB2-\uAAB4\uAAB7\uAAB8\uAABE\uAABF\uAAC1\uAAEB-\uAAEF\uAAF5\uAAF6\uABE3-\uABEA\uABEC\uABED\uFB1E\uFE00-\uFE0F\uFE20-\uFE2D]|\uD800[\uDDFD\uDEE0\uDF76-\uDF7A]|\uD802[\uDE01-\uDE03\uDE05\uDE06\uDE0C-\uDE0F\uDE38-\uDE3A\uDE3F\uDEE5\uDEE6]|\uD804[\uDC00-\uDC02\uDC38-\uDC46\uDC7F-\uDC82\uDCB0-\uDCBA\uDD00-\uDD02\uDD27-\uDD34\uDD73\uDD80-\uDD82\uDDB3-\uDDC0\uDE2C-\uDE37\uDEDF-\uDEEA\uDF01-\uDF03\uDF3C\uDF3E-\uDF44\uDF47\uDF48\uDF4B-\uDF4D\uDF57\uDF62\uDF63\uDF66-\uDF6C\uDF70-\uDF74]|\uD805[\uDCB0-\uDCC3\uDDAF-\uDDB5\uDDB8-\uDDC0\uDE30-\uDE40\uDEAB-\uDEB7]|\uD81A[\uDEF0-\uDEF4\uDF30-\uDF36]|\uD81B[\uDF51-\uDF7E\uDF8F-\uDF92]|\uD82F[\uDC9D\uDC9E]|\uD834[\uDD65-\uDD69\uDD6D-\uDD72\uDD7B-\uDD82\uDD85-\uDD8B\uDDAA-\uDDAD\uDE42-\uDE44]|\uD83A[\uDCD0-\uDCD6]|\uDB40[\uDD00-\uDDEF]/;
function validateLabel(label, processing_option) {
if (label.substr(0, 4) === "xn--") {
label = punycode.toUnicode(label);
processing_option = PROCESSING_OPTIONS.NONTRANSITIONAL;
}
var error = false;
if (normalize(label) !== label ||
(label[3] === "-" && label[4] === "-") ||
label[0] === "-" || label[label.length - 1] === "-" ||
label.indexOf(".") !== -1 ||
label.search(combiningMarksRegex) === 0) {
error = true;
}
var len = countSymbols(label);
for (var i = 0; i < len; ++i) {
var status = findStatus(label.codePointAt(i));
if ((processing_option === PROCESSING_OPTIONS.TRANSITIONAL && status[1] !== "valid") ||
(processing_option === PROCESSING_OPTIONS.NONTRANSITIONAL &&
status[1] !== "valid" && status[1] !== "deviation")) {
error = true;
break;
}
}
return {
label: label,
error: error
};
}
function processing(domain_name, useSTD3, processing_option) {
var result = mapChars(domain_name, useSTD3, processing_option);
result.string = normalize(result.string);
var labels = result.string.split(".");
for (var i = 0; i < labels.length; ++i) {
try {
var validation = validateLabel(labels[i], processing_option);
labels[i] = validation.label;
result.error = result.error || validation.error;
} catch(e) {
result.error = true;
}
}
return {
string: labels.join("."),
error: result.error
};
}
module.exports.toASCII = function(domain_name, useSTD3, processing_option, verifyDnsLength) {
var result = processing(domain_name, useSTD3, processing_option);
var labels = result.string.split(".");
labels = labels.map(function(l) {
try {
return punycode.toASCII(l);
} catch(e) {
result.error = true;
return l;
}
});
if (verifyDnsLength) {
var total = labels.slice(0, labels.length - 1).join(".").length;
if (total > 253 || total === 0) {
result.error = true;
}
for (var i=0; i < labels.length; ++i) {
if (labels[i].length > 63 || labels[i].length === 0) {
result.error = true;
break;
}
}
}
if (result.error) return null;
return labels.join(".");
};
module.exports.toUnicode = function(domain_name, useSTD3) {
var result = processing(domain_name, useSTD3, PROCESSING_OPTIONS.NONTRANSITIONAL);
return {
domain: result.string,
error: result.error
};
};
module.exports.PROCESSING_OPTIONS = PROCESSING_OPTIONS; | PypiClean |
/FortranBinary-21.2.1.tar.gz/FortranBinary-21.2.1/README.rst | ====================
FortranBinary README
====================
Package for handling of FORTRAN binary data with python.
Installation:
-------------
To install the latest release from PyPI::
    pip install FortranBinary
Alternatively, download the source from the github_ repository and install it via
pip: descend into the top-level of the source tree
and launch::
pip3 install .
or to install in developers mode::
pip3 install -e .
.. _github: https://github.com/mommebutenschoen/FortranBinary
Documentation:
--------------
Documentation to this package can be found on readthedocs_.
.. _readthedocs: https://fortranbinary.readthedocs.io/
| PypiClean |
/NudeNet-2.0.9-py3-none-any.whl/nudenet/detector.py | import os
import cv2
import pydload
import logging
import numpy as np
import onnxruntime
from progressbar import progressbar
from .detector_utils import preprocess_image
from .video_utils import get_interest_frames_from_video
def dummy(x):
return x
FILE_URLS = {
"default": {
"checkpoint": "https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_default_checkpoint.onnx",
"classes": "https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_default_classes",
},
"base": {
"checkpoint": "https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_base_checkpoint.onnx",
"classes": "https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_base_classes",
},
}
class Detector:
detection_model = None
classes = None
def __init__(self, model_name="default"):
"""
model = Detector()
"""
checkpoint_url = FILE_URLS[model_name]["checkpoint"]
classes_url = FILE_URLS[model_name]["classes"]
home = os.path.expanduser("~")
model_folder = os.path.join(home, f".NudeNet/")
if not os.path.exists(model_folder):
os.makedirs(model_folder)
checkpoint_name = os.path.basename(checkpoint_url)
checkpoint_path = os.path.join(model_folder, checkpoint_name)
classes_path = os.path.join(model_folder, "classes")
if not os.path.exists(checkpoint_path):
print("Downloading the checkpoint to", checkpoint_path)
pydload.dload(checkpoint_url, save_to_path=checkpoint_path, max_time=None)
if not os.path.exists(classes_path):
print("Downloading the classes list to", classes_path)
pydload.dload(classes_url, save_to_path=classes_path, max_time=None)
self.detection_model = onnxruntime.InferenceSession(checkpoint_path)
self.classes = [c.strip() for c in open(classes_path).readlines() if c.strip()]
def detect_video(
self, video_path, mode="default", min_prob=0.6, batch_size=2, show_progress=True
):
frame_indices, frames, fps, video_length = get_interest_frames_from_video(
video_path
)
logging.debug(
f"VIDEO_PATH: {video_path}, FPS: {fps}, Important frame indices: {frame_indices}, Video length: {video_length}"
)
if mode == "fast":
frames = [
preprocess_image(frame, min_side=480, max_side=800) for frame in frames
]
else:
frames = [preprocess_image(frame) for frame in frames]
scale = frames[0][1]
frames = [frame[0] for frame in frames]
all_results = {
"metadata": {
"fps": fps,
"video_length": video_length,
"video_path": video_path,
},
"preds": {},
}
progress_func = progressbar
if not show_progress:
progress_func = dummy
for _ in progress_func(range(int(len(frames) / batch_size) + 1)):
batch = frames[:batch_size]
batch_indices = frame_indices[:batch_size]
frames = frames[batch_size:]
frame_indices = frame_indices[batch_size:]
if batch_indices:
outputs = self.detection_model.run(
[s_i.name for s_i in self.detection_model.get_outputs()],
{self.detection_model.get_inputs()[0].name: np.asarray(batch)},
)
labels = [op for op in outputs if op.dtype == "int32"][0]
scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0]
boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0]
boxes /= scale
for frame_index, frame_boxes, frame_scores, frame_labels in zip(
frame_indices, boxes, scores, labels
):
if frame_index not in all_results["preds"]:
all_results["preds"][frame_index] = []
for box, score, label in zip(
frame_boxes, frame_scores, frame_labels
):
if score < min_prob:
continue
box = box.astype(int).tolist()
label = self.classes[label]
all_results["preds"][frame_index].append(
{
"box": [int(c) for c in box],
"score": float(score),
"label": label,
}
)
return all_results
def detect(self, img_path, mode="default", min_prob=None):
if mode == "fast":
image, scale = preprocess_image(img_path, min_side=480, max_side=800)
if not min_prob:
min_prob = 0.5
else:
image, scale = preprocess_image(img_path)
if not min_prob:
min_prob = 0.6
outputs = self.detection_model.run(
[s_i.name for s_i in self.detection_model.get_outputs()],
{self.detection_model.get_inputs()[0].name: np.expand_dims(image, axis=0)},
)
labels = [op for op in outputs if op.dtype == "int32"][0]
scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0]
boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0]
boxes /= scale
processed_boxes = []
for box, score, label in zip(boxes[0], scores[0], labels[0]):
if score < min_prob:
continue
box = box.astype(int).tolist()
label = self.classes[label]
processed_boxes.append(
{"box": [int(c) for c in box], "score": float(score), "label": label}
)
return processed_boxes
def censor(self, img_path, out_path=None, visualize=False, parts_to_blur=[]):
if not out_path and not visualize:
print(
"No out_path passed and visualize is set to false. There is no point in running this function then."
)
return
image = cv2.imread(img_path)
boxes = self.detect(img_path)
if parts_to_blur:
boxes = [i["box"] for i in boxes if i["label"] in parts_to_blur]
else:
boxes = [i["box"] for i in boxes]
for box in boxes:
part = image[box[1] : box[3], box[0] : box[2]]
image = cv2.rectangle(
image, (box[0], box[1]), (box[2], box[3]), (0, 0, 0), cv2.FILLED
)
if visualize:
cv2.imshow("Blurred image", image)
cv2.waitKey(0)
if out_path:
cv2.imwrite(out_path, image)
if __name__ == "__main__":
m = Detector()
print(m.detect("/Users/bedapudi/Desktop/n2.jpg")) | PypiClean |
/Autoneuro-master_new-0.0.1.tar.gz/Autoneuro-master_new-0.0.1/EDA.py | import numpy as np
import pandas as pd
import logger
'''from sklearn.decomposition import PCA
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler'''
class DataPreprocessor:
"""
This class shall be used to include all Data Preprocessing techniques to be feed to the Machine Learning Models
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self, file_object, logger_object):
self.file_object = file_object
self.logger_object = logger_object
def impute_missing_values(self, data, mv_flag=None, target=None, strategy='median', impute_val=None,
missing_vals=None):
"""
Method Name: impute_missing_values
Description: This method will be used to impute missing values in the dataframe
Input Description:
data: Name of the input dataframe
target: Name of the target column of DataFrame
strategy: Strategy to be used for MVI (Missing Value Imputation)
--‘median’ : default for continuous variables,
replaces missing value(s) with median of the concerned column
--‘mean’
--‘mode’ : default for categorical variables
--‘fixed’ : replaces all missing values with a fixed ‘explicitly specified’ value
impute_val: None(default), can be assigned a value to be used for imputation in ‘fixed’ strategy
missing_vals: None(default), a list/tuple of missing value indicators. By default,
it considers only NaN as missing. Dictionary can be passed to consider different missing values
for different columns in format – {col_name:[val1,val2, …], col2:[…]}
mv_flag: None(default), can be passed list/tuple of columns as input for which it creates missing
value flags
On Exception: Write the exception in the log file. Raise an exception with the appropriate error message
return: A DataFrame with missing values imputed
Written By: Purvansh singh
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, "Entered into impute_missing_values method.")
try:
if isinstance(data, pd.DataFrame) and not data.empty:
self.logger_object.log(self.file_object, "Non-empty DataFrame object Identified")
dataframe = data
if mv_flag is True:
self.logger_object.log(self.file_object, "my_flag found True Imputing Dataframe.")
# Converting missing_vals to Nan Values
if missing_vals:
dataframe.replace(missing_vals, np.nan, inplace=True)
# Checking for Missing Values in Dependent Variable
if dataframe[target].isna().any():
dataframe = dataframe[dataframe[
target].notna()].copy() # Selecting the Dataframe With No missing values in Dependent column
# Checking for Missing Values in Independent Variables
Missing_data_columns = dataframe.columns[
dataframe.isna().any()].tolist() # Finding Columns with the missing data from dataframe
if strategy == 'fixed': # checking if strategy == fixed
dataframe.fillna(impute_val,
inplace=True) # Filling the Nan values with the imputed value from user
else:
for columns in Missing_data_columns: # Iterating over the columns having Nan Values
if dataframe[columns].dtype == 'object': # Checking for the categorical data
mode = dataframe[columns].mode()[0]
dataframe[columns].fillna(mode,
inplace=True) # Imputing Nan values with mode of the column
else:
if strategy == 'median': # checking if the strategy == median
median = dataframe[columns].median()
dataframe[columns].fillna(median,
inplace=True) # Imputing Nan values with median of the column
else: # The only strategy remains is mean
mean = dataframe[columns].mean()
dataframe[columns].fillna(mean,
inplace=True) # Imputing Nan values with mean of the column
else:
self.logger_object.log(self.file_object, "my_flag found False")
else:
raise Exception("No DataFrame Found")
except Exception as e:
self.logger_object.log(self.file_object,
"Error Occurred in impute_missing_values, Error statement: " + str(e))
raise Exception(e) from None # Suppressing the Error Chaining
else:
self.logger_object.log(self.file_object, "Imputed DataFrame Returned Successfully")
return dataframe
def type_conversion(self, dataset, cat_to_num=None, num_to_cat=None):
'''
Method Name: type_conversion
Description: This method will be used to convert column datatype from
numerical to categorical or vice-versa, if possible.
Input Description:
dataset: input DataFrame in which type conversion is needed
cat_to_num: None(default),list/tuple of variables that need to
be converted from categorical to numerical
num_to_cat: None(default),list/tuple of variables to be
converted from numerical to categorical
return: A DataFrame with column types changed as per requirement
On Exception : Write the exception in the log file. Raise an exception with the appropriate error message
Written By: Purvansh singh
Version: 1.0
Revisions: None
'''
self.logger_object.log(self.file_object, "Entered into type_conversion method.")
try:
if isinstance(dataset, pd.DataFrame) and not dataset.empty:
self.logger_object.log(self.file_object, "Non-empty DataFrame object Identified")
if cat_to_num is not None:
for column in cat_to_num:
dataset[column] = pd.to_numeric(dataset[column])
if num_to_cat is not None:
for column in num_to_cat:
dataset[column] = dataset[column].astype('object')
except Exception as e:
self.logger_object.log(self.file_object,
"Error Occurred in type_conversion method, Error statement: " + str(e))
raise Exception(e) from None # Suppressing the Error Chaining
else:
self.logger_object.log(self.file_object, "type_converted DataFrame Returned Successfully")
return dataset
def remove_imbalance(self, data, target, threshold=10.0, oversample=True, smote=False):
"""
Method Name: remove_imbalance
Description: This method will be used to handle unbalanced datasets(rare classes) through oversampling/ undersampling
techniques
Input Description: data: the input dataframe with target column.
threshold: the threshold of mismatch between the target values to perform balancing.
Output: A balanced dataframe.
On Failure: Raise Exception
Written By: Punit Nanda
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object,'Entered the remove_imbalance method of the DataPreprocessor class') # Logging entry to the method
try:
#data= pd.read_csv(self.training_file) # reading the data file
self.logger_object.log(self.file_object,'DataFrame Load Successful of the remove_imbalance method of the DataPreprocessor class')
#return self.data # return the read data to the calling method
self.logger_object.log(self.file_object,'X y created in the remove_imbalance method of the DataPreprocessor class')
X = data.drop(target, axis=1)
y = data[target]
self.logger_object.log(self.file_object,'Class Imbalance Process Starts in the remove_imbalance method of the DataPreprocessor class')
no_of_classes = data[target].nunique()
if no_of_classes == 2:
self.logger_object.log(self.file_object,'No of Classes is 2 in the remove_imbalance method of the DataPreprocessor class')
                thresh_satisfied = ((data[target].value_counts() / float(len(data[target])) * 100) < threshold).any()  # any class share below threshold
if thresh_satisfied:
self.logger_object.log(self.file_object,'Threshold satisfied in the remove_imbalance method of the DataPreprocessor class')
if smote:
self.logger_object.log(self.file_object,'OverSampling using SMOTE having 2 classes in the remove_imbalance method of the DataPreprocessor class')
smote = SMOTE()
X, y = smote.fit_resample(X, y)
elif oversample:
self.logger_object.log(self.file_object,'OverSampling minority classes data having 2 classes in the remove_imbalance method of the DataPreprocessor class')
ROS = RandomOverSampler(sampling_strategy='auto', random_state=42)
X, y = ROS.fit_sample(X, y)
else:
self.logger_object.log(self.file_object,'UnderSampling majority classes data having 2 classes in the remove_imbalance method of the DataPreprocessor class')
ROS = RandomUnderSampler(sampling_strategy='auto', random_state=42)
X, y = ROS.fit_sample(X, y)
else:
high = (data[target].value_counts()/float(len(data[target]))*100).ravel().max()
low = (data[target].value_counts()/float(len(data[target]))*100).ravel().min()
thresh_satisfied = ( high-low > 100.0 - threshold )
if thresh_satisfied:
self.logger_object.log(self.file_object,'Threshold satisfied in the remove_imbalance method of the DataPreprocessor class')
if smote:
self.logger_object.log(self.file_object,'OverSampling using SMOTE having more than 2 classes in the remove_imbalance method of the DataPreprocessor class')
for i in range(no_of_classes-2):
smote = SMOTE()
X, y = smote.fit_resample(X, y)
elif oversample:
self.logger_object.log(self.file_object,'OverSampling minority classes data having more than 2 classes in the remove_imbalance method of the DataPreprocessor class')
for i in range(no_of_classes-2):
ROS = RandomOverSampler(sampling_strategy='auto', random_state=42)
X, y = ROS.fit_sample(X, y)
else:
self.logger_object.log(self.file_object,'UnderSampling majority classes data having more than 2 classes in the remove_imbalance method of the DataPreprocessor class')
for i in range(no_of_classes-2):
ROS = RandomUnderSampler(sampling_strategy='auto', random_state=42)
X, y = ROS.fit_sample(X, y)
y.to_frame(name=target)
dfBalanced = pd.concat([X, y], axis=1)
self.logger_object.log(self.file_object,'Class Imbalance Process Ends in the remove_imbalance method of the DataPreprocessor class')
return dfBalanced
except Exception as e:
            self.logger_object.log(self.file_object,'Exception occurred in remove_imbalance method of the DataPreprocessor class. Exception message: '+str(e)) # Logging the exception message
self.logger_object.log(self.file_object,
'DataFrame Load Unsuccessful.Exited the remove_imbalance method of the DataPreprocessor class') # Logging unsuccessful load of data
raise Exception() # raising exception and exiting
def remove_columns_with_minimal_variance(self, data, threshold):
"""
Method Name: remove_columns_with_minimal_variance
Description: This method drops any numerical column with standard deviation below specified threshold
Input Parameter Description: data: input DataFrame in which we need to check std deviations
threshold : the threshold for std deviation below which we need to drop the columns
Output: A DataFrame with numerical columns with low std dev dropped.
On Failure: Raise Exception
Written By: PUNIT NANDA
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object,
'Entered the remove_columns_with_minimal_variance method of the DataPreprocessor class') # Logging entry to the method
try:
# self.logger_object.log(self.file_object,'Data Load Successful.') # Logging exit from the method
sel = VarianceThreshold(threshold=(threshold * (1 - threshold)))
sel_var = sel.fit_transform(data)
new_data = data[data.columns[sel.get_support(indices=True)]]
return new_data # return the read data to the calling method
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred in remove_columns_with_minimal_variance method of the DataPreprocessor class. Exception message: ' + str(
e)) # Logging the exception message
raise Exception() # raising exception and exiting
def standardize_data(self, dataframe):
"""
Method Name: standardize_data
        Description: This method will be used to standardize all the numeric variables, so that mean = 0 and std dev = 1.
Input Description: data: the input dataframe with numeric columns.
Output: Standardized data where mean of each column will be 0 and standard deviation will be 1.
On Failure: Raise Exception
Written By: Abhishek Kulkarni
Version: 1.0
Revisions: None
"""
try:
data = dataframe
stdscalar = StandardScaler()
scaled_data = stdscalar.fit_transform(data)
            scaled_data = pd.DataFrame(data=scaled_data, columns=data.columns)
return scaled_data
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred while standardizing data. Exception message: ' + str(e))
raise Exception()
def normalize_data(self, dataframe):
"""
Method Name: normalize_data
Description: This method will be used to normalize all the numeric variables. Where min value = 0 and max value = 1.
Input Description: data: the input dataframe with numeric columns.
Output: Normalized data where minimum value of each column will be 0 and maximum value of each column will be 1.
On Failure: Raise Exception
Written By: Abhishek Kulkarni
Version: 1.0
Revisions: None
"""
try:
data = dataframe
normalizer = MinMaxScaler()
normalized_data = normalizer.fit_transform(data)
            normalized_data = pd.DataFrame(data=normalized_data, columns=data.columns)
return normalized_data
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred while normalizing data. Exception message: ' + str(e))
raise Exception()
def pca(self, data, var_explained):
"""
Method Name: pca
        Description: This method reduces the dimensionality of the scaled data, which enables
                     quicker processing of large data files.
input : Data which is Scaled, var_explained = 0.90(default value)
Output : It returns the scaled and reduced dimensions.
On Failure : Raise Exception
Written by : Saravanan Dhanapal
version : 1.0
revisions : None.
"""
self.data = data
self.var_explained = var_explained
self.logger_object.log(self.file_object, 'S::Entered the PCA method of the DataPreprocessor class')
try:
n = len(data.keys()) # find out the no columns in the data
mat_pca = PCA(n_components=n)
mat_pca.fit(data) # applying PCA model
##calculate variance ratios
variance = mat_pca.explained_variance_ratio_
cum_var = np.cumsum(np.round(mat_pca.explained_variance_ratio_, decimals=3) * 100)
self.logger_object.log(self.file_object, 'I : PCA params variance and cum_var are :' +str(variance)+ str(cum_var))
# function for calculating number of principal components to use:
def calc_num_components(cum_var, var_explained):
for i in range(n):
if cum_var[i] >= var_explained:
return i + 1
            # call the function to calculate num_components:
n_components = calc_num_components(cum_var, var_explained)
self.logger_object.log(self.file_object, 'I : PCA n_components:' + str(n_components))
# create the PCA instance
pca = PCA(n_components=n_components)
principal_components = pca.fit_transform(data)
# Convert into dataframe
pca_data = pd.DataFrame(data=principal_components, columns=['PC' + str(i) for i in range(1, n_components + 1)])
            self.logger_object.log(self.file_object, 'C:: Completed the PCA method of the DataPreprocessor class')
return pca_data
except Exception as e:
self.logger_object.log(self.file_object,
                                   'E : Exception occurred in PCA method of the DataPreprocessor class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'E : Unsuccessful. Exited the PCA method of the DataPreprocessor class')
raise Exception()
if __name__ == '__main__':
log = logger.App_Logger()
df = pd.read_csv('train.csv')
file = open('log.txt', "a")
print(df.dtypes)
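    # Example usage (a minimal sketch; assumes train.csv has a target column
    # named 'target' -- adjust the column names to your own data):
    preprocessor = DataPreprocessor(file, log)
    df = preprocessor.impute_missing_values(df, mv_flag=True, target='target')
    df = preprocessor.remove_imbalance(df, target='target', threshold=10.0)
    print(df.shape)
    file.close()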
# Test your code by calling methods here. | PypiClean |
/CoCoMiCo-0.2.1.tar.gz/CoCoMiCo-0.2.1/src/cocomico/__main__.py | import argparse
from cocomico.pipeline import benchmark_mode , run_mode
from cocomico.utils import is_valid_dir , is_valid_file, check_valid_dir
import os
import time
import pkg_resources
import sys
VERSION = pkg_resources.get_distribution("cocomico").version
LICENSE = """ Copyright (C) 2022 Maxime Lecomte - David Sherman - Clémence Frioux - Inria BSO - Pleiade
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>\n.
"""
REQUIRES = """
Requires Clingo and clyngor package: "pip install clyngor clyngor-with-clingo"
"""
def main():
"""Run programm.
"""
parser_general=argparse.ArgumentParser()
############# CREATE COMMUNITY DATA ##########
config_generate_file_path=argparse.ArgumentParser(add_help=False)
config_generate_file_path.add_argument("-config_file", "--config_file", help='path of the config file')
############## community_benchmark ###############
com_path_bench=argparse.ArgumentParser(add_help=False)
com_path_bench.add_argument("-json_com", "--json_com", "-j",help='path of list of json files or a folder. each json representes a set of community. path1 path2 path3')
seed_path_bench=argparse.ArgumentParser(add_help=False)
seed_path_bench.add_argument("-seed_path", "--seed_path", "-s",help='path of seed file')
sbml_path=argparse.ArgumentParser(add_help=False)
sbml_path.add_argument('-sbml_path',"--sbml_path","-i",help='folder path to find sbml model')
output = argparse.ArgumentParser(add_help=False)
output.add_argument('-output',"--output", "-o",help='output path')
############## run ###############
dir_path=argparse.ArgumentParser(add_help=False)
dir_path.add_argument("-folder_path", "--folder_path", "-i",help='Directory path of a community composed of sbml or xml files.')
seed_path_samples=argparse.ArgumentParser(add_help=False)
seed_path_samples.add_argument("-seed_path", "--seed_path","-s",help='path of seed file')
######################################### subparser parser #######################################
parser = parser_general.add_subparsers(title='command',description='valid subcommands:',dest="cmd")
parser_toy=parser.add_parser("toys",help="simulation on toy example",parents=[output])
parser_samples=parser.add_parser("run",help="Simulates community from folder",parents=[dir_path,seed_path_samples,output])
parser_bench=parser.add_parser("benchmark",help="Simulates communities from json file",parents=[com_path_bench,seed_path_bench,sbml_path,output])
    # Error handling 1: if one or more arguments are invalid, print the help
# ex: cocomico
try:
parser_general.parse_args()
except:
parser_general.print_help()
print("ERROR: one or more arguments are missing or badly written")
sys.exit(1)
    # Error handling 2: if no argument is given, print the help.
# ex : cocomico
if len(sys.argv) == 1:
parser_general.print_help()
print("ERROR: Arguments are missing")
sys.exit(1)
    # Error handling 3: if only a subcommand is given without its arguments, print the help.
# ex : cocomico run
if len(sys.argv) == 2:
parser_general.print_help()
print("ERROR: Arguments are missing")
sys.exit(1)
    ##### Validity checks: output directory is writable and input paths exist
args = parser_general.parse_args()
# Test writing in out_directory if a subcommand is given else print version and help
if args.cmd:
if not is_valid_dir(args.output):
print("ERROR: Impossible to access/create output directory")
sys.exit(1)
        # Check that the input paths are valid:
if "seed_path" in args and args.seed_path is not None:
if not is_valid_file(args.seed_path):
print("ERROR: " ,args.seed_path + " is not a correct filepath")
sys.exit(1)
if "json_com" in args and args.json_com is not None:
if not is_valid_file(args.json_com):
print("ERROR: " ,args.json_com + " is not a correct filepath")
sys.exit(1)
if "sbml_path" in args and args.sbml_path is not None:
if not check_valid_dir(args.sbml_path):
print("ERROR: " ,args.sbml_path + " is not a correct directory path")
sys.exit(1)
if "folder_path" in args and args.folder_path is not None:
if not check_valid_dir(args.folder_path):
print("ERROR: " ,args.folder_path + " is not a correct directory path")
sys.exit(1)
if args.cmd == 'toys':
package_path = os.path.dirname(os.path.realpath(__file__))
workflow_data_path = os.path.join(package_path, 'toys')
print("Launching workflow on test data")
models_benchmarks=os.path.join(workflow_data_path,'communities.json')
seed_file=os.path.join(workflow_data_path,'seeds.sbml')
sbml_paths_benchmarks=os.path.join(workflow_data_path,'sbml/')
output=args.output
benchmark_mode(models_benchmarks,seed_file,sbml_paths_benchmarks,output)
elif args.cmd == 'run':
models=args.folder_path
seed_file=args.seed_path
output=args.output
start = time.time()
run_mode(models,seed_file,output)
td = round(time.time() - start,3)
m, s = divmod(td, 60)
h, m = divmod(m, 60)
if h == 0.0 and m == 0.0:
print('runs done in ',s,'s')
elif h == 0.0 and m != 0.0:
print('runs done in ',m,'m ',s,'s')
else:
print('runs done in ',h,'h ',m,'m ',s,'s')
elif args.cmd == 'benchmark':
models=args.json_com
seed_file=args.seed_path
sbml_path=args.sbml_path
output=args.output
global_start_time = time.time()
benchmark_mode(models,seed_file,sbml_path, output)
td = round(time.time() - global_start_time,3)
m, s = divmod(td, 60)
h, m = divmod(m, 60)
if h == 0.0 and m == 0.0:
print('runs done in ',s,'s')
elif h == 0.0 and m != 0.0:
print('runs done in ',m,'m ',s,'s')
else:
print('runs done in ',h,'h ',m,'m ',s,'s') | PypiClean |
/HTSeq-0.13.5.tar.gz/HTSeq-0.13.5/README.md | 
[](https://htseq.readthedocs.io)
# HTSeq
**DEVS**: https://github.com/htseq/htseq
**DOCS**: https://htseq.readthedocs.io
A Python library to facilitate processing and analysis of data
from high-throughput sequencing (HTS) experiments. A popular use of ``HTSeq``
is ``htseq-count``, a tool to quantify gene expression in RNA-Seq and similar
experiments.
## Requirements
To use ``HTSeq`` you will need:
- ``Python >= 3.6`` (**note**: ``Python 2.7`` support has been dropped)
- ``numpy``
- ``pysam``
To run the ``htseq-qa`` script, you will also need:
- ``matplotlib``
Both **Linux** and **OSX** are supported and binaries are provided on PyPI. We
would like to support **Windows** but currently lack the expertise to do so. If
you would like to take on the Windows release and maintenance, please open an
issue and we'll try to help.
A source package, which should require neither ``Cython`` nor ``SWIG``, is also
provided on PyPI.
To **develop** `HTSeq` you will **also** need:
- ``Cython >=0.29.5``
- ``SWIG >=3.0.8``
## Installation
### PIP
To install directly from PyPI:
```bash
pip install HTSeq
```
To install a specific version:
```bash
pip install 'HTSeq==0.14.0'
```
If this fails, please install all dependencies first:
```bash
pip install matplotlib
pip install Cython
pip install pysam
pip install HTSeq
```
### setup.py (distutils/setuptools)
Install the dependencies with your favourite tool (``pip``, ``conda``,
etc.).
To install ``HTSeq`` itself, run:
```bash
python setup.py build install
```
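
## Quick usage example

A minimal sketch of quantifying gene expression with ``htseq-count`` (the file
names are placeholders; run ``htseq-count --help`` for the full list of
options):

```bash
htseq-count -f bam -s no alignments.bam annotation.gtf > counts.tsv
```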
## Authors
- Since 2016: Fabio Zanini @ http://fabilab.org.
- 2020-2015: Simon Anders, Wolfgang Huber
| PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/plugins/specialchar/dialogs/lang/ru.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang("specialchar","ru",{euro:"Знак евро",lsquo:"Левая одинарная кавычка",rsquo:"Правая одинарная кавычка",ldquo:"Левая двойная кавычка",rdquo:"Левая двойная кавычка",ndash:"Среднее тире",mdash:"Длинное тире",iexcl:"перевёрнутый восклицательный знак",cent:"Цент",pound:"Фунт",curren:"Знак валюты",yen:"Йена",brvbar:"Вертикальная черта с разрывом",sect:"Знак параграфа",uml:"Умлаут",copy:"Знак охраны авторского права",ordf:"Указатель окончания женского рода ...ая",laquo:"Левая кавычка-«ёлочка»",
not:"Отрицание",reg:"Знак охраны смежных прав\\t",macr:"Макрон",deg:"Градус",sup2:"Надстрочное два",sup3:"Надстрочное три",acute:"Акут",micro:"Микро",para:"Абзац",middot:"Интерпункт",cedil:"Седиль",sup1:"Надстрочная единица",ordm:"Порядковое числительное",raquo:"Правая кавычка-«ёлочка»",frac14:"Одна четвертая",frac12:"Одна вторая",frac34:"Три четвёртых",iquest:"Перевёрнутый вопросительный знак",Agrave:"Латинская заглавная буква А с апострофом",Aacute:"Латинская заглавная буква A с ударением",Acirc:"Латинская заглавная буква А с циркумфлексом",
Atilde:"Латинская заглавная буква А с тильдой",Auml:"Латинская заглавная буква А с тремой",Aring:"Латинская заглавная буква А с кольцом над ней",AElig:"Латинская большая буква Æ",Ccedil:"Латинская заглавная буква C с седилью",Egrave:"Латинская заглавная буква Е с апострофом",Eacute:"Латинская заглавная буква Е с ударением",Ecirc:"Латинская заглавная буква Е с циркумфлексом",Euml:"Латинская заглавная буква Е с тремой",Igrave:"Латинская заглавная буква I с апострофом",Iacute:"Латинская заглавная буква I с ударением",
Icirc:"Латинская заглавная буква I с циркумфлексом",Iuml:"Латинская заглавная буква I с тремой",ETH:"Латинская большая буква Eth",Ntilde:"Латинская заглавная буква N с тильдой",Ograve:"Латинская заглавная буква O с апострофом",Oacute:"Латинская заглавная буква O с ударением",Ocirc:"Латинская заглавная буква O с циркумфлексом",Otilde:"Латинская заглавная буква O с тильдой",Ouml:"Латинская заглавная буква O с тремой",times:"Знак умножения",Oslash:"Латинская большая перечеркнутая O",Ugrave:"Латинская заглавная буква U с апострофом",
Uacute:"Латинская заглавная буква U с ударением",Ucirc:"Латинская заглавная буква U с циркумфлексом",Uuml:"Латинская заглавная буква U с тремой",Yacute:"Латинская заглавная буква Y с ударением",THORN:"Латинская заглавная буква Thorn",szlig:"Знак диеза",agrave:"Латинская маленькая буква a с апострофом",aacute:"Латинская маленькая буква a с ударением",acirc:"Латинская маленькая буква a с циркумфлексом",atilde:"Латинская маленькая буква a с тильдой",auml:"Латинская маленькая буква a с тремой",aring:"Латинская маленькая буква a с кольцом",
aelig:"Латинская маленькая буква æ",ccedil:"Латинская маленькая буква с с седилью",egrave:"Латинская маленькая буква е с апострофом",eacute:"Латинская маленькая буква е с ударением",ecirc:"Латинская маленькая буква е с циркумфлексом",euml:"Латинская маленькая буква е с тремой",igrave:"Латинская маленькая буква i с апострофом",iacute:"Латинская маленькая буква i с ударением",icirc:"Латинская маленькая буква i с циркумфлексом",iuml:"Латинская маленькая буква i с тремой",eth:"Латинская маленькая буква eth",
ntilde:"Латинская маленькая буква n с тильдой",ograve:"Латинская маленькая буква o с апострофом",oacute:"Латинская маленькая буква o с ударением",ocirc:"Латинская маленькая буква o с циркумфлексом",otilde:"Латинская маленькая буква o с тильдой",ouml:"Латинская маленькая буква o с тремой",divide:"Знак деления",oslash:"Латинская строчная перечеркнутая o",ugrave:"Латинская маленькая буква u с апострофом",uacute:"Латинская маленькая буква u с ударением",ucirc:"Латинская маленькая буква u с циркумфлексом",
uuml:"Латинская маленькая буква u с тремой",yacute:"Латинская маленькая буква y с ударением",thorn:"Латинская маленькая буква thorn",yuml:"Латинская маленькая буква y с тремой",OElig:"Латинская прописная лигатура OE",oelig:"Латинская строчная лигатура oe",372:"Латинская заглавная буква W с циркумфлексом",374:"Латинская заглавная буква Y с циркумфлексом",373:"Латинская маленькая буква w с циркумфлексом",375:"Латинская маленькая буква y с циркумфлексом",sbquo:"Нижняя одинарная кавычка",8219:"Правая одинарная кавычка",
bdquo:"Левая двойная кавычка",hellip:"Горизонтальное многоточие",trade:"Товарный знак",9658:"Черный указатель вправо",bull:"Маркер списка",rarr:"Стрелка вправо",rArr:"Двойная стрелка вправо",hArr:"Двойная стрелка влево-вправо",diams:"Черный ромб",asymp:"Примерно равно"}); | PypiClean |
/EtherollApp-2020.322-py3-none-any.whl/etherollapp/etheroll/settings_screen.py | import os
import shutil
from kivy.properties import BooleanProperty, NumericProperty
from pyetheroll.constants import ChainID
from etherollapp.etheroll.constants import KEYSTORE_DIR_SUFFIX
from etherollapp.etheroll.settings import Settings
from etherollapp.etheroll.ui_utils import SubScreen, load_kv_from_py
from etherollapp.etheroll.utils import (check_request_write_permission,
check_write_permission)
load_kv_from_py(__file__)
class SettingsScreen(SubScreen):
"""Screen for configuring network, gas price..."""
is_stored_mainnet = BooleanProperty()
is_stored_testnet = BooleanProperty()
stored_gas_price = NumericProperty()
def store_network(self):
"""Saves selected network to the store."""
network = self.get_ui_network()
Settings.set_stored_network(network)
def store_gas_price(self):
"""Saves gas price value to the store."""
gas_price = self.get_ui_gas_price()
Settings.set_stored_gas_price(gas_price)
def store_is_persistent_keystore(self):
"""
Saves the persistency option to the store.
Note that to save `True` we also check if we have write permissions.
"""
persist_keystore = self.is_ui_persistent_keystore()
persist_keystore = persist_keystore and check_write_permission()
persistency_toggled = (
Settings.is_persistent_keystore() != persist_keystore)
if persistency_toggled:
self.sync_keystore(persist_keystore)
Settings.set_is_persistent_keystore(persist_keystore)
    @staticmethod
    def sync_to_directory(source_dir, destination_dir):
"""
        Copy the content of the source dir into the destination dir.
        Files that already exist get overridden.
"""
os.makedirs(destination_dir, exist_ok=True)
files = os.listdir(source_dir)
for f in files:
source_file = os.path.join(source_dir, f)
# file path is given rather than the dir so it gets overriden
destination_file = os.path.join(destination_dir, f)
try:
shutil.copy(source_file, destination_file)
except PermissionError:
                # `copymode()` may have failed, fall back to a simple copy
shutil.copyfile(source_file, destination_file)
@classmethod
def sync_keystore_to_persistent(cls):
"""Copies keystore from non persistent to persistent storage."""
# TODO: handle dir doesn't exist
source_dir = os.path.join(
Settings.get_non_persistent_keystore_path(),
KEYSTORE_DIR_SUFFIX)
destination_dir = os.path.join(
Settings.get_persistent_keystore_path(),
KEYSTORE_DIR_SUFFIX)
cls.sync_to_directory(source_dir, destination_dir)
@classmethod
def sync_keystore_to_non_persistent(cls):
"""Copies keystore from persistent to non persistent storage."""
# TODO: handle dir doesn't exist
source_dir = os.path.join(
Settings.get_persistent_keystore_path(),
KEYSTORE_DIR_SUFFIX)
destination_dir = os.path.join(
Settings.get_non_persistent_keystore_path(),
KEYSTORE_DIR_SUFFIX)
cls.sync_to_directory(source_dir, destination_dir)
@classmethod
def sync_keystore(cls, to_persistent):
if to_persistent:
cls.sync_keystore_to_persistent()
else:
cls.sync_keystore_to_non_persistent()
def set_persist_keystore_switch_state(self, active):
"""
        The MDSwitch UI look doesn't seem to be bound to its status.
        Here the UI look is updated to match the given "active" status.
"""
mdswitch = self.ids.persist_keystore_switch_id
if self.is_ui_persistent_keystore() != active:
mdswitch.ids.thumb.trigger_action()
def load_settings(self):
"""Load json store settings to UI properties."""
self.is_stored_mainnet = Settings.is_stored_mainnet()
self.is_stored_testnet = Settings.is_stored_testnet()
self.stored_gas_price = Settings.get_stored_gas_price()
is_persistent_keystore = (
Settings.is_persistent_keystore() and check_write_permission())
self.set_persist_keystore_switch_state(is_persistent_keystore)
def store_settings(self):
"""Stores settings to json store."""
self.store_gas_price()
self.store_network()
self.store_is_persistent_keystore()
def get_ui_network(self):
"""Retrieves network values from UI."""
if self.is_ui_mainnet():
network = ChainID.MAINNET
else:
network = ChainID.ROPSTEN
return network
def is_ui_mainnet(self):
return self.ids.mainnet_checkbox_id.active
def is_ui_testnet(self):
return self.ids.testnet_checkbox_id.active
def get_ui_gas_price(self):
return self.ids.gas_price_slider_id.value
def is_ui_persistent_keystore(self):
return self.ids.persist_keystore_switch_id.active
def check_request_write_permission(self):
# previous state before the toggle
if self.is_ui_persistent_keystore():
check_request_write_permission() | PypiClean |
/Markdown-Editor-1.0.7.tar.gz/Markdown-Editor-1.0.7/markdown_editor/libs/codemirror-5.15.2/keymap/emacs.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
var Pos = CodeMirror.Pos;
function posEq(a, b) { return a.line == b.line && a.ch == b.ch; }
// Kill 'ring'
var killRing = [];
function addToRing(str) {
killRing.push(str);
if (killRing.length > 50) killRing.shift();
}
function growRingTop(str) {
if (!killRing.length) return addToRing(str);
killRing[killRing.length - 1] += str;
}
function getFromRing(n) { return killRing[killRing.length - (n ? Math.min(n, 1) : 1)] || ""; }
function popFromRing() { if (killRing.length > 1) killRing.pop(); return getFromRing(); }
var lastKill = null;
function kill(cm, from, to, mayGrow, text) {
if (text == null) text = cm.getRange(from, to);
if (mayGrow && lastKill && lastKill.cm == cm && posEq(from, lastKill.pos) && cm.isClean(lastKill.gen))
growRingTop(text);
else
addToRing(text);
cm.replaceRange("", from, to, "+delete");
if (mayGrow) lastKill = {cm: cm, pos: from, gen: cm.changeGeneration()};
else lastKill = null;
}
// Boundaries of various units
function byChar(cm, pos, dir) {
return cm.findPosH(pos, dir, "char", true);
}
function byWord(cm, pos, dir) {
return cm.findPosH(pos, dir, "word", true);
}
function byLine(cm, pos, dir) {
return cm.findPosV(pos, dir, "line", cm.doc.sel.goalColumn);
}
function byPage(cm, pos, dir) {
return cm.findPosV(pos, dir, "page", cm.doc.sel.goalColumn);
}
function byParagraph(cm, pos, dir) {
var no = pos.line, line = cm.getLine(no);
var sawText = /\S/.test(dir < 0 ? line.slice(0, pos.ch) : line.slice(pos.ch));
var fst = cm.firstLine(), lst = cm.lastLine();
for (;;) {
no += dir;
if (no < fst || no > lst)
return cm.clipPos(Pos(no - dir, dir < 0 ? 0 : null));
line = cm.getLine(no);
var hasText = /\S/.test(line);
if (hasText) sawText = true;
else if (sawText) return Pos(no, 0);
}
}
function bySentence(cm, pos, dir) {
var line = pos.line, ch = pos.ch;
var text = cm.getLine(pos.line), sawWord = false;
for (;;) {
var next = text.charAt(ch + (dir < 0 ? -1 : 0));
if (!next) { // End/beginning of line reached
if (line == (dir < 0 ? cm.firstLine() : cm.lastLine())) return Pos(line, ch);
text = cm.getLine(line + dir);
if (!/\S/.test(text)) return Pos(line, ch);
line += dir;
ch = dir < 0 ? text.length : 0;
continue;
}
if (sawWord && /[!?.]/.test(next)) return Pos(line, ch + (dir > 0 ? 1 : 0));
if (!sawWord) sawWord = /\w/.test(next);
ch += dir;
}
}
function byExpr(cm, pos, dir) {
var wrap;
if (cm.findMatchingBracket && (wrap = cm.findMatchingBracket(pos, true))
&& wrap.match && (wrap.forward ? 1 : -1) == dir)
return dir > 0 ? Pos(wrap.to.line, wrap.to.ch + 1) : wrap.to;
for (var first = true;; first = false) {
var token = cm.getTokenAt(pos);
var after = Pos(pos.line, dir < 0 ? token.start : token.end);
if (first && dir > 0 && token.end == pos.ch || !/\w/.test(token.string)) {
var newPos = cm.findPosH(after, dir, "char");
if (posEq(after, newPos)) return pos;
else pos = newPos;
} else {
return after;
}
}
}
// Prefixes (only crudely supported)
function getPrefix(cm, precise) {
var digits = cm.state.emacsPrefix;
if (!digits) return precise ? null : 1;
clearPrefix(cm);
return digits == "-" ? -1 : Number(digits);
}
function repeated(cmd) {
var f = typeof cmd == "string" ? function(cm) { cm.execCommand(cmd); } : cmd;
return function(cm) {
var prefix = getPrefix(cm);
f(cm);
for (var i = 1; i < prefix; ++i) f(cm);
};
}
function findEnd(cm, pos, by, dir) {
var prefix = getPrefix(cm);
if (prefix < 0) { dir = -dir; prefix = -prefix; }
for (var i = 0; i < prefix; ++i) {
var newPos = by(cm, pos, dir);
if (posEq(newPos, pos)) break;
pos = newPos;
}
return pos;
}
function move(by, dir) {
var f = function(cm) {
cm.extendSelection(findEnd(cm, cm.getCursor(), by, dir));
};
f.motion = true;
return f;
}
function killTo(cm, by, dir) {
var selections = cm.listSelections(), cursor;
var i = selections.length;
while (i--) {
cursor = selections[i].head;
kill(cm, cursor, findEnd(cm, cursor, by, dir), true);
}
}
function killRegion(cm) {
if (cm.somethingSelected()) {
var selections = cm.listSelections(), selection;
var i = selections.length;
while (i--) {
selection = selections[i];
kill(cm, selection.anchor, selection.head);
}
return true;
}
}
function addPrefix(cm, digit) {
if (cm.state.emacsPrefix) {
if (digit != "-") cm.state.emacsPrefix += digit;
return;
}
// Not active yet
cm.state.emacsPrefix = digit;
cm.on("keyHandled", maybeClearPrefix);
cm.on("inputRead", maybeDuplicateInput);
}
var prefixPreservingKeys = {"Alt-G": true, "Ctrl-X": true, "Ctrl-Q": true, "Ctrl-U": true};
function maybeClearPrefix(cm, arg) {
if (!cm.state.emacsPrefixMap && !prefixPreservingKeys.hasOwnProperty(arg))
clearPrefix(cm);
}
function clearPrefix(cm) {
cm.state.emacsPrefix = null;
cm.off("keyHandled", maybeClearPrefix);
cm.off("inputRead", maybeDuplicateInput);
}
function maybeDuplicateInput(cm, event) {
var dup = getPrefix(cm);
if (dup > 1 && event.origin == "+input") {
var one = event.text.join("\n"), txt = "";
for (var i = 1; i < dup; ++i) txt += one;
cm.replaceSelection(txt);
}
}
function addPrefixMap(cm) {
cm.state.emacsPrefixMap = true;
cm.addKeyMap(prefixMap);
cm.on("keyHandled", maybeRemovePrefixMap);
cm.on("inputRead", maybeRemovePrefixMap);
}
function maybeRemovePrefixMap(cm, arg) {
if (typeof arg == "string" && (/^\d$/.test(arg) || arg == "Ctrl-U")) return;
cm.removeKeyMap(prefixMap);
cm.state.emacsPrefixMap = false;
cm.off("keyHandled", maybeRemovePrefixMap);
cm.off("inputRead", maybeRemovePrefixMap);
}
// Utilities
function setMark(cm) {
cm.setCursor(cm.getCursor());
cm.setExtending(!cm.getExtending());
cm.on("change", function() { cm.setExtending(false); });
}
function clearMark(cm) {
cm.setExtending(false);
cm.setCursor(cm.getCursor());
}
function getInput(cm, msg, f) {
if (cm.openDialog)
cm.openDialog(msg + ": <input type=\"text\" style=\"width: 10em\"/>", f, {bottom: true});
else
f(prompt(msg, ""));
}
function operateOnWord(cm, op) {
var start = cm.getCursor(), end = cm.findPosH(start, 1, "word");
cm.replaceRange(op(cm.getRange(start, end)), start, end);
cm.setCursor(end);
}
function toEnclosingExpr(cm) {
var pos = cm.getCursor(), line = pos.line, ch = pos.ch;
var stack = [];
while (line >= cm.firstLine()) {
var text = cm.getLine(line);
for (var i = ch == null ? text.length : ch; i > 0;) {
var ch = text.charAt(--i);
if (ch == ")")
stack.push("(");
else if (ch == "]")
stack.push("[");
else if (ch == "}")
stack.push("{");
else if (/[\(\{\[]/.test(ch) && (!stack.length || stack.pop() != ch))
return cm.extendSelection(Pos(line, i));
}
--line; ch = null;
}
}
function quit(cm) {
cm.execCommand("clearSearch");
clearMark(cm);
}
// Actual keymap
var keyMap = CodeMirror.keyMap.emacs = CodeMirror.normalizeKeyMap({
"Ctrl-W": function(cm) {kill(cm, cm.getCursor("start"), cm.getCursor("end"));},
"Ctrl-K": repeated(function(cm) {
var start = cm.getCursor(), end = cm.clipPos(Pos(start.line));
var text = cm.getRange(start, end);
if (!/\S/.test(text)) {
text += "\n";
end = Pos(start.line + 1, 0);
}
kill(cm, start, end, true, text);
}),
"Alt-W": function(cm) {
addToRing(cm.getSelection());
clearMark(cm);
},
"Ctrl-Y": function(cm) {
var start = cm.getCursor();
cm.replaceRange(getFromRing(getPrefix(cm)), start, start, "paste");
cm.setSelection(start, cm.getCursor());
},
"Alt-Y": function(cm) {cm.replaceSelection(popFromRing(), "around", "paste");},
"Ctrl-Space": setMark, "Ctrl-Shift-2": setMark,
"Ctrl-F": move(byChar, 1), "Ctrl-B": move(byChar, -1),
"Right": move(byChar, 1), "Left": move(byChar, -1),
"Ctrl-D": function(cm) { killTo(cm, byChar, 1); },
"Delete": function(cm) { killRegion(cm) || killTo(cm, byChar, 1); },
"Ctrl-H": function(cm) { killTo(cm, byChar, -1); },
"Backspace": function(cm) { killRegion(cm) || killTo(cm, byChar, -1); },
"Alt-F": move(byWord, 1), "Alt-B": move(byWord, -1),
"Alt-D": function(cm) { killTo(cm, byWord, 1); },
"Alt-Backspace": function(cm) { killTo(cm, byWord, -1); },
"Ctrl-N": move(byLine, 1), "Ctrl-P": move(byLine, -1),
"Down": move(byLine, 1), "Up": move(byLine, -1),
"Ctrl-A": "goLineStart", "Ctrl-E": "goLineEnd",
"End": "goLineEnd", "Home": "goLineStart",
"Alt-V": move(byPage, -1), "Ctrl-V": move(byPage, 1),
"PageUp": move(byPage, -1), "PageDown": move(byPage, 1),
"Ctrl-Up": move(byParagraph, -1), "Ctrl-Down": move(byParagraph, 1),
"Alt-A": move(bySentence, -1), "Alt-E": move(bySentence, 1),
"Alt-K": function(cm) { killTo(cm, bySentence, 1); },
"Ctrl-Alt-K": function(cm) { killTo(cm, byExpr, 1); },
"Ctrl-Alt-Backspace": function(cm) { killTo(cm, byExpr, -1); },
"Ctrl-Alt-F": move(byExpr, 1), "Ctrl-Alt-B": move(byExpr, -1),
"Shift-Ctrl-Alt-2": function(cm) {
var cursor = cm.getCursor();
cm.setSelection(findEnd(cm, cursor, byExpr, 1), cursor);
},
"Ctrl-Alt-T": function(cm) {
var leftStart = byExpr(cm, cm.getCursor(), -1), leftEnd = byExpr(cm, leftStart, 1);
var rightEnd = byExpr(cm, leftEnd, 1), rightStart = byExpr(cm, rightEnd, -1);
cm.replaceRange(cm.getRange(rightStart, rightEnd) + cm.getRange(leftEnd, rightStart) +
cm.getRange(leftStart, leftEnd), leftStart, rightEnd);
},
"Ctrl-Alt-U": repeated(toEnclosingExpr),
"Alt-Space": function(cm) {
var pos = cm.getCursor(), from = pos.ch, to = pos.ch, text = cm.getLine(pos.line);
while (from && /\s/.test(text.charAt(from - 1))) --from;
while (to < text.length && /\s/.test(text.charAt(to))) ++to;
cm.replaceRange(" ", Pos(pos.line, from), Pos(pos.line, to));
},
"Ctrl-O": repeated(function(cm) { cm.replaceSelection("\n", "start"); }),
"Ctrl-T": repeated(function(cm) {
cm.execCommand("transposeChars");
}),
"Alt-C": repeated(function(cm) {
operateOnWord(cm, function(w) {
var letter = w.search(/\w/);
if (letter == -1) return w;
return w.slice(0, letter) + w.charAt(letter).toUpperCase() + w.slice(letter + 1).toLowerCase();
});
}),
"Alt-U": repeated(function(cm) {
operateOnWord(cm, function(w) { return w.toUpperCase(); });
}),
"Alt-L": repeated(function(cm) {
operateOnWord(cm, function(w) { return w.toLowerCase(); });
}),
"Alt-;": "toggleComment",
"Ctrl-/": repeated("undo"), "Shift-Ctrl--": repeated("undo"),
"Ctrl-Z": repeated("undo"), "Cmd-Z": repeated("undo"),
"Shift-Alt-,": "goDocStart", "Shift-Alt-.": "goDocEnd",
"Ctrl-S": "findNext", "Ctrl-R": "findPrev", "Ctrl-G": quit, "Shift-Alt-5": "replace",
"Alt-/": "autocomplete",
"Ctrl-J": "newlineAndIndent", "Enter": false, "Tab": "indentAuto",
"Alt-G G": function(cm) {
var prefix = getPrefix(cm, true);
if (prefix != null && prefix > 0) return cm.setCursor(prefix - 1);
getInput(cm, "Goto line", function(str) {
var num;
if (str && !isNaN(num = Number(str)) && num == (num|0) && num > 0)
cm.setCursor(num - 1);
});
},
"Ctrl-X Tab": function(cm) {
cm.indentSelection(getPrefix(cm, true) || cm.getOption("indentUnit"));
},
"Ctrl-X Ctrl-X": function(cm) {
cm.setSelection(cm.getCursor("head"), cm.getCursor("anchor"));
},
"Ctrl-X Ctrl-S": "save",
"Ctrl-X Ctrl-W": "save",
"Ctrl-X S": "saveAll",
"Ctrl-X F": "open",
"Ctrl-X U": repeated("undo"),
"Ctrl-X K": "close",
"Ctrl-X Delete": function(cm) { kill(cm, cm.getCursor(), bySentence(cm, cm.getCursor(), 1), true); },
"Ctrl-X H": "selectAll",
"Ctrl-Q Tab": repeated("insertTab"),
"Ctrl-U": addPrefixMap
});
var prefixMap = {"Ctrl-G": clearPrefix};
function regPrefix(d) {
prefixMap[d] = function(cm) { addPrefix(cm, d); };
keyMap["Ctrl-" + d] = function(cm) { addPrefix(cm, d); };
prefixPreservingKeys["Ctrl-" + d] = true;
}
for (var i = 0; i < 10; ++i) regPrefix(String(i));
regPrefix("-");
}); | PypiClean |
/FixedEffectModel-0.0.5.tar.gz/FixedEffectModel-0.0.5/fixedeffect/fe/did.py | from ..utils.DemeanDataframe import demean_dataframe
from ..utils.FormTransfer import form_transfer
from ..utils.CalDf import cal_df
from ..utils.CalFullModel import cal_fullmodel
from ..utils.WaldTest import waldtest
from ..utils.OLSFixed import OLSFixed
from ..utils.ClusterErr import clustered_error, is_nested,min_clust
from ..utils.GenCrossProd import gencrossprod, gencrossprod_dataset
import statsmodels.api as sm
from scipy.stats import t
from scipy.stats import f
import numpy as np
import pandas as pd
class did:
def __init__(self,
data_df,
dependent = None,
exog_x = None,
treatment = None,
csid = None,
tsid = None,
exp_date=None,
group_effect = 'treatment',
cluster=[],
formula = None,
robust = False,
noint = False,
c_method='cgm',
psdef=True,
figsize = (10,6),
fontsize = 15,
**kwargs
):
"""
        :param data_df: DataFrame of relevant data
        :param dependent: List of dependent variables (so far, only one dependent variable is supported)
        :param exog_x: List of exogenous or right-hand-side variables (variable by time by entity).
        :param treatment: List containing the treatment indicator column
        :param csid: List containing the cross-sectional (entity) id column
        :param tsid: List containing the time id column
        :param exp_date: Start date/period of the experiment (required)
        :param group_effect: 'treatment' (default) for group fixed effects, or 'individual' for individual fixed effects
        :param cluster: List of cluster variables
        :param formula: a string like 'y~x+x2|id+firm|id', i.e. dependent_variable~continuous_variable|fixed_effect|clusters
        :param robust: bool value of whether to get a robust variance
        :param noint: force no-intercept option
        :return: params, df, bse, tvalues, pvalues, rsquared, rsquared_adj, fvalue, f_pvalue, variance_matrix, fittedvalues, resid, summary
        **kwargs: some hidden options not supposed to be used by the user
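
        Example (a minimal sketch; the column names below are placeholders --
        adjust them to your own data):

            model = did(data_df=df,
                        dependent=['y'],
                        exog_x=['x1'],
                        treatment=['treated'],
                        csid=['id'],
                        tsid=['time'],
                        exp_date=5,
                        group_effect='treatment')
            result = model.fit()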
"""
        # force irrelevant inputs to be empty
endog_x = []
iv = []
treatment_col = treatment
exp_date = exp_date
        if exp_date is None:
            raise NameError('Missing experiment start date.')
did_effect = group_effect
if csid and tsid:
category = [csid[0], tsid[0]]
else:
category = None
if group_effect not in ['treatment','individual']:
raise NameError('group_effect should be treatment or individual')
# initialize opt in kwargs
no_print = False
for key, value in kwargs.items():
if key == 'no_print':
if value == True:
no_print = True
        # argument validation
if (exog_x is None) & (formula is None):
raise NameError('You have to input list of variables name or formula')
elif exog_x is None:
dependent, exog_x, category_input, cluster_input, endog_x, iv = form_transfer(formula)
print('dependent variable(s):', dependent)
print('independent(exogenous):', exog_x)
if len(category_input)==2:
category_input = category_input
print('csid:', category_input[0])
print('tsid:', category_input[1])
else:
category_input = category
print('category variables(fixed effects):', category_input)
print('cluster variables:', cluster_input)
else:
dependent, exog_x, category_input, cluster_input, endog_x, iv = dependent, exog_x, category, \
cluster, endog_x, iv
            if category_input is None:
                raise NameError('Missing csid or tsid')
orignal_exog_x = exog_x
# df preprocess
data_df.fillna(0, inplace=True)
data_df.reset_index(drop=True, inplace=True)
data_df = gencrossprod(data_df, exog_x)
# process DID related grammar
data_df = gencrossprod_dataset(data_df,
dependent,
exog_x,
category_input,
treatment_col,
exp_date,
did_effect,
no_print=no_print,
figsize = figsize,
fontsize = fontsize)
# did with group fixed effect
if group_effect == 'treatment':
exog_x = exog_x + [str(treatment_col[0]) + "*post_experiment"] + treatment
# did with individual effect
else:
exog_x = exog_x + [str(treatment_col[0]) + "*post_experiment"]
self.data_df = data_df
self.dependent = dependent
self.exog_x = exog_x
self.endog_x = endog_x
self.iv = iv
self.category_input = category_input
self.cluster_input = cluster_input
self.formula = formula
self.robust = robust
self.noint = noint
self.c_method = c_method
self.psdef = psdef
self.exp_date = exp_date
self.did_effect = did_effect
self.treatment = treatment
self.csid = csid
self.tsid = tsid
self.exp_date = exp_date
self.group_effect = group_effect
self.orignal_exog_x = orignal_exog_x
def fit(self,
epsilon = 1e-8,
max_iter = 1e6):
data_df = self.data_df
dependent = self.dependent
exog_x = self.exog_x
endog_x = self.endog_x
iv = self.iv
category_input = self.category_input
noint = self.noint
treatment = self.treatment
exp_date = self.exp_date
group_effect = self.group_effect
orignal_exog_x = self.orignal_exog_x
if noint is True:
k0 = 0
else:
k0 = 1
all_cols = []
for i in exog_x:
all_cols.append(i)
for i in endog_x:
all_cols.append(i)
for i in iv:
all_cols.append(i)
all_cols.append(dependent[0])
        # 2021/05/16 bug fix: did takes two category columns, but only the second (time) one is demeaned when group_effect == 'treatment'
if group_effect == 'treatment':
demeaned_df = demean_dataframe(data_df, all_cols, [category_input[1]], epsilon = epsilon, max_iter = max_iter)
else:
demeaned_df = demean_dataframe(data_df, all_cols, category_input, epsilon = epsilon, max_iter = max_iter)
if noint is False:
for i in all_cols:
demeaned_df[i] = demeaned_df[i].add(data_df[i].mean())
demeaned_df['const'] = 1
rank = cal_df(data_df, category_input)
#----------------- estimation -----------------#
# if OLS on raw data:
if noint is False:
exog_x = ['const'] + exog_x
model = sm.OLS(demeaned_df[dependent].astype(float), demeaned_df[exog_x].astype(float))
result = model.fit()
coeff = result.params.values.reshape(len(exog_x), 1)
real_resid = demeaned_df[dependent] - np.dot(demeaned_df[exog_x], coeff)
demeaned_df['resid'] = real_resid
n = demeaned_df.shape[0]
k = len(exog_x)
# initiate result object
f_result = OLSFixed()
f_result.model = 'did'
f_result.dependent = dependent
f_result.exog_x = exog_x
f_result.endog_x = []
f_result.iv = []
f_result.category_input = category_input
f_result.data_df = data_df.copy()
f_result.demeaned_df = demeaned_df
f_result.params = result.params
f_result.df = result.df_resid - rank + k0
f_result.x_second_stage = None
f_result.x_first_stage = None
f_result.treatment = treatment
f_result.exp_date = exp_date
f_result.group_effect = group_effect
f_result.cluster = []
f_result.orignal_exog_x = orignal_exog_x
# compute standard error and save in result
self.compute_se(result, f_result, n, k, rank)
# compute summary statistics and save in result
self.compute_summary_statistics(result, f_result, rank)
return f_result
def compute_summary_statistics(self,
result,
f_result,
rank):
dependent = self.dependent
category_input = self.category_input
cluster_input = self.cluster_input
data_df = self.data_df
robust = self.robust
c_method = self.c_method
exog_x = f_result.exog_x
if self.noint is True:
k0 = 0
else:
k0 = 1
demeaned_df = f_result.demeaned_df
f_result.resid = demeaned_df['resid']
f_result.tvalues = f_result.params / f_result.bse
f_result.pvalues = pd.Series(2 * t.sf(np.abs(f_result.tvalues), f_result.df), index=list(result.params.index))
proj_rss = sum(f_result.resid ** 2)
proj_rss = float("{:.8f}".format(proj_rss)) # round up
# calculate totoal sum squared of error
if k0 == 0 and category_input == []:
proj_tss = sum(((demeaned_df[dependent]) ** 2).values)[0]
else:
proj_tss = sum(((demeaned_df[dependent] - demeaned_df[dependent].mean()) ** 2).values)[0]
proj_tss = float("{:.8f}".format(proj_tss)) # round up
if proj_tss > 0:
f_result.rsquared = 1 - proj_rss / proj_tss
else:
raise NameError('Total sum of square equal 0, program quit.')
# calculate adjusted r2
if category_input != []:
# for fixed effect, k0 should not affect adjusted r2
f_result.rsquared_adj = 1 - (len(data_df) - 1) / (result.df_resid - rank + k0) * (1 - f_result.rsquared)
else:
f_result.rsquared_adj = 1 - (len(data_df) - k0) / (result.df_resid) * (1 - f_result.rsquared)
if k0 == 0:
w = waldtest(f_result.params, f_result.variance_matrix)
else:
# get rid of constant in the vc matrix
f_var_mat_noint = f_result.variance_matrix.copy()
if type(f_var_mat_noint) == np.ndarray:
f_var_mat_noint = np.delete(f_var_mat_noint, 0, 0)
f_var_mat_noint = np.delete(f_var_mat_noint, 0, 1)
else:
f_var_mat_noint = f_var_mat_noint.drop('const', axis=1)
f_var_mat_noint = f_var_mat_noint.drop('const', axis=0)
# get rid of constant in the param column
params_noint = f_result.params.drop('const', axis=0)
if category_input == []:
w = waldtest(params_noint, (n - k) / (n - k - rank) * f_var_mat_noint)
else:
w = waldtest(params_noint, f_var_mat_noint)
# calculate f-statistics
if result.df_model > 0:
# if do pooled regression
if category_input == []:
# if do pooled regression, because doesn't account for const in f test, adjust dof
scale_const = (n - k) / (n - k + k0)
f_result.fvalue = scale_const * w / result.df_model
# if do fixed effect, just ignore
else:
f_result.fvalue = w / result.df_model
else:
f_result.fvalue = 0
if len(cluster_input) > 0 and cluster_input[0] != '0' and c_method == 'cgm':
f_result.f_pvalue = f.sf(f_result.fvalue, result.df_model,
min(min_clust(data_df, cluster_input) - 1, f_result.df))
f_result.f_df_proj = [result.df_model, (min(min_clust(data_df, cluster_input) - 1, f_result.df))]
else:
f_result.f_pvalue = f.sf(f_result.fvalue, result.df_model, f_result.df)
f_result.f_df_proj = [result.df_model, f_result.df]
f_result.fittedvalues = result.fittedvalues
# get full-model related statistics
f_result.full_rsquared, f_result.full_rsquared_adj, f_result.full_fvalue, f_result.full_f_pvalue, f_result.f_df_full \
= cal_fullmodel(data_df, dependent, exog_x, cluster_input, rank, RSS=sum(f_result.resid ** 2),
originRSS=sum(result.resid ** 2))
f_result.nobs = result.nobs
f_result.yname = dependent
f_result.xname = exog_x
f_result.resid_std_err = np.sqrt(sum(f_result.resid ** 2) / (result.df_resid - rank))
if len(cluster_input) == 0 or cluster_input[0] == '0':
f_result.cluster_method = 'no_cluster'
if robust:
f_result.Covariance_Type = 'robust'
else:
f_result.Covariance_Type = 'nonrobust'
else:
f_result.cluster_method = c_method
f_result.Covariance_Type = 'clustered'
return
# compute standard error
def compute_se(self, result, f_result, n, k, rank):
if self.noint is True:
k0 = 0
else:
k0 = 1
cluster_col = self.cluster_input
category_col = self.category_input
robust = self.robust
c_method = self.c_method
psdef = self.psdef
exog_x = f_result.exog_x
demeaned_df = f_result.demeaned_df
if (len(cluster_col) == 0 or cluster_col[0] == '0') & (robust is False):
if (len(category_col) == 0):
std_error = result.bse * np.sqrt((n - k) / (n - k - rank)) # for pooled regression
else:
std_error = result.bse * np.sqrt((n - k) / (n - k + k0 - rank)) # for fe if k0=1 need to add it back
covariance_matrix = result.normalized_cov_params * result.scale * result.df_resid / f_result.df
elif (len(cluster_col) == 0 or cluster_col[0] == '0') & (robust is True):
            covariance_matrix = robust_err(demeaned_df, exog_x, category_col, n, k, k0, rank)
std_error = np.sqrt(np.diag(covariance_matrix))
else:
if category_col == []:
nested = False
else:
nested = is_nested(f_result.demeaned_df, category_col, cluster_col, exog_x)
print('category variable(s) is_nested in cluster variables:', nested)
covariance_matrix = clustered_error(demeaned_df,
exog_x,
category_col,
cluster_col,
n, k, k0, rank,
nested = nested,
c_method=c_method,
psdef=psdef)
std_error = np.sqrt(np.diag(covariance_matrix))
f_result.bse = std_error
f_result.variance_matrix = covariance_matrix
return | PypiClean |
/FuzzyClassificator-1.3.84-py3-none-any.whl/pybrain/rl/environments/simple/renderer.py | __author__ = 'Thomas Rueckstiess, [email protected]'
from pylab import plot, figure, ion, Line2D, draw, arange
from pybrain.rl.environments.renderer import Renderer
import threading
import time
class SimpleRenderer(Renderer):
def __init__(self):
Renderer.__init__(self)
self.dataLock = threading.Lock()
self.stopRequest = False
self.pathx = []
self.pathy = []
self.f = None
self.min = -1
self.max = 1
self.fig = None
self.color = 'red'
def setFunction(self, f, rmin, rmax):
self.dataLock.acquire()
self.f = f
self.min = rmin
self.max = rmax
self.dataLock.release()
def updateData(self, data):
self.dataLock.acquire()
(x, y) = data
self.pathx.append(x)
self.pathy.append(y)
self.dataLock.release()
def reset(self):
self.dataLock.acquire()
self.pathx = []
self.pathy = []
self.dataLock.release()
def stop(self):
self.dataLock.acquire()
self.stopRequest = True
self.dataLock.release()
def start(self):
self.drawPlot()
Renderer.start(self)
def drawPlot(self):
ion()
self.fig = figure()
axes = self.fig.add_subplot(111)
# draw function
xvalues = arange(self.min, self.max, 0.1)
        yvalues = list(map(self.f, xvalues))  # materialize so it can be plotted and reused by min/max below (Python 3)
plot(xvalues, yvalues)
# draw exploration path
self.line = Line2D([], [], linewidth=3, color='red')
axes.add_artist(self.line)
self.line.set_clip_box(axes.bbox)
# set axes limits
axes.set_xlim(min(xvalues) - 0.5, max(xvalues) + 0.5)
axes.set_ylim(min(yvalues) - 0.5, max(yvalues) + 0.5)
def _render(self):
while not self.stopRequest:
self.dataLock.acquire()
self.line.set_data(self.pathx, self.pathy)
self.line.set_color(self.color)
figure(self.fig.number)
draw()
self.dataLock.release()
time.sleep(0.05)
self.stopRequest = False | PypiClean |
/Djinja-0.7.tar.gz/Djinja-0.7/website_example/settings.py |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'a3%rn@^g69l(!xgnp&2v0m4cxfn2$d--o99c@1hyf41rj%hnhn'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'djinja.template.loaders.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'website_example.urls'
TEMPLATE_DIRS = (
'templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'website_example',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'djinja.contrib.admin',
# 'django.contrib.admin',
# Uncomment the next line to enable the Django Debug Toolbar:
# 'djinja.contrib.debug_toolbar',
# 'debug_toolbar',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
JINJA2_EXTENSIONS = (
'jinja2.ext.autoescape',
'djinja.template.extensions.haml',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
/7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/searchindex.js | Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["Welcome to 7Wonder-RL-Lib\u2019s documentation!"], "terms": {"index": 0, "modul": 0, "search": 0, "page": 0, "master": [], "file": [], "creat": [], "sphinx": [], "quickstart": [], "wed": [], "mai": [], "10": [], "21": [], "53": [], "31": [], "2023": [], "you": [], "can": 0, "adapt": [], "thi": 0, "complet": [], "your": 0, "like": [], "should": 0, "least": [], "contain": 0, "root": [], "toctre": [], "direct": [], "maxdepth": [], "2": 0, "caption": [], "content": [], "class": 0, "sevenwonenv": 0, "env": 0, "sevenwonderenv": 0, "player": 0, "close": 0, "after": 0, "user": 0, "ha": 0, "finish": 0, "us": 0, "environ": 0, "code": 0, "necessari": 0, "clean": 0, "up": 0, "i": 0, "critic": 0, "render": 0, "window": 0, "databas": 0, "http": 0, "connect": 0, "legalact": 0, "playernum": 0, "given": 0, "number": 0, "return": 0, "all": 0, "legal": 0, "action": 0, "list": 0, "param": [], "int": [], "mode": 0, "human": 0, "comput": 0, "frame": 0, "specifi": 0, "render_mod": 0, "dure": 0, "initi": 0, "The": 0, "metadata": 0, "possibl": 0, "wai": 0, "implement": 0, "In": 0, "addit": 0, "version": 0, "most": 0, "achiev": 0, "through": 0, "gymnasium": 0, "make": 0, "which": 0, "automat": 0, "appli": 0, "wrapper": 0, "collect": 0, "note": 0, "As": 0, "known": 0, "__init__": 0, "object": 0, "state": 0, "initialis": 0, "By": 0, "convent": 0, "none": 0, "default": 0, "continu": 0, "current": 0, "displai": 0, "termin": 0, "usual": 0, "consumpt": 0, "occur": 0, "step": 0, "doesn": 0, "t": 0, "need": 0, "call": 0, "rgb_arrai": 0, "singl": 0, "repres": 0, "A": 0, "np": 0, "ndarrai": 0, "shape": 0, "x": 0, "y": 0, "3": 0, "rgb": 0, "valu": 0, "an": 0, "pixel": 0, "imag": 0, "ansi": 0, "string": 0, "str": 0, "stringio": 0, "style": 0, "text": 0, "represent": 0, "each": 0, "time": 0, "includ": 0, "newlin": 0, "escap": 0, "sequenc": 0, "e": 0, "g": 0, "color": 0, "rgb_array_list": 0, "ansi_list": 0, "base": 0, "ar": 0, "except": 0, "rendercollect": 0, "pop": 0, "reset": 0, "sure": 0, "kei": 0, "support": 0, "chang": 0, "0": 0, "25": 0, "function": 0, "wa": 0, "longer": 0, "accept": 0, "paramet": 0, "rather": 0, "cartpol": 0, "v1": 0, "seed": 0, "option": 0, "episod": 0, "setperson": 0, "personalitylist": 0, "set": 0, "person": 0, "proce": 0, "one": 0, "turn": 0, "game": 0, "tupl": 0, "posit": 0, "1": 0, "n": 0, "arg": 0, "actioncod": 0, "id": 0, "random": 0, "new_stat": 0, "reward": 0, "done": 0, "info": 0, "librari": 0, "provid": 0, "test": 0, "reinforc": 0, "learn": 0, "7": 0, "wonder": 0, "There": 0, "multipl": 0, "ai": 0, "howev": 0, "now": 0, "mostli": 0, "cover": 0, "onli": 0, "tradit": 0, "board": 0, "go": 0, "chess": 0, "etc": 0, "52": 0, "card": 0, "poker": 0, "rummi": 0, "where": 0, "do": 0, "realli": 0, "have": 0, "interact": 0, "other": 0, "euro": 0, "good": 0, "algorithm": 0, "mani": 0, "aspect": 0, "explor": 0, "trade": 0, "deal": 0, "imperfect": 0, "inform": 0, "stochast": 0, "element": 0, "introduc": 0, "mention": 0, "abov": 0, "out": 0, "new": 0, "basic": 0, "system": 0, "allow": 0, "custom": 0, "space": 0, "To": 0, "gym": 0, "run": 0, "pip": [], "sevenwondersenv": 0, "exampl": 0, "how": 0, "declar": 0, "below": 0, "import": 0, "from": 0, "maingameenv": 0, "4": 0, "randomai": 0, "rulebasedai": 0, "dqnai": 0, "append": 0, "rang": 0, "statelist": 0, "variabl": 0, "consist": 0, "depend": 0, "add": 0, "model": 0, "py": 0, "main": 0, "init": 0, "make_choic": 
0, "For": 0, "take": 0, "choic": 0, "randomli": 0, "choos": 0, "def": 0, "self": 0, "super": 0, "ag": 0, "len": 0, "develop": 0, "build": 0}, "objects": {"SevenWonEnv.envs": [[0, 0, 0, "-", "SevenWonderEnv"]], "SevenWonEnv.envs.SevenWonderEnv": [[0, 1, 1, "", "SevenWonderEnv"]], "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv": [[0, 2, 1, "", "close"], [0, 2, 1, "", "legalAction"], [0, 2, 1, "", "render"], [0, 2, 1, "", "reset"], [0, 2, 1, "", "setPersonality"], [0, 2, 1, "", "step"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"]}, "titleterms": {"welcom": 0, "7wonder": 0, "rl": 0, "lib": 0, "": 0, "document": 0, "indic": 0, "tabl": 0, "readm": 0, "file": 0, "overview": 0, "instal": 0, "usag": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Welcome to 7Wonder-RL-Lib\u2019s documentation!": [[0, "welcome-to-7wonder-rl-lib-s-documentation"]], "Readme File": [[0, "readme-file"]], "7Wonder-RL-Lib": [[0, "wonder-rl-lib"]], "Overview": [[0, "overview"]], "Installation": [[0, "installation"]], "Usage": [[0, "usage"]], "Documentation": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "Indices and tables": [[0, "indices-and-tables"]]}, "indexentries": {"sevenwonenv.envs.sevenwonderenv": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "sevenwonderenv (class in sevenwonenv.envs.sevenwonderenv)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv"]], "close() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.close"]], "legalaction() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.legalAction"]], "module": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "render() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.render"]], "reset() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.reset"]], "setpersonality() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.setPersonality"]], "step() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.step"]]}}) | PypiClean |
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/blog/migrations/0002_auto.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field formats on 'Blog'
m2m_table_name = db.shorten_name(u'blog_blog_formats')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('blog', models.ForeignKey(orm[u'blog.blog'], null=False)),
('fileformat', models.ForeignKey(orm[u'blog.fileformat'], null=False))
))
db.create_unique(m2m_table_name, ['blog_id', 'fileformat_id'])
def backwards(self, orm):
# Removing M2M table for field formats on 'Blog'
db.delete_table(db.shorten_name(u'blog_blog_formats'))
models = {
u'blog.blog': {
'Meta': {'ordering': "('title',)", 'object_name': 'Blog'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['kitchen_sink.Author']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'formats': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['blog.FileFormat']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'podcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'blog.category': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'blog.fileformat': {
'Meta': {'ordering': "('title',)", 'object_name': 'FileFormat'},
'ext': ('django.db.models.fields.SlugField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'blog.mediafile': {
'Meta': {'object_name': 'MediaFile'},
'ext': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.FileFormat']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.Post']"})
},
u'blog.post': {
'Meta': {'ordering': "('-publish',)", 'object_name': 'Post'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['kitchen_sink.Author']", 'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.Blog']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['blog.Category']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.Image']", 'null': 'True', 'blank': 'True'}),
'imageset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.ImageSet']", 'null': 'True', 'blank': 'True'}),
'publish': ('django.db.models.fields.DateTimeField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'kitchen_sink.author': {
'Meta': {'ordering': "('name',)", 'object_name': 'Author'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.Image']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'})
},
u'kitchen_sink.image': {
'Meta': {'ordering': "('title',)", 'object_name': 'Image'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'caption_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'credit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'credit_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'kitchen_sink.imageset': {
'Meta': {'ordering': "('title',)", 'object_name': 'ImageSet'},
'captype': ('django.db.models.fields.CharField', [], {'default': "'override'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
    complete_apps = ['blog']
/KratosMultilevelMonteCarloApplication-9.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/MultilevelMonteCarloApplication/tools.py | import KratosMultiphysics
class ParametersWrapper(object):
"""
Class for handling the project parameters with different solver settings.
This class is used by Monte Carlo and Multilevel Monte Carlo algorithms.
Input:
- project_parameters: Kratos parameters
"""
def __init__(self,project_parameters):
self.project_parameters = project_parameters
def GetModelPartName(self):
"""
Method returning the model part name.
Input:
- self: an instance of the class
Output:
- model_part_name: string containing the main model part name
"""
project_parameters = self.project_parameters
if (project_parameters["solver_settings"]["solver_type"].GetString() == "ale_fluid"):
return project_parameters["solver_settings"]["fluid_solver_settings"]["model_part_name"].GetString()
else:
return project_parameters["solver_settings"]["model_part_name"].GetString()
def GetDomainSize(self):
"""
Method returning the domain size of the problem
Input:
- self: an instance of the class
Output:
- domain_size: domain size of the problem
"""
project_parameters = self.project_parameters
if (project_parameters["solver_settings"]["solver_type"].GetString() == "ale_fluid"):
return project_parameters["solver_settings"]["fluid_solver_settings"]["domain_size"].GetInt()
else:
return project_parameters["solver_settings"]["domain_size"].GetInt()
def SetModelImportSettingsInputType(self,string):
"""
        Method setting in place the input type of the model_import_settings.
Input:
- self: an instance of the class
- string: string to be set as input type
"""
if (self.project_parameters["solver_settings"]["solver_type"].GetString() == "ale_fluid"):
self.project_parameters["solver_settings"]["fluid_solver_settings"]["model_import_settings"]["input_type"].SetString(string)
else:
self.project_parameters["solver_settings"]["model_import_settings"]["input_type"].SetString(string)
def GetMaterialsFilename(self):
"""
Method returning materials filename.
Input:
- self: an instance of the class
Output:
- materials_filename: the materials filename
"""
if self.project_parameters["solver_settings"].Has("material_import_settings"):
return self.project_parameters["solver_settings"]["material_import_settings"]["materials_filename"].GetString()
else:
return None
def SetMaterialsFilename(self,string):
"""
        Method setting in place the materials filename.
        Input:
        - self: an instance of the class
        - string: string to be set as the materials filename
"""
if self.project_parameters["solver_settings"].Has("material_import_settings"):
            self.project_parameters["solver_settings"]["material_import_settings"]["materials_filename"].SetString(string)
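if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the JSON below is a hypothetical,
    # stripped-down "solver_settings" block; real runs load a full ProjectParameters.json.
    example_parameters = KratosMultiphysics.Parameters("""{
        "solver_settings": {
            "solver_type": "monolithic",
            "model_part_name": "MainModelPart",
            "domain_size": 2,
            "model_import_settings": {"input_type": "mdpa"}
        }
    }""")
    wrapper = ParametersWrapper(example_parameters)
    print(wrapper.GetModelPartName())  # -> MainModelPart
    print(wrapper.GetDomainSize())     # -> 2
    wrapper.SetModelImportSettingsInputType("use_input_model_part")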
/Music-Player-1.0.5.1.tar.gz/Music-Player-1.0.5.1/MusicPlayer/apis/netEaseApi.py |
__author__ = 'cyrbuzz'
import re
import json
import logging
import urllib.parse
from collections import namedtuple
from apiRequestsBase import HttpRequest, ignored
from netEaseEncode import encrypted_request, hashlib
logger = logging.getLogger(__name__)
SongInfo = namedtuple(
'SongInfo', ['music_id', 'url', 'author', 'time', 'name', 'music_img', 'lyric'])
class NetEaseWebApi(HttpRequest):
"""
    Written in 2015. The method names are a bit messy, but this does not affect usage; not refactoring for now.
"""
cookies = {
'appver': '2.1.2.184499',
'os': 'pc',
'channel': 'netease',
}
default_timeout = 10
def __init__(self):
super(NetEaseWebApi, self).__init__()
self.headers['Host'] = 'music.163.com'
self.headers['Referer'] = 'http://music.163.com'
self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
self.vertifyHeaders = self.headers.copy()
self.vertifyHeaders['Host'] = 'ac.dun.163yun.com'
self.vertifyHeaders['Accept'] = 'image/png,image/*;q=0.8,*/*;q=0.5'
self.vertifyHeaders['Content-Type'] = ''
self.urlEamilHeaders = self.headers.copy()
self.urlEamilHeaders['Referer'] = ''
self.urlEamilHeaders['Origin'] = 'orpheus://orpheus'
def httpRequest(self, *args, **kwargs):
data = kwargs.get('data')
if data:
kwargs['data'] = encrypted_request(data)
        logger.info("Performing NetEase Cloud URL request, args: {0}, kwargs: {1}".format(args, kwargs))
html = super(NetEaseWebApi, self).httpRequest(*args, **kwargs)
with ignored():
return json.loads(html.text)
        logger.info("url: {0} request failed. Header: {1}".format(
args[0], kwargs.get('headers')))
return False
    def login(self, username, password):
        """
        Log in. "Remember login" is enabled by default.
        Phone accounts use /weapi/login/cellphone?csrf_token=
        Email accounts use /weapi/login?csrf_token=
        Captcha handling is not implemented yet.
        """
password = password.encode()
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
data = {'password': password, 'rememberLogin': 'true'}
# email = data.update({'username': username}) if '@' in username else data.update({'phone': username})
email = True if '@' in username else False
if email:
data['username'] = username
# https://ac.dun.163yun.com/v1/d?d=nsHeOgdOdpeTDoF4zMLze4xEBfrKIZ2fQzMlHTn2VTgJt%2BkQAVDZ%2B9L5X1DQ%2BkjffAzVQgCLt0EE60l6qm4OGM%2F7FnsP7OO%2B3eZNebXpl6qZt7Q36oeUCUM%2BQcfaX1lsYvHM9vsVpXIJAok6el%2F9uwfX%2BtvF056U%5C8XllLj62g0K22jv2ZYZ8DNU7ws5YSYo%5CRGMRrcJisIyYtP8pbAqiGqi3ZnV%5CLM9568YMdDomJJZc77BTD%5CVwUsj%2Fgv76g%2FCT885OtsSfkOhMVmIESXztxyrLyxt52Vs1wOG%5CyIeR6x%2BaJsYmbjTJXQ2M2DlOv5jqe3PyqHBp0Ar%2FG1ueK5xJve3A0QAS9s4qMXG%2FIpiXvjUT6jA28GNxw3ZFzwNY1IinR%2FH4CooLpNP0bokXAq8Z%2FooRZRZg4uvPa9jHR7l2LlRaTO0oIYvAPvQshOyTq%5CAfUVuLtjyhmkPjgdJNpguxGOKVGqghlYwcrmKuTU8ytT2qxb5x0opJneyEbvGor1LcRC5m9%5CdKlhJ7KbLjdKQGX7l4nHdMe9OS2ViwGXaqHIVKS1Zrck2UsYu1AAjaVOXSOO0Y%2FqhTFHEdrT11vxAf6hRjEdZS8nJxHxv5zsvpx2XXUBUikMum81uF2cdwc0PF1YoITmlGA1IbwXGE3mUfV3Ggvk0%5C1djVd8o3C3F%2BOvQ89V0pq4mBbJ1OSiT4%2FV56qPjLYuIX7leuZGkNYlJIhFypeQDa7CvCh%2BBhVCh%2BO3pt%2FVwbFkrZlmoRM%2BdB22H3z%2F%2Bn%2FWW&cb=_jsonp_ssd1507090923068&x.js
# https://ac.dun.163yun.com/v1/d?d=FvHNjq2CwM%2FgqdENT6g0AUjuRaNfeeO%5CkX64uXxNGYraS6AVKH0jrEFbxq1y%2BveQPiOClFtY0OqQ8G6OCQuK%2F4zKrVAsaT9XPiPS1%2BhrPj6TT1mX%2BBFFq%2FZafGazTvU13DluLC2Khhid80cbwi95%2BzO4CjubRjNlhJ9VYuoTGErCi8LlSkjUAYplAkfkATJOBQK%2FFg4RAysIEsUpbACTUFqlhQqVL5aBbR8XhN4HooPPOC9L0IY9TFMfI14hhgLc9AiXSq7S5wkjje5fc3tXrfVZqptla8s%2BhFPJNNyKw8IdNJ6Ik9p9aTC8tXkU1yk%2FZeicCMU48zgDvmTjBYVi1xeQ0xxzLZmvA%5C61rJOJNjqC%2Bh0M%2BCy5HtoceoppxfyC4o80YP%5CR3LP5yb03jL89sbqC48mf6a1aTu6d5MnCGnDl17o%2Frk8onMOYLN8YX0Qf3EORGP547CcNlp6ZA83VcwrYGzR%5CYoKbrlLR7dfMb%5CoMusuXm1cGakS6rInXZNQdE%2FFN2OUhlw%5CstCL0UIbw4IsvuwMXl2O5sZhGCejE%2F2%2F1lZ7u57FCHp9BQ4tG5QjJb%2FLQi6V%5CraQLMJxq%5CBuiKbJr7uZrUj4LIq4jsw7jMqmA7o4uM5JMHbzEdg7k%5CsFyM8x9hdeG7owXIDmFuCpfTqiH9wMCZa0DV%2F8m%2BCOuG8q%2F87cGDyvaFlRVoRgnuqQHVE5%2BV5Z6iAMYGtOFQH%2BBLvjPI1cF0yT1twyTR1e0FTnMI3tNjoHH%2Fy%2Fyb1hZCuuIJoJJP%2FWW&cb=_jsonp_ssd1507090962239&x.js
# a = '1_ZfeoJYpgEKaPdueAuHxAz56XkZ28JiOG_hUOMgOEgFt1H0Azl4sFFeKjDBIbrhKWD'
data['clientToken'] = "1_ZfeoJYpgEKaPdueAuHxAz56XkZ28JiOG_SrSyeuuSjQrobJdGvXFN2Jo4fzHb+oRQ"
else:
data['phone'] = username
urlEmail = 'http://music.163.com/weapi/login?csrf_token='
urlPhone = 'http://music.163.com/weapi/login/cellphone?csrf_token='
url = urlEmail if email else urlPhone
html = self.httpRequest(url, method='POST', data=data)
return html
def user_playlist(self, uid, offset=0, limit=1000):
"""
        Updated 2017/8/12 to the new API.
        Personal playlists.
"""
# url = 'http://music.163.com/api/user/playlist/?offset=%s&limit=1000&uid=%s' % (offset, uid)
# html = self.httpRequest(url, method='GET', cookies=self.cookies)
# return html['playlist']
data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
url = 'http://music.163.com/weapi/user/playlist'
html = self.httpRequest(url, method='POST', data=data)
try:
return html['playlist']
except:
return {}
def all_playlist(self, cat='全部歌单', types='all', offset=0, index=1):
"""
        All playlists, returned as a list of dicts.
"""
url = 'http://music.163.com/api/playlist/list?cat=%s&type=%s&order=%s&offset=%d&total=true&limit=30&index=%d'\
% (urllib.parse.quote(cat), types, types, offset, index)
html = self.httpRequest(url, method='GET', cookies=self.cookies)
try:
return html['playlists']
except:
return {}
def details_playlist(self, ids):
"""
        Playlist details.
"""
url = 'http://music.163.com/api/playlist/detail?id={0}' .format(ids)
html = self.httpRequest(url, method="GET", cookies=self.cookies)
with ignored():
return html['result']
return False
def search(self, s, offset=0, limit=100, stype=1):
"""
        Search.
        stype values: song (1), album (10), artist (100), playlist (1000), user (1002).
        API updated 2017/7/15.
"""
# url = 'http://music.163.com/api/search/get/web'
url = 'http://music.163.com/weapi/cloudsearch/get/web'
data = {
's': s,
'offset': str(offset),
'limit': str(limit),
'type': str(stype)
}
html = self.httpRequest(url, method='POST', data=data)
try:
return html['result']
except:
return {'songCount': 0, 'songs': []}
def singsUrl(self, ids: list):
"""
        Updated 2017/7/14.
        Returns the playback URLs for the given song ids.
"""
data = {'csrf_token': '', 'ids': ids, 'br': 999000}
url = "http://music.163.com/weapi/song/enhance/player/url"
html = self.httpRequest(url, method='POST', data=data)
with ignored():
return html['data']
        logger.info('Song request failed: ids {0}'.format(ids))
return False
def newsong(self, areaID=0, offset=0, total='true', limit=100):
"""
        Latest music -- newly released songs.
        areaID: 0 all, 9 Chinese, 96 Western, 16 Korean, 8 Japanese.
"""
url = 'http://music.163.com/api/discovery/new/songs?areaId=%d&offset=%d&total=%s&limit=%d' %\
(areaID, offset, total, limit)
html = self.httpRequest(url, method='GET', cookies=self.cookies)
return html['data']
def fnewsong(self, year=2015, month=4, area='ALL'):
"""
        Latest music -- new album releases.
        area: ALL (all), ZH (Chinese), EA (Western), KR (Korean), JP (Japanese).
"""
url = 'http://music.163.com/api/discovery/new/albums/area?year=%d&month=%d&area=%s&type=hot&offset=0&total=true&limit=20&rcmd=true' \
% (year, month, area)
html = self.httpRequest(url, method="GET", cookies=self.cookies)
return html['monthData']
def lyric(self, ids):
url = 'http://music.163.com/api/song/lyric?os=osx&id={0}&lv=-1&kv=-1&tv=-1'.format(
ids)
html = self.httpRequest(url, method='GET')
try:
return html['lrc']['lyric']
except:
return False
def getContainedPlaylists(self, songId) -> set:
"""
        Given a song id, return the playlists that contain it (typically 3).
        This requires parsing the HTML document directly:
        1. Fetch http://music.163.com/song?id=29953681
        2. Extract the playlist ids with plain regular expressions, to avoid extra dependencies.
"""
rawHtml = super().httpRequest(
'http://music.163.com/song?id={}'.format(songId), method='GET')
containedUl = re.findall(
r'<ul class="m-rctlist f-cb">[.\s\S]+?</ul>', rawHtml.text)
if not containedUl:
containedUl = ''
else:
containedUl = containedUl[0]
playlists = set(re.findall(r'data-res-id="(.+)"', containedUl))
return playlists
def getRandomSongFromPlaylist(self, playlistId) -> list:
"""
        Only returns a list with the song information of the playlist.
"""
allSong = self.details_playlist(playlistId)
if allSong:
tracks = allSong['tracks']
SongInfoList = []
for i in tracks:
songInfo = SongInfo(music_id=i['id'],
music_img=i['album']['blurPicUrl'],
url="http(0)",
lyric=None,
time=i['duration'],
name=i['name'],
author='-'.join([x['name']
for x in i['artists']])
)
SongInfoList.append(songInfo)
return SongInfoList
return False
netease = NetEaseWebApi()
if __name__ == '__main__':
    help(netease)
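    # Hypothetical usage sketch (commented out: it needs network access to the
    # NetEase endpoints, which may have changed since this module was written):
    # results = netease.search('someone like you', limit=5)
    # if results['songCount']:
    #     song_id = results['songs'][0]['id']
    #     print(netease.singsUrl([song_id]))
    #     print(netease.lyric(song_id))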
/MAVR-0.93.tar.gz/MAVR-0.93/scripts/annotation/gff_examine.py | __author__ = 'mahajrod'
import os
import argparse
import pprint
from collections import OrderedDict
import matplotlib
matplotlib.use('Agg')
os.environ['MPLCONFIGDIR'] = '/tmp/'
import matplotlib.pyplot as plt
plt.ioff()
import numpy as np
from BCBio.GFF import GFFExaminer
from BCBio import GFF
from RouToolPa.Collections.General import TwoLvlDict
from RouToolPa.Routines.Sequence import get_feature_lengths, get_total_feature_lengths, feature_lengths_collapse_records
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--gff", action="store", dest="gff",
help="gff to examine")
parser.add_argument("-o", "--len_file", action="store", dest="len_file",
help="Output file for feature lengths", default=None)
parser.add_argument("-p", "--prefix", action="store",
dest="prefix",
help="Prefix of output files",
default="prefix")
parser.add_argument("-l", "--length_distribution_file_prefix", action="store",
dest="len_distr_file",
                    help="Output file with lengths distributions",
default="length_distribution")
args = parser.parse_args()
examiner = GFFExaminer()
with open(args.gff, "r") as in_fd:
pprint.pprint(examiner.parent_child_map(in_fd))
with open(args.gff, "r") as in_fd:
record_dict = dict([(record.id, record) for record in GFF.parse(in_fd)])
gene_dict = OrderedDict({})
for record_id in record_dict:
for feature in record_dict[record_id].features:
if feature.type == "gene":
gene_dict[feature.qualifiers["Name"][0]] = OrderedDict({})
for sub_feature in feature.sub_features:
gene_dict[feature.qualifiers["Name"][0]][sub_feature.type] = len(sub_feature)
if feature.type in ("snoRNA", "ncRNA", "snRNA"):
gene_dict[feature.qualifiers["Name"][0]] = OrderedDict({"ncRNA": len(feature)})
with open("%s_test.t" % args.prefix, "w") as out_fd:
for gene in gene_dict:
for sub_feature in gene_dict[gene]:
out_fd.write("%s\t%s\t%i\n" % (gene, sub_feature, gene_dict[gene][sub_feature]))
lengths_dict = get_feature_lengths(record_dict)
count_dict = TwoLvlDict({})
for record in lengths_dict:
count_dict[record] = {}
for feature_type in lengths_dict[record]:
count_dict[record][feature_type] = len(lengths_dict[record][feature_type])
count_dict.write("%s_counts.t" % args.prefix)
total_lengths = get_total_feature_lengths(lengths_dict, out_filename="%s_feature_lengths.t" % args.prefix)
white_list = ["five_prime_UTR", "three_prime_UTR", "CDS", "ncRNA"]
collapsed_dict = feature_lengths_collapse_records(lengths_dict,
synonym_dict={"snoRNA": "ncRNA", "snRNA": "ncRNA"})
for feature in collapsed_dict:
collapsed_dict[feature] = np.array(collapsed_dict[feature])
bin_dict = {"five_prime_UTR": np.linspace(0, 900, 91), "three_prime_UTR": np.linspace(0, 1600, 81),
"CDS": np.linspace(0, 16000, 81), "ncRNA": 40}
plt.figure(1, dpi=150, figsize=(24, 12))
index = 1
for feature_type in white_list:
if feature_type not in collapsed_dict:
continue
plt.subplot(2, 2, index)
plt.title(feature_type + " (%i)" % len(collapsed_dict[feature_type]))
percentile_list = np.percentile(collapsed_dict[feature_type], [1, 5, 50, 95, 99])
plt.hist(collapsed_dict[feature_type], bins=bin_dict[feature_type],
label="Min: %i\nMax: %i\n1th percentile %i\n5th percentile %i\n50th percentile %i\n95th percentile %i\n99th percentile %i" %
(min(collapsed_dict[feature_type]),
max(collapsed_dict[feature_type]),
percentile_list[0], percentile_list[1], percentile_list[2], percentile_list[3], percentile_list[4]))
plt.xlabel("Length")
plt.ylabel("N")
plt.legend()
index += 1
plt.suptitle("Feature length distribution")
plt.savefig("%s_length_distribution.svg" % args.prefix)
plt.savefig("%s_length_distribution.eps" % args.prefix)
plt.close()
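# Example invocation (file names are placeholders):
#   python gff_examine.py -f annotation.gff -p sample
# This writes sample_test.t, sample_counts.t, sample_feature_lengths.t and the
# sample_length_distribution.svg/.eps plots.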
/Jellyfin_CLI-1.6-py3-none-any.whl/jellyfin_cli/jellyfin_client/JellyfinClient.py | from aiohttp import ClientSession
from jellyfin_cli.jellyfin_client.data_classes.View import View
from jellyfin_cli.jellyfin_client.data_classes.Shows import Episode, Show
from jellyfin_cli.jellyfin_client.data_classes.Movies import Movie
from jellyfin_cli.jellyfin_client.data_classes.Audio import Audio, Album
class InvalidCredentialsError(Exception):
def __init__(self):
        super().__init__("Invalid username, password or server URL")
class HttpError(Exception):
def __init__(self, text):
        super().__init__("Something went wrong: {}".format(text))
class ServerContext:
def __init__(self, res=None, url=None, client=None, cfg=None, username=None):
if cfg:
self.url = cfg["url"]
self.user_id = cfg["user_id"]
self.server_id = cfg["server_id"]
self.client = ClientSession(headers={
"x-emby-authorization": cfg["auth_header"]
})
self.username = cfg["username"]
else:
self.url = url
self.user_id = res["User"]["Id"]
self.server_id = res["ServerId"]
self.username = username
self.client = client
def get_token(self):
header = self.client._default_headers["x-emby-authorization"]
header = [i.split("=") for i in header.split(",")]
pairs = {k[0].strip().replace('"',""):k[1].strip().replace('"',"") for k in header}
return pairs["Token"]
class HttpClient:
def __init__(self, server, context=None):
self.client = ClientSession()
self.server = server
self.context = context
async def login(self, username, password):
try:
res = await self.client.post(self.server+'/Users/authenticatebyname',json={
"Username": username,
"Pw": password
}, headers={
"x-emby-authorization":'MediaBrowser Client="Jellyfin CLI", Device="Jellyfin-CLI", DeviceId="None", Version="10.4.3"'
})
except Exception:
raise InvalidCredentialsError()
if res.status == 200:
res = await res.json()
token = res["AccessToken"]
self.client = ClientSession(headers={
"x-emby-authorization":'MediaBrowser Client="Jellyfin CLI", Device="Jellyfin-CLI", DeviceId="None", Version="10.4.3", Token="{}"'.format(token)
})
self.context = ServerContext(res, self.server, self.client, username=username)
from jellyfin_cli.utils.login_helper import store_creds
store_creds(self.context)
return True
elif res.status == 401:
raise InvalidCredentialsError()
else:
raise HttpError(await res.text())
async def get_views(self):
res = await self.context.client.get("{}/Users/{}/Views".format(self.context.url, self.context.user_id))
if res.status == 200:
res = await res.json()
return [View(i, self.context) for i in res["Items"]]
else:
raise HttpError(await res.text())
async def get_resume(self, limit=12, types="Video"):
res = await self.context.client.get("{}/Users/{}/Items/Resume".format(self.context.url, self.context.user_id), params={
"Limit": limit,
"Recursive": "true",
"Fields": "BasicSyncInfo",
"MediaTypes": types
})
if res.status == 200:
res = await res.json()
return [Episode(r, self.context) for r in res["Items"]]
else:
raise HttpError(await res.text())
async def get_nextup(self, limit=24):
res = await self.context.client.get("{}/Shows/NextUp".format(self.context.url), params={
"UserId": self.context.user_id,
"Limit": limit,
"Recursive": "true",
"Fields": "BasicSyncInfo"
})
if res.status == 200:
res = await res.json()
return [Episode(r, self.context) for r in res["Items"]]
else:
raise HttpError(await res.text())
async def search(self, query, media_type, limit=30):
res = await self.context.client.get("{}/Users/{}/Items".format(self.context.url, self.context.user_id), params={
"searchTerm": query,
"IncludeItemTypes": media_type,
"IncludeMedia": "true",
"IncludePeople": "false",
"IncludeGenres": "false",
"IncludeStudios": "false",
"IncludeArtists": "false",
"Fields": "BasicSyncInfo",
"Recursive": "true",
"Limit": limit
})
if res.status == 200:
res = await res.json()
r = []
for i in res["Items"]:
if i["Type"] == "Movie":
r.append(Movie(i, self.context))
elif i["Type"] == "Audio":
r.append(Audio(i, self.context))
elif i["Type"] == "Series":
r.append(Show(i, self.context))
elif i["Type"] == "Episode":
r.append(Episode(i, self.context))
elif i["Type"] == "MusicAlbum":
r.append(Album(i, self.context))
return r
else:
raise HttpError(await res.text())
async def get_recommended(self, limit=30):
res = await self.context.client.get("{}/Users/{}/Items".format(self.context.url, self.context.user_id), params={
"SortBy": "IsFavoriteOrLiked,Random",
"IncludeItemTypes": "Movie,Series",
"Recursive": "true",
"Limit": limit
})
if res.status == 200:
res = await res.json()
r = []
for i in res["Items"]:
if i["Type"] == "Movie":
r.append(Movie(i, self.context))
elif i["Type"] == "Series":
r.append(Show(i, self.context))
return r
else:
raise HttpError(await res.text())
#verifies that the login token is valid by running a request using it
#raises an HttpError if the token is invalid
async def test_token(self):
response = await self.context.client.get(f"{self.server}/Users/Me")
if response.status != 200:
            raise HttpError(await response.text())
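# Minimal usage sketch (commented out; the server URL and credentials are
# placeholders and a reachable Jellyfin server is assumed):
#
#   import asyncio
#
#   async def demo():
#       client = HttpClient("https://jellyfin.example.com")
#       await client.login("username", "password")
#       for view in await client.get_views():
#           print(view)
#       await client.context.client.close()
#
#   asyncio.run(demo())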
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/runtime/client_runtime_context.py | import abc
from time import sleep
from typing import TypeVar
from office365.runtime.client_request_exception import ClientRequestException
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
from office365.runtime.queries.client_query import ClientQuery
from office365.runtime.queries.read_entity import ReadEntityQuery
T = TypeVar('T', bound='ClientObject')
class ClientRuntimeContext(object):
def __init__(self):
self._queries = []
self._current_query = None
@property
def current_query(self):
"""
:rtype: office365.runtime.queries.client_query.ClientQuery
"""
return self._current_query
@property
def has_pending_request(self):
return len(self._queries) > 0
def build_request(self, query):
"""
Builds a request
:type query: office365.runtime.queries.client_query.ClientQuery
"""
self._current_query = query
return self.pending_request().build_custom_request(query)
def execute_query_retry(self, max_retry=5, timeout_secs=5, success_callback=None, failure_callback=None,
exceptions=(ClientRequestException,)):
"""
Executes the current set of data retrieval queries and method invocations and retries it if needed.
:param int max_retry: Number of times to retry the request
:param int timeout_secs: Seconds to wait before retrying the request.
:param (office365.runtime.client_object.ClientObject)-> None success_callback:
:param (int, requests.exceptions.RequestException)-> None failure_callback:
:param exceptions: tuple of exceptions that we retry
"""
for retry in range(1, max_retry + 1):
try:
self.execute_query()
if callable(success_callback):
success_callback(self.current_query.return_type)
break
except exceptions as e:
self.add_query(self.current_query)
if callable(failure_callback):
failure_callback(retry, e)
sleep(timeout_secs)
@abc.abstractmethod
def pending_request(self):
"""
:rtype: office365.runtime.client_request.ClientRequest
"""
pass
@abc.abstractmethod
def service_root_url(self):
"""
:rtype: str
"""
pass
def load(self, client_object, properties_to_retrieve=None, before_loaded=None, after_loaded=None):
"""Prepare retrieval query
:type properties_to_retrieve: list[str] or None
:type client_object: office365.runtime.client_object.ClientObject
:type before_loaded: (office365.runtime.http.request_options.RequestOptions) -> None
:type after_loaded: (T) -> None
"""
qry = ReadEntityQuery(client_object, properties_to_retrieve)
self.add_query(qry)
if callable(before_loaded):
self.before_execute(before_loaded)
if callable(after_loaded):
self.after_query_execute(after_loaded, client_object)
return self
def before_query_execute(self, action, once=True, *args, **kwargs):
"""
Attach an event handler which is triggered before query is submitted to server
:type action: (office365.runtime.http.request_options.RequestOptions, *args, **kwargs) -> None
:param bool once: Flag which determines whether action is executed once or multiple times
"""
query = self._queries[-1]
def _prepare_request(request):
"""
:type request: office365.runtime.http.request_options.RequestOptions
"""
if self.current_query.id == query.id:
if once:
self.pending_request().beforeExecute -= _prepare_request
action(request, *args, **kwargs)
self.pending_request().beforeExecute += _prepare_request
return self
def before_execute(self, action, once=True, *args, **kwargs):
"""
Attach an event handler which is triggered before request is submitted to server
:param (office365.runtime.http.request_options.RequestOptions, any) -> None action:
:param bool once: Flag which determines whether action is executed once or multiple times
"""
def _process_request(request):
if once:
self.pending_request().beforeExecute -= _process_request
action(request, *args, **kwargs)
self.pending_request().beforeExecute += _process_request
return self
def after_query_execute(self, action, *args, **kwargs):
"""
Attach an event handler which is triggered after query is submitted to server
:type action: (Response, *args, **kwargs) -> None
"""
query = self._queries[-1]
def _process_response(resp):
"""
:type resp: requests.Response
"""
resp.raise_for_status()
if self.current_query.id == query.id:
self.pending_request().afterExecute -= _process_response
action(*args, **kwargs)
self.pending_request().afterExecute += _process_response
execute_first = kwargs.pop("execute_first", False)
if execute_first and len(self._queries) > 1:
self._queries.insert(0, self._queries.pop())
return self
def after_execute(self, action, once=True, *args, **kwargs):
"""
Attach an event handler which is triggered after request is submitted to server
:param (RequestOptions, *args, **kwargs) -> None action:
:param bool once:
"""
def _process_response(response):
if once:
self.pending_request().afterExecute -= _process_response
action(response, *args, **kwargs)
self.pending_request().afterExecute += _process_response
return self
def execute_request_direct(self, path):
"""
:type path: str
"""
full_url = "".join([self.service_root_url(), "/", path])
request = RequestOptions(full_url)
return self.pending_request().execute_request_direct(request)
def execute_query(self):
"""Submit request(s) to the server"""
while self.has_pending_request:
qry = self._get_next_query()
self.pending_request().execute_query(qry)
def add_query(self, query):
"""
:type query: office365.runtime.queries.client_query.ClientQuery
"""
self._queries.append(query)
return self
def clear(self):
self._current_query = None
self._queries = []
return self
def get_metadata(self):
return_type = ClientResult(self)
def _construct_download_request(request):
"""
:type request: office365.runtime.http.request_options.RequestOptions
"""
request.url += "/$metadata"
request.method = HttpMethod.Get
def _process_download_response(response):
"""
:type response: requests.Response
"""
response.raise_for_status()
return_type.set_property("__value", response.content)
qry = ClientQuery(self)
self.before_execute(_construct_download_request)
self.after_execute(_process_download_response)
self.add_query(qry)
return return_type
def _get_next_query(self, count=1):
"""
:type count: int
"""
if count == 1:
qry = self._queries.pop(0)
else:
from office365.runtime.queries.batch import BatchQuery
qry = BatchQuery(self)
while self.has_pending_request and count > 0:
qry.add(self._queries.pop(0))
count = count - 1
self._current_query = qry
        return qry
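# Usage sketch: this class is abstract; concrete contexts (for example the
# SharePoint ClientContext in this package) subclass it. An illustrative call
# pattern, assuming valid credentials (names outside this module are examples):
#
#   ctx = ClientContext(site_url).with_credentials(credentials)
#   web = ctx.web.get()
#   ctx.execute_query()          # or ctx.execute_query_retry(max_retry=3)
#   print(web.properties.get("Title"))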
/IsPycharmRun-1.0.tar.gz/IsPycharmRun-1.0/pb_py/playertitle_pb2.py |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import enum_define_pb2 as enum__define__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='playertitle.proto',
package='FunPlus.Common.Config',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11playertitle.proto\x12\x15\x46unPlus.Common.Config\x1a\x11\x65num_define.proto\"\xea\x02\n\rPBPlayerTitle\x12\r\n\x05pt_id\x18\x01 \x01(\x05\x12\x15\n\rupgrade_score\x18\x02 \x01(\x05\x12\x16\n\x0ept_chapterName\x18\x03 \x01(\t\x12\x14\n\x0cpt_titleIcon\x18\x04 \x01(\t\x12\x14\n\x0cpt_titleName\x18\x05 \x01(\t\x12\x15\n\rpt_titleName1\x18\x06 \x01(\t\x12\x16\n\x0ept_chapterDesc\x18\x07 \x01(\t\x12\x17\n\x0fpt_chapterDesc1\x18\x08 \x01(\t\x12\x10\n\x08pt_bgPic\x18\t \x01(\t\x12\x13\n\x0bpt_subLevel\x18\n \x01(\x05\x12\x11\n\tpt_reward\x18\x0b \x03(\r\x12V\n\x10pt_upgradeEffect\x18\x0c \x01(\x0e\x32(.FunPlus.Common.Config.ETitleUpgradeType:\x12\x45TitleUpgrade_None\x12\x15\n\rpt_spRewardId\x18\r \x01(\r\"N\n\x11PBPlayerTitleList\x12\x39\n\x0bplayerTitle\x18\x01 \x03(\x0b\x32$.FunPlus.Common.Config.PBPlayerTitle'
,
dependencies=[enum__define__pb2.DESCRIPTOR,])
_PBPLAYERTITLE = _descriptor.Descriptor(
name='PBPlayerTitle',
full_name='FunPlus.Common.Config.PBPlayerTitle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='pt_id', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='upgrade_score', full_name='FunPlus.Common.Config.PBPlayerTitle.upgrade_score', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_chapterName', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_chapterName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_titleIcon', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_titleIcon', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_titleName', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_titleName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_titleName1', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_titleName1', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_chapterDesc', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_chapterDesc', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_chapterDesc1', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_chapterDesc1', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_bgPic', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_bgPic', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_subLevel', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_subLevel', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_reward', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_reward', index=10,
number=11, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_upgradeEffect', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_upgradeEffect', index=11,
number=12, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pt_spRewardId', full_name='FunPlus.Common.Config.PBPlayerTitle.pt_spRewardId', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=426,
)
_PBPLAYERTITLELIST = _descriptor.Descriptor(
name='PBPlayerTitleList',
full_name='FunPlus.Common.Config.PBPlayerTitleList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='playerTitle', full_name='FunPlus.Common.Config.PBPlayerTitleList.playerTitle', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=428,
serialized_end=506,
)
_PBPLAYERTITLE.fields_by_name['pt_upgradeEffect'].enum_type = enum__define__pb2._ETITLEUPGRADETYPE
_PBPLAYERTITLELIST.fields_by_name['playerTitle'].message_type = _PBPLAYERTITLE
DESCRIPTOR.message_types_by_name['PBPlayerTitle'] = _PBPLAYERTITLE
DESCRIPTOR.message_types_by_name['PBPlayerTitleList'] = _PBPLAYERTITLELIST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PBPlayerTitle = _reflection.GeneratedProtocolMessageType('PBPlayerTitle', (_message.Message,), {
'DESCRIPTOR' : _PBPLAYERTITLE,
'__module__' : 'playertitle_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBPlayerTitle)
})
_sym_db.RegisterMessage(PBPlayerTitle)
PBPlayerTitleList = _reflection.GeneratedProtocolMessageType('PBPlayerTitleList', (_message.Message,), {
'DESCRIPTOR' : _PBPLAYERTITLELIST,
'__module__' : 'playertitle_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBPlayerTitleList)
})
_sym_db.RegisterMessage(PBPlayerTitleList)
# @@protoc_insertion_point(module_scope)
/Dendrite_Neural_Networks-0.0.9-py3-none-any.whl/PreTrain/kmeans/bkmeans.py | import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
def bkmeans(x_train, y_train, boxes, eps=0.05):
classes = np.unique(y_train)
counter = 0
flag = 0
if len(classes)<=2:
# Analyzed patterns
pos = np.where(classes[1] == y_train)[0]
x = x_train[pos, :]
# Initial Number of Clusters
c_num = boxes[counter]
        # Applying KMeans to the selected class
kmeans = KMeans(init = 'k-means++', n_clusters = c_num, n_init = 10, max_iter = 100)
kmeans.fit(x)
pattern_labels = kmeans.labels_
clusters = np.unique(pattern_labels)
#print("Clusters n",clusters)
for cluster in clusters:
indices = np.where(cluster == pattern_labels)[0]
cluster_points = x[indices,:]
#print("clusters",cluster_points)
#print("Dimensiones",len(cluster_points[0]))
for d in range(len(cluster_points[0])):
cluster_d = cluster_points[:,d:d+1]
if flag == 0:
Wmin_arr = min(cluster_d)-eps
Wmax_arr = max(cluster_d)+eps
else:
Wmin_arr = np.concatenate((Wmin_arr, min(cluster_d)-eps), axis = 0)
Wmax_arr = np.concatenate((Wmax_arr, max(cluster_d)+eps), axis = 0)
flag += 1
#
if len(classes)>2:
for classe in classes:
# Analyzed patterns
pos = np.where(classe == y_train)[0]
x = x_train[pos, :]
# Initial Number of Clusters
c_num = boxes[counter]
            # Applying KMeans to the selected class
kmeans = KMeans(init = 'k-means++', n_clusters = c_num, n_init = 10, max_iter = 100)
kmeans.fit(x)
pattern_labels = kmeans.labels_
clusters = np.unique(pattern_labels)
for cluster in clusters:
indices = np.where(cluster == pattern_labels)[0]
cluster_points = x[indices,:]
                # For each dimension of each cluster
for d in range(len(cluster_points[0])):
cluster_d = cluster_points[:,d:d+1]
if flag == 0:
Wmin_arr = min(cluster_d)-eps
Wmax_arr = max(cluster_d)+eps
else:
Wmin_arr = np.concatenate((Wmin_arr, min(cluster_d)-eps), axis = 0)
Wmax_arr = np.concatenate((Wmax_arr, max(cluster_d)+eps), axis = 0)
flag += 1
counter += 1
dendrite = Wmin_arr, Wmax_arr
    return dendrite
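if __name__ == "__main__":
    # Illustrative sketch on synthetic data. Note that for two-class problems the
    # function only builds boxes around the second class (classes[1]); with more
    # than two classes, class i gets boxes[i] clusters.
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=200, centers=2, n_features=2, random_state=0)
    Wmin, Wmax = bkmeans(X, y, boxes=[3, 3])
    print(Wmin.shape, Wmax.shape)  # one (min, max) bound per cluster per feature dimension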
/Kapok-0.2.1-cp35-cp35m-win_amd64.whl/kapok/rvogp.py | import collections
import time
import numpy as np
def rvogfwdvol(hv, ext, inc, kz, rngslope=0.0):
"""RVoG forward model volume coherence.
For a given set of model parameters, calculate the RVoG model coherence.
Note that all input arguments must be arrays (even if they are one element
arrays), so that they can be indexed to check for nan or infinite values
(due to extreme extinction or forest height values). All input arguments
must have the same shape.
Arguments:
hv (array): Height of the forest volume, in meters.
ext (array): Wave extinction within the forest volume, in Np/m.
inc (array): Incidence angle, in radians.
kz (array): Interferometric vertical wavenumber, in radians/meter.
rngslope (array): Range-facing terrain slope angle, in radians. If not
specified, flat terrain is assumed.
Returns:
gamma: Modelled complex coherence.
"""
# Calculate the propagation coefficients.
p1 = 2*ext*np.cos(rngslope)/np.cos(inc-rngslope)
p2 = p1 + 1j*kz
# Check for zero or close to zero hv (or kz) values (e.g., negligible
# volume decorrelation).
gammav = kz*hv
ind_novolume = np.isclose(np.abs(gammav), 0)
# Check for zero or close to zero extinction values (e.g., uniform
# vertical structure function).
gammav = p2 * (np.exp(p1*hv) - 1)
ind_zeroext = np.isclose(np.abs(gammav), 0) & ~ind_novolume
# Check for infinite numerator of the volume coherence equation (e.g.,
# extremely high extinction value).
gammav = p1 * (np.exp(p2*hv) - 1)
ind_nonfinite = ~np.isfinite(gammav) & ~ind_novolume & ~ind_zeroext
# The remaining indices are where the standard equation should be valid:
ind = ~ind_zeroext & ~ind_novolume & ~ind_nonfinite
if np.any(ind_novolume):
gammav[ind_novolume] = 1
if np.any(ind_zeroext):
gammav[ind_zeroext] = ((np.exp(1j*kz*hv) - 1) / (1j*kz*hv))[ind_zeroext]
if np.any(ind_nonfinite):
gammav[ind_nonfinite] = np.exp(1j*hv*kz)[ind_nonfinite]
if np.any(ind):
gammav[ind] = ((p1 / p2) * (np.exp(p2*hv)-1) / (np.exp(p1*hv)-1))[ind]
return gammav
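# Example (illustrative values only): single-pixel forward evaluation for a 20 m
# canopy with 0.05 Np/m extinction, 45 degree incidence and kz = 0.10 rad/m:
#
#   gamma = rvogfwdvol(np.array([20.0]), np.array([0.05]),
#                      np.array([np.radians(45.0)]), np.array([0.10]))
#   print(np.abs(gamma), np.angle(gamma))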
def rvoginv(gamma, phi, inc, kz, ext=None, tdf=None, mu=0.0, rngslope=0.0,
mask=None, limit2pi=True, hv_min=0.0, hv_max=60.0, hv_step=0.01,
ext_min=0.0, ext_max=0.115, silent=False):
"""RVoG model inversion.
Calculate the RVoG model parameters which produce a modelled coherence
closest to a set of observed coherences. The model is formulated
using real-valued volumetric temporal decorrelation factor (tdf), with
physical parameters representing forest height (hv), extinction of
the radar waves within the forest canopy (ext), and the coherence of
the ground surface (phi), where arg(phi) is equal to the topographic
phase. In addition, the ground-to-volume amplitude ratio (mu) varies
as a function of the polarization.
In the single-baseline case, in order to reduce the number of unknowns
and ensure the model has a unique solution, we assume that mu for the
high coherence (from the phase diversity coherence optimization) is
fixed. By default it is set to zero, that is, we assume the high
coherence has no ground scattering component. We must then fix either
the extinction value, or the temporal decorrelation.
This function therefore requires either the ext or td keyword arguments
to be provided. The function will then optimize whichever of those two
parameters is not provided, plus the forest height. If neither
parameter is provided, tdf will be fixed to a value of 1.0 (no temporal
decorrelation).
Note that the ext, tdf, and mu keyword arguments can be provided as
a fixed single value (e.g., mu=0), as an array with the same
dimensions as gamma, or as a LUT of parameter values as a function
of the forest height parameter. In this case, a dict should be given
where dict['x'] contains the forest height values for each LUT bin,
and dict['y'] contains the parameter values. This LUT will then be
interpolated using numpy.interp to the forest height values by the
function.
Note that one cannot fix both ext and tdf using this function. The
function will always try to solve for one of these two parameters.
Arguments:
gamma (array): 2D complex-valued array containing the 'high'
coherences from the coherence optimization.
phi (array): 2D complex-valued array containing the ground
coherences (e.g., from kapok.topo.groundsolver()).
inc (array): 2D array containing the master track incidence
angle, in radians.
kz (array): 2D array containing the kz values, in radians/meter.
ext: Fixed values for the extinction parameter, in Nepers/meter.
If not specified, function will try to optimize the values of
ext and hv for fixed tdf. Default: None.
tdf: Fixed values for the temporal decorrelation factor, from 0
to 1. If not specified, the function will try to optimize
the values of tdf and hv. If both ext and tdf are left empty,
function will fix tdf to 1. Default: None.
mu: Fixed values for the ground-to-volume scattering ratio of
the gamma input argument. Default: 0.
rngslope (array): Terrain slope angle in the ground range
direction, in radians. Default: 0 (flat terrain).
mask (array): Boolean array. Pixels where (mask == True) will be
inverted, while pixels where (mask == False) will be ignored,
and hv set to -1.
limit2pi (bool): If True, function will not allow hv to go above
the 2*pi (ambiguity) height (as determined by the kz values).
If False, no such restriction. Default: True.
hv_min (float or array): Minimum allowed hv value, in meters.
Default: 0.
hv_max (float or array): Maximum allowed hv value, in meters.
            Default: 60.
hv_step (float): Function will perform consecutive searches with
progressively smaller step sizes, until the step size
reaches a value below hv_step. Default: 0.01 m.
ext_min (float): Minimum extinction value, in Np/m.
            Default: 0 Np/m.
ext_max (float): Maximum extinction value, in Np/m.
Default: 0.115 Np/m (~1 dB/m).
silent (bool): Set to True to suppress status updates. Default:
False.
Returns:
hvmap (array): Array of inverted forest height values, in meters.
extmap/tdfmap (array): If ext was specified, array of inverted tdf
values will be returned here. If tdf was specified, array
of inverted ext values will be returned.
converged (array): A 2D boolean array. For each pixel, if
|observed gamma - modelled gamma| <= 0.01, that pixel is
marked as converged. Otherwise, converged will be False for
that pixel. Pixels where converged == False suggest that the
RVoG model could not find a good fit for that pixel, and the
parameter estimates may be invalid.
"""
if not silent:
print('kapok.rvog.rvoginv | Beginning RVoG model inversion. ('+time.ctime()+')')
dim = np.shape(gamma)
if mask is None:
mask = np.ones(dim, dtype='bool')
if np.all(limit2pi) or (limit2pi is None):
limit2pi = np.ones(dim, dtype='bool')
elif np.all(limit2pi == False):
limit2pi = np.zeros(dim, dtype='bool')
if isinstance(hv_max, (collections.Sequence, np.ndarray)):
hv_max_clip = hv_max.copy()[mask]
hv_max = np.nanmax(hv_max)
else:
hv_max_clip = None
if isinstance(hv_min, (collections.Sequence, np.ndarray)):
hv_min_clip = hv_min.copy()[mask]
hv_min = np.nanmin(hv_min)
else:
hv_min_clip = None
hv_samples = int((hv_max-hv_min)*2 + 1) # Initial Number of hv Bins in Search Grid
hv_vector = np.linspace(hv_min, hv_max, num=hv_samples)
if tdf is not None:
ext_samples = 40
ext_vector = np.linspace(ext_min, ext_max, num=ext_samples)
elif ext is None:
tdf = 1.0
ext_samples = 40
ext_vector = np.linspace(ext_min, ext_max, num=ext_samples)
else:
ext_vector = [-1.0]
# Use mask to clip input data.
gammaclip = gamma[mask]
phiclip = phi[mask]
incclip = inc[mask]
kzclip = kz[mask]
limit2piclip = limit2pi[mask]
if isinstance(mu, (collections.Sequence, np.ndarray)):
muclip = mu[mask]
elif isinstance(mu, dict):
if not silent:
print('kapok.rvog.rvoginv | Using LUT for mu as a function of forest height.')
muclip = None
else:
muclip = np.ones(gammaclip.shape, dtype='float32') * mu
if isinstance(rngslope, (collections.Sequence, np.ndarray)):
rngslopeclip = rngslope[mask]
else:
rngslopeclip = np.ones(gammaclip.shape, dtype='float32') * rngslope
if isinstance(ext, (collections.Sequence, np.ndarray)):
extclip = ext[mask]
elif isinstance(ext, dict):
if not silent:
print('kapok.rvog.rvoginv | Using LUT for extinction as a function of forest height.')
extclip = None
elif ext is not None:
extclip = np.ones(gammaclip.shape, dtype='float32') * ext
elif isinstance(tdf, (collections.Sequence, np.ndarray)):
tdfclip = tdf[mask]
elif isinstance(tdf, dict):
if not silent:
print('kapok.rvog.rvoginv | Using LUT for temporal decorrelation magnitude as a function of forest height.')
tdfclip = None
elif tdf is not None:
tdfclip = np.ones(gammaclip.shape, dtype='float32') * tdf
# Arrays to store the fitted parameters:
hvfit = np.zeros(gammaclip.shape, dtype='float32')
if ext is None:
extfit = np.zeros(gammaclip.shape, dtype='float32')
if not silent:
print('kapok.rvog.rvoginv | Solving for forest height and extinction, with fixed temporal decorrelation.')
else:
tdffit = np.zeros(gammaclip.shape, dtype='float32')
if not silent:
print('kapok.rvog.rvoginv | Solving for forest height and temporal decorrelation magnitude, with fixed extinction.')
# Variables for optimization:
mindist = np.ones(gammaclip.shape, dtype='float32') * 1e9
convergedclip = np.zeros(gammaclip.shape, dtype='bool')  # set to True below where the fit converges
threshold = 0.01 # threshold for convergence
if not silent:
print('kapok.rvog.rvoginv | Performing repeated searches over smaller parameter ranges until hv step size is less than '+str(hv_step)+' m.')
print('kapok.rvog.rvoginv | Beginning pass #1 with hv step size: '+str(np.round(hv_vector[1]-hv_vector[0],decimals=3))+' m. ('+time.ctime()+')')
for n, hv_val in enumerate(hv_vector):
if not silent:
print('kapok.rvog.rvoginv | Progress: '+str(np.round(n/hv_vector.shape[0]*100,decimals=2))+'%. ('+time.ctime()+') ', end='\r')
for ext_val in ext_vector:
if isinstance(mu, dict):
muclip = np.interp(hv_val, mu['x'], mu['y'])
if ext is None:
if isinstance(tdf, dict):
tdfclip = np.interp(hv_val, tdf['x'], tdf['y'])
gammav_model = rvogfwdvol(hv_val, ext_val, incclip, kzclip, rngslope=rngslopeclip)
gamma_model = phiclip * (muclip + tdfclip*gammav_model) / (muclip + 1)
dist = np.abs(gammaclip - gamma_model)
else:
if isinstance(ext, dict):
extclip = np.interp(hv_val, ext['x'], ext['y'])
gammav_model = rvogfwdvol(hv_val, extclip, incclip, kzclip, rngslope=rngslopeclip)
tdf_val = np.abs((gammaclip*(muclip+1) - phiclip*muclip)/(phiclip*gammav_model))
gamma_model = phiclip * (muclip + tdf_val*gammav_model) / (muclip + 1)
dist = np.abs(gammaclip - gamma_model)
# If potential vegetation height is greater than
# 2*pi ambiguity height, and the limit2pi option
# is set to True, remove these as potential solutions:
ind_limit = limit2piclip & (hv_val > np.abs(2*np.pi/kzclip))
if np.any(ind_limit):
dist[ind_limit] = 1e10
# If hv_min and hv_max were set to arrays,
# ensure that solutions outside of the bounds are excluded.
if hv_min_clip is not None:
ind_limit = (hv_val < hv_min_clip)
if np.any(ind_limit):
dist[ind_limit] = 1e10
if hv_max_clip is not None:
ind_limit = (hv_val > hv_max_clip)
if np.any(ind_limit):
dist[ind_limit] = 1e10
# Best solution so far?
ind = dist < mindist
# Then update:
if np.any(ind):
mindist[ind] = dist[ind]
hvfit[ind] = hv_val
if ext is None:
extfit[ind] = ext_val
else:
tdffit[ind] = tdf_val[ind]
hv_inc = hv_vector[1] - hv_vector[0]
if ext is None:
ext_inc = ext_vector[1] - ext_vector[0]
else:
ext_inc = 1e-10
itnum = 1
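# Refinement loop: after the coarse grid search above, repeatedly narrow the
# search window around the current best (hv, ext) estimates and shrink the
# step sizes by a factor of 10, until the hv step size drops below hv_step.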
while (hv_inc > hv_step):
itnum += 1
hv_low = hvfit - hv_inc
hv_high = hvfit + hv_inc
hv_val = hv_low.copy()
hv_inc /= 10
if ext is None:
ext_low = extfit - ext_inc
ext_low[ext_low < ext_min] = ext_min
ext_high = extfit + ext_inc
ext_high[ext_high > ext_max] = ext_max
ext_val = ext_low.copy()
ext_inc /= 10
else:
ext_low = np.array(ext_min,dtype='float32')
ext_high = np.array(ext_max,dtype='float32')
ext_val = ext_low.copy()
ext_inc = 10.0
if not silent:
print('kapok.rvog.rvoginv | Beginning pass #'+str(itnum)+' with hv step size: '+str(np.round(hv_inc,decimals=3))+' m. ('+time.ctime()+')')
while np.all(hv_val < hv_high):
if not silent:
print('kapok.rvog.rvoginv | Progress: '+str(np.round((hv_val-hv_low)/(hv_high-hv_low)*100,decimals=2)[0])+'%. ('+time.ctime()+') ', end='\r')
while np.all(ext_val < ext_high):
if isinstance(mu, dict):
muclip = np.interp(hv_val, mu['x'], mu['y'])
if ext is None:
if isinstance(tdf, dict):
tdfclip = np.interp(hv_val, tdf['x'], tdf['y'])
gammav_model = rvogfwdvol(hv_val, ext_val, incclip, kzclip, rngslope=rngslopeclip)
gamma_model = phiclip * (muclip + tdfclip*gammav_model) / (muclip + 1)
dist = np.abs(gammaclip - gamma_model)
else:
if isinstance(ext, dict):
extclip = np.interp(hv_val, ext['x'], ext['y'])
gammav_model = rvogfwdvol(hv_val, extclip, incclip, kzclip, rngslope=rngslopeclip)
tdf_val = np.abs((gammaclip*(muclip+1) - phiclip*muclip)/(phiclip*gammav_model))
gamma_model = phiclip * (muclip + tdf_val*gammav_model) / (muclip + 1)
dist = np.abs(gammaclip - gamma_model)
# If potential vegetation height is greater than
# 2*pi ambiguity height, and the limit2pi option
# is set to True, remove these as potential solutions:
ind_limit = limit2piclip & (hv_val > np.abs(2*np.pi/kzclip))
if np.any(ind_limit):
dist[ind_limit] = 1e10
# If hv_min and hv_max were set to arrays,
# ensure that solutions outside of the bounds are excluded.
if hv_min_clip is not None:
ind_limit = (hv_val < hv_min_clip)
if np.any(ind_limit):
dist[ind_limit] = 1e10
if hv_max_clip is not None:
ind_limit = (hv_val > hv_max_clip)
if np.any(ind_limit):
dist[ind_limit] = 1e10
# Best solution so far?
ind = np.less(dist,mindist)
# Then update:
if np.any(ind):
mindist[ind] = dist[ind]
hvfit[ind] = hv_val[ind]
if ext is None:
extfit[ind] = ext_val[ind]
else:
tdffit[ind] = tdf_val[ind]
# Increment the extinction:
ext_val += ext_inc
# Increment the forest height:
hv_val += hv_inc
ext_val = ext_low.copy()
# Check convergence rate.
ind = np.less(mindist,threshold)
convergedclip[ind] = True
num_converged = np.sum(convergedclip)
num_total = len(convergedclip)
rate = np.round(num_converged/num_total*100,decimals=2)
if not silent:
print('kapok.rvog.rvoginv | Completed. Convergence Rate: '+str(rate)+'%. ('+time.ctime()+')')
# Rebuild masked arrays into original image size.
hvmap = np.ones(dim, dtype='float32') * -1
hvmap[mask] = hvfit
converged = np.ones(dim, dtype='float32') * -1
converged[mask] = convergedclip
if ext is None:
extmap = np.ones(dim, dtype='float32') * -1
extmap[mask] = extfit
return hvmap, extmap, converged
else:
tdfmap = np.ones(dim, dtype='float32') * -1
tdfmap[mask] = tdffit
return hvmap, tdfmap, converged
def rvogblselect(gamma, kz, method='prod', minkz=0.0314, gammaminor=None):
"""From a multi-baseline dataset, select the baseline for each pixel that
we expect to produce the best forest height estimate using the RVoG
model.
There are multiple different methods implemented here for ranking
the baselines. These are chosen using the method keyword argument.
The default is method='prod', which selects the baseline with the
highest product between the coherence region major axis line length
and the magnitude of the complex average of the high and low
coherences. Essentially, this method prefers baselines which
have both a long coherence region (e.g., a large phase separation
between the high and low coherences) as well as a high overall
coherence magnitude.
The second method is 'line', which takes the product of the coherence
region major axis separation and the minimum distance between the origin
of the complex plane and the line segment fitted to the optimized
coherences. This is fairly similar to the previous method, but will
produce different results in some cases. This criterion and the previous
option were suggested by Marco Lavalle.
The third method is 'ecc', which selects the baseline with the
highest coherence region eccentricity, favoring baselines
with coherence regions that have a large axial ratio (major axis
divided by minor axis).
The last option is method='var', which favours the baseline with
the smallest expected height variance, which is calculated using
the Cramer-Rao Lower Bound for the phase variance. For details on
these last two selection criteria, see the paper:
S. K. Lee, F. Kugler, K. P. Papathanassiou, I. Hajnsek,
"Multibaseline polarimetric SAR interferometry forest height
inversion approaches", POLinSAR ESA-ESRIN, 2011-Jan.
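A minimal usage sketch (illustrative only; gamma and kz are assumed to
already exist with the shapes described below):

    gammasel, kzsel, blsel = rvogblselect(gamma, kz, method='prod')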
Arguments:
gamma (array): Array of coherence values for the
multi-baseline dataset. Should have dimensions
(bl, coh, azimuth, range). bl is the baseline index, and coh
is the coherence index (e.g., the high and low optimized
coherences). Note that if you are using the eccentricity
selection method, gamma needs to contain the high and low
coherences, as the shape of the coherence region is used
in the selection process. If you are using the height
variance selection method, however, the input coherences
can be any coherences you wish to use. The mean coherence
magnitude of the input coherences for each pixel will be
used to calculate the height variance.
kz (array): Array of kz values for the multi-baseline dataset.
Should have shape (baseline, azimuth, range).
gammaminor (array): If using the eccentricity selection method,
this keyword argument needs to be given an array with the
same shape as gamma, containing the two coherences along the
minor axis of the coherence region (e.g., the optimized
coherences with the smallest separation). These can be
calculated and saved by calling kapok.Scene.opt(saveall=True).
See the documentation for kapok.Scene.opt for more details.
Default: None.
method (str): String which determines the method to use for
selecting the baselines. Options are 'ecc'
(for eccentricity), 'var' (for height variance), or
'prod' (for product of coherence region major axis and
coherence magnitude). See main function description above
for more details. Default: 'prod'.
minkz (float): For a baseline to be considered, the absolute
value of kz must be at least this amount. This keyword
argument allows baselines with zero spatial separation to be
excluded. Default: 0.0314 (e.g., if pi height is greater
than 100m, that baseline will be excluded).
Returns:
gammasel (array): Array of coherences, for the selected
baselines only. Has shape (2, azimuth, range).
kzsel (array): The kz values of the selected baselines for each
pixel. Has shape (azimuth, range).
blsel (array): For each pixel, an array containing the baseline index
of the baseline that was chosen.
"""
if 'line' in method: # Line Length * Separation Product Method
from kapok.lib import linesegmentdist
print('kapok.rvog.rvogblselect | Performing incoherent multi-baseline RVoG inversion. Selecting baselines using product of fitted line distance from origin and coherence separation. ('+time.ctime()+')')
sep = np.abs(gamma[:,0] - gamma[:,1])
dist = linesegmentdist(0, gamma[:,0], gamma[:,1])
criteria = sep * dist
elif 'var' in method: # Height Variance Method
# Note: We don't include the number of looks in the equation, as we
# assume the coherence for all of the baselines have been estimated
# using the same number of looks, so it does not affect the
# selection.
print('kapok.rvog.rvogblselect | Performing incoherent multi-baseline RVoG inversion. Selecting baselines using height variance. ('+time.ctime()+')')
criteria = np.abs(gamma[:,0]) ** 2
criteria = -1*np.sqrt((1-criteria)/2/criteria)/np.abs(kz)
elif 'ecc' in method: # Eccentricity Method
if gammaminor is not None:
print('kapok.rvog.rvogblselect | Performing incoherent multi-baseline RVoG inversion. Selecting baselines using coherence region eccentricity. ('+time.ctime()+')')
criteria = (np.abs(gammaminor[:,0] - gammaminor[:,1])/np.abs(gamma[:,0] - gamma[:,1])) ** 2
criteria = np.sqrt(1 - criteria)
else:
print('kapok.rvog.rvogblselect | Using eccentricity method for baseline selection, but gammaminor keyword has not been set. Aborting.')
return None
else: # Default to Coherence Magnitude * Separation Product Method
print('kapok.rvog.rvogblselect | Performing incoherent multi-baseline RVoG inversion. Selecting baselines using product of average coherence magnitude and separation. ('+time.ctime()+')')
sep = np.abs(gamma[:,0] - gamma[:,1])
mag = np.abs(gamma[:,0] + gamma[:,1])
criteria = sep * mag
# Remove too small baselines.
criteria[np.abs(kz) < minkz] = -1e6
# Now shuffle the coherences and kz values around to return the baselines
# with the highest criteria value for each pixel.
blsel = np.argmax(criteria, axis=0)
az = np.tile(np.arange(gamma.shape[2]),(gamma.shape[3],1)).T
rng = np.tile(np.arange(gamma.shape[3]),(gamma.shape[2],1))
gammasel = gamma[blsel,:,az,rng]
gammasel = np.rollaxis(gammasel, 2)
kzsel = kz[blsel,az,rng]
return gammasel, kzsel, blsel | PypiClean |
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/_source/plugins/forms/dialogs/textfield.js | /*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.dialog.add( 'textfield', function( editor )
{
var autoAttributes =
{
value : 1,
size : 1,
maxLength : 1
};
var acceptedTypes =
{
text : 1,
password : 1
};
return {
title : editor.lang.textfield.title,
minWidth : 350,
minHeight : 150,
onShow : function()
{
delete this.textField;
var element = this.getParentEditor().getSelection().getSelectedElement();
if ( element && element.getName() == "input" &&
( acceptedTypes[ element.getAttribute( 'type' ) ] || !element.getAttribute( 'type' ) ) )
{
this.textField = element;
this.setupContent( element );
}
},
onOk : function()
{
var editor,
element = this.textField,
isInsertMode = !element;
if ( isInsertMode )
{
editor = this.getParentEditor();
element = editor.document.createElement( 'input' );
element.setAttribute( 'type', 'text' );
}
if ( isInsertMode )
editor.insertElement( element );
this.commitContent( { element : element } );
},
onLoad : function()
{
var autoSetup = function( element )
{
var value = element.hasAttribute( this.id ) && element.getAttribute( this.id );
this.setValue( value || '' );
};
var autoCommit = function( data )
{
var element = data.element;
var value = this.getValue();
if ( value )
element.setAttribute( this.id, value );
else
element.removeAttribute( this.id );
};
this.foreach( function( contentObj )
{
if ( autoAttributes[ contentObj.id ] )
{
contentObj.setup = autoSetup;
contentObj.commit = autoCommit;
}
} );
},
contents : [
{
id : 'info',
label : editor.lang.textfield.title,
title : editor.lang.textfield.title,
elements : [
{
type : 'hbox',
widths : [ '50%', '50%' ],
children :
[
{
id : '_cke_saved_name',
type : 'text',
label : editor.lang.textfield.name,
'default' : '',
accessKey : 'N',
setup : function( element )
{
this.setValue(
element.getAttribute( '_cke_saved_name' ) ||
element.getAttribute( 'name' ) ||
'' );
},
commit : function( data )
{
var element = data.element;
if ( this.getValue() )
element.setAttribute( '_cke_saved_name', this.getValue() );
else
{
element.removeAttribute( '_cke_saved_name' );
element.removeAttribute( 'name' );
}
}
},
{
id : 'value',
type : 'text',
label : editor.lang.textfield.value,
'default' : '',
accessKey : 'V'
}
]
},
{
type : 'hbox',
widths : [ '50%', '50%' ],
children :
[
{
id : 'size',
type : 'text',
label : editor.lang.textfield.charWidth,
'default' : '',
accessKey : 'C',
style : 'width:50px',
validate : CKEDITOR.dialog.validate.integer( editor.lang.common.validateNumberFailed )
},
{
id : 'maxLength',
type : 'text',
label : editor.lang.textfield.maxChars,
'default' : '',
accessKey : 'M',
style : 'width:50px',
validate : CKEDITOR.dialog.validate.integer( editor.lang.common.validateNumberFailed )
}
],
onLoad : function()
{
// Repaint the style for IE7 (#6068)
if ( CKEDITOR.env.ie7Compat )
this.getElement().setStyle( 'zoom', '100%' );
}
},
{
id : 'type',
type : 'select',
label : editor.lang.textfield.type,
'default' : 'text',
accessKey : 'M',
items :
[
[ editor.lang.textfield.typeText, 'text' ],
[ editor.lang.textfield.typePass, 'password' ]
],
setup : function( element )
{
this.setValue( element.getAttribute( 'type' ) );
},
commit : function( data )
{
var element = data.element;
if ( CKEDITOR.env.ie )
{
var elementType = element.getAttribute( 'type' );
var myType = this.getValue();
if ( elementType != myType )
{
var replace = CKEDITOR.dom.element.createFromHtml( '<input type="' + myType + '"></input>', editor.document );
element.copyAttributes( replace, { type : 1 } );
replace.replace( element );
editor.getSelection().selectElement( replace );
data.element = replace;
}
}
else
element.setAttribute( 'type', this.getValue() );
}
}
]
}
]
};
}); | PypiClean |
/GRADitude-0.1.3-py3-none-any.whl/graditudelib/correlation_specific_gene.py | import pandas as pd
from scipy.stats import spearmanr
from scipy.stats import pearsonr
def corr_specific_gene_vs_all(feature_count_table,
feature_count_start_column,
feature_count_end_column,
name_column_with_genes_name,
name, correlation, output_file):
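"""Correlate the expression profile of one gene (``name``) against all genes.

Reads the tab-separated feature count table, takes the value columns between
feature_count_start_column and feature_count_end_column, extracts the row of
the gene given by ``name``, and writes a table with a Spearman or Pearson
correlation coefficient and p-value for every gene to ``output_file``.
"""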
table_protein = pd.read_table(feature_count_table)
matrix = _extract_value_matrix(table_protein,
feature_count_start_column,
feature_count_end_column)
gene_column = _extract_gene_column(table_protein,
name_column_with_genes_name)
table_with_genes = pd.concat([gene_column, matrix], axis=1)
table_with_genes_new_index = \
table_with_genes.set_index(name_column_with_genes_name)
gene_row_specific_gene = \
_extract_gene_row(table_with_genes_new_index, name)
if correlation == "Spearman":
spearman_correlation(gene_row_specific_gene,
table_with_genes_new_index, output_file)
else:
pearson_correlation(gene_row_specific_gene,
table_with_genes_new_index, output_file)
def _extract_gene_row(feature_count_table_df, gene_name):
return feature_count_table_df.loc[feature_count_table_df.index == gene_name]
def _extract_value_matrix(feature_count_table_df, feature_count_start_column,
feature_count_end_column):
return feature_count_table_df.iloc[:,
feature_count_start_column:feature_count_end_column]
def _extract_gene_column(feature_count_table_df, name_column_with_genes_name):
return feature_count_table_df[[name_column_with_genes_name]]
def pearson_correlation(gene_row_specific_gene, table_with_genes_new_index, output_file):
rho_list = []
p_value_list = []
for index_gene, row_gene in table_with_genes_new_index.iterrows():
rho = pearsonr(gene_row_specific_gene.iloc[0], row_gene)[0]
rho_list.append(rho)
p_value = pearsonr(gene_row_specific_gene.iloc[0], row_gene)[1]
p_value_list.append(p_value)
table_with_genes_new = table_with_genes_new_index.reset_index()
rho_df = pd.DataFrame(rho_list)
p_df = pd.DataFrame(p_value_list)
table_with_rho_and_pvalue = pd.concat([table_with_genes_new, rho_df, p_df], axis=1)
table_with_rho_and_pvalue.columns.values[[-2, -1]] = ['Pearson_coefficient', 'p_value']
table_with_rho_and_pvalue.to_csv(output_file, sep='\t', index=None)
def spearman_correlation(gene_row_specific_gene, table_with_genes_new_index, output_file):
rho_list = []
p_value_list = []
for index_gene, row_gene in table_with_genes_new_index.iterrows():
rho = spearmanr(gene_row_specific_gene.iloc[0], row_gene)[0]
rho_list.append(rho)
p_value = spearmanr(gene_row_specific_gene.iloc[0], row_gene)[1]
p_value_list.append(p_value)
table_with_genes_new = table_with_genes_new_index.reset_index()
rho_df = pd.DataFrame(rho_list)
p_df = pd.DataFrame(p_value_list)
table_with_rho_and_pvalue = pd.concat([table_with_genes_new, rho_df, p_df], axis=1)
table_with_rho_and_pvalue.columns.values[[-2, -1]] = ['Spearman_coefficient', 'p_value']
table_with_rho_and_pvalue.to_csv(output_file, sep='\t', index=None) | PypiClean |
/AASMessenger_Server-1.0.1.tar.gz/AASMessenger_Server-1.0.1/server/server/main_window.py | from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QLabel, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QTimer
from server.stat_window import StatWindow
from server.config_window import ConfigWindow
from server.add_user import RegisterUser
from server.remove_user import DelUserDialog
class MainWindow(QMainWindow):
'''Class for the server's main window.'''
def __init__(self, database, server, config):
# Parent constructor
super().__init__()
# Server database
self.database = database
self.server_thread = server
self.config = config
# Exit action
self.exitAction = QAction('Выход', self)
self.exitAction.setShortcut('Ctrl+Q')
self.exitAction.triggered.connect(qApp.quit)
# Button to refresh the client list
self.refresh_button = QAction('Обновить список', self)
# Server settings button
self.config_btn = QAction('Настройки сервера', self)
# User registration button
self.register_btn = QAction('Регистрация пользователя', self)
# User removal button
self.remove_btn = QAction('Удаление пользователя', self)
# Button to show the message history
self.show_history_button = QAction('История клиентов', self)
# Status bar
self.statusBar()
self.statusBar().showMessage('Server Working')
# Toolbar
self.toolbar = self.addToolBar('MainBar')
self.toolbar.addAction(self.exitAction)
self.toolbar.addAction(self.refresh_button)
self.toolbar.addAction(self.show_history_button)
self.toolbar.addAction(self.config_btn)
self.toolbar.addAction(self.register_btn)
self.toolbar.addAction(self.remove_btn)
# Main window geometry settings
self.setFixedSize(800, 600)
self.setWindowTitle('Messaging Server alpha release')
# Label indicating that the list of connected clients is shown below
self.label = QLabel('Список подключённых клиентов:', self)
self.label.setFixedSize(240, 15)
self.label.move(10, 25)
# Table with the list of connected clients.
self.active_clients_table = QTableView(self)
self.active_clients_table.move(10, 45)
self.active_clients_table.setFixedSize(780, 400)
# Timer refreshing the client list once per second
self.timer = QTimer()
self.timer.timeout.connect(self.create_users_model)
self.timer.start(1000)
# Connect the buttons to their handlers
self.refresh_button.triggered.connect(self.create_users_model)
self.show_history_button.triggered.connect(self.show_statistics)
self.config_btn.triggered.connect(self.server_config)
self.register_btn.triggered.connect(self.reg_user)
self.remove_btn.triggered.connect(self.rem_user)
# Finally, display the window.
self.show()
def create_users_model(self):
'''Method that fills the table of active users.'''
list_users = self.database.active_users_list()
users_model = QStandardItemModel()
users_model.setHorizontalHeaderLabels(
['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения'])
for row in list_users:
user, ip, port, time = row
user = QStandardItem(user)
user.setEditable(False)
ip = QStandardItem(ip)
ip.setEditable(False)
port = QStandardItem(str(port))
port.setEditable(False)
# Strip the microseconds from the time string,
# since that precision is not needed.
time = QStandardItem(str(time.replace(microsecond=0)))
time.setEditable(False)
users_model.appendRow([user, ip, port, time])
self.active_clients_table.setModel(users_model)
self.active_clients_table.resizeColumnsToContents()
self.active_clients_table.resizeRowsToContents()
def show_statistics(self):
'''Method that creates the client statistics window.'''
global stat_window
stat_window = StatWindow(self.database)
stat_window.show()
def server_config(self):
'''Method that creates the server settings window.'''
global config_window
# Create the window and load the current settings into it
config_window = ConfigWindow(self.config)
def reg_user(self):
'''Method that creates the user registration window.'''
global reg_window
reg_window = RegisterUser(self.database, self.server_thread)
reg_window.show()
def rem_user(self):
'''Method that creates the user removal window.'''
global rem_window
rem_window = DelUserDialog(self.database, self.server_thread)
rem_window.show() | PypiClean |
/DynamicForms-0.74.8-py3-none-any.whl/dynamicforms_legacy/action.py | import uuid as uuid_module
from enum import IntEnum
from typing import Iterable, List, Union
from django.utils.translation import gettext_lazy as _
from rest_framework.serializers import Serializer
from .settings import DYNAMICFORMS
class ActionBase(object):
def __init__(self, action_js: str, name: Union[str, None] = None, serializer: Serializer = None):
"""
:param action_js: JavaScript to execute when action is run
:param name: name by which to recognise this action in further processing, e.g. Serializer.suppress_action
:param serializer: bind to this serializer instance
"""
self.name = name
self.action_js = action_js
assert self.action_js is not None, "When declaring action, it must declare action JavaScript to execute"
# serializer will be set when obtaining a resolved copy
self.serializer = serializer
@property
def action_id(self):
return id(self)
def copy_and_resolve_reference(self, serializer):
raise NotImplementedError()
def render(self, serializer: Serializer, **kwds):
raise NotImplementedError()
@staticmethod
def prepare_string(string, encode=True):
"""
Replaces curly brackets with a placeholder string so that str.format() does not treat them as format fields
:param string: String in which to search for curly brackets
:param encode: True: replace brackets with the placeholder strings, False: perform the reverse substitution
:return:
"""
if not string:
return ""
replaces = {"{": "´|curl_brack_start|`", "}": "´|curl_brack_end|`"}
if not encode:
replaces = dict([(value, key) for key, value in replaces.items()])
for key, val in replaces.items():
string = string.replace(key, val)
return string
class RenderableActionMixin(object):
"""
Action that is rendered on screen
"""
def __init__(self, label: str, title: str, icon: str = None, btn_classes: Union[str, dict, None] = None):
"""
:param label: Label for the rendered on-screen control
:param title: Hint text for the on-screen control
:param icon: optional icon to add to the render
:param btn_classes: optional class(es) of the button. If the value is a dict and its 'replace' key is True,
then the default class will be replaced with the class(es) given under the 'classes' key. Otherwise the
class(es) will simply be added to the default class
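Example (illustrative values only): btn_classes='btn-sm my-class' adds the classes to the
defaults, while btn_classes={'replace': True, 'classes': 'btn btn-warning'} replaces them entirely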
"""
self.label = label
self.title = title
self.icon = icon
self.btn_classes = btn_classes
class TablePosition(IntEnum):
HEADER = 0 # Table header of list view
FILTER_ROW_START = 7 # Alternative to HEADER: command is placed in filter row, actions column at start of line
FILTER_ROW_END = 8 # Alternative to HEADER: command is placed in filter row, actions column at end of line
# On left click on table row (currently this renders only once per table;
# we might need to add one that renders for each row)
ROW_CLICK = 1
ROW_RIGHTCLICK = 2 # On right click on table row
ROW_START = 3 # Additional control column on left side of table
ROW_END = 4 # Additional control column on right side of table
FIELD_START = 5 # On left side of field value
FIELD_END = 6 # On right side of field value
class TableAction(ActionBase, RenderableActionMixin):
def __init__(
self,
position: TablePosition,
label: str,
action_js: str,
title: Union[str, None] = None,
icon: Union[str, None] = None,
field_name: Union[str, None] = None,
name: Union[str, None] = None,
serializer: Serializer = None,
btn_classes: Union[str, dict, None] = None,
):
ActionBase.__init__(self, action_js, name, serializer)
RenderableActionMixin.__init__(self, label, title, icon, btn_classes)
self.position = position
self.field_name = field_name
def copy_and_resolve_reference(self, serializer: Serializer):
return TableAction(
self.position,
self.label,
self.action_js,
self.title,
self.icon,
self.field_name,
self.name,
serializer,
self.btn_classes,
)
def render(self, serializer: Serializer, **kwds):
ret = rowclick = rowrclick = ""
stop_propagation = "dynamicforms.stopEventPropagation(event);"
action_action = self.prepare_string(self.action_js)
if self.position != TablePosition.HEADER:
# We need to do this differently because of dynamic page loading for tables: each time the serializer
# has a different UUID
action_action = action_action.replace(
"__TABLEID__", "$(event.target).parents('table').attr('id').substr(5)"
)
else:
action_action = action_action.replace("__TABLEID__", "'" + str(serializer.uuid) + "'")
if self.position == TablePosition.ROW_CLICK:
rowclick = action_action
elif self.position == TablePosition.ROW_RIGHTCLICK:
rowrclick = action_action
else:
from uuid import uuid1
def get_btn_class(default_class, additional_class):
classes = default_class.split(" ")
if additional_class:
if isinstance(additional_class, dict):
if additional_class.get("replace", False):
classes = []
additional_class = additional_class.get("classes", "").split(" ")
else:
additional_class = additional_class.split(" ")
classes.extend(additional_class)
return " ".join(classes)
button_name = (' name="btn-%s" ' % self.name) if self.name else ""
btn_class = get_btn_class("btn btn-info", self.btn_classes)
btnid = uuid1()
ret += (
'<button id="df-action-btn-{btnid}" type="button" class="{btn_class}" title="{title}"{button_name}'
'onClick="{stop_propagation} {action}">{icon_def}{label}</button>'.format(
btnid=btnid,
stop_propagation=stop_propagation,
action=action_action,
btn_class=btn_class,
label=self.prepare_string(self.label),
title=self.prepare_string(self.title),
icon_def='<img src="{icon}"/>'.format(icon=self.icon) if self.icon else "",
button_name=button_name,
)
)
if DYNAMICFORMS.jquery_ui:
ret += '<script type="application/javascript">$("#df-action-btn-{btnid}").button();</script>'.format(
btnid=btnid
)
if self.position in (TablePosition.ROW_CLICK, TablePosition.ROW_RIGHTCLICK):
if rowclick != "":
ret += (
"$('#list-{uuid}').find('tbody').click("
"function(event) {{ \n{stop_propagation} \n{action} \nreturn false;\n}});\n".format(
stop_propagation=stop_propagation, action=rowclick, uuid=serializer.uuid
)
)
if rowrclick != "":
ret += (
"$('#list-{uuid}').find('tbody').contextmenu("
"function(event) {{ \n{stop_propagation} \n{action} \nreturn false;\n}});\n".format(
stop_propagation=stop_propagation, action=rowrclick, uuid=serializer.uuid
)
)
if ret != "":
ret = '<script type="application/javascript">%s</script>' % ret
elif ret != "":
ret = '<div class="dynamicforms-actioncontrol float-{direction} pull-{direction}">{ret}</div>'.format(
ret=ret, direction="left" if self.position == TablePosition.FIELD_START else "right"
)
return self.prepare_string(ret, False)
class FieldChangeAction(ActionBase):
def __init__(
self,
tracked_fields: Iterable[str],
action_js: str,
name: Union[str, None] = None,
serializer: Serializer = None,
):
super().__init__(action_js, name, serializer)
self.tracked_fields = tracked_fields
assert self.tracked_fields, "When declaring an action, it must track at least one form field"
if serializer:
self.tracked_fields = [self._resolve_reference(f) for f in self.tracked_fields]
def _resolve_reference(self, ref):
from .mixins import RenderMixin
if isinstance(ref, uuid_module.UUID):
return str(ref)
elif isinstance(ref, RenderMixin):
# TODO unit tests!!!
# TODO test what happens if the Field instance given is from another serializer
# TODO test what happens when Field instance is actually a Serializer (when should onchange trigger for it?)
return ref.uuid
elif isinstance(ref, str) and ref in self.serializer.fields:
return self.serializer.fields[ref].uuid
elif isinstance(ref, str) and "." in ref:
# This supports nested serializers and fields with . notation, e.g. master_serializer_field.child_field
f = self.serializer
for r in ref.split("."):
f = f.fields[r]
return f.uuid
raise Exception("Unknown reference type for Action tracked field (%r)" % ref)
def copy_and_resolve_reference(self, serializer: Serializer):
return FieldChangeAction(self.tracked_fields, self.action_js, self.name, serializer)
def render(self, serializer: Serializer, **kwds):
res = "var action_func{0.action_id} = {0.action_js};\n".format(self)
for tracked_field in self.tracked_fields:
res += (
"dynamicforms.registerFieldAction('{ser.uuid}', '{tracked_field}', action_func{s.action_id});\n".format(
ser=serializer, tracked_field=tracked_field, s=self
)
)
return res
class FormInitAction(ActionBase):
def copy_and_resolve_reference(self, serializer: Serializer):
return FormInitAction(self.action_js, self.name, serializer)
def render(self, serializer: Serializer, **kwds):
# we need window.setTimeout because at the time of form generation, the initial fields value collection
# hasn't been done yet
return "window.setTimeout(function() {{ {0.action_js} }}, 1);\n".format(self)
class FieldInitAction(FieldChangeAction):
def copy_and_resolve_reference(self, serializer: Serializer):
return FieldInitAction(self.tracked_fields, self.action_js, self.name, serializer)
def render(self, serializer: Serializer, **kwds):
# we need window.setTimeout because at the time of form generation, the initial fields value collection
# hasn't been done yet
return "window.setTimeout(function() {{ {0.action_js} }}, 1);\n".format(self)
class FormButtonTypes(IntEnum):
CANCEL = 1
SUBMIT = 2
CUSTOM = 3
class FormButtonAction(ActionBase):
DEFAULT_LABELS = {
FormButtonTypes.CANCEL: _("Cancel"),
FormButtonTypes.SUBMIT: _("Save changes"),
FormButtonTypes.CUSTOM: _("Custom"),
}
def __init__(
self,
btn_type: FormButtonTypes,
label: str = None,
btn_classes: str = None,
action_js: str = None,
button_is_primary: bool = None,
positions: List[str] = None,
name: Union[str, None] = None,
serializer: Serializer = None,
):
super().__init__(action_js or False, name, serializer)
self.uuid = uuid_module.uuid1()
self.btn_type = btn_type
self.label = label or FormButtonAction.DEFAULT_LABELS[btn_type or FormButtonTypes.CUSTOM]
self.positions = positions or ["dialog", "form"]
if button_is_primary is None:
button_is_primary = btn_type == FormButtonTypes.SUBMIT
self.button_is_primary = button_is_primary
self.btn_classes = btn_classes or (
DYNAMICFORMS.form_button_classes
+ " "
+ (
DYNAMICFORMS.form_button_classes_primary
if button_is_primary
else DYNAMICFORMS.form_button_classes_secondary
)
+ " "
+ (DYNAMICFORMS.form_button_classes_cancel if btn_type == FormButtonTypes.CANCEL else "")
)
def copy_and_resolve_reference(self, serializer):
return FormButtonAction(
self.btn_type,
self.label,
self.btn_classes,
self.action_js,
self.button_is_primary,
self.positions,
self.name,
serializer,
)
def render(self, serializer: Serializer, position=None, **kwds):
if self.btn_type == FormButtonTypes.CANCEL and position == "form":
return ""
action_js = self.action_js
button_name = ('name="btn-%s"' % self.name) if self.name else ""
if isinstance(action_js, str):
action_js = self.prepare_string(action_js)
action_js = action_js.format(**locals())
button_type = "button" if self.btn_type != FormButtonTypes.SUBMIT or position == "dialog" else "submit"
data_dismiss = 'data-dismiss="modal"' if self.btn_type == FormButtonTypes.CANCEL else ""
if self.btn_type == FormButtonTypes.SUBMIT:
button_id = "save-" + str(serializer.uuid)
else:
button_id = "formbutton-" + str(self.uuid)
if (self.btn_type != FormButtonTypes.SUBMIT or position == "dialog") and action_js:
button_js = (
'<script type="text/javascript">'
' $("#{button_id}").on("click", function() {{'
" {action_js}"
" }});"
"</script>"
).format(**locals())
else:
button_js = ""
return self.prepare_string(
'<button type="{button_type}" class="{self.btn_classes}" {data_dismiss} {button_name} id="{button_id}">'
"{self.label}</button>{button_js}".format(**locals()),
False,
)
class Actions(object):
def __init__(
self, *args, add_default_crud: bool = False, add_default_filter: bool = False, add_form_buttons: bool = True
) -> None:
super().__init__()
if len(args) == 1 and args[0] is None:
self.actions = []
return
self.actions = list(args) # type: List[ActionBase]
if add_default_crud:
self.actions.append(
TableAction(
TablePosition.HEADER,
_("+ Add"),
title=_("Add new record"),
name="add",
action_js="dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'"
", 'record', __TABLEID__);",
)
)
self.actions.append(
TableAction(
TablePosition.ROW_CLICK,
_("Edit"),
title=_("Edit record"),
name="edit",
action_js="dynamicforms.editRow('{% url url_reverse|add:'-detail' pk='__ROWID__' "
"format='html' %}'.replace('__ROWID__', $(event.target.parentElement).closest("
"'tr[class=\"df-table-row\"]').attr('data-id')), 'record', __TABLEID__);",
)
)
self.actions.append(
TableAction(
TablePosition.ROW_END,
label=_("Delete"),
title=_("Delete record"),
name="delete",
action_js="dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', "
"{{row.id}}, 'record', __TABLEID__);",
)
)
if add_default_filter:
self.actions.append(
TableAction(
TablePosition.HEADER,
label=_("Filter"),
title=_("Filter"),
name="filter",
action_js="dynamicforms.defaultFilter(event);",
)
)
if add_form_buttons:
self.actions.append(FormButtonAction(btn_type=FormButtonTypes.CANCEL, name="cancel"))
self.actions.append(FormButtonAction(btn_type=FormButtonTypes.SUBMIT, name="submit"))
def get_resolved_copy(self, serializer) -> "Actions":
"""
Returns a copy of declared actions bound to the serializer
:param serializer: serializer the copy will be bound to
:return:
"""
if not isinstance(serializer, Serializer):
return Actions(None)
actions = [a.copy_and_resolve_reference(serializer) for a in self.actions]
# move actions from Field to Serializer
actions.extend(
[
a.copy_and_resolve_reference(serializer)
for field in serializer.fields.values()
for a in getattr(field, "actions", Actions(None)).actions_not_suppressed(serializer)
if isinstance(a, FieldChangeAction)
]
)
return Actions(*actions, add_form_buttons=False)
def render_field_onchange(self, serializer):
"""
renders all field onchange actions needed for dynamicforms.registerFieldAction() function
:return: the actions rendered as template string
"""
res = ""
for action in self.actions:
if isinstance(action, FieldChangeAction) and not isinstance(action, FieldInitAction):
res += action.render(serializer)
return res
def render_form_init(self, serializer):
"""
renders the function which will analyse initial form data and hide appropriate fields
:return: the actions rendered as template string
"""
res = ""
for action in self.actions:
if isinstance(action, FormInitAction):
res += action.render(serializer)
return res
def render_field_init(self, serializer, field_name: str):
"""
renders function that will initialise the field being rendered
:return: the actions rendered as template string
"""
res = ""
for action in self.actions:
if isinstance(action, FieldInitAction) and field_name in action.tracked_fields:
res += action.render(serializer)
return res
def renderable_actions(self, serializer: Serializer):
request = serializer.context.get("request", None)
viewset = serializer.context.get("view", None)
return (
a
for a in self.actions
if isinstance(a, TableAction) and not serializer.suppress_action(a, request, viewset)
)
def render_renderable_actions(
self, allowed_positions: Iterable[TablePosition], field_name: str, serializer: Serializer
):
"""
Returns those actions that are not suppressed
:return: List[Action]
"""
res = ""
for action in self.renderable_actions(serializer):
if action.position in allowed_positions and (field_name is None or field_name == action.field_name):
res += action.render(serializer)
return res
def actions_not_suppressed(self, serializer: Serializer):
request = serializer.context.get("request", None)
viewset = serializer.context.get("view", None)
return (a for a in self.actions if not serializer.suppress_action(a, request, viewset))
def render_form_buttons(self, serializer: Serializer, position: str):
"""
Renders form buttons
:return: List[Action]
"""
request = serializer.context.get("request", None)
viewset = serializer.context.get("view", None)
res = ""
for button in self.actions:
if (
isinstance(button, FormButtonAction)
and position in button.positions
and not serializer.suppress_action(button, request, viewset)
):
res += button.render(serializer, position=position)
return res
def __iter__(self):
return iter(self.actions)
"""
action types
crud (new, edit, delete, view detail)
on field change
on form display (initial fields hiding)
dialog / form manipulation (cancel entry, submit entry)
custom actions
how the actions are rendered
button / link -> html control
onclick / onrightclick / onkey -> JS event
on field change -> JS event (depends on field / position)
actions on the form / dialog
control column (start, end of row, any position?) -> HTML control
table header / footer -> html control
table field left / right -> html control
form field left / right -> html control
form top, form bottom -> html control
custom position (the programmer calls the render function with some custom text instead of an enum value)
do the actions need to know what they are attached to?
on a form we have submit, on a dialog dynamicforms.SubmitForm, so the render is different
for a table the row id, perhaps the row data?
data that can be available:
parent serializer
serializer
row data
form data --> serializer
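
an illustrative sketch only (names from this module; how a project attaches these
actions to its serializers may differ):

    actions = Actions(
        TableAction(TablePosition.HEADER, label='Export', title='Export table',
                    name='export', action_js="alert('export');"),
        add_default_crud=True,
    )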
""" | PypiClean |
/MedPy-0.4.0.tar.gz/MedPy-0.4.0/medpy/core/logger.py |
# build-in module
import sys
import logging
from logging import Logger as NativeLogger
# third-party modules
# own modules
# constants
# code
class Logger (NativeLogger):
r"""Logger to be used by all applications and classes.
Notes
-----
Singleton class, i.e., setting the log level changes the output globally.
Examples
--------
Initializing the logger
>>> from medpy.core import Logger
>>> logger = Logger.getInstance()
Error and critical messages are passed to stdout
>>> logger.error('error message')
15.09.2014 12:40:25 [ERROR ] error message
>>> logger.critical('critical message')
15.09.2014 12:40:42 [CRITICAL] critical message
But debug and info messages are suppressed
>>> logger.info('info message')
>>> logger.debug('debug message')
Unless the log level is set accordingly
>>> import logging
>>> logger.setLevel(logging.DEBUG)
>>> logger.info('info message')
15.09.2014 12:43:06 [INFO ] info message (in <ipython-input-14-a08cad56519d>.<module>:1)
>>> logger.debug('debug message')
15.09.2014 12:42:50 [DEBUG ] debug message (in <ipython-input-13-3bb0c512b560>.<module>:1)
"""
class LoggerHelper (object):
r"""A helper class which performs the actual initialization.
"""
def __call__(self, *args, **kw) :
# If an instance of Logger does not exist,
# create one and assign it to Logger._instance.
if Logger._instance is None :
Logger._instance = Logger()
# Return Logger._instance, which should contain
# a reference to the only instance of Logger
# in the system.
return Logger._instance
r"""Member variable initiating and returning the instance of the class."""
getInstance = LoggerHelper()
r"""The member variable holding the actual instance of the class."""
_instance = None
r"""Holds the loggers handler for format changes."""
_handler = None
def __init__(self, name = 'MedPyLogger', level = 0) :
# To guarantee that no one created more than one instance of Logger:
if Logger._instance is not None :
raise RuntimeError('Only one instance of Logger is allowed!')
# initialize parent
NativeLogger.__init__(self, name, level)
# set attributes
self.setHandler(logging.StreamHandler(sys.stdout))
self.setLevel(logging.WARNING)
def setHandler(self, hdlr):
r"""Replace the current handler with a new one.
Parameters
----------
hdlr : logging.Handler
A subclass of Handler that should used to handle the logging output.
Notes
-----
If no handler should be replaced, but just one added, use the parent
class's addHandler() method.
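Examples
--------
Redirecting all output to a file instead of stdout (illustrative):

>>> import logging
>>> logger.setHandler(logging.FileHandler('medpy.log'))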
"""
if self._handler is not None:
self.removeHandler(self._handler)
self._handler = hdlr
self.addHandler(self._handler)
def setLevel(self, level):
r"""Overrides the parent method to adapt the formatting string to the level.
Parameters
----------
level : int
The new log level to set. See the logging levels in the logging module for details.
Examples
--------
>>> import logging
>>> Logger.setLevel(logging.DEBUG)
"""
if logging.DEBUG >= level:
formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)",
"%d.%m.%Y %H:%M:%S")
self._handler.setFormatter(formatter)
else:
formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s",
"%d.%m.%Y %H:%M:%S")
self._handler.setFormatter(formatter)
NativeLogger.setLevel(self, level) | PypiClean |
/GalSim-2.4.11-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl/galsim/phase_psf.py |
from heapq import heappush, heappop
import numpy as np
from .gsobject import GSObject
from .gsparams import GSParams
from .angle import radians, degrees, arcsec, Angle, AngleUnit
from .image import Image, _Image
from .bounds import _BoundsI
from .wcs import PixelScale
from .interpolatedimage import InterpolatedImage
from .utilities import doc_inherit, OrderedWeakRef, rotate_xy, lazy_property, basestring
from .errors import GalSimValueError, GalSimRangeError, GalSimIncompatibleValuesError
from .errors import GalSimFFTSizeError, galsim_warn
from .photon_array import TimeSampler
class Aperture:
"""Class representing a telescope aperture embedded in a larger pupil plane array -- for use
with the `PhaseScreenPSF` class to create PSFs via Fourier or geometric optics.
The pupil plane array is completely specified by its size, sampling interval, and pattern of
illuminated pixels. Pupil plane arrays can be specified either geometrically or using an image
to indicate the illuminated pixels. In both cases, various options exist to control the pupil
plane size and sampling interval.
**Geometric pupil specification**:
The first way to specify the details of the telescope aperture is through a series of keywords
indicating the diameter, size of the central obscuration, and the nature of the struts
holding up the secondary mirror (or prime focus cage, etc.). The struts are assumed to be
rectangular obscurations extending from the outer edge of the pupil to the outer edge of the
obscuration disk (or to the pupil center if ``obscuration = 0.``). You can specify how many
struts there are (evenly spaced in angle), how thick they are as a fraction of the pupil
diameter, and what angle they start at relative to the positive y direction.
The size (in meters) and sampling interval (in meters) of the pupil plane array representing the
aperture can be set directly using the the ``pupil_plane_size`` and ``pupil_plane_scale``
keywords. However, in most situations, it's probably more convenient to let GalSim set these
automatically based on the pupil geometry and the nature of the (potentially time-varying)
phase aberrations from which a PSF is being derived.
The pupil plane array physical size is by default set to twice the pupil diameter producing a
Nyquist sampled PSF image. While this would always be sufficient if using sinc interpolation
over the PSF image for subsequent operations, GalSim by default uses the much faster (though
approximate) quintic interpolant, which means that in some cases -- in particular, for
significantly aberrated optical PSFs without atmospheric aberrations -- it may be useful to
further increase the size of the pupil plane array, thereby increasing the sampling rate of the
resulting PSF image. This can be done by increasing the ``oversampling`` keyword.
A caveat to the above occurs when using ``geometric_shooting=True`` to draw using
photon-shooting. In this case, we only need an array just large enough to avoid clipping the
pupil, which we can get by setting ``oversampling=0.5``.
The pupil plane array physical sampling interval (which is directly related to the resulting PSF
image physical size) is set by default to the same interval as would be used to avoid
significant aliasing (image folding) for an obscured `Airy` profile with matching diameter and
obscuration and for the value of ``folding_threshold`` in the optionally specified gsparams
argument. If the phase aberrations are significant, however, the PSF image size computed this
way may still not be sufficiently large to avoid aliasing. To further increase the pupil plane
sampling rate (and hence the PSF image size), you can increase the value of the ``pad_factor``
keyword.
An additional way to set the pupil sampling interval for a particular set of phase screens
(i.e., for a particular `PhaseScreenList`) is to provide the screens in the ``screen_list``
argument. Each screen in the list computes its own preferred sampling rate and the
`PhaseScreenList` appropriately aggregates these. This last option also requires that a
wavelength ``lam`` be specified, and is particularly helpful for creating PSFs derived from
turbulent atmospheric screens.
Finally, when specifying the pupil geometrically, Aperture may choose to make a small adjustment
to ``pupil_plane_scale`` in order to produce an array with a good size for FFTs. If your
application depends on knowing the size and scale used with the Fourier optics framework, you
can obtain these from the ``aper.pupil_plane_size`` and ``aper.pupil_plane_scale`` attributes.
**Pupil image specification**:
The second way to specify the pupil plane configuration is by passing in an image of it. This
can be useful, for example, if the struts are not evenly spaced or are not radially directed, as
is assumed by the simple model for struts described above. In this case, an exception is raised
if keywords related to struts are also given. On the other hand, the ``obscuration`` keyword is
still used to ensure that the PSF images are not aliased, though it is ignored during the actual
construction of the pupil plane illumination pattern. Note that for complicated pupil
configurations, it may be desirable to increase ``pad_factor`` for more fidelity at the expense
of slower running time. Finally, the ``pupil_plane_im`` that is passed in can be rotated during
internal calculations by specifying a ``pupil_angle`` keyword.
If you choose to pass in a pupil plane image, it must be a square array in which the image of
the pupil is centered. The areas that are illuminated should have some value >0, and the other
areas should have a value of precisely zero. Based on what the Aperture class determines is a
good PSF sampling interval, the image of the pupil plane that is passed in might be zero-padded
during internal calculations. (The pupil plane array size and scale values can be accessed via
the ``aper.pupil_plane_size`` and ``aper.pupil_plane_scale`` attributes.) The pixel scale of
the pupil plane can be specified in one of three ways. In descending order of priority, these
are:
1. The ``pupil_plane_scale`` keyword argument (units are meters).
2. The ``pupil_plane_im.scale`` attribute (units are meters).
3. If (1) and (2) are both None, then the scale will be inferred by assuming that the
illuminated pixel farthest from the image center is at a physical distance of self.diam/2.
The ``pupil_plane_size`` and ``lam`` keywords are both ignored when constructing an Aperture
from an image.
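As an illustrative sketch (the values here are arbitrary), a geometrically
specified aperture and one based on a pupil-plane image could be constructed as::

    aper_geom = galsim.Aperture(diam=4.0, lam=700.0, obscuration=0.1,
                                nstruts=4, strut_thick=0.02)
    aper_image = galsim.Aperture(diam=4.0, obscuration=0.1,
                                 pupil_plane_im=my_pupil_image)

where ``my_pupil_image`` stands for any suitable image (GalSim `Image`, NumPy
array, or file name) of the pupil.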
Parameters:
diam: Aperture diameter in meters.
lam: Wavelength in nanometers. [default: None]
circular_pupil: Adopt a circular pupil? [default: True]
obscuration: Linear dimension of central obscuration as fraction of aperture
linear dimension. [0., 1.). [default: 0.0]
nstruts: Number of radial support struts to add to the central obscuration.
[default: 0]
strut_thick: Thickness of support struts as a fraction of aperture diameter.
[default: 0.05]
strut_angle: `Angle` made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be an
`Angle` instance. [default: 0. * galsim.degrees]
oversampling: Optional oversampling factor *in the image plane* for the PSF
eventually constructed using this `Aperture`. Setting
``oversampling < 1`` will produce aliasing in the PSF (not good).
[default: 1.0]
pad_factor: Additional multiple by which to extend the PSF image to avoid
folding. [default: 1.0]
screen_list: An optional `PhaseScreenList` object. If present, then get a good
pupil sampling interval using this object. [default: None]
pupil_plane_im: The GalSim.Image, NumPy array, or name of file containing the pupil
plane image, to be used instead of generating one based on the
obscuration and strut parameters. [default: None]
pupil_angle: If ``pupil_plane_im`` is not None, rotation angle for the pupil plane
(positive in the counter-clockwise direction). Must be an `Angle`
instance. [default: 0. * galsim.degrees]
pupil_plane_scale: Sampling interval in meters to use for the pupil plane array. In
most cases, it's a good idea to leave this as None, in which case
GalSim will attempt to find a good value automatically. The
exception is when specifying the pupil arrangement via an image, in
which case this keyword can be used to indicate the sampling of that
image. See also ``pad_factor`` for adjusting the pupil sampling scale.
[default: None]
pupil_plane_size: Size in meters to use for the pupil plane array. In most cases, it's
a good idea to leave this as None, in which case GalSim will attempt
to find a good value automatically. See also ``oversampling`` for
adjusting the pupil size. [default: None]
gsparams: An optional `GSParams` argument. [default: None]
"""
def __init__(self, diam, lam=None, circular_pupil=True, obscuration=0.0,
nstruts=0, strut_thick=0.05, strut_angle=0.0*radians,
oversampling=1.0, pad_factor=1.0, screen_list=None,
pupil_plane_im=None, pupil_angle=0.0*radians,
pupil_plane_scale=None, pupil_plane_size=None,
gsparams=None):
self._diam = diam # Always need to explicitly specify an aperture diameter.
self._lam = lam
self._circular_pupil = circular_pupil
self._obscuration = obscuration
self._nstruts = nstruts
self._strut_thick = strut_thick
self._strut_angle = strut_angle
self._oversampling = oversampling
self._pad_factor = pad_factor
self._screen_list = screen_list
self._pupil_plane_im = pupil_plane_im
self._pupil_angle = pupil_angle
self._input_pupil_plane_scale = pupil_plane_scale
self._input_pupil_plane_size = pupil_plane_size
self._gsparams = GSParams.check(gsparams)
if diam <= 0.:
raise GalSimRangeError("Invalid diam.", diam, 0.)
if obscuration < 0. or obscuration >= 1.:
raise GalSimRangeError("Invalid obscuration.", obscuration, 0., 1.)
if not isinstance(strut_angle, Angle):
raise TypeError("strut_angle must be a galsim.Angle instance.")
if not isinstance(pupil_angle, Angle):
raise TypeError("pupil_angle must be a galsim.Angle instance.")
# You can either set geometric properties, or use a pupil image, but not both, so check for
# that here. One caveat is that we allow sanity checking the sampling of a pupil_image by
# comparing it to the sampling GalSim would have used for an (obscured) Airy profile. So
# it's okay to specify an obscuration and a pupil_plane_im together, for example, but not
# a pupil_plane_im and struts.
is_default_geom = (circular_pupil and
nstruts == 0 and
strut_thick == 0.05 and
strut_angle == 0.0*radians)
if not is_default_geom and pupil_plane_im is not None:
raise GalSimIncompatibleValuesError(
"Can't specify both geometric parameters and pupil_plane_im.",
circular_pupil=circular_pupil, nstruts=nstruts, strut_thick=strut_thick,
strut_angle=strut_angle, pupil_plane_im=pupil_plane_im)
if screen_list is not None and lam is None:
raise GalSimIncompatibleValuesError(
"Wavelength ``lam`` must be specified with ``screen_list``.",
screen_list=screen_list, lam=lam)
# For each of these, the actual value is defined during the construction of the _illuminated
# array, so access that (lazy) property first.
@property
def pupil_plane_scale(self):
"""The scale_size of the pupil-plane image.
"""
self._illuminated
return self._pupil_plane_scale
@property
def pupil_plane_size(self):
"""The size of the pupil-plane image.
"""
self._illuminated
return self._pupil_plane_size
@property
def npix(self):
"""The number of pixels in each direction of the pupil-plane image.
"""
self._illuminated
return self._npix
@lazy_property
def good_pupil_size(self):
"""An estimate of a good pupil-plane image size.
"""
# Although the user can set the pupil plane size and scale directly if desired, in most
# cases it's nicer to have GalSim try to pick good values for these.
# For the pupil plane size, we'll achieve Nyquist sampling in the focal plane if we sample
# out to twice the diameter of the actual aperture in the pupil plane (completely
# independent of wavelength, struts, obscurations, GSparams, and so on!). This corresponds
# to oversampling=1.0. In fact, if we were willing to always use sinc interpolation, there
# would never be any reason to go beyond this. In practice, we usually use a faster, but
# less accurate, quintic interpolant, which means we can benefit from improved sampling
# (oversampling > 1.0) in some cases, especially when we're *not* modeling an atmosphere
# which would otherwise tend to damp contributions at large k.
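        # For example (numbers purely illustrative): with diam=4 m and oversampling=1.0 this
        # gives an 8 m pupil array, which at lam=700 nm corresponds to a focal-plane sampling
        # of lam/L ~ 700e-9/8 rad ~ 0.018 arcsec, i.e. half of the Airy scale lam/diam.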
return 2 * self.diam * self._oversampling
@lazy_property
def good_pupil_scale(self):
"""An estimate of a good pupil-plane image scale.
"""
from .airy import Airy
# For the pupil plane sampling interval, details like the obscuration and GSParams *are*
# important as they affect the amount of aliasing encountered. (An Airy profile has an
# infinite extent in real space, so it *always* aliases at some level, more so with an
# obscuration than without. The GSParams settings indicate how much aliasing we're
# willing to tolerate, so it's required here.) To pick a good sampling interval, we start
# with the interval that would be used for an obscured Airy GSObject profile. If the
        # `screen_list` argument was supplied, then we also check its .stepk property, which
# aggregates a good sampling interval from all of the wrapped PhaseScreens, and keep the
# smaller stepk.
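        # Illustrative unit check (behavior is unchanged): airy.stepk below is in 1/arcsec, so
        # multiplying by (radians/arcsec) converts it to 1/radian, and dL = stepk_rad * lam / (2 pi)
        # (with lam converted from nm to m) is then the pupil pitch in meters, matching the
        # relations noted near the bottom of this class.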
if self._lam is None:
# For Airy, pupil_plane_scale is independent of wavelength. We could build an Airy with
# lam_over_diam=1.0 and then alter the `good_pupil_scale = ...` line below
# appropriately, but it's easier to just arbitrarily set `lam=500` if it wasn't set.
lam = 500.0
else:
lam = self._lam
airy = Airy(diam=self.diam, lam=lam, obscuration=self.obscuration, gsparams=self.gsparams)
stepk = airy.stepk
if self._screen_list is not None:
screen_list = PhaseScreenList(self._screen_list)
stepk = min(stepk,
screen_list._getStepK(lam=lam, diam=self.diam, obscuration=self.obscuration,
gsparams=self.gsparams))
return stepk * lam * 1.e-9 * (radians / arcsec) / (2 * np.pi * self._pad_factor)
@lazy_property
def _illuminated(self):
# Now that we have good candidate sizes and scales, we load or generate the pupil plane
# array.
if self._pupil_plane_im is not None: # Use image of pupil plane
return self._load_pupil_plane()
else: # Use geometric parameters.
if self._input_pupil_plane_scale is not None:
self._pupil_plane_scale = self._input_pupil_plane_scale
# Check input scale and warn if looks suspicious.
if self._pupil_plane_scale > self.good_pupil_scale:
ratio = self.good_pupil_scale / self._pupil_plane_scale
galsim_warn("Input pupil_plane_scale may be too large for good sampling.\n"
"Consider decreasing pupil_plane_scale by a factor %f, and/or "
"check PhaseScreenPSF outputs for signs of folding in real "
"space."%(1./ratio))
else:
self._pupil_plane_scale = self.good_pupil_scale
if self._input_pupil_plane_size is not None:
self._pupil_plane_size = self._input_pupil_plane_size
# Check input size and warn if looks suspicious
if self._pupil_plane_size < self.good_pupil_size:
ratio = self.good_pupil_size / self._pupil_plane_size
galsim_warn("Input pupil_plane_size may be too small for good focal-plane"
"sampling.\n"
"Consider increasing pupil_plane_size by a factor %f, and/or "
"check PhaseScreenPSF outputs for signs of undersampling."%ratio)
else:
self._pupil_plane_size = self.good_pupil_size
return self._generate_pupil_plane()
def _generate_pupil_plane(self):
""" Create an array of illuminated pixels parameterically.
"""
ratio = self._pupil_plane_size/self._pupil_plane_scale
# Fudge a little to prevent good_fft_size() from turning 512.0001 into 768.
ratio *= (1.0 - 1.0/2**14)
self._npix = Image.good_fft_size(int(np.ceil(ratio)))
# Check FFT size
if self._npix > self.gsparams.maximum_fft_size:
raise GalSimFFTSizeError("Created pupil plane array that is too large.",self._npix)
# Shrink scale such that size = scale * npix exactly.
self._pupil_plane_scale = self._pupil_plane_size / self._npix
radius = 0.5*self.diam
if self._circular_pupil:
illuminated = (self.rsqr < radius**2)
if self.obscuration > 0.:
illuminated *= self.rsqr >= (radius*self.obscuration)**2
else:
illuminated = (np.abs(self.u) < radius) & (np.abs(self.v) < radius)
if self.obscuration > 0.:
illuminated *= ((np.abs(self.u) >= radius*self.obscuration) *
(np.abs(self.v) >= radius*self.obscuration))
if self._nstruts > 0:
# Add the initial rotation if requested, converting to radians.
rot_u, rot_v = self.u, self.v
if self._strut_angle.rad != 0.:
rot_u, rot_v = rotate_xy(rot_u, rot_v, -self._strut_angle)
rotang = 360. * degrees / self._nstruts
# Then loop through struts setting to zero the regions which lie under the strut
for istrut in range(self._nstruts):
rot_u, rot_v = rotate_xy(rot_u, rot_v, -rotang)
illuminated *= ((np.abs(rot_u) >= radius * self._strut_thick) + (rot_v < 0.0))
return illuminated
def _load_pupil_plane(self):
""" Create an array of illuminated pixels with appropriate size and scale from an input
image of the pupil. The basic strategy is:
1. Read in array.
2. Determine the scale.
3. Pad the input array with zeros to meet the requested pupil size.
4. Check that the pupil plane sampling interval is at least as small as requested.
5. Optionally rotate pupil plane.
"""
from . import fits
# Handle multiple types of input: NumPy array, galsim.Image, or string for filename with
# image.
if isinstance(self._pupil_plane_im, np.ndarray):
# Make it into an image.
self._pupil_plane_im = Image(self._pupil_plane_im)
elif isinstance(self._pupil_plane_im, Image):
# Make sure not to overwrite input image.
self._pupil_plane_im = self._pupil_plane_im.copy()
else:
# Read in image of pupil plane from file.
self._pupil_plane_im = fits.read(self._pupil_plane_im)
# scale = pupil_plane_im.scale # Interpret as either the pixel scale in meters, or None.
pp_arr = self._pupil_plane_im.array
self._npix = pp_arr.shape[0]
# Check FFT size
if self._npix > self.gsparams.maximum_fft_size:
raise GalSimFFTSizeError("Loaded pupil plane array that is too large.", self._npix)
# Sanity checks
if self._pupil_plane_im.array.shape[0] != self._pupil_plane_im.array.shape[1]:
raise GalSimValueError("Input pupil_plane_im must be square.",
self._pupil_plane_im.array.shape)
if self._pupil_plane_im.array.shape[0] % 2 == 1:
raise GalSimValueError("Input pupil_plane_im must have even sizes.",
self._pupil_plane_im.array.shape)
# Set the scale, priority is:
# 1. pupil_plane_scale kwarg
# 2. image.scale if not None
# 3. Use diameter and farthest illuminated pixel.
if self._input_pupil_plane_scale is not None:
self._pupil_plane_scale = self._input_pupil_plane_scale
elif self._pupil_plane_im.scale is not None:
self._pupil_plane_scale = self._pupil_plane_im.scale
else:
# If self._pupil_plane_scale is not set yet, then figure it out from the distance
# of the farthest illuminated pixel from the image center and the aperture diameter.
# below is essentially np.linspace(-0.5, 0.5, self._npix)
u = np.fft.fftshift(np.fft.fftfreq(self._npix))
u, v = np.meshgrid(u, u)
r = np.hypot(u, v)
rmax_illum = np.max(r*(self._pupil_plane_im.array > 0))
self._pupil_plane_scale = self.diam / (2.0 * rmax_illum * self._npix)
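            # E.g. (illustrative): if the illuminated region extends all the way to the edge of
            # the array, then rmax_illum = 0.5 and this reduces to diam / npix, i.e. the pupil
            # exactly fills the image.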
self._pupil_plane_size = self._pupil_plane_scale * self._npix
# Check the pupil plane size here and bump it up if necessary.
if self._pupil_plane_size < self.good_pupil_size:
new_npix = Image.good_fft_size(int(np.ceil(
self.good_pupil_size/self._pupil_plane_scale)))
pad_width = (new_npix-self._npix)//2
pp_arr = np.pad(pp_arr, [(pad_width, pad_width)]*2, mode='constant')
self._npix = new_npix
self._pupil_plane_size = self._pupil_plane_scale * self._npix
# Check sampling interval and warn if it's not good enough.
if self._pupil_plane_scale > self.good_pupil_scale:
ratio = self._pupil_plane_scale / self.good_pupil_scale
galsim_warn("Input pupil plane image may not be sampled well enough!\n"
"Consider increasing sampling by a factor %f, and/or check "
"PhaseScreenPSF outputs for signs of folding in real space."%ratio)
if self._pupil_angle.rad == 0.:
return pp_arr.astype(bool)
else:
# Rotate the pupil plane image as required based on the `pupil_angle`, being careful to
# ensure that the image is one of the allowed types. We ignore the scale.
b = _BoundsI(1,self._npix,1,self._npix)
im = _Image(pp_arr, b, PixelScale(1.))
int_im = InterpolatedImage(im, x_interpolant='linear',
calculate_stepk=False, calculate_maxk=False)
int_im = int_im.rotate(self._pupil_angle)
new_im = Image(pp_arr.shape[1], pp_arr.shape[0])
new_im = int_im.drawImage(image=new_im, scale=1., method='no_pixel')
pp_arr = new_im.array
# Restore hard edges that might have been lost during the interpolation. To do this, we
# check the maximum value of the entries. Values after interpolation that are >half
# that maximum value are kept as nonzero (True), but those that are <half the maximum
# value are set to zero (False).
max_pp_val = np.max(pp_arr)
pp_arr[pp_arr < 0.5*max_pp_val] = 0.
return pp_arr.astype(bool)
@property
def gsparams(self):
"""The `GSParams` of this object.
"""
return self._gsparams
def withGSParams(self, gsparams=None, **kwargs):
"""Create a version of the current aperture with the given gsparams
"""
if gsparams == self.gsparams: return self
from copy import copy
ret = copy(self)
ret._gsparams = GSParams.check(gsparams, self.gsparams, **kwargs)
return ret
# Used in Aperture.__str__ and OpticalPSF.__str__
def _geometry_str(self):
s = ""
if not self._circular_pupil:
s += ", circular_pupil=False"
if self.obscuration != 0.0:
s += ", obscuration=%s"%self.obscuration
if self._nstruts != 0:
s += ", nstruts=%s"%self._nstruts
if self._strut_thick != 0.05:
s += ", strut_thick=%s"%self._strut_thick
if self._strut_angle != 0*radians:
s += ", strut_angle=%s"%self._strut_angle
return s
def __str__(self):
s = "galsim.Aperture(diam=%r"%self.diam
if self._pupil_plane_im is None:
# Pupil was created geometrically, so use that here.
s += self._geometry_str()
s += ")"
return s
def _geometry_repr(self):
s = ""
if not self._circular_pupil:
s += ", circular_pupil=False"
if self.obscuration != 0.0:
s += ", obscuration=%r"%self.obscuration
if self._nstruts != 0:
s += ", nstruts=%r"%self._nstruts
if self._strut_thick != 0.05:
s += ", strut_thick=%r"%self._strut_thick
if self._strut_angle != 0*radians:
s += ", strut_angle=%r"%self._strut_angle
return s
def __repr__(self):
s = "galsim.Aperture(diam=%r"%self.diam
if self._pupil_plane_im is None:
# Pupil was created geometrically, so use that here.
s += self._geometry_repr()
s += ", pupil_plane_scale=%r"%self._input_pupil_plane_scale
s += ", pupil_plane_size=%r"%self._input_pupil_plane_size
s += ", oversampling=%r"%self._oversampling
s += ", pad_factor=%r"%self._pad_factor
else:
# Pupil was created from image, so use that instead.
# It's slightly less annoying to see an enormous stream of zeros fly by than an enormous
# stream of Falses, so convert to int16.
tmp = self.illuminated.astype(np.int16).tolist()
s += ", pupil_plane_im=array(%r"%tmp+", dtype='int16')"
s += ", pupil_plane_scale=%r"%self._pupil_plane_scale
if self.gsparams != GSParams():
s += ", gsparams=%r"%self.gsparams
s += ")"
return s
def __eq__(self, other):
if self is other: return True
if not (isinstance(other, Aperture) and
self.diam == other.diam and
self._gsparams == other._gsparams):
return False
if self._pupil_plane_im is not None:
return (self.pupil_plane_scale == other.pupil_plane_scale and
np.array_equal(self.illuminated, other.illuminated))
else:
return (other._pupil_plane_im is None and
self._circular_pupil == other._circular_pupil and
self._obscuration == other._obscuration and
self._nstruts == other._nstruts and
self._strut_thick == other._strut_thick and
self._strut_angle == other._strut_angle and
self._input_pupil_plane_scale == other._input_pupil_plane_scale and
self._input_pupil_plane_size == other._input_pupil_plane_size and
self._oversampling == other._oversampling and
self._pad_factor == other._pad_factor)
def __hash__(self):
# Cache since self.illuminated may be large.
if not hasattr(self, '_hash'):
self._hash = hash(("galsim.Aperture", self.diam, self.pupil_plane_scale))
self._hash ^= hash(tuple(self.illuminated.ravel()))
return self._hash
# Properties show up nicely in the interactive terminal for
# >>>help(Aperture)
# So we make a thin wrapper here.
@property
def illuminated(self):
"""A boolean array indicating which positions in the pupil plane are exposed to the sky.
"""
return self._illuminated
@lazy_property
def rho(self):
"""Unit-disk normalized pupil plane coordinate as a complex number:
(x, y) => x + 1j * y.
"""
self._illuminated
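        # The frequency spacing below is chosen so that |rho| = 1 at the aperture radius diam/2;
        # i.e. rho is equivalent to (u + 1j*v) / (0.5*diam).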
u = np.fft.fftshift(np.fft.fftfreq(self._npix, self.diam/self._pupil_plane_size/2.0))
u, v = np.meshgrid(u, u)
return u + 1j * v
@lazy_property
def _uv(self):
if not hasattr(self, '_npix'):
# Need this check, since `_uv` is used by `_illuminated`, so need to make sure we
# don't have an infinite loop.
self._illuminated
u = np.fft.fftshift(np.fft.fftfreq(self._npix, 1./self._pupil_plane_size))
u, v = np.meshgrid(u, u)
return u, v
@property
def u(self):
"""Pupil horizontal coordinate array in meters."""
return self._uv[0]
@property
def v(self):
"""Pupil vertical coordinate array in meters."""
return self._uv[1]
@lazy_property
def u_illuminated(self):
"""The u values for only the `illuminated` pixels.
"""
return self.u[self.illuminated]
@lazy_property
def v_illuminated(self):
"""The v values for only the `illuminated` pixels.
"""
return self.v[self.illuminated]
@lazy_property
def rsqr(self):
"""Pupil radius squared array in meters squared."""
return self.u**2 + self.v**2
@property
def diam(self):
"""Aperture diameter in meters"""
return self._diam
@property
def obscuration(self):
"""Fraction linear obscuration of pupil."""
return self._obscuration
def __getstate__(self):
# Let unpickled object reconstruct cached values on-the-fly instead of including them in the
# pickle.
d = self.__dict__.copy()
for k in ('rho', '_uv', 'rsqr', 'u_illuminated', 'v_illuminated'):
d.pop(k, None)
# Only reconstruct _illuminated if we made it from geometry. If loaded, it's probably
# faster to serialize the array.
if self._pupil_plane_im is None:
d.pop('_illuminated', None)
return d
def samplePupil(self, photons, rng):
"""Set the pupil_u and pupil_v values in the PhotonArray by sampling the current aperture.
"""
from .random import UniformDeviate
n_photons = len(photons)
u = self.u_illuminated
v = self.v_illuminated
gen = rng.as_numpy_generator()
pick = gen.choice(len(u), size=n_photons).astype(int)
photons.pupil_u = u[pick]
photons.pupil_v = v[pick]
# Make continuous by adding +/- 0.5 pixels shifts.
uscale = self.u[0, 1] - self.u[0, 0]
vscale = self.v[1, 0] - self.v[0, 0]
photons.pupil_u += gen.uniform(-uscale/2.,uscale/2.,size=n_photons)
photons.pupil_v += gen.uniform(-vscale/2.,vscale/2.,size=n_photons)
# Some quick notes for Josh:
# - Relation between real-space grid with size theta and pitch dtheta (dimensions of angle)
# and corresponding (fast) Fourier grid with size 2*maxk and pitch stepk (dimensions of
# inverse angle):
# stepk = 2*pi/theta
# maxk = pi/dtheta
# - Relation between aperture of size L and pitch dL (dimensions of length, not angle!) and
# (fast) Fourier grid:
# dL = stepk * lambda / (2 * pi)
# L = maxk * lambda / pi
# - Implies relation between aperture grid and real-space grid:
# dL = lambda/theta
# L = lambda/dtheta
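    # - Illustrative numbers (not used by the code): an 8 m pupil sampled with dL = 0.02 m at
    #   lam = 500 nm gives dtheta = lam/L = 500e-9/8 rad ~ 0.013 arcsec and a field of view
    #   theta = lam/dL = 500e-9/0.02 rad ~ 5.2 arcsec.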
#
# MJ: Of these four, only _sky_scale is still used. The rest are left here for informational
# purposes, but nothing actually calls them.
def _getStepK(self, lam, scale_unit=arcsec):
"""Return the Fourier grid spacing for this aperture at given wavelength.
Parameters:
lam: Wavelength in nanometers.
scale_unit: Inverse units in which to return result [default: galsim.arcsec]
Returns:
Fourier grid spacing.
"""
return 2*np.pi*self.pupil_plane_scale/(lam*1e-9) * scale_unit/radians
def _getMaxK(self, lam, scale_unit=arcsec):
"""Return the Fourier grid half-size for this aperture at given wavelength.
Parameters:
lam: Wavelength in nanometers.
scale_unit: Inverse units in which to return result [default: galsim.arcsec]
Returns:
Fourier grid half-size.
"""
return np.pi*self.pupil_plane_size/(lam*1e-9) * scale_unit/radians
def _sky_scale(self, lam, scale_unit=arcsec):
"""Return the image scale for this aperture at given wavelength.
Parameters:
lam: Wavelength in nanometers.
scale_unit: Units in which to return result [default: galsim.arcsec]
Returns:
Image scale.
"""
return (lam*1e-9) / self.pupil_plane_size * radians/scale_unit
def _sky_size(self, lam, scale_unit=arcsec):
"""Return the image size for this aperture at given wavelength.
Parameters:
lam: Wavelength in nanometers.
scale_unit: Units in which to return result [default: galsim.arcsec]
Returns:
Image size.
"""
return (lam*1e-9) / self.pupil_plane_scale * radians/scale_unit
class PhaseScreenList:
"""List of phase screens that can be turned into a PSF. Screens can be either atmospheric
layers or optical phase screens. Generally, one would assemble a PhaseScreenList object using
the function `Atmosphere`. Layers can be added, removed, appended, etc. just like items can
be manipulated in a python list. For example::
# Create an atmosphere with three layers.
>>> screens = galsim.PhaseScreenList([galsim.AtmosphericScreen(...),
galsim.AtmosphericScreen(...),
galsim.AtmosphericScreen(...)])
# Add another layer
>>> screens.append(galsim.AtmosphericScreen(...))
# Remove the second layer
>>> del screens[1]
# Switch the first and second layer. Silly, but works...
>>> screens[0], screens[1] = screens[1], screens[0]
Parameters:
layers: Sequence of phase screens.
"""
def __init__(self, *layers):
from .phase_screens import AtmosphericScreen, OpticalScreen
if len(layers) == 1:
# First check if layers[0] is a PhaseScreenList, so we avoid nesting.
if isinstance(layers[0], PhaseScreenList):
self._layers = layers[0]._layers
else:
# Next, see if layers[0] is iterable. E.g., to catch generator expressions.
try:
self._layers = list(layers[0])
except TypeError:
self._layers = list(layers)
else:
self._layers = list(layers)
self._update_attrs()
self._pending = [] # Pending PSFs to calculate upon first drawImage.
def __len__(self):
return len(self._layers)
def __getitem__(self, index):
try:
items = self._layers[index]
except TypeError:
msg = "{cls.__name__} indices must be integers or slices"
raise TypeError(msg.format(cls=self.__class__))
try:
            index + 1  # Regular int indices are the norm, so try something that works for an int,
# but not for slices, where we need different handling.
except TypeError:
# index is a slice, so items is a list.
return PhaseScreenList(items)
else:
# index is an int, so items is just one screen.
return items
def __setitem__(self, index, layer):
self._layers[index] = layer
self._update_attrs()
def __delitem__(self, index):
del self._layers[index]
self._update_attrs()
def append(self, layer):
self._layers.append(layer)
self._update_attrs()
def extend(self, layers):
self._layers.extend(layers)
self._update_attrs()
def __str__(self):
return "galsim.PhaseScreenList([%s])" % ",".join(str(l) for l in self._layers)
def __repr__(self):
return "galsim.PhaseScreenList(%r)" % self._layers
def __eq__(self, other):
return (self is other or
(isinstance(other,PhaseScreenList) and self._layers == other._layers))
def __ne__(self, other): return not self == other
__hash__ = None # Mutable means not hashable.
def _update_attrs(self):
# If any of the wrapped PhaseScreens have an rng, then eval(repr(screen_list)) will run, but
# fail to round-trip to the original object. So we search for that here and set/delete a
# dummy rng sentinel attribute so do_pickle() will know to skip the obj == eval(repr(obj))
# test.
self.__dict__.pop('rng', None)
self.dynamic = any(l.dynamic for l in self)
self.reversible = all(l.reversible for l in self)
self.__dict__.pop('r0_500_effective', None)
def _seek(self, t):
"""Set all layers' internal clocks to time t."""
for layer in self:
try:
layer._seek(t)
except AttributeError:
# Time indep phase screen
pass
self._update_attrs()
def _reset(self):
"""Reset phase screens back to time=0."""
for layer in self:
try:
layer._reset()
except AttributeError:
# Time indep phase screen
pass
self._update_attrs()
def instantiate(self, pool=None, _bar=None, **kwargs):
"""Instantiate the screens in this `PhaseScreenList`.
Parameters:
pool: A multiprocessing.Pool object to use to instantiate screens in parallel.
**kwargs: Keyword arguments to forward to screen.instantiate().
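        For example (an illustrative sketch, assuming ``screens`` is a `PhaseScreenList`)::
            >>> import multiprocessing
            >>> with multiprocessing.Pool(4) as pool:
            ...     screens.instantiate(pool=pool)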
"""
_bar = _bar if _bar else dict() # with dict() _bar.update() is a trivial no op.
if pool is not None:
results = []
for layer in self:
try:
results.append(pool.apply_async(layer.instantiate, kwds=kwargs))
except AttributeError: # OpticalScreen has no instantiate method
pass
_bar.update()
for r in results:
r.wait()
else:
for layer in self:
try:
layer.instantiate(**kwargs)
except AttributeError:
pass
_bar.update()
def _delayCalculation(self, psf):
"""Add psf to delayed calculation list."""
heappush(self._pending, (psf.t0, OrderedWeakRef(psf)))
def _prepareDraw(self):
"""Calculate previously delayed PSFs."""
if not self._pending:
return
# See if we have any dynamic screens. If not, then we can immediately compute each PSF
# in a simple loop.
if not self.dynamic:
for _, psfref in self._pending:
psf = psfref()
if psf is not None:
psf._step()
psf._finalize()
self._pending = []
self._update_time_heap = []
return
# If we do have time-evolving screens, then iteratively increment the time while being
# careful to always stop at multiples of each PSF's time_step attribute to update that PSF.
# Use a heap (in _pending list) to track the next time to stop at.
        while self._pending:
# Get and seek to next time that has a PSF update.
t, psfref = heappop(self._pending)
# Check if this PSF weakref is still alive
psf = psfref()
if psf is not None:
# If it's alive, update this PSF
self._seek(t)
psf._step()
# If that PSF's next possible update time doesn't extend past its exptime, then
# push it back on the heap.
t += psf.time_step
if t < psf.t0 + psf.exptime:
heappush(self._pending, (t, OrderedWeakRef(psf)))
else:
psf._finalize()
self._pending = []
def wavefront(self, u, v, t, theta=(0.0*radians, 0.0*radians)):
""" Compute cumulative wavefront due to all phase screens in `PhaseScreenList`.
Wavefront here indicates the distance by which the physical wavefront lags or leads the
ideal plane wave (pre-optics) or spherical wave (post-optics).
Parameters:
u: Horizontal pupil coordinate (in meters) at which to evaluate wavefront. Can
be a scalar or an iterable. The shapes of u and v must match.
v: Vertical pupil coordinate (in meters) at which to evaluate wavefront. Can
be a scalar or an iterable. The shapes of u and v must match.
t: Times (in seconds) at which to evaluate wavefront. Can be None, a scalar or an
iterable. If None, then the internal time of the phase screens will be used
for all u, v. If scalar, then the size will be broadcast up to match that of
u and v. If iterable, then the shape must match the shapes of u and v.
theta: Field angle at which to evaluate wavefront, as a 2-tuple of `galsim.Angle`
instances. [default: (0.0*galsim.arcmin, 0.0*galsim.arcmin)]
Only a single theta is permitted.
Returns:
Array of wavefront lag or lead in nanometers.
"""
if len(self._layers) > 1:
return np.sum([layer.wavefront(u, v, t, theta) for layer in self], axis=0)
else:
return self._layers[0].wavefront(u, v, t, theta)
def wavefront_gradient(self, u, v, t, theta=(0.0*radians, 0.0*radians)):
""" Compute cumulative wavefront gradient due to all phase screens in `PhaseScreenList`.
Parameters:
u: Horizontal pupil coordinate (in meters) at which to evaluate wavefront. Can
be a scalar or an iterable. The shapes of u and v must match.
v: Vertical pupil coordinate (in meters) at which to evaluate wavefront. Can
be a scalar or an iterable. The shapes of u and v must match.
t: Times (in seconds) at which to evaluate wavefront gradient. Can be None, a
scalar or an iterable. If None, then the internal time of the phase screens
will be used for all u, v. If scalar, then the size will be broadcast up to
match that of u and v. If iterable, then the shape must match the shapes of
u and v.
theta: Field angle at which to evaluate wavefront, as a 2-tuple of `galsim.Angle`
instances. [default: (0.0*galsim.arcmin, 0.0*galsim.arcmin)]
Only a single theta is permitted.
Returns:
Arrays dWdu and dWdv of wavefront lag or lead gradient in nm/m.
"""
if len(self._layers) > 1:
return np.sum([layer.wavefront_gradient(u, v, t, theta) for layer in self], axis=0)
else:
return self._layers[0].wavefront_gradient(u, v, t, theta)
def _wavefront(self, u, v, t, theta):
if len(self._layers) > 1:
return np.sum([layer._wavefront(u, v, t, theta) for layer in self], axis=0)
else:
return self._layers[0]._wavefront(u, v, t, theta)
def _wavefront_gradient(self, u, v, t, theta):
gradx, grady = self._layers[0]._wavefront_gradient(u, v, t, theta)
for layer in self._layers[1:]:
gx, gy = layer._wavefront_gradient(u, v, t, theta)
gradx += gx
grady += gy
return gradx, grady
def makePSF(self, lam, **kwargs):
"""Create a PSF from the current `PhaseScreenList`.
Parameters:
lam: Wavelength in nanometers at which to compute PSF.
t0: Time at which to start exposure in seconds. [default: 0.0]
exptime: Time in seconds over which to accumulate evolving instantaneous
PSF. [default: 0.0]
time_step: Time interval in seconds with which to sample phase screens when
drawing using real-space or Fourier methods, or when using
photon-shooting without the geometric optics approximation. Note
that the default value of 0.025 is fairly arbitrary. For careful
studies, we recommend checking that results are stable when
decreasing time_step. Also note that when drawing using
photon-shooting with the geometric optics approximation this
keyword is ignored, as the phase screen can be sampled
continuously in this case instead of at discrete intervals.
[default: 0.025]
flux: Flux of output PSF. [default: 1.0]
theta: Field angle of PSF as a 2-tuple of `Angle` instances.
[default: (0.0*galsim.arcmin, 0.0*galsim.arcmin)]
interpolant: Either an Interpolant instance or a string indicating which
interpolant should be used. Options are 'nearest', 'sinc',
'linear', 'cubic', 'quintic', or 'lanczosN' where N should be the
integer order to use. [default: galsim.Quintic()]
scale_unit: Units to use for the sky coordinates of the output profile.
[default: galsim.arcsec]
ii_pad_factor: Zero-padding factor by which to extend the image of the PSF when
creating the ``InterpolatedImage``. See the
``InterpolatedImage`` docstring for more details. [default: 1.5]
suppress_warning: If ``pad_factor`` is too small, the code will emit a warning
telling you its best guess about how high you might want to raise
it. However, you can suppress this warning by using
``suppress_warning=True``. [default: False]
geometric_shooting: If True, then when drawing using photon shooting, use geometric
optics approximation where the photon angles are derived from the
phase screen gradient. If False, then first draw using Fourier
optics and then shoot from the derived InterpolatedImage.
[default: True]
aper: `Aperture` to use to compute PSF(s). [default: None]
second_kick: An optional second kick to also convolve by when using geometric
photon-shooting. (This can technically be any `GSObject`, though
usually it should probably be a SecondKick object). If None, then a
good second kick will be chosen automatically based on
``screen_list``. If False, then a second kick won't be applied.
[default: None]
kcrit: Critical Fourier scale (in units of 1/r0) at which to separate low-k
and high-k turbulence. The default value was chosen based on
comparisons between Fourier optics and geometric optics with a
second kick correction. While most values of kcrit smaller than the
default produce similar results, we caution the user to compare the
affected geometric PSFs against Fourier optics PSFs carefully before
changing this value. [default: 0.2]
fft_sign: The sign (+/-) to use in the exponent of the Fourier kernel when
evaluating the Fourier optics PSF. As of version 2.3, GalSim uses a
plus sign by default, which we believe to be consistent with, for
example, how Zemax computes a Fourier optics PSF on DECam. Before
version 2.3, the default was a negative sign. Input should be
either the string '+' or the string '-'. [default: '+']
gsparams: An optional `GSParams` argument. [default: None]
        The following are optional keywords to use to set up the aperture if ``aper`` is not
provided.
Parameters:
diam: Aperture diameter in meters.
circular_pupil: Adopt a circular pupil? [default: True]
obscuration: Linear dimension of central obscuration as fraction of aperture
linear dimension. [0., 1.). [default: 0.0]
nstruts: Number of radial support struts to add to the central
obscuration. [default: 0]
strut_thick: Thickness of support struts as a fraction of aperture diameter.
[default: 0.05]
strut_angle: `Angle` made between the vertical and the strut starting closest to
it, defined to be positive in the counter-clockwise direction;
must be an `Angle` instance. [default: 0. * galsim.degrees]
oversampling: Optional oversampling factor *in the image plane* for the PSF
eventually constructed using this `Aperture`. Setting
``oversampling < 1`` will produce aliasing in the PSF (not good).
[default: 1.0]
pad_factor: Additional multiple by which to extend the PSF image to avoid
folding. [default: 1.0]
pupil_plane_im: The GalSim.Image, NumPy array, or name of file containing the
pupil plane image, to be used instead of generating one based on
the obscuration and strut parameters. [default: None]
pupil_angle: If ``pupil_plane_im`` is not None, rotation angle for the pupil
plane (positive in the counter-clockwise direction). Must be an
`Angle` instance. [default: 0. * galsim.degrees]
pupil_plane_scale: Sampling interval in meters to use for the pupil plane array. In
most cases, it's a good idea to leave this as None, in which case
GalSim will attempt to find a good value automatically. The
exception is when specifying the pupil arrangement via an image,
in which case this keyword can be used to indicate the sampling
of that image. See also ``pad_factor`` for adjusting the pupil
sampling scale. [default: None]
pupil_plane_size: Size in meters to use for the pupil plane array. In most cases,
it's a good idea to leave this as None, in which case GalSim will
attempt to find a good value automatically. See also
``oversampling`` for adjusting the pupil size. [default: None]
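        For example (a minimal sketch; the screen and aperture parameters are illustrative)::
            >>> screens = galsim.Atmosphere(r0_500=0.15, screen_size=102.4, altitude=10.)
            >>> psf = screens.makePSF(lam=700., diam=8.36, exptime=15.)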
"""
return PhaseScreenPSF(self, lam, **kwargs)
@lazy_property
def r0_500_effective(self):
"""Effective r0_500 for set of screens in list that define an r0_500 attribute."""
r0_500s = np.array([l.r0_500 for l in self if hasattr(l, 'r0_500')])
if len(r0_500s) == 0:
return None
else:
return np.sum(r0_500s**(-5./3))**(-3./5)
def _getStepK(self, **kwargs):
"""Return an appropriate stepk for this list of phase screens.
The required set of parameters depends on the types of the individual `PhaseScreen`
instances in the `PhaseScreenList`. See the documentation for the individual
`PhaseScreen.pupil_plane_scale` methods for more details.
Returns:
stepk.
"""
# Generically, GalSim propagates stepk for convolutions using
# stepk = sum(s**-2 for s in stepks)**(-0.5)
# We're not actually doing convolution between screens here, though. In fact, the right
# relation for Kolmogorov screens uses exponents -5./3 and -3./5:
# stepk = sum(s**(-5./3) for s in stepks)**(-3./5)
# Since most of the layers in a PhaseScreenList are likely to be (nearly) Kolmogorov
# screens, we'll use that relation.
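        # E.g. (illustrative): two identical Kolmogorov-like layers, each with stepk s, combine to
        # (2*s**(-5./3))**(-3./5) = 2**(-3./5)*s ~ 0.66*s, i.e. the combined PSF needs a larger
        # image than either layer alone would, as expected for added turbulence.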
return np.sum([layer._getStepK(**kwargs)**(-5./3) for layer in self])**(-3./5)
def __getstate__(self):
d = self.__dict__.copy()
d['_pending'] = []
return d
class PhaseScreenPSF(GSObject):
"""A PSF surface brightness profile constructed by integrating over time the instantaneous PSF
derived from a set of phase screens and an aperture.
There are two equivalent ways to construct a PhaseScreenPSF given a `PhaseScreenList`::
>>> psf = screen_list.makePSF(...)
>>> psf = PhaseScreenPSF(screen_list, ...)
Computing a PSF from a phase screen also requires an `Aperture` be specified. This can be done
either directly via the ``aper`` keyword, or by setting a number of keywords that will be passed
to the `Aperture` constructor. The ``aper`` keyword always takes precedence.
There are effectively three ways to draw a PhaseScreenPSF (or `GSObject` that includes a
PhaseScreenPSF):
1) Fourier optics
This is the default, and is performed for all drawImage methods except method='phot'. This
is generally the most accurate option. For a `PhaseScreenList` that includes an
`AtmosphericScreen`, however, this can be prohibitively slow. For `OpticalPSF`, though,
this can sometimes be a good option.
2) Photon-shooting from an image produced using Fourier optics.
This is done if geometric_shooting=False when creating the PhaseScreenPSF, and method='phot'
when calling drawImage. This actually performs the same calculations as the Fourier optics
option above, but then proceeds by shooting photons from that result. This can sometimes be
        a good option for OpticalPSFs, especially if the same OpticalPSF can be reused for many
objects, since the Fourier part of the process would only be performed once in this case.
3) Photon-shooting using the "geometric approximation".
This is done if geometric_shooting=True when creating the PhaseScreenPSF, and method='phot'
        when calling drawImage.  In this case, a completely different algorithm is used to make an
image. Photons are uniformly generated in the `Aperture` pupil, and then the phase gradient
at that location is used to deflect each photon in the image plane. This method, which
corresponds to geometric optics, is broadly accurate for phase screens that vary slowly
across the aperture, and is usually several orders of magnitude or more faster than Fourier
optics (depending on the flux of the object, of course, but much faster even for rather
bright flux levels).
        One shortcoming of this method is that it neglects interference effects, i.e. diffraction.
For `PhaseScreenList` that include at least one `AtmosphericScreen`, a correction, dubbed
the "second kick", will automatically be applied to handle both the quickly varying modes
of the screens and the diffraction pattern of the `Aperture`. For PhaseScreenLists without
an `AtmosphericScreen`, the correction is simply an Airy function. Note that this
correction can be overridden using the second_kick keyword argument, and also tuned to some
extent using the kcrit keyword argument.
Note also that calling drawImage on a PhaseScreenPSF that uses a `PhaseScreenList` with any
uninstantiated `AtmosphericScreen` will perform that instantiation, and that the details of the
instantiation depend on the drawing method used, and also the kcrit keyword argument to
PhaseScreenPSF. See the `AtmosphericScreen` docstring for more details.
Parameters:
screen_list: `PhaseScreenList` object from which to create PSF.
lam: Wavelength in nanometers at which to compute PSF.
t0: Time at which to start exposure in seconds. [default: 0.0]
exptime: Time in seconds over which to accumulate evolving instantaneous PSF.
[default: 0.0]
time_step: Time interval in seconds with which to sample phase screens when
drawing using real-space or Fourier methods, or when using
photon-shooting without the geometric optics approximation. Note
that the default value of 0.025 is fairly arbitrary. For careful
studies, we recommend checking that results are stable when
decreasing time_step. Also note that when drawing using
photon-shooting with the geometric optics approximation this
keyword is ignored, as the phase screen can be sampled
continuously in this case instead of at discrete intervals.
[default: 0.025]
flux: Flux of output PSF [default: 1.0]
theta: Field angle of PSF as a 2-tuple of `Angle` instances.
[default: (0.0*galsim.arcmin, 0.0*galsim.arcmin)]
interpolant: Either an Interpolant instance or a string indicating which
interpolant should be used. Options are 'nearest', 'sinc', 'linear',
'cubic', 'quintic', or 'lanczosN' where N should be the integer order
to use. [default: galsim.Quintic()]
scale_unit: Units to use for the sky coordinates of the output profile.
[default: galsim.arcsec]
ii_pad_factor: Zero-padding factor by which to extend the image of the PSF when
creating the ``InterpolatedImage``. See the ``InterpolatedImage``
docstring for more details. [default: 1.5]
suppress_warning: If ``pad_factor`` is too small, the code will emit a warning telling
you its best guess about how high you might want to raise it.
However, you can suppress this warning by using
``suppress_warning=True``. [default: False]
geometric_shooting: If True, then when drawing using photon shooting, use geometric
optics approximation where the photon angles are derived from the
phase screen gradient. If False, then first draw using Fourier
optics and then shoot from the derived InterpolatedImage.
[default: True]
aper: `Aperture` to use to compute PSF(s). [default: None]
second_kick: An optional second kick to also convolve by when using geometric
photon-shooting. (This can technically be any `GSObject`, though
usually it should probably be a SecondKick object). If None, then a
good second kick will be chosen automatically based on
``screen_list``. If False, then a second kick won't be applied.
[default: None]
kcrit: Critical Fourier scale (in units of 1/r0) at which to separate low-k
and high-k turbulence. The default value was chosen based on
comparisons between Fourier optics and geometric optics with a second
kick correction. While most values of kcrit smaller than the default
produce similar results, we caution the user to compare the affected
geometric PSFs against Fourier optics PSFs carefully before changing
this value. [default: 0.2]
fft_sign: The sign (+/-) to use in the exponent of the Fourier kernel when
evaluating the Fourier optics PSF. As of version 2.3, GalSim uses a
plus sign by default, which we believe to be consistent with, for
example, how Zemax computes a Fourier optics PSF on DECam. Before
version 2.3, the default was a negative sign. Input should be either
the string '+' or the string '-'. [default: '+']
gsparams: An optional `GSParams` argument. [default: None]
    The following are optional keywords to use to set up the aperture if ``aper`` is not provided:
Parameters:
diam: Aperture diameter in meters. [default: None]
circular_pupil: Adopt a circular pupil? [default: True]
obscuration: Linear dimension of central obscuration as fraction of aperture
linear dimension. [0., 1.). [default: 0.0]
nstruts: Number of radial support struts to add to the central obscuration.
[default: 0]
strut_thick: Thickness of support struts as a fraction of aperture diameter.
[default: 0.05]
strut_angle: `Angle` made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be an
`Angle` instance. [default: 0. * galsim.degrees]
oversampling: Optional oversampling factor *in the image plane* for the PSF
eventually constructed using this `Aperture`. Setting
``oversampling < 1`` will produce aliasing in the PSF (not good).
[default: 1.0]
pad_factor: Additional multiple by which to extend the PSF image to avoid
folding. [default: 1.0]
pupil_plane_im: The GalSim.Image, NumPy array, or name of file containing the pupil
plane image, to be used instead of generating one based on the
obscuration and strut parameters. [default: None]
pupil_angle: If ``pupil_plane_im`` is not None, rotation angle for the pupil plane
(positive in the counter-clockwise direction). Must be an `Angle`
instance. [default: 0. * galsim.degrees]
pupil_plane_scale: Sampling interval in meters to use for the pupil plane array. In
most cases, it's a good idea to leave this as None, in which case
GalSim will attempt to find a good value automatically. The
exception is when specifying the pupil arrangement via an image, in
which case this keyword can be used to indicate the sampling of that
image. See also ``pad_factor`` for adjusting the pupil sampling
scale. [default: None]
pupil_plane_size: Size in meters to use for the pupil plane array. In most cases, it's
a good idea to leave this as None, in which case GalSim will attempt
to find a good value automatically. See also ``oversampling`` for
adjusting the pupil size. [default: None]
"""
_has_hard_edges = False
_is_axisymmetric = False
_is_analytic_x = True
_is_analytic_k = True
_default_iipf = 1.5
def __init__(self, screen_list, lam, t0=0.0, exptime=0.0, time_step=0.025, flux=1.0,
theta=(0.0*arcsec, 0.0*arcsec), interpolant=None, scale_unit=arcsec,
ii_pad_factor=None, suppress_warning=False,
geometric_shooting=True, aper=None, second_kick=None, kcrit=0.2, fft_sign='+',
gsparams=None, _force_stepk=0., _force_maxk=0., _bar=None, **kwargs):
        # Hidden `_bar` kwarg can be used with astropy.utils.console.ProgressBar to print out a
# progress bar during long calculations.
if not isinstance(screen_list, PhaseScreenList):
screen_list = PhaseScreenList(screen_list)
if fft_sign not in ['+', '-']:
raise GalSimValueError("Invalid fft_sign", fft_sign, allowed_values=['+','-'])
self._screen_list = screen_list
self.t0 = float(t0)
self.lam = float(lam)
self.exptime = float(exptime)
self.time_step = float(time_step)
if aper is None:
# Check here for diameter.
if 'diam' not in kwargs:
raise GalSimIncompatibleValuesError(
"Diameter required if aperture not specified directly.", diam=None, aper=aper)
aper = Aperture(lam=lam, screen_list=self._screen_list, gsparams=gsparams, **kwargs)
elif gsparams is None:
gsparams = aper.gsparams
else:
aper = aper.withGSParams(gsparams)
self.aper = aper
if not isinstance(theta[0], Angle) or not isinstance(theta[1], Angle):
raise TypeError("theta must be 2-tuple of galsim.Angle's.")
self.theta = theta
self.interpolant = interpolant
if isinstance(scale_unit, str):
scale_unit = AngleUnit.from_name(scale_unit)
self.scale_unit = scale_unit
self._gsparams = GSParams.check(gsparams)
self.scale = aper._sky_scale(self.lam, self.scale_unit)
self._force_stepk = _force_stepk
self._force_maxk = _force_maxk
self._img = None
if self.exptime < 0:
raise GalSimRangeError("Cannot integrate PSF for negative time.", self.exptime, 0.)
self._ii_pad_factor = ii_pad_factor if ii_pad_factor is not None else self._default_iipf
self._bar = _bar if _bar else dict() # with dict() _bar.update() is a trivial no op.
self._flux = float(flux)
self._suppress_warning = suppress_warning
self._geometric_shooting = geometric_shooting
self._kcrit = kcrit
self._fft_sign = fft_sign
# We'll set these more intelligently as needed below
self._second_kick = second_kick
self._screen_list._delayCalculation(self)
self._finalized = False
@lazy_property
def _real_ii(self):
ii = InterpolatedImage(
self._img, x_interpolant=self.interpolant,
_force_stepk=self._force_stepk, _force_maxk=self._force_maxk,
pad_factor=self._ii_pad_factor,
use_true_center=False, gsparams=self._gsparams)
if not self._suppress_warning:
specified_stepk = 2*np.pi/(self._img.array.shape[0]*self.scale)
observed_stepk = ii.stepk
if observed_stepk < specified_stepk:
galsim_warn(
"The calculated stepk (%g) for PhaseScreenPSF is smaller than what was used "
"to build the wavefront (%g). This could lead to aliasing problems. "
"Increasing pad_factor is recommended."%(observed_stepk, specified_stepk))
return ii
@lazy_property
def _dummy_ii(self):
# If we need self._ii before we've done _prepareDraw, then build a placeholder that has
# roughly the right properties. All we really need is for the stepk and maxk to be
# correct, so use the force_ options to set them how we want.
if self._force_stepk > 0.:
stepk = self._force_stepk
else:
stepk = self._screen_list._getStepK(lam=self.lam, diam=self.aper.diam,
obscuration=self.aper.obscuration,
gsparams=self._gsparams)
if self._force_maxk > 0.:
maxk = self._force_maxk
else:
maxk = self.aper._getMaxK(self.lam, self.scale_unit)
image = _Image(np.array([[self._flux]], dtype=float),
_BoundsI(1, 1, 1, 1), PixelScale(1.))
interpolant = 'delta' # Use delta so it doesn't contribute to stepk
return InterpolatedImage(
image, pad_factor=1.0, x_interpolant=interpolant,
_force_stepk=stepk, _force_maxk=maxk)
@property
def _ii(self):
if self._finalized:
return self._real_ii
else:
return self._dummy_ii
@property
def kcrit(self):
"""The critical Fourier scale being used for this object.
"""
return self._kcrit
@property
def fft_sign(self):
"""The sign (+/-) to use in the exponent of the Fourier kernel when evaluating the Fourier
optics PSF.
"""
return self._fft_sign
@lazy_property
def screen_kmax(self):
"""The maximum k value to use in the screen. Typically `kcrit`/r0.
"""
r0_500 = self._screen_list.r0_500_effective
if r0_500 is None:
return np.inf
else:
r0 = r0_500 * (self.lam/500)**(6./5)
return self.kcrit / r0
@lazy_property
def second_kick(self):
"""Make a SecondKick object based on contents of screen_list and aper.
"""
from .airy import Airy
from .second_kick import SecondKick
if self._second_kick is None:
r0_500 = self._screen_list.r0_500_effective
if r0_500 is None: # No AtmosphericScreens in list
return Airy(lam=self.lam, diam=self.aper.diam,
obscuration=self.aper.obscuration, gsparams=self._gsparams)
else:
r0 = r0_500 * (self.lam/500.)**(6./5)
return SecondKick(
self.lam, r0, self.aper.diam, self.aper.obscuration,
kcrit=self.kcrit, scale_unit=self.scale_unit,
gsparams=self._gsparams)
else:
return self._second_kick
@property
def flux(self):
"""The flux of the profile.
"""
return self._flux
@property
def screen_list(self):
"""The `PhaseScreenList` being used for this object.
"""
return self._screen_list
@doc_inherit
def withGSParams(self, gsparams=None, **kwargs):
if gsparams == self.gsparams: return self
gsparams = GSParams.check(gsparams, self.gsparams, **kwargs)
aper = self.aper.withGSParams(gsparams)
ret = self.__class__.__new__(self.__class__)
ret.__dict__.update(self.__dict__)
# Make sure we generate fresh versions of any attrs that depend on gsparams
for attr in ['second_kick', '_real_ii', '_dummy_ii']:
ret.__dict__.pop(attr, None)
ret._gsparams = gsparams
ret.aper = aper
# Make sure we mark that we need to recalculate any previously finalized InterpolatedImage
ret._finalized = False
ret._screen_list._delayCalculation(ret)
ret._img = None
return ret
def __str__(self):
return ("galsim.PhaseScreenPSF(%s, lam=%s, exptime=%s)" %
(self._screen_list, self.lam, self.exptime))
def __repr__(self):
outstr = ("galsim.PhaseScreenPSF(%r, lam=%r, exptime=%r, flux=%r, aper=%r, theta=%r, "
"interpolant=%r, scale_unit=%r, fft_sign=%r, gsparams=%r)")
return outstr % (self._screen_list, self.lam, self.exptime, self.flux, self.aper,
self.theta, self.interpolant, self.scale_unit,
self._fft_sign, self.gsparams)
def __eq__(self, other):
# Even if two PSFs were generated with different sets of parameters, they will act
# identically if their img, interpolant, stepk, maxk, pad_factor, fft_sign and gsparams
# match.
return (self is other or
(isinstance(other, PhaseScreenPSF) and
self._screen_list == other._screen_list and
self.lam == other.lam and
self.aper == other.aper and
self.t0 == other.t0 and
self.exptime == other.exptime and
self.time_step == other.time_step and
self._flux == other._flux and
self.interpolant == other.interpolant and
self._force_stepk == other._force_stepk and
self._force_maxk == other._force_maxk and
self._ii_pad_factor == other._ii_pad_factor and
self._fft_sign == other._fft_sign and
self.gsparams == other.gsparams))
def __hash__(self):
return hash(("galsim.PhaseScreenPSF", tuple(self._screen_list), self.lam, self.aper,
self.t0, self.exptime, self.time_step, self._flux, self.interpolant,
self._force_stepk, self._force_maxk, self._ii_pad_factor, self._fft_sign,
self.gsparams))
def _prepareDraw(self):
# Trigger delayed computation of all pending PSFs.
self._screen_list._prepareDraw()
def _step(self):
"""Compute the current instantaneous PSF and add it to the developing integrated PSF."""
from . import fft
u = self.aper.u_illuminated
v = self.aper.v_illuminated
# This is where I need to make sure the screens are instantiated for FFT.
self._screen_list.instantiate(check='FFT')
wf = self._screen_list._wavefront(u, v, None, self.theta)
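        # The instantaneous monochromatic PSF is |FT{ P(u,v) exp(2 pi i W(u,v) / lam) }|^2, where
        # P is the boolean pupil (aper.illuminated) and W is the wavefront in the same units as
        # lam (nm); the lines below build that integrand and accumulate |FT|^2 over time steps.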
expwf = np.exp((2j*np.pi/self.lam) * wf)
expwf_grid = np.zeros_like(self.aper.illuminated, dtype=np.complex128)
expwf_grid[self.aper.illuminated] = expwf
# Note fft is '-' and ifft is '+' below
if self._fft_sign == '+':
ftexpwf = fft.ifft2(expwf_grid, shift_in=True, shift_out=True)
else:
ftexpwf = fft.fft2(expwf_grid, shift_in=True, shift_out=True)
if self._img is None:
self._img = np.zeros(self.aper.illuminated.shape, dtype=np.float64)
self._img += np.abs(ftexpwf)**2
self._bar.update()
def _finalize(self):
"""Take accumulated integrated PSF image and turn it into a proper GSObject."""
self._img *= self._flux / self._img.sum(dtype=float)
b = _BoundsI(1,self.aper.npix,1,self.aper.npix)
self._img = _Image(self._img, b, PixelScale(self.scale))
self._finalized = True
def __getstate__(self):
d = self.__dict__.copy()
# The SBProfile is picklable, but it is pretty inefficient, due to the large images being
# written as a string. Better to pickle the image and remake the InterpolatedImage.
d.pop('_dummy_ii',None)
d.pop('_real_ii',None)
d.pop('second_kick',None)
return d
def __setstate__(self, d):
self.__dict__ = d
if not self._finalized:
self._screen_list._delayCalculation(self)
@property
def _maxk(self):
return self._ii.maxk
@property
def _stepk(self):
return self._ii.stepk
@property
def _centroid(self):
self._prepareDraw()
return self._ii.centroid
@property
def _positive_flux(self):
if self._geometric_shooting:
return self._flux
else:
return self._ii.positive_flux
@property
def _negative_flux(self):
if self._geometric_shooting:
return 0.
else:
return self._ii.negative_flux
@property
def _flux_per_photon(self):
if self._geometric_shooting:
return 1.
else:
return self._calculate_flux_per_photon()
@property
def _max_sb(self):
return self._ii.max_sb
def _xValue(self, pos):
self._prepareDraw()
return self._ii._xValue(pos)
def _kValue(self, kpos):
self._prepareDraw()
return self._ii._kValue(kpos)
def _drawReal(self, image, jac=None, offset=(0.,0.), flux_scaling=1.):
self._ii._drawReal(image, jac, offset, flux_scaling)
def _shoot(self, photons, rng):
from .photon_array import PhotonArray
if not self._geometric_shooting:
self._prepareDraw()
return self._ii._shoot(photons, rng)
if not photons.hasAllocatedPupil():
self.aper.samplePupil(photons, rng)
if not photons.hasAllocatedTimes():
TimeSampler(self.t0, self.exptime).applyTo(photons, rng=rng)
u = photons.pupil_u
v = photons.pupil_v
t = photons.time
n_photons = len(photons)
# This is where the screens need to be instantiated for drawing with geometric photon
# shooting.
self._screen_list.instantiate(kmax=self.screen_kmax, check='phot')
nm_to_arcsec = 1.e-9 * radians / arcsec
if self._fft_sign == '+':
nm_to_arcsec *= -1
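        # The screen gradient is returned in nm per meter of pupil, i.e. 1.e-9 radians of
        # deflection, so this factor converts the deflections directly to arcsec (with the sign
        # chosen to match the Fourier-optics convention selected by fft_sign).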
photons.x, photons.y = self._screen_list._wavefront_gradient(u, v, t, self.theta)
photons.x *= nm_to_arcsec
photons.y *= nm_to_arcsec
photons.flux = self._flux / n_photons
if self.second_kick:
p2 = PhotonArray(len(photons))
self.second_kick._shoot(p2, rng)
photons.convolve(p2, rng)
def _drawKImage(self, image, jac=None):
self._ii._drawKImage(image, jac)
@property
def img(self):
from .deprecated import depr
depr('img', 2.1, '', "This functionality has been removed.")
return self._img
@property
def finalized(self):
from .deprecated import depr
depr('finalized', 2.1, "This functionality has been removed.")
return self._finalized
@doc_inherit
def withFlux(self, flux):
if self._finalized:
# Then it's probably not faster to rebuild with a different flux.
return self.withScaledFlux(flux / self.flux)
else:
return PhaseScreenPSF(self._screen_list, lam=self.lam, exptime=self.exptime, flux=flux,
aper=self.aper, theta=self.theta, interpolant=self.interpolant,
scale_unit=self.scale_unit, gsparams=self.gsparams)
class OpticalPSF(GSObject):
"""A class describing aberrated PSFs due to telescope optics. Its underlying implementation
uses an InterpolatedImage to characterize the profile.
The diffraction effects are characterized by the diffraction angle, which is a function of the
ratio lambda / D, where lambda is the wavelength of the light and D is the diameter of the
telescope. The natural unit for this value is radians, which is not normally a convenient
unit to use for other `GSObject` dimensions. Assuming that the other sky coordinates you are
using are all in arcsec (e.g. the pixel scale when you draw the image, the size of the galaxy,
etc.), then you should convert this to arcsec as well::
>>> lam = 700 # nm
>>> diam = 4.0 # meters
>>> lam_over_diam = (lam * 1.e-9) / diam # radians
>>> lam_over_diam *= 206265 # Convert to arcsec
>>> psf = galsim.OpticalPSF(lam_over_diam, ...)
To make this process a bit simpler, we recommend instead providing the wavelength and diameter
separately using the parameters ``lam`` (in nm) and ``diam`` (in m). GalSim will then convert
this to any of the normal kinds of angular units using the ``scale_unit`` parameter::
>>> psf = galsim.OpticalPSF(lam=lam, diam=diam, scale_unit=galsim.arcsec, ...)
When drawing images, the scale_unit should match the unit used for the pixel scale or the WCS.
e.g. in this case, a pixel scale of 0.2 arcsec/pixel would be specified as ``pixel_scale=0.2``.
Input aberration coefficients are assumed to be supplied in units of wavelength, and correspond
to the Zernike polynomials in the Noll convention defined in
    Noll, J. Opt. Soc. Am. 66, 207-211 (1976). For a brief summary of the polynomials, refer to
http://en.wikipedia.org/wiki/Zernike_polynomials#Zernike_polynomials. By default, the
aberration coefficients indicate the amplitudes of _circular_ Zernike polynomials, which are
orthogonal over a circle. If you would like the aberration coefficients to instead be
    interpreted as the amplitudes of _annular_ Zernike polynomials, which are orthogonal over an
annulus (see Mahajan, J. Opt. Soc. Am. 71, 1 (1981)), set the ``annular_zernike`` keyword
argument to True.
There are two ways to specify the geometry of the pupil plane, i.e., the obscuration disk size
and the areas that will be illuminated outside of it. The first way is to use keywords that
specify the size of the obscuration, and the nature of the support struts holding up the
secondary mirror (or prime focus cage, etc.). These are taken to be rectangular obscurations
extending from the outer edge of the pupil to the outer edge of the obscuration disk (or the
pupil center if ``obscuration = 0.``). You can specify how many struts there are (evenly spaced
in angle), how thick they are as a fraction of the pupil diameter, and what angle they start at
relative to the positive y direction.
The second way to specify the pupil plane configuration is by passing in an image of it. This
can be useful for example if the struts are not evenly spaced or are not radially directed, as
is assumed by the simple model for struts described above. In this case, keywords related to
struts are ignored; moreover, the ``obscuration`` keyword is used to ensure that the images are
properly sampled (so it is still needed), but the keyword is then ignored when using the
supplied image of the pupil plane. Note that for complicated pupil configurations, it may be
    desirable to increase ``pad_factor`` for more fidelity at the expense of slower running time.
The ``pupil_plane_im`` that is passed in can be rotated during internal calculations by
specifying a ``pupil_angle`` keyword.
If you choose to pass in a pupil plane image, it must be a square array in which the image of
the pupil is centered. The areas that are illuminated should have some value >0, and the other
areas should have a value of precisely zero. Based on what the OpticalPSF class thinks is the
    required sampling to make the PSF image, the supplied image of the pupil plane might be
zero-padded during internal calculations. The pixel scale of the pupil plane can be specified
in one of three ways. In descending order of priority, these are:
1. The ``pupil_plane_scale`` keyword argument (units are meters).
2. The ``pupil_plane_im.scale`` attribute (units are meters).
3. If (1) and (2) are both None, then the scale will be inferred by assuming that the
illuminated pixel farthest from the image center is at a physical distance of self.diam/2.
Note that if the scale is specified by either (1) or (2) above (which always includes specifying
the pupil_plane_im as a filename, since the default scale then will be 1.0), then the
lam_over_diam keyword must not be used, but rather the lam and diam keywords are required
separately. Finally, to ensure accuracy of calculations using a pupil plane image, we recommend
sampling it as finely as possible.
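    For example, a pupil image stored in a (hypothetical) FITS file ``pupil_plane.fits``
    could be supplied as::
        >>> psf = galsim.OpticalPSF(lam=lam, diam=diam, obscuration=0.4,
        ...                         pupil_plane_im='pupil_plane.fits',
        ...                         pupil_angle=15.*galsim.degrees)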
As described above, either specify the lam/diam ratio directly in arbitrary units::
>>> optical_psf = galsim.OpticalPSF(lam_over_diam=lam_over_diam, defocus=0., ...)
or, use separate keywords for the telescope diameter and wavelength in meters and nanometers,
respectively::
>>> optical_psf = galsim.OpticalPSF(lam=lam, diam=diam, defocus=0., ...)
Either of these options initializes ``optical_psf`` as an OpticalPSF instance.
Parameters:
lam_over_diam: Lambda / telescope diameter in the physical units adopted for ``scale``
(user responsible for consistency). Either ``lam_over_diam``, or
``lam`` and ``diam``, must be supplied.
lam: Lambda (wavelength) in units of nanometers. Must be supplied with
``diam``, and in this case, image scales (``scale``) should be
specified in units of ``scale_unit``.
diam : Telescope diameter in units of meters. Must be supplied with
``lam``, and in this case, image scales (``scale``) should be
specified in units of ``scale_unit``.
tip: Tip in units of incident light wavelength. [default: 0]
tilt: Tilt in units of incident light wavelength. [default: 0]
defocus: Defocus in units of incident light wavelength. [default: 0]
astig1: Astigmatism (like e2) in units of incident light wavelength.
[default: 0]
astig2: Astigmatism (like e1) in units of incident light wavelength.
[default: 0]
coma1: Coma along y in units of incident light wavelength. [default: 0]
coma2: Coma along x in units of incident light wavelength. [default: 0]
trefoil1: Trefoil (one of the arrows along y) in units of incident light
wavelength. [default: 0]
trefoil2: Trefoil (one of the arrows along x) in units of incident light
wavelength. [default: 0]
spher: Spherical aberration in units of incident light wavelength.
[default: 0]
aberrations: Optional keyword, to pass in a list, tuple, or NumPy array of
aberrations in units of reference wavelength (ordered according to
the Noll convention), rather than passing in individual values for each
individual aberration. Note that aberrations[1] is piston (and not
aberrations[0], which is unused.) This list can be arbitrarily long to
handle Zernike polynomial aberrations of arbitrary order.
annular_zernike: Boolean indicating that aberrations specify the amplitudes of annular
Zernike polynomials instead of circular Zernike polynomials.
[default: False]
aper: `Aperture` object to use when creating PSF. [default: None]
circular_pupil: Adopt a circular pupil? [default: True]
obscuration: Linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.). This should be specified even if you are providing
a ``pupil_plane_im``, since we need an initial value of obscuration to
use to figure out the necessary image sampling. [default: 0]
interpolant: Either an Interpolant instance or a string indicating which interpolant
should be used. Options are 'nearest', 'sinc', 'linear', 'cubic',
'quintic', or 'lanczosN' where N should be the integer order to use.
[default: galsim.Quintic()]
oversampling: Optional oversampling factor for the InterpolatedImage. Setting
``oversampling < 1`` will produce aliasing in the PSF (not good).
Usually ``oversampling`` should be somewhat larger than 1. 1.5 is
usually a safe choice. [default: 1.5]
pad_factor: Additional multiple by which to zero-pad the PSF image to avoid folding
compared to what would be employed for a simple `Airy`. Note that
``pad_factor`` may need to be increased for stronger aberrations, i.e.
those larger than order unity. [default: 1.5]
ii_pad_factor: Zero-padding factor by which to extend the image of the PSF when
creating the ``InterpolatedImage``. See the ``InterpolatedImage``
docstring for more details. [default: 1.5]
suppress_warning: If ``pad_factor`` is too small, the code will emit a warning telling you
its best guess about how high you might want to raise it. However,
you can suppress this warning by using ``suppress_warning=True``.
[default: False]
geometric_shooting: If True, then when drawing using photon shooting, use geometric
optics approximation where the photon angles are derived from the
phase screen gradient. If False, then first draw using Fourier
optics and then shoot from the derived InterpolatedImage.
[default: False]
flux: Total flux of the profile. [default: 1.]
nstruts: Number of radial support struts to add to the central obscuration.
[default: 0]
strut_thick: Thickness of support struts as a fraction of pupil diameter.
[default: 0.05]
strut_angle: `Angle` made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be an
`Angle` instance. [default: 0. * galsim.degrees]
pupil_plane_im: The GalSim.Image, NumPy array, or name of file containing the pupil
plane image, to be used instead of generating one based on the
obscuration and strut parameters. [default: None]
pupil_angle: If ``pupil_plane_im`` is not None, rotation angle for the pupil plane
(positive in the counter-clockwise direction). Must be an `Angle`
instance. [default: 0. * galsim.degrees]
pupil_plane_scale: Sampling interval in meters to use for the pupil plane array. In
most cases, it's a good idea to leave this as None, in which case
GalSim will attempt to find a good value automatically. The
exception is when specifying the pupil arrangement via an image, in
which case this keyword can be used to indicate the sampling of that
image. See also ``pad_factor`` for adjusting the pupil sampling scale.
[default: None]
pupil_plane_size: Size in meters to use for the pupil plane array. In most cases, it's
a good idea to leave this as None, in which case GalSim will attempt
to find a good value automatically. See also ``oversampling`` for
adjusting the pupil size. [default: None]
scale_unit: Units to use for the sky coordinates when calculating lam/diam if these
are supplied separately. Should be either a `galsim.AngleUnit` or a
string that can be used to construct one (e.g., 'arcsec', 'radians',
etc.). [default: galsim.arcsec]
fft_sign: The sign (+/-) to use in the exponent of the Fourier kernel when
evaluating the Fourier optics PSF. As of version 2.3, GalSim uses a
plus sign by default, which we believe to be consistent with, for
example, how Zemax computes a Fourier optics PSF on DECam. Before
version 2.3, the default was a negative sign. Input should be either
the string '+' or the string '-'. [default: '+']
gsparams: An optional `GSParams` argument. [default: None]
"""
_opt_params = {
"diam": float,
"defocus": float,
"astig1": float,
"astig2": float,
"coma1": float,
"coma2": float,
"trefoil1": float,
"trefoil2": float,
"spher": float,
"annular_zernike": bool,
"circular_pupil": bool,
"obscuration": float,
"oversampling": float,
"pad_factor": float,
"suppress_warning": bool,
"interpolant": str,
"flux": float,
"nstruts": int,
"strut_thick": float,
"strut_angle": Angle,
"pupil_plane_im": str,
"pupil_angle": Angle,
"pupil_plane_scale": float,
"pupil_plane_size": float,
"scale_unit": str,
"fft_sign": str}
_single_params = [{"lam_over_diam": float, "lam": float}]
_has_hard_edges = False
_is_axisymmetric = False
_is_analytic_x = True
_is_analytic_k = True
_default_iipf = 1.5 # The default ii_pad_factor, since we need to check it for the repr
def __init__(self, lam_over_diam=None, lam=None, diam=None, tip=0., tilt=0., defocus=0.,
astig1=0., astig2=0., coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0.,
aberrations=None, annular_zernike=False,
aper=None, circular_pupil=True, obscuration=0., interpolant=None,
oversampling=1.5, pad_factor=1.5, ii_pad_factor=None, flux=1.,
nstruts=0, strut_thick=0.05, strut_angle=0.*radians,
pupil_plane_im=None, pupil_plane_scale=None, pupil_plane_size=None,
pupil_angle=0.*radians, scale_unit=arcsec, fft_sign='+', gsparams=None,
_force_stepk=0., _force_maxk=0.,
suppress_warning=False, geometric_shooting=False):
from .phase_screens import OpticalScreen
if fft_sign not in ['+', '-']:
raise GalSimValueError("Invalid fft_sign", fft_sign, allowed_values=['+','-'])
if isinstance(scale_unit, str):
scale_unit = AngleUnit.from_name(scale_unit)
# Need to handle lam/diam vs. lam_over_diam here since lam by itself is needed for
# OpticalScreen.
if lam_over_diam is not None:
if lam is not None or diam is not None:
raise GalSimIncompatibleValuesError(
"If specifying lam_over_diam, then do not specify lam or diam",
lam_over_diam=lam_over_diam, lam=lam, diam=diam)
# For combination of lam_over_diam and pupil_plane_im with a specified scale, it's
# tricky to determine the actual diameter of the pupil needed by Aperture. So for now,
# we just disallow this combination. Please feel free to raise an issue at
# https://github.com/GalSim-developers/GalSim/issues if you need this functionality.
if pupil_plane_im is not None:
if isinstance(pupil_plane_im, basestring):
# Filename, therefore specific scale exists.
raise GalSimIncompatibleValuesError(
"If specifying lam_over_diam, then do not specify pupil_plane_im as "
"as a filename.",
lam_over_diam=lam_over_diam, pupil_plane_im=pupil_plane_im)
elif isinstance(pupil_plane_im, Image) and pupil_plane_im.scale is not None:
raise GalSimIncompatibleValuesError(
"If specifying lam_over_diam, then do not specify pupil_plane_im "
"with definite scale attribute.",
lam_over_diam=lam_over_diam, pupil_plane_im=pupil_plane_im)
elif pupil_plane_scale is not None:
raise GalSimIncompatibleValuesError(
"If specifying lam_over_diam, then do not specify pupil_plane_scale. ",
lam_over_diam=lam_over_diam, pupil_plane_scale=pupil_plane_scale)
lam = 500. # Arbitrary
diam = lam*1.e-9 / lam_over_diam * radians / scale_unit
else:
if lam is None or diam is None:
raise GalSimIncompatibleValuesError(
"If not specifying lam_over_diam, then specify lam AND diam",
lam_over_diam=lam_over_diam, lam=lam, diam=diam)
# Make the optical screen.
self._screen = OpticalScreen(
diam=diam, defocus=defocus, astig1=astig1, astig2=astig2, coma1=coma1, coma2=coma2,
trefoil1=trefoil1, trefoil2=trefoil2, spher=spher, aberrations=aberrations,
obscuration=obscuration, annular_zernike=annular_zernike, lam_0=lam)
# Make the aperture.
if aper is None:
aper = Aperture(
diam, lam=lam, circular_pupil=circular_pupil, obscuration=obscuration,
nstruts=nstruts, strut_thick=strut_thick, strut_angle=strut_angle,
oversampling=oversampling, pad_factor=pad_factor,
pupil_plane_im=pupil_plane_im, pupil_angle=pupil_angle,
pupil_plane_scale=pupil_plane_scale, pupil_plane_size=pupil_plane_size,
gsparams=gsparams)
self.obscuration = obscuration
else:
self.obscuration = aper.obscuration
# Save for pickling
self._lam = float(lam)
self._flux = float(flux)
self._interpolant = interpolant
self._scale_unit = scale_unit
self._gsparams = GSParams.check(gsparams)
self._suppress_warning = suppress_warning
self._geometric_shooting = geometric_shooting
self._aper = aper
self._force_stepk = _force_stepk
self._force_maxk = _force_maxk
self._ii_pad_factor = ii_pad_factor if ii_pad_factor is not None else self._default_iipf
self._fft_sign = fft_sign
@lazy_property
def _psf(self):
psf = PhaseScreenPSF(PhaseScreenList(self._screen), lam=self._lam, flux=self._flux,
aper=self._aper, interpolant=self._interpolant,
scale_unit=self._scale_unit, fft_sign=self._fft_sign,
gsparams=self._gsparams,
suppress_warning=self._suppress_warning,
geometric_shooting=self._geometric_shooting,
_force_stepk=self._force_stepk, _force_maxk=self._force_maxk,
ii_pad_factor=self._ii_pad_factor)
psf._prepareDraw() # No need to delay an OpticalPSF.
return psf
def __str__(self):
screen = self._screen
s = "galsim.OpticalPSF(lam=%s, diam=%s" % (screen.lam_0, self._aper.diam)
if any(screen.aberrations):
s += ", aberrations=[" + ",".join(str(ab) for ab in screen.aberrations) + "]"
if self._aper._pupil_plane_im is None:
s += self._aper._geometry_str()
if screen.annular_zernike:
s += ", annular_zernike=True"
s += ", obscuration=%r"%self.obscuration
if self._flux != 1.0:
s += ", flux=%s" % self._flux
s += ")"
return s
def __repr__(self):
screen = self._screen
s = "galsim.OpticalPSF(lam=%r, diam=%r" % (self._lam, self._aper.diam)
s += ", aper=%r"%self._aper
if any(screen.aberrations):
s += ", aberrations=[" + ",".join(repr(ab) for ab in screen.aberrations) + "]"
if screen.annular_zernike:
s += ", annular_zernike=True"
s += ", obscuration=%r"%self.obscuration
        if self._interpolant is not None:
s += ", interpolant=%r"%self._interpolant
if self._scale_unit != arcsec:
s += ", scale_unit=%r"%self._scale_unit
if self._fft_sign != '+':
s += ", fft_sign='-'"
if self._gsparams != GSParams():
s += ", gsparams=%r"%self._gsparams
if self._flux != 1.0:
s += ", flux=%r" % self._flux
if self._force_stepk != 0.:
s += ", _force_stepk=%r" % self._force_stepk
if self._force_maxk != 0.:
s += ", _force_maxk=%r" % self._force_maxk
if self._ii_pad_factor != OpticalPSF._default_iipf:
s += ", ii_pad_factor=%r" % self._ii_pad_factor
s += ")"
return s
def __eq__(self, other):
return (self is other or
(isinstance(other, OpticalPSF) and
self._lam == other._lam and
self._aper == other._aper and
self._screen == other._screen and
self._flux == other._flux and
self._interpolant == other._interpolant and
self._scale_unit == other._scale_unit and
self._force_stepk == other._force_stepk and
self._force_maxk == other._force_maxk and
self._ii_pad_factor == other._ii_pad_factor and
self._fft_sign == other._fft_sign and
self._gsparams == other._gsparams))
def __hash__(self):
return hash(("galsim.OpticalPSF", self._lam, self._aper, self._screen,
self._flux, self._interpolant, self._scale_unit, self._force_stepk,
self._force_maxk, self._ii_pad_factor, self._fft_sign, self._gsparams))
def __getstate__(self):
# The SBProfile is picklable, but it is pretty inefficient, due to the large images being
# written as a string. Better to pickle the psf and remake the PhaseScreenPSF.
d = self.__dict__.copy()
d.pop('_psf', None)
return d
def __setstate__(self, d):
self.__dict__ = d
@property
def _maxk(self):
return self._psf.maxk
@property
def _stepk(self):
return self._psf.stepk
@property
def _centroid(self):
return self._psf.centroid
@property
def _positive_flux(self):
return self._psf.positive_flux
@property
def _negative_flux(self):
return self._psf.negative_flux
@property
def _flux_per_photon(self):
return self._psf._flux_per_photon
@property
def _max_sb(self):
return self._psf.max_sb
@property
def fft_sign(self):
return self._fft_sign
def _xValue(self, pos):
return self._psf._xValue(pos)
def _kValue(self, kpos):
return self._psf._kValue(kpos)
def _drawReal(self, image, jac=None, offset=(0.,0.), flux_scaling=1.):
self._psf._drawReal(image, jac, offset, flux_scaling)
def _shoot(self, photons, rng):
self._psf._shoot(photons, rng)
def _drawKImage(self, image, jac=None):
self._psf._drawKImage(image, jac)
@doc_inherit
def withFlux(self, flux):
screen = self._screen
return OpticalPSF(
lam=self._lam, diam=self._aper.diam, aper=self._aper,
aberrations=screen.aberrations, annular_zernike=screen.annular_zernike,
flux=flux, _force_stepk=self._force_stepk, _force_maxk=self._force_maxk,
ii_pad_factor=self._ii_pad_factor, fft_sign=self._fft_sign,
gsparams=self._gsparams) | PypiClean |
/EnergyFlow-1.3.2.tar.gz/EnergyFlow-1.3.2/energyflow/efm.py | r"""# Energy Flow Moments
Energy Flow Moments (EFMs) are tensors that can be computed in
$\mathcal O(M)$ where $M$ is the number of particles. They are useful for many
things, including providing a fast way of computing the $\beta=2$ EFPs, which
are the scalar contractions of products of EFMs.
The expression for a (normalized) hadronic EFM in terms of transverse momenta
$\{p_{Ti}\}$ and particle momenta $\{p_i^\mu\}$ is:
\[\mathcal I^{\mu_1\cdots\mu_v} = 2^{v/2}\sum_{i=1}^Mz_in_i^{\mu_1}\cdots n_i^{\mu_v},\]
where
\[z_i=\frac{p_{Ti}}{\sum_jp_{Tj}},\quad\quad n_i^\mu=\frac{p_i^\mu}{p_{Ti}}.\]
Note that for an EFM in an $e^+e^-$ context, transverse momenta are replaced
with energies.
Support for using EFMs to compute $\beta=2$ EFPs is built in to the `EFP` and
`EFPSet` classes using the classes and functions in this module. The `EFM` and
`EFMSet` classes can also be used on their own, as can the `efp2efms` function.
"""
# ______ ______ __ __
# | ____| ____| \/ |
# | |__ | |__ | \ / |
# | __| | __| | |\/| |
# | |____| | | | | |
# |______|_| |_| |_|
# EnergyFlow - Python package for high-energy particle physics.
# Copyright (C) 2017-2021 Patrick T. Komiske III and Eric Metodiev
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from operator import itemgetter
import sys
import numpy as np
from numpy.core.multiarray import c_einsum
from energyflow.algorithms import einsum
from energyflow.base import EFMBase
from energyflow.utils import flat_metric, timing
from energyflow.utils.graph_utils import *
__all__ = ['EFM', 'EFMSet', 'efp2efms']
###############################################################################
# EFM functions
###############################################################################
# allowed einsum symbols
I = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def efp2efms(graph):
"""Translates an EFP formula, specified by its graph, to an expression
involving EFMs. The input is a graph as a list of edges and the output is a
tuple where the first argument is a string to be used with einsum and the
second is a list of EFM signatures (the number of raised indices followed
by the number of lowered indices).
**Arguments**
- **graph** : _list_ of _tuple_
- The EFP graph given as a list of edges.
**Returns**
- (_str_, _list_ of _tuple_)
- The einstring to be used with einsum for performing the contraction
of EFMs followed by a list of the EFM specs. If `r` is the result of
this function, and `efms` is a dictionary containing EFM tensors
indexed by their signatures, then the value of the EFP is given as
`np.einsum(r[0], *[efms[sig] for sig in r[1]])`.
"""
# handle empty graph
if len(graph) == 0:
return '', [(0,0)]
# build convenient data structure to hold graph information
vds = get_valency_structure(graph)
# dictionary to hold efm terms
efms = {}
# counter to store how to get fresh dummy indices
ind = 0
# iterate over vertices sorted by valency in decreasing order
sorted_verts = sorted(valencies(graph).items(), key=itemgetter(1), reverse=True)
for vert,valency in sorted_verts:
# dict holding info for new efm term
new_efm = {'upper_indices': '', 'lower_indices': ''}
# iterate over neighboring vertices
for neighbor,n_shared_edges in vds[vert].items():
# if a new neighbor, assign fresh I
if neighbor not in efms:
new_I = I[ind:ind+n_shared_edges]
ind += n_shared_edges
new_efm['upper_indices'] += new_I
# store I shared with that neighbor
new_efm[neighbor] = new_I
# if neighbor already has an efm factor, add already assigned indices to lower_indices
else:
new_efm['lower_indices'] += efms[neighbor][vert]
# store new efm factor
efms[vert] = new_efm
einstr_list, efm_specs = [], []
for vert,valency in sorted_verts:
efm = efms[vert]
# conventionally put uppered indices before lower indices
einstr_list.append(efm['upper_indices'] + efm['lower_indices'])
# add spec which is (nup, nlow) of efm
efm_specs.append((len(efm['upper_indices']), len(efm['lower_indices'])))
# return comma joined einstr and efm_specs
return ','.join(einstr_list), efm_specs
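# A minimal illustration of efp2efms (not part of the original module): for the
# path graph [(0, 1), (1, 2)] the central vertex becomes a single valency-2 EFM
# and the two endpoints become lowered valency-1 EFMs, so the call returns an
# einstring like 'ab,a,b' together with the specs [(2, 0), (0, 1), (0, 1)].
# Given a dict `efms` mapping specs to tensors (e.g. from EFMSet.compute), the
# corresponding beta=2 EFP value would then be evaluated as
#     einstr, efm_specs = efp2efms([(0, 1), (1, 2)])
#     efp_value = np.einsum(einstr, *[efms[spec] for spec in efm_specs])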
###############################################################################
# EFM
###############################################################################
class EFM(EFMBase):
"""A class representing and computing a single EFM."""
# EFM(nup, nlow=0, measure='hadrefm', beta=2, kappa=1, normed=None,
# coords=None, check_input=True)
def __init__(self, nup, nlow=0, rl_from=None, subslice_from=None, **kwargs):
r"""Since EFMs are fully symmetric tensors, they can be specified by
just two integers: the number of raised and number of lowered indices
that they carry. Thus we use a tuple of two ints as an EFM "spec" or
signature throughout EnergyFlow. By convention the raised indices come
before the lowered indices.
Since a standalone `EFM` defines and holds a `Measure` instance, all
`Measure` keywords are accepted. Note that `beta` is ignored as EFMs
require $\beta=2$.
**Arguments**
- **nup** : _int_
- The number of uppered indices of the EFM.
- **nlow** : _int_
- The number of lowered indices of the EFM.
- **measure** : {`'hadrefm'`, `'eeefm'`}
- The choice of measure. See [Measures](../measures) for additional
info. Note that EFMs can only use the `'hadrefm'` and `'eeefm'`
measures.
- **beta** : _float_
- The parameter $\beta$ appearing in the measure. Must be greater
than zero.
- **kappa** : {_float_, `'pf'`}
- If a number, the energy weighting parameter $\kappa$. If `'pf'`,
use $\kappa=v-1$ where $v$ is the valency of the vertex.
- **normed** : _bool_
- Controls normalization of the energies in the measure.
- **coords** : {`'ptyphim'`, `'epxpypz'`, `None`}
- Controls which coordinates are assumed for the input. See
[Measures](../measures) for additional info.
- **check_input** : _bool_
- Whether to check the type of the input each time or assume the
first input type.
"""
# initialize base class
super(EFM, self).__init__(kwargs)
# store inputs
self._nup, self._nlow = nup, nlow
self._rl_from = rl_from
self._subslice_from = subslice_from
# useful derived quantities
self._v = self.nup + self.nlow
self._spec = (self.nup, self.nlow)
# construct by raising/lowering
if self._rl_from is not None:
# ensure valid raising/lowering
if self.v != sum(self._rl_from):
raise ValueError('cannot raise/lower among different valency EFMs')
# determine einstr
diff = self.nup - self._rl_from[0]
self._rl_diff = abs(diff)
i_start, i_end = ((self._rl_from[0], self.nup) if diff > 0 else
(self.nup, self._rl_from[0]))
self.rl_einstr = ','.join([I[:self.v]] + list(I[i_start:i_end])) + '->' + I[:self.v]
self._construct = self._rl_construct
# construct by subslicing
elif self._subslice_from is not None:
# get number of subslices
num_up_subslices = self._subslice_from[0] - self.nup
num_low_subslices = self._subslice_from[1] - self.nlow
# perform check
if num_up_subslices < 0 or num_low_subslices < 0:
                m = 'invalid subslicing from {} to {}'.format(self._subslice_from, self.spec)
raise ValueError(m)
# note that python 2 doesn't support pickling ellipsis
if sys.version_info[0] > 2:
self.subslice = tuple([0]*num_up_subslices + [Ellipsis] + [0]*num_low_subslices)
else:
self.subslice = tuple([0]*num_up_subslices + self.v*[slice(None)] +
[0]*num_low_subslices)
self._pow2 = 2**(-(num_up_subslices + num_low_subslices)/2)
self._construct = self._subslice_construct
# construct directly
else:
self.raw_einstr = (','.join([I[0]] + [I[0] + I[i+1] for i in range(self.v)]) +
'->' + I[1:self.v+1])
self.raw_einpath = ['einsum_path'] + [(0,1)]*self.v
self._rl_diff = self.nlow
self.rl_einstr = ','.join([I[:self.v]] + list(I[self.nup:self.v])) + '->' + I[:self.v]
self._pow2 = 2**(self.v/2)
self._construct = self._raw_construct
#================
# PRIVATE METHODS
#================
def _rl_construct(self, tensor):
# fine to use pure c_einsum here as it's used anyway
return c_einsum(self.rl_einstr, tensor, *[flat_metric(len(tensor))]*self._rl_diff)
def _subslice_construct(self, tensor):
return self._pow2 * tensor[self.subslice]
def _raw_construct(self, zsnhats):
zs, nhats = zsnhats
M, dim = nhats.shape
# if no lowering is needed
if self.nlow == 0:
return self._pow2 * einsum(self.raw_einstr, zs, *[nhats]*self.v,
optimize=self.raw_einpath)
# lowering nhats first is better
elif M*dim < dim**self.v:
low_nhats = nhats * (flat_metric(dim)[np.newaxis])
einsum_args = [nhats]*self.nup + [low_nhats]*self.nlow
return self._pow2 * einsum(self.raw_einstr, zs, *einsum_args,
optimize=self.raw_einpath)
# lowering EFM is better
else:
tensor = einsum(self.raw_einstr, zs, *[nhats]*self.v, optimize=self.raw_einpath)
return self._pow2 * self._rl_construct(tensor)
#===============
# PUBLIC METHODS
#===============
def compute(self, event=None, zs=None, nhats=None):
"""Evaluates the EFM on a single event. Note that `EFM` also is
callable, in which case this method is invoked.
**Arguments**
- **event** : 2-d array_like or `fastjet.PseudoJet`
- The event as an array of particles in the coordinates specified
by `coords`.
- **zs** : 1-d array_like
- If present, `nhats` must also be present, and `zs` is used in place
of the energies of an event.
- **nhats** : 2-d array like
- If present, `zs` must also be present, and `nhats` is used in place
of the scaled particle momenta.
**Returns**
- _numpy.ndarray_ of rank `v`
- The values of the EFM tensor on the event. The raised indices
are the first `nup` and the lowered indices are the last `nlow`.
"""
return self._raw_construct(super(EFM, self).compute(event, zs, nhats))
def batch_compute(self, events, n_jobs=None):
"""Evaluates the EFM on several events.
**Arguments**
- **events** : array_like or `fastjet.PseudoJet`
- The events as an array of arrays of particles in coordinates
matching those anticipated by `coords`.
- **n_jobs** : _int_ or `None`
- The number of worker processes to use. A value of `None` will
use as many processes as there are CPUs on the machine.
**Returns**
- _numpy.ndarray_ of rank `v+1`
- Array of EFM tensor values on the events.
"""
return super(EFM, self).batch_compute(events, n_jobs)
def set_timer(self):
self.times = []
self._construct = timing(self, self._construct)
#===========
# PROPERTIES
#===========
@property
def nup(self):
"""The number of uppered indices on the EFM."""
return self._nup
@property
def nlow(self):
"""The number of lowered indices on the EFM."""
return self._nlow
@property
def spec(self):
"""The signature of the EFM as `(nup, nlow)`."""
return self._spec
@property
def v(self):
"""The valency, or total number of indices, of the EFM."""
return self._v
###############################################################################
# EFMSet
###############################################################################
class EFMSet(EFMBase):
"""A class for holding and efficiently constructing a collection of EFMs."""
# EFMSet(efm_specs=None, vmax=None, measure='hadrefm', beta=2, kappa=1,
# normed=None, coords=None, check_input=True)
def __init__(self, efm_specs=None, vmax=None, **kwargs):
r"""An `EFMSet` can be initialized two ways (in order of precedence):
1. **EFM Specs** - Pass in a list of EFM specs (`nup`, `nlow`).
2. **Max Valency** - Specify a maximum valency and each EFM with up to
that many indices will be constructed, with all indices raised.
Since a standalone `EFMSet` defines and holds a `Measure` instance,
all `Measure` keywords are accepted. Note that `beta` is ignored as
EFMs require $\beta=2$.
**Arguments**
- **efm_specs** : {_list_, _tuple_, _set_} of _tuple_ or `None`
- A collection of tuples of length two specifying which EFMs this
object is to hold. Each spec is of the form `(nup, nlow)` where these
are the number of upper and lower indices, respectively, that the EFM
is to have.
- **vmax** : _int_
- Only used if `efm_specs` is None. The maximum EFM valency to
include in the `EFMSet`. Note that all EFMs will have `nlow=0`.
- **measure** : {`'hadrefm'`, `'eeefm'`}
- The choice of measure. See [Measures](../measures) for additional
info. Note that EFMs can only use the `'hadrefm'` and `'eeefm'`
measures.
- **beta** : _float_
- The parameter $\beta$ appearing in the measure. Must be greater
than zero.
- **kappa** : {_float_, `'pf'`}
- If a number, the energy weighting parameter $\kappa$. If `'pf'`,
use $\kappa=v-1$ where $v$ is the valency of the vertex.
- **normed** : _bool_
- Controls normalization of the energies in the measure.
- **coords** : {`'ptyphim'`, `'epxpypz'`, `None`}
- Controls which coordinates are assumed for the input. See
[Measures](../measures) for additional info.
- **check_input** : _bool_
- Whether to check the type of the input each time or assume the
first input type.
"""
hidden_subslicing = kwargs.pop('subslicing', False)
# initialize base class
super(EFMSet, self).__init__(kwargs)
if efm_specs is None:
if vmax is not None:
vmin = 1 if self.normed else 0
efm_specs = [(v,0) for v in range(vmin, vmax+1)]
else:
raise ValueError('efm_specs and vmax cannot both be None.')
# get unique EFMs
self._unique_efms = frozenset(efm_specs)
# setup EFMs based on whether we can subslice or not
self.efms, self._args, self.rules = {}, {}, OrderedDict()
if self.subslicing or hidden_subslicing:
self._subslicing_setup()
else:
self._full_setup()
#================
# PRIVATE METHODS
#================
def _find_subslice(self, sig):
"""Determine if sig can be subsliced from the currently stored EFMs."""
nup, nlow = sig
bsigs = list(filter(lambda x: x[0] >= nup and x[1] >= nlow, self.efms))
return min(bsigs, key=sum) if len(bsigs) else None
def _find_minimum_rl(self, sig):
v = sum(sig)
vsigs = list(filter(lambda x: sum(x) == v, self.efms))
return min(vsigs, key=lambda x: abs(sig[0]-x[0]))
def _subslicing_setup(self):
"""Setup the rules for constructing the EFMs using the fact that
setting an index to zero "pops" it off, which is referred to as the
        subslicing property. Typically, the EE measures have this property
whereas the hadronic ones do not.
"""
# ensure there is at least one EFM of each valency for rl purposes
maxsig = max(self._unique_efms, key=sum) if len(self._unique_efms) else (0,0)
self._unique_efms |= set((n,0) for n in range(1, sum(maxsig)+1))
# sort EFMs to minimize raising/lowering operations
# EFMs will be ordered first by decreasing v, then decreasing abs difference
# between nlow and nup, and then decreasing nup
self._sorted_efms = sorted(self._unique_efms, key=itemgetter(0), reverse=True)
self._sorted_efms.sort(key=lambda x: abs(x[0]-x[1]), reverse=True)
self._sorted_efms.sort(key=sum, reverse=True)
# take care of empty set
if not len(self._sorted_efms):
return
# the first one must be raw constructed
sig0 = self._sorted_efms[0]
self.efms[sig0] = EFM(*sig0, no_measure=True)
self._args[sig0] = 'r'
self.rules[sig0] = 'constructing raw'
for sig in self._sorted_efms[1:]:
# determine if we can subslice
big_spec = self._find_subslice(sig)
if big_spec is not None:
self.efms[sig] = EFM(*sig, subslice_from=big_spec, no_measure=True)
self._args[sig] = big_spec
self.rules[sig] = 'subslicing from {}'.format(big_spec)
# find best raise/lower available
else:
rlsig = self._find_minimum_rl(sig)
self.efms[sig] = EFM(*sig, rl_from=rlsig, no_measure=True)
self._args[sig] = rlsig
rl_n = abs(rlsig[0] - sig[0])
self.rules[sig] = 'raising/lowering from {}, {}'.format(rlsig, rl_n)
def _full_setup(self):
"""Setup the rules for constructing the EFMs without the assumption of any
special properties.
"""
# sort the EFMs first by increasing v and then by increasing nlow
self._sorted_efms = sorted(self._unique_efms, key=itemgetter(1))
self._sorted_efms.sort(key=sum)
vprev, sigprev = None, None
for sig in self._sorted_efms:
v = sum(sig)
# construct raw (all up) if this is a new valency
if v != vprev:
self.efms[sig] = EFM(*sig, no_measure=True)
self._args[sig] = 'r'
self.rules[sig] = 'constructing raw'
# construct from lowering if we have a previous EFM with this v
else:
self.efms[sig] = EFM(*sig, rl_from=sigprev, no_measure=True)
self._args[sig] = sigprev
self.rules[sig] = 'lowering from {}'.format(sigprev)
            # update previous values
vprev, sigprev = v, sig
#===============
# PUBLIC METHODS
#===============
def compute(self, event=None, zs=None, nhats=None):
"""Evaluates the EFMs held by this `EFMSet` according to the
predetermined strategy on a single event. Note that `EFMSet` also is
callable, in which case this method is invoked.
**Arguments**
- **event** : 2-d array_like or `fastjet.PseudoJet`
- The event as an array of particles in the coordinates specified
by `coords`.
- **zs** : 1-d array_like
- If present, `nhats` must also be present, and `zs` is used in place
of the energies of an event.
- **nhats** : 2-d array like
- If present, `zs` must also be present, and `nhats` is used in place
of the scaled particle momenta.
**Returns**
- _dict_ of _numpy.ndarray_ of rank `v`
- A dictionary of EFM tensors indexed by their signatures.
"""
zsnhats = super(EFMSet, self).compute(event, zs, nhats)
efm_dict = {}
for sig in self._sorted_efms:
arg = self._args[sig]
data_arg = zsnhats if arg == 'r' else efm_dict[arg]
efm_dict[sig] = self.efms[sig]._construct(data_arg)
return efm_dict
def batch_compute(self, events, n_jobs=None):
"""Evaluates the EFMs held by the `EFMSet` on several events.
**Arguments**
- **events** : array_like or `fastjet.PseudoJet`
- The events as an array of arrays of particles in coordinates
matching those anticipated by `coords`.
- **n_jobs** : _int_ or `None`
- The number of worker processes to use. A value of `None` will
use as many processes as there are CPUs on the machine.
**Returns**
- _numpy.ndarray_ of _dict_
- Object array of dictionaries of EFM tensors indexed by their
signatures.
"""
return super(EFMSet, self).batch_compute(events, n_jobs)
def set_timers(self):
for efm in self.efms.values():
efm.set_timer()
def get_times(self):
return {sig: np.asarray(efm.times) for sig,efm in self.efms.items()}
#===========
# PROPERTIES
#===========
@property
def efms(self):
"""A dictionary of the `EFM` objects held by this `EFMSet` where the
keys are the signatures of the EFM."""
return self._efms
@property
def rules(self):
"""An ordered dictionary of the construction method used for each `EFM`
where the order is the same as `sorted_efms`."""
return self._rules
@efms.setter
def efms(self, value):
self._efms = value
@rules.setter
def rules(self, value):
self._rules = value | PypiClean |
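if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): build an EFMSet
    # covering all EFMs up to valency 3 with the hadronic EFM measure and print
    # the construction strategy chosen for each signature.
    efmset = EFMSet(vmax=3, measure='hadrefm')
    for sig, rule in efmset.rules.items():
        print(sig, '->', rule)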
/MufSim-1.2.2.tar.gz/MufSim-1.2.2/mufsim/insts/mcpgui.py | from mudclientprotocol import (
McpMessage, McpPackage, mktoken
)
from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr
from mufsim.interface import network_interface as netifc
import mufsim.stackitems as si
existing_dialogs = {}
def get_dlog(dlogid):
global existing_dialogs
return existing_dialogs.get(dlogid)
def add_dlog(dlogid, dlog):
global existing_dialogs
existing_dialogs[dlogid] = dlog
def del_dlog(dlogid):
global existing_dialogs
if dlogid in existing_dialogs:
del existing_dialogs[dlogid]
def version2float(vers):
    try:
        major, minor = vers.split('.', 1)
        major = int(major)
        minor = int(minor)
    except (AttributeError, ValueError):
        major = 0
        minor = 0
    return (major + (minor / 1000.0))
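# For example (illustrative): version2float('1.3') evaluates to 1.003, while a
# version string whose components are not plain integers falls back to 0.0.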
class McpGuiDialog(object):
def __init__(self, descr, pid):
self.pid = pid
self.descr = descr
self.values = {}
while True:
dlogid = mktoken()
if not get_dlog(dlogid):
break
self.dlogid = dlogid
add_dlog(dlogid, self)
def get_id(self):
return self.dlogid
def getvalue(self, name, dflt=None):
return self.values.get(name, dflt)
def setvalue(self, name, val):
self.values[name] = val
def send_message(self, mesg, **kwargs):
msg = McpMessage(
'org-fuzzball-gui-%s' % mesg,
dlogid=self.dlogid,
)
msg.extend(kwargs)
mcp = netifc.descr_mcp_connection(self.descr)
mcp.send_message(msg)
class McpGuiPackage(McpPackage):
def __init__(self):
McpPackage.__init__(self, 'org-fuzzball-gui', '1.0', '1.3')
def process_value_msg(self, msg):
dlogid = msg.get('dlogid')
ctrlid = msg.get('id')
value = msg.get('value')
if value is None:
return
dlog = get_dlog(dlogid)
if not dlog:
return
dlog.setvalue(ctrlid, value)
def process_event_msg(self, msg):
descr = msg.context
dlogid = msg.get('dlogid')
ctrlid = msg.get('id', '')
event = msg.get('event', '')
dismissed = msg.get('dismissed', '0')
        data = msg.get('data')
dlog = get_dlog(dlogid)
if not dlog:
return
from mufsim.processlist import process_list
fr = process_list.get(dlog.pid)
if fr is None:
return
dismissed = 0 if dismissed == '0' else 1
        if data is None:
            data = si.MufDict({}, fr.array_pinning)
        elif isinstance(data, str):
            data = [data]
fr.events.add_event(
'GUI.%s' % dlogid,
si.MufDict(
{
'descr': descr,
'dlogid': dlogid,
'id': ctrlid,
'event': event,
'dismissed': dismissed,
'values': dlog.values,
'data': data,
},
fr.array_pinning
)
)
if dismissed:
del_dlog(dlogid)
def process_error_msg(self, msg):
descr = msg.context
dlogid = msg.get('dlogid')
ctrlid = msg.get('id')
errcode = msg.get('errcode', '')
errtext = msg.get('errtext', '')
dlog = get_dlog(dlogid)
if not dlog:
return
from mufsim.processlist import process_list
fr = process_list.get(dlog.pid)
if fr is None:
return
data = {
'descr': descr,
'dlogid': dlogid,
'errcode': errcode,
'errtext': errtext,
}
if ctrlid:
data['id'] = ctrlid
fr.events.add_event('GUI.%s' % dlogid, data)
def process_message(self, msg):
msgname = msg.name[len(self.name)+1:]
if msgname == 'ctrl-value':
self.process_value_msg(msg)
elif msgname == 'ctrl-event':
self.process_event_msg(msg)
elif msgname == 'error':
self.process_error_msg(msg)
@instr("gui_available")
class InstGuiAvailable(Instruction):
def execute(self, fr):
descr = fr.data_pop(int)
mcp = netifc.descr_mcp_connection(descr)
vers = mcp.supports_package('org-fuzzball-gui')
vers = version2float(vers)
fr.data_push(vers)
@instr("gui_dlog_create")
class InstGuiDlogCreate(Instruction):
def execute(self, fr):
fr.check_underflow(4)
args = fr.data_pop_dict()
title = fr.data_pop(str)
dtype = fr.data_pop(str)
descr = fr.data_pop(int)
if dtype not in ['simple', 'tabbed', 'helper']:
dtype = "simple"
for key in args.keys():
fr.check_type(key, [str])
mcp = netifc.descr_mcp_connection(descr)
dlog = McpGuiDialog(descr, fr.pid)
msg = McpMessage(
'org-fuzzball-gui-dlog-create',
dlogid=dlog.dlogid,
title=title,
type=dtype,
)
msg.extend(dict(args))
mcp.send_message(msg)
fr.data_push(dlog.dlogid)
@instr("gui_dlog_show")
class InstGuiDlogShow(Instruction):
def execute(self, fr):
dlogid = fr.data_pop(str)
dlog = get_dlog(dlogid)
if not dlog:
raise MufRuntimeError("Invalid dialog ID")
dlog.send_message('dlog-show')
@instr("gui_dlog_close")
class InstGuiDlogClose(Instruction):
def execute(self, fr):
dlogid = fr.data_pop(str)
dlog = get_dlog(dlogid)
if not dlog:
raise MufRuntimeError("Invalid dialog ID")
dlog.send_message('dlog-close')
del_dlog(dlogid)
@instr("gui_ctrl_create")
class InstGuiCtrlCreate(Instruction):
def execute(self, fr):
fr.check_underflow(4)
args = fr.data_pop_dict()
ctrlid = fr.data_pop(str)
ctype = fr.data_pop(str)
dlogid = fr.data_pop(str)
for key in args.keys():
fr.check_type(key, [str])
dlog = get_dlog(dlogid)
args['id'] = ctrlid
dlog.send_message('ctrl-%s' % ctype, **args)
@instr("gui_ctrl_command")
class InstGuiCtrlCommand(Instruction):
def execute(self, fr):
fr.check_underflow(4)
args = fr.data_pop_dict()
command = fr.data_pop(str)
ctrlid = fr.data_pop(str)
dlogid = fr.data_pop(str)
for key in args.keys():
fr.check_type(key, [str])
dlog = get_dlog(dlogid)
args['id'] = ctrlid
args['command'] = command
dlog.send_message('ctrl-command', **args)
@instr("gui_value_set")
class InstGuiValueSet(Instruction):
def execute(self, fr):
fr.check_underflow(3)
val = fr.data_pop(str, int, float, si.DBRef, si.MufList)
ctrlid = fr.data_pop(str)
dlogid = fr.data_pop(str)
if isinstance(val, si.MufList):
val = val[:]
fr.check_list_type(val, (str, int, float, si.DBRef), argnum=3)
dlog = get_dlog(dlogid)
dlog.setvalue(ctrlid, val)
dlog.send_message('ctrl-value', id=ctrlid, value=val)
@instr("gui_value_get")
class InstGuiValueGet(Instruction):
def execute(self, fr):
fr.check_underflow(2)
ctrlid = fr.data_pop(str)
dlogid = fr.data_pop(str)
dlog = get_dlog(dlogid)
val = dlog.getvalue(ctrlid, '')
if isinstance(val, str):
val = [val]
fr.data_push(val)
@instr("gui_values_get")
class InstGuiValuesGet(Instruction):
def execute(self, fr):
dlogid = fr.data_pop(str)
dlog = get_dlog(dlogid)
fr.data_push(dlog.values)
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap | PypiClean |
/EQSN-0.0.8.tar.gz/EQSN-0.0.8/eqsn/gates.py | import multiprocessing
import logging
import numpy as np
from eqsn.qubit_thread import SINGLE_GATE, MERGE_SEND, MERGE_ACCEPT, MEASURE, \
MEASURE_NON_DESTRUCTIVE, GIVE_STATEVECTOR, DOUBLE_GATE, \
CONTROLLED_GATE, NEW_QUBIT, ADD_MERGED_QUBITS_TO_DICT, CONTROLLED_TWO_GATE
from eqsn.shared_dict import SharedDict
from eqsn.worker_process import WorkerProcess
from eqsn.process_picker import ProcessPicker
class EQSN(object):
"""
Main object of EQSN, with this object, all of the Qubits can be controlled.
All functions are threadsafe, but at the moment, only one instance should be
used.
"""
__instance = None
@staticmethod
def get_instance():
if EQSN.__instance is None:
return EQSN()
return EQSN.__instance
def __init__(self):
if EQSN.__instance is not None:
raise ValueError("Use get instance to get this class")
EQSN.__instance = self
self.manager = multiprocessing.Manager()
cpu_count = multiprocessing.cpu_count()
self.process_queue_list = []
for _ in range(cpu_count):
q = multiprocessing.Queue()
new_worker = WorkerProcess(q)
p = multiprocessing.Process(target=new_worker.run, args=())
p.start()
self.process_queue_list.append((p, q))
self.process_picker = ProcessPicker.get_instance(
cpu_count, self.process_queue_list)
# create the shared dict after all the processes have been created.
self.shared_dict = SharedDict.get_instance()
def new_qubit(self, q_id):
"""
Creates a new qubit with an id.
Args:
q_id (String): Id of the new qubit.
"""
p, q = self.process_picker.get_next_process_queue()
q.put([NEW_QUBIT, q_id])
self.shared_dict.set_thread_with_id(q_id, p, q)
logging.debug("Created new qubit with id %s.", q_id)
def stop_all(self):
"""
Stops the simulator from running.
"""
for p, q in self.process_queue_list:
q.put(None)
p.join()
self.shared_dict.stop_shared_dict()
self.process_picker.stop_process_picker()
EQSN.__instance = None
def X_gate(self, q_id):
"""
Applies the Pauli X gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = np.array([[0, 1], [1, 0]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def Y_gate(self, q_id):
"""
Applies the Pauli Y gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = np.array([[0, 0 - 1j], [0 + 1j, 0]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def Z_gate(self, q_id):
"""
Applies the Pauli Z gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = np.array([[1, 0], [0, -1]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def H_gate(self, q_id):
"""
Applies the Hadamard gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = (1 / 2.0) ** 0.5 * np.array([[1, 1], [1, -1]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def T_gate(self, q_id):
"""
Applies the T gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = np.array(
[[1, 0], [0, (0.7071067811865476 + 0.7071067811865475j)]],
dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def S_gate(self, q_id):
"""
Applies the S gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = np.array([[1, 0], [0, 1j]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def K_gate(self, q_id):
"""
Applies the K gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
"""
x = 0.5 * np.array([[1 + 1j, 1 - 1j], [-1 + 1j, -1 - 1j]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def RX_gate(self, q_id, rad):
"""
Applies a rotational X gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
rad(int): Rotational degrees in rad.
"""
mid = np.cos(rad / 2)
other = -1j * np.sin(rad / 2)
x = np.array([[mid, other], [other, mid]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def RY_gate(self, q_id, rad):
"""
Applies a rotational Y gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
rad(int): Rotational degrees in rad.
"""
mid = np.cos(rad / 2)
other = np.sin(rad / 2)
x = np.array([[mid, -1.0 * other], [other, mid]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def RZ_gate(self, q_id, rad):
"""
Applies a rotational Z gate to the Qubit with q_id.
Args:
q_id(String): ID of the Qubit to apply the gate to.
rad(int): Rotational degrees in rad.
"""
top = np.exp(-1j * (rad / 2))
bot = np.exp(1j * (rad / 2))
x = np.array([[top, 0], [0, bot]], dtype=np.csingle)
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, x, q_id])
def custom_gate(self, q_id, gate):
"""
Applies a custom gate to the qubit with q_id.
Args:
q_id(String): Id of the Qubit to apply the gate on.
gate(np.ndarray): unitary 2x2 matrix, of the gate.
"""
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([SINGLE_GATE, gate, q_id])
def merge_qubits(self, q_id1, q_id2):
"""
Merges two qubits to one process, if they are not already
running in the same process.
Args:
q_id1 (String): Id of the Qubit merged into q_id2.
q_id2 (String): Id of the Qubit merged with q_id1.
"""
queues = self.shared_dict.get_queues_for_ids([q_id1, q_id2])
if len(queues) == 1:
return # Already merged
else:
            # Block the dictionary so that no one can send commands to the qubits.
logging.debug("Merge Qubits %s and %s.", q_id1, q_id2)
self.shared_dict.block_shared_dict()
q1 = queues[0]
q2 = queues[1]
merge_q = self.manager.Queue()
qubits_q = self.manager.Queue()
q1.put([MERGE_SEND, q_id1, merge_q, qubits_q])
q2.put([MERGE_ACCEPT, q_id2, merge_q])
qubits = qubits_q.get()
q2.put([ADD_MERGED_QUBITS_TO_DICT, q_id2, qubits])
self.shared_dict.change_thread_and_queue_of_ids_nonblocking(
qubits, q_id2)
self.shared_dict.release_shared_dict()
def cnot_gate(self, applied_to_id, controlled_by_id):
"""
Applies a controlled X gate, where the gate is applied to
q_id1 and controlled by q_id2.
Args:
applied_to_id (String): Id of the Qubit on which the X gate is applied.
controlled_by_id (String): Id of the Qubit which controls the gate.
"""
x = np.array([[0, 1], [1, 0]], dtype=np.csingle)
self.merge_qubits(applied_to_id, controlled_by_id)
q = self.shared_dict.get_queues_for_ids([applied_to_id])[0]
q.put([CONTROLLED_GATE, x, applied_to_id, controlled_by_id])
def cphase_gate(self, applied_to_id, controlled_by_id):
"""
Applies a controlled Z gate, where the gate is applied to
q_id1 and controlled by q_id2.
Args:
            applied_to_id (String): Id of the Qubit on which the Z gate is applied.
controlled_by_id (String): Id of the Qubit which controls the gate.
"""
        x = np.array([[1, 0], [0, -1]], dtype=np.csingle)
self.merge_qubits(applied_to_id, controlled_by_id)
q = self.shared_dict.get_queues_for_ids([applied_to_id])[0]
q.put([CONTROLLED_GATE, x, applied_to_id, controlled_by_id])
def give_statevector_for(self, q_id):
"""
Gives the statevector and Qubits of a Qubit and all other Qubits with
which the qubit is entangled.
Args:
q_id(String): Qubit id of the Qubit to get the statevector from.
Returns:
Tuple. Tuple of a lists and vector, where the first list are the qubits of
the statevector and the second list is the statevector.
"""
ret = self.manager.Queue()
q = self.shared_dict.get_queues_for_ids([q_id])[0]
q.put([GIVE_STATEVECTOR, q_id, ret])
qubits, vector = ret.get()
return qubits, vector
def custom_two_qubit_gate(self, q_id1, q_id2, gate):
"""
Applies a two Qubit gate to two Qubits.
Args:
q_id1(String): ID of the first Qubit of the gate.
q_id2(String): ID of the second Qubit of the gate.
gate(np.ndarray): 4x4 unitary matrix gate.
"""
self.merge_qubits(q_id1, q_id2)
q = self.shared_dict.get_queues_for_ids([q_id1])[0]
q.put([DOUBLE_GATE, gate, q_id1, q_id2])
def custom_two_qubit_control_gate(self, q_id1, q_id2, q_id3, gate):
"""
Applies a two Qubit gate to two Qubits.
Args:
q_id1 (String): ID of the first Qubit of the gate.
q_id2 (String): ID of the second Qubit of the gate.
q_id3 (String): ID of the third Qubit of the gate.
gate(np.ndarray): 4x4 unitary matrix gate.
"""
self.merge_qubits(q_id1, q_id2)
self.merge_qubits(q_id1, q_id3)
q = self.shared_dict.get_queues_for_ids([q_id1])[0]
q.put([CONTROLLED_TWO_GATE, gate, q_id1, q_id2, q_id3])
def custom_controlled_gate(self, applied_to_id, controlled_by_id, gate):
"""
Applies a custom controlled gate to a Qubit.
Args:
applied_to_id(String): ID of the qubit to apply the gate to.
controlled_by_id(String): ID of the qubit which controls the gate.
gate(np.ndarray): Unitary 2x2 matrix which should be applied.
"""
self.merge_qubits(applied_to_id, controlled_by_id)
q = self.shared_dict.get_queues_for_ids([applied_to_id])[0]
q.put([CONTROLLED_GATE, gate, applied_to_id, controlled_by_id])
def measure(self, q_id, non_destructive=False):
"""
Measures a qubit with an id. If non_destructive is False, the qubit
is removed from the system, otherwise, the qubit stays in the system
after measurement, but its wavefunction collapses.
Args:
id (String): Id of the Qubit which should be measured.
non_destructive(bool): If a qubit should not be removed from the
system after measurement.
"""
ret = self.manager.Queue()
q = self.shared_dict.get_queues_for_ids([q_id])[0]
if non_destructive:
q.put([MEASURE_NON_DESTRUCTIVE, q_id, ret])
else:
q.put([MEASURE, q_id, ret])
res = ret.get()
if not non_destructive:
self.shared_dict.delete_id_and_check_to_join_thread(q_id)
logging.debug(
"Qubit with id %s has been measured with outcome %d.", q_id, res)
return res | PypiClean |
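if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): prepare a Bell pair
    # from two fresh qubits, measure both, and shut the simulator down. The
    # qubit ids are arbitrary strings chosen for this example.
    eqsn = EQSN.get_instance()
    eqsn.new_qubit('q0')
    eqsn.new_qubit('q1')
    eqsn.H_gate('q0')
    eqsn.cnot_gate('q1', 'q0')  # X on q1, controlled by q0
    m0 = eqsn.measure('q0')
    m1 = eqsn.measure('q1')
    print('measured', m0, m1)  # the two outcomes agree for a Bell pair
    eqsn.stop_all()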
/Minio_hung-0.0.7.1.tar.gz/Minio_hung-0.0.7.1/src/minio_hung/ReadMinio.py | from importlib.resources import read_text
from minio import Minio
import pandas as pd
from io import BytesIO
import os
def readcsv(ACCESS_KEY,PRIVATE_KEY,BUCKET_NAME,OBJECT_NAME):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
obj = client.get_object(BUCKET_NAME,OBJECT_NAME+'.csv')
de = pd.read_csv(obj)
print(de)
def writecsv(ACCESS_KEY,PRIVATE_KEY,BUCKET_NAME_IN,OBJECT_NAME_IN,BUCKET_NAME_OUT,OBJECT_NAME_OUT):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
obj = client.get_object(BUCKET_NAME_IN, OBJECT_NAME_IN+'.csv')
df = pd.read_csv(obj)
csv = df.to_csv().encode('utf-8')
client.put_object(
BUCKET_NAME_OUT,
OBJECT_NAME_OUT+'.csv',
data=BytesIO(csv),
length=len(csv),
content_type='example.csv'
)
def readtext(ACCESS_KEY,PRIVATE_KEY,BUCKET_NAME,OBJECT_NAME):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
obj = client.get_object(BUCKET_NAME,OBJECT_NAME+'.txt')
df = pd.read_table(obj)
print(df)
def writetext(ACCESS_KEY,PRIVATE_KEY,BUCKET_NAME_IN,OBJECT_NAME_IN,BUCKET_NAME_OUT,OBJECT_NAME_OUT):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
obj = client.get_object(BUCKET_NAME_IN, OBJECT_NAME_IN+'.txt')
df = pd.read_table(obj)
csv = df.to_csv().encode('utf-8')
client.put_object(
BUCKET_NAME_OUT,
OBJECT_NAME_OUT+'.csv',
data=BytesIO(csv),
length=len(csv),
content_type='example.txt'
)
def read_list(ACCESS_KEY, PRIVATE_KEY):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
a = client.list_buckets()
print(a)
def readexcel(ACCESS_KEY, PRIVATE_KEY, BUCKET_NAME, OBJECT_NAME):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
a = client.presigned_get_object(BUCKET_NAME, OBJECT_NAME+'.xlsx')
c = pd.read_excel(a)
print(c)
def writeexcel(ACCESS_KEY,PRIVATE_KEY,BUCKET_NAME_IN,OBJECT_NAME_IN,BUCKET_NAME_OUT,OBJECT_NAME_OUT):
client = Minio(
"apilakedpa.apps.xplat.fis.com.vn",
access_key=ACCESS_KEY,
secret_key=PRIVATE_KEY,
secure = True
)
b = client.presigned_get_object(BUCKET_NAME_IN,OBJECT_NAME_IN+'.xlsx')
d = pd.read_excel(b)
csv = d.to_csv().encode('utf-8')
client.put_object(
BUCKET_NAME_OUT,
(OBJECT_NAME_OUT + ".csv"),
data=BytesIO(csv),
length=len(csv),
content_type='example.txt'
) | PypiClean |
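# Illustrative usage (not part of the original module). The keys, bucket and
# object names below are placeholders; note that object names are passed without
# a file extension, since each helper appends '.csv', '.txt' or '.xlsx' itself:
#
# readcsv('MY_ACCESS_KEY', 'MY_SECRET_KEY', 'my-bucket', 'my-table')
# writecsv('MY_ACCESS_KEY', 'MY_SECRET_KEY', 'src-bucket', 'src-table',
#          'dst-bucket', 'dst-copy')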
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_budgets_id_limits_limit_id_transactions/get.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.transaction_type_filter import TransactionTypeFilter
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.not_found import NotFound
from firefly_iii_client.model.transaction_array import TransactionArray
from . import path
# Query params
PageSchema = schemas.IntSchema
TypeSchema = TransactionTypeFilter
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
'page': typing.Union[PageSchema, decimal.Decimal, int, ],
'type': typing.Union[TypeSchema, ],
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_page = api_client.QueryParameter(
name="page",
style=api_client.ParameterStyle.FORM,
schema=PageSchema,
explode=True,
)
request_query_type = api_client.QueryParameter(
name="type",
style=api_client.ParameterStyle.FORM,
schema=TypeSchema,
explode=True,
)
# Header params
XTraceIdSchema = schemas.UUIDSchema
RequestRequiredHeaderParams = typing_extensions.TypedDict(
'RequestRequiredHeaderParams',
{
}
)
RequestOptionalHeaderParams = typing_extensions.TypedDict(
'RequestOptionalHeaderParams',
{
'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
},
total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
pass
request_header_x_trace_id = api_client.HeaderParameter(
name="X-Trace-Id",
style=api_client.ParameterStyle.SIMPLE,
schema=XTraceIdSchema,
)
# Path params
IdSchema = schemas.StrSchema
LimitIdSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'id': typing.Union[IdSchema, str, ],
'limitId': typing.Union[LimitIdSchema, str, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_id = api_client.PathParameter(
name="id",
style=api_client.ParameterStyle.SIMPLE,
schema=IdSchema,
required=True,
)
request_path_limit_id = api_client.PathParameter(
name="limitId",
style=api_client.ParameterStyle.SIMPLE,
schema=LimitIdSchema,
required=True,
)
_auth = [
'firefly_iii_auth',
]
SchemaFor200ResponseBodyApplicationVndApijson = TransactionArray
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationVndApijson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/vnd.api+json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationVndApijson),
},
)
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor400ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
content={
'application/json': api_client.MediaType(
schema=SchemaFor400ResponseBodyApplicationJson),
},
)
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor401ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
content={
'application/json': api_client.MediaType(
schema=SchemaFor401ResponseBodyApplicationJson),
},
)
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor404ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
content={
'application/json': api_client.MediaType(
schema=SchemaFor404ResponseBodyApplicationJson),
},
)
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor500ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
response_cls=ApiResponseFor500,
content={
'application/json': api_client.MediaType(
schema=SchemaFor500ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
'401': _response_for_401,
'404': _response_for_404,
'500': _response_for_500,
}
_all_accept_content_types = (
'application/vnd.api+json',
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _list_transaction_by_budget_limit_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _list_transaction_by_budget_limit_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _list_transaction_by_budget_limit_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _list_transaction_by_budget_limit_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
List all transactions by a budget limit ID.
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_id,
request_path_limit_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
prefix_separator_iterator = None
for parameter in (
request_query_page,
request_query_type,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
for parameter in (
request_header_x_trace_id,
):
parameter_data = header_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_headers.extend(serialized_data)
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class ListTransactionByBudgetLimit(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def list_transaction_by_budget_limit(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def list_transaction_by_budget_limit(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def list_transaction_by_budget_limit(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def list_transaction_by_budget_limit(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._list_transaction_by_budget_limit_oapg(
query_params=query_params,
header_params=header_params,
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._list_transaction_by_budget_limit_oapg(
query_params=query_params,
header_params=header_params,
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mobile/app/SceneController.js.uncompressed.js | define("dojox/mobile/app/SceneController", ["dijit","dojo","dojox","dojo/require!dojox/mobile/_base"], function(dijit,dojo,dojox){
dojo.provide("dojox.mobile.app.SceneController");
dojo.experimental("dojox.mobile.app.SceneController");
dojo.require("dojox.mobile._base");
(function(){
var app = dojox.mobile.app;
var templates = {};
dojo.declare("dojox.mobile.app.SceneController", dojox.mobile.View, {
stageController: null,
keepScrollPos: false,
init: function(sceneName, params){
// summary:
// Initializes the scene by loading the HTML template and code, if it has
// not already been loaded
this.sceneName = sceneName;
this.params = params;
var templateUrl = app.resolveTemplate(sceneName);
this._deferredInit = new dojo.Deferred();
if(templates[sceneName]){
// If the template has been cached, do not load it again.
this._setContents(templates[sceneName]);
}else{
// Otherwise load the template
dojo.xhrGet({
url: templateUrl,
handleAs: "text"
}).addCallback(dojo.hitch(this, this._setContents));
}
return this._deferredInit;
},
_setContents: function(templateHtml){
// summary:
// Sets the content of the View, and invokes either the loading or
// initialization of the scene assistant.
templates[this.sceneName] = templateHtml;
this.domNode.innerHTML = "<div>" + templateHtml + "</div>";
var sceneAssistantName = "";
var nameParts = this.sceneName.split("-");
for(var i = 0; i < nameParts.length; i++){
sceneAssistantName += nameParts[i].substring(0, 1).toUpperCase()
+ nameParts[i].substring(1);
}
sceneAssistantName += "Assistant";
this.sceneAssistantName = sceneAssistantName;
var _this = this;
dojox.mobile.app.loadResourcesForScene(this.sceneName, function(){
console.log("All resources for ",_this.sceneName," loaded");
var assistant;
if(typeof(dojo.global[sceneAssistantName]) != "undefined"){
_this._initAssistant();
}else{
var assistantUrl = app.resolveAssistant(_this.sceneName);
dojo.xhrGet({
url: assistantUrl,
handleAs: "text"
}).addCallback(function(text){
try{
dojo.eval(text);
}catch(e){
console.log("Error initializing code for scene " + _this.sceneName
+ '. Please check for syntax errors');
throw e;
}
_this._initAssistant();
});
}
});
},
_initAssistant: function(){
// summary:
// Initializes the scene assistant. At this point, the View is
// populated with the HTML template, and the scene assistant type
// is declared.
console.log("Instantiating the scene assistant " + this.sceneAssistantName);
var cls = dojo.getObject(this.sceneAssistantName);
if(!cls){
throw Error("Unable to resolve scene assistant "
+ this.sceneAssistantName);
}
this.assistant = new cls(this.params);
this.assistant.controller = this;
this.assistant.domNode = this.domNode.firstChild;
this.assistant.setup();
this._deferredInit.callback();
},
query: function(selector, node){
// summary:
// Queries for DOM nodes within either the node passed in as an argument
// or within this view.
return dojo.query(selector, node || this.domNode)
},
parse: function(node){
var widgets = this._widgets =
dojox.mobile.parser.parse(node || this.domNode, {
controller: this
});
// Tell all widgets what their controller is.
for(var i = 0; i < widgets.length; i++){
widgets[i].set("controller", this);
}
},
getWindowSize: function(){
// TODO, this needs cross browser testing
return {
w: dojo.global.innerWidth,
h: dojo.global.innerHeight
}
},
showAlertDialog: function(props){
var size = dojo.marginBox(this.assistant.domNode);
var dialog = new dojox.mobile.app.AlertDialog(
dojo.mixin(props, {controller: this}));
this.assistant.domNode.appendChild(dialog.domNode);
console.log("Appended " , dialog.domNode, " to ", this.assistant.domNode);
dialog.show();
},
popupSubMenu: function(info){
var widget = new dojox.mobile.app.ListSelector({
controller: this,
destroyOnHide: true,
onChoose: info.onChoose
});
this.assistant.domNode.appendChild(widget.domNode);
widget.set("data", info.choices);
widget.show(info.fromNode);
}
});
})();
}); | PypiClean |
/EmBCI-0.1.2.tar.gz/EmBCI-0.1.2/README_zh.md | # User Guide
- Run the main program automatically at boot `only tested on a PC so far; using crontab is a good way to do this`
- Show waveforms and some status information on the screen `the GUI has an initial Menu for picking a task such as showing waveforms, showing information, and so on; everything about using the screen GUI is in the Screen_GUI class in utils/visualization.py, a fairly simple GUI implementation where buttons are bound to callback functions; run_me1.py is an example`
- If you want the orangepi to connect to wifi automatically, provide an open (password-free) wifi network before booting, or use the orangepi as a hotspot and let the PC connect to it
## Controlling auto-run at boot
[cron](https://en.wikipedia.org/wiki/Cron)
# Notes for developers
## Install the required modules
Run `pip install -r requirements.txt` from the command line.
Both python2 and python3 are supported.
`2018.05.04: there have been a lot of recent updates and compatibility has not been considered yet; everything is currently based on python2, and the orangepi is set up with a py2 environment`
On the orangepi the username is hank and the password is gaohanlin.
- Run `git pull` inside /home/hank/programes/MyBCI to pick up the latest updates
- sudo ipython, password gaohanlin
- run run_me1.py to see the GUI; you need to manually call `s.start_touch_screen(port=avr_port(default '/dev/ttyS2' on OPi))` before the ARM starts receiving the touch-screen signals sent by the AVR
- run_me1.py currently has two features, showing information and showing waveforms; the following features still need to be added
- `adjust the waveform scale factor via the touch screen (written but not tested)`
- `show the frequency spectrum; the SignalInfo class in utils/common.py implements extraction of various frequency-domain features`
- `tidy up the GUI layout`
## Running the program
`run_me.py` is the program entry point, i.e. the bootstrap file. It calls the various functions in src and utils to create
objects such as `reader`, `model` and `commander`, which are used to acquire data, train/classify models and control
peripheral devices. These objects are passed into an experiment-paradigm framework; `src/frame.py` provides frameworks
such as `sEMG`, `SSVEP`, `P300`, `Motor Imaginary` and `TGAM_relax`, which still need further work.
## Programming the arduino
`sudo apt-get install avr-gcc avrdude arduino-mk`
[guide1](https://github.com/kcuzner/avrdude)
[guide2](http://kevincuzner.com/2013/05/27/raspberry-pi-as-an-avr-programmer/)
## Using the orangepi as a wifi hotspot
[How the Raspberry Pi does it](http://www.raspberry-projects.com/pi/software_utilities/wifi-access-point)
[Notes on configuring /etc/network/interfaces](https://unix.stackexchange.com/questions/128439/good-detailed-explanation-of-etc-network-interfaces-syntax)
| PypiClean |
/NuPlone-2.2.0.tar.gz/NuPlone-2.2.0/docs/changes.rst | Changelog
=========
2.2.0 (2023-06-14)
------------------
- Support Plone 6
[ale-rt]
2.1.4 (2023-01-04)
------------------
- Sitemenu: Add a helper method to add submenus to existing categories.
[thet]
- Update pre-commit config.
[thet]
- Update buildout, test and CI infrastructure.
[thet]
2.1.3 (2022-09-15)
------------------
- Update a deprecated import
[ale-rt]
2.1.2 (2022-06-06)
------------------
- Fix the position of the footer (fixes `#59 <https://github.com/euphorie/NuPlone/issues/39>`_) [ale-rt]
2.1.1 (2022-03-30)
------------------
- Fix brown-bag release that was missing the bundle
2.1.0 (2022-03-30)
------------------
- Add a ``@@nuplone-version`` view which can be used to break caching of resources.
- Add a new ``NuPlone.bundle`` resource directory and deliver the bundle directly from NuPlone.
- Update all JavaScript resources to use latest Patternslib (7.4.0).
Fixes a number of security problems with old JavaScript.
- Remove Support for IE<11.
- Cleanup resources.
- Fixed CSRF issue with copy & paste.
2.0.2 (2021-12-08)
------------------
- Text input: Take type from widget if available
- Decrease log verbosity
2.0.1 (2021-06-02)
------------------
- Restore ordering support that accidentally got lost in #20
2.0.0 (2021-05-27)
------------------
BREAKING CHANGES:
Update to Plone 5.2
- Remove the dependency on grok
- Remove z3c.appconfig
- Remove z3c.zrtresource
1.6.4 (unreleased)
------------------
- Removed the update-order tile
1.6.3 (2020-05-26)
------------------
- Improve styles for list of checkboxes and labels by adding more spacing.
- Fix checkboxlist to show the field's title on the fieldset legend instead of the value of the first item.
- Show validation errors that are not associated with a widget (like invariants).
1.6.2 (2019-08-21)
------------------
- Translation update (IS)
1.6.1 (2019-01-11)
------------------
- Fix getting the email settings for Plone 5
- Customised orderedselect_input.pt for IOrderedSelectWidget
1.6.0 (2018-10-10)
------------------
This version is built for Plone 5.1 and higher!
- More efficient and safe url definition in templates
- Textlines widget: be more in line with other widgets, use
`legend` for the field name.
1.5.6 (2017-11-27)
------------------
- In the File and Image widgets (z3cform), add a safeguard that prevents
a user-facing error when a blob file is missing
- Updated translations for Croatian (HR)
1.5.5 (2017-07-17)
------------------
- Make re-ordering more robust
1.5.4 (2017-06-16)
------------------
- Add translation file for Croatian (hr), currently with one translation
1.5.3 (2017-04-03)
------------------
- Also show the "stupid" div again on text_input, but only if the field
has a description
1.5.2 (2016-09-29)
------------------
- Streamline File and Image input
- Mostly revert markup change of 1.5.1, since the `<div>` is required
for making infoBubbles render correctly
1.5.1 (2016-06-20)
------------------
- Fix markup in z3c.form input fields: replace `<div>` around label and input
with a `<span>` and only show it if it is needed to add dependency classes.
1.5.0 (2015-10-13)
------------------
- Update JS libraries
jquery from 1.4.4 to 1.11.3
jquery.ui from 1.8 to 1.11.4
Add jquery.browser (which adds functionality removed from jquery core)
- Include the new JS libraries and update code to handle them.
Specifically, the .live method is no longer available and .on must be used.
1.4.5 (2014-08-29)
------------------
- On the PW reset form, catch errors caused by wrong user name and show
meaningful error message instead of 'Ooops'
- fixed Italian translation for button_cancel (was the same as button_delete),
OSHA ref #10522
1.4.4 (2014-08-11)
------------------
- Add support for Plone 4.3.3.
1.4.3 (2014-07-09)
------------------
- Bugfix. Site Menu dropdown prevents clicking on certain page elements. (OSHA #10390)
- Bugfix. Site Menu dropdowns truncated in IE. (OSHA #10329)
1.4.2 (2014-07-07)
------------------
- Revert IE 11 CSS fix, it has unintended consequences.
1.4.1 (2014-07-07)
------------------
- Update a translation in IT
- CSS fix for IE 11.
1.4.0 - January 9, 2014
-----------------------
- Add an API to the analytics tile to trigger extra (virtual) page views.
- Change analytics tile to send the authentication status (*anonymous* or
  *authenticated*) instead of the user's login name.
1.3.9 - January 3, 2014
-----------------------
- Add prototype page for osha library page.
1.3.8 - December 19, 2013
-------------------------
- Fix compatibility with Chameleon 1.14.
- New translation: Maltese (MT)
1.3.7 - December 12, 2013
-------------------------
- New translations: Italian (IT) and Icelandic (IS)
- Fixed issue with file browse button
- Setup accordian for prototype settings page.
1.3.6 - October 7, 2013
-----------------------
- Modify internal buildout to use the latest buildout and Pillow releases.
- Remove stray space in readonly-attribute for named file widgets. This caused
IE to treat all file widgets as read-only.
1.3.5 - July 5, 2013
--------------------
- Changed 2 strings in the Greek translation [pyailor]
1.3.4 - July 3, 2013
--------------------
- Enable 'depends' form directive also for schema extended fields.
[jcbrand]
1.3.3 - April 23, 2013
----------------------
- Added translation to Hungarian
[pysailor]
- Textual corrections for Lithuanian
[pysailor]
1.3.2 - April 4, 2013
---------------------
- Add standard makefile to manage builds and cleanup buildout configuration.
- Fix editing of fields using object widgets: their data was not correctly
extracted due to a missing hidden form field.
1.3.1 - March 6, 2013
---------------------
- Fix a syntax error in template for the select form widget.
1.3 - February 14, 2013
-----------------------
- Prevent the *Paste* action from being shown in places where paste was
not allowed.
- Stop the portlet sidebar from jumping from left to right on page loads.
- Tighten lxml dependency to make sure security improvements in its html
cleaner are included.
- Update form markup to add an `error` class on labels for fields with
errors.
- Add new translations: Finnish and Lithuanian
1.2 - December 7, 2012
----------------------
- Rewrite code to handle links in rich text fields. This fixes
  `ticket 56 <https://github.com/euphorie/Euphorie/issues/56>`_.
- Add new translations: Bulgarian, Flemish, Catalan, Latvian and Portuguese.
- Update htmllaundry to 2.0.
- Update TinyMCE to version 3.5.6.
- Configure HTML cleanup code to strip data: attributes.
1.1 - December 20, 2011
-----------------------
- Allow anonymous users to switch the current language as well. This fixes
Euphorie ticket `27 <https://github.com/euphorie/Euphorie/issues/27>`_,
1.0.1 - December 9, 2011
------------------------
- Update package metadata.
[wichert]
- Fix MANIFEST so tiny_mce is included in the distribution.
[wichert]
1.0 - December 8, 2011
----------------------
- Add support for Plone 4.1 and Chameleon 2.x.
[wichert]
- Register screen-ie6.css as zrt-resource.
[jcbrand]
- New Spanish, Czech, Slovenian translations
[thomas_w]
- Refactored infoPanels on z3cforms to fix alignment issues.
[jcbrand]
- Don't capitalize questions and legends.
[jcbrand]
- Add css class to enable secondary InfoPanels (per field).
[jcbrand]
- Two newlines TinyMCE bug fixed (Github issue #1)
[jcbrand]
1.0rc8 - May 17, 2011
---------------------
- Correct htmllaundry dependency.
[wichert]
- Correct location of toolbar CSS.
[wichert]
1.0rc7 - April 26, 2011
-----------------------
- Exclude prototype from all distribution forms; the symlinked files confuse
distutils too much.
[wichert]
- Add MANIFEST.in and restructure symlinks for css/javacsript files to
guarantee all files are included in eggs.
[wichert]
1.0rc6 - April 21, 2011
-----------------------
- Re-release rc5 as rc6 to fixup error in source control tagging.
[wichert]
1.0rc5 - April 21, 2011
-----------------------
- Prefer `Title` method to get the current title for the title of the delete
confirmation page.
[wichert]
- Do not put a <p> element in an <object>; IE9 will move it outside the object
element, thus resulting in leftovers even when using the object->iframe
conversion.
[wichert]
- Enable the iframe workaround for IE 9 as well.
[wichert]
- Add support for status messages containing markup.
[jcbrand]
- Bugfix. Prevent clicking on the "Actions" site menu action if it doesn't have
a URL to go to.
[jcbrand]
1.0rc4 - February 1, 2011
------------------------
- Paper brown bag: fix initialisation of rich text editor in forms. This
broke in 1.0rc3 as a part of the tooltip changes.
[wichert]
1.0rc3 - January 25, 2011
-------------------------
- Upgrade to jQuery 1.4.4 and jQuery UI 1.8.9.
[wichert]
- Add javascript workaround for bad handling of ``<button>`` elements in
Internet Explorer versions before 8.
[wichert]
- Do form-related markup transforms earlier so positioning of tooltips
from global transforms works correctly.
[wichert]
1.0rc2 - January 11, 2011
------------------------
- Fix TinyMCE: making text bold or italic works again.
[wichert]
- Expose date/time format methods from the Tools view directly as well
for use in python code.
[wichert]
1.0rc1 - December 7, 2010
-------------------------
- zope.i18n is not capable of rendering pre-1900 dates. To prevent site errors,
  detect this and return a textual error instead.
[wichert]
- Do not load the TinyMCE linesfield plugin. It is not needed, and it triggered
a symlink handling bug in setuptools/distutils.
[wichert]
- Fix transparent background for sitemenu in IE7.
[wichert]
- Refactor positioning of form tooltips.
[wichert]
- Update to jQuery 1.4.3 and jQuery UI 1.8.6.
[wichert]
1.0b4 - October 6, 2010
-----------------------
- Update IE8 styling.
[cornae]
1.0b3 - October 5, 2010
-----------------------
- Correct font reference for IE6 and IE7.
[wichert]
- Update form field dependency checker to deal with z3c.form's madness of
always using :list for checkbox field names.
[wichert]
1.0b2 - September 29, 2010
--------------------------
- Form CSS improvements.
[cornae]
1.0b1 - September 23, 2010
--------------------------
- Modify site menu to generate the contents of the actions menu in code. This
makes it easier to extend the menu using a derived class.
[wichert]
- Make the email address and name of the contact person where emails are sent
to configurable via appconfig.
[wichert]
- Move ``dfn`` elements for tooltips outside ``label`` elements to make sure
we can handle click events for them. Otherwise browsers pretend the click
was targeted to the input element inside the label.
[cornae, wichert]
1.0a2 - September 9, 2010
-------------------------
- Update error page handler to deal with double acquisition wrapping which
can happen on certain NotFound errors in Zope 2.12.
[wichert]
- Add `plone.app.testing <http://pypi.python.org/pypi/plone.app.testing>`_
based test fixture.
[wichert]
- Delete some old copy/paste leftovers from `Euphorie
<http://pypi.python.org/pypi/Euphorie>`_.
[wichert]
1.0a1 - August 31, 2010
-----------------------
- First release.
[wichert, cornae]
| PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/core/management/commands/loaddata.py | import functools
import glob
import gzip
import os
import sys
import warnings
import zipfile
from itertools import product
from djmodels.apps import apps
from djmodels.conf import settings
from djmodels.core import serializers
from djmodels.core.exceptions import ImproperlyConfigured
from djmodels.core.management.base import BaseCommand, CommandError
from djmodels.core.management.color import no_style
from djmodels.core.management.utils import parse_apps_and_model_labels
from djmodels.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from djmodels.utils.functional import cached_property
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
READ_STDIN = '-'
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = (
"No database fixture specified. Please provide the path of at least "
"one fixture in the command line."
)
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.')
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to load fixtures into. Defaults to the "default" database.',
)
parser.add_argument(
'--app', dest='app_label',
help='Only look for fixtures in the specified app.',
)
parser.add_argument(
'--ignorenonexistent', '-i', action='store_true', dest='ignore',
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.',
)
parser.add_argument(
'-e', '--exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude. Can be used multiple times.',
)
parser.add_argument(
'--format',
help='Format of serialized data when reading from stdin.',
)
def handle(self, *fixture_labels, **options):
self.ignore = options['ignore']
self.using = options['database']
self.app_label = options['app_label']
self.verbosity = options['verbosity']
self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude'])
self.format = options['format']
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
'stdin': (lambda *args: sys.stdin, None),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
# Django's test suite repeatedly tries to load initial_data fixtures
# from apps that don't have any fixtures. Because disabling constraint
# checks can be expensive on some database (especially MSSQL), bail
# out early if no fixtures are found.
for fixture_label in fixture_labels:
if self.find_fixtures(fixture_label):
break
else:
return
with connection.constraint_checks_disabled():
self.objs_with_deferred_fields = []
for fixture_label in fixture_labels:
self.load_label(fixture_label)
for obj in self.objs_with_deferred_fields:
obj.save_deferred_fields(using=self.using)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count:
self.stdout.write(
"Installed %d object(s) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_count)
)
else:
self.stdout.write(
"Installed %d object(s) (of %d) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
)
def load_label(self, fixture_label):
"""Load fixtures files for a given label."""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write(
"Installing %s fixture '%s' from %s."
% (ser_fmt, fixture_name, humanize(fixture_dir))
)
objects = serializers.deserialize(
ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore,
handle_forward_references=True,
)
for obj in objects:
objects_in_fixture += 1
if (obj.object._meta.app_config in self.excluded_apps or
type(obj.object) in self.excluded_models):
continue
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
# psycopg2 raises ValueError if data contains NUL chars.
except (DatabaseError, IntegrityError, ValueError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': e,
},)
raise
if obj.deferred_fields:
self.objs_with_deferred_fields.append(obj)
if objects and show_progress:
self.stdout.write('') # add a newline after progress indicator
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@functools.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""Find fixture files for a given label."""
if fixture_label == READ_STDIN:
return [(READ_STDIN, None, READ_STDIN)]
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = (
'.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts)
)
targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes}
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
path = os.path.join(fixture_dir, fixture_name)
for candidate in glob.iglob(glob.escape(path) + '*'):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
raise CommandError("No fixture named '%s' found." % fixture_name)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(list(fixture_dirs))
dirs.append('')
dirs = [os.path.abspath(os.path.realpath(d)) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
Split fixture name in name, serialization format, compression format.
"""
if fixture_name == READ_STDIN:
if not self.format:
raise CommandError('--format must be specified when reading from stdin.')
return READ_STDIN, self.format, 'stdin'
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % (''.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path' | PypiClean |
/LogTrace-0.1.2.tar.gz/LogTrace-0.1.2/README.txt | LogTrace
========
|Build Status|
Aggregate messages to produce a log entry representing a single event or
procedure. The purpose of this module is to easily associate log
messages that belong together.
::
import logging
from logtrace import LogTrace
logger = logging.getLogger(__name__)
trace = LogTrace(logger=logger)
trace.add("Let's get started")
...
trace.add("Later, something else happens")
...
trace.add("And finally...")
trace.emit()
You get a single log entry like this:
::
[05/Jan/2018 11:11:00] DEBUG [21, .30s] Let's get started; [65, .132s] Later, something else happens; [75, .330s] And finally...
Install
-------
::
pip install logtrace
Note that this only supports Python 3. Let me know if anyone wants
support for Python 2. There are no dependencies outside the Python
Standard Library for this module.
Example
-------
Logs can be hard to read because you log information at several points
as you go through a procedure, and those entries end up scattered among
all the other logs from other processes. You end up having to search for
related entries, possibly embedding identifying information in each one
to tie them together. ``LogTrace`` fixes this problem by letting you
collect the messages and output them once, as a single entry. Take the
example of a token authentication procedure where transient tokens are
required to be authenticated. You want to record the following events:
- Check the HTTP header info with the token
- What table are we going to use to check the token?
- Did the token service authenticate the token?
- Is the token in a local cache?
- Successfully authenticated?
The following records five separate instances where you would have
called ``logger.info()`` with a line number and the time in seconds
since constructing the ``LogTrace`` object ``[<lineno>, <secs>s]``:
::
[12:12:54] INFO [132, 0.0006s] auth header: [b'Token', b'2c59999137******************************']; [132, 0.0007s] authenticate key, model: <class 'tastypie.models.ApiKey'>; [132, 0.1057s] token renewal for API call confirmed; [132, 0.1078s] got key from token table: paul; [163, 0.1079s] Successfully authenticated
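A sketch of what the calling code for such a procedure might look like, using
only the ``LogTrace`` calls documented in this README (the token-checking
logic and the names below are invented for illustration)::

    import logging
    from logtrace import LogTrace

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    def authenticate(header_value, token_table, cache):
        trace = LogTrace(logger=logger)
        trace.add("auth header: {!r}".format(header_value))
        trace.add("checking token table: {!r}".format(sorted(token_table)))

        token = header_value.split()[-1] if header_value else None
        if token in cache:
            trace.add("token found in local cache")
        else:
            trace.add("token renewal for API call confirmed")
            cache.add(token)

        user = token_table.get(token)
        trace.add("Successfully authenticated" if user else "authentication failed")
        trace.emit_info()  # one log entry for the whole procedure
        return user

    authenticate("Token 2c59999137", {"2c59999137": "paul"}, cache=set())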
Details
-------
We respect logging levels. So, the overhead of using LogTrace is minimal
if your log level is not effective. If your log level is
``logging.INFO`` and you call ``logtrace.emit_debug()``, almost all
overhead is avoided minus some function call overhead and one or two
conditional expressions.
What LogTrace is *not*: This is *not* a logging framework. LogTrace uses
the standard Python ``logging`` module. All your configuration to
``logging`` is going to be used by LogTrace. All your handlers are going
to act exactly as before. If you use a framework like Django, you use it
just like you do now. No changes whatsoever are required to your logging
configuration.
We also provide other features like
- Easily generate a UUID for the logged event.
- Timing for each message since LogTrace was created.
- Frame information for each message, such as filename, function,
  lineno
- Any logging mechanism can be used, not just standard Python logging.
- Pass structured data (JSON).
We wanted to provide something that works in perfect harmony with the
existing Python logging module, without unnecessary duplication of
features and with no external dependencies (outside the PSL).
::
LogTrace(logger=None, # we'll emit output here
delimiter="; ", # delimiter between messages
tag='', # add a non-unique label
unique_id=False, # create a uuid to identify the log?
verbosity='v' # level of output for frame information
)
- ``logger``: the standard logger returned from
``import logging; logger = logging.getLogger(__name__)``. You can
  create a ``LogTrace()`` without a logger, in which case it creates one
  using the value of ``__name__``.
- ``delimiter``: the character(s) used between messages
- ``tag``: This is a convenience to tell LogTrace() to use hash+tag at
the start of every entry after calling ``.emit()`` for ease of
searching.
- ``unique_id``: generate a uuid to associate with the final message
output.
- ``verbosity``: v, vv, vvv for three levels of verbosity when adding
frame information
``LogTrace.get_uid()``: return the unique id. If one has not been set
during construction of the LogTrace, a uuid is generated. Otherwise, it
returns the existing one.
``LogTrace.set_uid(uid)``: Set a unique id. This can be done by
constructing ``LogTrace()`` with ``unique_id=True``. This takes normally
either a uuid or str argument.
``LogTrace.add(msg, data, backup)``: Add a message to the list. This
will get frame information for the call depending on the verbosity
level.
``LogTrace.emit_string()``: return a string that is the final log
message.
``LogTrace.emit()``: call ``logger.debug(message)``
``LogTrace.emit_error()``: call ``logger.error(message)``
``LogTrace.emit_info()``: call ``logger.info(message)``
``LogTrace.emit_debug()``: call ``logger.debug(message)``
``LogTrace.emit_warning()``: call ``logger.warning(message)``
``LogTrace.emit_critical()``: call ``logger.critical(message)``
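A short sketch pulling the constructor options and these methods together
(only behaviour documented above is assumed)::

    from logtrace import LogTrace

    trace = LogTrace(tag="checkout", unique_id=True, delimiter=" | ", verbosity="vv")
    print(trace.get_uid())  # a uuid, generated because unique_id=True

    trace.add("cart validated")
    trace.add("payment authorised")
    trace.emit()  # a single entry, prefixed with the #checkout tag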
When the ``LogTrace`` is created, ``time.time()`` is recorded. Whenever
``LogTrace.add()`` is called, the start time is subtracted from the
current time when the message is added. The final message prints the
number of seconds since creation.
You probably want to avoid including ``LogTrace.add()`` in loops. You
also probably want to create it as a local, not a module-level variable.
Pass it as a method argument rather than using a module level instance.
If you do want to re-use a ``LogTrace`` and clear messages, you can call
``LogTrace.clear()``. But be aware the uid might need to be reset
depending on your application requirements.
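A minimal sketch of the pattern described above::

    import logging
    from logtrace import LogTrace

    logger = logging.getLogger(__name__)

    def validate(trace, payload):
        trace.add("payload validated")
        return bool(payload)

    def persist(trace, payload):
        trace.add("payload persisted")

    def handle_event(payload):
        trace = LogTrace(logger=logger)  # local to this event, passed as an argument
        if validate(trace, payload):
            persist(trace, payload)
        trace.emit()

    handle_event({"id": 1})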
Extra Data
----------
``LogTrace.add()`` has an optional parameter ``data`` that takes a
dictionary. We keep a dict in the object and ``update()`` it whenever
the ``data`` parameter is used. This doesn’t do anything within
``LogTrace`` itself other than maintain the ``data`` member variable.
But you can accumulate data and later ship the data to a service like
AWS S3 or whatever, like this:
::
logger.info(trace.emit_string(), extra=trace.data)
This would be useful if you are using a logging handler that ships the
``logging.LogRecord`` as JSON to some service like a document oriented
data store, Elasticsearch, etc.
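A minimal sketch of accumulating structured data this way (the handler that
actually ships the record is whatever you already have configured)::

    import logging
    from logtrace import LogTrace

    logger = logging.getLogger(__name__)
    trace = LogTrace(logger=logger, unique_id=True)

    trace.add("request received", data={"path": "/api/items", "method": "GET"})
    trace.add("query finished", data={"row_count": 42})

    # trace.data now holds the merged dict from both calls
    logger.info(trace.emit_string(), extra=trace.data)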
Testing
-------
::
pip install pytest
cd logtrace
pytest test.py --verbose
or
::
python3 logtrace/test.py
Performance
-----------
``LogTrace()`` appends to a list of strings every time you call
``add()``. But it first calls ``inspect.getframeinfo()`` and builds
the string with that information. When ``emit()`` is called, it
concatenates all the strings in the list separated by ``delimiter`` and
then calls ``logger.info()`` or whatever method is appropriate. If the
effective level is not the current level for the method, then the list
will be empty and it won’t do the call to the ``logger`` method.
Acknowledgements
----------------
Thanks to @metazet (https://github.com/metazet) for important fixes.
.. |Build Status| image:: https://travis-ci.org/paul-wolf/logtrace.svg?branch=master
:target: https://travis-ci.org/paul-wolf/logtrace
| PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/neon-animation/guides/neon-animation.md | ---
title: neon-animation
summary: "A short guide to neon-animation and neon-animated-pages"
tags: ['animation','core-animated-pages']
elements: ['neon-animation','neon-animated-pages']
updated: 2015-05-26
---
# neon-animation
`neon-animation` is a suite of elements and behaviors to implement pluggable animated transitions for Polymer Elements using [Web Animations](https://w3c.github.io/web-animations/).
*Warning: The API may change.*
* [A basic animatable element](#basic)
* [Animation configuration](#configuration)
* [Animation types](#configuration-types)
* [Configuration properties](#configuration-properties)
* [Using multiple animations](#configuration-multiple)
* [Running animations encapsulated in children nodes](#configuration-encapsulation)
* [Page transitions](#page-transitions)
* [Shared element animations](#shared-element)
* [Declarative page transitions](#declarative-page)
* [Included animations](#animations)
* [Demos](#demos)
<a name="basic"></a>
## A basic animatable element
Elements that can be animated should implement the `Polymer.NeonAnimatableBehavior` behavior, or `Polymer.NeonAnimationRunnerBehavior` if they're also responsible for running an animation.
```js
Polymer({
is: 'my-animatable',
behaviors: [
Polymer.NeonAnimationRunnerBehavior
],
properties: {
animationConfig: {
value: function() {
return {
// provided by neon-animation/animations/scale-down-animation.html
name: 'scale-down-animation',
node: this
}
}
}
},
listeners: {
// this event is fired when the animation finishes
'neon-animation-finish': '_onNeonAnimationFinish'
},
animate: function() {
// run scale-down-animation
this.playAnimation();
},
_onNeonAnimationFinish: function() {
console.log('animation done!');
}
});
```
[Live demo](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/doc/basic.html)
<a name="configuration"></a>
## Animation configuration
<a name="configuration-types"></a>
### Animation types
An element might run different animations; for example, it might do something different when it enters the view than when it exits it. You can set the `animationConfig` property to a map from an animation type to a configuration.
```js
Polymer({
is: 'my-dialog',
behaviors: [
Polymer.NeonAnimationRunnerBehavior
],
properties: {
opened: {
type: Boolean
},
animationConfig: {
value: function() {
return {
'entry': {
// provided by neon-animation/animations/scale-up-animation.html
name: 'scale-up-animation',
node: this
},
'exit': {
// provided by neon-animation-animations/fade-out-animation.html
name: 'fade-out-animation',
node: this
}
}
}
}
},
listeners: {
'neon-animation-finish': '_onNeonAnimationFinish'
},
show: function() {
this.opened = true;
this.style.display = 'inline-block';
// run scale-up-animation
this.playAnimation('entry');
},
hide: function() {
this.opened = false;
// run fade-out-animation
this.playAnimation('exit');
},
_onNeonAnimationFinish: function() {
if (!this.opened) {
this.style.display = 'none';
}
}
});
```
[Live demo](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/doc/types.html)
You can also use the convenience properties `entryAnimation` and `exitAnimation` to set `entry` and `exit` animations:
```js
properties: {
entryAnimation: {
value: 'scale-up-animation'
},
exitAnimation: {
value: 'fade-out-animation'
}
}
```
<a name="configuration-properties"></a>
### Configuration properties
You can pass additional parameters to configure an animation in the animation configuration object.
All animations should accept the following properties:
* `name`: The name of an animation, ie. an element implementing `Polymer.NeonAnimationBehavior`.
* `node`: The target node to apply the animation to. Defaults to `this`.
* `timing`: Timing properties to use in this animation. They match the [Web Animations Animation Effect Timing interface](https://w3c.github.io/web-animations/#the-animationeffecttiming-interface). The
properties include the following:
* `duration`: The duration of the animation in milliseconds.
* `delay`: The delay before the start of the animation in milliseconds.
* `easing`: A timing function for the animation. Matches the CSS timing function values.
Animations may define additional configuration properties and they are listed in their documentation.
<a name="configuration-multiple"></a>
### Using multiple animations
Set the animation configuration to an array to combine animations, like this:
```js
animationConfig: {
value: function() {
return {
// fade-in-animation is run with a 50ms delay from slide-down-animation
'entry': [{
name: 'slide-down-animation',
node: this
}, {
name: 'fade-in-animation',
node: this,
timing: {delay: 50}
}]
}
}
}
```
<a name="configuration-encapsulation"></a>
### Running animations encapsulated in children nodes
You can include animations in the configuration that are encapsulated in a child element that implements `Polymer.NeonAnimatableBehavior`, using the `animatable` property.
```js
animationConfig: {
value: function() {
return {
// run fade-in-animation on this, and the entry animation on this.$.myAnimatable
'entry': [
{name: 'fade-in-animation', node: this},
{animatable: this.$.myAnimatable, type: 'entry'}
]
}
}
}
```
<a name="page-transitions"></a>
## Page transitions
*The artist formerly known as `<core-animated-pages>`*
The `neon-animated-pages` element manages a set of pages to switch between, and runs animations between the page transitions. It implements the `Polymer.IronSelectableBehavior` behavior. Each child node should implement `Polymer.NeonAnimatableBehavior` and define the `entry` and `exit` animations. During a page transition, the `entry` animation is run on the new page and the `exit` animation is run on the old page.
<a name="shared-element"></a>
### Shared element animations
Shared element animations work on multiple nodes. For example, a "hero" animation is used during a page transition to make two elements from separate pages appear to animate as a single element. Shared element animation configurations have an `id` property that identifies which configurations belong to the same animation. Elements containing shared elements also have a `sharedElements` property that defines a map from `id` to the element involved in the animation.
In the incoming page:
```js
properties: {
animationConfig: {
value: function() {
return {
// the incoming page defines the 'entry' animation
'entry': {
name: 'hero-animation',
id: 'hero',
toPage: this
}
}
}
},
sharedElements: {
value: function() {
return {
'hero': this.$.hero
}
}
}
}
```
In the outgoing page:
```js
properties: {
animationConfig: {
value: function() {
return {
// the outgoing page defines the 'exit' animation
'exit': {
name: 'hero-animation',
id: 'hero',
fromPage: this
}
}
}
},
sharedElements: {
value: function() {
return {
'hero': this.$.otherHero
}
}
}
}
```
<a name="declarative-page"></a>
### Declarative page transitions
For convenience, if you define the `entry-animation` and `exit-animation` attributes on `<neon-animated-pages>`, those animations will apply for all page transitions.
For example:
```html
<neon-animated-pages id="pages" class="flex" selected="[[selected]]" entry-animation="slide-from-right-animation" exit-animation="slide-left-animation">
<neon-animatable>1</neon-animatable>
<neon-animatable>2</neon-animatable>
<neon-animatable>3</neon-animatable>
<neon-animatable>4</neon-animatable>
<neon-animatable>5</neon-animatable>
</neon-animated-pages>
```
The new page will slide in from the right, and the old page slide away to the left.
<a name="animations"></a>
## Included animations
Single element animations:
* `fade-in-animation` Animates opacity from `0` to `1`;
* `fade-out-animation` Animates opacity from `1` to `0`;
* `scale-down-animation` Animates transform from `scale(1)` to `scale(0)`;
* `scale-up-animation` Animates transform from `scale(0)` to `scale(1)`;
* `slide-down-animation` Animates transform from `none` to `translateY(100%)`;
* `slide-up-animation` Animates transform from `none` to `translateY(-100%)`;
* `slide-from-top-animation` Animates transform from `translateY(-100%)` to `none`;
* `slide-from-bottom-animation` Animates transform from `translateY(100%)` to `none`;
* `slide-left-animation` Animates transform from `none` to `translateX(-100%)`;
* `slide-right-animation` Animates transform from `none` to `translateX(100%)`;
* `slide-from-left-animation` Animates transform from `translateX(-100%)` to `none`;
* `slide-from-right-animation` Animates transform from `translateX(100%)` to `none`;
* `transform-animation` Animates a custom transform.
Note that there is a restriction that only one transform animation can be applied on the same element at a time. Use the custom `transform-animation` to combine transform properties.
Shared element animations
* `hero-animation` Animates an element such that it looks like it scales and transforms from another element.
* `ripple-animation` Animates an element to full screen such that it looks like it ripples from another element.
Group animations
* `cascaded-animation` Applies an animation to an array of elements with a delay between each.
<a name="demos"></a>
## Demos
* [Grid to full screen](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/grid/index.html)
* [Animation on load](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/load/index.html)
* [List item to detail](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/list/index.html) (For narrow width)
* [Dots to squares](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/tiles/index.html)
* [Declarative](http://morethanreal.github.io/neon-animation-demo/bower_components/neon-animation/demo/declarative/index.html)
| PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/gui/qt/password_dialog.py |
import re
import math
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLineEdit, QLabel, QGridLayout, QVBoxLayout, QCheckBox
from electrum.i18n import _
from electrum.plugin import run_hook
from .util import icon_path, WindowModalDialog, OkButton, CancelButton, Buttons
def check_password_strength(password):
'''
    Check the strength of the password entered by the user and return a strength label
    :param password: password entered by user in New Password
    :return: password strength: Weak, Medium, Strong or Very Strong
'''
password = password
n = math.log(len(set(password)))
num = re.search("[0-9]", password) is not None and re.match("^[0-9]*$", password) is None
caps = password != password.upper() and password != password.lower()
extra = re.match("^[a-zA-Z0-9]*$", password) is None
score = len(password)*( n + caps + num + extra)/20
password_strength = {0:"Weak",1:"Medium",2:"Strong",3:"Very Strong"}
return password_strength[min(3, int(score))]
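# Note (illustrative, not part of the original module): the score combines password
# length, the log of the number of distinct characters, and whether the password mixes
# case, digits and other symbols; e.g. check_password_strength("abc") returns "Weak",
# while a long password mixing cases, digits and punctuation scores "Strong" or "Very Strong".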
PW_NEW, PW_CHANGE, PW_PASSPHRASE = range(0, 3)
class PasswordLayout(object):
titles = [_("Enter Password"), _("Change Password"), _("Enter Passphrase")]
def __init__(self, msg, kind, OK_button, wallet=None, force_disable_encrypt_cb=False):
self.wallet = wallet
self.pw = QLineEdit()
self.pw.setEchoMode(2)
self.new_pw = QLineEdit()
self.new_pw.setEchoMode(2)
self.conf_pw = QLineEdit()
self.conf_pw.setEchoMode(2)
self.kind = kind
self.OK_button = OK_button
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
if kind == PW_PASSPHRASE:
vbox.addWidget(label)
msgs = [_('Passphrase:'), _('Confirm Passphrase:')]
else:
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
m1 = _('New Password:') if kind == PW_CHANGE else _('Password:')
msgs = [m1, _('Confirm Password:')]
if wallet and wallet.has_password():
grid.addWidget(QLabel(_('Current Password:')), 0, 0)
grid.addWidget(self.pw, 0, 1)
lockfile = "lock.png"
else:
lockfile = "unlock.png"
logo.setPixmap(QPixmap(icon_path(lockfile))
.scaledToWidth(36, mode=Qt.SmoothTransformation))
grid.addWidget(QLabel(msgs[0]), 1, 0)
grid.addWidget(self.new_pw, 1, 1)
grid.addWidget(QLabel(msgs[1]), 2, 0)
grid.addWidget(self.conf_pw, 2, 1)
vbox.addLayout(grid)
# Password Strength Label
if kind != PW_PASSPHRASE:
self.pw_strength = QLabel()
grid.addWidget(self.pw_strength, 3, 0, 1, 2)
self.new_pw.textChanged.connect(self.pw_changed)
self.encrypt_cb = QCheckBox(_('Encrypt wallet file'))
self.encrypt_cb.setEnabled(False)
grid.addWidget(self.encrypt_cb, 4, 0, 1, 2)
self.encrypt_cb.setVisible(kind != PW_PASSPHRASE)
def enable_OK():
ok = self.new_pw.text() == self.conf_pw.text()
OK_button.setEnabled(ok)
self.encrypt_cb.setEnabled(ok and bool(self.new_pw.text())
and not force_disable_encrypt_cb)
self.new_pw.textChanged.connect(enable_OK)
self.conf_pw.textChanged.connect(enable_OK)
self.vbox = vbox
def title(self):
return self.titles[self.kind]
def layout(self):
return self.vbox
def pw_changed(self):
password = self.new_pw.text()
if password:
colors = {"Weak":"Red", "Medium":"Blue", "Strong":"Green",
"Very Strong":"Green"}
strength = check_password_strength(password)
label = (_("Password Strength") + ": " + "<font color="
+ colors[strength] + ">" + strength + "</font>")
else:
label = ""
self.pw_strength.setText(label)
def old_password(self):
if self.kind == PW_CHANGE:
return self.pw.text() or None
return None
def new_password(self):
pw = self.new_pw.text()
# Empty passphrases are fine and returned empty.
if pw == "" and self.kind != PW_PASSPHRASE:
pw = None
return pw
class PasswordLayoutForHW(object):
def __init__(self, msg, wallet=None):
self.wallet = wallet
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
if wallet and wallet.has_storage_encryption():
lockfile = "lock.png"
else:
lockfile = "unlock.png"
logo.setPixmap(QPixmap(icon_path(lockfile))
.scaledToWidth(36, mode=Qt.SmoothTransformation))
vbox.addLayout(grid)
self.encrypt_cb = QCheckBox(_('Encrypt wallet file'))
grid.addWidget(self.encrypt_cb, 1, 0, 1, 2)
self.vbox = vbox
def title(self):
return _("Toggle Encryption")
def layout(self):
return self.vbox
class ChangePasswordDialogBase(WindowModalDialog):
def __init__(self, parent, wallet):
WindowModalDialog.__init__(self, parent)
is_encrypted = wallet.has_storage_encryption()
OK_button = OkButton(self)
self.create_password_layout(wallet, is_encrypted, OK_button)
self.setWindowTitle(self.playout.title())
vbox = QVBoxLayout(self)
vbox.addLayout(self.playout.layout())
vbox.addStretch(1)
vbox.addLayout(Buttons(CancelButton(self), OK_button))
self.playout.encrypt_cb.setChecked(is_encrypted)
def create_password_layout(self, wallet, is_encrypted, OK_button):
raise NotImplementedError()
class ChangePasswordDialogForSW(ChangePasswordDialogBase):
def __init__(self, parent, wallet):
ChangePasswordDialogBase.__init__(self, parent, wallet)
if not wallet.has_password():
self.playout.encrypt_cb.setChecked(True)
def create_password_layout(self, wallet, is_encrypted, OK_button):
if not wallet.has_password():
msg = _('Your wallet is not protected.')
msg += ' ' + _('Use this dialog to add a password to your wallet.')
else:
if not is_encrypted:
msg = _('Your CHI are password protected. However, your wallet file is not encrypted.')
else:
msg = _('Your wallet is password protected and encrypted.')
msg += ' ' + _('Use this dialog to change your password.')
self.playout = PasswordLayout(msg=msg,
kind=PW_CHANGE,
OK_button=OK_button,
wallet=wallet,
force_disable_encrypt_cb=not wallet.can_have_keystore_encryption())
def run(self):
if not self.exec_():
return False, None, None, None
return True, self.playout.old_password(), self.playout.new_password(), self.playout.encrypt_cb.isChecked()
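# Sketch of a typical call site (assumed, not defined in this module): the main
# wallet window usually unpacks run() and forwards the result to the wallet, e.g.
#
#     ok, old_pw, new_pw, encrypt_file = ChangePasswordDialogForSW(window, wallet).run()
#     if ok:
#         wallet.update_password(old_pw, new_pw, encrypt_file)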
class ChangePasswordDialogForHW(ChangePasswordDialogBase):
def __init__(self, parent, wallet):
ChangePasswordDialogBase.__init__(self, parent, wallet)
def create_password_layout(self, wallet, is_encrypted, OK_button):
if not is_encrypted:
msg = _('Your wallet file is NOT encrypted.')
else:
msg = _('Your wallet file is encrypted.')
msg += '\n' + _('Note: If you enable this setting, you will need your hardware device to open your wallet.')
msg += '\n' + _('Use this dialog to toggle encryption.')
self.playout = PasswordLayoutForHW(msg)
def run(self):
if not self.exec_():
return False, None
return True, self.playout.encrypt_cb.isChecked()
class PasswordDialog(WindowModalDialog):
def __init__(self, parent=None, msg=None):
msg = msg or _('Please enter your password')
WindowModalDialog.__init__(self, parent, _("Enter Password"))
self.pw = pw = QLineEdit()
pw.setEchoMode(2)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(self), OkButton(self)))
self.setLayout(vbox)
run_hook('password_dialog', pw, grid, 1)
def run(self):
if not self.exec_():
return
return self.pw.text() | PypiClean |
/Doxhooks-0.6.0.zip/Doxhooks-0.6.0/doxhooks/filetrees.py | import os
import re
from doxhooks.errors import DoxhooksDataError, DoxhooksLookupError
__all__ = [
"FileTree",
"normalise_path",
]
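# Platform-aware path tests: _starts_with_sep reports whether a string begins
# with a path separator, and _is_explicit_relative_path reports whether it is
# exactly "." or begins with "./" (with "\" variants accepted on Windows).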
_starts_with_sep = re.compile(r"[\\/]" if os.name == "nt" else "/").match
_is_explicit_relative_path = re.compile(
r"\.$|\.[\\/]" if os.name == "nt" else r"\.$|\./"
).match
def normalise_path(path):
"""
Return a path as a normalised absolute or explicitly relative path.
`normalise_path` extends `os.path.normpath` by returning an
implicitly relative path as an explicitly relative path.
Parameters
----------
path : str
A path.
Returns
-------
str
The normalised path. The path is either absolute or explicitly
relative.
Examples
--------
>>> doxhooks.filetrees.normalise_path("films/heat.html")
'./films/heat.html'
>>> doxhooks.filetrees.normalise_path("./films/heat.html")
'./films/heat.html'
>>> doxhooks.filetrees.normalise_path("/films/heat.html")
'/films/heat.html'
"""
os_norm_path = os.path.normpath(path)
if os.path.isabs(os_norm_path) or _is_explicit_relative_path(os_norm_path):
return os_norm_path
else:
return "{}{}{}".format(os.curdir, os.sep, os_norm_path)
class FileTree:
"""
A file tree with named root paths.
Class Interface
---------------
path
Replace root names with paths and return the computed path.
"""
def __init__(self, roots, *, name="`FileTree`"):
"""
Initialise the file tree with named root paths.
Roots are chained by starting a root path with the name of
another root.
Parameters
----------
roots : dict
Named root paths in the tree.
name : str, optional
Keyword-only. A name for the file tree. The name only
appears in error messages. Defaults to ``"`FileTree`"``.
"""
self._roots = roots
self._name = name
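# For example (mirroring the doctest below), FileTree({"src": "source",
# "html": "<src>html"}) resolves the "<html>" root to "source/html",
# because the "html" root path starts with the name of the "src" root.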
_root_notation = re.compile(r"^<(\w+)>(.*)")
def _resolve_roots(self, match):
# Return a path after recursively resolving root names.
root_name, rel_path = match.groups()
try:
root = self._roots[root_name]
except KeyError:
raise DoxhooksLookupError(root_name, self._roots, self._name)
if not rel_path:
path = root
elif _starts_with_sep(rel_path):
path = root + rel_path
else:
path = os.path.join(root, rel_path)
return self._root_notation.sub(self._resolve_roots, path)
def path(self, dir_path, filename=None, *, rewrite=None):
r"""
Replace root names with paths and return the computed path.
The paths `dir_path` and `filename` will be joined, unless
`dir_path` is overridden because:
* `filename` is an absolute path.
* `filename` is an explicit relative path, e.g. ``"./file.txt"``.
* `filename` starts with a root name, e.g. ``"<html>file.txt"``.
Parameters
----------
dir_path : str
A path to a directory (or, unusually, a file). The path may
be absolute, explicitly or implicitly relative, or start
with a root name.
filename : str or None, optional
A path to a file (or, unusually, a directory). The path may
be absolute, explicitly or implicitly relative, or start
with a root name.
rewrite : optional
Keyword-only. A value that will replace a substring ``"{}"``
in the computed path. Defaults to ``None``, which denotes
that the path will not be rewritten.
Returns
-------
str
The computed, normalised path.
Raises
------
~doxhooks.errors.DoxhooksLookupError
If a named root cannot be found among the *roots* of this
`FileTree`.
~doxhooks.errors.DoxhooksDataError
If `rewrite` is not ``None`` and the path cannot be
rewritten.
Examples
--------
>>> ft = doxhooks.filetrees.FileTree(
... {"src": "source", "html": "<src>html", "js": "<src>js"})
>>> ft.path("source/html/films")
'./source/html/films'
>>> dir_path = "<html>films"
>>> ft.path(dir_path)
'./source/html/films'
>>> ft.path(dir_path, "heat.html")
'./source/html/films/heat.html'
>>> ft.path(dir_path, "heat{}.html", rewrite="-1995")
'./source/html/films/heat-1995.html'
>>> ft.path(dir_path, "<js>inline.js")
'./source/js/inline.js'
>>> ft.path(dir_path, "./relative/path")
'./relative/path'
>>> ft.path(dir_path, "/absolute/path")
'/absolute/path'
"""
if (filename and (os.path.isabs(filename) or
_is_explicit_relative_path(filename))):
path = filename
elif filename and self._root_notation.match(filename):
path = self._root_notation.sub(self._resolve_roots, filename)
else:
full_dir_path = self._root_notation.sub(
self._resolve_roots, dir_path)
path = (
full_dir_path if filename is None else
os.path.join(full_dir_path, filename)
)
if rewrite is not None:
try:
path = path.format(rewrite)
except (LookupError, ValueError) as error:
raise DoxhooksDataError("Cannot rewrite path:", path) \
from error
return normalise_path(path) | PypiClean |