# mindaffect/smart-keyboard: smart_keyboard/windows/keyboard_window.py
"""
This module contains a single class representing a keyboard.
It contains the following visual and functional elements:
* four keypad windows (upper, lower, symbols, additional symbols)
* suggestion keys
* textfield
"""
# Copyright (c) 2021,
# Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from smart_keyboard.settings_manager import SettingsManager
from smart_keyboard.windows.window import Window
from smart_keyboard.windows.keypad_window import KeypadWindow
from smart_keyboard.key import Key
from smart_keyboard.key_type import KeyType
from smart_keyboard.word_prediction import WordPrediction
from smart_keyboard.word_correction import WordCorrection
from smart_keyboard.text_to_speech import TextToSpeech
from smart_keyboard.text_field import TextField
from smart_keyboard.keyboard_loader import KeyboardLoader
import re as regex
import _thread as thread
import sys
class KeyboardWindow(Window):
"""
A Window representing a keyboard.
Args:
parent (windows.window.Window): The parent of this window.
facade (framework_facade.FrameworkFacade): The GUI-specific functionality.
style (dict): Style configurations for objects contained by a window.
use_flickering (bool): A boolean indicating whether or not to use flickering in the window.
noisetag (noisetag.Noisetag): Reference to the Noisetag module from MindAffect.
ai_settings (dict): Instructions on whether or not to use AI modules.
config (dict): Configurations (to be replaced).
"""
def __init__(self, parent, facade, style, use_flickering, noisetag, ai_settings, config):
super().__init__(
parent=parent,
facade=facade,
style=style,
use_flickering=use_flickering,
noisetag=noisetag
)
self.keys = []
# Keyboard layout:
self.key_padding = style["key_padding"]
self.keyboard_size = style["keyboard_size"]
self.keypad_size = style["keypad_size"]
# Setup AI modules:
self.load_ai = ai_settings["load_ai"]
self.config = config
keypad_layouts = config["keypads"]
self.feedback_threshold = self.config["feedback_threshold"]
self.predictor = None
self.state2color = style["state2color"]
if self.load_ai:
# Initialize text-to-speech engine:
self.tts = TextToSpeech(
service=config['text_to_speech']["tts_service"]
)
# Initialize word prediction module:
self.predictor = WordPrediction.get_instance(ai_settings["ngram_depth"])
# Initialize word correction module:
self.correction = WordCorrection.get_instance(ai_settings["correction_distance"])
# Generate suggestion keys:
self.keys = self.build_suggestion_keys()
self.text_field = TextField(
facade=self.facade,
style_sheet=self.style,
update_suggestions=self.update_suggestions,
predictor=self.predictor
)
# Initialize KeypadWindows:
self.windows = {
"Upper": KeypadWindow(
parent=self,
facade=facade,
style=style,
keypad_layout=KeyboardLoader.load_json(keypad_layouts["Upper"]),
use_flickering=use_flickering,
text_field=self.text_field,
noisetag=noisetag
),
"Lower": KeypadWindow(
parent=self,
facade=facade,
style=style,
keypad_layout=KeyboardLoader.load_json(keypad_layouts["Lower"]),
use_flickering=use_flickering,
text_field=self.text_field,
noisetag=noisetag
),
"Symbols": KeypadWindow(
parent=self,
facade=facade,
style=style,
keypad_layout=KeyboardLoader.load_json(keypad_layouts["Symbols"]),
use_flickering=use_flickering,
text_field=self.text_field,
noisetag=noisetag
),
"AdditionalSymbols": KeypadWindow(
parent=self,
facade=facade,
style=style,
keypad_layout=KeyboardLoader.load_json(keypad_layouts["AdditionalSymbols"]),
use_flickering=use_flickering,
text_field=self.text_field,
noisetag=noisetag
)
}
# MindAffect logo:
self.logo = self.facade.create_icon(
"key_icons\MindAffect_Logo.png",
label_col=self.style["MA_orange"],
size=(.3,.3),
pos=(.95,.9),
)
# Construct optosensor square (in upper-left corner of the screen):
self.opto = facade.create_rect(
size=(0.1, 0.1),
pos=(0.05, 0.95),
color=self.style["button_color"],
line_color=self.style["line_color"]
)
self.active_window = "Lower"
self.switched_windows = False
self.is_active = False
self.active_trial = False
def get_keys(self):
"""Returns the keys of this Window. """
return self.keys
def activate(self):
"""Activates all visual and functional elements of this Window."""
self.text_field.activate()
self.facade.toggle_image_render(self.logo, True)
if self.keys:
for key in self.keys:
key.toggle_render(True)
self.windows[self.active_window].activate()
if self.use_flickering:
self.noisetag.setnumActiveObjIDs(len(self.get_keys()) +
len(self.windows[self.active_window].get_keys()))
self.start_new_trial()
# Render the optosensor square when cued prediction is enabled:
if self.config["use_cued"]:
self.facade.toggle_shape_render(self.opto, True)
self.is_active = True
def deactivate(self):
"""Deactivates all visual and functional elements of this Window."""
self.is_active = False
self.text_field.deactivate()
self.facade.toggle_image_render(self.logo, False)
self.facade.toggle_shape_render(self.opto, False)
for key in self.keys:
key.toggle_render(False)
self.windows[self.active_window].deactivate()
def switch_window(self, window_name):
"""
Switches to Window specified by window_name.
Args:
window_name (str): The name of the Window to switch to.
"""
if self.windows:
if window_name in self.windows:
self.windows[self.active_window].deactivate()
self.windows[window_name].activate()
self.active_window = window_name
self.switched_windows = True
# When switching keypads, reset_flicker() is called to update the number of objectIDs in the Noisetag
# to match the new key count:
self.reset_flicker()
else:
self.parent.switch_window(window_name)
self.switched_windows = True
else:
self.parent.switch_window(window_name)
self.switched_windows = True
def handle_mouse_events(self):
"""Handles mouse events within this Window."""
if not (self.switched_windows or self.parent.switched_windows):
for key in self.keys:
key.handle_mouse_events(self)
if self.windows:
self.windows[self.active_window].handle_mouse_events()
else:
if not self.facade.mouse_event([0]):
self.parent.switched_windows = False
self.switched_windows = False
def get_window(self, window_name):
"""
Gives access to Window specified by window_name.
Args:
window_name (str): The name of the Window to get.
"""
if self.windows:
if window_name in self.windows:
return self.windows[window_name]
else:
return self.parent.get_window(window_name)
else:
return self.parent.get_window(window_name)
def draw(self, noisetag, last_flip_time, target_idx=-1):
"""
Changes the state of the buttons/keys within the Window.
It draws the display with the colors given by the Noisetag module if flickering is active.
Args:
noisetag (noisetag.Noisetag): Reference to the Noisetag module from MindAffect.
last_flip_time (int): Timestamp of last screen update, i.e. buffer flip.
target_idx (int): (Optional) index of the target stimulus.
"""
if self.use_flickering:
# Send info of the previous stimulus state, with the recorded vsync time (if available):
flip_time = last_flip_time if last_flip_time is not None else noisetag.getTimeStamp()
noisetag.sendStimulusState(timestamp=flip_time)
# Update and get the new stimulus state to display:
try:
noisetag.updateStimulusState()
stimulus_state, target_idx, obj_ids, send_events = noisetag.getStimulusState()
target_state = stimulus_state[target_idx] if target_idx >= 0 else -1
except StopIteration:
# Start a new trial each time the last one ends in order to be able to keep typing:
self.start_new_trial()
return
# Draw the display with the instructed colors:
if stimulus_state:
for i, key in enumerate(self.keys):
key.change_color(self.state2color[str(stimulus_state[i])])
# Pass the stimulus states to the keypad for drawing the flickering:
self.windows[self.active_window].draw(len(self.keys), stimulus_state)
# Handle prediction feedback:
self.feedback(self.feedback_threshold)
if self.config["use_cued"]:
if target_state is not None and target_state in (0, 1):
# Draw optometer square with the colors of the target stimulus state:
self.facade.change_button_color(self.opto, self.style["state2color"][
str(target_state)])
def select_key(self, obj_id):
"""
Selects and activates a key (like a mouse click) when it is selected by a prediction trial from the Noisetag.
Args:
obj_id (int): ID of the object that is selected.
"""
if self.is_active:
# Suggestion keys:
if obj_id <= len(self.keys):
# Convert objID to the corresponding key. 1 is subtracted from the objID since the objIDs start at 1:
key = self.keys[obj_id - 1]
# Apply the functionality of that key:
key.key_type.func_provider.apply(key, self)
# Keys on the keypad:
else:
# Convert the objID to the corresponding key:
keys = self.windows[self.active_window].get_keys()
key = keys[obj_id - len(self.keys) - 1]
if key.key_type != KeyType.DEFAULT_SETTINGS_KEY:
# Apply the functionality of that key, unless it is the settings key:
# selecting the settings key through cued prediction would switch to the settings
# menu and thereby abort the cued prediction immediately.
key.key_type.func_provider.apply(key, self)
def reset_flicker(self):
"""Resets the number of stimuli for the Noisetag when changing keypads."""
if self.use_flickering:
self.noisetag.setnumActiveObjIDs(len(self.keys) + len(self.windows[self.active_window].get_keys()))
self.start_new_trial()
# KeyboardWindow-specific functions:
def reset_key_colors(self):
"""Resets all the keys to their original color"""
for key in self.keys:
key.reset_color()
for key in self.windows[self.active_window].get_keys():
key.reset_color()
# Manually triggers a window flip so the default colors show up before the key function is executed:
self.facade.flip()
def start_new_trial(self):
"""Starts a new EEG trial."""
# If cueing is enabled, start a cued trial; otherwise start a normal single trial for typing:
self.noisetag.startPrediction(
cuedprediction=self.config["use_cued"],
nTrials=1,
waitduration=1,
duration=self.config["trial_duration"],
framesperbit=self.style["framesperbit"]
)
# to clear feedback of the last trial:
self.feedback(self.feedback_threshold)
def text_to_speech(self):
"""Converts the text in the text field to speech and reads it out loud."""
if self.load_ai:
if not self.text_field.is_empty():
self.tts.speak_threaded(self.text_field.get_state())
def update_suggestions(self):
"""Updates the suggestions of the keyboard; correction, autocompletion, and next word prediction."""
if self.load_ai:
settings_manager = SettingsManager.get_instance()
if settings_manager.get_word_prediction_correction():
correct = False # Regulatory boolean
# Split sentences with ., !, and ? as delimiters:
sentences = regex.split(r"[.!?]", self.text_field.get_state())
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_query()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_query_session_for_bank(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule query service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleQuerySession) - a
``SequenceRuleQuerySession``
raise: NotFound - no ``Bank`` found by the given ``Id``
raise: NullArgument - ``bank_id or proxy is null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
if bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_search_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule search service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleSearchSession) -
a ``SequenceRuleSearchSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_search()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_search()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_search_session_for_bank(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule earch service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleSearchSession) -
a ``SequenceRuleSearchSession``
raise: NotFound - no ``Bank`` found by the given ``Id``
raise: NullArgument - ``bank_id or proxy is null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
if bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_admin_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleAdminSession) - a
``SequenceRuleAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_admin()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_admin_session_for_bank(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule administration service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleAdminSession) - a
``SequenceRuleAdminSession``
raise: NotFound - no ``Bank`` found by the given ``Id``
raise: NullArgument - ``bank_id or proxy is null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_notification_session(self, sequence_rule_receiver=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule notification service.
arg: sequence_rule_receiver
(osid.assessment.authoring.SequenceRuleReceiver): the
notification callback
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleNotificationSession) - a ``SequenceRuleNotificationSession``
raise: NullArgument - ``sequence_rule_receiver`` or ``proxy``
is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_notification()`` is ``true``.*
"""
if sequence_rule_receiver is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_notification_session_for_bank(self, sequence_rule_receiver=None, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule notification service for the given bank.
arg: sequence_rule_receiver
(osid.assessment.authoring.SequenceRuleReceiver): the
notification callback
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleNotificationSession) - a ``SequenceRuleNotificationSession``
raise: NotFound - no bank found by the given ``Id``
raise: NullArgument - ``sequence_rule_receiver, bank_id`` or
``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_notification()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
if sequence_rule_receiver is None or bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_bank_session(self, proxy=None):
"""Gets the ``OsidSession`` to lookup sequence rule/bank mappings for sequence rules.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleBankSession) - a
``SequenceRuleBankSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_bank()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_bank()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_bank_assignment_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with assigning sequence rule to bank.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleBankAssignmentSession) - a ``SequenceRuleBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_bank_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_bank_assignment()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_smart_bank_session(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` to manage sequence rule smart bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleSmartBankSession)
- a ``SequenceRuleSmartBankSession``
raise: NotFound - no ``Bank`` found by the given ``Id``
raise: NullArgument - ``bank_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_sequence_rule_smart_bank()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_smart_bank()`` is ``true``.*
"""
if bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_enabler_lookup_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule enabler lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleEnablerLookupSession) - a ``SequenceRuleEnablerLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_enabler_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_enabler_lookup()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_enabler_lookup_session_for_bank(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule enabler lookup service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleEnablerLookupSession) - a ``SequenceRuleEnablerLookupSession``
raise: NotFound - no ``Bank`` found by the given ``Id``
raise: NullArgument - ``bank_id or proxy is null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_enabler_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_enabler_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
if bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_enabler_query_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule enabler query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleEnablerQuerySession) - a ``SequenceRuleEnablerQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_enabler_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_enabler_query()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_enabler_query_session_for_bank(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule enabler query service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleEnablerQuerySession) - a ``SequenceRuleEnablerQuerySession``
raise: NotFound - no ``Bank`` found by the given ``Id``
raise: NullArgument - ``bank_id or proxy is null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_enabler_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_enabler_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
if bank_id is None or proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_enabler_search_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule enabler search service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleEnablerSearchSession) - a ``SequenceRuleEnablerSearchSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_sequence_rule_enabler_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_enabler_search()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_sequence_rule_enabler_search_session_for_bank(self, bank_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the sequence rule enablers earch service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.authoring.SequenceRuleEnablerSearchSession) - a ``SequenceRuleEnablerSearchSession``
raise: NotFound - no ``Bank`` found by the given ``Id``
"""
A qiki Word is defined by a three-word subject-verb-object
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import re
import threading
import time
import mysql.connector
import six
from qiki import Number, Suffix
from qiki.number import type_name
# TODO: Move mysql stuff to lex_mysql.py?
HORRIBLE_MYSQL_CONNECTOR_WORKAROUND = True
# SEE: https://stackoverflow.com/q/52759667/673991#comment99030618_55150960
# SEE: https://stackoverflow.com/questions/49958723/cant-insert-blob-image-using-python-via-stored-procedure-mysql # noqa
# SEE: https://stackoverflow.com/questions/51657097/how-can-i-retrieve-binary-data-using-mysql-python-connector # noqa
# Problem: VARBINARY fields are decoded as if their contents were text
# 'utf8' codec can't decode ... invalid start byte
# 0q80 == lex.idn, and '\x80' can never be a valid utf8 string
# Started happening between connector versions 2.2.2 and 8.0.16
# Workaround: character encoding latin1 across whole table
# qiki.Number fields work because latin1 can never fail to decode
# qiki.Text field (txt) fake stores utf8 when it thinks it's latin1, yuk
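# NOTE (illustrative sketch, not part of the original module): one way the latin1
# workaround described above can be applied when opening the connection. The host,
# user, password and database values below are placeholders, not project settings.
def _example_latin1_connection():
    """Open a mysql.connector connection using the latin1 workaround."""
    return mysql.connector.connect(
        host='localhost',          # placeholder
        user='qiki_user',          # placeholder
        password='qiki_password',  # placeholder
        database='qiki',           # placeholder
        charset='latin1',          # latin1 never fails to decode, so VARBINARY round-trips
        use_unicode=True,
    )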
# max_idn_lock = threading.Lock()
# noinspection PyAttributeOutsideInit
class Word(object):
"""
A qiki Word is a subject-verb-object triplet of other words (sbj, vrb, obj).
A word is identified by a qiki Number (idn).
A word may be elaborated by a Number (num) and a string (txt).
A word remembers the time it was created (whn).
Each of these seven components of a word has a 3-letter symbol.
(idn, sbj, vrb, obj, num, txt, whn)
This helps a little in searching for the symbol, and avoiding reserved words.
A word is fundamentally, uniquely, and forever defined by its idn,
within the context of its Lex,
as long as it has been saved (exists is true).
:type content: Text.is_valid() | Word | instancemethod
:type sbj: Number | Word
:type vrb: Number | instancemethod
:type obj: Number | Word
:type num: Number
:type txt: Unicode string in either Python 2 or 3
"""
lex = None # This is probably overwritten by the Lex base constructor.
def __init__(self, content=None, sbj=None, vrb=None, obj=None, num=None, txt=None):
if Text.is_valid(content): # Word('agent')
self._from_definition(content)
elif isinstance(content, Number): # Word(idn)
self._inchoate(content)
elif isinstance(content, Word): # Word(another_word)
self._from_word(content)
elif content is None: # Word(sbj=s, vrb=v, obj=o, num=n, txt=t)
# TODO: If this is only used via spawn(), then move this code there somehow?
self._fields = dict(
sbj=None if sbj is None else self.lex.read_word(sbj),
vrb=None if vrb is None else self.lex.read_word(vrb),
obj=None if obj is None else self.lex.read_word(obj),
num=num,
txt=None if txt is None else Text(txt),
whn=None,
)
else:
need_unicode = type_name(content) in ('str', 'bytes', 'bytearray')
raise TypeError("{outer}({inner}) is not supported{etc}".format(
outer=type_name(self),
inner=type_name(content),
etc=" -- use unicode instead" if need_unicode else ""
))
def _inchoate(self, idn):
"""
Initialize an inchoate word.
Definition of "inchoate"
------------------------
An inchoate word is frugal with resources. It's a ghost, barely there.
All that is known about an inchoate word is its idn.
Maybe that's all we ever need to know about it.
But, if anything substantive is asked of it, then the word is made choate.
For example, getting these properties forces the word to become choate:
word.sbj
word.vrb
word.obj
word.num
word.txt
word.whn
word.exists() (does it exist in the lex, NOT is it choate)
The following also have the side-effect of making a word choate,
because they use one of the above properties:
str(word)
repr(word)
hasattr(word, 'txt')
...a lot more
But the following actions do not make a word choate. If it was inchoate it stays so:
word.idn
word.lex
hash(word)
word == word2
word2 = lex(word)
word2 = word.spawn(word)
word2 = word.inchoate_copy()
This makes it possible to dereference the parts of a sentence dynamically,
only when they're needed, e.g.
word.obj.obj.obj.obj.txt
It also makes it possible to work with a list of words
in a way that's almost as resource-efficient as a list of idns.
There is a private property to determine if a word is inchoate:
if word._is_inchoate:
do something on inchoate word
else:
do something on choate word
"""
# CAUTION: But Word(content=None) is a choate word, because it populates self._fields.
# Listing relies on all this so it may need to be refactored.
# (This is weird because Word(idn) is inchoate.)
self.set_idn_if_you_really_have_to(idn)
def set_idn_if_you_really_have_to(self, idn):
self._idn = idn
def _choate(self):
"""
Transform an inchoate word into a not-inchoate word.
That is, from a mere container of an idn to a fleshed-out word
with num and txt whatever other properties it has.
This in preparation to use one of its properties, sbj, vrb, obj, txt, num, whn.
"""
if self._is_inchoate:
self._from_idn(self._idn)
# assert not self._is_inchoate
# TODO: Why the f does asserting that break everything?
if self._is_inchoate:
self._fields = dict()
assert not self._is_inchoate
# TODO: @property?
def exists(self):
""""
Does this word exist? Is it stored in a Lex?
This is a bigger question than being choate.
Choate is more a concept of what we know about the word so far.
Exist is more a concept of what the world manifests about the word.
The concepts are independent.
A choate word can be nonexistent just before a .save()
"""
# TODO: What about Listing words?
self._choate()
return hasattr(self, '_exists') and self._exists # WTF is not hasattr() enough?
def _now_it_exists(self):
"""Declare that a word "exists"."""
self._exists = True
# NOTE: lex and define words may be very common and benefit from a short idn (0q80 and 0q82)
@property
def _is_inchoate(self):
return not hasattr(self, '_fields')
@property
def sbj(self):
return self._get_field('sbj')
@property
def vrb(self):
return self._get_field('vrb')
@property
def obj(self):
return self._get_field('obj')
@property
def num(self):
return self._get_field('num')
@property
def txt(self):
return self._get_field('txt')
@property
def whn(self):
return self._get_field('whn')
@whn.setter
def whn(self, new_whn):
self._set_field('whn', new_whn)
def _set_field(self, field_name, new_value):
self._choate()
self._fields[field_name] = new_value
def _get_field(self, field_name):
self._choate()
try:
return self._fields[field_name]
except KeyError:
return None
@property
def do_not_call_in_templates(self):
# THANKS: for this Django flag, maybe http://stackoverflow.com/a/21711308/673991
return True
class NotExist(Exception):
pass
def __call__(self, vrb, *args, **kwargs):
"""
Part of the bracket syntax for reading or creating a word in a LexSentence.
lex[s](v)[o] = n,t
This method returns the result of this expression:
lex[s](v)
an instance of SubjectVerb. That instance remembers the sbj and the vrb.
The next part of implementing the bracket syntax is SubjectVerb.__getitem__()
for reading a word and SubjectVerb.__setitem__() for creating a word.
Those methods implement the [o] part.
"""
if isinstance(vrb, six.binary_type):
raise TypeError("Verb name must be unicode, not " + repr(vrb))
sbj = self
return SubjectedVerb(sbj, vrb, *args, **kwargs)
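    # NOTE (illustrative, not from the original source): the bracket syntax described
    # in __call__ is typically driven end to end like this, where `lex`, `alice`,
    # `like` and `bread` are placeholder names:
    #
    #     lex[alice]('like')[bread] = 1, 'freshly baked'   # create a word
    #     word = lex[alice]('like')[bread]                 # read it back
    #
    # __call__ only builds the SubjectedVerb; the trailing [obj] is handled by
    # SubjectedVerb.__getitem__ / SubjectedVerb.__setitem__.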
def said(self, vrb, obj):
return self(vrb)[obj]
@classmethod
def txt_num_swap(cls, a1, a2):
"""
Swap num and txt if necessary.
Either could be None
"""
# TODO: This is a poor stand-in for extract_txt_num(). Or vice versa.
if (
(a2 is None or Text.is_valid(a2)) and
(a1 is None or Number.is_number(a1))
):
return a2, a1
else:
return a1, a2
def says(self, vrb, obj, num=None, txt=None, num_add=None, use_already=False):
# return self(vrb, *args, **kwargs)[obj]
# NOTE: The above way is not quite aggressive enough.
# If num and txt were missing it would passively find a word by s,v,o,
# as opposed to making a new ('',1) word, as create_word below would do.
txt, num = self.txt_num_swap(num, txt) # in case they were specified positionally
return self.lex.create_word(
sbj=self,
vrb=vrb,
obj=obj,
num=num,
txt=txt,
num_add=num_add,
use_already=use_already,
)
def spawn(self, *args, **kwargs):
"""
Construct a Word() using the same lex as another word.
"""
if len(args) == 1 and isinstance(args[0], (Number, Word)):
return self.lex.root_lex[args[0]]
try:
idn = self.lex.idn_ify(args[0])
except IndexError: # args is empty (kwargs probably is not)
pass
except TypeError: # args[0] is neither a Word nor a Number
pass
else:
try:
return Listing.word_from_idn(idn)
except Listing.NotAListing:
pass # args[0] is not a listing word
if idn.is_suffixed():
raise self.NotAWord("Do not know how to spawn this suffixed idn " + idn.qstring())
assert hasattr(self, 'lex')
assert isinstance(self.lex, Lex)
return type(self)(*args, **kwargs)
# NOTE: This should be the only call to the Word constructor.
# Because users should use a Lex instance as a Word factory.
# (Except of course from Word subclass constructors that call their super().)
# Enforce? Refactor somehow?
# (The chicken/egg problem is resolved by the first Word being instantiated
# via the derived class Lex (e.g. LexMySQL).)
# agave233/PaddleHelix: pahelix/utils/compound_tools.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
| Tools for compound features.
| Adapted from https://github.com/snap-stanford/pretrain-gnns/blob/master/chem/loader.py
"""
import os
from collections import OrderedDict
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import rdchem
from pahelix.utils.compound_constants import DAY_LIGHT_FG_SMARTS_LIST
def get_gasteiger_partial_charges(mol, n_iter=12):
"""
Calculates list of gasteiger partial charges for each atom in mol object.
Args:
mol: rdkit mol object.
n_iter(int): number of iterations. Default 12.
Returns:
list of computed partial charges for each atom.
"""
Chem.rdPartialCharges.ComputeGasteigerCharges(mol, nIter=n_iter,
throwOnParamFailure=True)
partial_charges = [float(a.GetProp('_GasteigerCharge')) for a in
mol.GetAtoms()]
return partial_charges
def create_standardized_mol_id(smiles):
"""
Args:
smiles: smiles sequence.
Returns:
inchi.
"""
if check_smiles_validity(smiles):
# remove stereochemistry
smiles = AllChem.MolToSmiles(AllChem.MolFromSmiles(smiles),
isomericSmiles=False)
mol = AllChem.MolFromSmiles(smiles)
if not mol is None: # to catch weird issue with O=C1O[al]2oc(=O)c3ccc(cn3)c3ccccc3c3cccc(c3)c3ccccc3c3cc(C(F)(F)F)c(cc3o2)-c2ccccc2-c2cccc(c2)-c2ccccc2-c2cccnc21
if '.' in smiles: # if multiple species, pick largest molecule
mol_species_list = split_rdkit_mol_obj(mol)
largest_mol = get_largest_mol(mol_species_list)
inchi = AllChem.MolToInchi(largest_mol)
else:
inchi = AllChem.MolToInchi(mol)
return inchi
else:
return
else:
return
def check_smiles_validity(smiles):
"""
Check whether the SMILES string can be converted to an rdkit mol object.
"""
try:
m = Chem.MolFromSmiles(smiles)
if m:
return True
else:
return False
except Exception as e:
return False
def split_rdkit_mol_obj(mol):
"""
Split rdkit mol object containing multiple species or one species into a
list of mol objects or a list containing a single object respectively.
Args:
mol: rdkit mol object.
"""
smiles = AllChem.MolToSmiles(mol, isomericSmiles=True)
smiles_list = smiles.split('.')
mol_species_list = []
for s in smiles_list:
if check_smiles_validity(s):
mol_species_list.append(AllChem.MolFromSmiles(s))
return mol_species_list
def get_largest_mol(mol_list):
"""
Given a list of rdkit mol objects, returns the mol object containing the
largest number of atoms. If several mols share the largest atom count, the
first one is picked.
Args:
mol_list(list): a list of rdkit mol object.
Returns:
the largest mol.
"""
num_atoms_list = [len(m.GetAtoms()) for m in mol_list]
largest_mol_idx = num_atoms_list.index(max(num_atoms_list))
return mol_list[largest_mol_idx]
def rdchem_enum_to_list(values):
"""values = {0: rdkit.Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
1: rdkit.Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
2: rdkit.Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
3: rdkit.Chem.rdchem.ChiralType.CHI_OTHER}
"""
return [values[i] for i in range(len(values))]
def safe_index(alist, elem):
"""
Return the index of elem in alist. If elem is not present, return the last index.
"""
try:
return alist.index(elem)
except ValueError:
return len(alist) - 1
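# NOTE (illustrative, not part of the original file): safe_index maps out-of-vocabulary
# values onto the last slot, which the vocab lists below reserve for 'misc', e.g.
#   safe_index([0, 1, 'misc'], 1)   -> 1
#   safe_index([0, 1, 'misc'], 42)  -> 2   (falls into the 'misc' bucket)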
def get_atom_feature_dims(list_acquired_feature_names):
""" tbd
"""
return list(map(len, [CompoundKit.atom_vocab_dict[name] for name in list_acquired_feature_names]))
def get_bond_feature_dims(list_acquired_feature_names):
""" tbd
"""
list_bond_feat_dim = list(map(len, [CompoundKit.bond_vocab_dict[name] for name in list_acquired_feature_names]))
# +1 for self loop edges
return [_l + 1 for _l in list_bond_feat_dim]
class CompoundKit(object):
"""
CompoundKit
"""
atom_vocab_dict = {
"atomic_num": list(range(1, 119)) + ['misc'],
"chiral_tag": rdchem_enum_to_list(rdchem.ChiralType.values),
"degree": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
"explicit_valence": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 'misc'],
"formal_charge": [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
"hybridization": rdchem_enum_to_list(rdchem.HybridizationType.values),
"implicit_valence": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 'misc'],
"is_aromatic": [0, 1],
"total_numHs": [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'num_radical_e': [0, 1, 2, 3, 4, 'misc'],
'atom_is_in_ring': [0, 1],
'valence_out_shell': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'in_num_ring_with_size3': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'in_num_ring_with_size4': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'in_num_ring_with_size5': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'in_num_ring_with_size6': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'in_num_ring_with_size7': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'in_num_ring_with_size8': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
}
bond_vocab_dict = {
"bond_dir": rdchem_enum_to_list(rdchem.BondDir.values),
"bond_type": rdchem_enum_to_list(rdchem.BondType.values),
"is_in_ring": [0, 1],
'bond_stereo': rdchem_enum_to_list(rdchem.BondStereo.values),
'is_conjugated': [0, 1],
}
# float features
atom_float_names = ["van_der_waals_radis", "partial_charge", 'mass']
# bond_float_feats= ["bond_length", "bond_angle"] # optional
### functional groups
day_light_fg_smarts_list = DAY_LIGHT_FG_SMARTS_LIST
day_light_fg_mo_list = [Chem.MolFromSmarts(smarts) for smarts in day_light_fg_smarts_list]
morgan_fp_N = 200
morgan2048_fp_N = 2048
maccs_fp_N = 167
period_table = Chem.GetPeriodicTable()
### atom
@staticmethod
def get_atom_value(atom, name):
"""get atom values"""
if name == 'atomic_num':
return atom.GetAtomicNum()
elif name == 'chiral_tag':
return atom.GetChiralTag()
elif name == 'degree':
return atom.GetDegree()
elif name == 'explicit_valence':
return atom.GetExplicitValence()
elif name == 'formal_charge':
return atom.GetFormalCharge()
elif name == 'hybridization':
return atom.GetHybridization()
elif name == 'implicit_valence':
return atom.GetImplicitValence()
elif name == 'is_aromatic':
return int(atom.GetIsAromatic())
elif name == 'mass':
return int(atom.GetMass())
elif name == 'total_numHs':
return atom.GetTotalNumHs()
elif name == 'num_radical_e':
return atom.GetNumRadicalElectrons()
elif name == 'atom_is_in_ring':
return int(atom.IsInRing())
elif name == 'valence_out_shell':
return CompoundKit.period_table.GetNOuterElecs(atom.GetAtomicNum())
else:
raise ValueError(name)
@staticmethod
def get_atom_feature_id(atom, name):
"""get atom features id"""
assert name in CompoundKit.atom_vocab_dict, "%s not found in atom_vocab_dict" % name
return safe_index(CompoundKit.atom_vocab_dict[name], CompoundKit.get_atom_value(atom, name))
@staticmethod
def get_atom_feature_size(name):
"""get atom features size"""
assert name in CompoundKit.atom_vocab_dict, "%s not found in atom_vocab_dict" % name
return len(CompoundKit.atom_vocab_dict[name])
### bond
@staticmethod
def get_bond_value(bond, name):
"""get bond values"""
if name == 'bond_dir':
return bond.GetBondDir()
elif name == 'bond_type':
return bond.GetBondType()
elif name == 'is_in_ring':
return int(bond.IsInRing())
elif name == 'is_conjugated':
return int(bond.GetIsConjugated())
elif name == 'bond_stereo':
return bond.GetStereo()
else:
raise ValueError(name)
@staticmethod
def get_bond_feature_id(bond, name):
"""get bond features id"""
assert name in CompoundKit.bond_vocab_dict, "%s not found in bond_vocab_dict" % name
return safe_index(CompoundKit.bond_vocab_dict[name], CompoundKit.get_bond_value(bond, name))
@staticmethod
def get_bond_feature_size(name):
"""get bond features size"""
assert name in CompoundKit.bond_vocab_dict, "%s not found in bond_vocab_dict" % name
return len(CompoundKit.bond_vocab_dict[name])
### fingerprint
@staticmethod
def get_morgan_fingerprint(mol, radius=2):
"""get morgan fingerprint"""
nBits = CompoundKit.morgan_fp_N
mfp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits)
return [int(b) for b in mfp.ToBitString()]
@staticmethod
def get_morgan2048_fingerprint(mol, radius=2):
"""get morgan2048 fingerprint"""
nBits = CompoundKit.morgan2048_fp_N
mfp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits)
return [int(b) for b in mfp.ToBitString()]
@staticmethod
def get_maccs_fingerprint(mol):
"""get maccs fingerprint"""
fp = AllChem.GetMACCSKeysFingerprint(mol)
return [int(b) for b in fp.ToBitString()]
### functional groups
@staticmethod
def get_daylight_functional_group_counts(mol):
"""get daylight functional group counts"""
fg_counts = []
for fg_mol in CompoundKit.day_light_fg_mo_list:
sub_structs = Chem.Mol.GetSubstructMatches(mol, fg_mol, uniquify=True)
fg_counts.append(len(sub_structs))
return fg_counts
@staticmethod
def get_ring_size(mol):
"""return (N,6) list"""
rings = mol.GetRingInfo()
rings_info = []
for r in rings.AtomRings():
rings_info.append(r)
ring_list = []
for atom in mol.GetAtoms():
atom_result = []
for ringsize in range(3, 9):
num_of_ring_at_ringsize = 0
for r in rings_info:
if len(r) == ringsize and atom.GetIdx() in r:
num_of_ring_at_ringsize += 1
if num_of_ring_at_ringsize > 8:
num_of_ring_at_ringsize = 9
atom_result.append(num_of_ring_at_ringsize)
ring_list.append(atom_result)
return ring_list
@staticmethod
def atom_to_feat_vector(atom):
""" tbd """
atom_names = {
"atomic_num": safe_index(CompoundKit.atom_vocab_dict["atomic_num"], atom.GetAtomicNum()),
"chiral_tag": safe_index(CompoundKit.atom_vocab_dict["chiral_tag"], atom.GetChiralTag()),
"degree": safe_index(CompoundKit.atom_vocab_dict["degree"], atom.GetTotalDegree()),
"explicit_valence": safe_index(CompoundKit.atom_vocab_dict["explicit_valence"], atom.GetExplicitValence()),
"formal_charge": safe_index(CompoundKit.atom_vocab_dict["formal_charge"], atom.GetFormalCharge()),
"hybridization": safe_index(CompoundKit.atom_vocab_dict["hybridization"], atom.GetHybridization()),
"implicit_valence": safe_index(CompoundKit.atom_vocab_dict["implicit_valence"], atom.GetImplicitValence()),
"is_aromatic": safe_index(CompoundKit.atom_vocab_dict["is_aromatic"], int(atom.GetIsAromatic())),
"total_numHs": safe_index(CompoundKit.atom_vocab_dict["total_numHs"], atom.GetTotalNumHs()),
'num_radical_e': safe_index(CompoundKit.atom_vocab_dict['num_radical_e'], atom.GetNumRadicalElectrons()),
'atom_is_in_ring': safe_index(CompoundKit.atom_vocab_dict['atom_is_in_ring'], int(atom.IsInRing())),
'valence_out_shell': safe_index(CompoundKit.atom_vocab_dict['valence_out_shell'],
CompoundKit.period_table.GetNOuterElecs(atom.GetAtomicNum())),
'van_der_waals_radis': CompoundKit.period_table.GetRvdw(atom.GetAtomicNum()),
'partial_charge': CompoundKit.check_partial_charge(atom),
'mass': atom.GetMass(),
}
return atom_names
@staticmethod
def get_atom_names(mol):
"""get atom name list
TODO: to be removed in the future
"""
atom_features_dicts = []
Chem.rdPartialCharges.ComputeGasteigerCharges(mol)
for i, atom in enumerate(mol.GetAtoms()):
atom_features_dicts.append(CompoundKit.atom_to_feat_vector(atom))
ring_list = CompoundKit.get_ring_size(mol)
for i, atom in enumerate(mol.GetAtoms()):
atom_features_dicts[i]['in_num_ring_with_size3'] = safe_index(
CompoundKit.atom_vocab_dict['in_num_ring_with_size3'], ring_list[i][0])
atom_features_dicts[i]['in_num_ring_with_size4'] = safe_index(
CompoundKit.atom_vocab_dict['in_num_ring_with_size4'], ring_list[i][1])
atom_features_dicts[i]['in_num_ring_with_size5'] = safe_index(
CompoundKit.atom_vocab_dict['in_num_ring_with_size5'], ring_list[i][2])
atom_features_dicts[i]['in_num_ring_with_size6'] = safe_index(
CompoundKit.atom_vocab_dict['in_num_ring_with_size6'], ring_list[i][3])
atom_features_dicts[i]['in_num_ring_with_size7'] = safe_index(
CompoundKit.atom_vocab_dict['in_num_ring_with_size7'], ring_list[i][4])
atom_features_dicts[i]['in_num_ring_with_size8'] = safe_index(
CompoundKit.atom_vocab_dict['in_num_ring_with_size8'], ring_list[i][5])
return atom_features_dicts
@staticmethod
def check_partial_charge(atom):
"""tbd"""
pc = atom.GetDoubleProp('_GasteigerCharge')
if pc != pc:
# unsupported atom, replace nan with 0
pc = 0
if pc == float('inf'):
# values are normally small (around 4 at most); clamp to 10 if inf is encountered
pc = 10
return pc
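# NOTE (illustrative sketch, not part of the original file): how the CompoundKit
# helpers above might be combined to featurize a single molecule. The SMILES string
# is an arbitrary example and the helper name is ours, not a PaddleHelix API.
def _example_featurize_smiles(smiles='CCO'):
    """Return per-atom feature dicts and a 200-bit Morgan fingerprint for a SMILES."""
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None, None
    atom_feature_dicts = CompoundKit.get_atom_names(mol)   # list of per-atom feature dicts
    morgan_fp = CompoundKit.get_morgan_fingerprint(mol)    # list of 0/1 bits
    return atom_feature_dicts, morgan_fp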
class Compound3DKit(object):
"""the 3Dkit of Compound"""
@staticmethod
def get_atom_poses(mol, conf):
"""tbd"""
atom_poses = []
for i, atom in enumerate(mol.GetAtoms()):
if atom.GetAtomicNum() == 0:
return [[0.0, 0.0, 0.0]] * len(mol.GetAtoms())
pos = conf.GetAtomPosition(i)
atom_poses.append([pos.x, pos.y, pos.z])
return atom_poses
@staticmethod
def get_MMFF_atom_poses(mol, numConfs=None, return_energy=False):
"""the atoms of mol will be changed in some cases."""
try:
new_mol = Chem.AddHs(mol)
res = AllChem.EmbedMultipleConfs(new_mol, numConfs=numConfs)
### MMFF generates multiple conformations
| |
from __future__ import absolute_import
import ctypes as _ctypes
from ._library import libHAPI as _hapi
from .version import *
from .common import *
from .helpers import *
CreateInProcessSession = _hapi.HAPI_CreateInProcessSession
CreateInProcessSession.restype = Result
CreateInProcessSession.argtypes = [ _ctypes.POINTER(Session) ]
StartThriftSocketServer = _hapi.HAPI_StartThriftSocketServer
StartThriftSocketServer.restype = Result
StartThriftSocketServer.argtypes = [ _ctypes.POINTER(ThriftServerOptions),
_ctypes.c_int,
_ctypes.POINTER(ProcessId) ]
CreateThriftSocketSession = _hapi.HAPI_CreateThriftSocketSession
CreateThriftSocketSession.restype = Result
CreateThriftSocketSession.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.c_int ]
StartThriftNamedPipeServer = _hapi.HAPI_StartThriftNamedPipeServer
StartThriftNamedPipeServer.restype = Result
StartThriftNamedPipeServer.argtypes = [ _ctypes.POINTER(ThriftServerOptions),
_ctypes.c_char_p,
_ctypes.POINTER(ProcessId) ]
CreateThriftNamedPipeSession = _hapi.HAPI_CreateThriftNamedPipeSession
CreateThriftNamedPipeSession.restype = Result
CreateThriftNamedPipeSession.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p ]
BindCustomImplementation = _hapi.HAPI_BindCustomImplementation
BindCustomImplementation.restype = Result
BindCustomImplementation.argtypes = [ SessionType,
_ctypes.c_char_p ]
CreateCustomSession = _hapi.HAPI_CreateCustomSession
CreateCustomSession.restype = Result
CreateCustomSession.argtypes = [ SessionType,
_ctypes.c_void_p,
_ctypes.POINTER(Session) ]
IsSessionValid = _hapi.HAPI_IsSessionValid
IsSessionValid.restype = Result
IsSessionValid.argtypes = [ _ctypes.POINTER(Session) ]
CloseSession = _hapi.HAPI_CloseSession
CloseSession.restype = Result
CloseSession.argtypes = [ _ctypes.POINTER(Session) ]
IsInitialized = _hapi.HAPI_IsInitialized
IsInitialized.restype = Result
IsInitialized.argtypes = [ _ctypes.POINTER(Session) ]
Initialize = _hapi.HAPI_Initialize
Initialize.restype = Result
Initialize.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(CookOptions),
Bool,
_ctypes.c_int,
_ctypes.c_char_p,
_ctypes.c_char_p,
_ctypes.c_char_p,
_ctypes.c_char_p,
_ctypes.c_char_p ]
Cleanup = _hapi.HAPI_Cleanup
Cleanup.restype = Result
Cleanup.argtypes = [_ctypes.POINTER(Session)]
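# NOTE (illustrative sketch, not part of the original bindings): a minimal way the
# session wrappers above might be exercised to open and close an in-process session.
# Checking the returned Result code is an assumption about typical caller behaviour.
def _example_in_process_session():
    """Create an in-process HAPI session, then close it again."""
    session = Session()
    result = CreateInProcessSession(_ctypes.byref(session))
    # ... Initialize() and the other wrappers would normally be called here ...
    CloseSession(_ctypes.byref(session))
    return result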
GetEnvInt = _hapi.HAPI_GetEnvInt
GetEnvInt.restype = Result
GetEnvInt.argtypes = [ EnvIntType,
_ctypes.POINTER(_ctypes.c_int) ]
GetSessionEnvInt = _hapi.HAPI_GetSessionEnvInt
GetSessionEnvInt.restype = Result
GetSessionEnvInt.argtypes = [ _ctypes.POINTER(Session),
SessionEnvIntType,
_ctypes.POINTER(_ctypes.c_int) ]
GetServerEnvInt = _hapi.HAPI_GetServerEnvInt
GetServerEnvInt.restype = Result
GetServerEnvInt.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.POINTER(_ctypes.c_int) ]
GetServerEnvString = _hapi.HAPI_GetServerEnvString
GetServerEnvString.restype = Result
GetServerEnvString.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.POINTER(StringHandle) ]
GetServerEnvVarCount = _hapi.HAPI_GetServerEnvVarCount
GetServerEnvVarCount.restype = Result
GetServerEnvVarCount.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(_ctypes.c_int) ]
GetServerEnvVarList = _hapi.HAPI_GetServerEnvVarList
GetServerEnvVarList.restype = Result
GetServerEnvVarList.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(StringHandle),
_ctypes.c_int,
_ctypes.c_int ]
SetServerEnvInt = _hapi.HAPI_SetServerEnvInt
SetServerEnvInt.restype = Result
SetServerEnvInt.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.c_int ]
SetServerEnvString = _hapi.HAPI_SetServerEnvString
SetServerEnvString.restype = Result
SetServerEnvString.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.c_char_p ]
GetStatus = _hapi.HAPI_GetStatus
GetStatus.restype = Result
GetStatus.argtypes = [ _ctypes.POINTER(Session),
StatusType,
_ctypes.POINTER(_ctypes.c_int) ]
GetStatusStringBufLength = _hapi.HAPI_GetStatusStringBufLength
GetStatusStringBufLength.restype = Result
GetStatusStringBufLength.argtypes = [ _ctypes.POINTER(Session),
StatusType,
StatusVerbosity,
_ctypes.POINTER(_ctypes.c_int) ]
GetStatusString = _hapi.HAPI_GetStatusString
GetStatusString.restype = Result
GetStatusString.argtypes = [ _ctypes.POINTER(Session),
StatusType,
_ctypes.c_char_p,
_ctypes.c_int ]
ComposeNodeCookResult = _hapi.HAPI_ComposeNodeCookResult
ComposeNodeCookResult.restype = Result
ComposeNodeCookResult.argtypes = [ _ctypes.POINTER(Session),
NodeId,
StatusVerbosity,
_ctypes.POINTER(_ctypes.c_int) ]
GetComposedNodeCookResult = _hapi.HAPI_GetComposedNodeCookResult
GetComposedNodeCookResult.restype = Result
GetComposedNodeCookResult.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.c_int ]
CheckForSpecificErrors = _hapi.HAPI_CheckForSpecificErrors
CheckForSpecificErrors.restype = Result
CheckForSpecificErrors.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ErrorCodeBits,
_ctypes.POINTER(ErrorCodeBits) ]
GetCookingTotalCount = _hapi.HAPI_GetCookingTotalCount
GetCookingTotalCount.restype = Result
GetCookingTotalCount.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(_ctypes.c_int) ]
GetCookingCurrentCount = _hapi.HAPI_GetCookingCurrentCount
GetCookingCurrentCount.restype = Result
GetCookingCurrentCount.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(_ctypes.c_int) ]
ConvertTransform = _hapi.HAPI_ConvertTransform
ConvertTransform.restype = Result
ConvertTransform.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(TransformEuler),
RSTOrder,
XYZOrder,
_ctypes.POINTER(TransformEuler) ]
ConvertMatrixToQuat = _hapi.HAPI_ConvertMatrixToQuat
ConvertMatrixToQuat.restype = Result
ConvertMatrixToQuat.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(_ctypes.c_float),
RSTOrder,
_ctypes.POINTER(Transform) ]
ConvertMatrixToEuler = _hapi.HAPI_ConvertMatrixToEuler
ConvertMatrixToEuler.restype = Result
ConvertMatrixToEuler.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(_ctypes.c_float),
RSTOrder,
XYZOrder,
_ctypes.POINTER(TransformEuler) ]
ConvertTransformQuatToMatrix = _hapi.HAPI_ConvertTransformQuatToMatrix
ConvertTransformQuatToMatrix.restype = Result
ConvertTransformQuatToMatrix.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(Transform),
_ctypes.POINTER(_ctypes.c_float) ]
ConvertTransformEulerToMatrix = _hapi.HAPI_ConvertTransformEulerToMatrix
ConvertTransformEulerToMatrix.restype = Result
ConvertTransformEulerToMatrix.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(TransformEuler),
_ctypes.POINTER(_ctypes.c_float) ]
PythonThreadInterpreterLock = _hapi.HAPI_PythonThreadInterpreterLock
PythonThreadInterpreterLock.restype = Result
PythonThreadInterpreterLock.argtypes = [ _ctypes.POINTER(Session),
Bool ]
GetStringBufLength = _hapi.HAPI_GetStringBufLength
GetStringBufLength.restype = Result
GetStringBufLength.argtypes = [ _ctypes.POINTER(Session),
StringHandle,
_ctypes.POINTER(_ctypes.c_int) ]
GetString = _hapi.HAPI_GetString
GetString.restype = Result
GetString.argtypes = [ _ctypes.POINTER(Session),
StringHandle,
_ctypes.c_char_p,
_ctypes.c_int ]
SetCustomString = _hapi.HAPI_SetCustomString
SetCustomString.restype = Result
SetCustomString.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.POINTER(StringHandle) ]
RemoveCustomString = _hapi.HAPI_RemoveCustomString
RemoveCustomString.restype = Result
RemoveCustomString.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(StringHandle) ]
GetStringBatchSize = _hapi.HAPI_GetStringBatchSize
GetStringBatchSize.restype = Result
GetStringBatchSize.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(StringHandle),
_ctypes.c_int,
_ctypes.POINTER(_ctypes.c_int) ]
GetStringBatch = _hapi.HAPI_GetStringBatch
GetStringBatch.restype = Result
GetStringBatch.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.c_int ]
GetTime = _hapi.HAPI_GetTime
GetTime.restype = Result
GetTime.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(_ctypes.c_float) ]
SetTime = _hapi.HAPI_SetTime
SetTime.restype = Result
SetTime.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_float ]
GetTimelineOptions = _hapi.HAPI_GetTimelineOptions
GetTimelineOptions.restype = Result
GetTimelineOptions.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(TimelineOptions) ]
SetTimelineOptions = _hapi.HAPI_SetTimelineOptions
SetTimelineOptions.restype = Result
SetTimelineOptions.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(TimelineOptions) ]
LoadAssetLibraryFromFile = _hapi.HAPI_LoadAssetLibraryFromFile
LoadAssetLibraryFromFile.restype = Result
LoadAssetLibraryFromFile.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
Bool,
_ctypes.POINTER(AssetLibraryId) ]
LoadAssetLibraryFromMemory = _hapi.HAPI_LoadAssetLibraryFromMemory
LoadAssetLibraryFromMemory.restype = Result
LoadAssetLibraryFromMemory.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
_ctypes.c_int,
Bool,
_ctypes.POINTER(AssetLibraryId) ]
GetAvailableAssetCount = _hapi.HAPI_GetAvailableAssetCount
GetAvailableAssetCount.restype = Result
GetAvailableAssetCount.argtypes = [ _ctypes.POINTER(Session),
AssetLibraryId,
_ctypes.POINTER(_ctypes.c_int) ]
GetAvailableAssets = _hapi.HAPI_GetAvailableAssets
GetAvailableAssets.restype = Result
GetAvailableAssets.argtypes = [ _ctypes.POINTER(Session),
AssetLibraryId,
_ctypes.POINTER(StringHandle),
_ctypes.c_int ]
GetAssetInfo = _hapi.HAPI_GetAssetInfo
GetAssetInfo.restype = Result
GetAssetInfo.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(AssetInfo) ]
Interrupt = _hapi.HAPI_Interrupt
Interrupt.restype = Result
Interrupt.argtypes = [ _ctypes.POINTER(Session) ]
LoadHIPFile = _hapi.HAPI_LoadHIPFile
LoadHIPFile.restype = Result
LoadHIPFile.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
Bool ]
SaveHIPFile = _hapi.HAPI_SaveHIPFile
SaveHIPFile.restype = Result
SaveHIPFile.argtypes = [ _ctypes.POINTER(Session),
_ctypes.c_char_p,
Bool]
IsNodeValid = _hapi.HAPI_IsNodeValid
IsNodeValid.restype = Result
IsNodeValid.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
_ctypes.POINTER(Bool) ]
GetNodeInfo = _hapi.HAPI_GetNodeInfo
GetNodeInfo.restype = Result
GetNodeInfo.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(NodeInfo) ]
GetNodePath = _hapi.HAPI_GetNodePath
GetNodePath.restype = Result
GetNodePath.argtypes = [ _ctypes.POINTER(Session),
NodeId,
NodeId,
_ctypes.POINTER(StringHandle) ]
GetManagerNodeId = _hapi.HAPI_GetManagerNodeId
GetManagerNodeId.restype = Result
GetManagerNodeId.argtypes = [ _ctypes.POINTER(Session),
NodeType,
_ctypes.POINTER(NodeId) ]
ComposeChildNodeList = _hapi.HAPI_ComposeChildNodeList
ComposeChildNodeList.restype = Result
ComposeChildNodeList.argtypes = [ _ctypes.POINTER(Session),
NodeId,
NodeTypeBits,
NodeFlagsBits,
Bool,
_ctypes.POINTER(_ctypes.c_int)]
GetComposedChildNodeList = _hapi.HAPI_GetComposedChildNodeList
GetComposedChildNodeList.restype = Result
GetComposedChildNodeList.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(NodeId),
_ctypes.c_int ]
CreateNode = _hapi.HAPI_CreateNode
CreateNode.restype = Result
CreateNode.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_char_p,
Bool,
_ctypes.POINTER(NodeId) ]
CreateInputNode = _hapi.HAPI_CreateInputNode
CreateInputNode.restype = Result
CreateInputNode.argtypes = [ _ctypes.POINTER(Session),
_ctypes.POINTER(NodeId),
_ctypes.c_char_p ]
CreateHeightfieldInputNode = _hapi.HAPI_CreateHeightfieldInputNode
CreateHeightfieldInputNode.restype = Result
CreateHeightfieldInputNode.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.c_int,
_ctypes.c_float,
_ctypes.POINTER(NodeId),
_ctypes.POINTER(NodeId),
_ctypes.POINTER(NodeId),
_ctypes.POINTER(NodeId),
]
CreateHeightfieldInputVolumeNode = _hapi.HAPI_CreateHeightfieldInputVolumeNode
CreateHeightfieldInputVolumeNode.restype = Result
CreateHeightfieldInputVolumeNode.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(NodeId),
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.c_int,
_ctypes.c_float,
]
CookNode = _hapi.HAPI_CookNode
CookNode.restype = Result
CookNode.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(CookOptions) ]
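# Illustrative sketch only: creating a node and cooking it with the prototypes above.
# The parent id of -1 (create at the network root), the operator name, and passing
# None for the cook options (use the current defaults) are assumptions for this
# example, not values defined by this module; Result codes are not checked.
def _example_create_and_cook(session, operator_name=b"Object/geo"):
    node_id = NodeId()
    CreateNode(_ctypes.byref(session), -1, operator_name, None, False, _ctypes.byref(node_id))
    CookNode(_ctypes.byref(session), node_id, None)
    return node_id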
DeleteNode = _hapi.HAPI_DeleteNode
DeleteNode.restype = Result
DeleteNode.argtypes = [ _ctypes.POINTER(Session),
NodeId ]
RenameNode = _hapi.HAPI_RenameNode
RenameNode.restype = Result
RenameNode.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p ]
ConnectNodeInput = _hapi.HAPI_ConnectNodeInput
ConnectNodeInput.restype = Result
ConnectNodeInput.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
NodeId,
_ctypes.c_int,
]
DisconnectNodeInput = _hapi.HAPI_DisconnectNodeInput
DisconnectNodeInput.restype = Result
DisconnectNodeInput.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int ]
QueryNodeInput = _hapi.HAPI_QueryNodeInput
QueryNodeInput.restype = Result
QueryNodeInput.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
_ctypes.POINTER(NodeId) ]
GetNodeInputName = _hapi.HAPI_GetNodeInputName
GetNodeInputName.restype = Result
GetNodeInputName.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
_ctypes.POINTER(StringHandle) ]
DisconnectNodeOutputsAt = _hapi.HAPI_DisconnectNodeOutputsAt
DisconnectNodeOutputsAt.restype = Result
DisconnectNodeOutputsAt.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int ]
QueryNodeOutputConnectedCount = _hapi.HAPI_QueryNodeOutputConnectedCount
QueryNodeOutputConnectedCount.restype = Result
QueryNodeOutputConnectedCount.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
Bool,
Bool,
_ctypes.POINTER(_ctypes.c_int) ]
QueryNodeOutputConnectedNodes = _hapi.HAPI_QueryNodeOutputConnectedNodes
QueryNodeOutputConnectedNodes.restype = Result
QueryNodeOutputConnectedNodes.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
Bool,
Bool,
_ctypes.POINTER(NodeId),
_ctypes.c_int,
_ctypes.c_int, ]
GetNodeOutputName = _hapi.HAPI_GetNodeOutputName
GetNodeOutputName.restype = Result
GetNodeOutputName.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
_ctypes.POINTER(StringHandle) ]
GetParameters = _hapi.HAPI_GetParameters
GetParameters.restype = Result
GetParameters.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(ParmInfo),
_ctypes.c_int,
_ctypes.c_int ]
GetParmInfo = _hapi.HAPI_GetParmInfo
GetParmInfo.restype = Result
GetParmInfo.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.POINTER(ParmInfo) ]
GetParmIdFromName = _hapi.HAPI_GetParmIdFromName
GetParmIdFromName.restype = Result
GetParmIdFromName.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.POINTER(ParmId) ]
GetParmInfoFromName = _hapi.HAPI_GetParmInfoFromName
GetParmInfoFromName.restype = Result
GetParmInfoFromName.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.POINTER(ParmInfo) ]
GetParmTagName = _hapi.HAPI_GetParmTagName
GetParmTagName.restype = Result
GetParmTagName.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.c_int,
_ctypes.POINTER(StringHandle) ]
GetParmTagValue = _hapi.HAPI_GetParmTagValue
GetParmTagValue.restype = Result
GetParmTagValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.c_char_p,
_ctypes.POINTER(StringHandle) ]
ParmHasTag = _hapi.HAPI_ParmHasTag
ParmHasTag.restype = Result
ParmHasTag.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.c_char_p,
_ctypes.POINTER(Bool) ]
ParmHasExpression = _hapi.HAPI_ParmHasExpression
ParmHasExpression.restype = Result
ParmHasExpression.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.POINTER(Bool) ]
GetParmWithTag = _hapi.HAPI_GetParmWithTag
GetParmWithTag.restype = Result
GetParmWithTag.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.POINTER(ParmId) ]
GetParmExpression = _hapi.HAPI_GetParmExpression
GetParmExpression.restype = Result
GetParmExpression.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.POINTER(StringHandle) ]
RevertParmToDefault = _hapi.HAPI_RevertParmToDefault
RevertParmToDefault.restype = Result
RevertParmToDefault.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int ]
RevertParmToDefaults = _hapi.HAPI_RevertParmToDefaults
RevertParmToDefaults.restype = Result
RevertParmToDefaults.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p ]
SetParmExpression = _hapi.HAPI_SetParmExpression
SetParmExpression.restype = Result
SetParmExpression.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
ParmId,
_ctypes.c_int ]
RemoveParmExpression = _hapi.HAPI_RemoveParmExpression
RemoveParmExpression.restype = Result
RemoveParmExpression.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.c_int ]
GetParmIntValue = _hapi.HAPI_GetParmIntValue
GetParmIntValue.restype = Result
GetParmIntValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.POINTER(_ctypes.c_int) ]
GetParmIntValues = _hapi.HAPI_GetParmIntValues
GetParmIntValues.restype = Result
GetParmIntValues.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(_ctypes.c_int),
_ctypes.c_int,
_ctypes.c_int ]
GetParmFloatValue = _hapi.HAPI_GetParmFloatValue
GetParmFloatValue.restype = Result
GetParmFloatValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.POINTER(_ctypes.c_float) ]
GetParmFloatValues = _hapi.HAPI_GetParmFloatValues
GetParmFloatValues.restype = Result
GetParmFloatValues.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(_ctypes.c_float),
_ctypes.c_int,
_ctypes.c_int ]
GetParmStringValue = _hapi.HAPI_GetParmStringValue
GetParmStringValue.restype = Result
GetParmStringValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
Bool,
_ctypes.POINTER(StringHandle) ]
GetParmStringValues = _hapi.HAPI_GetParmStringValues
GetParmStringValues.restype = Result
GetParmStringValues.argtypes = [ _ctypes.POINTER(Session),
NodeId,
Bool,
_ctypes.POINTER(StringHandle),
_ctypes.c_int,
_ctypes.c_int ]
GetParmNodeValue = _hapi.HAPI_GetParmNodeValue
GetParmNodeValue.restype = Result
GetParmNodeValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.POINTER(NodeId) ]
GetParmFile = _hapi.HAPI_GetParmFile
GetParmFile.restype = Result
GetParmFile.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_char_p,
_ctypes.c_char_p ]
GetParmChoiceLists = _hapi.HAPI_GetParmChoiceLists
GetParmChoiceLists.restype = Result
GetParmChoiceLists.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(ParmChoiceInfo),
_ctypes.c_int,
_ctypes.c_int ]
SetParmIntValue = _hapi.HAPI_SetParmIntValue
SetParmIntValue.restype = Result
SetParmIntValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.c_int ]
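# Illustrative sketch only: round-tripping an integer parameter by name with the
# prototypes above. The parameter name b"seed" and index 0 are placeholders, and
# Result codes are not checked.
def _example_set_and_get_int_parm(session, node_id, value):
    SetParmIntValue(_ctypes.byref(session), node_id, b"seed", 0, value)
    out = _ctypes.c_int()
    GetParmIntValue(_ctypes.byref(session), node_id, b"seed", 0, _ctypes.byref(out))
    return out.value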
SetParmIntValues = _hapi.HAPI_SetParmIntValues
SetParmIntValues.restype = Result
SetParmIntValues.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(_ctypes.c_int),
_ctypes.c_int,
_ctypes.c_int ]
SetParmFloatValue = _hapi.HAPI_SetParmFloatValue
SetParmFloatValue.restype = Result
SetParmFloatValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int,
_ctypes.c_float ]
SetParmFloatValues = _hapi.HAPI_SetParmFloatValues
SetParmFloatValues.restype = Result
SetParmFloatValues.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(_ctypes.c_float),
_ctypes.c_int,
_ctypes.c_int ]
SetParmStringValue = _hapi.HAPI_SetParmStringValue
SetParmStringValue.restype = Result
SetParmStringValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
ParmId,
_ctypes.c_int ]
SetParmNodeValue = _hapi.HAPI_SetParmNodeValue
SetParmNodeValue.restype = Result
SetParmNodeValue.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
NodeId ]
InsertMultiparmInstance = _hapi.HAPI_InsertMultiparmInstance
InsertMultiparmInstance.restype = Result
InsertMultiparmInstance.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.c_int ]
RemoveMultiparmInstance = _hapi.HAPI_RemoveMultiparmInstance
RemoveMultiparmInstance.restype = Result
RemoveMultiparmInstance.argtypes = [ _ctypes.POINTER(Session),
NodeId,
ParmId,
_ctypes.c_int ]
GetHandleInfo = _hapi.HAPI_GetHandleInfo
GetHandleInfo.restype = Result
GetHandleInfo.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(HandleInfo),
_ctypes.c_int,
_ctypes.c_int ]
GetHandleBindingInfo = _hapi.HAPI_GetHandleBindingInfo
GetHandleBindingInfo.restype = Result
GetHandleBindingInfo.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_int,
_ctypes.POINTER(HandleBindingInfo),
_ctypes.c_int,
_ctypes.c_int ]
GetPresetBufLength = _hapi.HAPI_GetPresetBufLength
GetPresetBufLength.restype = Result
GetPresetBufLength.argtypes = [ _ctypes.POINTER(Session),
NodeId,
PresetType,
_ctypes.c_char_p,
_ctypes.POINTER(_ctypes.c_int) ]
GetPreset = _hapi.HAPI_GetPreset
GetPreset.restype = Result
GetPreset.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.c_int ]
SetPreset = _hapi.HAPI_SetPreset
SetPreset.restype = Result
SetPreset.argtypes = [ _ctypes.POINTER(Session),
NodeId,
PresetType,
_ctypes.c_char_p,
_ctypes.c_char_p,
_ctypes.c_int ]
GetObjectInfo = _hapi.HAPI_GetObjectInfo
GetObjectInfo.restype = Result
GetObjectInfo.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(ObjectInfo) ]
GetObjectTransform = _hapi.HAPI_GetObjectTransform
GetObjectTransform.restype = Result
GetObjectTransform.argtypes = [ _ctypes.POINTER(Session),
NodeId,
NodeId,
RSTOrder,
_ctypes.POINTER(Transform) ]
ComposeObjectList = _hapi.HAPI_ComposeObjectList
ComposeObjectList.restype = Result
ComposeObjectList.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.c_char_p,
_ctypes.POINTER(_ctypes.c_int) ]
GetComposedObjectList = _hapi.HAPI_GetComposedObjectList
GetComposedObjectList.restype = Result
GetComposedObjectList.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(ObjectInfo),
_ctypes.c_int,
_ctypes.c_int ]
GetComposedObjectTransforms = _hapi.HAPI_GetComposedObjectTransforms
GetComposedObjectTransforms.restype = Result
GetComposedObjectTransforms.argtypes = [ _ctypes.POINTER(Session),
NodeId,
RSTOrder,
_ctypes.POINTER(Transform),
_ctypes.c_int,
_ctypes.c_int ]
GetInstancedObjectIds = _hapi.HAPI_GetInstancedObjectIds
GetInstancedObjectIds.restype = Result
GetInstancedObjectIds.argtypes = [ _ctypes.POINTER(Session),
NodeId,
_ctypes.POINTER(NodeId),
_ctypes.c_int,
_ctypes.c_int ]
GetInstanceTransforms = _hapi.HAPI_GetInstanceTransforms
GetInstanceTransforms.restype = Result
GetInstanceTransforms.argtypes = [ _ctypes.POINTER(Session),
NodeId,
RSTOrder,
_ctypes.POINTER(Transform),
| |
iogrp='iogrp0', thin=True)
self.svc.client.svctask.mkfcmap.assert_called_once_with(source="source_volume", target="test_snapshot",
copyrate=0)
def test_create_snapshot_as_stretched_success(self):
self._prepare_mocks_for_create_snapshot_mkvolume()
self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1:pool2",
flashcopy_2=False)
self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024,
pool='pool1:pool2', iogrp='iogrp0', thin=True)
self.svc.client.svctask.mkfcmap.assert_called_once_with(source="source_volume", target="test_snapshot",
copyrate=0)
def test_create_snapshot_with_specified_source_volume_space_efficiency_success(self):
self._prepare_mocks_for_create_snapshot_mkvolume(source_has_deduplicated_copy=True)
self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool=None,
flashcopy_2=False)
self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024,
pool='pool_name', iogrp='iogrp0',
compressed=True, deduplicated=True)
def test_create_snapshot_with_different_space_efficiency_success(self):
self._prepare_mocks_for_create_snapshot_mkvolume(source_has_deduplicated_copy=True)
self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency="thin", pool=None,
flashcopy_2=False)
self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024,
pool='pool_name', iogrp='iogrp0', thin=True)
def test_create_snapshot_no_deduplicated_copy_success(self):
self._prepare_mocks_for_create_snapshot_mkvolume(support_deduplicated_copy=False)
snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
flashcopy_2=False)
self.assertEqual(1024, snapshot.capacity_bytes)
self.assertEqual('SVC', snapshot.array_type)
self.assertEqual('snap_id', snapshot.id)
def _prepare_mocks_for_create_snapshot_addsnapshot(self):
self.svc.client.svctask.addsnapshot = Mock()
source_volume_to_copy_from = self._get_custom_cli_volume(False, False, pool_name='pool1')
volumes_to_return = [source_volume_to_copy_from, source_volume_to_copy_from]
self.svc.client.svcinfo.lsvdisk.side_effect = self._mock_cli_objects(volumes_to_return)
self.svc.client.svctask.addsnapshot.return_value = Mock(
response=(b'Snapshot, id [0], successfully created or triggered\n', b''))
self.svc.client.svcinfo.lsvolumesnapshot = Mock()
self.svc.client.svcinfo.lsvolumesnapshot.return_value = self._mock_cli_object(self._get_cli_snapshot())
def test_create_snapshot_addsnapshot_success(self):
self._prepare_mocks_for_create_snapshot_addsnapshot()
snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
flashcopy_2=True)
self.assertEqual(1024, snapshot.capacity_bytes)
self.svc.client.svctask.addsnapshot.assert_called_once_with(name='test_snapshot', volumes='test_id',
pool='pool1')
self.svc.client.svcinfo.lsvolumesnapshot.assert_called_once_with(object_id=0)
self.assertEqual('SVC', snapshot.array_type)
self.assertEqual('', snapshot.id)
self.assertEqual('snapshot_id', snapshot.internal_id)
def test_create_snapshot_addsnapshot_not_supported_error(self):
with self.assertRaises(array_errors.Flashcopy2NotSupportedMessage):
self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
flashcopy_2=True)
def _test_create_snapshot_addsnapshot_cli_failure_error(self, error_message_id, expected_error):
self._test_mediator_method_client_cli_failure_error(self.svc.create_snapshot,
('source_volume_name', 'snapshot_name', '', 'pool', True),
self.svc.client.svctask.addsnapshot, error_message_id,
expected_error)
def test_create_snapshot_addsnapshot_raise_exceptions(self):
self.svc.client.svctask.addsnapshot = Mock()
self._test_mediator_method_client_error(self.svc.create_snapshot,
('source_volume_name', 'snapshot_name', '', 'pool'),
self.svc.client.svctask.addsnapshot, Exception, Exception)
self._test_create_snapshot_addsnapshot_cli_failure_error("Failed", CLIFailureError)
self._test_create_snapshot_addsnapshot_cli_failure_error("CMMVC8710E", array_errors.NotEnoughSpaceInPool)
self._test_create_snapshot_addsnapshot_cli_failure_error("CMMVC6017E", array_errors.IllegalObjectName)
self._test_create_snapshot_addsnapshot_cli_failure_error("CMMVC6035E", array_errors.SnapshotAlreadyExists)
self._test_create_snapshot_addsnapshot_cli_failure_error("CMMVC5754E", array_errors.PoolDoesNotExist)
def test_delete_snapshot_no_volume_raise_snapshot_not_found(self):
self._prepare_lsvdisk_to_return_none()
with self.assertRaises(array_errors.ObjectNotFoundError):
self.svc.delete_snapshot("test_snapshot", "internal_id")
def test_delete_snapshot_no_fcmap_id_raise_snapshot_not_found(self):
self._prepare_lsvdisk_to_return_mapless_target_volume()
with self.assertRaises(array_errors.ObjectNotFoundError):
self.svc.delete_snapshot("test_snapshot", "internal_id")
def test_delete_snapshot_call_rmfcmap(self):
self._prepare_mocks_for_delete_snapshot()
fcmaps_as_target = self.fcmaps
self.svc.client.svcinfo.lsfcmap.side_effect = [Mock(as_list=fcmaps_as_target), Mock(as_list=[])]
self.svc.delete_snapshot("test_snapshot", "internal_id")
self.svc.client.svctask.rmfcmap.assert_called_once_with(object_id="test_fc_id", force=True)
def test_delete_snapshot_does_not_remove_hyperswap_fcmap(self):
self._prepare_mocks_for_delete_snapshot()
self._prepare_fcmaps_for_hyperswap()
self.svc.delete_snapshot("test_snapshot", "internal_id")
self.svc.client.svctask.rmfcmap.assert_not_called()
def _test_delete_snapshot_rmvolume_cli_failure_error(self, error_message_id, expected_error, snapshot_id="snap_id"):
self._test_mediator_method_client_cli_failure_error(self.svc.delete_snapshot, (snapshot_id, "internal_id"),
self.svc.client.svctask.rmvolume, error_message_id,
expected_error)
def test_delete_snapshot_rmvolume_errors(self):
self._prepare_mocks_for_delete_snapshot()
self._test_delete_snapshot_rmvolume_cli_failure_error("CMMVC5753E", array_errors.ObjectNotFoundError)
self._test_delete_snapshot_rmvolume_cli_failure_error("CMMVC8957E", array_errors.ObjectNotFoundError)
self._test_delete_snapshot_rmvolume_cli_failure_error("Failed", CLIFailureError)
def test_delete_snapshot_still_copy_fcmaps_not_removed(self):
self._prepare_mocks_for_object_still_in_use()
fcmaps_as_target = self.fcmaps
fcmaps_as_source = self.fcmaps_as_source
fcmaps_as_source[0].status = "not good"
self.svc.client.svcinfo.lsfcmap.side_effect = [Mock(as_list=fcmaps_as_target), Mock(as_list=fcmaps_as_source)]
with self.assertRaises(array_errors.ObjectIsStillInUseError):
self.svc.delete_snapshot("test_snapshot", "internal_id")
def test_delete_snapshot_rmvolume_success(self):
self._prepare_mocks_for_delete_snapshot()
self.svc.delete_snapshot("test_snapshot", "internal_id")
self.assertEqual(2, self.svc.client.svctask.rmfcmap.call_count)
self.svc.client.svctask.rmvolume.assert_called_once_with(vdisk_id="test_snapshot")
@patch("controller.array_action.array_mediator_svc.is_warning_message")
def test_delete_snapshot_with_fcmap_already_stopped_success(self, mock_warning):
self._prepare_mocks_for_delete_snapshot()
mock_warning.return_value = False
self.svc.client.svctask.stopfcmap.side_effect = [CLIFailureError('CMMVC5912E')]
self.svc.delete_snapshot("test_snapshot", "internal_id")
self.assertEqual(2, self.svc.client.svctask.rmfcmap.call_count)
self.svc.client.svctask.rmvolume.assert_called_once_with(vdisk_id="test_snapshot")
@patch("controller.array_action.array_mediator_svc.is_warning_message")
def test_delete_snapshot_with_stopfcmap_raise_error(self, mock_warning):
self._prepare_mocks_for_delete_snapshot()
mock_warning.return_value = False
self.svc.client.svctask.stopfcmap.side_effect = [CLIFailureError('error')]
with self.assertRaises(CLIFailureError):
self.svc.delete_snapshot("test_snapshot", "internal_id")
def _prepare_mocks_for_delete_snapshot_addsnapshot(self):
self.svc.client.svctask.addsnapshot = Mock()
def _test_delete_snapshot_rmsnapshot_cli_failure_error(self, error_message_id, expected_error):
self._test_mediator_method_client_cli_failure_error(self.svc.delete_snapshot, ("", "internal_id"),
self.svc.client.svctask.rmsnapshot, error_message_id,
expected_error)
def test_delete_snapshot_rmsnapshot_errors(self):
self._prepare_mocks_for_delete_snapshot_addsnapshot()
self._test_delete_snapshot_rmsnapshot_cli_failure_error("CMMVC9755E", array_errors.ObjectNotFoundError)
self._test_delete_snapshot_rmsnapshot_cli_failure_error("Failed", CLIFailureError)
def test_delete_snapshot_rmsnapshot_success(self):
self._prepare_mocks_for_delete_snapshot_addsnapshot()
self.svc.delete_snapshot("", "internal_id")
self.svc.client.svctask.rmsnapshot.assert_called_once_with(snapshotid='internal_id')
def test_validate_supported_space_efficiency_raise_error(self):
space_efficiency = "Test"
with self.assertRaises(
array_errors.SpaceEfficiencyNotSupported):
self.svc.validate_supported_space_efficiency(space_efficiency)
def test_validate_supported_space_efficiency_success(self):
no_space_efficiency = ""
self.svc.validate_supported_space_efficiency(no_space_efficiency)
thin_space_efficiency = config.SPACE_EFFICIENCY_THIN
self.svc.validate_supported_space_efficiency(thin_space_efficiency)
thick_space_efficiency = config.SPACE_EFFICIENCY_THICK
self.svc.validate_supported_space_efficiency(thick_space_efficiency)
compressed_space_efficiency = config.SPACE_EFFICIENCY_COMPRESSED
self.svc.validate_supported_space_efficiency(compressed_space_efficiency)
deduplicated_space_efficiency = config.SPACE_EFFICIENCY_DEDUPLICATED
self.svc.validate_supported_space_efficiency(deduplicated_space_efficiency)
deduplicated_thin_space_efficiency = config.SPACE_EFFICIENCY_DEDUPLICATED_THIN
self.svc.validate_supported_space_efficiency(deduplicated_thin_space_efficiency)
deduplicated_compressed_space_efficiency = config.SPACE_EFFICIENCY_DEDUPLICATED_COMPRESSED
self.svc.validate_supported_space_efficiency(deduplicated_compressed_space_efficiency)
def _test_build_kwargs_from_parameters(self, space_efficiency, pool, io_group, volume_group, name, size,
expected_space_efficiency_kwargs):
expected_kwargs = {'name': name, 'unit': 'b', 'size': size, 'pool': pool}
expected_kwargs.update(expected_space_efficiency_kwargs)
if io_group:
expected_kwargs['iogrp'] = io_group
if volume_group:
expected_kwargs['volumegroup'] = volume_group
actual_kwargs = build_kwargs_from_parameters(space_efficiency, pool, io_group, volume_group, name, size)
self.assertDictEqual(actual_kwargs, expected_kwargs)
def test_build_kwargs_from_parameters(self):
size = self.svc._convert_size_bytes(1000)
second_size = self.svc._convert_size_bytes(2048)
self._test_build_kwargs_from_parameters('Thin', 'P1', None, None, 'V1', size, {'thin': True})
self._test_build_kwargs_from_parameters('compressed', 'P2', None, None, 'V2', size, {'compressed': True})
self._test_build_kwargs_from_parameters('dedup_thin', 'P3', 'IOGRP1', 'VOLGRP1', 'V3', second_size,
{'iogrp': 'IOGRP1', 'volumegroup': 'VOLGRP1',
'thin': True, 'deduplicated': True})
self._test_build_kwargs_from_parameters('dedup_compressed', 'P3', None, None, 'V3', second_size,
{'compressed': True, 'deduplicated': True})
self._test_build_kwargs_from_parameters('Deduplicated', 'P3', None, None, 'V3', second_size,
{'compressed': True, 'deduplicated': True})
def test_properties(self):
self.assertEqual(22, SVCArrayMediator.port)
self.assertEqual(512, SVCArrayMediator.minimal_volume_size_in_bytes)
self.assertEqual('SVC', SVCArrayMediator.array_type)
self.assertEqual(63, SVCArrayMediator.max_object_name_length)
self.assertEqual(2, SVCArrayMediator.max_connections)
self.assertEqual(10, SVCArrayMediator.max_lun_retries)
def _prepare_lsnvmefabric_mock(self, host_names, nvme_host_names, connectivity_types):
nvme_host_mocks = []
self.svc.client.svcinfo.lsnvmefabric.return_value = Mock(as_list=nvme_host_mocks)
if config.NVME_OVER_FC_CONNECTIVITY_TYPE in connectivity_types:
nvme_host_names = host_names if nvme_host_names is None else nvme_host_names
if nvme_host_names:
nvme_host_mocks = [Mock(object_name=host_name) for host_name in nvme_host_names]
lsnvmefabric_return_values = [Mock(as_list=[host_mock] * 4) for host_mock in nvme_host_mocks]
self.svc.client.svcinfo.lsnvmefabric.side_effect = lsnvmefabric_return_values
def _prepare_lsfabric_mock_for_get_host(self, host_names, fc_host_names, connectivity_types):
fc_host_mocks = []
self.svc.client.svcinfo.lsfabric.return_value = Mock(as_list=fc_host_mocks)
if config.FC_CONNECTIVITY_TYPE in connectivity_types:
fc_host_names = host_names if fc_host_names is None else fc_host_names
if fc_host_names:
for host_name in fc_host_names:
mock = Mock()
mock.name = host_name
fc_host_mocks.append(mock)
lsfabric_return_values = [Mock(as_list=[host_mock] * 4) for host_mock in fc_host_mocks]
self.svc.client.svcinfo.lsfabric.side_effect = lsfabric_return_values
def _prepare_lshostiplogin_mock(self, host_name, iscsi_host_name, connectivity_types):
iscsi_host_name = host_name if iscsi_host_name is None else iscsi_host_name
if config.ISCSI_CONNECTIVITY_TYPE in connectivity_types and iscsi_host_name:
iscsi_host_mock = Mock(host_name=iscsi_host_name)
self.svc.client.svcinfo.lshostiplogin.return_value = Mock(as_single_element=iscsi_host_mock)
else:
self.svc.client.svcinfo.lshostiplogin.side_effect = CLIFailureError("CMMVC5804E")
def _prepare_mocks_for_get_host_by_identifiers(self, nvme_host_names=None, fc_host_names=None,
iscsi_host_name=None, connectivity_types=None):
host_name = 'test_host_1'
host_names = [host_name]
if connectivity_types is None:
connectivity_types = {config.NVME_OVER_FC_CONNECTIVITY_TYPE,
config.FC_CONNECTIVITY_TYPE,
config.ISCSI_CONNECTIVITY_TYPE}
self._prepare_lsnvmefabric_mock(host_names, nvme_host_names, connectivity_types)
self._prepare_lsfabric_mock_for_get_host(host_names, fc_host_names, connectivity_types)
self._prepare_lshostiplogin_mock(host_name, iscsi_host_name, connectivity_types)
def _prepare_mocks_for_get_host_by_identifiers_no_hosts(self):
self._prepare_mocks_for_get_host_by_identifiers(nvme_host_names=[], fc_host_names=[], iscsi_host_name='')
self.svc.client.svcinfo.lshost = Mock(return_value=[])
def _prepare_mocks_for_get_host_by_identifiers_slow(self, svc_response, custom_host=None):
self._prepare_mocks_for_get_host_by_identifiers_no_hosts()
host_1 = self._get_host_as_munch('host_id_1', 'test_host_1', nqn_list=['nqn.test.1'], wwpns_list=['wwn1'],
iscsi_names_list=['iqn.test.1'])
host_2 = self._get_host_as_munch('host_id_2', 'test_host_2', nqn_list=['nqn.test.2'], wwpns_list=['wwn2'],
iscsi_names_list=['iqn.test.2'])
if custom_host:
host_3 = custom_host
else:
host_3 = self._get_host_as_munch('host_id_3', 'test_host_3', nqn_list=['nqn.test.3'],
wwpns_list=['wwn3'], iscsi_names_list=['iqn.test.3'])
hosts = [host_1, host_2, host_3]
self.svc.client.svcinfo.lshost = Mock()
self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts)
self.svc.client.send_raw_command = Mock()
self.svc.client.send_raw_command.return_value = EMPTY_BYTES, EMPTY_BYTES
svc_response.return_value = hosts
def test_get_host_by_name_success(self):
self.svc.client.svcinfo.lshost.return_value = Mock(
as_single_element=self._get_host_as_munch('host_id_1', 'test_host_1', nqn_list=['nqn.test.1'],
wwpns_list=['wwn1'],
iscsi_names_list=['iqn.test.1']))
host = self.svc.get_host_by_name('test_host_1')
self.assertEqual("test_host_1", host.name)
self.assertEqual(['nvmeofc', 'fc', 'iscsi'], host.connectivity_types)
self.assertEqual(['nqn.test.1'], host.initiators.nvme_nqns)
self.assertEqual(['wwn1'], host.initiators.fc_wwns)
self.assertEqual(['iqn.test.1'], host.initiators.iscsi_iqns)
def test_get_host_by_name_raise_host_not_found(self):
self.svc.client.svcinfo.lshost.return_value = Mock(as_single_element=None)
with self.assertRaises(array_errors.HostNotFoundError):
self.svc.get_host_by_name('test_host_1')
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_returns_host_not_found(self, svc_response):
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
with self.assertRaises(array_errors.HostNotFoundError):
self.svc.get_host_by_host_identifiers(Initiators(['Test_nqn'], ['Test_wwn'], ['Test_iqn']))
def test_get_host_by_identifier_return_host_not_found_when_no_hosts_exist(self):
self._prepare_mocks_for_get_host_by_identifiers_no_hosts()
with self.assertRaises(array_errors.HostNotFoundError):
self.svc.get_host_by_host_identifiers(Initiators(['Test_nqn'], ['Test_wwn'], ['Test_iqn']))
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_raise_multiplehostsfounderror(self, svc_response):
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
with self.assertRaises(array_errors.MultipleHostsFoundError):
self.svc.get_host_by_host_identifiers(Initiators(['Test_nqn'], ['wwn2'], ['iqn.test.3']))
def test_get_host_by_identifiers_raise_multiplehostsfounderror(self):
self._prepare_mocks_for_get_host_by_identifiers(nvme_host_names=['test_host_1'],
fc_host_names=['test_host_2'])
with self.assertRaises(array_errors.MultipleHostsFoundError):
self.svc.get_host_by_host_identifiers(Initiators(['Test_nqn'], ['wwn2'], ['iqn.test.3']))
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_return_iscsi_host(self, svc_response):
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['Test_wwn'], ['iqn.test.2']))
self.assertEqual('test_host_2', hostname)
self.assertEqual([config.ISCSI_CONNECTIVITY_TYPE], connectivity_types)
def test_get_host_by_identifiers_return_iscsi_host(self):
self._prepare_mocks_for_get_host_by_identifiers(iscsi_host_name='test_host_1',
connectivity_types=[config.ISCSI_CONNECTIVITY_TYPE])
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['Test_wwn'], ['iqn.test.2']))
self.assertEqual('test_host_1', hostname)
self.assertEqual({config.ISCSI_CONNECTIVITY_TYPE}, connectivity_types)
self.svc.client.svcinfo.lshostiplogin.assert_called_once_with(object_id='iqn.test.2')
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_no_other_ports_return_iscsi_host(self, svc_response):
host_with_iqn = self._get_host_as_munch('costume_host_id', 'test_costume_host',
iscsi_names_list=['iqn.test.costume'])
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response, custom_host=host_with_iqn)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['Test_wwn'], ['iqn.test.costume']))
self.assertEqual('test_costume_host', hostname)
self.assertEqual([config.ISCSI_CONNECTIVITY_TYPE], connectivity_types)
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_return_iscsi_host_with_list_iqn(self, svc_response):
host_with_iqn_list = self._get_host_as_munch('costume_host_id', 'test_costume_host', wwpns_list=['wwns'],
iscsi_names_list=['iqn.test.s1', 'iqn.test.s2'])
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response, custom_host=host_with_iqn_list)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['Test_wwn'], ['iqn.test.s1']))
self.assertEqual('test_costume_host', hostname)
self.assertEqual([config.ISCSI_CONNECTIVITY_TYPE], connectivity_types)
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_return_nvme_host(self, svc_response):
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['nqn.test.3'], ['Test_wwn'], ['iqn.test.6']))
self.assertEqual('test_host_3', hostname)
self.assertEqual([config.NVME_OVER_FC_CONNECTIVITY_TYPE], connectivity_types)
def test_get_host_by_identifiers_return_nvme_host(self):
self._prepare_mocks_for_get_host_by_identifiers(nvme_host_names=['test_host_3'],
connectivity_types=[config.NVME_OVER_FC_CONNECTIVITY_TYPE])
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['nqn.test.1'], ['Test_wwn'], ['iqn.test.6']))
self.assertEqual('test_host_3', hostname)
self.assertEqual({config.NVME_OVER_FC_CONNECTIVITY_TYPE}, connectivity_types)
self.svc.client.svcinfo.lsnvmefabric.assert_called_once_with(remotenqn='nqn.test.1')
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_no_other_ports_return_nvme_host(self, svc_response):
host_with_nqn = self._get_host_as_munch('costume_host_id', 'test_costume_host',
nqn_list=['nqn.test.costume'])
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response, custom_host=host_with_nqn)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['nqn.test.costume'], ['Test_wwn'], ['Test_iqn']))
self.assertEqual('test_costume_host', hostname)
self.assertEqual([config.NVME_OVER_FC_CONNECTIVITY_TYPE], connectivity_types)
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_return_fc_host(self, svc_response):
host_1 = self._get_host_as_munch('host_id_1', 'test_host_1', wwpns_list=['wwn1'], iscsi_names_list=[])
host_2 = self._get_host_as_munch('host_id_2', 'test_host_2', wwpns_list=['wwn2'], iscsi_names_list=[])
host_3 = self._get_host_as_munch('host_id_3', 'test_host_3', wwpns_list=['wwn3', 'wwn4'],
iscsi_names_list=['iqn.test.3'])
hosts = [host_1, host_2, host_3]
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['wwn4', 'WWN3'], ['iqn.test.6']))
self.assertEqual('test_host_3', hostname)
self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_types)
svc_response.return_value = hosts
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['wwn3'], ['iqn.test.6']))
self.assertEqual('test_host_3', hostname)
self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_types)
def test_get_host_by_identifiers_return_fc_host(self):
self._prepare_mocks_for_get_host_by_identifiers(fc_host_names=['test_host_3'],
connectivity_types=[config.FC_CONNECTIVITY_TYPE])
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['nqn.test.1'], ['Test_wwn'], ['iqn.test.6']))
self.assertEqual('test_host_3', hostname)
self.assertEqual({config.FC_CONNECTIVITY_TYPE}, connectivity_types)
self.svc.client.svcinfo.lsfabric.assert_called_once_with(wwpn='Test_wwn')
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_no_other_ports_return_fc_host(self, svc_response):
host_with_wwpn = self._get_host_as_munch('costume_host_id', 'test_costume_host', wwpns_list=['WWNs'])
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response, custom_host=host_with_wwpn)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['Test_wwn', 'WWNs'], ['Test_iqn']))
self.assertEqual('test_costume_host', hostname)
self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_types)
def test_get_host_by_identifiers_no_other_ports_return_fc_host(self):
self._prepare_mocks_for_get_host_by_identifiers(fc_host_names=['', 'test_host_2'],
connectivity_types=[config.FC_CONNECTIVITY_TYPE])
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['Test_nqn'], ['Test_wwn', 'WWNs'], ['Test_iqn']))
self.assertEqual('test_host_2', hostname)
self.assertEqual({config.FC_CONNECTIVITY_TYPE}, connectivity_types)
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_with_wrong_fc_iscsi_raise_not_found(self, svc_response):
host_1 = self._get_host_as_munch('host_id_1', 'test_host_1', wwpns_list=['wwn1'], iscsi_names_list=[])
host_2 = self._get_host_as_munch('host_id_2', 'test_host_2', wwpns_list=['wwn3'],
iscsi_names_list=['iqn.test.2'])
host_3 = self._get_host_as_munch('host_id_3', 'test_host_3', wwpns_list=['wwn3'],
iscsi_names_list=['iqn.test.3'])
hosts = [host_1, host_2, host_3]
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
with self.assertRaises(array_errors.HostNotFoundError):
self.svc.get_host_by_host_identifiers(Initiators(['Test_nqn'], [], []))
svc_response.return_value = hosts
with self.assertRaises(array_errors.HostNotFoundError):
self.svc.get_host_by_host_identifiers(Initiators(['Test_nqn'], ['a', 'b'], ['123']))
@patch.object(SVCResponse, 'as_list', new_callable=PropertyMock)
def test_get_host_by_identifiers_slow_return_nvme_fc_and_iscsi(self, svc_response):
self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response)
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['nqn.test.2'], ['WWN2'], ['iqn.test.2']))
self.assertEqual('test_host_2', hostname)
self.assertEqual(
{config.NVME_OVER_FC_CONNECTIVITY_TYPE, config.FC_CONNECTIVITY_TYPE, config.ISCSI_CONNECTIVITY_TYPE},
set(connectivity_types))
def test_get_host_by_identifiers_return_nvme_fc_and_iscsi(self):
self._prepare_mocks_for_get_host_by_identifiers()
hostname, connectivity_types = self.svc.get_host_by_host_identifiers(
Initiators(['nqn.test.1'], ['WWN1'], ['iqn.test.1']))
self.assertEqual('test_host_1', hostname)
self.assertEqual(
{config.NVME_OVER_FC_CONNECTIVITY_TYPE, config.FC_CONNECTIVITY_TYPE, config.ISCSI_CONNECTIVITY_TYPE},
connectivity_types)
def _get_host_as_munch(self, host_id, host_name, nqn_list=None, wwpns_list=None, iscsi_names_list=None,
portset_id=None):
host = Munch(id=host_id, name=host_name)
if nqn_list:
host.nqn = nqn_list
if wwpns_list:
host.WWPN = wwpns_list
if iscsi_names_list:
host.iscsi_name = iscsi_names_list
if portset_id:
host.portset_id = portset_id
return host
def _get_hosts_list_result(self, hosts_dict):
return [Munch(host_dict) for host_dict in hosts_dict]
def test_get_volume_mappings_empty_mapping_list(self):
self.svc.client.svcinfo.lsvdiskhostmap.return_value = []
mappings = self.svc.get_volume_mappings("volume")
self.assertEqual({}, mappings)
def _test_get_volume_mappings_lsvdisk_cli_failure_error(self, volume_name, error_message_id, expected_error):
self._test_mediator_method_client_cli_failure_error(self.svc.get_volume_mappings, (volume_name,),
self.svc.client.svcinfo.lsvdisk, error_message_id,
expected_error)
def test_get_volume_mappings_lsvdisk_cli_failure_errors(self):
self._test_get_volume_mappings_lsvdisk_cli_failure_error("\xff", 'CMMVC6017E', array_errors.IllegalObjectID)
self._test_get_volume_mappings_lsvdisk_cli_failure_error("!@#", 'CMMVC5741E', array_errors.IllegalObjectID)
def test_get_volume_mappings_on_volume_not_found(self):
self.svc.client.svcinfo.lsvdiskhostmap.side_effect = [
svc_errors.CommandExecutionError('Failed')]
with self.assertRaises(array_errors.ObjectNotFoundError):
self.svc.get_volume_mappings('volume')
def test_get_volume_mappings_success(self):
map1 = Munch({'id': '51', 'name': 'peng', 'SCSI_id': '0',
'host_id': '12', 'host_name': 'Test_P'})
map2 = Munch({'id': '52', 'name': 'peng', 'SCSI_id': '1',
'host_id': '18', 'host_name': 'Test_W'})
self.svc.client.svcinfo.lsvdiskhostmap.return_value = [map1, map2]
mappings = self.svc.get_volume_mappings("volume")
self.assertEqual({'Test_P': '0', 'Test_W': '1'}, mappings)
def test_get_free_lun_raises_host_not_found_error(self):
self.svc.client.svcinfo.lshostvdiskmap.side_effect = [
svc_errors.CommandExecutionError('Failed')]
with self.assertRaises(array_errors.HostNotFoundError):
self.svc._get_free_lun('host')
def _test_get_free_lun_host_mappings(self, lun_list, expected_lun='0'):
maps = []
for index, lun in enumerate(lun_list):
maps.append(Munch({'id': index, 'name': 'peng{}'.format(index), 'SCSI_id': lun,
'host_id': index, 'host_name': 'Test_{}'.format(index)}))
self.svc.client.svcinfo.lshostvdiskmap.return_value = maps
lun = self.svc._get_free_lun('host')
if lun_list:
self.assertNotIn(lun, lun_list)
self.assertEqual(lun, expected_lun)
@patch("controller.array_action.array_mediator_svc.choice")
def test_get_free_lun_with_no_host_mappings(self, random_choice):
random_choice.return_value = '0'
self._test_get_free_lun_host_mappings([])
@patch.object(SVCArrayMediator, "MAX_LUN_NUMBER", 2)
@patch.object(SVCArrayMediator, "MIN_LUN_NUMBER", 0)
def test_get_free_lun_success(self):
self._test_get_free_lun_host_mappings(('1', '2'))
@patch.object(SVCArrayMediator, "MAX_LUN_NUMBER", 4)
@patch.object(SVCArrayMediator, "MIN_LUN_NUMBER", 0)
@patch("controller.array_action.array_mediator_svc.LUN_INTERVAL", 1)
def test_get_free_lun_in_interval_success(self):
        self._test_get_free_lun_host_mappings(('0',
'''
<NAME> - 308020734
<NAME> - 201242120
Compiler - Compilation Engine
'''
from SymbolTable import SymbolTable
from JackTokenizer import *
statments = ['let','if','while','do','return']
entityOp = ['<', '>', '"', '&']
opperators = ['+','-','*','/','&','|','<','>','='] + entityOp
class CompliationEngine(object):
def __init__(self, agent):
self._agent = agent
self._symbol_table = SymbolTable()
self._dynamic_label_counter = 0
# pre processing
if self._agent.advance() != 'class':# 'class' kw
print("Warning - __init__")
self._class_name = self._agent.advance() # 'class_name' identifier
self._agent.advance() # '{' sym
# class: 'class' className '{' classVarDec* subroutineDec* '}'
def compileClass(self):
        self._agent.advance() # move to the first token inside the class body
        # Run through each of the class variable declarations.
        # Note that there could be zero or more of them
while self._agent.cur in ['field', 'static']:
self.compileClassVarDec()
        # Run through each of the subroutine declarations.
        # Note that there could be zero or more of them
while self._agent.cur in ['function', 'method', 'constructor']:
self.compileSubroutineDec()
self._agent.close()
# ('constructor' | 'function' | 'method') ('void' | type) subroutineName
# '(' parameterList ')' subroutineBody
def compileSubroutineDec(self):
f_type = self._agent.cur # 'constructor' | 'function' | 'method' kw
self._agent.advance() # 'void' | 'type' kw/identifier
f_name = self._agent.advance() # 'subroutineName' identifier
self._symbol_table.startSubRoutine()
if f_type == 'method':
self._symbol_table.define(['this', self._class_name, 'argument'])
self._agent.advance() # '('
self._agent.advance()
if self._agent.cur != ')': # Extra validation for edge cases
self.compileParameterList()
self.compileSubroutineBody(f_type, f_name)
# '{' varDec* statements '}'
def compileSubroutineBody(self, f_type, f_name):
self._agent.advance() # '{' symbol
token = self._agent.advance() # Statements
# Run through each of the subroutine variable declarations.
# Note that there could be zero or more of them
if self._agent.cur == 'var':
self.compileVarDec()
local_variables = self._symbol_table.varCount('local')
# VM Code preps
self._agent.writeFunction(self._class_name, f_name, local_variables)
# Handling Constructor
if f_name == 'new':
no_of_fields = self._symbol_table.varCount('field')
self._agent.writePush('constant', no_of_fields)
self._agent.writeCall('Memory', 'alloc', 1)
self._agent.writePop('pointer', 0)
# Handling instance Method
if f_type == 'method':
self._agent.writePush('argument', 0)
self._agent.writePop('pointer', 0)
if token != '}': # Extra validation for edge cases
self.compileStatements()
self._agent.advance() # '}' symbol
def compileParameterList(self):
        # The parameter list could be empty, so keep going while there
        # are still parameters in the list, i.e. the current token is not a closing bracket
# For each parameter in the list
token = self._agent.cur
while (token in ['int','char','boolean']) or isIdentifier(token):
id_type = self._agent.cur # 'int' | 'bool' | 'string' | 'type' kw/identifier
identifier = self._agent.advance() # 'varName' identifier
self._symbol_table.define([identifier, id_type, 'argument'])
# Check if it's a comma, process if it is.
# If it's not, it's the last variable name, so skip it
token = self._agent.advance()
if token == ',':
self._agent.advance()
self.compileParameterList()
# 'var' type varName (',' varName)* ';'
def compileVarDec(self):
# Loop to deal with all variable names, including the first one.
while self._agent.cur == 'var':
id_type = self._agent.advance() # 'int' | 'bool' | 'string' | 'type' kw/idenitfier
identifier = self._agent.advance() # 'varName' identifier
self._symbol_table.define([identifier, id_type, 'local'])
self._agent.advance() # ',' symbol
# Handling case of int var1, var2, var3; all vars should have the same type
while self._agent.cur == ',':
identifier = self._agent.advance() # 'varName' identifier
self._symbol_table.define([identifier, id_type, 'local'])
self._agent.advance()# ',' symbol
self._agent.advance() # ';' closing symbol
def compileStatements(self):
        # Compile all the statements in the current statement block
while self._agent.cur in statments:
if self._agent.cur == 'let':
self.compileLet()
elif self._agent.cur == 'if':
self.compileIf()
elif self._agent.cur == 'while':
self.compileWhile()
elif self._agent.cur == 'do':
self.compileDo()
elif self._agent.cur == 'return':
self.compileReturn()
else:
print("ERROR IN COMPILING A STATEMENT, EXIT NOW. GIVEN CURSOR: ", token)
# 'let' varName ('[' expression ']')? '=' expression ';'
def compileLet(self):
identifier = self._agent.advance() # 'varName' identifier
segment = self._symbol_table.kindOf(identifier)
index = str(self._symbol_table.indexOf(identifier))
is_array = self._agent.advance() == '['
if is_array:
self._agent.advance()
self.compileExpression()
self._agent.writePush(segment, index)
self._agent.writeArithmatic('add')
self._agent.advance()
self._agent.advance() # '=' symbol
self.compileExpression()
# Got a little help from github to crack it down...
# https://github.com/havivha/Nand2Tetris/blob/master/11/JackAnalyzer/Parser.py (line 246)
if is_array:
self._agent.writePop('temp', 0)
self._agent.writePop('pointer', 1)
self._agent.writePush('temp', 0)
self._agent.writePop('that', 0)
else:
self._agent.writePop(segment, index)
self._agent.advance() # ';' symbol
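        # For reference, the VM this emits for an array assignment such as `let a[i] = x;`
        # (segments and indices are whatever the symbol table resolved for a):
        #   <code for i>
        #   push <segment of a> <index of a>
        #   add                  // address of a[i]
        #   <code for x>
        #   pop temp 0           // park the value
        #   pop pointer 1        // THAT = &a[i]
        #   push temp 0
        #   pop that 0           // *(a + i) = x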
# 'if' '(' expression ')' '{' statements '}' ( 'else' '{' statements '}' )?
def compileIf(self):
self._dynamic_label_counter += 1 # for linear label names
        self._agent.advance() # '(' symbol
self._agent.advance() # So curr will be ready
self.compileExpression()
self._agent.writeArithmatic('~')
label = ".".join([self._class_name, 'if', str(self._dynamic_label_counter), 'LABEL1'])
self._agent.writeIfGoto(label)
goto_label = ".".join([self._class_name, 'if', str(self._dynamic_label_counter), 'LABEL2'])
self._agent.advance()
if self._agent.advance() != '}': # Making sure
self.compileStatements()
self._agent.writeGoto(goto_label)
self._agent.writeLabel(label)
# Only process an else clause if it exists
if self._agent.advance() == "else": # 'else' kw
self._agent.advance() # '{' symbol
if self._agent.advance() != '}':
self.compileStatements()
            self._agent.advance() # '}' symbol
self._agent.writeLabel(goto_label)
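        # For reference, the branch layout this emits (N is the running label counter):
        #   <code for condition>
        #   not                               // the writeArithmatic('~') above
        #   if-goto <class>.if.N.LABEL1       // condition false -> skip the then-branch
        #   <then statements>
        #   goto <class>.if.N.LABEL2
        # label <class>.if.N.LABEL1
        #   <else statements, if present>
        # label <class>.if.N.LABEL2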
# 'while' '(' expression ')' '{' statements '}'
def compileWhile(self):
self._dynamic_label_counter += 1 # for linear label names
label = '.'.join([self._class_name, 'w', str(self._dynamic_label_counter), 'LABEL1'])
self._agent.writeLabel(label)
        self._agent.advance() # '(' symbol
        self._agent.advance() # Prep for the expression
self.compileExpression()
self._agent.writeArithmatic('~')
if_label = '.'.join([self._class_name, 'w', str(self._dynamic_label_counter), 'LABEL2'])
self._agent.writeIfGoto(if_label)
self._agent.advance() # '{' symbol
if self._agent.advance() != '}':
self.compileStatements()
self._agent.writeGoto(label)
self._agent.writeLabel(if_label)
self._agent.advance()
# 'do' subroutineCall ';
def compileDo(self):
identifier = self._agent.advance()
token = self._agent.advance() # like peek - '.' or '('
        # Deal with the subroutine call ad-hoc. Might not be the correct way to go about it,
        # since the documentation does not mention what to do with this.
        # Regardless, this is the same as in `compileTerm`, minus the return
        # statements and the closing term tag
        # Deal with '.' as the next token, i.e. with a dot-reference subroutineCall
# ( className | varName) '.' subroutineName '(' expressionList ')'
class_name = identifier
arg_count = 0
id_type = None
if token == ".":
method_or_function = self._agent.advance()
self._agent.advance() # '('
id_type = self._symbol_table.typeOf(identifier)
# Deal with '(' as the next token, i.e. with a regular subroutineCall
# subroutineName '(' expressionList ')'
if token == '(':
class_name = self._class_name
method_or_function = identifier
arg_count += 1
self._agent.writePush('pointer', '0')
token = self._agent.advance()
if id_type:
segment = self._symbol_table.kindOf(identifier)
index = self._symbol_table.indexOf(identifier)
self._agent.writePush(segment, index)
arg_count += 1
class_name = id_type
if token != ')':
            arg_count += self.compileExpressionList() # Returns the number of arguments we need to add
self._agent.writeCall(class_name, method_or_function, arg_count)
self._agent.advance()
self._agent.writePop('temp', '0')
self._agent.advance()
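        # For reference, a call like `do Output.printInt(x);` compiles to roughly:
        #   <code for x>
        #   call Output.printInt 1
        #   pop temp 0            // a do-statement discards the return value
        # A method call on an object variable also pushes that object's segment/index
        # first, so it becomes the hidden `this` argument.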
# 'return' expression? ';'
def compileReturn(self):
if self._agent.advance() == ';':
# Deal with end function
self._agent.writePush('constant', '0')
else:
# Deal with an optional expression
self.compileExpression()
self._agent.writeReturn()
self._agent.advance()
# (expression (',' expression)* )?
def compileExpressionList(self):
        # The next thing after an expression list is always a closing parenthesis.
        # If we're not at a closing parenthesis, there's at least one expression here,
        # so compile at least one expression
args_to_add = 1
self.compileExpression()
        # If there's a comma, there are at least two expressions here. Parse all of them
while self._agent.cur == ",":
args_to_add += 1
self._agent.advance() # ',' symbol
self.compileExpression()
return args_to_add
# term (op term)*
def compileExpression(self):
        # An expression is always, at the very least, a term
self.compileTerm()
possible_operator = self._agent.cur
        # Deal with operators / entity operators (&lt; etc.)
        # Process each operator and compile the term after it: term (op term)*
        while possible_operator in opperators:
            self._agent.advance()
            self.compileTerm()
            self._agent.writeArithmatic(possible_operator) # 'op' symbol
            possible_operator = self._agent.cur
# integerConstant | stringConstant | keywordConstant | varName |
# varName '[' expression ']' | subroutineCall | '(' expression ')' | (unaryOp term)
def compileTerm(self):
token = self._agent.cur
# Since this is the most complicated part in the compiler, it's broken
# into parts that often repeat themselves. Easier debugging and all
# Deal with integer constants
if isIntegerConstant(token):
self._agent.writePush('constant', token)
# Deal with keyword constants
elif token == 'true':
self._agent.writePush('constant', '1')
self._agent.writeArithmatic('neg')
elif token in ['false', 'null']:
self._agent.writePush('constant', '0')
elif token == 'this':
self._agent.writePush('pointer', '0')
# Dealing with Unary operators
elif token == '-':
self._agent.advance()
self.compileTerm()
self._agent.writeArithmatic('neg')
return
elif token == "~":
if self._agent.advance() != '(':
self.compileTerm()
else:
self._agent.advance()
self.compileExpression()
self._agent.advance()
self._agent.writeArithmatic('not')
return
# Deal with '(' expression ')'
elif token == "(":
token = self._agent.advance()
self.compileExpression()
# Deal with '[' as the next token, i.e. with referencing an index in an array
# varName '[' expression ']'
elif self._agent.peek() == "[":
index = self._symbol_table.indexOf(token)
segment = self._symbol_table.kindOf(token)
self._agent.writePush(segment, index)
self._agent.advance()
token = self._agent.advance()
self.compileExpression()
self._agent.writeArithmatic('add')
self._agent.writePop('pointer', '1')
self._agent.writePush('that', '0')
        # Deal with '.' as the next token, i.e. with a dot-reference subroutineCall
# ( className | varName) '.' subroutineName '(' expressionList ')'
elif self._agent.peek() == ".":
identifier = token
self._agent.advance()
            f_name
<filename>qiling/qiling/os/windows/dlls/user32.py
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
from qiling.os.windows.fncc import *
from qiling.os.const import *
from qiling.os.windows.utils import *
from qiling.os.windows.const import *
from qiling.const import *
from qiling.os.windows.structs import *
from .const import *
dllname = 'user32_dll'
#ATOM RegisterClassExA(
# const WNDCLASSEXA *Arg1
#);
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={'lpWndClass': 'POINTER'})
def hook_RegisterClassExA(ql, address, params):
ret = 0
return ret
#BOOL UpdateWindow(
# HWND hWnd
#);
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_UpdateWindow(ql, address, params):
ret = 0
return ret
# HWND CreateWindowExA(
# DWORD dwExStyle,
# LPCSTR lpClassName,
# LPCSTR lpWindowName,
# DWORD dwStyle,
# int X,
# int Y,
# int nWidth,
# int nHeight,
# HWND hWndParent,
# HMENU hMenu,
# HINSTANCE hInstance,
# LPVOID lpParam
#);
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={'hMenu': 'POINTER'})
def hook_CreateWindowExA(ql, address, params):
ret = 0
return ret
# INT_PTR DialogBoxParamA(
# HINSTANCE hInstance,
# LPCSTR lpTemplateName,
# HWND hWndParent,
# DLGPROC lpDialogFunc,
# LPARAM dwInitParam
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_DialogBoxParamA(ql, address, params):
ret = 0
return ret
# UINT GetDlgItemTextA(
# HWND hDlg,
# int nIDDlgItem,
# LPSTR lpString,
# int cchMax
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetDlgItemTextA(ql, address, params):
ret = 0
hDlg = params["hDlg"]
nIDDlgItem = params["nIDDlgItem"]
lpString = params["lpString"]
cchMax = params["cchMax"]
ql.os.stdout.write(b"Input DlgItemText :\n")
string = ql.os.stdin.readline().strip()[:cchMax]
ret = len(string)
ql.mem.write(lpString, string)
return ret
# BOOL EndDialog(
# HWND hDlg,
# INT_PTR nResult
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_EndDialog(ql, address, params):
ret = 1
return ret
# HWND GetDesktopWindow();
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetDesktopWindow(ql, address, params):
pass
# BOOL OpenClipboard(
# HWND hWndNewOwner
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_OpenClipboard(ql, address, params):
return ql.os.clipboard.open(params['hWndNewOwner'])
# BOOL CloseClipboard();
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_CloseClipboard(ql, address, params):
return ql.os.clipboard.close()
# HANDLE SetClipboardData(
# UINT uFormat,
# HANDLE hMem
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_SetClipboardData(ql, address, params):
try:
data = bytes(params['hMem'], 'ascii', 'ignore')
except (UnicodeEncodeError, TypeError):
data = b""
return ql.os.clipboard.set_data(params['uFormat'], data)
# HANDLE GetClipboardData(
# UINT uFormat
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetClipboardData(ql, address, params):
data = ql.os.clipboard.get_data(params['uFormat'])
if data:
addr = ql.os.heap.alloc(len(data))
ql.mem.write(addr, data)
return addr
else:
ql.log.debug('Failed to get clipboard data')
return 0
# BOOL IsClipboardFormatAvailable(
# UINT format
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_IsClipboardFormatAvailable(ql, address, params):
rtn = ql.os.clipboard.format_available(params['uFormat'])
return rtn
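# Illustrative sketch only: these clipboard hooks are exercised automatically when a
# Windows sample that imports them is emulated under Qiling. The sample path and
# rootfs below are placeholders, not files shipped with this module.
#
#   from qiling import Qiling
#   ql = Qiling([r"rootfs/x86_windows/bin/clipboard_sample.exe"], r"rootfs/x86_windows")
#   ql.run()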
# UINT MapVirtualKeyW(
# UINT uCode,
# UINT uMapType
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_MapVirtualKeyW(ql, address, params):
map_value = params["uMapType"]
code_value = params["uCode"]
map_dict = MAP_VK.get(map_value, None)
if map_dict is not None:
code = map_dict.get(code_value, None)
if code is not None:
return code
else:
ql.log.debug("Code value %x" % code_value)
raise QlErrorNotImplemented("API not implemented")
else:
ql.log.debug("Map value %x" % map_value)
raise QlErrorNotImplemented("API not implemented")
# SHORT GetKeyState(
# int nVirtKey
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'int': 'UINT'})
def hook_GetKeyState(ql, address, params):
let = chr(params["nVirtKey"])
ql.log.debug(let)
UP = 2
DOWN = 0
return UP
# UINT RegisterWindowMessageA(
# LPCSTR lpString
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_RegisterWindowMessageA(ql, address, params):
return hook_RegisterWindowMessageW.__wrapped__(ql, address, params)
# UINT RegisterWindowMessageW(
# LPCWSTR lpString
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_RegisterWindowMessageW(ql, address, params):
# maybe some samples really use this and we need to have a real implementation
return 0xD10C
# HWND GetActiveWindow();
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetActiveWindow(ql, address, params):
# maybe some samples really use this and we need to have a real implementation
return 0xD10C
# HWND GetLastActivePopup(
# HWND hWnd
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetLastActivePopup(ql, address, params):
hwnd = params["hWnd"]
return hwnd
# BOOL GetPhysicalCursorPos(
# LPPOINT lpPoint
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetPhysicalCursorPos(ql, address, params):
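    # claim success without writing to lpPoint; sufficient for samples that only check the BOOL result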
return 1
# int GetSystemMetrics(
# int nIndex
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetSystemMetrics(ql, address, params):
info = params["nIndex"]
if info == SM_CXICON or info == SM_CYICON:
# Size of icon
return 32
elif info == SM_CXVSCROLL:
return 4
elif info == SM_CYHSCROLL:
return 300
else:
ql.log.debug("Info value %x" % info)
raise QlErrorNotImplemented("API not implemented")
# HDC GetDC(
# HWND hWnd
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetDC(ql, address, params):
handler = params["hWnd"]
    # Maybe we should really emulate the handling of screens and windows. It is going to be a pain
return 0xD10C
# int GetDeviceCaps(
# HDC hdc,
# int index
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetDeviceCaps(ql, address, params):
    # Maybe we should really emulate the handling of screens and windows. It is going to be a pain
return 1
# int ReleaseDC(
# HWND hWnd,
# HDC hDC
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HWND': 'POINTER'})
def hook_ReleaseDC(ql, address, params):
return 1
# DWORD GetSysColor(
# int nIndex
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetSysColor(ql, address, params):
info = params["nIndex"]
return 0
# HBRUSH GetSysColorBrush(
# int nIndex
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetSysColorBrush(ql, address, params):
info = params["nIndex"]
return 0xd10c
# HCURSOR LoadCursorA(
# HINSTANCE hInstance,
# LPCSTR lpCursorName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HINSTANCE': 'POINTER', 'LPCSTR': 'INT'})
def hook_LoadCursorA(ql, address, params):
return 0xd10c
# HCURSOR LoadCursorFromFileA(
# LPCSTR lpFileName
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_LoadCursorFromFileA(ql, address, params):
return hook_LoadCursorFromFileW.__wrapped__(ql, address, params)
# HCURSOR LoadCursorFromFileW(
#   LPCWSTR lpFileName
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_LoadCursorFromFileW(ql, address, params):
handle = Handle()
ql.os.handle_manager.append(handle)
return handle.id
# UINT GetOEMCP();
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetOEMCP(ql, address, params):
return OEM_US
# int LoadStringW(
# HINSTANCE hInstance,
# UINT uID,
# LPSTR lpBuffer,
# int cchBufferMax
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HINSTANCE': 'POINTER'})
def hook_LoadStringW(ql, address, params):
dst = params["lpBuffer"]
max_len = params["cchBufferMax"]
# FIXME, should not be hardcoded
string = "AAAABBBBCCCCDDDD" + "\x00"
    if 0 < max_len < len(string):
        # the caller's buffer is too small: truncate and keep the NUL terminator
        string = string[:max_len - 1] + "\x00"
ql.mem.write(dst, string.encode("utf-16le"))
# should not count the \x00 byte
return len(string) - 1
# int LoadStringA(
# HINSTANCE hInstance,
# UINT uID,
# LPSTR lpBuffer,
# int cchBufferMax
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HINSTANCE': 'POINTER'})
def hook_LoadStringA(ql, address, params):
dst = params["lpBuffer"]
max_len = params["cchBufferMax"]
# FIXME, should not be hardcoded
string = "AAAABBBBCCCCDDDD" + "\x00"
    if 0 < max_len < len(string):
        # the caller's buffer is too small: truncate and keep the NUL terminator
        string = string[:max_len - 1] + "\x00"
ql.mem.write(dst, string.encode())
# should not count the \x00 byte
return len(string) - 1
# BOOL MessageBeep(
# UINT uType
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_MessageBeep(ql, address, params):
return 1
# HHOOK SetWindowsHookExA(
# int idHook,
# HOOKPROC lpfn,
# HINSTANCE hmod,
# DWORD dwThreadId
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HINSTANCE': 'POINTER'})
def hook_SetWindowsHookExA(ql, address, params):
    # Should install the hook procedure into a hook chain; just hand back lpfn as a fake HHOOK
hook = params["lpfn"]
return hook
# BOOL UnhookWindowsHookEx(
# HHOOK hhk
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_UnhookWindowsHookEx(ql, address, params):
return 1
# BOOL ShowWindow(
# HWND hWnd,
# int nCmdShow
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HWND': 'POINTER'})
def hook_ShowWindow(ql, address, params):
    # return value depends on the sample's goal (evasion vs. just displaying an error)
return 0x1
# HICON LoadIconA(
# HINSTANCE hInstance,
# LPCSTR lpIconName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HINSTANCE': 'POINTER', 'LPCSTR': 'UINT'})
def hook_LoadIconA(ql, address, params):
return hook_LoadIconW.__wrapped__(ql, address, params)
# HICON LoadIconW(
# HINSTANCE hInstance,
# LPCWSTR lpIconName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HINSTANCE': 'POINTER', 'LPCWSTR': 'UINT'})
def hook_LoadIconW(ql, address, params):
    inst = params["hInstance"]
    name = params["lpIconName"]
if name not in (IDI_APPLICATION, IDI_ASTERISK, IDI_ERROR, IDI_EXCLAMATION, IDI_HAND,
IDI_INFORMATION, IDI_QUESTION, IDI_SHIELD, IDI_WARNING, IDI_WINLOGO):
return 0
return 1
# BOOL IsWindow(
# HWND hWnd
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HWND': 'POINTER'})
def hook_IsWindow(ql, address, params):
    # return value depends on the sample's goal (evasion vs. just displaying an error)
return 0x1
# LRESULT SendMessageA(
# HWND hWnd,
# UINT Msg,
# WPARAM wParam,
# LPARAM lParam
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HWND': 'POINTER', 'LPARAM': 'UINT'})
def hook_SendMessageA(ql, address, params):
# TODO don't know how to get right return value
return 0xD10C
# LRESULT DefWindowProcA(
# HWND hWnd,
# UINT Msg,
# WPARAM wParam,
# LPARAM lParam
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'HWND': 'POINTER', 'LPARAM': 'UINT'})
def hook_DefWindowProcA(ql, address, params):
# TODO don't know how to get right return value
return 0xD10C
# LPWSTR CharNextW(
# LPCWSTR lpsz
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCWSTR': 'POINTER'})
def hook_CharNextW(ql, address, params):
    # Return a pointer to the next character, or the same pointer if the current character is \x00
point = params["lpsz"][0]
string = ql.os.utils.read_wstring(point)
params["lpsz"] = string
if len(string) == 0:
return point
else:
return point + 1
# LPWSTR CharNextA(
# LPCSTR lpsz
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_CharNextA(ql, address, params):
    # Return a pointer to the next character, or the same pointer if the current character is \x00
point = params["lpsz"][0]
string = ql.os.utils.read_cstring(point)
params["lpsz"] = string
if len(string) == 0:
return point
else:
return point + 1
# LPWSTR CharPrevW(
# LPCWSTR lpszStart,
# LPCWSTR lpszCurrent
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCWSTR': 'POINTER'})
def hook_CharPrevW(ql, address, params):
    # Return a pointer to the previous character, or lpszStart if already at the beginning of the string
current = params["lpszCurrent"]
strcur = ql.os.utils.read_wstring(current)
start = params["lpszStart"]
strstart = ql.os.utils.read_wstring(start)
params["lpszStart"] = strstart
params["lpszCurrent"] = strcur
if start == current:
return start
return current - 1
# LPWSTR CharPrevA(
# LPCSTR lpszStart,
# LPCSTR lpszCurrent
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_CharPrevA(ql, address, params):
    # Return a pointer to the previous character, or lpszStart if already at the beginning of the string
current = params["lpszCurrent"]
strcur = ql.os.utils.read_cstring(current)
start = params["lpszStart"]
strstart = ql.os.utils.read_cstring(start)
params["lpszStart"] = strstart
params["lpszCurrent"] = strcur
if start == current:
return start
return current - 1
# int WINAPIV wsprintfW(
# LPWSTR ,
# LPCWSTR ,
# ...
# );
@winsdkapi(cc=CDECL, dllname=dllname, replace_params={'Buffer': POINTER, 'Format': WSTRING})
def hook_wsprintfW(ql: Qiling, address: int, params):
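    # variadic sprintf: read one extra integer-sized argument per '%' in the format string
    # (a rough heuristic) and let the OS-level sprintf helper render the result into Buffer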
Buffer = params['Buffer']
Format = params['Format']
if Format == 0:
Format = "(null)"
nargs = Format.count("%")
ptypes = (POINTER, POINTER) + (PARAM_INTN, ) * nargs
args = ql.os.fcall.readParams(ptypes)[2:]
count = ql.os.utils.sprintf(Buffer, Format, args, wstring=True)
ql.os.utils.update_ellipsis(params, args)
return count
# HWND GetForegroundWindow();
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetForegroundWindow(ql, address, params):
return 0xF02E620D # Value so we can recognize inside dumps
# BOOL MoveWindow(
# HWND hWnd,
# int X,
# int Y,
# int nWidth,
# int nHeight,
# BOOL bRepaint
# )
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_MoveWindow(ql, address, params):
return 1
# int GetKeyboardType(
# int nTypeFlag
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'int': 'UINT'})
def hook_GetKeyboardType(ql, address, params):
    # stub: report an enhanced 101/102-key keyboard (type 4) for any nTypeFlag
    return 4
<reponame>erdavila/typedecl
#!/usr/bin/env python3
"""
This script generates a C++ source file containing several definitions for types
construction variations. Its intention is to help develop and test the function
typedecl<>().
The types are constructed in steps. E.g.:
using A1 = int;
using A2 = A1[];
using A3 = A2*;
using A4 = const A3;
Then, for each type it includes an assertion for the final type declaration
generated by this script:
assert((std::is_same<A4, int(*const)[]>::value));
and an assertion for the result of the typedecl<>() function call:
assert(typedecl<A4>() == "int(*const)[]");
Class Hierarchy
---------------
Type¹
├──BasicType
└──Operation¹
├──Modifier¹
│ ├──CVQualifier¹
│ │ ├──Const
│ │ │ └────────┐
│ │ └──Volatile │
│ │ └────────┴──ConstVolatile
│ ├──ParenthesizedModifierForArrayAndFunction¹
│ │ ├──Pointer
│ │ │ └───────────────────╥──PointerWithArraySize
│ │ └──Reference¹ ║
│ │ ├──LValueReference ║
│ │ └──RValueReference ║
│ ├──Array¹ ║
│ │ ├──UnsizedArray ║
│ │ └───────────────────────╥──SizedArray
│ ├──ArraySizeInitializer²════╝
│ └──PointerToMember
├──Function¹
│ ├──FunctionRet¹
│ │ ├──Function0Ret
│ │ │ └─────────────╥──FunctionVA0Ret
│ │ ├──Function1Ret ║
│ │ │ └─────────────╥──FunctionVA1Ret
│ │ └──Function2Ret ║
│ │ └─────────────╥──FunctionVA2Ret
│ ├──FunctionArg¹ ║
│ │ ├──Function1Arg ║
│ │ │ └─────────────╥──FunctionVA1Arg
│ │ ├──Function2Arg1 ║
│ │ │ └─────────────╥──FunctionVA2Arg1
│ │ └──Function2Arg2 ║
│ │ └─────────────╥──FunctionVA2Arg2
│ └──FunctionVA²════════╝
├──FunctionQualifier¹
│ └──FunctionCVQualifier¹
│ ├──FunctionConst
│ └──FunctionVolatile
└──FunctionRefQualifier¹
├──FunctionLValRef
└──FunctionRValRef
¹: Abstract base class
²: Mixin
"""
import sys
from itertools import repeat
from collections import namedtuple
MAX_LEVELS = 5
MAX_TYPES = 1800
MAX_TESTED_TYPES = 1500
SKIP_NULL_CONSTRUCTIONS = True
ONLY_ESSENTIAL_CONSTRUCTIONS_VARIATIONS = True
def printerr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class Type:
def __init__(self, level):
self.level = level
self.alias = 'A%d' % self.level
class BasicType(Type):
def __init__(self, token):
super().__init__(level=1)
self.token = token
@property
def definition(self):
return self.token
def description(self, plural=False):
descr = self.token
if plural:
descr += 's'
return descr
def declaration(self, suffix='', prefix=''):
return prefix + self.token + suffix
@property
def array_operations(self):
return []
def normalized(self):
return self
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.token)
class Operation(Type):
def __init__(self, operand):
super().__init__(level=operand.level+1)
self.operand = operand
@classmethod
def accept_operand(cls, operand):
pass
def normalized(self):
normalized_operand = self.operand.normalized()
if normalized_operand is self.operand:
return self
else:
return self.__class__(normalized_operand)
@property
def array_operations(self):
return self.operand.array_operations
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self.operand == other.operand
def __hash__(self):
return hash(self.__class__) ^ hash(self.operand)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.operand)
class Modifier(Operation):
@classmethod
def accept_operand(cls, operand):
if isinstance(operand, Reference):
# modifier(reference(x))
raise Generator.OperationDisallowed('Cannot apply %s to reference' % cls.__name__)
super().accept_operand(operand)
@property
def definition(self):
return self.operand.alias + self.definition_token
def description(self, plural=False, pluralize_operand_description=False):
if plural:
prefix = self.description_prefix_plural
else:
prefix = self.description_prefix
return prefix + ' ' + self.operand.description(plural=pluralize_operand_description)
class CVQualifier(Modifier):
classes_by_qualifications = {}
@classmethod
def accept_operand(cls, operand):
if isinstance(operand, CVQualifier) and isinstance(operand.operand, CVQualifier):
raise Generator.GenerationPruned('At most 2 levels of cv-qualifiers')
if SKIP_NULL_CONSTRUCTIONS and isinstance(operand, (Function, FunctionQualifier)):
# <cv-qualified>(function(x))
raise Generator.GenerationPruned('cv-qualifier applied to function is ignored')
super().accept_operand(operand)
@property
def definition(self):
return self.definition_token + ' ' + self.operand.alias
def description(self, plural=False):
# Forwards pluralization to the operand
return self.description_prefix + ' ' + self.operand.description(plural=plural)
def declaration(self, suffix=''):
if isinstance(self.operand, BasicType):
prefix = self.definition_token + ' '
return self.operand.declaration(suffix, prefix=prefix)
else:
return self.operand.declaration(self.definition_token + suffix)
def normalized(self):
if isinstance(self.operand, CVQualifier):
# <cv-qualifier1>(<cv-qualifier2>(x)) -> <cv-qualifier1 + cv-qualifier2>(x)
qualifications = (self.qualifications).union(self.operand.qualifications)
cls = self.classes_by_qualifications[qualifications]
return cls(self.operand.operand).normalized()
if isinstance(self.operand, Array):
# <cv-qualifier>(array(x)) -> array(<cv-qualifier>(x))
array = self.operand
cv_class = self.__class__
qualified_array_operand = cv_class(array.operand)
array_class = array.__class__
new_array = array_class(qualified_array_operand)
return new_array.normalized()
if isinstance(self.operand, (Function, FunctionQualifier)):
# <cv-qualifier>(function(x)) -> function(x)
return self.operand.normalized()
return super().normalized()
Analysis = namedtuple('Analysis', ['original', 'qualifications', 'without_qualifications'])
@classmethod
def analyze(cls, operand):
op = operand
quals = set()
while isinstance(op, CVQualifier):
quals.update(op.qualifications)
op = op.operand
quals = frozenset(quals)
return cls.Analysis(operand, quals, op)
class Const(CVQualifier):
definition_token = 'const'
description_prefix = 'const'
qualifications = frozenset(['const'])
CVQualifier.classes_by_qualifications[Const.qualifications] = Const
class Volatile(CVQualifier):
definition_token = 'volatile'
description_prefix = 'volatile'
qualifications = frozenset(['volatile'])
CVQualifier.classes_by_qualifications[Volatile.qualifications] = Volatile
class ConstVolatile(Const, Volatile):
definition_token = 'const volatile'
description_prefix = 'const volatile'
qualifications = frozenset(['const', 'volatile'])
CVQualifier.classes_by_qualifications[ConstVolatile.qualifications] = ConstVolatile
class ParenthesizedModifierForArrayAndFunction(Modifier):
def declaration(self, suffix=''):
argument = self.definition_token + suffix
if isinstance(self.operand, (Array, Function)):
argument = '(' + argument + ')'
return self.operand.declaration(argument)
class Pointer(ParenthesizedModifierForArrayAndFunction):
definition_token = '*'
description_prefix = 'pointer to'
description_prefix_plural = 'pointers to'
@classmethod
def accept_operand(cls, operand):
unqualified_operand = CVQualifier.analyze(operand).without_qualifications
if isinstance(unqualified_operand, Pointer):
unqualified_operand_operand = CVQualifier.analyze(unqualified_operand.operand).without_qualifications
if isinstance(unqualified_operand_operand, Pointer):
# Pointer(Pointer(x))
raise Generator.GenerationPruned('At most 2 levels of pointers')
if isinstance(unqualified_operand, FunctionQualifier):
raise Generator.OperationDisallowed('Cannot point to qualified function')
super().accept_operand(operand)
class ArraySizeInitializer(Modifier):
def __init__(self, operand):
super().__init__(operand)
operand_array_operations = operand.array_operations
if len(operand_array_operations) == 0:
self.size = 5
else:
last_array_operation = operand_array_operations[-1]
self.size = last_array_operation.size - 1
class PointerWithArraySize(Pointer, ArraySizeInitializer):
@property
def array_operations(self):
return self.operand.array_operations + [self]
class Reference(ParenthesizedModifierForArrayAndFunction):
@classmethod
def accept_operand(cls, operand):
unqualified_operand = CVQualifier.analyze(operand).without_qualifications
if isinstance(unqualified_operand, FunctionQualifier):
raise Generator.OperationDisallowed('Cannot reference qualified function')
super().accept_operand(operand)
class LValueReference(Reference):
definition_token = '&'
description_prefix = 'l-value reference to'
class RValueReference(Reference):
definition_token = '&&'
description_prefix = 'r-value reference to'
class Array(Modifier):
description_prefix = 'array of'
description_prefix_plural = 'arrays of'
@classmethod
def accept_operand(cls, operand):
unqualified_operand = CVQualifier.analyze(operand).without_qualifications
if isinstance(unqualified_operand, (Function, FunctionQualifier)):
# array(function(x))
raise Generator.OperationDisallowed('Cannot make array of functions')
operand_array_operations = operand.array_operations
if len(operand_array_operations) > 0:
if len(operand_array_operations) >= 3:
raise Generator.GenerationPruned('At most 3 dimensions')
last_array_operation = operand_array_operations[-1]
if isinstance(last_array_operation, UnsizedArray):
# array(UnsizedArray(x))
raise Generator.OperationDisallowed('Cannot make array of unsized arrays')
super().accept_operand(operand)
def description(self, plural=False):
return super().description(plural=plural, pluralize_operand_description=True)
def declaration(self, prefix=''):
return self.operand.declaration(prefix + self.definition_token)
@property
def array_operations(self):
return self.operand.array_operations + [self]
class UnsizedArray(Array):
definition_token = '[]'
class SizedArray(Array, ArraySizeInitializer):
@property
def definition_token(self):
return '[%d]' % self.size
@property
def description_prefix(self):
return super().description_prefix + ' %d' % self.size
@property
def description_prefix_plural(self):
return super().description_prefix_plural + ' %d' % self.size
class Function(Operation):
@property
def definition(self):
return self._signature(self.operand.alias)
def description(self, plural=False, qualifiers=''):
assert plural is False
(return_description, arguments_descriptions) = self._return_and_arguments(self.operand.description())
return 'function%s with arguments (%s) returning %s' % (qualifiers, arguments_descriptions, return_description)
def declaration(self, infix='', qualifiers=''):
return self._signature(self.operand.declaration(), infix=infix) + qualifiers
def _signature(self, operand_value, infix=''):
result, args_list = self._return_and_arguments(operand_value)
return '%s%s(%s)' % (result, infix, args_list)
@classmethod
def _raw_args_list(cls):
return list(repeat('char', cls.num_args))
class FunctionRet(Function):
@classmethod
def accept_operand(cls, operand):
unqualified_operand = CVQualifier.analyze(operand).without_qualifications
if isinstance(unqualified_operand, (Array, Function, FunctionQualifier)):
# functionRet(array(x)) or functionRet(function(x))
raise Generator.OperationDisallowed('Cannot return array or function')
super().accept_operand(operand)
def declaration(self, prefix='', qualifiers=''):
unqualified_operand = CVQualifier.analyze(self.operand).without_qualifications
if isinstance(unqualified_operand, (Pointer, PointerToMember, Reference)):
_, arguments = self._return_and_arguments(None)
return self.operand.declaration(prefix + '(' + arguments + ')' + qualifiers)
return super().declaration(prefix) + qualifiers
def _return_and_arguments(self, result):
args_list = ', '.join(self._raw_args_list())
return (result, args_list)
class FunctionArg(Function):
@classmethod
def accept_operand(cls, operand):
if isinstance(operand, Reference):
reference = operand
unqualified_reference_operand = CVQualifier.analyze(reference.operand).without_qualifications
if isinstance(unqualified_reference_operand, UnsizedArray):
# functionArg(reference(UnsizedArray(x)))
raise Generator.OperationDisallowed('Function parameter cannot be reference to unsized array')
nonreference_operand = operand.operand
else:
nonreference_operand = operand
unqualified_operand = CVQualifier.analyze(nonreference_operand).without_qualifications
while isinstance(unqualified_operand, Pointer):
pointer = unqualified_operand
unqualified_pointer_operand = CVQualifier.analyze(pointer.operand).without_qualifications
if isinstance(unqualified_pointer_operand, UnsizedArray):
# functionArg(Pointer(UnsizedArray(x)))
# functionArg(reference(Pointer(UnsizedArray(x))))
raise Generator.OperationDisallowed('Function parameter cannot be [reference to] pointer to unsized array')
unqualified_operand = unqualified_pointer_operand
if isinstance(unqualified_operand, FunctionQualifier):
raise Generator.OperationDisallowed('Function parameter cannot be qualified function')
if SKIP_NULL_CONSTRUCTIONS and isinstance(operand, CVQualifier) \
and not isinstance(unqualified_operand, Array):
# functionArg(<cv-qualified>(x)) -> functionArg(x)
raise Generator.GenerationPruned('Argument cv-qualifier is ignored')
super().accept_operand(operand)
def normalized(self):
operand_analysis = CVQualifier.analyze(self.operand)
if operand_analysis.qualifications:
assert isinstance(self.operand, CVQualifier)
unqualified_operand = operand_analysis.without_qualifications
function_class = self.__class__
if isinstance(unqualified_operand, Array):
# functionArg(<cv-qualifier>(array(x))) -> functionArg(PointerWithArraySize(<cv-qualifier>(x)))
cv_class = CVQualifier.classes_by_qualifications[operand_analysis.qualifications]
array_operand = unqualified_operand.operand
qualified_array_operand = cv_class(array_operand)
pointer = PointerWithArraySize(qualified_array_operand)
return function_class(pointer).normalized()
else:
# functionArg(<cv-qualifier>(x)) -> functionArg(x)
return function_class(unqualified_operand.normalized()).normalized()
if isinstance(self.operand, Array):
# functionArg(array(x)) -> functionArg(PointerWithArraySize(x))
array = self.operand
new_operand = PointerWithArraySize(array.operand)
function_class = self.__class__
return function_class(new_operand).normalized()
if isinstance(self.operand, Function):
# functionArg(function(x)) -> functionArg(Pointer(function(x)))
function = self.operand
pointer = Pointer(function)
function_class = self.__class__
return function_class(pointer).normalized()
return super().normalized()
def _return_and_arguments(self, operand_value):
args = self._raw_args_list()
args[self.operand_arg_index] = operand_value
args_list = ', '.join(args)
return ('char', args_list)
class Function0Ret(FunctionRet):
num_args = 0
class Function1Ret(FunctionRet):
num_args = 1
class Function1Arg(FunctionArg):
num_args = 1
operand_arg_index = 0
class Function2Ret(FunctionRet):
num_args = 2
class Function2Arg1(FunctionArg):
num_args = 2
operand_arg_index = 0
class Function2Arg2(FunctionArg):
num_args = 2
operand_arg_index = 1
class FunctionVA(Function):
@classmethod
def _raw_args_list(cls):
return super()._raw_args_list() + ['...']
class FunctionVA0Ret(Function0Ret, FunctionVA):
pass
class FunctionVA1Ret(Function1Ret, FunctionVA):
pass
class FunctionVA1Arg(Function1Arg, FunctionVA):
pass
class FunctionVA2Ret(Function2Ret, FunctionVA):
pass
class FunctionVA2Arg1(Function2Arg1, FunctionVA):
pass
class FunctionVA2Arg2(Function2Arg2, FunctionVA):
pass
class FunctionQualifier(Operation):
def description(self, plural=False, qualifiers=''):
assert plural is False
quals = ' ' + self.definition_token + qualifiers
return self.operand.description(qualifiers=quals)
@property
def definition(self):
return self.operand.definition + ' ' + self.definition_token
def declaration(self, arg='', qualifiers=''):
qual = ' ' + self.definition_token + qualifiers
return self.operand.declaration(arg, qualifiers=qual)
class FunctionCVQualifier(FunctionQualifier):
pass
class FunctionConst(FunctionCVQualifier):
definition_token = 'const'
@classmethod
def accept_operand(cls, operand):
if not isinstance(operand, Function):
raise Generator.OperationDisallowed('Function const-qualifier can only be applied to a function')
super().accept_operand(operand)
class FunctionVolatile(FunctionCVQualifier):
definition_token = 'volatile'
@classmethod
def accept_operand(cls, operand):
if not isinstance(operand, (Function, FunctionConst)):
raise Generator.OperationDisallowed('Function volatile-qualifier can only be applied to a [const-qualified] function')
super().accept_operand(operand)
class FunctionRefQualifier(FunctionQualifier):
@classmethod
def accept_operand(cls, operand):
if not isinstance(operand, (Function, FunctionCVQualifier)):
raise Generator.OperationDisallowed('Function ref-qualifier can only be applied to a [cv-qualified] function')
super().accept_operand(operand)
class FunctionLValRef(FunctionRefQualifier):
definition_token = '&'
class FunctionRValRef(FunctionRefQualifier):
definition_token = '&&'
class PointerToMember(Modifier):
definition_token = ' C::*'
description_prefix = 'pointer to member'
description_prefix_plural = 'pointers to member'
@classmethod
def accept_operand(cls, operand):
if SKIP_NULL_CONSTRUCTIONS and isinstance(operand, FunctionRefQualifier):
# PointerToMember(functionRefQualifier(x))
raise Generator.GenerationPruned('ref-qualification of a pointed-to member function is ignored')
super().accept_operand(operand)
def normalized(self):
unqualified_operand = CVQualifier.analyze(self.operand).without_qualifications
if isinstance(unqualified_operand, FunctionRefQualifier):
# PointerToMember(functionRefQualifier(x)) -> PointerToMember(x)
return PointerToMember(unqualified_operand.operand).normalized()
return super().normalized()
def declaration(self, suffix=''):
arg = 'C::*' + suffix
if isinstance(self.operand, (Array, Function, FunctionCVQualifier)):
arg = '(' + arg + ')'
else:
arg = ' ' + arg
return self.operand.declaration(arg)
ALL_OPERATIONS = [
Const,
Volatile,
Pointer,
LValueReference,
RValueReference,
UnsizedArray,
SizedArray,
Function0Ret,
#Function1Ret,
#Function1Arg,
Function2Ret,
#Function2Arg1,
Function2Arg2,
FunctionVA0Ret,
FunctionVA1Ret,
FunctionVA1Arg,
#FunctionVA2Ret,
#FunctionVA2Arg1,
#FunctionVA2Arg2,
FunctionConst,
FunctionVolatile,
FunctionLValRef,
FunctionRValRef,
PointerToMember,
]
if ONLY_ESSENTIAL_CONSTRUCTIONS_VARIATIONS:
def remove_non_essential_operations(operations):
operations_to_remove = set([
Volatile, # Const and Volatile have the same relationship with other operations and with one another
RValueReference, # LValueReference and RValueReference have the same relationship with other operations and with one another
FunctionVolatile, # FunctionConst and FunctionVolatile have the same relationship with other operations and with one another
FunctionRValRef, # FunctionLValRef and FunctionRValRef have the same relationship with other operations and with one another
])
functions_ret = set()
functions_arg = set()
for op in operations:
if issubclass(op, FunctionRet):
functions_ret.add(op)
elif issubclass(op, FunctionArg):
functions_arg.add(op)
def func_key(func):
# Prefer vararg version
# Prefer more arguments
return (issubclass(func, FunctionVA), func.num_args)
for group in (functions_ret, functions_arg):
to_keep = max(group, key=func_key, default=None)
group.discard(to_keep)
operations_to_remove |= group
operations[:] = filter(lambda op: op not in operations_to_remove, operations)
remove_non_essential_operations(ALL_OPERATIONS)
del remove_non_essential_operations
class FileWriter:
def __init__(self, name):
self.name = name
self.identation_level = 0
def __enter__(self):
self.f = open(self.name, 'wt')
self.print('#include "typedecl.hpp"')
self.print('#include <cassert>')
self.print('#include <type_traits>')
self.print('')
self.print('template<typename>void avoid_unused_type_warning(){}')
self.print('')
self.print('struct C {};')
self.print('DEFINE_TYPEDECL(C);')
self.print('')
self.print('void testTypeDecl() {')
self.ident()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.deident()
self.print('}')
self.f.close()
return False
def print(self, line):
self.f.write(self._identation() + line + '\n')
def _identation(self):
return '\t' * self.identation_level
def ident(self):
self.identation_level += 1
def deident(self):
self.identation_level -= 1
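# Illustrative FileWriter usage sketch (the file name is hypothetical):
#   with FileWriter('test_typedecl.gen.cpp') as f:
#       f.print('// assertions generated by Generator go here')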
class Generator:
class GenerationSkipped(Exception): pass
class GenerationPruned(GenerationSkipped): pass
class OperationDisallowed(GenerationSkipped): pass
total_types = 0
same_cases = 0
normalizations = {}
@classmethod
def tested_types(cls):
return cls.total_types - cls.same_cases
def __init__(self, file):
self.f = file
def generate(self, type):
if Generator.total_types >= MAX_TYPES or \
Generator.tested_types() >= MAX_TESTED_TYPES:
return
Generator.total_types += 1
type_number = Generator.total_types
normalized = type.normalized()
self.f.print('{\t// Type %d' % type_number)
self.f.ident()
self.f.print('// Constructed: %r - %s' % (type, type.description()))
self.f.print('// Normalized: %r - %s' % (normalized, normalized.description()))
definition_line = 'using %s = %s;' % (type.alias, type.definition)
skipped = False
if | |
import logging
import sys
from glob import glob
from json import dump
from subprocess import run
from attmap import PathExAttMap as PXAM
from refgenconf import RefGenConf
from refgenconf.exceptions import (
ConfigNotCompliantError,
GenomeConfigFormatError,
MissingConfigDataError,
RefgenconfError,
)
from refgenconf.helpers import replace_str_in_obj, swap_names_in_tree
from ubiquerg import checksum, is_command_callable, parse_registry_path, size
from .const import *
global _LOGGER
_LOGGER = logging.getLogger(PKG_NAME)
def archive(rgc, registry_paths, force, remove, cfg_path, genomes_desc):
"""
Takes the RefGenConf object and builds individual tar archives
    that can then be served with 'refgenieserver serve'. It additionally determines their md5 checksums and file
    sizes and updates the original refgenie config with these data. If the --asset and/or --genome options are used
    (a specific build is requested), the archiver checks for the existence of a config file saved in the path
    provided in `genome_server` in the original config and updates it so that no archive metadata is lost.
    :param RefGenConf rgc: configuration object with the data to build the servable archives for
    :param list[dict] registry_paths: a collection of mappings that identifies the assets to update
    :param bool force: whether to force the build of an archive, regardless of its existence
    :param bool remove: whether to remove the specified genome/asset:tag from the archive
    :param str cfg_path: config file path
    :param str genomes_desc: path to a CSV file mapping genome names to their descriptions
"""
if float(rgc[CFG_VERSION_KEY]) < float(REQ_CFG_VERSION):
raise ConfigNotCompliantError(
f"You need to update the genome config to v{REQ_CFG_VERSION} in order to use the archiver. "
f"The required version can be generated with refgenie >= {REFGENIE_BY_CFG[REQ_CFG_VERSION]}"
)
if CFG_ARCHIVE_CONFIG_KEY in rgc:
srp = rgc[CFG_ARCHIVE_CONFIG_KEY]
server_rgc_path = (
srp
if os.path.isabs(srp)
else os.path.join(os.path.dirname(rgc.file_path), srp)
)
else:
try:
server_rgc_path = os.path.join(
rgc[CFG_ARCHIVE_KEY], os.path.basename(cfg_path)
)
except KeyError:
raise GenomeConfigFormatError(
f"The config '{cfg_path}' is missing a {' or '.join([CFG_ARCHIVE_KEY, CFG_ARCHIVE_KEY_OLD])} entry. "
f"Can't determine the desired archive."
)
if os.path.isfile(server_rgc_path) and not os.access(server_rgc_path, os.W_OK):
raise OSError(
"The determined archive config path is not writable: {}".format(
server_rgc_path
)
)
if force:
_LOGGER.info("Build forced; file existence will be ignored")
_LOGGER.debug("Registry_paths: {}".format(registry_paths))
# original RefGenConf has been created in read-only mode,
# make it RW compatible and point to new target path for server use or initialize a new object
if os.path.exists(server_rgc_path):
_LOGGER.debug(f"'{server_rgc_path}' file was found and will be updated")
rgc_server = RefGenConf(filepath=server_rgc_path)
if remove:
if not registry_paths:
_LOGGER.error(
"To remove archives you have to specify them. "
"Use 'asset_registry_path' argument."
)
exit(1)
with rgc_server as r:
_remove_archive(r, registry_paths, CFG_ARCHIVE_KEY)
exit(0)
else:
if remove:
_LOGGER.error(
"You can't remove archives since the genome_archive path does not exist yet."
)
exit(1)
_LOGGER.debug(f"'{server_rgc_path}' file was not found and will be created")
rgc_server = RefGenConf(filepath=rgc.file_path)
rgc_server.make_writable(filepath=server_rgc_path)
rgc_server.make_readonly()
if registry_paths:
genomes = _get_paths_element(registry_paths, "namespace")
asset_list = _get_paths_element(registry_paths, "item")
tag_list = _get_paths_element(registry_paths, "tag")
else:
genomes = rgc.genomes_list()
asset_list, tag_list = None, None
if not genomes:
_LOGGER.error("No genomes found")
exit(1)
else:
_LOGGER.debug(f"Genomes to be processed: {str(genomes)}")
genomes = [rgc.get_genome_alias_digest(g) for g in genomes]
if genomes_desc is not None:
if os.path.exists(genomes_desc):
import csv
_LOGGER.info(f"Found a genomes descriptions CSV file: {genomes_desc}")
with open(genomes_desc, mode="r") as infile:
reader = csv.reader(infile)
descs = {rows[0]: rows[1] for rows in reader}
else:
_LOGGER.error(
f"Genomes descriptions CSV file does not exist: {genomes_desc}"
)
sys.exit(1)
counter = 0
for genome in genomes:
genome_dir = os.path.join(rgc.data_dir, genome)
target_dir = os.path.join(rgc[CFG_ARCHIVE_KEY], genome)
alias_target_dir = os.path.join(
rgc[CFG_ARCHIVE_KEY], rgc.get_genome_alias(digest=genome, fallback=True)
)
if not os.path.exists(target_dir):
os.makedirs(target_dir, exist_ok=True)
# create legacy directory for archive
# TODO: remove in the future
os.makedirs(alias_target_dir, exist_ok=True)
genome_desc = (
rgc[CFG_GENOMES_KEY][genome].setdefault(
CFG_GENOME_DESC_KEY, DESC_PLACEHOLDER
)
if genomes_desc is None or genome not in descs
else descs[genome]
)
genome_aliases = rgc[CFG_GENOMES_KEY][genome].setdefault(CFG_ALIASES_KEY, [])
genome_attrs = {
CFG_GENOME_DESC_KEY: genome_desc,
CFG_ALIASES_KEY: genome_aliases,
}
with rgc_server as r:
r[CFG_GENOMES_KEY].setdefault(genome, PXAM())
r[CFG_GENOMES_KEY][genome].update(genome_attrs)
_LOGGER.debug(f"Updating '{genome}' genome attributes...")
asset = asset_list[counter] if asset_list is not None else None
assets = asset or rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY].keys()
if not assets:
_LOGGER.error("No assets found")
continue
else:
_LOGGER.debug(f"Assets to be processed: {str(assets)}")
for asset_name in assets if isinstance(assets, list) else [assets]:
asset_desc = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][
asset_name
].setdefault(CFG_ASSET_DESC_KEY, DESC_PLACEHOLDER)
default_tag = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][
asset_name
].setdefault(CFG_ASSET_DEFAULT_TAG_KEY, DEFAULT_TAG)
asset_attrs = {
CFG_ASSET_DESC_KEY: asset_desc,
CFG_ASSET_DEFAULT_TAG_KEY: default_tag,
}
_LOGGER.debug(f"Updating '{genome}/{asset_name}' asset attributes...")
with rgc_server as r:
r.update_assets(genome, asset_name, asset_attrs)
tag = tag_list[counter] if tag_list is not None else None
tags = (
tag
or rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
].keys()
)
for tag_name in tags if isinstance(tags, list) else [tags]:
if not rgc.is_asset_complete(genome, asset_name, tag_name):
raise MissingConfigDataError(
f"Asset '{genome}/{asset_name}:{tag_name}' is incomplete. "
f"This probably means an attempt to archive a partially "
f"pulled parent. refgenieserver archive requires all assets to "
f"be built prior to archiving."
)
file_name = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
][tag_name][CFG_ASSET_PATH_KEY]
target_file_core = os.path.join(target_dir, f"{asset_name}__{tag_name}")
target_file = f"{target_file_core}.tgz"
input_file = os.path.join(genome_dir, file_name, tag_name)
# these attributes have to be read from the original RefGenConf in case the archiver just increments
# an existing server RefGenConf
parents = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
][tag_name].setdefault(CFG_ASSET_PARENTS_KEY, [])
children = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
][tag_name].setdefault(CFG_ASSET_CHILDREN_KEY, [])
seek_keys = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
][tag_name].setdefault(CFG_SEEK_KEYS_KEY, {})
asset_digest = rgc[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
][tag_name].setdefault(CFG_ASSET_CHECKSUM_KEY, None)
if not os.path.exists(target_file) or force:
_LOGGER.info(
f"Creating archive '{target_file}' from '{input_file}' asset"
)
try:
_copy_asset_dir(input_file, target_file_core)
_get_asset_dir_contents(target_file_core, asset_name, tag_name)
_check_tgz(input_file, target_file)
_copy_recipe(input_file, target_dir, asset_name, tag_name)
_copy_log(input_file, target_dir, asset_name, tag_name)
# TODO: remove the rest of this try block in the future
_check_tgz_legacy(
input_file,
target_file,
asset_name,
rgc.get_genome_alias_digest(alias=genome, fallback=True),
rgc.get_genome_alias(digest=genome, fallback=True),
)
_copy_recipe(input_file, alias_target_dir, asset_name, tag_name)
_copy_log(input_file, alias_target_dir, asset_name, tag_name)
except OSError as e:
_LOGGER.warning(e)
continue
else:
_LOGGER.info(
f"Updating '{genome}/{asset_name}:{tag_name}' tag attributes"
)
tag_attrs = {
CFG_ASSET_PATH_KEY: file_name,
CFG_SEEK_KEYS_KEY: seek_keys,
CFG_ARCHIVE_CHECKSUM_KEY: checksum(target_file),
CFG_ARCHIVE_SIZE_KEY: size(target_file),
CFG_ASSET_SIZE_KEY: size(input_file),
CFG_ASSET_PARENTS_KEY: parents,
CFG_ASSET_CHILDREN_KEY: children,
CFG_ASSET_CHECKSUM_KEY: asset_digest,
}
# TODO: legacy checksum generation and tag dictionary
# update to be removed in the future
legacy_digest = checksum(
replace_str_in_obj(
target_file,
x=rgc.get_genome_alias_digest(
alias=genome, fallback=True
),
y=rgc.get_genome_alias(digest=genome, fallback=True),
)
)
tag_attrs.update(
{CFG_LEGACY_ARCHIVE_CHECKSUM_KEY: legacy_digest}
)
_LOGGER.debug(f"attr dict: {tag_attrs}")
with rgc_server as r:
for parent in parents:
# here we update any pre-existing parents' children
# attr with the newly added asset
_LOGGER.debug(
f"Updating {parent} children list with "
f"{genome}/{asset_name}:{tag_name}"
)
rp = parse_registry_path(parent)
parent_genome = rp["namespace"]
parent_asset = rp["item"]
parent_tag = rp["tag"]
try:
r.seek(
parent_genome,
parent_asset,
parent_tag,
strict_exists=True,
)
except RefgenconfError:
_LOGGER.warning(
f"'{genome}/{asset_name}:{tag_name}'s parent "
f"'{parent}' does not exist, skipping relationship updates"
)
continue
r.update_relatives_assets(
parent_genome,
parent_asset,
parent_tag,
[f"{genome}/{asset_name}:{tag_name}"],
children=True,
)
r.update_tags(genome, asset_name, tag_name, tag_attrs)
else:
exists_msg = f"'{target_file}' exists."
try:
rgc_server[CFG_GENOMES_KEY][genome][CFG_ASSETS_KEY][asset_name][
CFG_ASSET_TAGS_KEY
][tag_name][CFG_ARCHIVE_CHECKSUM_KEY]
_LOGGER.debug(exists_msg + " Skipping")
except KeyError:
_LOGGER.debug(exists_msg + " Calculating archive digest")
tag_attrs = {CFG_ARCHIVE_CHECKSUM_KEY: checksum(target_file)}
with rgc_server as r:
r.update_tags(genome, asset_name, tag_name, tag_attrs)
counter += 1
_LOGGER.info(f"Builder finished; server config file saved: {rgc_server.file_path}")
def _check_tgz(path, output):
"""
Check if file exists and tar it.
If gzipping is requested, the pigz software is used if available.
:param str path: path to the file to be tarred
:param str output: path to the result file
:raise OSError: if the file/directory meant to be archived does not exist
"""
pth, tag_name = os.path.split(path)
if os.path.exists(path):
# tar gzip the asset, exclude _refgenie_build dir, it may change digests
cmd = "tar --exclude '_refgenie_build' -C {p} "
cmd += (
"-cvf - {tn} | pigz > {o}"
if is_command_callable("pigz")
else "-cvzf {o} {tn}"
)
command = cmd.format(p=pth, o=output, tn=tag_name)
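        # e.g. with path='/data/<digest>/fasta/default' and output='.../fasta__default.tgz'
        # (hypothetical paths) and pigz on PATH, this renders roughly to:
        #   tar --exclude '_refgenie_build' -C /data/<digest>/fasta -cvf - default | pigz > .../fasta__default.tgz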
_LOGGER.info(f"command: {command}")
run(command, shell=True)
else:
raise OSError(f"Entity '{path}' does not exist")
def _check_tgz_legacy(path, output, asset_name, genome_name, alias):
"""
NOTE: This is a duplication of the _check_tgz function, kept separate as in
the future this step will be simply removed.
Check if file exists and tar it.
    If gzipping is requested, the pigz software is used if available.
:param str path: path to the file to be tarred
:param str output: path to the result file
    :param str asset_name: name of the asset
    :param str genome_name: genome digest used in the archived directory tree
    :param str|list[str] alias: human-readable genome alias(es) the digest is swapped for
:raise OSError: if the file/directory meant to be archived does not exist
"""
# TODO: remove in the future
if isinstance(alias, str):
alias = [alias]
for a in alias:
if os.path.exists(path):
aliased_output = replace_str_in_obj(output, x=genome_name, y=a)
cmd = "rsync -rvL --exclude '_refgenie_build' {p}/ {p}/{an}/; "
command = cmd.format(p=path, o=output, an=asset_name)
_LOGGER.debug("command: {}".format(command))
run(command, shell=True)
swap_names_in_tree(os.path.join(path, asset_name), a, genome_name)
# tar gzip the new dir
cmd = "cd {p}; " + (
"tar -cvf - {an} | pigz > {oa}; "
if is_command_callable("pigz")
else "tar -cvzf {oa} {an}; "
| |
Func GetMinimumFPSModeInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the MinimumFPSMode setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetMinimumFPSModeInfo(io_admin_level, io_locked)
def get_minimum_space_fps_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetMinimumSpaceFPSModeInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the MinimumSpaceFPSMode setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetMinimumSpaceFPSModeInfo(io_admin_level, io_locked)
def get_mouse_double_clic_delay_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetMouseDoubleClicDelayInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the MouseDoubleClicDelay setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetMouseDoubleClicDelayInfo(io_admin_level, io_locked)
def get_mouse_speed_value_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetMouseSpeedValueInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the MouseSpeedValue setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetMouseSpeedValueInfo(io_admin_level, io_locked)
def get_nb_isopars_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetNbIsoparsInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the NbIsopars setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetNbIsoparsInfo(io_admin_level, io_locked)
def get_no_show_background_rgb(self, io_r: int, io_g: int, io_b: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetNoShowBackgroundRGB(long ioR,
| long ioG,
| long ioB)
|
| Retrieves the No Show Background Color setting attribute
| value.
| Role: The No Show Background Color setting attribute manages the
| backgraound color of no show space
|
| Parameters:
|
| ioR,
| ioG, ioB [inout] The Red, Green, Blue components of the No Show
| Background Color setting attribute value
|
| Returns:
| S_OK if the No Show Background Color setting attribute value is
| successfully retrieved, and E_FAIL otherwise
:param int io_r:
:param int io_g:
:param int io_b:
:return: None
:rtype: None
"""
return self.visualization_setting_att.GetNoShowBackgroundRGB(io_r, io_g, io_b)
def get_no_show_background_rgb_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetNoShowBackgroundRGBInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves the No Show Background Color setting attribute
| information.
|
| Parameters:
|
| ioAdminLevel,
| ioLocked [inout] and oModified [out] The No Show Background Color
| setting attribute information
|
| Returns:
| S_OK if the No Show Background Color setting attribute information is
| successfully retrieved, and E_FAIL otherwise
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetNoShowBackgroundRGBInfo(io_admin_level, io_locked)
def get_no_z_buffer_selection_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetNoZBufferSelectionModeInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the NoZBufferSelectionMode setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetNoZBufferSelectionModeInfo(io_admin_level, io_locked)
def get_number_of_minimum_fps_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetNumberOfMinimumFPSInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the NumberOfMinimumFPS setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetNumberOfMinimumFPSInfo(io_admin_level, io_locked)
def get_number_of_minimum_space_fps_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetNumberOfMinimumSpaceFPSInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the NumberOfMinimumSpaceFPS setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetNumberOfMinimumSpaceFPSInfo(io_admin_level, io_locked)
def get_occlusion_culling_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetOcclusionCullingModeInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the OcclusionCullingMode setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetOcclusionCullingModeInfo(io_admin_level, io_locked)
def get_opaque_faces_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetOpaqueFacesInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the SetStereoModeLock setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetOpaqueFacesInfo(io_admin_level, io_locked)
def get_other_selection_timeout_activity_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetOtherSelectionTimeoutActivityInfo(CATBSTR
| ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the OtherSelectionTimeoutActivity setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetOtherSelectionTimeoutActivityInfo(io_admin_level, io_locked)
def get_other_selection_timeout_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetOtherSelectionTimeoutInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the OtherSelectionTimeout setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetOtherSelectionTimeoutInfo(io_admin_level, io_locked)
def get_picking_window_size_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPickingWindowSizeInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the PickingWindowSize setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetPickingWindowSizeInfo(io_admin_level, io_locked)
def get_pre_selection_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPreSelectionModeInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the PreSelectionMode setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetPreSelectionModeInfo(io_admin_level, io_locked)
def get_preselected_element_linetype_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPreselectedElementLinetypeInfo(CATBSTR
| ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the PreselectedElementLinetype setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetPreselectedElementLinetypeInfo(io_admin_level, io_locked)
def get_preselected_element_rgb(self, io_r: int, io_g: int, io_b: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetPreselectedElementRGB(long ioR,
| long ioG,
| long ioB)
|
| Returns the PreselectedElementRGB parameter.
:param int io_r:
:param int io_g:
:param int io_b:
:return: None
:rtype: None
"""
return self.visualization_setting_att.GetPreselectedElementRGB(io_r, io_g, io_b)
def get_preselected_element_rgb_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetPreselectedElementRGBInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the PreselectedElementRGB setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str | |
<reponame>marinajacks/Sum-of-Functions-Optimizer
"""
Model classes for each of the demo cases.  Each class contains
an objective function f_df, initial parameters theta_init, a list of
per-subfunction references subfunction_references, and a set of
full_objective_references that are evaluated at every update step
to make the plots of objective function vs. learning iteration.
This is designed to be called by figure_convergence.py.
Author: <NAME> (2014)
This software is made available under the Creative Commons
Attribution-Noncommercial License.
( http://creativecommons.org/licenses/by-nc/3.0/ )
"""
import numpy as np
import scipy.special
import warnings
import random
from figures_cae import CAE
from os.path import join
try:
import pyGLM.simGLM as glm
import pyGLM.gabor as gabor
except:
pass
# numpy < 1.7 does not have np.random.choice
def my_random_choice(n, k, replace):
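    # NOTE: 'replace' is accepted only for signature compatibility with np.random.choice;
    # sampling here is always without replacement (a random permutation truncated to k items)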
perm = np.random.permutation(n)
return perm[:k]
if hasattr(np.random, 'choice'):
random_choice = np.random.choice
else:
random_choice = my_random_choice
class toy:
"""
Toy problem. Sum of squared errors from random means, raised
to random powers.
"""
def __init__(self, num_subfunctions=100, num_dims=10):
self.name = '||x-u||^a, a in U[1.5,4.5]'
# create the array of subfunction identifiers
self.subfunction_references = []
N = num_subfunctions
for i in range(N):
npow = np.random.rand()*3. + 1.5
mn = np.random.randn(num_dims,1)
self.subfunction_references.append([npow,mn])
self.full_objective_references = self.subfunction_references
## initialize parameters
self.theta_init = np.random.randn(num_dims,1)
def f_df(self, x, args):
npow = args[0]/2.
mn = args[1]
f = np.sum(((x-mn)**2)**npow)
df = npow*((x-mn)**2)**(npow-1.)*2*(x-mn)
scl = 1. / np.prod(x.shape)
return f*scl, df*scl
class Hopfield:
def __init__(self, num_subfunctions=100, reg=1., scale_by_N=True):
"""
        Train a Hopfield network/Ising model using MPF (minimum probability flow).
Adapted from code by <NAME>, <NAME>, <NAME>, 2011
TODO insert Hopfield and MPF references.
"""
self.name = 'Hopfield'
self.reg = reg/num_subfunctions
# Load data
X, _ = load_mnist()
# binarize data
X = (np.sign(X-0.5)+1)/2
# only keep units which actually change state
gd = ((np.sum(X,axis=1) > 0) & (np.sum(1-X,axis=1) > 0))
X = X[gd,:]
# TODO -- discard units with correlation of exactly 1?
# break the data up into minibatches
self.subfunction_references = []
for mb in range(num_subfunctions):
self.subfunction_references.append(X[:, mb::num_subfunctions].T)
#self.full_objective_references = (X[:, random_choice(X.shape[1], 10000, replace=False)].copy().T,)
self.full_objective_references = self.subfunction_references
if scale_by_N:
self.scl = float(num_subfunctions) / float(X.shape[1])
else:
self.scl = 100. / float(X.shape[1])
# parameter initialization
self.theta_init = np.random.randn(X.shape[0], X.shape[0])/np.sqrt(X.shape[0])/10.
self.theta_init = (self.theta_init + self.theta_init.T)/2.
def f_df(self, J, X):
J = (J + J.T)/2.
X = np.atleast_2d(X)
S = 2 * X - 1
Kfull = np.exp(-S * np.dot(X, J.T) + .5 * np.diag(J)[None, :])
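        # Kfull[n, i] = exp(-(2*x_i - 1) * (J x^(n))_i + J_ii / 2): the MPF "flow" term between
        # data sample n and the same sample with bit i flipped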
dJ = -np.dot(X.T, Kfull * S) + .5 * np.diag(Kfull.sum(0))
dJ = (dJ + dJ.T)/2.
dJ = np.nan_to_num(dJ)
K = Kfull.sum()
if not np.isfinite(K):
K = 1e50
K *= self.scl
dJ *= self.scl
K += self.reg * np.sum(J**2)
dJ += 2. * self.reg * J
return K, dJ
class logistic:
"""
logistic regression on "protein" dataset
"""
def __init__(self, num_subfunctions=100, scale_by_N=True):
self.name = 'protein logistic regression'
try:
data = np.loadtxt('figure_data/bio_train.dat')
except:
raise Exception("Missing data. Download from and place in figure_data subdirectory.")
target = data[:,[2]]
feat = data[:,3:]
feat = self.whiten(feat)
feat = np.hstack((feat, np.ones((data.shape[0],1))))
# create the array of subfunction identifiers
self.subfunction_references = []
N = num_subfunctions
nper = float(feat.shape[0])/float(N)
if scale_by_N:
lam = 1./nper**2
scl = 1./nper
else:
default_N = 100.
nnp = float(feat.shape[0])/default_N
lam = (1./nnp**2) * (default_N / float(N))
scl = 1./nnp
for i in range(N):
i_s = int(np.floor(i*nper))
i_f = int(np.floor((i+1)*nper))
if i == N-1:
# don't drop any at the end
i_f = target.shape[0]
l_targ = target[i_s:i_f,:]
l_feat = feat[i_s:i_f,:]
self.subfunction_references.append([l_targ, l_feat, lam, scl, i])
self.full_objective_references = self.subfunction_references
self.theta_init = np.random.randn(feat.shape[1],1)/np.sqrt(feat.shape[1])/10. # parameters initialization
# remove first order dependencies from X, and scale to unit norm
def whiten(self, X,fudge=1E-10):
max_whiten_lines = 10000
# zero mean
X -= np.mean(X, axis=0).reshape((1,-1))
# the matrix X should be observations-by-components
# get the covariance matrix
Xsu = X[:max_whiten_lines,:]
Xcov = np.dot(Xsu.T,Xsu)/Xsu.shape[0]
# eigenvalue decomposition of the covariance matrix
try:
d,V = np.linalg.eigh(Xcov)
except np.linalg.LinAlgError:
print("could not get eigenvectors and eigenvalues using numpy.linalg.eigh of ", Xcov.shape, Xcov)
d,V = np.linalg.eig(Xcov)
d = np.nan_to_num(d+fudge)
d[d==0] = 1
V = np.nan_to_num(V)
# a fudge factor can be used so that eigenvectors associated with
# small eigenvalues do not get overamplified.
# TODO(jascha) D could be a vector not a matrix
D = np.diag(1./np.sqrt(d))
D = np.nan_to_num(D)
# whitening matrix
W = np.dot(np.dot(V,D),V.T)
# multiply by the whitening matrix
Y = np.dot(X,W)
return Y
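# Illustrative check (comments only, not executed): after whitening, the
# sample covariance of the rows that were used to build W should be close
# to the identity, up to the `fudge` regularization. Hypothetical usage:
#   Y = self.whiten(X)
#   C = np.dot(Y[:10000].T, Y[:10000]) / 10000.   # approximately np.eye(Y.shape[1])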
def sigmoid(self, u):
return 1./(1.+np.exp(-u))
def f_df(self, x, args):
target = args[0]
feat = args[1]
lam = args[2]
scl = args[3]
feat = feat*(2*target - 1)
ip = -np.dot(feat, x.reshape((-1,1)))
et = np.exp(ip)
logL = np.log(1. + et)
etrat = et/(1.+et)
bd = np.nonzero(ip[:,0]>50)[0]
logL[bd,:] = ip[bd,:]
etrat[bd,:] = 1.
logL = np.sum(logL)
dlogL = -np.dot(feat.T, etrat)
logL *= scl
dlogL *= scl
reg = lam*np.sum(x**2)
dreg = 2.*lam*x
return logL + reg, dlogL+dreg
class ICA:
"""
ICA with Student's t-experts on MNIST images.
"""
def __init__(self, num_subfunctions=100):
self.name = 'ICA'
# Load data
#X = load_cifar10_imagesonly()
X, _ = load_mnist()
# do PCA to eliminate degenerate dimensions and whiten
C = np.dot(X, X.T) / X.shape[1]
w, V = np.linalg.eigh(C)
# take only the non-negligible eigenvalues
mw = np.max(np.real(w))
max_ratio = 1e4
gd = np.nonzero(np.real(w) > mw/max_ratio)[0]
# # whiten
# P = V[gd,:]*(np.real(w[gd])**(-0.5)).reshape((-1,1))
# don't whiten -- make the problem harder
P = V[gd,:]
X = np.dot(P, X)
X /= np.std(X)
# break the data up into minibatches
self.subfunction_references = []
for mb in range(num_subfunctions):
self.subfunction_references.append(X[:, mb::num_subfunctions])
# compute the full objective on all the data
self.full_objective_references = self.subfunction_references
# # the subset of the data used to compute the full objective function value
# idx = random_choice(X.shape[1], 10000, replace=False)
# self.full_objective_references = (X[:,idx].copy(),)
## initialize parameters
num_dims = X.shape[0]
self.theta_init = {'W':np.random.randn(num_dims, num_dims)/np.sqrt(num_dims),
'logalpha':np.random.randn(num_dims,1).ravel()}
# rescale the objective and gradient so that the same hyperparameter ranges work for
# ICA as for the other objectives
self.scale = 1. / self.subfunction_references[0].shape[1] / 100.
def f_df(self, params, X):
"""
ICA objective function and gradient, using a Student's t-distribution prior.
The energy function has form:
E = \sum_i \alpha_i \log( 1 + (\sum_j W_{ij} x_j )^2 )
params is a dictionary containing the filters W and the log of the
X is the training data, with each column corresponding to a sample.
L is the average negative log likelihood.
dL is its gradient.
"""
W = params['W']
logalpha = params['logalpha'].reshape((-1,1))
alpha = np.exp(logalpha)+0.5
## calculate the energy
ff = np.dot(W, X)
ff2 = ff**2
off2 = 1 + ff2
lff2 = np.log( off2 )
alff2 = lff2 * alpha.reshape((-1,1))
E = np.sum(alff2)
## calculate the energy gradient
# could just sum instead of using the rscl 1s vector. this is functionality
# left over from MPF MATLAB code. May want it again in a future project though.
rscl = np.ones((X.shape[1],1))
lt = (ff/off2) * alpha.reshape((-1,1))
dEdW = 2 * np.dot(lt * rscl.T, X.T)
dEdalpha = np.dot(lff2, rscl)
dEdlogalpha = (alpha-0.5) * dEdalpha
## calculate log Z
nu = alpha * 2. - 1.
#logZ = -np.log(scipy.special.gamma((nu + 1.) / 2.)) + 0.5 * np.log(np.pi) + \
# np.log(scipy.special.gamma(nu/2.))
logZ = -scipy.special.gammaln((nu + 1.) / 2.) + 0.5 * np.log(np.pi) + \
scipy.special.gammaln((nu/2.))
logZ = np.sum(logZ)
## DEBUG slogdet has memory leak!
## eg, call "a = np.linalg.slogdet(random.randn(5000,5000))"
## repeatedly, and watch memory usage. So, we do this with an
## explicit eigendecomposition instead
## logZ += -np.linalg.slogdet(W)[1]
W2 = np.dot(W.T, W)
W2eig, _ = np.linalg.eig(W2)
logZ += -np.sum(np.log(W2eig))/2.
## calculate gradient of log Z
# log determinant contribution
dlogZdW = -np.linalg.inv(W).T
if np.min(nu) < 0:
dlogZdnu = np.zeros(nu.shape)
warnings.warn('not a normalizable distribution!')
E = np.inf
else:
dlogZdnu = -scipy.special.psi((nu + 1) / 2 )/2 + \
scipy.special.psi( nu/2 )/2
dlogZdalpha = 2. * dlogZdnu
dlogZdlogalpha = (alpha-0.5) * dlogZdalpha
## full objective and gradient
L = (E + logZ) * self.scale
dLdW = (dEdW + dlogZdW) * self.scale
dLdlogalpha = (dEdlogalpha + dlogZdlogalpha) * self.scale
ddL = {'W':dLdW, 'logalpha':dLdlogalpha.ravel()}
if not np.isfinite(L):
warnings.warn('numerical problems')
L = np.inf
return L, ddL
class DeepAE:
"""
Deep Autoencoder from <NAME>. and <NAME>. (2006)
"""
def __init__(self, num_subfunctions=50, num_dims=10, objective='l2'):
# don't introduce a Theano dependency until we have to
sampled, just use :func:`numpy.correlate`!
:INPUTS:
t -- (1D sequence) - time sampling of input data.
x, y -- (1D sequences) - input data.
Note that t, x, y should all have the same lengths!
:OPTIONAL INPUT:
zerolag -- (bool) - whether to compute DCF for zero-lag datapoints.
nbin -- (int) - number of computed correlation/lag values to average over
binwidth -- (float) - width of bins to average over, in units
of t. If set, this overrides nbin.
bins -- (sequence) - edges of bins to use in averaging DCF
values. If set, this overrides both nbin
and binwidth.
prebin -- (scalar) - factor by which to bin down initial data
and lag pairs. This translates into a
speed boost of about this same factor.
reterr -- (bool) - if True, also compute the uncertainty (standard error of the mean) of each binned DCF value.
:RETURNS:
meanlags -- average lag in each averaged bin
rdcf -- DCF value in each averaged bin
rdcf_err -- uncertainty in the DCF value for each averaged bin
:SEE ALSO: :func:`numpy.correlate`
:REQUIREMENTS: :doc:`analysis`, :doc:`numpy`
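:EXAMPLE:
A minimal sketch (illustrative only; it assumes this routine is bound to
the name ``dcf`` and uses synthetic, irregularly sampled data):
import numpy as np
t = np.sort(np.random.uniform(0., 100., 300))   # irregular time sampling
x = np.sin(2 * np.pi * t / 20.)
y = np.sin(2 * np.pi * (t - 3.) / 20.)          # shifted copy of x
meanlags, rdcf, rdcf_err = dcf(t, x, y, nbin=50, reterr=True)
# the DCF should peak at a lag magnitude of about 3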
"""
# 2010-11-12 IJC: Created
# 2010-11-17 09:25 IJC: Added 'bins' option
# 2011-03-22 13:54 IJC: Added 'prebin' option
# 2012-03-21 13:25 IJMC: Switched "import *" to "import array, " ... etc.
# 2014-12-19 08:21 IJMC: Added 'reterr' option
import analysis as an
#from numpy import array, meshgrid, nonzero, arange, argsort, isfinite
import pdb
t = np.array(t)
x = np.array(x, copy=True)
y = np.array(y, copy=True)
nx, ny = x.size, y.size
npts = max(nx, ny)
x -= x.mean()
y -= y.mean()
xx, yy = np.meshgrid(x, y)
sx = x.std()
sy = y.std()
# Generate data-pair indices:
if zerolag:
pairs1, pairs2 = np.nonzero(np.ones((npts, npts), bool))
else:
xind, yind = np.meshgrid(np.arange(npts), np.arange(npts))
pairs1, pairs2 = np.nonzero(xind != yind)  # exclude the zero-lag (i == j) pairs
del xind
del yind
# Compute lags:
tt, tt2 = np.meshgrid(t, t)
lags = (tt-tt2)[pairs1, pairs2]
del tt
del tt2
uij = (xx * yy)[pairs1, pairs2] / (sx * sy)
del xx
del yy
tind = np.argsort(lags)
lags = lags[tind]
uij = uij[tind]
del tind
#pdb.set_trace()
# The regular "DCF" uncertainty is just the standard deviation of the mean:
#if bins is not None:
# meanlags, rdcf, meanlags_width, rdcf_err = \
# errxy(lags, uij, bins, xerr='minmax', yerr='sdom')
#elif binwidth is None:
# meanlags, rdcf, meanlags_width, rdcf_err = \
# errxy(lags, uij, None, xerr='minmax', yerr='sdom', binfactor=nbin)
#else:
# bins = arange(lags.min(), lags.max() + binwidth, binwidth)
# meanlags, rdcf, meanlags_width, rdcf_err = \
# errxy(lags, uij, bins, xerr='minmax', yerr='sdom')
if prebin>1:
lags = an.binarray(lags, 2)
uij = an.binarray(uij, 2)
if reterr:
yerr = 'sdom'
else:
yerr = None
if bins is not None:
meanlags, rdcf, meanlags_width, rdcf_err = \
errxy(lags, uij, bins, xerr=None, yerr=yerr)
elif binwidth is None:
meanlags, rdcf, meanlags_width, rdcf_err = \
errxy(lags, uij, None, xerr=None, yerr=yerr, binfactor=nbin)
else:
bins = np.arange(lags.min(), lags.max() + binwidth, binwidth)
meanlags, rdcf, meanlags_width, rdcf_err = \
errxy(lags, uij, bins, xerr=None, yerr=yerr)
finite_ind = np.isfinite(meanlags) * np.isfinite(rdcf)
if reterr:
finite_ind *= np.isfinite(rdcf_err)
meanlags = meanlags[finite_ind]
rdcf = rdcf[finite_ind]
if reterr:
rdcf_err = rdcf_err[finite_ind]
return meanlags, rdcf, rdcf_err
def getfilelist(path='.', includes=[], excludes=[]):
"""Return a list of filenames meeting certain criteria.
:INPUTS:
path -- (str) path to directory to be scanned
includes -- (list of strs) -- all strings in this list must be
present in a filename to be returned
excludes -- (list of strs) -- any string in this list will
prevent a filename from being returned
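:EXAMPLE:
A minimal sketch (the directory and patterns are only illustrative):
fits_files = getfilelist('/data/raw', includes=['.fits'], excludes=['bad', 'test'])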
"""
# 2011-01-30 22:20 IJC: Created
import os
f = os.popen('ls %s/' % path)
filenames = [os.path.split(line)[1].strip() for line in f.readlines()]
f.close()
filtered_filenames = []
if len(includes)>0:
for fn in filenames:
string_found = True
for incstr in includes:
if fn.find(incstr)==-1:
string_found = False
if string_found:
filtered_filenames.append(fn)
if len(excludes)>0:
returned_filenames = []
for fn in filtered_filenames:
file_excluded = False
for excstr in excludes:
if fn.find(excstr)>-1:
file_excluded = True
if not file_excluded:
returned_filenames.append(fn)
else:
returned_filenames = filtered_filenames
return returned_filenames
def loadpickle(filename,mode='both'):
"""Load a pickle from a given filename. If it can't be loaded by
pickle, return -1 -- otherwise return the pickled object.
mode : str
'dill', 'pickle', or 'both' to try both
E.g.,
data = tools.loadpickle(filename)"""
# 2011-02-10 15:45 IJC: Created
# 2014-08-29 19:25 IJMC: First, try dill:
# 2014-12-31 17:03 IJMC: Added 'dill' vs 'pickle' option
if mode=='both':
import dill
import pickle
elif mode=='dill':
try:
import dill as pickle
except ImportError:
import pickle
elif mode=='pickle':
import pickle
good = True
try:
f = open(filename, 'rb')  # binary mode, so both pickle and dill can read it
except IOError:
print("Could not open file: %s" % filename)
good = False
if good:
try:
ret = pickle.load(f)
except Exception:
good = False
if mode=='both':
try:
f.seek(0)  # rewind: the failed pickle.load above may have advanced the file pointer
ret = dill.load(f)
good = True
except Exception:
print("Mode 'both' failed to load file: %s" % filename)
good = False
if not good:
print "Could not load pickle from %s" % filename
try:
f.close()
except:
print "Could not close file %s" % filename
good = False
if good:
pass
else:
ret = -1
return ret
def savepickle(obj, filename):
"""Save a pickle to a given filename. If it can't be saved by
pickle, return -1 -- otherwise return the file object.
To save multiple objects in one file, use (e.g.) a dict:
tools.savepickle(dict(a=[1,2], b='eggs'), filename)
"""
# 2011-05-21 11:22 IJMC: Created from loadpickle.
# 2011-05-28 09:36 IJMC: Added dict example
# 2014-08-27 12:47 IJMC: By default, try to use Dill:
try:
import dill as pickle
except ImportError:
import pickle
good = True
try:
f = open(filename, 'wb')
except IOError:
print("Could not open file %s for writing." % filename)
good = False
if good:
try:
ret = pickle.dump(obj, f)
except Exception:
print("Could not write object to pickle file: %s" % filename)
good = False
try:
f.close()
except Exception:
print("Could not close pickle file %s" % filename)
good = False
if good:
pass
else:
f = -1
return f
def dict2obj(dic):
"""Take an input Dict, and turn it into an object with fields
corresponding to the dict's keys.
:SEE_ALSO:
:func:`obj2dict`
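:EXAMPLE:
A minimal sketch (the keys and values are illustrative):
params = dict(teff=5800., logg=4.4)
obj = dict2obj(params)
print(obj.teff)   # -> 5800.0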
"""
# 2011-02-17 09:41 IJC: Created
from spitzer import baseObject
ret = baseObject()
if not isinstance(dic, dict):
print "Input was not a Python dict! Exiting."
else:
for key in dic.keys():
setattr(ret, key, dic[key])  # set each dict key as an attribute on the returned object
return ret
def obj2dict(object, ignore=('_',), verbose=False):
"""Convert an object into a dict. Ignore functions & methods, and
any attributes starting with the 'ignore' keys.
:SEE_ALSO:
:func:`dict2obj`
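:EXAMPLE:
A minimal sketch (continuing the :func:`dict2obj` example above):
d = obj2dict(dict2obj(dict(teff=5800., logg=4.4)))
# d is {'teff': 5800.0, 'logg': 4.4}; routines and '_'-prefixed attributes are skipped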
"""
# 2014-08-29 19:34 IJMC: Created.
import inspect
ret = dict()
for attribute in dir(object):
# skip any attribute whose name starts with one of the 'ignore' prefixes
parseIt = not any(attribute.startswith(ign) for ign in ignore)
if not parseIt and verbose:
print("Ignoring attribute '%s' in input object." % attribute)
if parseIt:
val = getattr(object, attribute)
if not inspect.isroutine(val):
ret[attribute] = val
elif verbose:
print "Attribute '%s' in input object is a routine; ignoring it.." % attribute
return ret
#def loadspectrum(filename, lambdascale=1., limits=None, skiprows=None, lamdacol=0, datacol=1 ):
# """Load a spectrum from a FITS or ASCII file, and return the
# wavelength and data in two vectors.#
#
# :INPUTS:
#
# filename (str) -- filename to load. If extension is "FITS"
# load using pyfits, otherwise try to load as
# an ASCII file.
#
# lambdacol (int) -- column number
def find_peaks(vec, sep=0, thresh=None):
"""
Find all large values in input vector that are separated by at least
'wid' pixels.
:INPUTS:
vec (sequence) -- 1D vector
sep (scalar) -- minimum separation of returned peaks
thresh (scalar) -- ignore all peaks lower than this value.
:EXAMPLE:
import pylab as py
import tools
x = py.linspace(0, 10, 100) # Generate fake time index
y = py.sin(6.28*x/10.) + py.sin(6.28*x/2.) # Generate fake data
peakvals, peaklocs = tools.find_peaks(y, sep=10) # Find the peaks
py.plot(x, y, '-', x[peaklocs], peakvals, 'or') # Plot them
:RETURNS:
peakvals, peakindices
"""
# 2011-03-22 14:54 IJC: Created
# 2012-08-09 22:51 IJMC: Added thresh option.
import numpy as np
try:
from pylab import find
except ImportError:
find = np.flatnonzero  # pylab.find was removed from matplotlib; flatnonzero is equivalent
import pdb
if thresh is None:
thresh = -np.inf
vec = np.array(vec)
npix = len(vec)
sep = np.floor(sep).astype(int)
available_index = np.ones(npix, dtype=bool)
<gh_stars>0
# coding: utf8
# Select or Add Option widget and Multiselect widget (at the end)
from gluon import *
import os
from gluon.utils import web2py_uuid
from gluon.sqlhtml import OptionsWidget
from gluon.sqlhtml import StringWidget
import uuid
from plugin_jschosen import jschosen_widget
T=current.T
selectaddpluscount=0
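# Illustrative usage sketch (not from the original source; the table and field
# names are hypothetical). The widget is assigned to a reference field in a
# web2py model, so the user can pick an existing record or add/edit one
# through a dialog:
#   db.define_table('person', Field('name'))
#   db.define_table('dog', Field('name'), Field('owner', 'reference person'))
#   db.dog.owner.widget = SelectOrAddOption(db.person,
#                                           controller='plugin_selectplus',
#                                           function='referenced_data')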
class SelectOrAddOption(object): #and even EDIT
def __init__(self, referenced_table, controller="plugin_selectplus", function="referenced_data", dialog_width=450,select_id=None):
self.referenced_table = referenced_table
self.controller = controller
self.function = function
self.dialog_width = dialog_width
self._my_select_id=select_id
self.filter=filter
def __call__(self, field, value):
#generate the standard widget for this field
#standar
#select_widget = OptionsWidget.widget(field, value)
#my_select_id = self._my_select_id or web2py_uuid()
#my_select_id = select_widget.attributes.get('_id', None)
#with jschosen
select_widget = jschosen_widget(field, value)
my_select_id = select_widget[0].attributes.get('_id', None)
wrapper = DIV(_id=my_select_id+"__reference-actions__wrapper", _class='input-group')
buttons=SPAN(_id=my_select_id+'_buttons',_class='input-group-btn')
wrapper.components.extend([select_widget])
#INVERCA customization: put the style on a single line, consistent with the other controls
sel=wrapper.element('select',_id=my_select_id)
sel['_class']+=' form-control'
#sel['_style']='width:auto; display:inline;'
#wrapper.element('select',_id=my_select_id)
style_icons = {'new':"icon plus icon-plus glyphicon glyphicon-plus", 'edit': "icon pen icon-pencil glyphicon glyphicon-pencil" }
actions = ['new']
if value: actions.append('edit') # if we already have selected value
for action in actions:
extra_args = [my_select_id,action,self.referenced_table._tablename ]
if action == 'edit':
extra_args.append(value)
#create a div that will load the specified controller via ajax
idcomp=my_select_id+'_'+action + '_dlgform'
form_loader_div = DIV(_id=idcomp, _title=T(action)+": "+self.referenced_table._singular.capitalize())
url=URL(c=self.controller,f=self.function,args=extra_args,user_signature=True)
#generate the "add/edit" button that will appear next the options widget and open our dialog
action_button = A([SPAN(_class=style_icons[action]), SPAN( _class="buttontext button") ],
_title=T(action), _id=my_select_id+"_option_%s_trigger"%action, _class="button btn btn-default", _style="vertical-align: middle" )
#create javascript for creating and opening the dialog
js = '$( "#%s" ).dialog({autoOpen: false, not__modal:true, show: "fade", hide: "fade", width: %s});' % (idcomp, self.dialog_width)
js += '$( "#%(id)s_option_%(action)s_trigger" ).click(function() { $( "#%(idcomp)s" ).dialog( "open"); web2py_component("%(url)s","%(idcomp)s"); return false;}); ' % dict(id=my_select_id, action=action, idcomp=idcomp,url=url )
if action=='edit':
# hide if reference changed - as load is constructed for initial value only (or would need some lazy loading mechanizm)
js += '$(function() {$("#%s").change(function() { $( "#%s_option_%s_trigger" ).hide(); } ) });' % (my_select_id, my_select_id, 'edit', )
#js to manually adjust the select width so it fills the available width; we inject it only once, on the add action
js='$(function(){%s});'%js
jq_script=SCRIPT(js, _type="text/javascript")
buttons.components.extend([action_button,form_loader_div, jq_script])
"""
js = '$(function() {$("#%(id)s").css("width",$("#%(id)s__reference-actions__wrapper").width() - 55 -$("#%(id)s_buttons").width());' \
'$("#%(id)s_option_new_trigger" ).css("margin-left", "+=5");}); ' % dict(id=my_select_id)
js = SCRIPT(js, _type="text/javascript")
"""
wrapper.components.extend([buttons])
return wrapper
class SelectTreeWidget(object):
_class='string form-control'
def __init__(self, request, mptt, field, id_field=None, db=None,
keyword='',
edit_option=False, filter=None, filterdata=None,
field_reference_tree=None, # indicates whether to load, as children of each category, the records of a master table that uses these categories
# children=[table, id field related to the mptt, description field of the table]
):
from plugin_jstree import JsTree
from gluon.globals import Response
from gluon.globals import Storage
self.mptt=mptt # object that manages the hierarchical structure for the tree
self.request = request
self._response = Response()
self._response._view_environment = current.globalenv.copy()
self._response._view_environment.update(
request=Storage(folder=os.path.join(os.path.dirname(os.path.dirname(self.request.folder)))),
response=self._response,
)
global selectaddpluscount
self.keyword = '_tree_%(key)s_%(tablename)s_%(fieldname)s' % dict(key=keyword,
tablename=field._tablename,
fieldname=field.name
)
# self.keyword='_complete_'+keyword
self.fields = [field]
self.edit_option = edit_option
self.add_option_value = 0
if self.edit_option:
self.keyword += '_edt'
if id_field:
self.is_reference = True
self.fields.append(id_field)
else:
self.is_reference = False
self.fields.append(field_reference_tree)
self.url = request
self.db = db or field._db
self.mptt=mptt
self._div_id = self.keyword + '_div'
if self.is_reference:
self._key2 = self.keyword + '_aux'
self._key3 = self.keyword + '_auto'
self.jstree = JsTree(tree_model=self.mptt, renderstyle=True,edit_option=edit_option,
selectcomponent=[self._key2, self._key3, self._div_id],table_children=self.fields if field_reference_tree else None,filter=filter,filterdata=filterdata)
else:
self.jstree = JsTree(tree_model=self.mptt, renderstyle=True, edit_option=edit_option, selectcomponent=[self.keyword, '', self._div_id],filter=filter,filterdata=filterdata)
def __call__(self, field, value, **attributes):
# ----------------------------------------------------------------------
# SelectTreeWidget
def script_show_parents(valor):
if value:
nodes = self.mptt.ancestors_from_node(valor, include_self=True).select()
lista = "[" + (','.join([str(node.id) for i, node in enumerate(nodes)])) + "]"
return lista
else:
return []
my_select_id = '%s_%s_%s' % (field._tablename, field.name, str(uuid.uuid4())[:8])
# generate the "dropdown" button that will appear next the options widget and open our dialog
style_icons = {'sel': "icon pen icon-pencil glyphicon glyphicon-triangle-bottom"}
wrapper = DIV(_id=my_select_id + "_sel_warpper", _class='form-inline')
# ----------------------------------------------------------------------
default = dict(
_type='text',
value=(not value is None and str(value)) or '',
)
attr = StringWidget._attributes(field, default, **attributes)
attr['_seltree'] = 'off'
if self.is_reference:
url = None
attr['_class'] = self._class
name = attr['_name']
if 'requires' in attr: del attr['requires']
attr['_autoid'] = self._key2
attr['_name'] = self._key2
value = attr['value']
valor = value
if self.fields[2]: # this field type sets up a tree-based lookup
record = self.db(self.fields[1] == value).select(self.fields[0],self.fields[2]).first()
valor=record and record[self.fields[2].name]
else:
record = self.db(self.fields[1] == value).select(self.fields[0]).first()
attr['value'] = record and record[self.fields[0].name]
attr['_onblur'] = "if ($('#%s').val())" \
"div_Show('%s',false);" % (self._key3,self._div_id)
attr['_onfocus'] = "div_Show('%s',true);" % self._div_id
# attr['_style']='width:80%;'
# button action
actions = ['sel']
action_button = SPAN(_id=my_select_id + '_buttons',
*[A([SPAN(_class=style_icons[action]), SPAN(_class="buttontext button")],
_title=T(action), _id=my_select_id + "_option_%s_trigger" % action,
_class="button btn btn-default", _style="vertical-align:middle") for action in
actions])
dic = dict(key=self.keyword,
id=attr['_id'], key2=self._key2, key3=self._key3,
name=name,
div_id=self._div_id,
field=self.fields[0].name,
script_show_parents=script_show_parents(valor),
edit=self.edit_option,
my_select_id=my_select_id,
fileid=value if self.fields[2] else '')
jq_script = XML(self._response.render('plugin_selectplus/selectplus_tree_js.html', dic))
wrapper.components.extend([TAG[''](INPUT(**attr), action_button,
INPUT(_type='hidden',
_id=self._key3,
_value=value,
_name=name,
requires=field.requires),
DIV(self.jstree(),_id=self._div_id,
_style='padding: 12 0 0 0px;position:absolute;'),
jq_script)])
return wrapper
else: # no reference, no id field
attr['_name'] = field.name
attr['_autoid'] = self.keyword
attr['_onblur'] = "jQuery('#%s').delay(500).fadeOut();" % self._div_id
attr['_class'] = self._class
dic = dict(key=self.keyword, id=attr['_id'], div_id=self._div_id, key2=self.keyword,
field=self.fields[0].name,
my_select_id=my_select_id)
jq_script = XML(self._response.render('plugin_selectplus/selectplustree_js.html', dic))
wrapper.components.extend([TAG[''](INPUT(**attr),
DIV(self.jstree(),_id=self._div_id,
_style='padding: 12 0 0 0px;position:absolute;')),
jq_script])
return wrapper
class AutocompleteWidgetSelectOrAddOption(object):
_class = 'string form-control'
def __init__(self, request, field, id_field=None, db=None,
orderby=None, maxresults=10,
keyword='',
min_length=2,
# -------------------------------------------------------------
# SelectOrAddOption
controller='plugin_selectplus',
function='referenced_data',
form_title=None,
button_text = None, dialog_width=1000,
multi=False,sep='@ ',
add_option=True
# -------------------------------------------------------------
):
self.request = request
from gluon.globals import Response
from gluon.globals import Storage
self._response = Response()
self._response._view_environment = current.globalenv.copy()
self._response._view_environment.update(
request=Storage(folder=os.path.join(os.path.dirname(os.path.dirname(self.request.folder)))),
response=self._response,
)
global selectaddpluscount
self.keyword='_complete_%(key)s_%(dif)s_%(tablename)s_%(fieldname)s' % dict(key=keyword,tablename=field._tablename,fieldname=field.name,dif=request.args[-1] if len(request.args) else '')
#self.keyword='_complete_'+keyword
self.db = db or field._db
self.orderby = orderby or field
self.maxresults = maxresults
self.offset=0
self.min_length = min_length
self.fields=[field]
self._multi=multi
self._sep=sep
self.controller=controller
self.function=function
self.dialog_width = dialog_width
self.add_option=add_option
self.add_option_value=0
if self.add_option:
self.keyword+='_add'
if id_field:
self.is_reference = True
self.fields.append(id_field)
else:
self.is_reference = False
if hasattr(request,'application'):
self.url = URL(args=request.args)
url=self.url
self.callback()
else:
self.url = request
# ----------------------------------------------------------------------
# SelectOrAddOption
if form_title == None:
self.form_title = self.db[self.fields[0]._tablename]._singular
else:
self.form_title = T(form_title)
if button_text == None:
self.button_text = T('Add')
else:
self.button_text = T(button_text)
# ----------------------------------------------------------------------
def callback(self):
if self.keyword in self.request.vars:
field = self.fields[0]
self.offset=int(self.request.vars.pg or 1)-1
rows = self.db(field.like('%'+self.request.vars[self.keyword]+'%'))\
.select(orderby=self.orderby,
limitby=(self.offset,self.maxresults+self.offset),*self.fields)
#The decode/encode round-trip is needed so non-English characters are not shown as replacement-character question marks
if '_add' in self.keyword:
if self.is_reference:
Add=[OPTION('--%s "%s"'%(T('Add'),self.request.vars[self.keyword].decode('latin-1').encode('utf8')),_value=self.add_option_value,_id='selectadd')]
else:
Add=[OPTION('--%s "%s"'%(T('Add'),self.request.vars[self.keyword].decode('latin-1').encode('utf8')), _id='selectadd')]
else:
Add=[]
if rows:
if self.is_reference:
id_field = self.fields[1]
opciones=[OPTION(s[field.name],_value=s[id_field.name],_selected=(k==0)) for k,s in enumerate(rows)]
else:
opciones=[OPTION(s[field.name],_selected=(k==0)) \
for k,s in enumerate(rows)]
if len(rows) == self.maxresults:
opciones += [OPTION('Más>>',_class='icon glyphicon glyphicon-arrow-down',_value=-(self.offset+self.maxresults+1),_id='selectnext')]
if self.offset >0:
opciones = [OPTION('<<Menos',_class='icon glyphicon glyphicon-arrow-up', _value=-(self.offset - self.maxresults+1), _id='selectprev')]+opciones
opciones+=Add
raise HTTP(200,SELECT(_id=self.keyword,_class='autocomplete',
_size=len(opciones),_multiple=(len(rows)==1),
*opciones).xml())
else:
raise HTTP(200,SELECT(_id=self.keyword,_class='autocomplete',
_size=len(Add),_multiple=True,
*Add).xml())
def __call__(self,field,value,**attributes):
# ----------------------------------------------------------------------
# SelectOrAddOption
my_select_id = '%s_%s_%s' % (field._tablename, field.name,str(uuid.uuid4())[:8])
#create a div that will load the specified controller via ajax
idcomp=my_select_id+"_dialog-form"
form_loader_div = DIV(_id=idcomp, _title=self.form_title)
#generate the "add" button that will appear next the options widget and open our dialog
style_icons = {'add':"icon plus icon-plus glyphicon glyphicon-plus", 'edit': "icon pen icon-pencil glyphicon glyphicon-pencil" }
wrapper = DIV(_id=my_select_id+"_adder_wrapper")
# ----------------------------------------------------------------------
default = dict(
_type = 'text',
value = (not value is None and str(value)) or '',
)
attr = StringWidget._attributes(field, default, **attributes)
div_id = self.keyword+'_div'
attr['_autocomplete']='off'
if self.is_reference:
key2 = self.keyword+'_aux'
key3 = self.keyword+'_auto'
if self.add_option:
add_args = [key3,'new',self.fields[0]._tablename] #this is for the case where a controller function handles it
urladd=URL(c=self.controller,f=self.function,args=add_args,user_signature=True)
add_args=[key3,'edit',self.fields[0]._tablename,value]
urledit=URL(c=self.controller,f=self.function,args=add_args,user_signature=True)
else:
url=None
attr['_class']=self._class
name = attr['_name']
if 'requires' in attr: del attr['requires']
attr['_autoid'] = key2
attr['_name']=key2
value = attr['value']
record = self.db(self.fields[1]==value).select(self.fields[0]).first()
attr['value'] = record and record[self.fields[0].name]
attr['_onblur']="jQuery('#%(div_id)s').delay(500).fadeOut();" % \
dict(div_id=div_id,u='F'+self.keyword)
#attr['_style']='width:80%;'
#button action
actions = ['add']
if value: actions.append('edit')
action_button = SPAN(_id=my_select_id+'_buttons',_class='input-group-btn',*[A([SPAN(_class=style_icons[action]), SPAN( _class="buttontext button") ],
_title=T(action), _id=my_select_id+"_option_%s_trigger"%action, _class="button btn btn-default", _style="vertical-align:middle" ) for action in actions])
dic=dict(url=self.url,
min_length=self.min_length,
key=self.keyword,
id=attr['_id'],key2=key2,key3=key3,
name=name,
div_id=div_id,
u='F'+self.keyword,
idcomp=idcomp,
urlcomp=urladd,urledit=urledit,
field=self.fields[0].name,
my_select_id=my_select_id,
dialog_width=self.dialog_width,
tablename=self.db[self.fields[0]._tablename]._singular)
jq_script=XML(self._response.render('plugin_selectplus/selectplus_js.html',dic))
if self.min_length==0:
attr['_onfocus'] = attr['_onkeyup']
wrapper.components.extend([DIV([INPUT(**attr),action_button,
INPUT(_type='hidden',_id=key3,_value=value,_name=name,requires=field.requires)],_class='input-group'),
DIV(_id=div_id,_style='padding: 12 0 0 0px;position:absolute;'),
form_loader_div,jq_script])
return wrapper
else:
if self.add_option:
add_args = [my_select_id,'new',self.fields[0]._tablename]
urladd=URL(c=self.controller,f=self.function,args=add_args,user_signature=True)
add_args= [my_select_id,'edit',self.fields[0]._tablename,value]
urledit=URL(c=self.controller,f=self.function,args=add_args,user_signature=True)
else:
urladd=None
urledit=None
attr['_name']=field.name
attr['_autoid'] =self.keyword
attr['_onblur']="jQuery('#%(div_id)s').delay(500).fadeOut();" % \
dict(div_id=div_id,u='F'+self.keyword)
attr['_class']=self._class
dic=dict(url=self.url,min_length=self.min_length,
key=self.keyword,id=attr['_id'], div_id=div_id,key2=self.keyword,
u='F'+self.keyword,idcomp=idcomp,urlcomp=urladd,urledit=urledit,
field=self.fields[0].name,sep=self._sep,
my_select_id=my_select_id,
dialog_width=self.dialog_width,
tablename=self.db[self.fields[0]._tablename]._singular)
if self._multi:
jq_script=XML(self._response.render('plugin_selectplus/selectplus3_js.html',dic))
else:
jq_script=XML(self._response.render('plugin_selectplus/selectplus2_js.html',dic))
if self.min_length==0:
attr['_onfocus'] = attr['_onkeyup']
wrapper.components.extend([TAG[''](INPUT(**attr),
DIV(_id=div_id,
_style='padding: 12 0 0 0px;position:absolute;')),
form_loader_div,jq_script])
return wrapper
"""
# Multiselect widget from http://www.web2pyslices.com/slice/show/1395/jquery-multi-select-widget
#Define a field like this:
db.tabla.campo.requires = IS_IN_SET(['Apples','Oranges','Bananas','Kiwis','Lemons'],multiple=True)
#and assign it
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import copy
from dace import properties, symbolic
import dace.library
import dace.sdfg.nodes
from dace.sdfg import SDFG, SDFGState
from dace import memlet as mm, data as dt
from dace.transformation.transformation import ExpandTransformation
from dace.libraries.blas.nodes.matmul import _get_matmul_operands
from dace.libraries.blas import blas_helpers
from dace.frontend.common import op_repository as oprepo
from dace.libraries.blas import environments
import numpy as np
import warnings
@dace.library.expansion
class ExpandGemvPure(ExpandTransformation):
environments = []
@staticmethod
def expansion(node, parent_state, parent_sdfg, **kwargs):
node.validate(parent_sdfg, parent_state)
sdfg = dace.SDFG(node.label + "_sdfg")
((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x,
shape_x, strides_x),
(edge_y, outer_array_y, shape_y,
strides_y)) = _get_matmul_operands(node,
parent_state,
parent_sdfg,
name_lhs="_A",
name_rhs="_x",
name_out="_y")
dtype_a = outer_array_a.dtype.type
dtype_x = outer_array_x.dtype.type
dtype_y = outer_array_y.dtype.type
if outer_array_a.dtype.veclen > 1 or outer_array_x.dtype.veclen > 1:
raise NotImplementedError("Vectorization for pure GEMV NYI.")
if node.transA:
trans_shape_a = list(reversed(shape_a))
else:
trans_shape_a = shape_a
if trans_shape_a[1] != shape_x[0]:
raise SyntaxError(
"Matrix-vector product size mismatch: {} vs. {}".format(
trans_shape_a[1], shape_x[0]))
N, M = trans_shape_a[0], trans_shape_a[1]
if outer_array_a.storage != outer_array_x.storage:
raise ValueError("Input matrices must have same storage")
storage = outer_array_a.storage
_, array_a = sdfg.add_array("_A",
shape_a,
dtype_a,
strides=strides_a,
storage=storage)
_, array_x = sdfg.add_array("_x",
shape_x,
dtype_x,
strides=strides_x,
storage=storage)
_, array_y = sdfg.add_array("_y",
shape_y,
dtype_y,
strides=strides_y,
storage=storage)
mul_program = "__out = {} * __A * __x".format(node.alpha)
init_state = sdfg.add_state(node.label + "_initstate")
state = sdfg.add_state_after(init_state, node.label + "_state")
if node.beta == 0:
mul_out, mul_out_array = "_y", array_y
output_nodes = None
else:
mul_out, mul_out_array = tmp, array_tmp = sdfg.add_temp_transient(
shape_y, dtype_y, storage=storage)
access_tmp = state.add_read(tmp)
output_nodes = {mul_out: access_tmp}
# Initialization map
init_state.add_mapped_tasklet(
"gemv_init", {
"_o%d" % i: "0:%s" % symbolic.symstr(d)
for i, d in enumerate(shape_y)
}, {},
"out = 0", {
"out":
dace.Memlet("{}[{}]".format(
mul_out, ",".join(["_o%d" % i
for i in range(len(shape_y))])))
},
external_edges=True)
# Multiplication map
state.add_mapped_tasklet("_GEMV_", {
"__i%d" % i: "0:%s" % s
for i, s in enumerate([N, M])
}, {
"__A":
dace.Memlet(
"_A[{}]".format("__i1, __i0" if node.transA else "__i0, __i1")),
"__x":
dace.Memlet("_x[__i1]")
},
mul_program, {
"__out":
dace.Memlet(f"{mul_out}[__i0]",
wcr="lambda x, y: x + y")
},
external_edges=True,
output_nodes=output_nodes)
add_program = "__y_out = ({} * __y_in) + __tmp".format(node.beta)
memlet_idx = "__i"
# addition map
if node.beta != 0:
state.add_mapped_tasklet("_Add_", {"__i": "0:{}".format(N)}, {
"__y_in": dace.Memlet(f"_y[{memlet_idx}]"),
"__tmp": dace.Memlet(f"{mul_out}[__i]"),
},
add_program,
{"__y_out": dace.Memlet("_y[__i]")},
external_edges=True,
input_nodes={mul_out: access_tmp})
return sdfg
@dace.library.expansion
class ExpandGemvFpgaAccumulate(ExpandTransformation):
"""
This FPGA-oriented expansion iterates over the input matrix A in simple
row-major order, with optional tiling in both dimensions, where the tiles
are also traversed in simple row-major order. This means that y is only
written once, but x is read for every tile in the y-dimension.
The implementation requires accumulation on the output, and does NOT assume
native accumulation for the given data type. Instead it uses multiple
partial sums to ensure that II=1, and only writes the final accumulated
value once it has been combined from the partial sums.
This works for both transposed and non-transposed A, but vectorization is
only implemented for non-transposed A.
"""
# The above corresponds to gemv_v1 in FBLAS
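# Illustrative sketch (comments only, not part of the expansion): the
# partial-sum trick described above, written in plain Python/NumPy. Using
# num_partial_sums independent accumulators breaks the loop-carried
# dependency on a single accumulator, which is what lets the pipeline reach
# II=1; the partial sums are combined only once per output element.
#   acc = np.zeros(num_partial_sums)
#   for ix in range(tile_size_x):
#       acc[ix % num_partial_sums] += A[iy, ix] * x[ix]
#   y[iy] = acc.sum()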
environments = []
@staticmethod
def expansion(node,
parent_state,
parent_sdfg,
tile_size_x=None,
tile_size_y=None,
num_partial_sums=16):
"""
:param node: Node to expand.
:param parent_state: State that the node is in.
:param parent_sdfg: SDFG that the node is in.
:param tile_size_x: Tile size along the dimension of the vector x. If
set to None, no tiling is used, corresponding to
setting the tile size equal to the full size of x.
:param tile_size_y: Tile size along the dimension of the vector y. If
set to None, no tiling is used, corresponding to
setting the tile size equal to the full size of y.
:param num_partial_sums: The number of distinct registers to accumulate
contributions to the final sum into. Should be
a power of two, and should be higher than the
latency of adding two numbers of the given
data type.
"""
node.validate(parent_sdfg, parent_state)
for e in parent_state.in_edges(node):
if e.dst_conn == "_A":
desc_a = parent_sdfg.arrays[e.data.data]
elif e.dst_conn == "_x":
desc_x = parent_sdfg.arrays[e.data.data]
for e in parent_state.out_edges(node):
if e.src_conn == "_y":
desc_y = parent_sdfg.arrays[e.data.data]
sdfg = dace.SDFG("gemv")
state = sdfg.add_state("gemv")
alpha = node.alpha
beta = node.beta
# Create local versions of input data nodes
desc_a = desc_a.clone()
desc_a.transient = False
sdfg.add_datadesc("_A", desc_a)
desc_x = desc_x.clone()
desc_x.transient = False
sdfg.add_datadesc("_x", desc_x)
desc_y = desc_y.clone()
desc_y.transient = False
sdfg.add_datadesc("_y", desc_y)
if node.transA and desc_a.dtype.veclen > 1:
raise NotImplementedError(
"Vectorization not implemented for transposed A.")
# Create accesses
read_a = state.add_read("_A")
read_x = state.add_read("_x")
if beta != 0:
read_y = state.add_read("_y")
write_y = state.add_write("_y")
size_x = desc_x.shape[0]
size_y = desc_y.shape[0]
if tile_size_x is None:
tile_size_x = size_x
if tile_size_y is None:
tile_size_y = size_y
num_tiles_y = f"{size_y}/{tile_size_y}"
num_tiles_x = f"{size_x}/{tile_size_x}"
veclen = desc_a.dtype.veclen
# Create tile map
y_tile_entry, y_tile_exit = state.add_map(
"y_tiles", {"ty": f"0:{num_tiles_y}"},
schedule=dace.ScheduleType.FPGA_Device)
x_tile_entry, x_tile_exit = state.add_map(
"x_tiles", {"tx": f"0:{num_tiles_x}"},
schedule=dace.ScheduleType.FPGA_Device)
# Create y map
y_entry, y_exit = state.add_map("y", {"iy": f"0:{tile_size_y}"},
schedule=dace.ScheduleType.FPGA_Device)
# Create x map
x_entry, x_exit = state.add_map("x", {"ix": f"0:{tile_size_x}"},
schedule=dace.ScheduleType.FPGA_Device)
# Local buffer of x
sdfg.add_array("x_local", (tile_size_x, ),
desc_x.dtype,
storage=dace.StorageType.FPGA_Local,
transient=True)
x_local_access = state.add_read("x_local")
if beta != 0:
raise NotImplementedError("Not yet implemented.")
multiply_tasklet = state.add_tasklet("multiply", {"A_in", "x_in"},
{f"product": desc_a.dtype},
"product = A_in * x_in")
if isinstance(desc_a, dt.Stream):
subset = "0"
elif node.transA:
subset = f"tx * {tile_size_x} + ix, ty * {tile_size_y} + iy"
else:
subset = f"ty * {tile_size_y} + iy, tx * {tile_size_x} + ix"
state.add_memlet_path(read_a,
y_tile_entry,
x_tile_entry,
y_entry,
x_entry,
multiply_tasklet,
dst_conn="A_in",
memlet=dace.Memlet(f"_A[{subset}]"))
read_x_entry, read_x_exit = state.add_map(
"read_x", {"ix": f"0:{tile_size_x}"},
schedule=dace.ScheduleType.FPGA_Device)
subset = ("0" if isinstance(desc_x, dt.Stream) else
f"tx*{tile_size_x} + ix")
read_x_tasklet = state.add_tasklet("read_x", {"x_memory"}, {"x_buffer"},
"x_buffer = x_memory")
state.add_memlet_path(read_x,
y_tile_entry,
x_tile_entry,
read_x_entry,
read_x_tasklet,
dst_conn="x_memory",
memlet=dace.Memlet(f"_x[{subset}]"))
state.add_memlet_path(read_x_tasklet,
read_x_exit,
x_local_access,
src_conn="x_buffer",
memlet=dace.Memlet(f"x_local[ix]"))
state.add_memlet_path(x_local_access,
y_entry,
x_entry,
multiply_tasklet,
dst_conn="x_in",
memlet=dace.Memlet(f"x_local[ix]"))
# Write to buffer
sdfg.add_array("product_vector", (1, ),
desc_a.dtype,
transient=True,
storage=dace.StorageType.FPGA_Local)
product_vector = state.add_access("product_vector")
state.add_memlet_path(multiply_tasklet,
product_vector,
src_conn="product",
memlet=dace.Memlet(f"product_vector[0]"))
# Vector length conversion
sdfg.add_array("product_scalar", (veclen, ),
desc_a.dtype.base_type,
transient=True,
storage=dace.StorageType.FPGA_Local)
product_scalar = state.add_access("product_scalar")
state.add_memlet_path(product_vector,
product_scalar,
memlet=dace.Memlet(f"product_vector[0]",
other_subset=f"0:{veclen}"))
# Now we need to collapse this
reduce_vector_entry, reduce_vector_exit = state.add_map(
"reduce_vector", {"u": f"0:{veclen}"},
schedule=dace.ScheduleType.FPGA_Device,
unroll=True)
reduce_vector_tasklet = state.add_tasklet(
"reduce_vector", {"product_in", "acc_in"}, {"acc_out"},
"acc_out = product_in + acc_in")
state.add_memlet_path(product_scalar,
reduce_vector_entry,
reduce_vector_tasklet,
dst_conn="product_in",
memlet=dace.Memlet(f"{product_scalar}[u]"))
# Add accumulation register
sdfg.add_array("accumulate_product", (1, ),
desc_a.dtype.base_type,
transient=True,
storage=dace.StorageType.FPGA_Local)
accumulate_product_read = state.add_access("accumulate_product")
accumulate_product_write = state.add_access("accumulate_product")
# Initialize it to zero
init_reduce_vector_tasklet = state.add_tasklet("init_reduce_vector", {},
{"acc_out"},
"acc_out = 0")
state.add_memlet_path(x_entry,
init_reduce_vector_tasklet,
memlet=dace.Memlet())
state.add_memlet_path(init_reduce_vector_tasklet,
accumulate_product_read,
src_conn="acc_out",
memlet=dace.Memlet(f"accumulate_product[0]"))
# Connect it to the tasklet
state.add_memlet_path(accumulate_product_read,
reduce_vector_entry,
reduce_vector_tasklet,
dst_conn="acc_in",
memlet=dace.Memlet(f"accumulate_product[0]"))
state.add_memlet_path(reduce_vector_tasklet,
reduce_vector_exit,
accumulate_product_write,
src_conn="acc_out",
memlet=dace.Memlet(f"accumulate_product[0]"))
# Partial sums
sdfg.add_array("partial_sums", (num_partial_sums, ),
desc_y.dtype,
storage=dace.StorageType.FPGA_Registers,
transient=True)
partial_sum_read = state.add_read("partial_sums")
partial_sum_write = state.add_access("partial_sums")
# Output array
sdfg.add_array("y_local", (tile_size_y, ),
desc_y.dtype,
storage=dace.StorageType.FPGA_Local,
transient=True)
# Now we need to actually accumulate into a local register of y
y_local_read = state.add_read("y_local")
y_local_write = state.add_read("y_local")
update_y_tasklet = state.add_tasklet(
"update_y", {"y_in", "acc_in"}, {"acc_out"}, f"""\
prev = acc_in if ix >= {num_partial_sums} else 0
acc_out = prev + y_in""")
state.add_memlet_path(accumulate_product_write,
update_y_tasklet,
dst_conn="y_in",
memlet=dace.Memlet(f"accumulate_product[0]"))
state.add_memlet_path(
partial_sum_read,
x_entry,
update_y_tasklet,
dst_conn="acc_in",
memlet=dace.Memlet(f"partial_sums[ix%{num_partial_sums}]"))
state.add_memlet_path(y_tile_entry, y_local_read, memlet=dace.Memlet())
state.add_memlet_path(y_entry, partial_sum_read, memlet=dace.Memlet())
state.add_memlet_path(
update_y_tasklet,
x_exit,
partial_sum_write,
src_conn="acc_out",
memlet=dace.Memlet(f"partial_sums[ix%{num_partial_sums}]"))
# Reduce the partial sums
reduce_sums_entry, reduce_sums_exit = state.add_map(
"reduce_partial_sums", {"u": f"0:{num_partial_sums}"},
schedule=dace.ScheduleType.FPGA_Device,
unroll=True)
reduce_sums_tasklet = state.add_tasklet(
"reduce_partial_sums", {"sum_in", "val_in"}, {"sum_out"}, """
prev = sum_in if u > 0 else 0
sum_out = prev + val_in""")
sdfg.add_array("accumulate_sum", (1, ),
desc_y.dtype,
transient=True,
storage=dace.StorageType.FPGA_Local)
accumulate_sum_read = state.add_access("accumulate_sum")
accumulate_sum_write = state.add_access("accumulate_sum")
state.add_memlet_path(y_entry,
accumulate_sum_read,
memlet=dace.Memlet())
state.add_memlet_path(accumulate_sum_read,
reduce_sums_entry,
reduce_sums_tasklet,
dst_conn="sum_in",
memlet=dace.Memlet("accumulate_sum[0]"))
state.add_memlet_path(reduce_sums_tasklet,
reduce_sums_exit,
accumulate_sum_write,
src_conn="sum_out",
memlet=dace.Memlet("accumulate_sum[0]"))
state.add_memlet_path(partial_sum_write,
reduce_sums_entry,
reduce_sums_tasklet,
dst_conn="val_in",
memlet=dace.Memlet("partial_sums[u]"))
# Combine with y buffer
combine_tasklet = state.add_tasklet(
"combine_y", {"val", "buffer_in"}, {"buffer_out"}, """\
prev = buffer_in if tx > 0 else 0
buffer_out = prev + val""")
state.add_memlet_path(accumulate_sum_write,
combine_tasklet,
dst_conn="val",
memlet=dace.Memlet("accumulate_sum[0]"))
state.add_memlet_path(y_local_read,
x_tile_entry,
y_entry,
combine_tasklet,
dst_conn="buffer_in",
memlet=dace.Memlet("y_local[iy]"))
state.add_memlet_path(combine_tasklet,
y_exit,
x_tile_exit,
y_local_write,
src_conn="buffer_out",
memlet=dace.Memlet(f"y_local[iy]"))
subset = ("0" if isinstance(desc_y, dt.Stream) else
f"ty*{tile_size_y} + iy")
write_y_entry, write_y_exit = state.add_map(
"write_y", {"iy": f"0:{tile_size_y}"},
schedule=dace.ScheduleType.FPGA_Device)
write_y_tasklet = state.add_tasklet("write_y", {"y_buffer"},
{"y_memory"}, "y_memory = y_buffer")
state.add_memlet_path(y_local_write,
write_y_entry,
write_y_tasklet,
dst_conn="y_buffer",
memlet=dace.Memlet(f"y_local[iy]"))
state.add_memlet_path(write_y_tasklet,
write_y_exit,
y_tile_exit,
write_y,
src_conn="y_memory",
memlet=dace.Memlet(f"_y[{subset}]"))
return sdfg
@dace.library.expansion
class ExpandGemvFpgaTilesByColumn(ExpandTransformation):
"""
FPGA-oriented expansion that reads the input matrix A in column-major
order, such that consecutive values are accumulated into different
registers, avoiding a loop-carried dependency due to accumulation.
The matrix can optionally be tiled, where the tiles
the interpolation.
"""
tr = read()[0]
tr.stats.sampling_rate = 1.0
tr.data = tr.data[:500]
tr.interpolate(method="lanczos", sampling_rate=10.0, a=20)
tr.stats.sampling_rate = 1.0
tr.data = tr.data[:500]
tr.stats.starttime = UTCDateTime(0)
org_tr = tr.copy()
# Now this does not do much for now but actually just shifts the
# samples.
tr.interpolate(method="lanczos", sampling_rate=1.0, a=1,
time_shift=0.2)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 0.2)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime + 0.2)
np.testing.assert_allclose(tr.data, org_tr.data, atol=1E-9)
tr.interpolate(method="lanczos", sampling_rate=1.0, a=1,
time_shift=0.4)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 0.6)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime + 0.6)
np.testing.assert_allclose(tr.data, org_tr.data, atol=1E-9)
tr.interpolate(method="lanczos", sampling_rate=1.0, a=1,
time_shift=-0.6)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime)
np.testing.assert_allclose(tr.data, org_tr.data, atol=1E-9)
# This becomes more interesting when also fixing the sample
# positions. Then one can shift by subsample accuracy while leaving
# the sample positions intact. Note that there naturally are some
# boundary effects and as the interpolation method does not deal
# with any kind of extrapolation you will lose the first or last
# samples.
# This is a fairly extreme example but of course there are errors
# when doing an interpolation - a shift using an FFT is more accurate.
tr.interpolate(method="lanczos", sampling_rate=1.0, a=50,
starttime=tr.stats.starttime + tr.stats.delta,
time_shift=0.2)
# The sample point did not change but we lost the first sample,
# as we shifted towards the future.
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 1.0)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime)
# The data naturally also changed.
self.assertRaises(AssertionError, np.testing.assert_allclose,
tr.data, org_tr.data[1:], atol=1E-9)
# Shift back. This time we will lose the last sample.
tr.interpolate(method="lanczos", sampling_rate=1.0, a=50,
starttime=tr.stats.starttime,
time_shift=-0.2)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 1.0)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime - 1.0)
# But the data (aside from edge effects - we are going forward and
# backwards again so they go twice as far!) should now again be the
# same as we started out with.
np.testing.assert_allclose(
tr.data[100:-100], org_tr.data[101:-101], atol=1E-9, rtol=1E-4)
def test_interpolation_arguments(self):
"""
Test case for the interpolation arguments.
"""
tr = read()[0]
tr.stats.sampling_rate = 1.0
tr.data = tr.data[:50]
for inter_type in ["linear", "nearest", "zero", "slinear",
"quadratic", "cubic", 1, 2, 3,
"weighted_average_slopes"]:
# If only the sampling rate is specified, the end time will be very
# close to the original end time but never bigger.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type)
self.assertEqual(tr.stats.starttime, interp_tr.stats.starttime)
self.assertTrue(tr.stats.endtime >= interp_tr.stats.endtime >=
tr.stats.endtime - (1.0 / 0.3))
# If the starttime is modified the new starttime will be used but
# the end time will again be modified as little as possible.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type,
starttime=tr.stats.starttime +
5.0)
self.assertEqual(tr.stats.starttime + 5.0,
interp_tr.stats.starttime)
self.assertTrue(tr.stats.endtime >= interp_tr.stats.endtime >=
tr.stats.endtime - (1.0 / 0.3))
# If npts is given it will be used to modify the end time.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type, npts=10)
self.assertEqual(tr.stats.starttime,
interp_tr.stats.starttime)
self.assertEqual(interp_tr.stats.npts, 10)
# If npts and starttime are given, both will be modified.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type,
starttime=tr.stats.starttime +
5.0, npts=10)
self.assertEqual(tr.stats.starttime + 5.0,
interp_tr.stats.starttime)
self.assertEqual(interp_tr.stats.npts, 10)
# An earlier starttime will raise an exception. No extrapolation
# is supported
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=1.0,
starttime=tr.stats.starttime - 10.0)
# As will too many samples that would overstep the end time bound.
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=1.0,
npts=tr.stats.npts * 1E6)
# A negative or zero desired sampling rate should raise.
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=0.0)
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=-1.0)
def test_resample_new(self):
"""
Tests if Trace.resample works as expected and test that issue #857 is
resolved.
"""
starttime = UTCDateTime("1970-01-01T00:00:00.000000Z")
tr0 = Trace(np.sin(np.linspace(0, 2 * np.pi, 10)),
{'sampling_rate': 1.0,
'starttime': starttime})
# downsample
tr = tr0.copy()
tr.resample(0.5, window='hanning', no_filter=True)
self.assertEqual(len(tr.data), 5)
expected = np.array([0.19478735, 0.83618307, 0.32200221,
-0.7794053, -0.57356732])
self.assertTrue(np.all(np.abs(tr.data - expected) < 1e-7))
self.assertEqual(tr.stats.sampling_rate, 0.5)
self.assertEqual(tr.stats.delta, 2.0)
self.assertEqual(tr.stats.npts, 5)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# upsample
tr = tr0.copy()
tr.resample(2.0, window='hanning', no_filter=True)
self.assertEqual(len(tr.data), 20)
self.assertEqual(tr.stats.sampling_rate, 2.0)
self.assertEqual(tr.stats.delta, 0.5)
self.assertEqual(tr.stats.npts, 20)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample with non integer ratio
tr = tr0.copy()
tr.resample(0.75, window='hanning', no_filter=True)
self.assertEqual(len(tr.data), int(10 * .75))
expected = np.array([0.15425413, 0.66991128, 0.74610418, 0.11960477,
-0.60644662, -0.77403839, -0.30938935])
self.assertTrue(np.all(np.abs(tr.data - expected) < 1e-7))
self.assertEqual(tr.stats.sampling_rate, 0.75)
self.assertEqual(tr.stats.delta, 1 / 0.75)
self.assertEqual(tr.stats.npts, int(10 * .75))
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample without window
tr = tr0.copy()
tr.resample(0.5, window=None, no_filter=True)
self.assertEqual(len(tr.data), 5)
self.assertEqual(tr.stats.sampling_rate, 0.5)
self.assertEqual(tr.stats.delta, 2.0)
self.assertEqual(tr.stats.npts, 5)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample with window and automatic filtering
tr = tr0.copy()
tr.resample(0.5, window='hanning', no_filter=False)
self.assertEqual(len(tr.data), 5)
self.assertEqual(tr.stats.sampling_rate, 0.5)
self.assertEqual(tr.stats.delta, 2.0)
self.assertEqual(tr.stats.npts, 5)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample with custom window
tr = tr0.copy()
window = np.ones((tr.stats.npts))
tr.resample(0.5, window=window, no_filter=True)
# downsample with bad window
tr = tr0.copy()
window = np.array([0, 1, 2, 3])
self.assertRaises(ValueError, tr.resample,
sampling_rate=0.5, window=window, no_filter=True)
def test_slide(self):
"""
Tests for sliding a window across a trace object.
"""
tr = Trace(data=np.linspace(0, 100, 101))
tr.stats.starttime = UTCDateTime(0.0)
tr.stats.sampling_rate = 5.0
# First slice it in 4 pieces. Window length is in seconds.
slices = []
for window_tr in tr.slide(window_length=5.0, step=5.0):
slices.append(window_tr)
self.assertEqual(len(slices), 4)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0), UTCDateTime(5)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(5), UTCDateTime(10)))
self.assertEqual(slices[2],
tr.slice(UTCDateTime(10), UTCDateTime(15)))
self.assertEqual(slices[3],
tr.slice(UTCDateTime(15), UTCDateTime(20)))
# Different step which is the distance between two windows measured
# from the start of the first window in seconds.
slices = []
for window_tr in tr.slide(window_length=5.0, step=10.0):
slices.append(window_tr)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0), UTCDateTime(5)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(10), UTCDateTime(15)))
# Offset determines the initial starting point. It defaults to zero.
slices = []
for window_tr in tr.slide(window_length=5.0, step=6.5, offset=8.5):
slices.append(window_tr)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(8.5), UTCDateTime(13.5)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(15.0), UTCDateTime(20.0)))
# By default only full length windows will be returned so any
# remainder that can no longer make up a full window will not be
# returned.
slices = []
for window_tr in tr.slide(window_length=15.0, step=15.0):
slices.append(window_tr)
self.assertEqual(len(slices), 1)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0.0), UTCDateTime(15.0)))
# But it can optionally be returned.
slices = []
for window_tr in tr.slide(window_length=15.0, step=15.0,
include_partial_windows=True):
slices.append(window_tr)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0.0), UTCDateTime(15.0)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(15.0), UTCDateTime(20.0)))
# Negative step lengths work together with an offset.
slices = []
for window_tr in tr.slide(window_length=5.0, step=-5.0, offset=20.0):
slices.append(window_tr)
self.assertEqual(len(slices), 4)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(15), UTCDateTime(20)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(10), UTCDateTime(15)))
self.assertEqual(slices[2],
tr.slice(UTCDateTime(5), UTCDateTime(10)))
self.assertEqual(slices[3],
tr.slice(UTCDateTime(0), UTCDateTime(5)))
def test_slide_nearest_sample(self):
"""
Tests that the nearest_sample argument is correctly passed to the
slice function calls.
"""
tr = Trace(data=np.linspace(0, 100, 101))
tr.stats.starttime = UTCDateTime(0.0)
tr.stats.sampling_rate = 5.0
# It defaults to True.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = tr
list(tr.slide(5, 5))
self.assertEqual(patch.call_count, 4)
for arg in patch.call_args_list:
self.assertTrue(arg[1]["nearest_sample"])
# Force True.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = tr
list(tr.slide(5, 5, nearest_sample=True))
self.assertEqual(patch.call_count, 4)
for arg in patch.call_args_list:
self.assertTrue(arg[1]["nearest_sample"])
# Set to False.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = tr
list(tr.slide(5, 5, nearest_sample=False))
self.assertEqual(patch.call_count, 4)
for arg in patch.call_args_list:
self.assertFalse(arg[1]["nearest_sample"])
def test_remove_response_plot(self):
"""
Tests the plotting option of remove_response().
"""
tr = read("/path/to/IU_ULN_00_LH1_2015-07-18T02.mseed")[0]
inv = read_inventory("/path/to/IU_ULN_00_LH1.xml")
tr.attach_response(inv)
pre_filt = [0.001, 0.005, 10, 20]
image_dir = os.path.join(os.path.dirname(__file__), 'images')
with ImageComparison(image_dir, "trace_remove_response.png",
reltol=1.5) as ic:
tr.remove_response(pre_filt=pre_filt, output="DISP",
water_level=60, end_stage=None, plot=ic.name)
def test_normalize(self):
"""
Tests the normalize() method on normal and edge cases.
"""
# Nothing should happen with ones.
tr = Trace(data=np.ones(5))
tr.normalize()
np.testing.assert_allclose(tr.data, np.ones(5))
# 10s should be normalized to all ones.
tr = Trace(data=10 * np.ones(5))
tr.normalize()
np.testing.assert_allclose(tr.data, np.ones(5))
# Negative 10s should be normalized to negative ones.
tr = Trace(data=-10 * np.ones(5))
tr.normalize()
np.testing.assert_allclose(tr.data, -np.ones(5))
# 10s and a couple of 5s should be normalized to 1s and a couple of
# 0.5s.
tr = Trace(data=np.array([10.0, 10.0, 5.0, 5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([1.0, 1.0, 0.5, 0.5]))
# Same but negative values.
tr = Trace(data=np.array([-10.0, -10.0, -5.0, -5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, -1.0, -0.5, -0.5]))
# Mixed values.
tr = Trace(data=np.array([-10.0, -10.0, 5.0, 5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, -1.0, 0.5, 0.5]))
# Mixed values.
tr = Trace(data=np.array([-10.0, 10.0, -5.0, 5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, 1.0, -0.5, 0.5]))
# Mixed values.
tr = Trace(data=np.array([-10.0, -10.0, 0.0, 0.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, -1.0, 0.0, 0.0]))
# Mixed values.
tr = Trace(data=np.array([10.0, 10.0, 0.0, 0.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([1.0, 1.0, 0.0, 0.0]))
# Small values get larger.
tr = Trace(data=np.array([-0.5, 0.5, 0.1, -0.1]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, 1.0, 0.2, -0.2]))
# All zeros. Nothing should happen but a warning will be raised.
#! /usr/bin/python
#
# Copyright (c) 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import uuid
#FIXME there is a circular import here because compile.py imports unify.py
import compile
# A unifier designed for the bi_unify_atoms routine
# which is used by a backward-chaining style datalog implementation.
# Main goal: minimize memory allocation by manipulating only unifiers
# to keep variable namespaces separate.
class BiUnifier(object):
"""A unifier designed for bi_unify_atoms. Recursive
datastructure. When adding a binding variable u to
variable v, keeps a reference to the unifier for v.
A variable's identity is its name plus its unification context.
This enables a variable with the same name but from two
different atoms to be treated as different variables.
"""
class Value(object):
def __init__(self, value, unifier):
# actual value
self.value = value
# unifier context
self.unifier = unifier
def __str__(self):
return "<{},{}>".format(
str(self.value), repr(self.unifier))
def recur_str(self):
if self.unifier is None:
recur = str(self.unifier)
else:
recur = self.unifier.recur_str()
return "<{},{}>".format(
str(self.value), recur)
def __eq__(self, other):
            return self.value == other.value and self.unifier == other.unifier
def __repr__(self):
return "Value(value={}, unifier={})".format(
repr(self.value), repr(self.unifier))
class Undo(object):
def __init__(self, var, unifier):
self.var = var
self.unifier = unifier
def __str__(self):
return "<var: {}, unifier: {}>".format(
str(self.var), str(self.unifier))
def __eq__(self, other):
return self.var == other.var and self.unifier == other.unifier
def __init__(self, dictionary=None):
# each value is a Value
self.contents = {}
if dictionary is not None:
for var, value in dictionary.iteritems():
self.add(var, value, None)
def add(self, var, value, unifier):
value = self.Value(value, unifier)
# logging.debug("Adding {} -> {} to unifier {}".format(
# str(var), str(value), str(self)))
self.contents[var] = value
return self.Undo(var, self)
def delete(self, var):
if var in self.contents:
del self.contents[var]
def value(self, term):
if term in self.contents:
return self.contents[term]
else:
return None
def apply(self, term, caller=None):
return self.apply_full(term, caller=caller)[0]
def apply_full(self, term, caller=None):
"""Recursively apply unifiers to TERM and return
(i) the final value and (ii) the final unifier.
        If the final value is a variable that is not among the top-level
        CALLER's variables, it is renamed to a fresh variable.
"""
# logging.debug("apply_full({}, {})".format(str(term), str(self)))
val = self.value(term)
if val is None:
# If result is a variable and this variable is not one of those
# in the top-most calling context, then create a new variable
# name based on this Binding.
# This process avoids improper variable capture.
# Outputting the same variable with the same binding twice will
# generate the same output, but outputting the same variable with
# different bindings will generate different outputs.
# Note that this variable name mangling
# is not done for the top-most variables,
# which makes output a bit easier to read.
# Unfortunately, the process is non-deterministic from one run
# to the next, which makes testing difficult.
if (caller is not None and term.is_variable() and
not (term in caller.variables and caller.binding is self)):
return (compile.Variable(term.name + str(id(self))), self)
else:
return (term, self)
elif val.unifier is None or not val.value.is_variable():
return (val.value, val.unifier)
else:
return val.unifier.apply_full(val.value)
def is_one_to_one(self):
image = set() # set of all things mapped TO
for x in self.contents:
val = self.apply(x)
if val in image:
return False
image.add(val)
return True
def __str__(self):
s = repr(self)
s += "={"
s += ",".join(["{}:{}".format(str(var), str(val))
for var, val in self.contents.iteritems()])
s += "}"
return s
def recur_str(self):
s = repr(self)
s += "={"
s += ",".join(["{}:{}".format(var, val.recur_str())
for var, val in self.contents.iteritems()])
s += "}"
return s
def __eq__(self, other):
return self.contents == other.contents
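# Illustrative sketch (not part of the original module) of the intended use of
# BiUnifier; compile.Variable is the variable constructor assumed here:
#
#   u1 = BiUnifier()
#   u2 = BiUnifier()
#   x = compile.Variable('x')
#   y = compile.Variable('y')
#   undo = u1.add(x, y, u2)            # bind x (in u1's context) to y (in u2's context)
#   value, context = u1.apply_full(x)  # follows the binding chain -> (y, u2)
#   undo_all([undo])                   # restores u1 to its previous state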
def binding_str(binding):
"""Handles string conversion of either dictionary or Unifier."""
if isinstance(binding, dict):
s = ",".join(["{}: {}".format(str(var), str(val))
for var, val in binding.iteritems()])
return '{' + s + '}'
else:
return str(binding)
def undo_all(changes):
"""Undo all the changes in CHANGES."""
# logging.debug("undo_all({})".format(
# "[" + ",".join([str(x) for x in changes]) + "]"))
for change in changes:
if change.unifier is not None:
change.unifier.delete(change.var)
def bi_unify_atoms(atom1, unifier1, atom2, unifier2):
"""If possible, modify BiUnifier UNIFIER1 and BiUnifier UNIFIER2 so that
ATOM1.plug(UNIFIER1) == ATOM2.plug(UNIFIER2).
Returns None if not possible; otherwise, returns
a list of changes to unifiers that can be undone
with undo-all. May alter unifiers besides UNIFIER1 and UNIFIER2.
"""
# logging.debug("Unifying {} under {} and {} under {}".format(
# str(atom1), str(unifier1), str(atom2), str(unifier2)))
if atom1.table != atom2.table:
return None
if len(atom1.arguments) != len(atom2.arguments):
return None
changes = []
for i in xrange(0, len(atom1.arguments)):
assert isinstance(atom1.arguments[i], compile.Term)
assert isinstance(atom2.arguments[i], compile.Term)
# grab values for args
val1, binding1 = unifier1.apply_full(atom1.arguments[i])
val2, binding2 = unifier2.apply_full(atom2.arguments[i])
# logging.debug("val({})={} at {}, val({})={} at {}".format(
# str(atom1.arguments[i]), str(val1), str(binding1),
# str(atom2.arguments[i]), str(val2), str(binding2)))
# assign variable (if necessary) or fail
if val1.is_variable() and val2.is_variable():
# logging.debug("1 and 2 are variables")
if bi_var_equal(val1, binding1, val2, binding2):
continue
else:
changes.append(binding1.add(val1, val2, binding2))
elif val1.is_variable() and not val2.is_variable():
# logging.debug("Left arg is a variable")
changes.append(binding1.add(val1, val2, binding2))
elif not val1.is_variable() and val2.is_variable():
# logging.debug("Right arg is a variable")
changes.append(binding2.add(val2, val1, binding1))
elif val1 == val2:
continue
else:
# logging.debug("Unify failure: undoing")
undo_all(changes)
return None
return changes
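# Hedged worked example of bi_unify_atoms (atom construction is illustrative;
# real atoms come from the compile module): unifying p(x, 1) under u1 with
# p(2, y) under u2 binds x -> 2 in u1 and y -> 1 in u2 and returns the two
# Undo records; unifying p(x) with q(x) returns None because the tables differ.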
# def plug(atom, binding, withtable=False):
# """ Returns a tuple representing the arguments to ATOM after having
# applied BINDING to the variables in ATOM. """
# if withtable is True:
# result = [atom.table]
# else:
# result = []
# for i in xrange(0, len(atom.arguments)):
# if (atom.arguments[i].is_variable() and
# atom.arguments[i].name in binding):
# result.append(binding[atom.arguments[i].name])
# else:
# result.append(atom.arguments[i].name)
# return tuple(result)
def match_tuple_atom(tuple, atom):
"""Returns a binding dictionary that when applied to ATOM's arguments
gives exactly TUPLE, or returns None if no such binding exists.
"""
if len(tuple) != len(atom.arguments):
return None
binding = {}
for i in xrange(0, len(tuple)):
arg = atom.arguments[i]
if arg.is_variable():
if arg.name in binding:
oldval = binding[arg.name]
if oldval != tuple[i]:
return None
else:
binding[arg.name] = tuple[i]
return binding
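# Hedged example (atom construction is illustrative): matching the tuple (1, 2)
# against an atom p(x, y) returns {'x': 1, 'y': 2}; matching (1, 2) against
# p(x, x) returns None because x cannot be bound to both 1 and 2.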
def bi_var_equal(var1, unifier1, var2, unifier2):
"""Returns True iff variable VAR1 in unifier UNIFIER1 is the same
variable as VAR2 in UNIFIER2.
"""
return (var1 == var2 and unifier1 is unifier2)
def same(formula1, formula2):
"""Determine if FORMULA1 and FORMULA2 are the same up to a variable
renaming. Treats FORMULA1 and FORMULA2 as having different
variable namespaces. Returns None or the pair of unifiers.
"""
logging.debug("same({}, {})".format(str(formula1), str(formula2)))
if isinstance(formula1, compile.Literal):
if isinstance(formula2, compile.Rule):
return None
elif formula1.is_negated() != formula2.is_negated():
return None
else:
u1 = BiUnifier()
u2 = BiUnifier()
if same_atoms(formula1, u1, formula2, u2, set()) is not None:
return (u1, u2)
return None
elif isinstance(formula1, compile.Rule):
if isinstance(formula2, compile.Literal):
return None
else:
if len(formula1.body) != len(formula2.body):
return None
u1 = BiUnifier()
u2 = BiUnifier()
bound2 = set()
result = same_atoms(formula1.head, u1, formula2.head, u2, bound2)
if result is None:
return None
for i in xrange(0, len(formula1.body)):
result = same_atoms(
formula1.body[i], u1, formula2.body[i], u2, bound2)
if result is None:
return None
return (u1, u2)
else:
return None
def same_atoms(atom1, unifier1, atom2, unifier2, bound2):
"""Modifies UNIFIER1 and UNIFIER2 to demonstrate
that ATOM1 and ATOM2 are identical up to a variable renaming.
Returns None if not possible or the list of changes if it is.
BOUND2 is the set of variables already bound in UNIFIER2
"""
def die():
undo_all(changes)
return None
logging.debug("same_atoms({}, {})".format(str(atom1), str(atom2)))
if atom1.table != atom2.table:
return None
if len(atom1.arguments) != len(atom2.arguments):
return None
changes = []
# logging.debug("same_atoms entering loop")
for i in xrange(0, len(atom1.arguments)):
val1, binding1 = unifier1.apply_full(atom1.arguments[i])
val2, binding2 = unifier2.apply_full(atom2.arguments[i])
# logging.debug("val1: {} at {}; val2: {} at {}".format(
# str(val1), str(binding1), str(val2), str(binding2)))
if val1.is_variable() and val2.is_variable():
if bi_var_equal(val1, binding1, val2, binding2):
continue
# if we already bound either of these variables, not SAME
if not bi_var_equal(val1, binding1, atom1.arguments[i], unifier1):
# logging.debug("same_atoms: arg1 already bound")
return die()
if | |
account at the time the
operation was called. Possible values include: "Accepted", "Creating", "Deleting", "Moving",
"Failed", "Succeeded", "ResolvingDNS".
:vartype provisioning_state: str or ~azure.mgmt.cognitiveservices.models.ProvisioningState
:ivar endpoint: Endpoint of the created account.
:vartype endpoint: str
:ivar internal_id: The internal identifier (deprecated, do not use this property).
:vartype internal_id: str
:ivar capabilities: Gets the capabilities of the cognitive services account. Each item
indicates the capability of a specific feature. The values are read-only and for reference
only.
:vartype capabilities: list[~azure.mgmt.cognitiveservices.models.SkuCapability]
:ivar is_migrated: If the resource is migrated from an existing key.
:vartype is_migrated: bool
:ivar migration_token: Resource migration token.
:vartype migration_token: str
:ivar sku_change_info: Sku change info of account.
:vartype sku_change_info: ~azure.mgmt.cognitiveservices.models.SkuChangeInfo
:ivar custom_sub_domain_name: Optional subdomain name used for token-based authentication.
:vartype custom_sub_domain_name: str
:ivar network_acls: A collection of rules governing the accessibility from specific network
locations.
:vartype network_acls: ~azure.mgmt.cognitiveservices.models.NetworkRuleSet
:ivar encryption: The encryption properties for this resource.
:vartype encryption: ~azure.mgmt.cognitiveservices.models.Encryption
:ivar user_owned_storage: The storage accounts for this resource.
:vartype user_owned_storage: list[~azure.mgmt.cognitiveservices.models.UserOwnedStorage]
:ivar private_endpoint_connections: The private endpoint connection associated with the
Cognitive Services account.
:vartype private_endpoint_connections:
list[~azure.mgmt.cognitiveservices.models.PrivateEndpointConnection]
:ivar public_network_access: Whether or not public endpoint access is allowed for this account.
Possible values include: "Enabled", "Disabled".
:vartype public_network_access: str or ~azure.mgmt.cognitiveservices.models.PublicNetworkAccess
:ivar api_properties: The api properties for special APIs.
:vartype api_properties: ~azure.mgmt.cognitiveservices.models.ApiProperties
:ivar date_created: Gets the date of cognitive services account creation.
:vartype date_created: str
:ivar call_rate_limit: The call rate limit Cognitive Services account.
:vartype call_rate_limit: ~azure.mgmt.cognitiveservices.models.CallRateLimit
:ivar dynamic_throttling_enabled: The flag to enable dynamic throttling.
:vartype dynamic_throttling_enabled: bool
:ivar quota_limit:
:vartype quota_limit: ~azure.mgmt.cognitiveservices.models.QuotaLimit
:ivar restrict_outbound_network_access:
:vartype restrict_outbound_network_access: bool
:ivar allowed_fqdn_list:
:vartype allowed_fqdn_list: list[str]
:ivar disable_local_auth:
:vartype disable_local_auth: bool
:ivar endpoints: Dictionary of :code:`<string>`.
:vartype endpoints: dict[str, str]
:ivar restore:
:vartype restore: bool
:ivar deletion_date: The deletion date, only available for deleted account.
:vartype deletion_date: str
:ivar scheduled_purge_date: The scheduled purge date, only available for deleted account.
:vartype scheduled_purge_date: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'endpoint': {'readonly': True},
'internal_id': {'readonly': True},
'capabilities': {'readonly': True},
'is_migrated': {'readonly': True},
'sku_change_info': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'date_created': {'readonly': True},
'call_rate_limit': {'readonly': True},
'quota_limit': {'readonly': True},
'endpoints': {'readonly': True},
'deletion_date': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'internal_id': {'key': 'internalId', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '[SkuCapability]'},
'is_migrated': {'key': 'isMigrated', 'type': 'bool'},
'migration_token': {'key': 'migrationToken', 'type': 'str'},
'sku_change_info': {'key': 'skuChangeInfo', 'type': 'SkuChangeInfo'},
'custom_sub_domain_name': {'key': 'customSubDomainName', 'type': 'str'},
'network_acls': {'key': 'networkAcls', 'type': 'NetworkRuleSet'},
'encryption': {'key': 'encryption', 'type': 'Encryption'},
'user_owned_storage': {'key': 'userOwnedStorage', 'type': '[UserOwnedStorage]'},
'private_endpoint_connections': {'key': 'privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
'api_properties': {'key': 'apiProperties', 'type': 'ApiProperties'},
'date_created': {'key': 'dateCreated', 'type': 'str'},
'call_rate_limit': {'key': 'callRateLimit', 'type': 'CallRateLimit'},
'dynamic_throttling_enabled': {'key': 'dynamicThrottlingEnabled', 'type': 'bool'},
'quota_limit': {'key': 'quotaLimit', 'type': 'QuotaLimit'},
'restrict_outbound_network_access': {'key': 'restrictOutboundNetworkAccess', 'type': 'bool'},
'allowed_fqdn_list': {'key': 'allowedFqdnList', 'type': '[str]'},
'disable_local_auth': {'key': 'disableLocalAuth', 'type': 'bool'},
'endpoints': {'key': 'endpoints', 'type': '{str}'},
'restore': {'key': 'restore', 'type': 'bool'},
'deletion_date': {'key': 'deletionDate', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'str'},
}
def __init__(
self,
*,
migration_token: Optional[str] = None,
custom_sub_domain_name: Optional[str] = None,
network_acls: Optional["NetworkRuleSet"] = None,
encryption: Optional["Encryption"] = None,
user_owned_storage: Optional[List["UserOwnedStorage"]] = None,
public_network_access: Optional[Union[str, "PublicNetworkAccess"]] = None,
api_properties: Optional["ApiProperties"] = None,
dynamic_throttling_enabled: Optional[bool] = None,
restrict_outbound_network_access: Optional[bool] = None,
allowed_fqdn_list: Optional[List[str]] = None,
disable_local_auth: Optional[bool] = None,
restore: Optional[bool] = None,
**kwargs
):
"""
:keyword migration_token: Resource migration token.
:paramtype migration_token: str
:keyword custom_sub_domain_name: Optional subdomain name used for token-based authentication.
:paramtype custom_sub_domain_name: str
:keyword network_acls: A collection of rules governing the accessibility from specific network
locations.
:paramtype network_acls: ~azure.mgmt.cognitiveservices.models.NetworkRuleSet
:keyword encryption: The encryption properties for this resource.
:paramtype encryption: ~azure.mgmt.cognitiveservices.models.Encryption
:keyword user_owned_storage: The storage accounts for this resource.
:paramtype user_owned_storage: list[~azure.mgmt.cognitiveservices.models.UserOwnedStorage]
:keyword public_network_access: Whether or not public endpoint access is allowed for this
account. Possible values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.cognitiveservices.models.PublicNetworkAccess
:keyword api_properties: The api properties for special APIs.
:paramtype api_properties: ~azure.mgmt.cognitiveservices.models.ApiProperties
:keyword dynamic_throttling_enabled: The flag to enable dynamic throttling.
:paramtype dynamic_throttling_enabled: bool
:keyword restrict_outbound_network_access:
:paramtype restrict_outbound_network_access: bool
:keyword allowed_fqdn_list:
:paramtype allowed_fqdn_list: list[str]
:keyword disable_local_auth:
:paramtype disable_local_auth: bool
:keyword restore:
:paramtype restore: bool
"""
super(AccountProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.endpoint = None
self.internal_id = None
self.capabilities = None
self.is_migrated = None
self.migration_token = migration_token
self.sku_change_info = None
self.custom_sub_domain_name = custom_sub_domain_name
self.network_acls = network_acls
self.encryption = encryption
self.user_owned_storage = user_owned_storage
self.private_endpoint_connections = None
self.public_network_access = public_network_access
self.api_properties = api_properties
self.date_created = None
self.call_rate_limit = None
self.dynamic_throttling_enabled = dynamic_throttling_enabled
self.quota_limit = None
self.restrict_outbound_network_access = restrict_outbound_network_access
self.allowed_fqdn_list = allowed_fqdn_list
self.disable_local_auth = disable_local_auth
self.endpoints = None
self.restore = restore
self.deletion_date = None
self.scheduled_purge_date = None
class AccountSku(msrest.serialization.Model):
"""Cognitive Services resource type and SKU.
:ivar resource_type: Resource Namespace and Type.
:vartype resource_type: str
:ivar sku: The SKU of Cognitive Services account.
:vartype sku: ~azure.mgmt.cognitiveservices.models.Sku
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword resource_type: Resource Namespace and Type.
:paramtype resource_type: str
:keyword sku: The SKU of Cognitive Services account.
:paramtype sku: ~azure.mgmt.cognitiveservices.models.Sku
"""
super(AccountSku, self).__init__(**kwargs)
self.resource_type = resource_type
self.sku = sku
class AccountSkuListResult(msrest.serialization.Model):
"""The list of cognitive services accounts operation response.
:ivar value: Gets the list of Cognitive Services accounts and their properties.
:vartype value: list[~azure.mgmt.cognitiveservices.models.AccountSku]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccountSku]'},
}
def __init__(
self,
*,
value: Optional[List["AccountSku"]] = None,
**kwargs
):
"""
:keyword value: Gets the list of Cognitive Services accounts and their properties.
:paramtype value: list[~azure.mgmt.cognitiveservices.models.AccountSku]
"""
super(AccountSkuListResult, self).__init__(**kwargs)
self.value = value
class ApiKeys(msrest.serialization.Model):
"""The access keys for the cognitive services account.
:ivar key1: Gets the value of key 1.
:vartype key1: str
:ivar key2: Gets the value of key 2.
:vartype key2: str
"""
_attribute_map = {
'key1': {'key': 'key1', 'type': 'str'},
'key2': {'key': 'key2', 'type': 'str'},
}
def __init__(
self,
*,
key1: Optional[str] = None,
key2: Optional[str] = None,
**kwargs
):
"""
:keyword key1: Gets the value of key 1.
:paramtype key1: str
:keyword key2: Gets the value of key 2.
:paramtype key2: str
"""
super(ApiKeys, self).__init__(**kwargs)
self.key1 = key1
self.key2 = key2
class ApiProperties(msrest.serialization.Model):
"""The api properties for special APIs.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, any]
:ivar qna_runtime_endpoint: (QnAMaker Only) The runtime endpoint of QnAMaker.
:vartype qna_runtime_endpoint: str
:ivar qna_azure_search_endpoint_key: (QnAMaker Only) The Azure Search endpoint key of QnAMaker.
:vartype qna_azure_search_endpoint_key: str
:ivar qna_azure_search_endpoint_id: (QnAMaker Only) The Azure Search endpoint id of QnAMaker.
:vartype qna_azure_search_endpoint_id: str
:ivar statistics_enabled: (Bing Search Only) The flag to enable statistics of Bing Search.
:vartype statistics_enabled: bool
    :ivar event_hub_connection_string: (Personalization Only) The Event Hub connection string.
:vartype event_hub_connection_string: str
:ivar storage_account_connection_string: (Personalization Only) The storage account connection
string.
:vartype storage_account_connection_string: str
:ivar aad_client_id: (Metrics Advisor Only) The Azure AD Client Id (Application Id).
:vartype aad_client_id: str
:ivar aad_tenant_id: (Metrics Advisor Only) The Azure AD Tenant Id.
:vartype aad_tenant_id: str
:ivar super_user: (Metrics Advisor Only) The super user of Metrics Advisor.
:vartype super_user: str
:ivar website_name: (Metrics Advisor Only) The website name of Metrics Advisor.
:vartype website_name: str
"""
_validation = {
'event_hub_connection_string': {'max_length': 1000, 'min_length': 0, 'pattern': r'^( *)Endpoint=sb://(.*);( *)SharedAccessKeyName=(.*);( *)SharedAccessKey=(.*)$'},
'storage_account_connection_string': {'max_length': 1000, 'min_length': 0, 'pattern': r'^(( *)DefaultEndpointsProtocol=(http|https)( *);( *))?AccountName=(.*)AccountKey=(.*)EndpointSuffix=(.*)$'},
'aad_client_id': {'max_length': 500, 'min_length': 0},
'aad_tenant_id': {'max_length': 500, 'min_length': 0},
'super_user': {'max_length': 500, 'min_length': 0},
'website_name': {'max_length': 500, 'min_length': 0},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'qna_runtime_endpoint': {'key': 'qnaRuntimeEndpoint', 'type': 'str'},
'qna_azure_search_endpoint_key': {'key': 'qnaAzureSearchEndpointKey', 'type': 'str'},
'qna_azure_search_endpoint_id': {'key': 'qnaAzureSearchEndpointId', 'type': 'str'},
'statistics_enabled': {'key': 'statisticsEnabled', 'type': 'bool'},
'event_hub_connection_string': {'key': 'eventHubConnectionString', 'type': 'str'},
'storage_account_connection_string': {'key': 'storageAccountConnectionString', 'type': 'str'},
'aad_client_id': {'key': 'aadClientId', 'type': 'str'},
'aad_tenant_id': {'key': 'aadTenantId', 'type': 'str'},
'super_user': {'key': 'superUser', 'type': 'str'},
'website_name': {'key': 'websiteName', 'type': 'str'},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, Any]] = None,
qna_runtime_endpoint: Optional[str] = None,
qna_azure_search_endpoint_key: Optional[str] = None,
qna_azure_search_endpoint_id: Optional[str] = None,
statistics_enabled: Optional[bool] = None,
event_hub_connection_string: Optional[str] = None,
storage_account_connection_string: Optional[str] = None,
aad_client_id: Optional[str] | |
            # TODO: if range is a list of lists, assume per-channel ranges and normalize each channel accordingly.
logger.debug('max after normalization=' + str(img.max()) + ' min after normalization=' + str(img.min()))
return img
elif 'ormalization' in method and not 'tandardization' in method:
logger.debug('Image will be normalized')
img = img.astype(np.float32)
img = Img._nomalize(img, individual_channels=individual_channels, method=method,
norm_range=range)
logger.debug('max after normalization=' + str(img.max()) + ' min after normalization=' + str(img.min()))
return img
elif 'tandardization' in method:
logger.debug('Image will be standardized')
img = img.astype(np.float32)
img = Img._standardize(img, individual_channels=individual_channels, method=method,
norm_range=range)
logger.debug('max after standardization=' + str(img.max()) + ' min after standardization=' + str(img.min()))
return img
else:
logger.error('unknown normalization method ' + str(method))
return img
# https://en.wikipedia.org/wiki/Feature_scaling
@staticmethod
def _nomalize(img, individual_channels=False, method='Rescaling (min-max normalization)', norm_range=None,
clip=False, normalization_minima_and_maxima=None):
eps = 1e-20 # for numerical stability avoid division by 0
if individual_channels:
for c in range(img.shape[-1]):
norm_min_max = None
if normalization_minima_and_maxima is not None:
# if list of list then use that --> in fact could also check if individual channel or not...
if isinstance(normalization_minima_and_maxima[0], list):
norm_min_max = normalization_minima_and_maxima[c]
else:
norm_min_max = normalization_minima_and_maxima
img[..., c] = Img._nomalize(img[..., c], individual_channels=False, method=method,
norm_range=norm_range, clip=clip,
normalization_minima_and_maxima=norm_min_max)
else:
# that should work
if 'percentile' in method:
                # TODO: in some cases the caller may pass the percentile values directly;
                # in that case they should not be recomputed here. Consider a dedicated
                # parameter (e.g. direct_range) that bypasses the percentile computation.
if normalization_minima_and_maxima is None:
lowest_percentile = np.percentile(img, norm_range[0])
highest_percentile = np.percentile(img, norm_range[1])
else:
lowest_percentile = normalization_minima_and_maxima[0]
highest_percentile = normalization_minima_and_maxima[1]
try:
import numexpr
img = numexpr.evaluate(
"(img - lowest_percentile) / ( highest_percentile - lowest_percentile + eps )")
except:
img = (img - lowest_percentile) / (highest_percentile - lowest_percentile + eps)
if clip:
img = np.clip(img, 0, 1)
elif method == 'Rescaling (min-max normalization)':
max = img.max()
min = img.min()
# if max != 0 and max != min:
if norm_range is None or norm_range == [0, 1] or norm_range == '[0, 1]' or norm_range == 'default' \
or isinstance(norm_range, int):
try:
import numexpr
img = numexpr.evaluate("(img - min) / (max - min + eps)")
except:
img = (img - min) / (
max - min + eps) # TODO will it take less memory if I split it into two lines
elif norm_range == [-1, 1] or norm_range == '[-1, 1]':
try:
import numexpr
img = numexpr.evaluate("-1 + ((img - min) * (1 - -1)) / (max - min + eps)")
except:
img = -1 + ((img - min) * (1 - -1)) / (max - min + eps)
elif method == 'Mean normalization':
# TODO should I implement range too here ??? or deactivate it
max = img.max()
min = img.min()
if max != 0 and max != min:
img = (img - np.average(img)) / (max - min)
elif method.startswith('Max normalization'): # here too assume 0-1 no need for range
if 'auto' in method:
max = img.max()
elif '255' in method:
max = 255
elif '4095' in method:
max = 4095
elif '65535' in method:
max = 65535
if max != 0:
try:
import numexpr
img = numexpr.evaluate("img / max")
except:
img = img / max
else:
logger.error('Unknown normalization method "' + str(method) + '" --> ignoring ')
return img
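    # Hedged worked example of the default min-max rescaling above: for the data
    # [0, 5, 10] the minimum is 0 and the maximum is 10, so the result is
    # (x - 0) / (10 - 0 + eps), i.e. approximately [0.0, 0.5, 1.0]; with
    # norm_range=[-1, 1] the same data maps to roughly [-1.0, 0.0, 1.0].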
@staticmethod
    def _standardize(img, individual_channels=False, method=None, norm_range=None):
if individual_channels:
for c in range(img.shape[-1]):
img[..., c] = Img._standardize(img[..., c], individual_channels=False, method=method,
norm_range=norm_range)
else:
mean = np.mean(img)
std = np.std(img)
# print('mean', mean, 'std', std)
if std != 0.0:
img = (img - mean) / std
else:
                logger.error('Cannot standardize image: standard deviation is zero')
if mean != 0.0:
img = (img - mean)
if norm_range == [0, 1] or norm_range == [-1, 1] or norm_range == '[0, 1]' or norm_range == '[-1, 1]':
img = Img._nomalize(img, method='Rescaling (min-max normalization)',
individual_channels=individual_channels, norm_range=[0, 1])
if norm_range == [-1, 1] or norm_range == '[-1, 1]':
img = (img - 0.5) * 2.
logger.debug('max after standardization=' + str(img.max()) + ' min after standardization=' + str(img.min()))
return img
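    # Hedged worked example of the standardization above: for the data [2, 4, 6]
    # the mean is 4 and the population standard deviation is sqrt(8/3) ~= 1.63,
    # so the result is approximately [-1.22, 0.0, 1.22]; passing norm_range=[0, 1]
    # additionally rescales that result into the [0, 1] interval.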
@staticmethod
def reassemble_tiles(tiles, crop_parameters, three_d=False):
        '''Reassembles a full image from individual tiles.
Parameters
----------
tiles : list
input tiles
crop_parameters : dict
parameters required to reassemble the tiles
three_d : boolean
if True assume image is 3D (dhw), 2D (hw) otherwise
Returns
-------
ndarray
a reassembled image from individual tiles
'''
overlap_y = crop_parameters['overlap_y']
overlap_x = crop_parameters['overlap_x']
final_height = crop_parameters['final_height']
final_width = crop_parameters['final_width']
cols = []
for i in range(len(tiles)):
cur_size = 0
for j in range(len(tiles[0])):
if j == 0:
if overlap_y != 0:
y_slice = slice(None, -int(overlap_y / 2))
else:
y_slice = slice(None, None)
elif j == len(tiles[0]) - 1:
if overlap_y != 0:
y_slice = slice(int(overlap_y / 2), None)
else:
y_slice = slice(None, None)
else:
if overlap_y != 0:
y_slice = slice(int(overlap_y / 2), -int(overlap_y / 2))
else:
y_slice = slice(None, None)
if not three_d:
tiles[i][j] = tiles[i][j][y_slice, ...]
cur_size += tiles[i][j].shape[0]
else:
tiles[i][j] = tiles[i][j][:, y_slice, ...]
cur_size += tiles[i][j].shape[1]
if not three_d:
cols.append(np.vstack(tuple(tiles[i])))
else:
cols.append(np.hstack(tuple(tiles[i])))
cur_size = 0
for i in range(len(cols)):
if i == 0:
if overlap_x != 0:
x_slice = slice(None, -int(overlap_x / 2))
else:
x_slice = slice(None, None)
elif i == len(cols) - 1:
if overlap_x != 0:
x_slice = slice(int(overlap_x / 2), None) # orig
else:
x_slice = slice(None, None)
else:
if overlap_x != 0:
x_slice = slice(int(overlap_x / 2), -int(overlap_x / 2))
else:
x_slice = slice(None, None)
if not three_d:
if len(cols[i].shape) == 3:
cols[i] = cols[i][:, x_slice]
else:
cols[i] = cols[i][:, x_slice, ...]
cur_size += cols[i].shape[1]
else:
if len(cols[i].shape) == 3:
cols[i] = cols[i][:, :, x_slice]
else:
cols[i] = cols[i][:, :, x_slice, ...]
cur_size += cols[i].shape[2]
if not three_d:
return np.hstack(tuple(cols))[:final_height, :final_width]
else:
return np.dstack(tuple(cols))[:, :final_height, :final_width]
@staticmethod
def linear_to_2D_tiles(tiles, crop_parameters):
'''converts a 1D list to a 2D list
Parameters
----------
tiles : list
1D list containing tiles
crop_parameters : dict
parameters to recreate a 2D list from a 1D (i.e. nb or rows and cols)
Returns
-------
list
a 2D list containing tiles
'''
n_rows = crop_parameters['n_rows']
n_cols = crop_parameters['n_cols']
nb_tiles = crop_parameters['nb_tiles']
output = []
counter = 0
for i in range(n_rows):
cols = []
for j in range(n_cols):
cols.append(tiles[counter])
counter += 1
output.append(cols)
return output
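    # Hedged example of the reshaping above: with n_rows=2 and n_cols=3 the flat
    # list [t0, t1, t2, t3, t4, t5] becomes [[t0, t1, t2], [t3, t4, t5]], i.e.
    # tiles are consumed row by row.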
# should dynamically crop images
def crop(self, **kwargs):
'''crops an image
Parameters
----------
kwargs : dict
a dict containing the top left corner and the bottom right coordinates of the crop x1, y1, x2, y2
Returns
-------
ndarray
a crop of the image
'''
img = self
corrected_metadata = dict(self.metadata)
dims = []
for i in range(len(img.shape)):
dims.append(slice(None))
# get the dim and its begin and end and create the appropriate slice
for key, value in kwargs.items():
if key in self.metadata['dimensions']:
idx = self.metadata['dimensions'].index(key)
if isinstance(value, list):
if len(value) == 2:
dims[idx] = slice(value[0], value[1])
elif len(value) == 3:
dims[idx] = slice(value[0], value[1], value[2])
                        # update the width and height metadata accordingly, or drop the w/h entries to avoid inconsistencies
elif len(value) == 1:
dims[idx] = value
corrected_metadata.update(
{'dimensions': corrected_metadata['dimensions'].replace(key, '')}) # do remove dimension
else:
if value is not None:
dims[idx] = value
corrected_metadata.update(
{'dimensions': corrected_metadata['dimensions'].replace(key, '')}) # do remove dimension
else:
dims[idx] = slice(None)
        # TODO: also shrink the corresponding size entries in the metadata to avoid bugs
img = np.ndarray.copy(img[tuple(dims)])
output = Img(img, metadata=corrected_metadata)
return output
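    # Hedged usage sketch (the dimension letters are assumptions; valid keys are
    # whatever characters appear in the image's metadata 'dimensions' string):
    #   crop_hw = img.crop(h=[100, 200], w=[50, 150])  # keep rows 100-200 and columns 50-150
    #   plane = img.crop(d=3)                          # keep a single z plane, dropping the 'd' dimension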
# should be able to parse any dimension in fact by its name
    # IMPORTANT: never name this method 'copy'; it would override numpy's default ndarray copy method and cause errors.
def imCopy(self, t=None, d=None, c=None):
        '''Returns a copy of the image, optionally restricted to a given time point, z plane and/or channel.
Parameters
----------
t : int
the index of the time series to copy
d : int
the index of the z/d | |
<reponame>Flame442/Trusty-cogs
import aiohttp
import json
import logging
import discord
from dataclasses import dataclass
from typing import Optional, Literal, Union
from tabulate import tabulate
from datetime import datetime
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import box
from .constants import BASE_URL, HEADSHOT_URL, TEAMS
_ = Translator("Hockey", __file__)
log = logging.getLogger("red.trusty-cogs.hockey")
# This is somewhat redundant, but for consistency we spell out the expected
# keys used to "get" the data from the dict the API provides. This way each
# dataclass field has a known value and we avoid the issues that can arise
# with this kind of inheritance.
SKATER_STATS = {
"time_on_ice": "timeOnIce",
"assists": "assists",
"goals": "goals",
"pim": "pim",
"shots": "shots",
"games": "games",
"hits": "hits",
"powerplay_goals": "powerPlayGoals",
"powerplay_points": "powerPlayPoints",
"powerplay_time_on_ice": "powerPlayTimeOnIce",
"event_time_on_ice": "evenTimeOnIce",
"penalty_minutes": "penaltyMinutes",
"face_off_percent": "faceOffPct",
"shot_percent": "shotPct",
"game_winning_goals": "gameWinningGoals",
"over_time_goals": "overTimeGoals",
"short_handed_goals": "shortHandedGoals",
"short_handed_points": "shortHandedPoints",
"short_handed_time_on_ice": "shortHandedTimeOnIce",
"blocked": "blocked",
"plusminus": "plusMinus",
"points": "points",
"shifts": "shifts",
"time_on_ice_per_game": "timeOnIcePerGame",
"even_time_on_ice_per_game": "evenTimeOnIcePerGame",
"short_handed_time_on_ice_per_game": "shortHandedTimeOnIcePerGame",
"powerplay_time_on_ice_per_game": "powerPlayTimeOnIcePerGame",
}
GOALIE_STATS = {
"time_on_ice": "timeOnIce",
"ot": "ot",
"shutouts": "shutouts",
"wins": "wins",
"ties": "ties",
"losses": "losses",
"saves": "saves",
"powerplay_saves": "powerPlaySaves",
"shorthanded_saves": "shortHandedSaves",
"even_saves": "evenSaves",
"shorthanded_shots": "shortHandedShots",
"even_shots": "evenShots",
"powerplay_shots": "powerPlayShots",
"save_percentage": "savePercentage",
"goals_against_average": "goalAgainstAverage",
"games": "games",
"games_started": "gamesStarted",
"shots_against": "shotsAgainst",
"goals_against": "goalsAgainst",
"time_on_ice_per_game": "timeOnIcePerGame",
"powerplay_save_percentage": "powerPlaySavePercentage",
"shorthanded_save_percentage": "shortHandedSavePercentage",
"even_strength_save_percentage": "evenStrengthSavePercentage",
}
FLAG_LOOKUP = {
"CAN": ":flag_ca:",
"USA": ":flag_us:",
"SWE": ":flag_se:",
"GBR": ":flag_gb:",
"CZE": ":flag_cz:",
"LVA": ":flag_lv:",
"NLD": ":flag_nl:",
"FIN": ":flag_fi:",
"UKR": ":flag_ua:",
"SRB": ":flag_rs:",
"FRA": ":flag_fr:",
"ITA": ":flag_it:",
"VEN": ":flag_si:",
"SVK": ":flag_sk:",
"IRL": ":flag_ie:",
"RUS": ":flag_ru:",
"POL": ":flag_pl:",
"LBN": ":flag_lb:",
"DEU": ":flag_de:",
"BRA": ":flag_gi:",
"CHE": ":flag_ch:",
"DNK": ":flag_dk:",
"ZAF": ":flag_za:",
"TWN": ":flag_tw:",
"JAM": ":flag_jm:",
"KOR": ":flag_kr:",
"PRY": ":flag_py:",
"NOR": ":flag_no:",
"HTI": ":flag_ht:",
"MKD": ":flag_mk:",
"GUY": ":flag_gy:",
"HUN": ":flag_hu:",
"AUS": ":flag_au:",
"AUT": ":flag_at:",
"BLR": ":flag_by:",
"GRC": ":flag_gr:",
"LTU": ":flag_lt:",
"BHS": ":flag_bs:",
"JPN": ":flag_jp:",
"KAZ": ":flag_kz:",
"NGA": ":flag_ng:",
"EST": ":flag_ee:",
"BEL": ":flag_be:",
"BRN": ":flag_bn:",
"TZA": ":flag_tz:",
"SVN": ":flag_si:",
"HRV": ":flag_hr:",
"ROU": ":flag_ro:",
"THA": ":flag_th:",
"IDN": ":flag_id:",
"MNE": ":flag_me:",
"CHN": ":flag_cn:",
"BGR": ":flag_bg:",
"MEX": ":flag_mx:",
"ISR": ":flag_il:",
None: "",
}
@dataclass
class Player:
id: int
accrued_seasons: Optional[int]
add_names: Optional[str]
age_signed_waiver: Optional[int]
age_signel_fa: Optional[int]
alert: Literal["Y", "N"]
birth_city: Optional[str]
birth_country: Optional[str]
birth_date: Optional[str]
birth_state_province: Optional[str]
career_team_id: Optional[int]
central_registry_position: Optional[str]
club_elect_arb: Literal["Y", "N"]
current_team_id: Optional[int]
date_of_death: Optional[str]
dda_id: Optional[int]
deceased: bool
ep_player_id: Optional[int]
fa_group_after_season: Literal[None]
first_name: str
first_signed_by_team_id: Optional[int]
free_agent_group: Optional[str]
full_name: str
group_5_election: Literal["Y", "N"]
group_5_seasons_earned: Optional[int]
group_6_proration: Literal[None]
group_6_seasons_earned: Optional[int]
groups_earned_thru_season: Optional[int]
height: Optional[int]
hof_induction_year: Optional[int]
home_town: Optional[str]
iihf_hof_induction_year: Optional[int]
in_hockey_hof: bool
in_iihf_hof: int
in_top_100_all_time: int
in_us_hockey_hof: bool
is_defected: Literal["Y", "N"]
is_deleted: Literal["Y", "N"]
is_junior: Literal["Y", "N"]
is_retired: Literal[None]
is_rookie: Literal["Y", "N"]
is_suspended: Literal["Y", "N"]
last_ameteur_league_id: Optional[int]
last_ameteur_team_id: Optional[int]
last_nhl_team_id: Optional[int]
last_name: str
loan_cap_exception: Literal["Y", "N"]
long_term_injury: Literal["Y", "N"]
message: Optional[str]
middle_name: Optional[str]
nationality: Optional[str]
nhl_experience: Optional[int]
on_roster: Literal["Y", "N"]
platform_year: Optional[int]
position: Optional[Literal["L", "R", "C", "D", "G"]]
pr_name: str
pr_stat: int
pro_year_reduction: Optional[int]
reenty_waivers: Optional[Literal["Y", "N"]]
roster_special_code: Optional[str]
salary_arbitration_exp: Optional[int]
shoots_catches: Optional[Literal["L", "R"]]
sweater_number: Optional[int]
update_timestamp: str
us_hof_induction_year: Optional[int]
vet_cap_exception: Literal["Y", "N"]
waiver_amount: Optional[int]
waiver_draft: Optional[str]
waiver_status: Literal["Y", "N"]
weight: Optional[int]
years_pro: Optional[int]
def __str__(self) -> str:
return "{0.full_name}, born {0.birth_date}".format(self)
def __repr__(self) -> str:
return "<Player name={0.full_name} id={0.id} number={0.sweater_number}>".format(self)
def description(self) -> str:
desc = {
"birth_date": _("Born: "),
"deceased": _("Deceased: "),
"home_town": _("Hometown: "),
"position": _("Position: "),
"height": _("Height: "),
"weight": _("Weight: "),
"is_rookie": _("Rookie"),
"is_junior": _("Junior"),
"is_suspended": _("Suspended"),
}
msg = ""
for attr, name in desc.items():
if getattr(self, attr):
if attr == "height" and self.height:
msg += (
name
+ f"{self.height//12}' {self.height%12}\" / {int(self.height * 2.54)} cm\n"
)
elif attr == "birth_date":
years = int(
(datetime.now() - datetime.strptime(self.birth_date, "%Y-%m-%d")).days
/ 365.25
)
msg += name + f"{getattr(self, attr)} ({years})\n"
flag = FLAG_LOOKUP[self.birth_country]
msg += (
", ".join(
[
i
for i in [self.birth_city, self.birth_state_province]
if i is not None
]
)
+ f" {flag}\n"
)
elif attr == "weight" and self.weight:
msg += name + f"{self.weight} lbs / {int(self.weight * 0.453592)} kg\n"
elif attr == "home_town":
flag = FLAG_LOOKUP[self.nationality]
msg += name + f"{getattr(self, attr)} {flag}\n"
elif attr == "position":
shoots = f"({getattr(self, 'shoots_catches', '')})"
ir = "\N{ADHESIVE BANDAGE}" if getattr(self, "long_term_injury") == "Y" else ""
msg += name + f"{getattr(self, attr)} {shoots if shoots != '()' else ''}{ir}\n"
elif attr == "deceased":
death_date = getattr(self, "date_of_death", "")
msg += f"{name} {death_date}\n" if getattr(self, attr) else ""
elif attr in ["is_rookie", "is_junior", "is_suspended"]:
if getattr(self, attr) == "Y":
msg += f"{name}\n"
elif attr == "dda_id":
msg += name.format(dda_id=self.dda_id) + "\n"
else:
msg += name + f"{getattr(self, attr)}\n"
links = [
_("[Elite Prospects]({ep_url})").format(ep_url=self.ep_url()),
_("[Cap Friendly]({cf_url})").format(cf_url=self.cap_friendly_url()),
]
if getattr(self, "dda_id"):
links.append(
_(
"[HHOF]( https://www.hhof.com/LegendsOfHockey/jsp/SearchPlayer.jsp?player={dda_id})"
).format(dda_id=self.dda_id)
)
msg += " | ".join(links)
return msg
def headshot(self) -> str:
return HEADSHOT_URL.format(self.id)
def get_embed(self) -> discord.Embed:
try:
team_id = self.current_team_id or self.last_nhl_team_id
log.debug(team_id)
team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0]
colour = int(TEAMS[team_name]["home"].replace("#", ""), 16)
logo = TEAMS[team_name]["logo"]
except IndexError:
team_name = _("No Team")
colour = 0xFFFFFF
logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png"
em = discord.Embed(colour=colour)
em.description = self.description()
em.set_thumbnail(url=self.headshot())
number = f"#{self.sweater_number}" if self.sweater_number else ""
em.set_author(name=f"{self.full_name} {number}", icon_url=logo)
em.description = self.description()
return em
async def get_full_stats(self, season: Optional[str]):
url = f"https://statsapi.web.nhl.com/api/v1/people/{self.id}/stats?stats=yearByYear"
log.debug(url)
log.debug(season)
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
data = await resp.json()
for seasons in reversed(data["stats"][0]["splits"]):
if seasons["league"].get("id", None) != 133:
continue
stats_season = seasons["season"]
if season in [stats_season, None]:
setattr(self, "last_nhl_team_id", seasons["team"].get("id", None))
if self.position == "G":
stats = [seasons["stat"].get(v, "") for k, v in GOALIE_STATS.items()]
player = Goalie(
*self.__dict__.values(),
stats_season,
*stats,
)
return await player.get_full_stats(season or stats_season)
else:
stats = [seasons["stat"].get(v, "") for v in SKATER_STATS.values()]
player = Skater(
*self.__dict__.values(),
stats_season,
*stats,
)
return await player.get_full_stats(season or stats_season)
log.debug(f"Returning {repr(self)}")
return self
def full_name_url(self) -> str:
return self.full_name.replace(" ", "-").lower()
def ep_url(self) -> str:
return f"https://www.eliteprospects.com/player/{self.ep_player_id}/{self.full_name_url()}"
def cap_friendly_url(self) -> str:
return f"https://www.capfriendly.com/players/{self.full_name_url()}"
@classmethod
async def from_id(cls, player_id: int):
async with aiohttp.ClientSession() as session:
async with session.get(f"https://records.nhl.com/site/api/player/{player_id}") as resp:
data = await resp.json()
return cls(*data["data"][0].values())
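# Hedged usage sketch (the player id below is a placeholder, not a real NHL id):
#   player = await Player.from_id(1234567)
#   player = await player.get_full_stats(None)  # resolves to the most recent NHL season
#   embed = player.get_embed()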
@dataclass
class Skater(Player):
season: str
time_on_ice: str
assists: int
goals: int
pim: int
shots: int
games: int
hits: int
powerplay_goals: int
powerplay_points: int
powerplay_time_on_ice: str
event_time_on_ice: str
penalty_minutes: str
face_off_percent: float
shot_percent: float
game_winning_goals: int
over_time_goals: int
short_handed_goals: int
short_handed_points: int
short_handed_time_on_ice: str
blocked: int
plusminus: int
points: int
shifts: int
time_on_ice_per_game: str
even_time_on_ice_per_game: str
shorthanded_time_on_ice_per_game: str
powerplay_time_on_ice_per_game: str
def __str__(self) -> str:
return "{0.full_name}, goals {0.goals}, games {0.games}".format(self)
def __repr__(self) -> str:
return "<Skater name={0.full_name} id={0.id} number={0.sweater_number}>".format(self)
async def get_full_stats(self, season: Optional[str]):
url = (
f"https://statsapi.web.nhl.com/api/v1/people/{self.id}/stats?stats=yearByYearPlayoffs"
)
log.debug(url)
log.debug(season)
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
data = await resp.json()
for seasons in reversed(data["stats"][0]["splits"]):
stats_season = seasons["season"]
if season in [stats_season, None]:
stats = [seasons["stat"].get(v, "") for v in SKATER_STATS.values()]
player = SkaterPlayoffs(
*self.__dict__.values(),
*stats,
)
return player
return self
def time_on_ice_average(self) -> str:
if self.time_on_ice:
minutes, seconds = self.time_on_ice.split(":")
total_seconds = (int(minutes) * 60) + int(seconds)
average_min = int((total_seconds / self.games) // 60)
average_sec = int((total_seconds / self.games) % 60)
if average_sec < 10:
average_sec = f"0{average_sec}"
return f"{average_min}:{average_sec}"
return ""
def get_embed(self) -> discord.Embed:
try:
team_id = self.current_team_id
log.debug(team_id)
team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0]
colour = int(TEAMS[team_name]["home"].replace("#", ""), 16)
logo = TEAMS[team_name]["logo"]
except IndexError:
team_name = _("No Team")
colour = 0xFFFFFF
logo = "https://cdn.bleacherreport.net/images/team_logos/328x328/nhl.png"
try:
team_id = self.last_nhl_team_id
log.debug(team_id)
team_name = [name for name, team in TEAMS.items() if team["id"] == team_id][0]
emoji = f'<:{TEAMS[team_name]["emoji"]}>'
except IndexError:
team_name = _("No Team")
emoji = ""
em = discord.Embed(colour=colour)
number = f"#{self.sweater_number}" if self.sweater_number else ""
em.set_author(name=f"{self.full_name} {number}", icon_url=logo)
em.set_thumbnail(url=self.headshot())
em.description = self.description()
post_data = [
[_("GP"), f"[ {self.games} ]"],
[_("Shots"), f"[ {self.shots} ]"],
[_("Goals"), f"[ {self.goals} ]"],
[_("Assists"), f"[ {self.assists} ]"],
[_("Hits"), f"[ {self.hits} ]"],
[_("Faceoff %"), f"[ {self.face_off_percent} ]"],
["+/-", f"[ {self.plusminus} ]"],
[_("Blocked Shots"), f"[ {self.blocked} ]"],
[_("PIM"), f"[ {self.pim} ]"],
[_("Avg. TOI"), f"[ {self.time_on_ice_average()} ]"],
]
stats_md = tabulate(
post_data, headers=[_("Stats"), f"{self.season[:4]}-{self.season[4:]}"]
)
em.set_thumbnail(url=self.headshot())
stats_str = f"{emoji} {team_name} {emoji}\n{box(stats_md, lang='apache')}"
em.add_field(name=_("Stats"), value=stats_str)
return em
@dataclass
class SkaterPlayoffs(Skater):
p_time_on_ice: str
p_assists: int
p_goals: int
p_pim: int
p_shots: int
p_games: int
p_hits: int
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from . import models
from azext_iot.constants import USER_AGENT
class ModelRepositoryControlPlaneApiConfiguration(AzureConfiguration):
"""Configuration for ModelRepositoryControlPlaneApi
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'http://localhost'
super(ModelRepositoryControlPlaneApiConfiguration, self).__init__(base_url)
self.add_user_agent('modelrepositorycontrolplaneapi/{}'.format(VERSION))
self.add_user_agent(USER_AGENT) # @c-ryan-k
self.credentials = credentials
class ModelRepositoryControlPlaneApi(SDKClient):
"""ModelRepositoryControlPlaneApi
:ivar config: Configuration for client.
:vartype config: ModelRepositoryControlPlaneApiConfiguration
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
self.config = ModelRepositoryControlPlaneApiConfiguration(credentials, base_url)
super(ModelRepositoryControlPlaneApi, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2020-05-01-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
def get_subjects_for_resources_async(
self, resource_id, resource_type, x_ms_client_request_id=None, custom_headers=None, raw=False, **operation_config):
"""Get the access permission for resource.
:param resource_id: The resource identifier.
:type resource_id: str
:param resource_type: The resource Type. Possible values include:
'Model', 'Tenant'
:type resource_type: str
:param x_ms_client_request_id: Gets or sets optional. Provides a
client-generated value that is recorded in the logs. Using this header
is highly recommended for correlating client-side activities with
requests received by the server.
:type x_ms_client_request_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~pnp.models.Target] or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2020-05-01-preview"
# Construct URL
url = self.get_subjects_for_resources_async.metadata['url']
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if x_ms_client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[Target]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_subjects_for_resources_async.metadata = {'url': '/resources/{resourceId}/types/{resourceType}/targets'}
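    # Hedged usage sketch (credentials and the resource identifier are placeholders):
    #   client = ModelRepositoryControlPlaneApi(credentials)
    #   targets = client.get_subjects_for_resources_async(
    #       resource_id="<resource-id>", resource_type="Model")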
def get_subjects_for_resources_async1(
self, resource_id, subject_id, resource_type, x_ms_client_request_id=None, custom_headers=None, raw=False, **operation_config):
"""Get the access permission for resource for the specified
subject/principal.
:param resource_id: The resource Id.
:type resource_id: str
:param subject_id: The Subject Id.
:type subject_id: str
:param resource_type: The resource Type. Possible values include:
'Model', 'Tenant'
:type resource_type: str
:param x_ms_client_request_id: Gets or sets optional. Provides a
client-generated value that is recorded in the logs. Using this header
is highly recommended for correlating client-side activities with
requests received by the server.
:type x_ms_client_request_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~pnp.models.Target] or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2020-05-01-preview"
# Construct URL
url = self.get_subjects_for_resources_async1.metadata['url']
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str'),
'subjectId': self._serialize.url("subject_id", subject_id, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if x_ms_client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[Target]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_subjects_for_resources_async1.metadata = {'url': '/resources/{resourceId}/types/{resourceType}/subjects/{subjectId}/targets'}
def assign_roles_async(
self, resource_id, resource_type, subject_id, x_ms_client_request_id=None, subject=None, custom_headers=None, raw=False, **operation_config):
"""Assign roles and permissions for a subject/principal to a resource.
:param resource_id: The resource identifier.
:type resource_id: str
:param resource_type: The resource type. Possible values include:
'Model', 'Tenant'
:type resource_type: str
:param subject_id: The subject identifier.
:type subject_id: str
:param x_ms_client_request_id: Gets or sets the optional client request id.
Provides a client-generated value that is recorded in the logs. Using
this header is highly recommended for correlating client-side
activities with requests received by the server.
:type x_ms_client_request_id: str
:param subject: Gets or sets the subject.
:type subject: ~pnp.models.Subject
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2020-05-01-preview"
# Construct URL
url = self.assign_roles_async.metadata['url']
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'subjectId': self._serialize.url("subject_id", subject_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if x_ms_client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if subject is not None:
body_content = self._serialize.body(subject, 'Subject')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
assign_roles_async.metadata = {'url': '/resources/{resourceId}/types/{resourceType}/subjects/{subjectId}'}
def remove_roles_async(
self, resource_id, subject_id, resource_type, role_id, x_ms_client_request_id=None, custom_headers=None, raw=False, **operation_config):
"""Removes the roles assigned to a subject/principal.
:param resource_id: The resource identifier.
:type resource_id: str
:param subject_id: The subject identifier.
:type subject_id: str
:param resource_type: The resource type. Possible values include:
'Model', 'Tenant'
:type resource_type: str
:param role_id: The role identifier. Possible values include:
'ModelsPublisher', 'ModelsCreator', 'TenantAdministrator',
'ModelAdministrator', 'ModelReader', 'None'
:type role_id: str
:param x_ms_client_request_id: Optional client request id. Provides a
client-generated value that is recorded in the logs. Using this header
is highly recommended for correlating client-side activities with
requests received by the server.
:type x_ms_client_request_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2020-05-01-preview"
# Construct URL
url = self.remove_roles_async.metadata['url']
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str'),
'subjectId': self._serialize.url("subject_id", subject_id, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'roleId': self._serialize.url("role_id", role_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if x_ms_client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
remove_roles_async.metadata = {'url': '/resources/{resourceId}/types/{resourceType}/subjects/{subjectId}/roles/{roleId}'}
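# Illustrative usage sketch (not part of the generated client): a typical
# assign-then-remove flow built from the two operations above. `ops` and the
# Subject payload are assumptions for illustration only; 'ModelReader' is one
# of the documented role_id values.
#
#   ops.assign_roles_async(resource_id='my-model', resource_type='Model',
#                          subject_id='subject-guid', subject=subject)
#   ops.remove_roles_async(resource_id='my-model', subject_id='subject-guid',
#                          resource_type='Model', role_id='ModelReader')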
def get_tenant_async(
self, x_ms_client_request_id=None, custom_headers=None, raw=False, **operation_config):
"""Gets the information about the tenant.
:param x_ms_client_request_id: Optional client request id. Provides a
client-generated value that is recorded in the logs. Using this header
is highly recommended for correlating client-side activities with
requests received by the server.
:type x_ms_client_request_id: str
:param dict custom_headers: headers that will be added to the request
1003100: "Crystal Demon's Spear",
1003101: "Crystal Demon's Spear+1",
1003102: "Crystal Demon's Spear+2",
1003103: "Crystal Demon's Spear+3",
1003104: "Crystal Demon's Spear+4",
1003105: "Crystal Demon's Spear+5",
1003200: "Lightning Demon's Spear",
1003201: "Lightning Demon's Spear+1",
1003202: "Lightning Demon's Spear+2",
1003203: "Lightning Demon's Spear+3",
1003204: "Lightning Demon's Spear+4",
1003205: "Lightning Demon's Spear+5",
1003300: "Raw Demon's Spear",
1003301: "Raw Demon's Spear+1",
1003302: "Raw Demon's Spear+2",
1003303: "Raw Demon's Spear+3",
1003304: "Raw Demon's Spear+4",
1003305: "Raw Demon's Spear+5",
1003400: "Magic Demon's Spear",
1003401: "Magic Demon's Spear+1",
1003402: "Magic Demon's Spear+2",
1003403: "Magic Demon's Spear+3",
1003404: "Magic Demon's Spear+4",
1003405: "Magic Demon's Spear+5",
1003406: "Magic Demon's Spear+6",
1003407: "Magic Demon's Spear+7",
1003408: "Magic Demon's Spear+8",
1003409: "Magic Demon's Spear+9",
1003410: "Magic Demon's Spear+10",
1003500: "Enchanted Demon's Spear",
1003501: "Enchanted Demon's Spear+1",
1003502: "Enchanted Demon's Spear+2",
1003503: "Enchanted Demon's Spear+3",
1003504: "Enchanted Demon's Spear+4",
1003505: "Enchanted Demon's Spear+5",
1003600: "Divine Demon's Spear",
1003601: "Divine Demon's Spear+1",
1003602: "Divine Demon's Spear+2",
1003603: "Divine Demon's Spear+3",
1003604: "Divine Demon's Spear+4",
1003605: "Divine Demon's Spear+5",
1003606: "Divine Demon's Spear+6",
1003607: "Divine Demon's Spear+7",
1003608: "Divine Demon's Spear+8",
1003609: "Divine Demon's Spear+9",
1003610: "Divine Demon's Spear+10",
1003700: "Occult Demon's Spear",
1003701: "Occult Demon's Spear+1",
1003702: "Occult Demon's Spear+2",
1003703: "Occult Demon's Spear+3",
1003704: "Occult Demon's Spear+4",
1003705: "Occult Demon's Spear+5",
1003800: "Fire Demon's Spear",
1003801: "Fire Demon's Spear+1",
1003802: "Fire Demon's Spear+2",
1003803: "Fire Demon's Spear+3",
1003804: "Fire Demon's Spear+4",
1003805: "Fire Demon's Spear+5",
1003806: "Fire Demon's Spear+6",
1003807: "Fire Demon's Spear+7",
1003808: "Fire Demon's Spear+8",
1003809: "Fire Demon's Spear+9",
1003810: "Fire Demon's Spear+10",
1003900: "Chaos Demon's Spear",
1003901: "Chaos Demon's Spear+1",
1003902: "Chaos Demon's Spear+2",
1003903: "Chaos Demon's Spear+3",
1003904: "Chaos Demon's Spear+4",
1003905: "Chaos Demon's Spear+5",
1004000: "Channeler's Trident",
1004001: "Channeler's Trident+1",
1004002: "Channeler's Trident+2",
1004003: "Channeler's Trident+3",
1004004: "Channeler's Trident+4",
1004005: "Channeler's Trident+5",
1004006: "Channeler's Trident+6",
1004007: "Channeler's Trident+7",
1004008: "Channeler's Trident+8",
1004009: "Channeler's Trident+9",
1004010: "Channeler's Trident+10",
1004011: "Channeler's Trident+11",
1004012: "Channeler's Trident+12",
1004013: "Channeler's Trident+13",
1004014: "Channeler's Trident+14",
1004015: "Channeler's Trident+15",
1004100: "Crystal Channeler's Trident",
1004101: "Crys. Channeler's Trident+1",
1004102: "Crys. Channeler's Trident+2",
1004103: "Crys. Channeler's Trident+3",
1004104: "Crys. Channeler's Trident+4",
1004105: "Crys. Channeler's Trident+5",
1004200: "Ltng. Channeler's Trident",
1004201: "Ltng. Channeler's Trident+1",
1004202: "Ltng. Channeler's Trident+2",
1004203: "Ltng. Channeler's Trident+3",
1004204: "Ltng. Channeler's Trident+4",
1004205: "Ltng. Channeler's Trident+5",
1004300: "Raw Channeler's Trident",
1004301: "Raw Channeler's Trident+1",
1004302: "Raw Channeler's Trident+2",
1004303: "Raw Channeler's Trident+3",
1004304: "Raw Channeler's Trident+4",
1004305: "Raw Channeler's Trident+5",
1004400: "Magic Channeler's Trident",
1004401: "Magic Channeler's Trident+1",
1004402: "Magic Channeler's Trident+2",
1004403: "Magic Channeler's Trident+3",
1004404: "Magic Channeler's Trident+4",
1004405: "Magic Channeler's Trident+5",
1004406: "Magic Channeler's Trident+6",
1004407: "Magic Channeler's Trident+7",
1004408: "Magic Channeler's Trident+8",
1004409: "Magic Channeler's Trident+9",
1004410: "Mag. Channeler's Trident+10",
1004500: "Ench. Channeler's Trident",
1004501: "Ench. Channeler's Trident+1",
1004502: "Ench. Channeler's Trident+2",
1004503: "Ench. Channeler's Trident+3",
1004504: "Ench. Channeler's Trident+4",
1004505: "Ench. Channeler's Trident+5",
1004600: "Divine Channeler's Trident",
1004601: "Div. Channeler's Trident+1",
1004602: "Div. Channeler's Trident+2",
1004603: "Div. Channeler's Trident+3",
1004604: "Div. Channeler's Trident+4",
1004605: "Div. Channeler's Trident+5",
1004606: "Div. Channeler's Trident+6",
1004607: "Div. Channeler's Trident+7",
1004608: "Div. Channeler's Trident+8",
1004609: "Div. Channeler's Trident+9",
1004610: "Div. Channeler's Trident+10",
1004700: "Occult Channeler's Trident",
1004701: "Occ. Channeler's Trident+1",
1004702: "Occ. Channeler's Trident+2",
1004703: "Occ. Channeler's Trident+3",
1004704: "Occ. Channeler's Trident+4",
1004705: "Occ. Channeler's Trident+5",
1004800: "Fire Channeler's Trident",
1004801: "Fire Channeler's Trident+1",
1004802: "Fire Channeler's Trident+2",
1004803: "Fire Channeler's Trident+3",
1004804: "Fire Channeler's Trident+4",
1004805: "Fire Channeler's Trident+5",
1004806: "Fire Channeler's Trident+6",
1004807: "Fire Channeler's Trident+7",
1004808: "Fire Channeler's Trident+8",
1004809: "Fire Channeler's Trident+9",
1004810: "Fire Channeler's Trident+10",
1004900: "Chaos Channeler's Trident",
1004901: "Chaos Channeler's Trident+1",
1004902: "Chaos Channeler's Trident+2",
1004903: "Chaos Channeler's Trident+3",
1004904: "Chaos Channeler's Trident+4",
1004905: "Chaos Channeler's Trident+5",
1006000: "Silver Knight Spear",
1006001: "Silver Knight Spear+1",
1006002: "Silver Knight Spear+2",
1006003: "Silver Knight Spear+3",
1006004: "Silver Knight Spear+4",
1006005: "Silver Knight Spear+5",
1050000: "Pike",
1050001: "Pike+1",
1050002: "Pike+2",
1050003: "Pike+3",
1050004: "Pike+4",
1050005: "Pike+5",
1050006: "Pike+6",
1050007: "Pike+7",
1050008: "Pike+8",
1050009: "Pike+9",
1050010: "Pike+10",
1050011: "Pike+11",
1050012: "Pike+12",
1050013: "Pike+13",
1050014: "Pike+14",
1050015: "Pike+15",
1050100: "Crystal Pike",
1050101: "Crystal Pike+1",
1050102: "Crystal Pike+2",
1050103: "Crystal Pike+3",
1050104: "Crystal Pike+4",
1050105: "Crystal Pike+5",
1050200: "Lightning Pike",
1050201: "Lightning Pike+1",
1050202: "Lightning Pike+2",
1050203: "Lightning Pike+3",
1050204: "Lightning Pike+4",
1050205: "Lightning Pike+5",
1050300: "Raw Pike",
1050301: "Raw Pike+1",
1050302: "Raw Pike+2",
1050303: "Raw Pike+3",
1050304: "Raw Pike+4",
1050305: "Raw Pike+5",
1050400: "Magic Pike",
1050401: "Magic Pike+1",
1050402: "Magic Pike+2",
1050403: "Magic Pike+3",
1050404: "Magic Pike+4",
1050405: "Magic Pike+5",
1050406: "Magic Pike+6",
1050407: "Magic Pike+7",
1050408: "Magic Pike+8",
1050409: "Magic Pike+9",
1050410: "Magic Pike+10",
1050500: "Enchanted Pike",
1050501: "Enchanted Pike+1",
1050502: "Enchanted Pike+2",
1050503: "Enchanted Pike+3",
1050504: "Enchanted Pike+4",
1050505: "Enchanted Pike+5",
1050600: "Divine Pike",
1050601: "Divine Pike+1",
1050602: "Divine Pike+2",
1050603: "Divine Pike+3",
1050604: "Divine Pike+4",
1050605: "Divine Pike+5",
1050606: "Divine Pike+6",
1050607: "Divine Pike+7",
1050608: "Divine Pike+8",
1050609: "Divine Pike+9",
1050610: "Divine Pike+10",
1050700: "Occult Pike",
1050701: "Occult Pike+1",
1050702: "Occult Pike+2",
1050703: "Occult Pike+3",
1050704: "Occult Pike+4",
1050705: "Occult Pike+5",
1050800: "Fire Pike",
1050801: "Fire Pike+1",
1050802: "Fire Pike+2",
1050803: "Fire Pike+3",
1050804: "Fire Pike+4",
1050805: "Fire Pike+5",
1050806: "Fire Pike+6",
1050807: "Fire Pike+7",
1050808: "Fire Pike+8",
1050809: "Fire Pike+9",
1050810: "Fire Pike+10",
1050900: "Chaos Pike",
1050901: "Chaos Pike+1",
1050902: "Chaos Pike+2",
1050903: "Chaos Pike+3",
1050904: "Chaos Pike+4",
1050905: "Chaos Pike+5",
1051000: "Dragonslayer Spear",
1051001: "Dragonslayer Spear+1",
1051002: "Dragonslayer Spear+2",
1051003: "Dragonslayer Spear+3",
1051004: "Dragonslayer Spear+4",
1051005: "Dragonslayer Spear+5",
1051100: "Dragonslayer Spear",
1051101: "Dragonslayer Spear+1",
1051102: "Dragonslayer Spear+2",
1051103: "Dragonslayer Spear+3",
1051104: "Dragonslayer Spear+4",
1051105: "Dragonslayer Spear+5",
1051200: "Dragonslayer Spear",
1051201: "Dragonslayer Spear+1",
1051202: "Dragonslayer Spear+2",
1051203: "Dragonslayer Spear+3",
1051204: "Dragonslayer Spear+4",
1051205: "Dragonslayer Spear+5",
1051300: "Dragonslayer Spear",
1051301: "Dragonslayer Spear+1",
1051302: "Dragonslayer Spear+2",
1051303: "Dragonslayer Spear+3",
1051304: "Dragonslayer Spear+4",
1051305: "Dragonslayer Spear+5",
1051400: "Dragonslayer Spear",
1051401: "Dragonslayer Spear+1",
1051402: "Dragonslayer Spear+2",
1051403: "Dragonslayer Spear+3",
1051404: "Dragonslayer Spear+4",
1051405: "Dragonslayer Spear+5",
1051500: "Dragonslayer Spear",
1051501: "Dragonslayer Spear+1",
1051502: "Dragonslayer Spear+2",
1051503: "Dragonslayer Spear+3",
1051504: "Dragonslayer Spear+4",
1051505: "Dragonslayer Spear+5",
1051600: "Dragonslayer Spear",
1051601: "Dragonslayer Spear+1",
1051602: "Dragonslayer Spear+2",
1051603: "Dragonslayer Spear+3",
1051604: "Dragonslayer Spear+4",
1051605: "Dragonslayer Spear+5",
1051700: "Dragonslayer Spear",
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.0997091,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 3.29026,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
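# Illustrative sketch (assumption: the per-core dicts in this list are bound to
# a variable named `cores` by the surrounding script): summing the core-level
# power figures reported by McPAT.
#
#   def core_power(core):
#       # runtime dynamic plus leakage, in watts, for one core summary dict
#       return (core['Runtime Dynamic'] + core['Subthreshold Leakage']
#               + core['Gate Leakage'])
#
#   total_core_power = sum(core_power(c) for c in cores)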
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 5.57367e-05,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202732,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.000759011,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0624466,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.100724,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0508421,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.214013,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0713053,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.96332,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.000143393,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00261929,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0189432,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0193712,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0190866,
'Execution Unit/Register Files/Runtime Dynamic': 0.0219905,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.039922,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.104843,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.948956,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000909702,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000909702,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000817964,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000330657,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00027827,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00291564,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00780697,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0186221,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.18453,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0731873,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0632489,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.46053,
'Instruction Fetch Unit/Runtime Dynamic': 0.165781,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0299302,
'L2/Runtime Dynamic': 0.0088234,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.90078,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.331845,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0214709,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0214709,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.00217,
'Load Store Unit/Runtime Dynamic': 0.459204,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0529435,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.105887,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0187898,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.019223,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0736494,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0120464,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.262036,
'Memory Management Unit/Runtime Dynamic': 0.0312695,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.3074,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.00037742,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00282201,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.031638,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage':
reshard.
transposed_expert_output = jnp.einsum('egcm->gecm', expert_output)
transposed_expert_output = Split(transposed_expert_output, ap.gecm)
combined_output = jnp.einsum('gecm,gsec->gsm', transposed_expert_output,
combine_tensor)
combined_output = Split(combined_output, ap.gsm)
combined_output = combined_output.reshape((bs, s_len, output_dims))
# Apply padding.
combined_output *= (1.0 - jnp.expand_dims(paddings, -1)).astype(fprop_dtype)
# Residual dropout.
after_residual = self.residual_dropout.FProp(theta.residual_dropout,
combined_output)
if p.add_skip_connection:
if p.residual_droppath_prob:
out = self.residual_droppath.FProp(theta.residual_droppath, inputs,
after_residual)
else:
out = inputs + after_residual * p.residual_weight
if not p.pre_layer_norm:
out = self.layer_norm.FProp(theta.layer_norm, out)
# Add loss to a global collection. We don't return the loss to the caller
# to avoid the change of the api here.
aux_loss_ctx = py_utils.AuxLossContext.Current()
if aux_loss_ctx is not None:
aux_loss_ctx.AddLoss(aux_loss)
return out
class TransformerLayer(base_layer.BaseLayer):
"""Transformer layer with multi-headed attention."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('input_dims', 0, 'Dimension of the transformer block input.')
p.Define('hidden_dims', 0, 'Hidden dimension of FFN layer.')
p.Define('num_heads', None, 'Num of heads in self attention.')
p.Define(
'dropout_tpl', stochastics.DropoutLayer.Params(),
'Residual dropout params template. keep_prob will be reset to '
'(1.0 - residual_dropout_prob).')
p.Define('atten_dropout_prob', 0.0,
'Probability at which we apply dropout to the attention weights.')
p.Define(
'residual_dropout_prob', 0.0,
'Probability at which we apply dropout to the residual layers, '
'such that, residual(x, y) = (x + dropout(y)).')
p.Define('relu_dropout_prob', 0.0,
'Probability at which we apply dropout to the FFN layers.')
p.Define('mask_self_attention', False, 'If True, use causal mask.')
p.Define('cross_attention', False, 'If True, perform cross '
'encoder-decoder attention.')
p.Define('ln_tpl', normalizations.LayerNorm.Params(), 'Layer norm params.')
p.Define('tr_atten_tpl',
attentions.MultiHeadedAttention.Params().Set(),
'Attention Layer params.')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
p.Define('tr_fflayer_tpl', TransformerFeedForwardLayer.Params(),
'Transformer Feed-Forward Layer params.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
# Initialize Layer Norm
params = p.ln_tpl.Copy()
params.name = 'layer_norm'
params.input_dims = p.input_dims
self.CreateChild('layer_norm', params)
# Initialize multi-headed self-attention
params = p.tr_atten_tpl.Copy()
params.name = 'multihead_self_atten'
params.input_dim = p.input_dims
params.hidden_dim = p.input_dims
params.num_heads = p.num_heads
params.atten_dropout_prob = p.atten_dropout_prob
self.CreateChild('self_attention', params)
# Initialize residual dropout.
params = p.dropout_tpl.Copy()
params.keep_prob = (1.0 - p.residual_dropout_prob)
self.CreateChild('residual_dropout', params)
# Initialize multi-headed cross-attention
if p.cross_attention:
params = p.tr_atten_tpl.Copy()
params.name = 'multihead_cross_atten'
params.input_dim = p.input_dims
params.hidden_dim = p.input_dims
params.num_heads = p.num_heads
params.atten_dropout_prob = p.atten_dropout_prob
self.CreateChild('cross_attention', params)
# Initialize feed-forward layer
params = p.tr_fflayer_tpl.Copy()
params.name = 'tr_fflayer'
params.input_dims = p.input_dims
params.hidden_dims = p.hidden_dims
params.relu_dropout_prob = p.relu_dropout_prob
params.residual_dropout_prob = p.residual_dropout_prob
self.CreateChild('ff_layer', params)
def InitStates(self, theta: NestedMap, target_batch_size: int,
target_max_length: int) -> NestedMap:
return self.self_attention.InitStates(theta.self_attention,
target_batch_size, target_max_length)
def FProp(
self,
theta: NestedMap,
inputs: JTensor,
paddings: JTensor,
attention_mask: JTensor,
cross_inputs: Optional[JTensor] = None,
cross_attention_mask: Optional[JTensor] = None
) -> Tuple[JTensor, JTensor]:
"""Transformer decoder layer.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: Input sequence JTensor of shape [B, T, H].
paddings: Input paddings JTensor of shape [B, T] (only used in FFN layer).
attention_mask: Self attention mask ready to add to the logits. It can be
of shape [B/1, 1, T/1, T] which is broadcast compatible with the self
attention matrix of shape [B, N, T, T]. This is assumed to have combined
paddings, causal masking as well as segment maskings.
cross_inputs: Output of the encoder, to be used for cross attention, of
shape [B, S, H].
cross_attention_mask: Cross attention mask ready to add to the logits. It
can be of shape [B/1, 1, T/1, S] which is broadcast compatible with the
cross attention matrix of shape [B, N, T, T]. This is assumed to have
combined paddings as well as segment maskings.
Returns:
The fflayer output with shape [B, T, D].
atten_probs: A NestedMap with keys `self_atten` <float>[B, N, T, T].
"""
# Layer normalize input
inputs_normalized = self.layer_norm.FProp(theta.layer_norm, inputs)
# Compute self-attention, key/value vectors are the input itself
atten_output, self_atten_probs = self.self_attention.FProp(
theta.self_attention,
inputs_normalized,
inputs_normalized,
inputs_normalized,
atten_mask=attention_mask)
atten_probs = NestedMap(self_atten=self_atten_probs)
# Residual dropout and connection
atten_output = self.residual_dropout.FProp(theta.residual_dropout,
atten_output)
atten_output += inputs
# Apply cross attention if applicable
if self.params.cross_attention:
assert cross_inputs is not None
assert cross_attention_mask is not None
cross_atten_output, cross_atten_probs = self.cross_attention.FProp(
theta.cross_attention,
self.layer_norm.FProp(theta.layer_norm, atten_output),
cross_inputs,
cross_inputs,
atten_mask=cross_attention_mask)
atten_probs.cross_atten = cross_atten_probs
# Residual dropout and connection
cross_atten_output = self.residual_dropout.FProp(theta.residual_dropout,
cross_atten_output)
atten_output += cross_atten_output
# Apply FFN layer
output = self.ff_layer.FProp(
theta.ff_layer, atten_output, paddings=paddings)
return output, atten_probs
def ExtendStep(
self,
theta: NestedMap,
cached_states: NestedMap,
inputs: JTensor,
*,
time_step: JTensor,
attention_mask: JTensor,
cross_inputs: Optional[JTensor] = None,
cross_attention_mask: Optional[JTensor] = None
) -> Tuple[JTensor, NestedMap]:
"""Transformer decoder layer, autoregressive cached decoding.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
cached_states: A `.NestedMap` object containing tensors which are the
results of previous attentions, used for cached decoding. key - [T, B,
N, H]. value - [T, B, N, H].
inputs: Target sequence of shape [B, D] corresponding to target sequence
at index time_step.
time_step: A scalar, the current decode step, 0-based.
attention_mask: per step attention mask for this time step, of shape [B,
1, T]. This combines causal mask with any segment mask if applicable.
cross_inputs: Source sequence - [B, S, H].
cross_attention_mask: if not None, cross_segment_mask for this time step,
of shape [B, 1, 1, S]. This combines padding mask with any segment mask
if applicable.
Returns:
(updated_states, cur_output)
* updated_states: A `.NestedMap` object containing the updated states.
* cur_output: [B, D]
key - [T, B, N, H].
value - [T, B, N, H].
"""
if not self.params.mask_self_attention:
raise ValueError('ExtendStep should only be called with causal masking.')
# Layer normalize input
inputs_normalized = self.layer_norm.FProp(theta.layer_norm, inputs)
# Self-attention layer.
updated_states, atten_output = self.self_attention.ExtendStep(
theta.self_attention,
cached_states,
inputs_normalized,
atten_mask=attention_mask,
time_step=time_step)
# Residual dropout and connection
atten_output = self.residual_dropout.FProp(theta.residual_dropout,
atten_output)
atten_output += inputs
# Apply cross attention if applicable
if self.params.cross_attention:
assert cross_inputs is not None
assert cross_attention_mask is not None
atten_output_normalized = self.layer_norm.FProp(
theta.layer_norm, jnp.expand_dims(atten_output, axis=1))
cross_atten_output, _ = self.cross_attention.FProp(
theta.cross_attention,
atten_output_normalized,
cross_inputs,
cross_inputs,
atten_mask=cross_attention_mask)
# Residual dropout and connection
cross_atten_output = self.residual_dropout.FProp(theta.residual_dropout,
cross_atten_output)
# Squeeze sequence dim
cross_atten_output = jnp.squeeze(cross_atten_output, axis=1)
atten_output += cross_atten_output
# Apply FFN layer
output = self.ff_layer.FProp(theta.ff_layer, atten_output)
return updated_states, output
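# Illustrative configuration sketch (not part of the original library): how a
# decoder-style TransformerLayer defined above might be set up. The concrete
# dimensions and the Instantiate() call are assumptions based on the
# lingvo-style Params API used throughout this file.
#
#   xformer_p = TransformerLayer.Params().Set(
#       name='decoder_layer',
#       input_dims=512,
#       hidden_dims=2048,
#       num_heads=8,
#       mask_self_attention=True,   # required for ExtendStep/cached decoding
#       cross_attention=False)
#   xformer = xformer_p.Instantiate()
#   # out, atten_probs = xformer.FProp(theta, inputs, paddings, attention_mask)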
class StackedTransformerLayers(base_layer.BaseLayer):
"""A stack of Transformer layers."""
@staticmethod
def DefineParams(p):
p.Define('cross_attention', False,
'If set, introduces cross encoder-decoder attention layer.')
p.Define('mask_self_attention', False, 'Use masked self-attention.')
p.Define('num_layers', 0, 'Num of layers in this stack.')
p.Define('model_dims', 0, 'Model dimension in Transformer layers.')
p.Define('hidden_dims', 0,
'The hidden layer dimension of FFN in Transformer layers.')
p.Define('num_heads', 0, 'Number of attention heads.')
p.Define('dropout_prob', 0.0,
'Apply dropout at this prob at various places.')
p.Define(
'transformer_layer_params_tpl', TransformerLayer.Params(),
'A template of TransformerLayer.params, can be a list of params '
'of length equal to the num_layers or a factor of num_layers. '
'For a factor, the params are tiled as [a, a, ..., b, b,...,].')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
p.Define(
'fold_padding_with_segment_mask', False, 'If True then segment '
'mask is supposed to include the padding mask as well, i.e. '
'treating PADs as one sequence and non-PADs as another.')
p.Define(
'enable_while_loop', False,
'Whether or not to use a while loop to unroll the transformer layer'
' stack. Potential benefits: 1) reduce xla compilation time. '
' 2) improve hbm usage due to explicit rematerialization.')
p.Define(
'checkpoint_policy', recurrent.AutodiffCheckpointType.SAVE_NOTHING,
'How to checkpoint residuals for BProp: save nothing, dot only or '
'dot with no batch dimensions.')
# MoE related params.
p.Define('moe_layer_tpl', TransformerShardedMoeLayer.Params(),
'Template configuration for the moe feedforward layer.')
p.Define('num_experts', 0, 'Total number of experts.')
p.Define('num_groups', 1, 'Num of groups for dispatching.')
p.Define(
'min_group_size', None,
'If not None, num_groups will be adjusted so that there will be '
'at least min_group_size tokens in each group.')
p.Define('moe_layers', [], 'List of MoE layer indices, e.g. [0, 2, 4].')
return p
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p = StackedTransformerLayers.DefineParams(p)
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
assert p.num_layers > 0
assert p.model_dims > 0
assert p.hidden_dims > 0
assert p.num_heads > 0
assert 0.0 <= p.dropout_prob < 1.0
def _MoeLayerParams(ff_p):
"""Convert a TransformerFeedforwardLayer to a MoE Layer."""
assert issubclass(ff_p.cls, TransformerFeedForwardLayer)
p = self.params
assert p.num_experts > 0
moe_p = p.moe_layer_tpl.Copy()
# Copy over the base params.
employees in the same report."))
if any(not expense.product_id for expense in self):
raise UserError(_("You can not create report without product."))
todo = self.filtered(lambda x: x.payment_mode=='own_account') or self.filtered(lambda x: x.payment_mode=='company_account')
sheet = self.env['hr.expense.sheet'].create({
'company_id': self.company_id.id,
'employee_id': self[0].employee_id.id,
'name': todo[0].name if len(todo) == 1 else '',
'expense_line_ids': [(6, 0, todo.ids)]
})
return sheet
def action_submit_expenses(self):
sheet = self._create_sheet_from_expenses()
sheet.action_submit_sheet()
return {
'name': _('New Expense Report'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'hr.expense.sheet',
'target': 'current',
'res_id': sheet.id,
}
def action_get_attachment_view(self):
self.ensure_one()
res = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')
res['domain'] = [('res_model', '=', 'hr.expense'), ('res_id', 'in', self.ids)]
res['context'] = {'default_res_model': 'hr.expense', 'default_res_id': self.id}
return res
# ----------------------------------------
# Business
# ----------------------------------------
def _prepare_move_values(self):
"""
This function prepares move values related to an expense
"""
self.ensure_one()
journal = self.sheet_id.bank_journal_id if self.payment_mode == 'company_account' else self.sheet_id.journal_id
account_date = self.sheet_id.accounting_date or self.date
move_values = {
'journal_id': journal.id,
'company_id': self.sheet_id.company_id.id,
'date': account_date,
'ref': self.sheet_id.name,
# force the name to the default value, to avoid an eventual 'default_name' in the context
# to set it to '' which would cause no number to be given to the account.move when posted.
'name': '/',
}
return move_values
def _get_account_move_by_sheet(self):
""" Return a mapping between the expense sheet of current expense and its account move
:returns dict where key is a sheet id, and value is an account move record
"""
move_grouped_by_sheet = {}
for expense in self:
# create the move that will contain the accounting entries
if expense.sheet_id.id not in move_grouped_by_sheet:
move_vals = expense._prepare_move_values()
move = self.env['account.move'].with_context(default_journal_id=move_vals['journal_id']).create(move_vals)
move_grouped_by_sheet[expense.sheet_id.id] = move
else:
move = move_grouped_by_sheet[expense.sheet_id.id]
return move_grouped_by_sheet
def _get_expense_account_source(self):
self.ensure_one()
if self.account_id:
account = self.account_id
elif self.product_id:
account = self.product_id.product_tmpl_id.with_company(self.company_id)._get_product_accounts()['expense']
if not account:
raise UserError(
_("No Expense account found for the product %s (or for its category), please configure one.") % (self.product_id.name))
else:
account = self.env['ir.property'].with_company(self.company_id)._get('property_account_expense_categ_id', 'product.category')
if not account:
raise UserError(_('Please configure Default Expense account for Product expense: `property_account_expense_categ_id`.'))
return account
def _get_expense_account_destination(self):
self.ensure_one()
if not self.employee_id.sudo().address_home_id:
raise UserError(_("No Home Address found for the employee %s, please configure one.") % (self.employee_id.name))
partner = self.employee_id.sudo().address_home_id.with_company(self.company_id)
account_dest = partner.property_account_payable_id.id or partner.parent_id.property_account_payable_id.id
return account_dest
def _get_account_move_line_values(self):
move_line_values_by_expense = {}
for expense in self:
move_line_name = expense.employee_id.name + ': ' + expense.name.split('\n')[0][:64]
account_src = expense._get_expense_account_source()
account_dst = expense._get_expense_account_destination()
account_date = expense.sheet_id.accounting_date or expense.date or fields.Date.context_today(expense)
company_currency = expense.company_id.currency_id
move_line_values = []
taxes = expense.tax_ids.with_context(round=True).compute_all(expense.unit_amount, expense.currency_id, expense.quantity, expense.product_id)
total_amount = 0.0
total_amount_currency = 0.0
partner_id = expense.employee_id.sudo().address_home_id.commercial_partner_id.id
# source move line
balance = expense.currency_id._convert(taxes['total_excluded'], company_currency, expense.company_id, account_date)
amount_currency = taxes['total_excluded']
move_line_src = {
'name': move_line_name,
'quantity': expense.quantity or 1,
'debit': balance if balance > 0 else 0,
'credit': -balance if balance < 0 else 0,
'amount_currency': amount_currency,
'account_id': account_src.id,
'product_id': expense.product_id.id,
'product_uom_id': expense.product_uom_id.id,
'analytic_account_id': expense.analytic_account_id.id,
'analytic_tag_ids': [(6, 0, expense.analytic_tag_ids.ids)],
'expense_id': expense.id,
'partner_id': partner_id,
'tax_ids': [(6, 0, expense.tax_ids.ids)],
'tax_tag_ids': [(6, 0, taxes['base_tags'])],
'currency_id': expense.currency_id.id,
}
move_line_values.append(move_line_src)
total_amount -= balance
total_amount_currency -= move_line_src['amount_currency']
# taxes move lines
for tax in taxes['taxes']:
balance = expense.currency_id._convert(tax['amount'], company_currency, expense.company_id, account_date)
amount_currency = tax['amount']
if tax['tax_repartition_line_id']:
rep_ln = self.env['account.tax.repartition.line'].browse(tax['tax_repartition_line_id'])
base_amount = self.env['account.move']._get_base_amount_to_display(tax['base'], rep_ln)
else:
base_amount = None
move_line_tax_values = {
'name': tax['name'],
'quantity': 1,
'debit': balance if balance > 0 else 0,
'credit': -balance if balance < 0 else 0,
'amount_currency': amount_currency,
'account_id': tax['account_id'] or move_line_src['account_id'],
'tax_repartition_line_id': tax['tax_repartition_line_id'],
'tax_tag_ids': tax['tag_ids'],
'tax_base_amount': base_amount,
'expense_id': expense.id,
'partner_id': partner_id,
'currency_id': expense.currency_id.id,
'analytic_account_id': expense.analytic_account_id.id if tax['analytic'] else False,
'analytic_tag_ids': [(6, 0, expense.analytic_tag_ids.ids)] if tax['analytic'] else False,
}
total_amount -= balance
total_amount_currency -= move_line_tax_values['amount_currency']
move_line_values.append(move_line_tax_values)
# destination move line
move_line_dst = {
'name': move_line_name,
'debit': total_amount > 0 and total_amount,
'credit': total_amount < 0 and -total_amount,
'account_id': account_dst,
'date_maturity': account_date,
'amount_currency': total_amount_currency,
'currency_id': expense.currency_id.id,
'expense_id': expense.id,
'partner_id': partner_id,
}
move_line_values.append(move_line_dst)
move_line_values_by_expense[expense.id] = move_line_values
return move_line_values_by_expense
def action_move_create(self):
'''
main function that is called when trying to create the accounting entries related to an expense
'''
move_group_by_sheet = self._get_account_move_by_sheet()
move_line_values_by_expense = self._get_account_move_line_values()
for expense in self:
# get the account move of the related sheet
move = move_group_by_sheet[expense.sheet_id.id]
# get move line values
move_line_values = move_line_values_by_expense.get(expense.id)
# link move lines to move, and move to expense sheet
move.write({'line_ids': [(0, 0, line) for line in move_line_values]})
expense.sheet_id.write({'account_move_id': move.id})
if expense.payment_mode == 'company_account':
expense.sheet_id.paid_expense_sheets()
# post the moves
for expense in self:
if not expense.payment_mode == 'company_account':
for move in move_group_by_sheet[expense.sheet_id.id]:
if move.state != 'posted':
move._post()
return move_group_by_sheet
def refuse_expense(self, reason):
self.write({'is_refused': True})
self.sheet_id.write({'state': 'cancel'})
self.sheet_id.message_post_with_view('hr_expense.hr_expense_template_refuse_reason',
values={'reason': reason, 'is_sheet': False, 'name': self.name})
@api.model
def get_expense_dashboard(self):
expense_state = {
'draft': {
'description': _('to report'),
'amount': 0.0,
'currency': self.env.company.currency_id.id,
},
'reported': {
'description': _('under validation'),
'amount': 0.0,
'currency': self.env.company.currency_id.id,
},
'approved': {
'description': _('to be reimbursed'),
'amount': 0.0,
'currency': self.env.company.currency_id.id,
}
}
if not self.env.user.employee_ids:
return expense_state
target_currency = self.env.company.currency_id
expenses = self.read_group(
[
('employee_id', 'in', self.env.user.employee_ids.ids),
('payment_mode', '=', 'own_account'),
('state', 'in', ['draft', 'reported', 'approved'])
], ['total_amount', 'currency_id', 'state'], ['state', 'currency_id'], lazy=False)
for expense in expenses:
state = expense['state']
currency = self.env['res.currency'].browse(expense['currency_id'][0]) if expense['currency_id'] else target_currency
amount = currency._convert(
expense['total_amount'], target_currency, self.env.company, fields.Date.today())
expense_state[state]['amount'] += amount
return expense_state
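# Example of the structure returned by get_expense_dashboard() (illustrative
# amounts; 'currency' holds the id of the current company currency):
#
#   {
#       'draft':    {'description': 'to report',        'amount': 120.0, 'currency': 1},
#       'reported': {'description': 'under validation', 'amount':   0.0, 'currency': 1},
#       'approved': {'description': 'to be reimbursed', 'amount':  75.5, 'currency': 1},
#   }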
# ----------------------------------------
# Mail Thread
# ----------------------------------------
@api.model
def message_new(self, msg_dict, custom_values=None):
email_address = email_split(msg_dict.get('email_from', False))[0]
employee = self.env['hr.employee'].search([
'|',
('work_email', 'ilike', email_address),
('user_id.email', 'ilike', email_address)
], limit=1)
expense_description = msg_dict.get('subject', '')
if employee.user_id:
company = employee.user_id.company_id
currencies = company.currency_id | employee.user_id.company_ids.mapped('currency_id')
else:
company = employee.company_id
currencies = company.currency_id
if not company: # ultimate fallback, since company_id is required on expense
company = self.env.company
# The expenses alias is the same for all companies, we need to set the proper context
# To select the product account
self = self.with_company(company)
product, price, currency_id, expense_description = self._parse_expense_subject(expense_description, currencies)
vals = {
'employee_id': employee.id,
'name': expense_description,
'unit_amount': price,
'product_id': product.id if product else None,
'product_uom_id': product.uom_id.id,
'tax_ids': [(4, tax.id, False) for tax in product.supplier_taxes_id],
'quantity': 1,
'company_id': company.id,
'currency_id': currency_id.id
}
account = product.product_tmpl_id._get_product_accounts()['expense']
if account:
vals['account_id'] = account.id
expense = super(HrExpense, self).message_new(msg_dict, dict(custom_values or {}, **vals))
self._send_expense_success_mail(msg_dict, expense)
return expense
@api.model
def _parse_product(self, expense_description):
"""
Parse the subject to find the product.
Product code should be the first word of expense_description
Return product.product and updated description
"""
product_code = expense_description.split(' ')[0]
product = self.env['product.product'].search([('can_be_expensed', '=', True), ('default_code', '=ilike', product_code)], limit=1)
if product:
expense_description = expense_description.replace(product_code, '', 1)
return product, expense_description
@api.model
def _parse_price(self, expense_description, currencies):
""" Return price, currency and updated description """
symbols, symbols_pattern, float_pattern = [], '', '[+-]?(\d+[.,]?\d*)'
price = 0.0
for currency in currencies:
symbols.append(re.escape(currency.symbol))
symbols.append(re.escape(currency.name))
symbols_pattern = '|'.join(symbols)
price_pattern = "((%s)?\s?%s\s?(%s)?)" % (symbols_pattern, float_pattern, symbols_pattern)
matches = re.findall(price_pattern, expense_description)
if matches:
match = max(matches, key=lambda match: len([group for group in match if group])) # get the longest match. e.g. "2 chairs 120$" -> the price is 120$, not 2
full_str = match[0]
currency_str = match[1] or match[3]
price = match[2].replace(',', '.')
if currency_str:
currency = currencies.filtered(lambda c: currency_str in [c.symbol, c.name])[0]
currency = currency or currencies[0]
expense_description = expense_description.replace(full_str, ' ') # remove price from description
expense_description = re.sub(' +', ' ', expense_description.strip())
price = float(price)
return price, currency, expense_description
@api.model
def _parse_expense_subject(self, expense_description, currencies):
""" Fetch product, price and currency info from mail subject.
Product can be identified based on product name or product code.
It can be passed between [] or it can be placed at start.
When parsing, only consider currencies passed as parameter.
This will fetch currency in symbol($) or ISO name (USD).
Some valid examples:
Travel by Air [TICKET] USD 1205.91
TICKET $1205.91 Travel by Air
Extra expenses 29.10EUR [EXTRA]
"""
product, expense_description = self._parse_product(expense_description)
price, currency_id, expense_description = self._parse_price(expense_description, currencies)
return product, price, currency_id, expense_description
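# Illustrative walk-through (assuming a product whose default_code is 'TICKET'
# can be expensed and USD is among the candidate currencies): for the subject
# "TICKET $1205.91 Travel by Air" the helpers above would yield roughly
#
#   product, price, currency, description = self._parse_expense_subject(
#       "TICKET $1205.91 Travel by Air", currencies)
#   # product.default_code == 'TICKET', price == 1205.91,
#   # currency.name == 'USD', description == 'Travel by Air'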
# TODO: Make api.multi
def _send_expense_success_mail(self, msg_dict, expense):
mail_template_id = 'hr_expense.hr_expense_template_register' if expense.employee_id.user_id else 'hr_expense.hr_expense_template_register_no_user'
expense_template = self.env.ref(mail_template_id)
rendered_body = expense_template._render({'expense': expense}, engine='ir.qweb')
body = self.env['mail.render.mixin']._replace_local_links(rendered_body)
# TDE TODO: seems louche, check to use notify
if expense.employee_id.user_id.partner_id:
expense.message_post(
partner_ids=expense.employee_id.user_id.partner_id.ids,
subject='Re: %s' % msg_dict.get('subject', ''),
body=body,
subtype_id=self.env.ref('mail.mt_note').id,
email_layout_xmlid='mail.mail_notification_light',
)
else:
self.env['mail.mail'].sudo().create({
'email_from': self.env.user.email_formatted,
'author_id': self.env.user.partner_id.id,
'body_html': body,
'subject': 'Re: %s' % msg_dict.get('subject', ''),
'email_to': msg_dict.get('email_from', False),
<filename>warrior/WarriorCore/Classes/warmock_class.py
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
from Framework.Utils.testcase_Utils import pNote
from Framework.Utils.print_Utils import print_info, print_warning
from WarriorCore.Classes.war_cli_class import WarriorCliClass as WarCli
# For functions/methods that should only be mocked in trial mode (not sim mode), put the name here
VERIFY_ONLY = ["verify_cmd_response", "verify_inorder_cmd_response"]
def mockready(func):
"""
Decorator function that assign a mockready attrib to input func
the attrib will be used to decide if the func is mockable or not
"""
if not WarCli.mock and not WarCli.sim:
return func
func.__dict__["mockready"] = True
return func
def mocked(func):
"""
Decorator function that route function to mocked function
"""
def inner(*args, **kwargs):
"""
Call corresponding mock method
"""
# If warrior is not in mock or sim mode
# or if warrior is in sim mode but it's a VERIFY_ONLY function
# return the original function
if (not WarCli.mock and not WarCli.sim) or (WarCli.sim and func.__name__ in VERIFY_ONLY):
return func(*args, **kwargs)
"""
If warrior is in simulator mode
this function will parse the response file
and tell warrior to store simresp value from each cmd
args[0] here is the testdata file
"""
if func.__name__ == "get_command_details_from_testdata":
if WarCli.sim and args[0] is not None and args[0] != "":
from Framework.Utils.data_Utils import cmd_params
cmd_params.update({"sim_response_list": "simresp"})
get_response_file(args[0])
return func(*args, **kwargs)
"""
link the command with its simresp value into a dict
so warrior knows which cmd maps to which response
"""
if func.__name__ == "_get_cmd_details":
result = func(*args, **kwargs)
pNote("The non-substituted commands:")
for index, cmd in enumerate(result["command_list"]):
pNote("#{}: {}".format(index+1, cmd))
if WarCli.sim:
if MockUtils.cli_Utils.response_reference_dict.get(cmd, None) is not None:
pNote("Command: {} is already linked to simresp: {}"\
.format(cmd, MockUtils.cli_Utils.response_reference_dict[cmd]))
else:
MockUtils.cli_Utils.response_reference_dict[cmd] = \
result["sim_response_list"][index]
return result
# Debug info
# pNote("Util {} is mocked".format(func.__name__), "WARNING")
# for value in [str(x) + ": " + str(y) for x, y in zip(inspect.getargspec(func)[0], args)]:
# pNote(value)
# for key, value in kwargs.items():
# pNote(str(key) + ": " + str(value))
# mapping function to mocked function
func_name = func.__name__
func_module = func.__module__.split(".")[-1]
if func_module in dir(MockUtils):
func_module = getattr(MockUtils, func_module)
if func_name in dir(func_module):
function = getattr(func_module, func_name)
else:
print_warning("Cannot locate {} in {}".format(func_name, dir(func_module)))
function = func
else:
print_warning("Cannot locate {} in {}".format(func_module, dir(MockUtils)))
function = func
return function(*args, **kwargs)
return inner
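# Illustrative sketch (not from the original Warrior sources) of how a utility
# would opt in to the mocking machinery above; the keyword name and signature
# below are hypothetical:
#
#     @mocked
#     @mockready
#     def send_command_example(session_object, start_prompt, end_prompt, command):
#         ...real implementation...
#
# Outside mock/sim mode both decorators return the function unchanged. In
# mock/sim mode @mockready tags it with __dict__["mockready"] = True, and
# @mocked reroutes each call to the matching method on MockUtils (looked up by
# module and function name inside inner() above) when one exists.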
def get_cmd_specific_response_file(root):
"""
Map the commands block in the response file into a dict like this
{
"cmd1_text": {"default": "default response", "r1":"text"},
"cmd2_text": {"default": "def resp for cmd2", "r2":"hello"}
}
:argument:
root: response file root - xml elem
"""
cmd_specific_response_dict = {}
cmds = root.find("commands")
if cmds is not None:
for cmd in cmds:
# cmd_name = cmd.tag
cmd_text = cmd.get("text", "")
if cmd_text in cmd_specific_response_dict:
pNote("The cmd: '{}' has been created before"
"Please use one cmd block for the responses for same cmd".\
format(cmd_text))
else:
cmd_specific_response_dict[cmd_text] = {}
for resp in cmd:
resp_name = resp.tag
resp_text = resp.get("text", "")
if resp_name in cmd_specific_response_dict[cmd_text]:
pNote("A response with tag name {} has been created before with value: {}"
"Please rename with a different tag name".\
format(resp_name, cmd_specific_response_dict[cmd_text][resp_name]))
else:
cmd_specific_response_dict[cmd_text][resp_name] = resp_text
return cmd_specific_response_dict
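# A sketch of the <commands> block this parser expects, matching the dict shown
# in the docstring above (the child tag names like <cmd>/<r1> and all text
# values are illustrative, not taken from a real response file):
#
#     <commands>
#         <cmd text="cmd1_text">
#             <default text="default response"/>
#             <r1 text="text"/>
#         </cmd>
#         <cmd text="cmd2_text">
#             <default text="def resp for cmd2"/>
#             <r2 text="hello"/>
#         </cmd>
#     </commands>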
def get_response_file(testdatafile):
"""
Build the response dict with response tag name and response text
"""
from Framework.Utils.xml_Utils import getRoot, getElementListWithSpecificXpath
tmp_list = getElementListWithSpecificXpath(testdatafile, "./global/response_file")
response_file = tmp_list[0].text if tmp_list != [] else ""
response_dict = {}
cmd_specific_response_dict = {}
if response_file != "":
root = getRoot(response_file)
# Parse global responses
responses = root.find("responses")
if responses is not None:
for resp in responses:
resp_name = resp.tag
resp_text = resp.get("text", "")
if resp_name in response_dict:
pNote("A response with tag name {} has been created before with value: {}"
"Please rename with a different tag name".\
format(resp_name, response_dict[resp_name]))
else:
response_dict[resp_name] = resp_text
else:
pNote("Unable to find responses, please put all responses inside a responses tag",
"ERROR")
# Parse cmd specific responses
cmd_specific_response_dict = get_cmd_specific_response_file(root)
else:
pNote("Unable to retrieve response file from testdata file, please put the path in"
" response_file tag inside global section of the testdata file", "ERROR")
MockUtils.cli_Utils.response_dict = response_dict
MockUtils.cli_Utils.cmd_specific_response_dict = cmd_specific_response_dict
def get_response_from_dict(cmd, simresp=None):
"""
The order of getting response match is:
cmd specific response with simresp > global response with simresp >
cmd specific response default > global response default
"""
cmd_response_dict = MockUtils.cli_Utils.cmd_specific_response_dict.get(cmd, None)
response = ""
if simresp is not None and cmd_response_dict is not None and simresp in cmd_response_dict:
response = cmd_response_dict[simresp]
elif simresp is not None and simresp in MockUtils.cli_Utils.response_dict:
response = MockUtils.cli_Utils.response_dict[simresp]
elif cmd_response_dict is not None and "default" in cmd_response_dict:
response = cmd_response_dict["default"]
else:
response = MockUtils.cli_Utils.response_dict.get("default", None)
return response
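# Worked example of the precedence above (dict contents are illustrative):
#   cmd_specific_response_dict = {"show version": {"default": "v1.0", "r1": "v2.0"}}
#   response_dict               = {"default": "GLOBAL", "r1": "GLOBAL-R1"}
#
#   get_response_from_dict("show version", "r1") -> "v2.0"      (cmd-specific simresp)
#   get_response_from_dict("other cmd", "r1")    -> "GLOBAL-R1" (global simresp)
#   get_response_from_dict("show version")       -> "v1.0"      (cmd-specific default)
#   get_response_from_dict("other cmd")          -> "GLOBAL"    (global default)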
def cmd_resp_lookup(cmd):
"""
    Takes in a raw simresp string and substitutes each part of it with the linked response.
    Based on the separator, the responses are combined differently:
    ',' combines with a space, '+' combines without a space, '#' combines with a newline
"""
result = ""
resp_tag = ""
char_dict = {",":" ", "+":"", "#":os.linesep}
simresp = MockUtils.cli_Utils.response_reference_dict.get(cmd, None)
if simresp is not None:
"""
        When a separator symbol is encountered, take all the chars before it as a resp_tag and look up its response
"""
for char in simresp:
if char == "," or char == "+" or char == "#":
response = get_response_from_dict(cmd, resp_tag)
if response is not None:
result += response + char_dict[char]
else:
pNote("Unable to find response tag: {} in response file".format(resp_tag))
resp_tag = ""
else:
resp_tag += char
if resp_tag != "":
response = get_response_from_dict(cmd, resp_tag)
if response is not None:
result += response
else:
pNote("Unable to find response tag: {} in response file".format(resp_tag))
else:
response = get_response_from_dict(cmd)
if response is not None:
result += response
else:
pNote("Unable to find response tag: {} in response file".format(resp_tag))
return result
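# Worked example of the separator handling above (tags and responses are
# illustrative): with response_reference_dict = {"show version": "r1,r2#r3"}
# and responses r1 -> "A", r2 -> "B", r3 -> "C",
# cmd_resp_lookup("show version") returns "A B" + os.linesep + "C":
# ',' joins r1 and r2 with a space, '#' joins r2 and r3 with a newline, and
# '+' would join them with no separator at all.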
class MockUtils(object):
"""
This class contains all the mocked Utils
"""
def __init__(self):
""""""
return None
class cli_Utils():
"""
Mocked cli_Utils
"""
response_dict = {}
response_reference_dict = {}
cmd_specific_response_dict = {}
@staticmethod
def connect_ssh(ip, port="22", username="", password="", logfile=None, timeout=60,
prompt=".*(%|#|\$)", conn_options="", custom_keystroke="", **kwargs):
"""
This function doesn't actually connect to the server
"""
pNote("Mocking connect_ssh")
sshobj = "Mocking connect_ssh"
conn_string = ""
conn_options = "" if conn_options is False or conn_options is None else conn_options
# delete -o StrictHostKeyChecking=no and put them in conn_options
if not conn_options or conn_options is None:
conn_options = ""
command = 'ssh -p {0} {1}@{2} {3}'.format(port, username, ip, conn_options)
# command = ('ssh -p '+ port + ' ' + username + '@' + ip)
pNote("connectSSH: cmd = %s" % command, "DEBUG")
pNote("MOCK MODE: No connection is made to the server")
return sshobj, conn_string
@staticmethod
def connect_telnet(ip, port="23", username="", password="", logfile=None, timeout=60,
prompt=".*(%|#|\$)", conn_options="", custom_keystroke="", **kwargs):
"""
This function doesn't actually connect to the server
"""
pNote("Mocking connect_telnet")
conn_options = "" if conn_options is False or conn_options is None else conn_options
pNote("timeout is: %s" % timeout, "DEBUG")
pNote("port num is: %s" % port, "DEBUG")
command = ('telnet ' + ip + ' ' + port)
if not conn_options or conn_options is None:
conn_options = ""
command = command + str(conn_options)
pNote("connectTelnet: cmd = %s" % command, "DEBUG")
pNote("MOCK MODE: No connection is made to the server")
conn_string = ""
telnetobj = "Mocking connect_telnet"
return telnetobj, conn_string
@classmethod
def _send_cmd(cls, *args, **kwargs):
"""
This function pass the command to the mocked send_command function
"""
command = kwargs.get('command')
startprompt = kwargs.get('startprompt', ".*")
endprompt = kwargs.get('endprompt', None)
cmd_timeout = kwargs.get('cmd_timeout', None)
result, response = cls.send_command("session_obj", startprompt, endprompt,
command, cmd_timeout)
return result, response
@classmethod
def send_command(cls, *args, **kwargs):
"""
Get response from the processed response dict
"""
print_warning("This method is obsolete and will be deprecated soon. Please"
" use 'send_command' method of 'PexpectConnect' | |
last test we tried to perform:
URL: {local_we_query_url}
Method: GET
Status code: {response.status_code}
We cannot proceed if the windows exporter server is not responding
properly. Make sure to check the logs and fix any issue found there. You
can see the logs in the Event Viewer for Application with source
windows_exporter.
''' ),
buttons=[
('Quit', False)
]
).run()
log.info(
f'''
To examine your windows exporter service logs, inspect logs in the Event
Viewer for Application with source windows_exporter.
'''
)
return False
# Let's find the number of running processes as a test
response_text = response.text
match = re.search(r'windows_os_processes (?P<processes>\d+)', response_text)
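    # The exporter serves plain Prometheus text; the metric line this regex
    # expects looks roughly like the following (the value is illustrative):
    #   windows_os_processes 123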
retry_index = 0
retry_count = 5
while (
not match
) and retry_index < retry_count:
result = button_dialog(
title='Unexpected response from Windows Exporter',
text=(
f'''
We received an unexpected response from the windows exporter server. Here
are some details for this last test we tried to perform:
URL: {local_we_query_url}
Method: GET
Missing line: windows_os_processes
We cannot proceed if the windows exporter server is not responding
properly. Make sure to check the logs and fix any issue found there. You
can see the logs in the Event Viewer for Application with source
windows_exporter.
''' ),
buttons=[
('Retry', 1),
('Quit', False)
]
).run()
if not result:
log.info(
f'''
To examine your windows exporter service logs, inspect logs in the Event
Viewer for Application with source windows_exporter.
'''
)
return False
retry_index = retry_index + 1
# Wait a little before the next retry
time.sleep(5)
try:
response = httpx.get(local_we_query_url)
except httpx.RequestError as exception:
result = button_dialog(
title='Cannot connect to Windows Exporter',
text=(
f'''
We could not connect to the windows exporter server. Here are some details for
this last test we tried to perform:
URL: {local_we_query_url}
Method: GET
Exception: {exception}
We cannot proceed if the windows exporter server is not responding
properly. Make sure to check the logs and fix any issue found there. You
can see the logs in the Event Viewer for Application with source
windows_exporter.
''' ),
buttons=[
('Quit', False)
]
).run()
log.info(
f'''
To examine your windows exporter service logs, inspect logs in the Event
Viewer for Application with source windows_exporter.
'''
)
return False
if response.status_code != 200:
result = button_dialog(
title='Cannot connect to Windows Exporter',
text=(
f'''
We could not connect to the windows exporter server. Here are some details for
this last test we tried to perform:
URL: {local_we_query_url}
Method: GET
Status code: {response.status_code}
We cannot proceed if the windows exporter server is not responding
properly. Make sure to check the logs and fix any issue found there. You
can see the logs in the Event Viewer for Application with source
windows_exporter.
''' ),
buttons=[
('Quit', False)
]
).run()
log.info(
f'''
To examine your windows exporter service logs, inspect logs in the Event
Viewer for Application with source windows_exporter.
'''
)
return False
response_text = response.text
match = re.search(r'windows_os_processes (?P<processes>\d+)', response_text)
if (
not match
):
# We could not get a proper result from Windows Exporter after all those retries
result = button_dialog(
title='Unexpected response from Windows Exporter',
text=(
f'''
After a few retries, we still received an unexpected response from the
windows exporter server. Here are some details for this last test we tried
to perform:
URL: {local_we_query_url}
Method: GET
Missing line: windows_os_processes
We cannot proceed if the windows exporter server is not responding
properly. Make sure to check the logs and fix any issue found there. You
can see the logs in the Event Viewer for Application with source
windows_exporter.
''' ),
buttons=[
('Quit', False)
]
).run()
log.info(
f'''
To examine your windows exporter service logs, inspect logs in the Event
Viewer for Application with source windows_exporter.
'''
)
return False
log.info(
f'''
Windows Exporter is installed and working properly.
''' )
time.sleep(5)
return True
def install_grafana(base_directory):
# Install Grafana as a service
nssm_binary = get_nssm_binary()
if not nssm_binary:
return False
# Check for existing service
grafana_service_exists = False
grafana_service_name = 'grafana'
service_details = get_service_details(nssm_binary, grafana_service_name)
if service_details is not None:
grafana_service_exists = True
if grafana_service_exists:
result = button_dialog(
title='Grafana service found',
text=(
f'''
The grafana service seems to have already been created. Here are some
details found:
Display name: {service_details['parameters'].get('DisplayName')}
Status: {service_details['status']}
Binary: {service_details['install']}
App parameters: {service_details['parameters'].get('AppParameters')}
App directory: {service_details['parameters'].get('AppDirectory')}
Do you want to skip installing grafana and its service?
''' ),
buttons=[
('Skip', 1),
('Install', 2),
('Quit', False)
]
).run()
if not result:
return result
if result == 1:
return True
# User wants to proceed, make sure the grafana service is stopped first
subprocess.run([
str(nssm_binary), 'stop', grafana_service_name])
# Check if grafana is already installed
grafana_path = base_directory.joinpath('bin', 'grafana')
grafana_bin_path = grafana_path.joinpath('bin')
grafana_cli_binary_file = grafana_bin_path.joinpath('grafana-cli.exe')
grafana_server_binary_file = grafana_bin_path.joinpath('grafana-server.exe')
grafana_found = False
grafana_version = UNKNOWN_VALUE
if grafana_cli_binary_file.is_file():
try:
process_result = subprocess.run([
str(grafana_cli_binary_file), '--version'
], capture_output=True, text=True)
grafana_found = True
process_output = process_result.stdout
result = re.search(r'version (?P<version>[^ ]+)', process_output)
if result:
grafana_version = result.group('version').strip()
except FileNotFoundError:
pass
install_grafana_binary = True
if grafana_found:
result = button_dialog(
title='Grafana binary distribution found',
text=(
f'''
The grafana binary distribution seems to have already been installed.
Here are some details found:
Version: {grafana_version}
Location: {grafana_path}
Do you want to skip installing the grafana binary distribution?
''' ),
buttons=[
('Skip', 1),
('Install', 2),
('Quit', False)
]
).run()
if not result:
return result
install_grafana_binary = (result == 2)
if install_grafana_binary:
# Getting latest Grafana release
retry_index = 0
retry_count = 5
retry_delay = 30
base_timeout = 10.0
timeout_retry_increment = 5.0
response = None
log.info('Getting Grafana download packages...')
while (
response is None or
response.status_code != 200
) and retry_index < retry_count:
try:
timeout_delay = base_timeout + (timeout_retry_increment * retry_index)
response = httpx.get(GRAFANA_DOWNLOAD_URL, params=GRAFANA_WINDOWS_PARAM,
timeout=timeout_delay, follow_redirects=True)
except httpx.RequestError as exception:
log.error(f'Cannot connect to Grafana download page. Exception {exception}.')
retry_index = retry_index + 1
if retry_index < retry_count:
log.info(f'We will retry in {retry_delay} seconds.')
time.sleep(retry_delay)
continue
if response.status_code != 200:
log.error(f'Grafana download page returned error code. '
f'Status code {response.status_code}')
retry_index = retry_index + 1
if retry_index < retry_count:
log.info(f'We will retry in {retry_delay} seconds.')
time.sleep(retry_delay)
continue
if response is None or response.status_code != 200:
log.error(f'We could not get the Grafana download packages from the download page '
f'after a few retries. We cannot continue.')
return False
response_text = response.text
soup = BeautifulSoup(response_text, "html.parser")
results = soup.find_all('div', class_='download-package')
archive_sha256 = None
archive_url = None
for result in results:
anchors = result.find_all('a')
for anchor in anchors:
href = anchor.attrs.get('href', None)
if href and href.endswith('windows-amd64.zip'):
archive_url = href
if archive_url is not None:
sha_spans = result.find_all('span', class_='download-package__sha', limit=1)
if sha_spans and len(sha_spans) > 0:
sha_text = sha_spans[0].text
match = re.search(r'SHA256:\s*(?P<sha256>\S+)', sha_text)
if match:
archive_sha256 = match.group('sha256').lower()
break
if archive_url is None:
log.error('No grafana binary distribution found on grafana download page')
return False
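        # Sketch of the download-page markup the selectors above expect
        # (inferred from this parser, not from Grafana's actual page; the href
        # and hash values are illustrative):
        #
        #   <div class="download-package">
        #     <a href="https://.../grafana-x.y.z.windows-amd64.zip">...</a>
        #     <span class="download-package__sha">SHA256: 0123abcd...</span>
        #   </div>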
# Downloading latest Grafana binary distribution archive
download_path = base_directory.joinpath('downloads')
download_path.mkdir(parents=True, exist_ok=True)
url_file_name = urlparse(archive_url).path.split('/')[-1]
zip_url = archive_url
grafana_archive_path = download_path.joinpath(url_file_name)
grafana_archive_hash = hashlib.sha256()
if grafana_archive_path.is_file():
grafana_archive_path.unlink()
try:
with open(grafana_archive_path, 'wb') as binary_file:
log.info(f'Downloading grafana archive {url_file_name}...')
with httpx.stream('GET', zip_url, follow_redirects=True) as http_stream:
if http_stream.status_code != 200:
log.error(f'Cannot download grafana archive {zip_url}.\n'
f'Unexpected status code {http_stream.status_code}')
return False
for data in http_stream.iter_bytes():
binary_file.write(data)
grafana_archive_hash.update(data)
except httpx.RequestError as exception:
log.error(f'Exception while downloading grafana archive. Exception {exception}')
return False
# Verify checksum
if archive_sha256 is not None:
log.info('Verifying grafana archive checksum...')
grafana_archive_hexdigest = grafana_archive_hash.hexdigest().lower()
if grafana_archive_hexdigest != archive_sha256:
log.error(f'Grafana archive checksum does not match. Expected {archive_sha256} '
f'but we got {grafana_archive_hexdigest}. We will stop here to protect you.')
return False
# Unzip grafana archive
archive_members = None
log.info(f'Extracting grafana archive {url_file_name}...')
with ZipFile(grafana_archive_path, 'r') as zip_file:
archive_members = zip_file.namelist()
zip_file.extractall(download_path)
# Remove download leftovers
grafana_archive_path.unlink()
if archive_members is None or len(archive_members) == 0:
log.error('No files found in grafana archive. We cannot continue.')
return False
# Move all those extracted files into their final destination
if grafana_path.is_dir():
shutil.rmtree(grafana_path)
grafana_path.mkdir(parents=True, exist_ok=True)
archive_extracted_dir = download_path.joinpath(Path(archive_members[0]).parts[0])
with os.scandir(archive_extracted_dir) as it:
for diritem in it:
shutil.move(diritem.path, grafana_path)
# Make sure grafana was installed properly
grafana_found = False
grafana_version = UNKNOWN_VALUE
if grafana_cli_binary_file.is_file():
try:
process_result = subprocess.run([
str(grafana_cli_binary_file), '--version'
], capture_output=True, text=True)
grafana_found = True
process_output = process_result.stdout
result = re.search(r'version (?P<version>[^ ]+)', process_output)
if result:
grafana_version = result.group('version').strip()
except FileNotFoundError:
pass
if not grafana_found:
log.error(f'We could not find the grafana binary distribution from the installed '
f'archive in {grafana_path}. We cannot continue.')
return False
else:
import time
from pathlib import Path
from typing import Tuple, Sequence
from collections import Counter
import numpy as np
import pandas as pd
from torch.utils import data
from tqdm import tqdm
from sandstone.datasets.factory import RegisterDataset
from sandstone.utils.generic import log, md5
import warnings
warnings.simplefilter("ignore")
class MIMIC_IV_Abstract_Dataset(data.Dataset):
"""Abstract class for different MIMIC-IV tasks.
Handles data loading, caching, splitting, and various generic preprocessing steps.
"""
def __init__(self, args, split_group):
super(MIMIC_IV_Abstract_Dataset, self).__init__()
self.args = args
self.split_group = split_group
cache_static_filename = get_cache_filename('static', args=args)
cache_hourly_filename = get_cache_filename('hourly', args=args)
print(f"Loading item mapping ({args.item_map_path})")
item_mapping = pd.read_csv(args.item_map_path, low_memory=False)
if Path(args.cache_dir, cache_static_filename).is_file() and Path(args.cache_dir, cache_hourly_filename).is_file():
print("Loading cached static_df and aggregated_df:", cache_static_filename, cache_hourly_filename)
static_df = pd.read_parquet(Path(args.cache_dir, cache_static_filename))
aggregated_df = pd.read_parquet(Path(args.cache_dir, cache_hourly_filename))
else:
# compute which csvs are needed
task_csv_subset = set(self.task_specific_features.keys())
features_csv_subset = set(item_mapping.origin.loc[item_mapping.origin != 'static'].dropna())
# by default, patients, chartevents, admissions and icustays are loaded
self.csv_subset = set(('patients', 'chartevents', 'admissions', 'icustays')).union(task_csv_subset).union(features_csv_subset)
raw_dataframes = load_data(args.dataset_path, subset=self.csv_subset, nrows=args.nrows, chunksize=args.chunksize, cache_dir=args.cache_dir)
static_df, aggregated_df = self.create_dataframes(args, item_mapping, **raw_dataframes)
# cache final dataframes
static_df.to_parquet(Path(args.cache_dir, cache_static_filename))
aggregated_df.to_parquet(Path(args.cache_dir, cache_hourly_filename))
print("Generating labels")
self.create_labels(static_df, aggregated_df, task=args.task, threshold=args.los_threshold)
if args.dataset == 'mimic-iv-sepsis':
print(f"Extracting {args.data_hours} hours of data")
aggregated_df = self.extract_timerange(args, aggregated_df, task=args.task)
print("Adding onset hour to static_df")
onset = aggregated_df.groupby('hadm_id')[args.task+'_onset_hour'].mean()
static_df = static_df.merge(onset, how='left', on='hadm_id')
# filter static_df to only include patients in aggregated_df
static_df = static_df[static_df.hadm_id.isin(aggregated_df.hadm_id.unique())]
print("Filter for just feature columns")
static_cols = ['subject_id', 'hadm_id', 'intime', 'y', args.task+'_onset_hour']
cols_to_keep = ['hadm_id', 'hour']
if len(args.features) != 0:
# convert to lower case
args.features = [x.lower() for x in args.features]
if args.group_by_level2:
static_cols.extend(args.features)
cols_to_keep.extend(args.features)
else:
feature_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower().isin(args.features)]['itemid'].map(str))
static_cols.extend(feature_ids)
cols_to_keep.extend(feature_ids)
else:
static_cols.extend(list(item_mapping.itemid.map(str)))
if args.group_by_level2:
cols_to_keep.extend(list(item_mapping.LEVEL2))
else:
cols_to_keep.extend(list(item_mapping.itemid.map(str)))
if args.feature_search is not None:
args.feature_search = args.feature_search.lower()
if args.group_by_level2:
print("Search feature:", args.feature_search)
static_cols.extend(args.feature_search)
cols_to_keep.extend(args.feature_search)
else:
search_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower() == (args.feature_search)]['itemid'].map(str))
print("Search IDs:", search_ids)
cols_to_keep.extend(search_ids)
static_cols.extend(search_ids)
if len(args.feature_remove) != 0:
# convert to lower case
args.feature_remove = [x.lower() for x in args.feature_remove]
if args.group_by_level2:
remove_ids = args.feature_remove
else:
remove_ids = list(item_mapping.loc[item_mapping['LEVEL2'].str.lower().isin(args.feature_remove)]['itemid'].map(str))
for feature in remove_ids:
if feature in cols_to_keep:
print("Removed feature:", feature)
cols_to_keep.remove(feature)
if feature in static_cols:
static_cols.remove(feature)
original_cols = [c for c in cols_to_keep if c in aggregated_df.columns]
if args.impute_method == 'simple':
exist_cols = [c+'_exist' for c in original_cols if c not in ['hadm_id', 'hour']]
time_cols = [c+'_time_since' for c in original_cols if c not in ['hadm_id', 'hour']]
cols_to_keep.extend(exist_cols)
cols_to_keep.extend(time_cols)
static_df = static_df.loc[:, static_df.columns.isin(static_cols)]
aggregated_df = aggregated_df.loc[:,aggregated_df.columns.isin(cols_to_keep)]
if args.dataset == 'mimic-iv-sepsis':
print(f"Re-indexing and zero filling")
aggregated_df = reindex_timeseries(aggregated_df)
aggregated_df.fillna({x:0 for x in original_cols}, inplace=True)
if args.impute_method == 'simple':
aggregated_df.fillna({x:0 for x in exist_cols}, inplace=True)
aggregated_df.fillna({x:100 for x in time_cols}, inplace=True)
print("Static df size:", static_df.shape)
print("Static df columns:", static_df.columns)
print("Aggregated df size:", aggregated_df.shape)
print("Aggregated df columns:", aggregated_df.columns)
print("Static df stats:")
print(static_df.describe())
print("Aggregated df stats:")
print(aggregated_df.describe())
print("Binarize/One-hot encode categorical feature columns")
if 'gender' in static_df.columns:
static_df['gender'] = (static_df.gender == 'M').astype(bool)
for col in ['marital_status', 'ethnicity']:
if col in static_df.columns:
dummies = pd.get_dummies(static_df[col]).add_prefix(col+"_").astype(bool)
static_df.drop(columns=col, inplace=True)
static_df[dummies.columns] = dummies
self.assign_splits(static_df)
if args.normalize is not None:
print("Normalizing values to zero-mean and unit variance.")
if args.group_by_level2:
normalize_feats = set(args.normalize)
else:
normalize_feats = set(item_mapping.loc[item_mapping['LEVEL2'].isin(args.normalize)].itemid.unique())
static_norm_cols = list(normalize_feats.intersection(static_df.columns))
hourly_norm_cols = list(normalize_feats.intersection(aggregated_df.columns))
unused_norm_cols = normalize_feats.difference(set(static_norm_cols + hourly_norm_cols))
if len(unused_norm_cols) != 0:
print("WARNING: Couldn't find specified columns to normalize by: {}!".format(unused_norm_cols))
static_train = static_df.loc[static_df.split_group == 'train']
static_normalize_df = static_train[static_norm_cols]
hourly_normalize_df = aggregated_df.loc[aggregated_df.hadm_id.isin(static_train.hadm_id.unique()), hourly_norm_cols]
# compute stats over train data
static_mean, static_std = static_normalize_df.mean(), static_normalize_df.std()
hourly_mean, hourly_std = hourly_normalize_df.mean(), hourly_normalize_df.std()
# prevent division by zero
static_std.loc[static_std == 0] = 1
hourly_std.loc[hourly_std == 0] = 1
# apply to whole dataset
static_df[static_norm_cols] = (static_df[static_norm_cols] - static_mean) / static_std
aggregated_df[hourly_norm_cols] = (aggregated_df[hourly_norm_cols] - hourly_mean) / hourly_std
if args.flatten_timeseries:
flattened_df = flatten_timeseries(aggregated_df)
static_df = static_df.merge(flattened_df, on='hadm_id')
elif args.timeseries_moments:
moments_df = compute_timeseries_moments(aggregated_df, args.timeseries_moments)
static_df = static_df.merge(moments_df, on='hadm_id')
static_df.columns = static_df.columns.map(str)
self.static_features = [col for col in static_df.columns if col not in ['y', 'subject_id', 'hadm_id', 'intime', 'split_group', args.task+'_onset_hour']]
self.timeseries_features = [col for col in aggregated_df.columns if col not in ['hadm_id', 'charttime', 'hour']]
static_df = static_df.loc[static_df['split_group'] == split_group]
if not args.static_only:
# if non-flattened hourly data is used, also filter aggregated_df
aggregated_df = aggregated_df.loc[aggregated_df['hadm_id'].isin(static_df['hadm_id'].unique())]
static_df.drop(columns='split_group', inplace=True)
if args.static_only:
self.dataset = self.create_dataset(static_df)
else:
self.dataset = self.create_dataset(static_df, aggregated_df)
# Class weighting
label_dist = [d['y'] for d in self.dataset]
label_counts = Counter(label_dist)
weight_per_label = 1./ len(label_counts)
label_weights = {
label: weight_per_label/count for label, count in label_counts.items()
}
self.weights = [ label_weights[d['y']] for d in self.dataset]
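        # Worked example of the inverse-frequency weighting above (counts are
        # illustrative): with label_counts = {0: 900, 1: 100},
        # weight_per_label = 0.5, so each negative sample gets weight 0.5/900
        # and each positive 0.5/100, making both classes contribute equally to
        # a weighted sampler.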
log(self.get_summary_statement(self.args.task, split_group, self.args.current_test_years, self.args.onset_bucket, label_counts), args)
@property
def task(self):
raise NotImplementedError("Abstract method needs to be overridden!")
@property
def task_specific_features(self, task=None):
"""Defines some itemids/gsns/icd_codes that are needed for the task.
Returns:
a dictionary mapping origin dataset -> list of itemids.
"""
return {}
def create_dataframes(self, args, item_mapping, **raw_dataframes):
"""Preprocesses raw dataframes into static_df and aggregated_df.
Returns:
- static_df
- must include columns 'hadm_id', and 'y' for the label.
- any additional columns will be used as input features for prediction.
- timeseries_df
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def assign_splits(self, meta):
if self.args.timesplit:
# assign train_years as a list of years [2008, 2010] inclusive for instance.
train_start, train_end = map(int, self.args.train_years.split('-'))
meta['split_group'] = None
meta.loc[(meta['intime'].dt.year>=train_start) & (meta['intime'].dt.year<=train_end), 'split_group'] = 'train'
# dev will be a subset of train, of proportion split_probs[dev]
dev_prob = self.args.split_probs[1]
train_rows = meta[meta.split_group=='train'].shape[0]
dev_rows = int(dev_prob*train_rows)
meta.loc[meta[meta['split_group']=='train'].head(dev_rows).index, 'split_group'] = 'dev'
# if testing on training years, then final split is test set
if self.args.train_years == self.args.current_test_years:
test_prob = self.args.split_probs[2]
test_rows = int(test_prob*train_rows)
mask = meta.index.isin(meta[meta['split_group']=='train'].tail(test_rows).index)
else:
test_start, test_end = map(int, self.args.current_test_years.split('-'))
mask = meta['intime'].dt.year>=test_start
mask &= meta['intime'].dt.year<=test_end
# adding to the mask onset bucket
if self.args.onset_bucket is not None:
hour_start, hour_end = map(int, self.args.onset_bucket.split('-'))
mask &= meta[self.args.task+'_onset_hour'] >= hour_start
mask &= meta[self.args.task+'_onset_hour'] <= hour_end
meta.loc[mask, 'split_group'] = 'test'
else:
subject_ids = list(sorted(meta['subject_id'].unique()))
start_idx = 0
meta['split_group'] = None
for split, prob in zip(['train', 'dev', 'test'], self.args.split_probs):
end_idx = start_idx + int(len(subject_ids) * prob)
start = subject_ids[start_idx]
end = subject_ids[end_idx-1]
meta.loc[(meta['subject_id'] >= start) & (meta['subject_id'] <= end), 'split_group'] = split
start_idx = end_idx
if meta.loc[meta['subject_id']==subject_ids[end_idx-1]]['split_group'].isnull().any():
meta.loc[meta['subject_id']==subject_ids[end_idx-1], 'split_group'] = split
return meta
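    # Example of the time-based split above (argument values are illustrative):
    # with train_years='2008-2010', current_test_years='2011-2013' and
    # split_probs=[0.7, 0.15, 0.15], admissions with intime in 2008-2010 become
    # 'train', the first 15% of those rows are relabelled 'dev', and admissions
    # in 2011-2013 (optionally narrowed by onset_bucket) become 'test'. When the
    # train and test years are identical, the last 15% of the training rows are
    # used as the test set instead.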
def create_dataset(self, static_df, aggregated_df=None):
"""Turns DataFrames into a list of samples, which are dicts containing 'pid', 'x', 'y', and
possibly 'x_timeseries' keys
"""
dataset = []
pids = static_df['subject_id'].values.astype(np.int32)
hadm_ids = static_df['hadm_id'].values.astype(np.int32)
ys = static_df['y'].values.astype(np.float32)
xs = static_df[self.static_features].values.astype(np.float32)
for i in tqdm(range(len(pids)), desc='Creating dataset', total=len(pids)):
patient_dict = {}
patient_dict['pid'] = pids[i]
patient_dict['y'] = ys[i]
patient_dict['x'] = xs[i]
if aggregated_df is not None:
patient_rows = aggregated_df.loc[aggregated_df.hadm_id == hadm_ids[i]]
assert len(patient_rows) > 0, "Found patient with no timeseries data!"
x_timeseries = patient_rows[self.timeseries_features].values.astype(np.float32)
patient_dict['x_timeseries'] = x_timeseries
dataset.append(patient_dict)
return dataset
def create_labels(self, static_df, aggregated_df, task, threshold):
"""Generates per-patient labels for the given task
Returns:
- static_df with an extra 'y' column
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def extract_timerange(self, args, aggregated_df, task):
"""Extracts a fixed no. of hours of data to predict from
"""
raise NotImplementedError("Abstract method needs to be overridden!")
def get_summary_statement(self, task, split_group, years, hours, class_balance):
return "Created MIMIC-IV {} {} dataset for years {} and onset hours {} with the following class balance:\n{}".format(task, split_group, years, hours, class_balance)
def set_args(self, args):
args.num_classes = 2
args.input_dim = len(self.static_features)
if not args.flatten_timeseries:
args.timeseries_dim = len(self.timeseries_features)
args.timeseries_len = args.data_hours
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
@RegisterDataset("mimic-iv-sepsis")
class MIMIC_IV_Sepsis_Dataset(MIMIC_IV_Abstract_Dataset):
@property
def task(self):
return "Sepsis-3"
@property
def task_specific_features(self):
return {
'inputevents': [221662, 221653, 221289, 221906], # dopamine, dobutamine, epinephrine, norepinephrine
'outputevents': [226559, 226560, 226561, 226584, 226563, 226564, 226565, 226567, 226557,
226558, 227488, 227489], # for urine output
'labevents': [51265, 50885, 50912, 50821, 51301], # platelets, bilirubin, creatinine, PO2, WBC-count
'chartevents': [223835, 220739, 223900, 223901, 223849, 229314, # FiO2, GCS-Eye, GCS-Verbal, GCS-Motor, vent_mode, vent_mode (Hamilton)
223762, 223761, 220045, 220210, 224690], # temp_C, temp_F, heart rate, resp rate, resp rate (total)
'microbiologyevents': None, # all microbio samples (no id filtering happens on microbioevents, so None can be used here)
'prescriptions': None,
}
def create_dataframes(self, args, item_mapping, patients, chartevents, admissions, icustays,
inputevents, labevents, microbiologyevents=None, prescriptions=None, outputevents=None,
diagnoses_icd=None, procedureevents=None, **extra_dfs):
        # filter
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import reuse_transformer
@parameterized.named_parameters(
('base', reuse_transformer.ReuseTransformer))
class ReuseTransformerLayerTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(ReuseTransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor, _ = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.25)
def test_layer_output_range_with_relative_pe(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu',
use_relative_pe=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
use_relative_pe=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor, _ = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer(input_data)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer(input_data)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor, _ = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
class ReuseTransformerArgumentTest(tf.test.TestCase, parameterized.TestCase):
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = reuse_transformer.ReuseTransformer(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output, _ = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
encoder_block = reuse_transformer.ReuseTransformer(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
encoder_block_config = encoder_block.get_config()
new_encoder_block = reuse_transformer.ReuseTransformer.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = reuse_transformer.ReuseTransformer(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
num_rows = 21
num_cols = 13
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(num_rows, num_cols, width))
output_tensor, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.named_parameters(
('plain', False, False, False),
('plain_returnscore', False, True, False),
('plain_with_relative_pe', False, False, True),
('reuse_all', True, False, False),
('reuse_all_returnscore', True, True, False),
('reuse_all_with_relative_pe', True, False, True),
('reuse_5', 5, False, False),
('reuse_5_returnscore', 5, True, False),
('reuse_5_with_relative_pe', 5, False, True),)
def test_layer_invocation_with_mask(self, reuse_attention,
return_attention_scores, use_relative_pe):
test_layer = reuse_transformer.ReuseTransformer(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
reuse_attention=reuse_attention,
use_relative_pe=use_relative_pe)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
return_scores_tensor = tf.keras.Input(shape=(1,))
reuse_attention_scores = tf.keras.Input(
shape=(10, sequence_length, sequence_length))
output_tensor, _ = test_layer(
[data_tensor, mask_tensor, reuse_attention_scores])
# Create a model from the test layer.
model = tf.keras.Model(
([data_tensor, mask_tensor, reuse_attention_scores],
return_scores_tensor), output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = | |
wr_config and format_version >= 2:
print(f'{Color.RED}ERROR:{Color.RESET} Extrapolation of operations to "Result" is not supported. layer_id: {layer_id}')
sys.exit(-1)
else:
print('The {} layer is not yet implemented.'.format(layer.attrib['type']))
sys.exit(-1)
# layerids_of_the_terminating_output
if layerids_of_the_terminating_output is not None and layer_id in layerids_of_the_terminating_output:
if layer.attrib['type'] != 'Split' and layer.attrib['type'] != 'VariadicSplit' and layer.attrib['type'] != 'TopK' and layer.attrib['type'] != 'NonMaxSuppression':
tf_outputs.append(tf_layers_dict[layer_id])
else:
for layer_id_port in layer_id_port_dict[layer_id]['layer_id:port']:
tf_outputs.append(tf_layers_dict[layer_id_port])
# Layer structure print
if layer.attrib['type'] != 'Parameter' and layer.attrib['type'] != 'Const':
try:
layer_structure = {
'layer_type': layer.attrib['type'],
'layer_id': layer_id,
}
for edge_index in range(len(tf_edges[layer_id])):
# if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)]) != np.ndarray:
if tf.keras.backend.is_keras_tensor(tf_layers_dict[layer_id]):
layer_structure[f'input_layer{edge_index}'] = f'layer_id={tf_edges[layer_id][edge_index]}: {tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)]}'
else:
layer_structure[f'input_layer{edge_index}_shape'] = f'layer_id={tf_edges[layer_id][edge_index]}: Const(ndarray).shape {tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)].shape}'
layer_structure[f'input_layer{edge_index}_value'] = f'layer_id={tf_edges[layer_id][edge_index]}: {tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)]}'
except:
pass
if layer.attrib['type'] != 'Split' and layer.attrib['type'] != 'VariadicSplit' and layer.attrib['type'] != 'TopK' and layer.attrib['type'] != 'NonMaxSuppression':
if tf.keras.backend.is_keras_tensor(tf_layers_dict[layer_id]):
layer_structure['tf_layers_dict'] = tf_layers_dict[layer_id]
else:
layer_structure['tf_layers_dict_shape'] = tf_layers_dict[layer_id].shape
layer_structure['tf_layers_dict'] = tf_layers_dict[layer_id]
if layer.attrib['type'] == 'Concat' or layer.attrib['type'] == 'SoftMax' or layer.attrib['type'] == 'Squeeze' or \
layer.attrib['type'] == 'ReduceMean' or layer.attrib['type'] == 'ReduceMax' or layer.attrib['type'] == 'ReduceMin' or \
layer.attrib['type'] == 'ReduceSum' or layer.attrib['type'] == 'ReduceProd' or layer.attrib['type'] == 'ReduceL2':
layer_structure['axis'] = axis
if layer.attrib['type'] == 'Unsqueeze':
layer_structure['indices'] = indices
elif layer.attrib['type'] == 'Split' or layer.attrib['type'] == 'VariadicSplit' or layer.attrib['type'] == 'NonMaxSuppression':
# Split, VariadicSplit, NonMaxSuppression
for edge_index, tmp_layer_id_port in enumerate(layer_id_port_dict[layer_id]['layer_id:port']):
try:
if type(tf_layers_dict[get_tf_edges_from(tf_edges, tmp_layer_id_port, edge_index)]) != np.ndarray:
layer_structure[f'input_layer{edge_index}'] = f'layer_id={tf_edges[tmp_layer_id_port][edge_index]}: {tf_layers_dict[get_tf_edges_from(tf_edges, tmp_layer_id_port, edge_index)]}'
else:
layer_structure[f'input_layer{edge_index}'] = f'layer_id={tf_edges[tmp_layer_id_port][edge_index]}: Const(ndarray).shape {tf_layers_dict[get_tf_edges_from(tf_edges, tmp_layer_id_port, edge_index)].shape}'
except:
                layer_structure[f'input_layer{edge_index}'] = f'layer_id=Unknown: Unknown'
for idx, (output, layer_id_port) in enumerate(zip(outputs, layer_id_port_dict[layer_id]['layer_id:port'])):
layer_structure[f'tf_layers_dict{idx}'] = f'layer_id_port: {layer_id_port} {output}'
elif layer.attrib['type'] == 'TopK':
# TopK
layer_structure['tf_layers_dict0'] = tf_layers_dict[layer_id_values]
layer_structure['tf_layers_dict1'] = tf_layers_dict[layer_id_indices]
layer_structure_print(layer_structure)
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
print(f'{Color.RED}ERROR:{Color.RESET} model_path : {model_path}.xml')
print(f'{Color.RED}ERROR:{Color.RESET} weights_path: {model_path}.bin')
print(f'{Color.RED}ERROR:{Color.RESET} layer_id :', layer_id)
try:
for edge_index in range(len(tf_edges[layer_id])):
if type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)]) != np.ndarray:
print(f'{Color.RED}ERROR:{Color.RESET} input_layer{edge_index} layer_id={tf_edges[layer_id][edge_index]}:', tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)])
else:
print(f'{Color.RED}ERROR:{Color.RESET} input_layer{edge_index} layer_id={tf_edges[layer_id][edge_index]}: Const(ndarray).shape ', tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)].shape)
pprint.pprint(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, edge_index)])
except:
pass
print(f'{Color.RED}ERROR:{Color.RESET} The trace log is below.')
import traceback
traceback.print_exc()
sys.exit(-1)
    # If an output layer is an ndarray (EagerTensor), save it to a .npy binary file and remove it from the model outputs
np_outputs = [o for o in tf_outputs if isinstance(o, EagerTensor)]
for idx, n in enumerate(np_outputs):
np.save(f'{model_output_path}/{idx}', n)
print(f'{Color.YELLOW}WARNING:{Color.RESET} The numpy array (ndarray) cannot be specified as an output layer. Therefore, the tool outputs a sequentially numbered .npy binary file. .npy_file_path: {model_output_path}/{idx}.npy')
tf_outputs = [o for o in tf_outputs if not isinstance(o, EagerTensor)]
model = Model(inputs=tf_inputs, outputs=tf_outputs)
print(f'{Color.GREEN}TensorFlow/Keras model building process complete!{Color.RESET}')
# saved_model output
flag_for_output_switching_from_saved_model_to_pb_due_to_error = False
if output_saved_model:
try:
print(f'{Color.REVERCE}saved_model output started{Color.RESET}', '=' * 58)
tf.saved_model.save(model, model_output_path)
# tf.keras.models.save_model(model, model_output_path, include_optimizer=False, save_format='tf', save_traces=False)
# model.save(model_output_path, include_optimizer=False, save_format='tf', save_traces=False)
print(f'{Color.GREEN}saved_model output complete!{Color.RESET}')
except TypeError as e:
print(f'{Color.GREEN}Switch to the output of an optimized protocol buffer file (.pb).{Color.RESET}')
output_pb = True
output_h5 = False
flag_for_output_switching_from_saved_model_to_pb_due_to_error = True
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# .h5 output
if output_h5:
try:
print(f'{Color.REVERCE}.h5 output started{Color.RESET}', '=' * 66)
model.save(f'{model_output_path}/model_float32.h5', include_optimizer=False, save_format='h5')
print(f'{Color.GREEN}.h5 output complete!{Color.RESET} - {model_output_path}/model_float32.h5')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# weight and json output
if output_weight_and_json:
try:
print(f'{Color.REVERCE}weight and json output started{Color.RESET}', '=' * 54)
open(f'{model_output_path}/model_float32.json', 'w').write(model.to_json())
model.save_weights(f'{model_output_path}/model_float32_weights.h5')
print(f'{Color.GREEN}weight and json output complete!{Color.RESET} - {model_output_path}/model_float32_weights.h5')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# .pb output
if output_pb:
try:
print(f'{Color.REVERCE}.pb output started{Color.RESET}', '=' * 66)
full_model = tf.function(lambda inputs: model(inputs))
full_model = full_model.get_concrete_function(inputs=[tf.TensorSpec(model_input.shape, model_input.dtype) for model_input in model.inputs])
frozen_func = convert_variables_to_constants_v2(full_model, lower_control_flow=False)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
logdir=".",
name=f'{model_output_path}/model_float32.pb',
as_text=False)
print(f'{Color.GREEN}.pb output complete!{Color.RESET} - {model_output_path}/model_float32.pb')
if flag_for_output_switching_from_saved_model_to_pb_due_to_error:
import shutil
saved_model_tmp = 'saved_model_tmp'
shutil.rmtree(saved_model_tmp, ignore_errors=True)
os.makedirs(saved_model_tmp, exist_ok=True)
inputs_tmp = []
outputs_tmp = []
for idx, _ in enumerate(model.inputs):
if idx == 0:
inputs_tmp.append(f'inputs:0')
else:
inputs_tmp.append(f'inputs_{idx}:0')
for idx, _ in enumerate(model.outputs):
if idx == 0:
outputs_tmp.append(f'Identity:0')
else:
outputs_tmp.append(f'Identity_{idx}:0')
def get_graph_def_from_file(graph_filepath):
tf.compat.v1.reset_default_graph()
tf.compat.v1.Graph().as_default()
with tf.compat.v1.gfile.GFile(graph_filepath, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
return graph_def
graph_def = get_graph_def_from_file(f'{model_output_path}/model_float32.pb')
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
tf.compat.v1.import_graph_def(graph_def, name='')
tf.compat.v1.saved_model.simple_save(
sess,
saved_model_tmp,
inputs= {t.rstrip(":0"):sess.graph.get_tensor_by_name(t) for t in inputs_tmp},
outputs={t.rstrip(":0"):sess.graph.get_tensor_by_name(t) for t in outputs_tmp}
)
from distutils.dir_util import copy_tree
copy_tree(saved_model_tmp, model_output_path)
shutil.rmtree(saved_model_tmp, ignore_errors=True)
print(f'{Color.GREEN}Optimized graph converted to SavedModel!{Color.RESET} - {model_output_path}')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# No Quantization - Input/Output=float32
if output_no_quant_float32_tflite:
try:
        print(f'{Color.REVERCE}tflite Float32 conversion started{Color.RESET}', '=' * 51)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open(f'{model_output_path}/model_float32.tflite', 'wb') as w:
w.write(tflite_model)
        print(f'{Color.GREEN}tflite Float32 conversion complete!{Color.RESET} - {model_output_path}/model_float32.tflite')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# Dynamic Range Quantization - Input/Output=float32
if output_dynamic_range_quant_tflite:
try:
print(f'{Color.REVERCE}Dynamic Range Quantization started{Color.RESET}', '=' * 50)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open(f'{model_output_path}/model_dynamic_range_quant.tflite', 'wb') as w:
w.write(tflite_model)
print(f'{Color.GREEN}Dynamic Range Quantization complete!{Color.RESET} - {model_output_path}/model_dynamic_range_quant.tflite')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# Weight Quantization - Input/Output=float32
if output_weight_quant_tflite:
try:
print(f'{Color.REVERCE}Weight Quantization started{Color.RESET}', '=' * 57)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open(f'{model_output_path}/model_weight_quant.tflite', 'wb') as w:
w.write(tflite_model)
print(f'{Color.GREEN}Weight Quantization complete!{Color.RESET} - {model_output_path}/model_weight_quant.tflite')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# Float16 Quantization - Input/Output=float32
if output_float16_quant_tflite:
try:
print(f'{Color.REVERCE}Float16 Quantization started{Color.RESET}', '=' * 56)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_quant_model = converter.convert()
with open(f'{model_output_path}/model_float16_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print(f'{Color.GREEN}Float16 Quantization complete!{Color.RESET} - {model_output_path}/model_float16_quant.tflite')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# Downloading datasets for calibration
raw_test_data = None
input_shapes = None
if output_integer_quant_tflite or output_full_integer_quant_tflite:
if calib_ds_type == 'tfds':
print(f'{Color.REVERCE}TFDS download started{Color.RESET}', '=' * 63)
raw_test_data = tfds.load(name=ds_name_for_tfds_for_calibration,
with_info=False,
split=split_name_for_tfds_for_calibration,
data_dir=download_dest_folder_path_for_the_calib_tfds,
download=tfds_download_flg)
print(f'{Color.GREEN}TFDS download complete!{Color.RESET}')
elif calib_ds_type == 'numpy':
print(f'{Color.REVERCE}numpy dataset load started{Color.RESET}', '=' * 58)
try:
if load_dest_file_path_for_the_calib_npy == npy_load_default_path and not os.path.exists(npy_load_default_path):
os.makedirs(os.path.dirname(npy_load_default_path), exist_ok=True)
import gdown
import subprocess
try:
result = subprocess.check_output(
[
'gdown',
'--id', '1z-K0KZCK3JBH9hXFuBTmIM4jaMPOubGN',
'-O', load_dest_file_path_for_the_calib_npy
],
stderr=subprocess.PIPE
).decode('utf-8')
            except Exception:
result = subprocess.check_output(
[
'sudo', 'gdown',
'--id', '1z-K0KZCK3JBH9hXFuBTmIM4jaMPOubGN',
'-O', load_dest_file_path_for_the_calib_npy
],
stderr=subprocess.PIPE
).decode('utf-8')
raw_test_data = np.load(load_dest_file_path_for_the_calib_npy)
print(f'{Color.GREEN}numpy dataset load complete!{Color.RESET}')
except subprocess.CalledProcessError as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e.stderr.decode('utf-8'))
import traceback
traceback.print_exc()
else:
pass
input_shapes = [model_input.shape for model_input in model.inputs]
def representative_dataset_gen():
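    # (added note) Calibration generator for the integer quantization passes below:
    # for each sample it yields a list with one normalized array per model input,
    # using the user-supplied normalization formula string evaluated via eval().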
if calib_ds_type == 'tfds':
for data in raw_test_data.take(10):
image = data['image'].numpy()
images = []
for shape in input_shapes:
data = tf.image.resize(image, (shape[1], shape[2]))
tmp_image = eval(string_formulas_for_normalization) # Default: (data - [127.5,127.5,127.5]) / [127.5,127.5,127.5]
tmp_image = tmp_image[np.newaxis,:,:,:]
images.append(tmp_image)
yield images
elif calib_ds_type == 'numpy':
for idx in range(raw_test_data.shape[0]):
image = raw_test_data[idx]
images = []
for shape in input_shapes:
if len(shape) == 4 and shape[3] == 3:
data = tf.image.resize(image, (shape[1], shape[2]))
data = data[np.newaxis,:,:,:]
elif len(shape) == 4 and shape[3] == 1:
data = tf.image.resize(image, (shape[1], shape[2]))
data = 0.299 * data[:, :, 0] + 0.587 * data[:, :, 1] + 0.114 * data[:, :, 2]
data = data[np.newaxis,:,:,np.newaxis]
else:
data = np.random.random_sample([i for i in shape]).astype(np.float32) * 255.0
tmp_image = eval(string_formulas_for_normalization) # Default: (data - [127.5,127.5,127.5]) / [127.5,127.5,127.5]
images.append(tmp_image)
yield images
# Integer Quantization
if output_integer_quant_tflite:
try:
print(f'{Color.REVERCE}Integer Quantization started{Color.RESET}', '=' * 56)
converter = tf.lite.TFLiteConverter.from_saved_model(model_output_path)
converter.experimental_new_quantizer = use_experimental_new_quantizer
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]
converter.representative_dataset = representative_dataset_gen
tflite_model = converter.convert()
with open(f'{model_output_path}/model_integer_quant.tflite', 'wb') as w:
w.write(tflite_model)
print(f'{Color.GREEN}Integer Quantization complete!{Color.RESET} - {model_output_path}/model_integer_quant.tflite')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# Full Integer Quantization
if output_full_integer_quant_tflite:
try:
print(f'{Color.REVERCE}Full Integer Quantization started{Color.RESET}', '=' * 51)
converter = tf.lite.TFLiteConverter.from_saved_model(model_output_path)
converter.experimental_new_quantizer = use_experimental_new_quantizer
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]
inf_type = None
if output_integer_quant_type == 'int8':
inf_type = tf.int8
elif output_integer_quant_type == 'uint8':
inf_type = tf.uint8
else:
inf_type = tf.int8
converter.inference_input_type = inf_type
converter.inference_output_type = inf_type
converter.representative_dataset = representative_dataset_gen
tflite_model = converter.convert()
with open(f'{model_output_path}/model_full_integer_quant.tflite', 'wb') as w:
w.write(tflite_model)
print(f'{Color.GREEN}Full Integer Quantization complete!{Color.RESET} - {model_output_path}/model_full_integer_quant.tflite')
except Exception as e:
print(f'{Color.RED}ERROR:{Color.RESET}', e)
import traceback
traceback.print_exc()
# TensorFlow.js convert
if output_tfjs:
import subprocess
try:
        print(f'{Color.REVERCE}TensorFlow.js Float32 conversion started{Color.RESET}', '=' * 44)
result = subprocess.check_output(
[
'tensorflowjs_converter',
'--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
'--signature_name', 'serving_default',
'--saved_model_tags', 'serve',
model_output_path, f'{model_output_path}/tfjs_model_float32'
],
'Activity No toca BOTOn: Amazon Web Services desde python Confirmed. Thank you!': 'Activity No toca BOTOn: Amazon Web Services desde python Confirmed. Thank you!',
'Activity Oily Python: a Reservoir Engineering Perspective Confirmed. Thank you!': 'Activity Oily Python: a Reservoir Engineering Perspective Confirmed. Thank you!',
'Activity OpenERP Confirmed. Thank you!': 'Activity OpenERP Confirmed. Thank you!',
'Activity Ordenando MVC con las ideas de Merleau-Ponty Confirmed. Thank you!': 'Activity Ordenando MVC con las ideas de Merleau-Ponty Confirmed. Thank you!',
'Activity Pensando en APIs Confirmed. Thank you!': 'Activity Pensando en APIs Confirmed. Thank you!',
'Activity PgDay Argentina 2012 Confirmed. Thank you!': 'Activity PgDay Argentina 2012 Confirmed. Thank you!',
'Activity pilas-engine, haciendo juegos en medio de la revolución Confirmed. Thank you!': 'Activity pilas-engine, haciendo juegos en medio de la revolución Confirmed. Thank you!',
"Activity Please don't help me so much Confirmed. Thank you!": "Activity Please don't help me so much Confirmed. Thank you!",
'Activity Ponete las pilas con Django Confirmed. Thank you!': 'Activity Ponete las pilas con Django Confirmed. Thank you!',
'Activity Poniéndole Lógica Peirceana a la Programación Confirmed. Thank you!': 'Activity Poniéndole Lógica Peirceana a la Programación Confirmed. Thank you!',
'Activity Posicionamiento de vértebras mediante Landmarks y Redes Neuronales Confirmed. Thank you!': 'Activity Posicionamiento de vértebras mediante Landmarks y Redes Neuronales Confirmed. Thank you!',
'Activity PostgreSQL when it is not your job Confirmed. Thank you!': 'Activity PostgreSQL when it is not your job Confirmed. Thank you!',
'Activity Procesamiento Inteligente de Imágenes: Posicionamiento de vértebras mediante Landmarks y Redes Neuronales Confirmed. Thank you!': 'Activity Procesamiento Inteligente de Imágenes: Posicionamiento de vértebras mediante Landmarks y Redes Neuronales Confirmed. Thank you!',
'Activity Proposal': 'Propuesta de Actividad',
'Activity Prymatex (sprint) Confirmed. Thank you!': 'Activity Prymatex (sprint) Confirmed. Thank you!',
'Activity Prymatex Confirmed. Thank you!': 'Activity Prymatex Confirmed. Thank you!',
'Activity PyAfipWs Confirmed. Thank you!': 'Activity PyAfipWs Confirmed. Thank you!',
'Activity PyPy Project Confirmed. Thank you!': 'Activity PyPy Project Confirmed. Thank you!',
'Activity Python 2 debe morir Confirmed. Thank you!': 'Activity Python 2 debe morir Confirmed. Thank you!',
"Activity Python 3.3: Trust Me, It's Better than 2.7 Confirmed. Thank you!": "Activity Python 3.3: Trust Me, It's Better than 2.7 Confirmed. Thank you!",
'Activity Python 3: The Next Generation ( is Already Here ) Confirmed. Thank you!': 'Activity Python 3: The Next Generation ( is Already Here ) Confirmed. Thank you!',
'Activity Python @ Microsoft: A Renaissance Confirmed. Thank you!': 'Activity Python @ Microsoft: A Renaissance Confirmed. Thank you!',
'Activity Python Apesta: problemas comunes y soluciones alternativas (Panel) Confirmed. Thank you!': 'Activity Python Apesta: problemas comunes y soluciones alternativas (Panel) Confirmed. Thank you!',
'Activity Python como lenguaje para el procesamiento de imágenes satelites Confirmed. Thank you!': 'Activity Python como lenguaje para el procesamiento de imágenes satelites Confirmed. Thank you!',
'Activity Python core sprint Confirmed. Thank you!': 'Activity Python core sprint Confirmed. Thank you!',
'Activity Python en Educación Confirmed. Thank you!': 'Activity Python en Educación Confirmed. Thank you!',
'Activity Python en la Exploración Minera Confirmed. Thank you!': 'Activity Python en la Exploración Minera Confirmed. Thank you!',
'Activity Python más rápido que C Confirmed. Thank you!': 'Activity Python más rápido que C Confirmed. Thank you!',
'Activity Python para el procesamiento de secuencias genéticas Confirmed. Thank you!': 'Activity Python para el procesamiento de secuencias genéticas Confirmed. Thank you!',
'Activity Python y sus métodos mágicos Confirmed. Thank you!': 'Activity Python y sus métodos mágicos Confirmed. Thank you!',
'Activity RAD2PY: desarrollo ágil, sólido y disciplinado Confirmed. Thank you!': 'Activity RAD2PY: desarrollo ágil, sólido y disciplinado Confirmed. Thank you!',
'Activity rad2py: IDE para el Desarrollo Rápido de Aplicaciones bajo el Proceso de Software Personal Confirmed. Thank you!': 'Activity rad2py: IDE para el Desarrollo Rápido de Aplicaciones bajo el Proceso de Software Personal Confirmed. Thank you!',
'Activity Really good logging -- talk Confirmed. Thank you!': 'Activity Really good logging -- talk Confirmed. Thank you!',
'Activity Recepción - "After Party" Confirmed. Thank you!': 'Activity Recepción - "After Party" Confirmed. Thank you!',
'Activity review': 'Activity review',
'Activity Robotica educativa con python, Proyecto ICARO Confirmed. Thank you!': 'Activity Robotica educativa con python, Proyecto ICARO Confirmed. Thank you!',
'Activity Salida Turística Confirmed. Thank you!': 'Activity Salida Turística Confirmed. Thank you!',
'Activity Sho, un ambiente interactivo para análisis de datos Confirmed. Thank you!': 'Activity Sho, un ambiente interactivo para análisis de datos Confirmed. Thank you!',
'Activity Si Ironman programara, usaría Vim (poster) Confirmed. Thank you!': 'Activity Si Ironman programara, usaría Vim (poster) Confirmed. Thank you!',
'Activity Sistema De Stock con Python. Confirmed. Thank you!': 'Activity Sistema De Stock con Python. Confirmed. Thank you!',
'Activity sleepy: are snakes still sleeping? Confirmed. Thank you!': 'Activity sleepy: are snakes still sleeping? Confirmed. Thank you!',
'Activity Software y Modelo de Simulación para el Control del mosquito Aedes aegypti por el Crustáceo Mesocyclops thermocyclopoides Confirmed. Thank you!': 'Activity Software y Modelo de Simulación para el Control del mosquito Aedes aegypti por el Crustáceo Mesocyclops thermocyclopoides Confirmed. Thank you!',
'Activity Stepping Through CPython Confirmed. Thank you!': 'Activity Stepping Through CPython Confirmed. Thank you!',
'Activity SugarLabs Sprint Confirmed. Thank you!': 'Activity SugarLabs Sprint Confirmed. Thank you!',
'Activity Testing funcional con python Confirmed. Thank you!': 'Activity Testing funcional con python Confirmed. Thank you!',
'Activity The Bad, The bad and Ugly. Confirmed. Thank you!': 'Activity The Bad, The bad and Ugly. Confirmed. Thank you!',
'Activity Threads con GTK, sin que se congele la interfaz Confirmed. Thank you!': 'Activity Threads con GTK, sin que se congele la interfaz Confirmed. Thank you!',
'Activity Todo sobre Deferreds Confirmed. Thank you!': 'Activity Todo sobre Deferreds Confirmed. Thank you!',
'Activity Tryton Confirmed. Thank you!': 'Activity Tryton Confirmed. Thank you!',
'Activity Tutorial de web2py (Curso Intensivo) Confirmed. Thank you!': 'Activity Tutorial de web2py (Curso Intensivo) Confirmed. Thank you!',
'Activity Técnicas de Programación eXtrema en Python Confirmed. Thank you!': 'Activity Técnicas de Programación eXtrema en Python Confirmed. Thank you!',
'Activity Unbreaking Your Django Application Confirmed. Thank you!': 'Activity Unbreaking Your Django Application Confirmed. Thank you!',
'Activity Usando fabric para estandarizar el proceso de desarrollo Confirmed. Thank you!': 'Activity Usando fabric para estandarizar el proceso de desarrollo Confirmed. Thank you!',
'Activity Usando Twisted para hacer aplicaciones de escritorio no bloqueantes Confirmed. Thank you!': 'Activity Usando Twisted para hacer aplicaciones de escritorio no bloqueantes Confirmed. Thank you!',
'Activity web2py Confirmed. Thank you!': 'Activity web2py Confirmed. Thank you!',
'Activity web2py: "Web Development Should be Easy" Confirmed. Thank you!': 'Activity web2py: "Web Development Should be Easy" Confirmed. Thank you!',
'Activity Web2py: Pensando en grande Confirmed. Thank you!': 'Activity Web2py: Pensando en grande Confirmed. Thank you!',
'Activity What is Google App Engine? Confirmed. Thank you!': 'Activity What is Google App Engine? Confirmed. Thank you!',
'Activity Yatel - Exploración de perfiles para Minería de Datos Confirmed. Thank you!': 'Activity Yatel - Exploración de perfiles para Minería de Datos Confirmed. Thank you!',
'Activity ¿Es esto Pythonico? Confirmed. Thank you!': 'Activity ¿Es esto Pythonico? Confirmed. Thank you!',
'Activity ¿Tenemos un “environment” completo en Python para realizar investigación en finanzas cuantitativas? Algunas herramientas pythonicas para el análisis y modelado de series de tiempo. Confirmed. Thank you!': 'Activity ¿Tenemos un “environment” completo en Python para realizar investigación en finanzas cuantitativas? Algunas herramientas pythonicas para el análisis y modelado de series de tiempo. Confirmed. Thank you!',
'Activity “import wx”: a Tale of Neverending GUI Power Confirmed. Thank you!': 'Activity “import wx”: a Tale of Neverending GUI Power Confirmed. Thank you!',
'Add': 'Add',
'Add a comment on this Activity': 'Agregue un comentario a esta Actividad',
'Add Author': 'Agregar Autor',
'add author': 'agregar autor',
'Add/remove tutorials': 'Agregue/remueva tutoriales',
'Added to Reviewer Group!': 'Added to Reviewer Group!',
'Additional Donation': 'Donación adicional',
'Additional remarks': 'Comentarios adicionales',
'Adherente': 'Adherente',
'Advanced': 'Avanzado',
'After completing this form you will receive a verification message by email. Follow the link therein, login with the email/password you chose below and you will be redirected to a payment form. You will be able to pay by credit card using Google Checkout. Register as non-attending in order to register and pay for other people.': 'Despues de completar este formulario usted recibirá un mensaje de verificación por correo electrónico (siga el enlace provisto para validar su inscripción)',
'Agradecimiento Especial': 'Agradecimiento Especial',
'All registered attendees will receive free of charge:': 'Todos los participantes registrados recibirán sin cargo:',
'Already in the Reviewer Group!': 'Already in the Reviewer Group!',
'alta calidad /medium': 'alta calidad /medium',
'alta calidad/large': 'alta calidad/large',
'alta calidad/small': 'alta calidad/small',
'alta calidad/xlarge': 'alta calidad/xlarge',
domain=NamedThing, range=Optional[Union[Union[str, DeviceId], List[Union[str, DeviceId]]]])
slots.has_procedure = Slot(uri=BIOLINK.has_procedure, name="has procedure", curie=BIOLINK.curie('has_procedure'),
model_uri=BIOLINK.has_procedure, domain=NamedThing, range=Optional[Union[Union[str, ProcedureId], List[Union[str, ProcedureId]]]])
slots.has_receptor = Slot(uri=BIOLINK.has_receptor, name="has receptor", curie=BIOLINK.curie('has_receptor'),
model_uri=BIOLINK.has_receptor, domain=None, range=Optional[Union[str, OrganismalEntityId]])
slots.has_stressor = Slot(uri=BIOLINK.has_stressor, name="has stressor", curie=BIOLINK.curie('has_stressor'),
model_uri=BIOLINK.has_stressor, domain=None, range=Optional[str])
slots.has_route = Slot(uri=BIOLINK.has_route, name="has route", curie=BIOLINK.curie('has_route'),
model_uri=BIOLINK.has_route, domain=None, range=Optional[str])
slots.has_population_context = Slot(uri=BIOLINK.has_population_context, name="has population context", curie=BIOLINK.curie('has_population_context'),
model_uri=BIOLINK.has_population_context, domain=Association, range=Optional[Union[str, PopulationOfIndividualOrganismsId]])
slots.has_temporal_context = Slot(uri=BIOLINK.has_temporal_context, name="has temporal context", curie=BIOLINK.curie('has_temporal_context'),
model_uri=BIOLINK.has_temporal_context, domain=Association, range=Optional[Union[str, TimeType]])
slots.is_supplement = Slot(uri=BIOLINK.is_supplement, name="is supplement", curie=BIOLINK.curie('is_supplement'),
model_uri=BIOLINK.is_supplement, domain=NamedThing, range=Optional[Union[str, ChemicalMixtureId]])
slots.trade_name = Slot(uri=BIOLINK.trade_name, name="trade name", curie=BIOLINK.curie('trade_name'),
model_uri=BIOLINK.trade_name, domain=NamedThing, range=Optional[Union[str, ChemicalEntityId]])
slots.available_from = Slot(uri=BIOLINK.available_from, name="available from", curie=BIOLINK.curie('available_from'),
model_uri=BIOLINK.available_from, domain=NamedThing, range=Optional[Union[Union[str, "DrugAvailabilityEnum"], List[Union[str, "DrugAvailabilityEnum"]]]])
slots.animal_model_available_from = Slot(uri=BIOLINK.animal_model_available_from, name="animal model available from", curie=BIOLINK.curie('animal_model_available_from'),
model_uri=BIOLINK.animal_model_available_from, domain=NamedThing, range=Optional[Union[Union[str, DiseaseOrPhenotypicFeatureId], List[Union[str, DiseaseOrPhenotypicFeatureId]]]])
slots.highest_FDA_approval_status = Slot(uri=BIOLINK.highest_FDA_approval_status, name="highest FDA approval status", curie=BIOLINK.curie('highest_FDA_approval_status'),
model_uri=BIOLINK.highest_FDA_approval_status, domain=None, range=Optional[str])
slots.related_to = Slot(uri=BIOLINK.related_to, name="related to", curie=BIOLINK.curie('related_to'),
model_uri=BIOLINK.related_to, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.superclass_of = Slot(uri=BIOLINK.superclass_of, name="superclass of", curie=BIOLINK.curie('superclass_of'),
model_uri=BIOLINK.superclass_of, domain=None, range=Optional[Union[Union[dict, "OntologyClass"], List[Union[dict, "OntologyClass"]]]])
slots.subclass_of = Slot(uri=BIOLINK.subclass_of, name="subclass of", curie=BIOLINK.curie('subclass_of'),
model_uri=BIOLINK.subclass_of, domain=None, range=Optional[Union[Union[dict, "OntologyClass"], List[Union[dict, "OntologyClass"]]]])
slots.same_as = Slot(uri=BIOLINK.same_as, name="same as", curie=BIOLINK.curie('same_as'),
model_uri=BIOLINK.same_as, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.close_match = Slot(uri=BIOLINK.close_match, name="close match", curie=BIOLINK.curie('close_match'),
model_uri=BIOLINK.close_match, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.exact_match = Slot(uri=BIOLINK.exact_match, name="exact match", curie=BIOLINK.curie('exact_match'),
model_uri=BIOLINK.exact_match, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.broad_match = Slot(uri=BIOLINK.broad_match, name="broad match", curie=BIOLINK.curie('broad_match'),
model_uri=BIOLINK.broad_match, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.narrow_match = Slot(uri=BIOLINK.narrow_match, name="narrow match", curie=BIOLINK.curie('narrow_match'),
model_uri=BIOLINK.narrow_match, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.opposite_of = Slot(uri=BIOLINK.opposite_of, name="opposite of", curie=BIOLINK.curie('opposite_of'),
model_uri=BIOLINK.opposite_of, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.has_real_world_evidence_of_association_with = Slot(uri=BIOLINK.has_real_world_evidence_of_association_with, name="has real world evidence of association with", curie=BIOLINK.curie('has_real_world_evidence_of_association_with'),
model_uri=BIOLINK.has_real_world_evidence_of_association_with, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.associated_with_real_world_evidence = Slot(uri=BIOLINK.associated_with_real_world_evidence, name="associated with real world evidence", curie=BIOLINK.curie('associated_with_real_world_evidence'),
model_uri=BIOLINK.associated_with_real_world_evidence, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.active_in = Slot(uri=BIOLINK.active_in, name="active in", curie=BIOLINK.curie('active_in'),
model_uri=BIOLINK.active_in, domain=None, range=Optional[Union[Union[str, CellularComponentId], List[Union[str, CellularComponentId]]]])
slots.acts_upstream_of = Slot(uri=BIOLINK.acts_upstream_of, name="acts upstream of", curie=BIOLINK.curie('acts_upstream_of'),
model_uri=BIOLINK.acts_upstream_of, domain=None, range=Optional[Union[Union[str, BiologicalProcessId], List[Union[str, BiologicalProcessId]]]])
slots.has_upstream_actor = Slot(uri=BIOLINK.has_upstream_actor, name="has upstream actor", curie=BIOLINK.curie('has_upstream_actor'),
model_uri=BIOLINK.has_upstream_actor, domain=BiologicalProcess, range=Optional[Union[Union[dict, "GeneOrGeneProduct"], List[Union[dict, "GeneOrGeneProduct"]]]])
slots.acts_upstream_of_positive_effect = Slot(uri=BIOLINK.acts_upstream_of_positive_effect, name="acts upstream of positive effect", curie=BIOLINK.curie('acts_upstream_of_positive_effect'),
model_uri=BIOLINK.acts_upstream_of_positive_effect, domain=None, range=Optional[Union[Union[str, BiologicalProcessId], List[Union[str, BiologicalProcessId]]]])
slots.has_positive_upstream_actor = Slot(uri=BIOLINK.has_positive_upstream_actor, name="has positive upstream actor", curie=BIOLINK.curie('has_positive_upstream_actor'),
model_uri=BIOLINK.has_positive_upstream_actor, domain=BiologicalProcess, range=Optional[Union[Union[dict, "GeneOrGeneProduct"], List[Union[dict, "GeneOrGeneProduct"]]]])
slots.acts_upstream_of_negative_effect = Slot(uri=BIOLINK.acts_upstream_of_negative_effect, name="acts upstream of negative effect", curie=BIOLINK.curie('acts_upstream_of_negative_effect'),
model_uri=BIOLINK.acts_upstream_of_negative_effect, domain=None, range=Optional[Union[Union[str, BiologicalProcessId], List[Union[str, BiologicalProcessId]]]])
slots.has_negative_upstream_actor = Slot(uri=BIOLINK.has_negative_upstream_actor, name="has negative upstream actor", curie=BIOLINK.curie('has_negative_upstream_actor'),
model_uri=BIOLINK.has_negative_upstream_actor, domain=BiologicalProcess, range=Optional[Union[Union[dict, "GeneOrGeneProduct"], List[Union[dict, "GeneOrGeneProduct"]]]])
slots.acts_upstream_of_or_within = Slot(uri=BIOLINK.acts_upstream_of_or_within, name="acts upstream of or within", curie=BIOLINK.curie('acts_upstream_of_or_within'),
model_uri=BIOLINK.acts_upstream_of_or_within, domain=None, range=Optional[Union[Union[str, BiologicalProcessId], List[Union[str, BiologicalProcessId]]]])
slots.has_upstream_or_within_actor = Slot(uri=BIOLINK.has_upstream_or_within_actor, name="has upstream or within actor", curie=BIOLINK.curie('has_upstream_or_within_actor'),
model_uri=BIOLINK.has_upstream_or_within_actor, domain=BiologicalProcess, range=Optional[Union[Union[dict, "GeneOrGeneProduct"], List[Union[dict, "GeneOrGeneProduct"]]]])
slots.acts_upstream_of_or_within_positive_effect = Slot(uri=BIOLINK.acts_upstream_of_or_within_positive_effect, name="acts upstream of or within positive effect", curie=BIOLINK.curie('acts_upstream_of_or_within_positive_effect'),
model_uri=BIOLINK.acts_upstream_of_or_within_positive_effect, domain=None, range=Optional[Union[Union[str, BiologicalProcessId], List[Union[str, BiologicalProcessId]]]])
slots.has_positive_upstream_or_within_actor = Slot(uri=BIOLINK.has_positive_upstream_or_within_actor, name="has positive upstream or within actor", curie=BIOLINK.curie('has_positive_upstream_or_within_actor'),
model_uri=BIOLINK.has_positive_upstream_or_within_actor, domain=BiologicalProcess, range=Optional[Union[Union[dict, "GeneOrGeneProduct"], List[Union[dict, "GeneOrGeneProduct"]]]])
slots.acts_upstream_of_or_within_negative_effect = Slot(uri=BIOLINK.acts_upstream_of_or_within_negative_effect, name="acts upstream of or within negative effect", curie=BIOLINK.curie('acts_upstream_of_or_within_negative_effect'),
model_uri=BIOLINK.acts_upstream_of_or_within_negative_effect, domain=None, range=Optional[Union[Union[str, BiologicalProcessId], List[Union[str, BiologicalProcessId]]]])
slots.has_negative_upstream_or_within_actor = Slot(uri=BIOLINK.has_negative_upstream_or_within_actor, name="has negative upstream or within actor", curie=BIOLINK.curie('has_negative_upstream_or_within_actor'),
model_uri=BIOLINK.has_negative_upstream_or_within_actor, domain=BiologicalProcess, range=Optional[Union[Union[dict, "GeneOrGeneProduct"], List[Union[dict, "GeneOrGeneProduct"]]]])
slots.mentions = Slot(uri=BIOLINK.mentions, name="mentions", curie=BIOLINK.curie('mentions'),
model_uri=BIOLINK.mentions, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.contributor = Slot(uri=BIOLINK.contributor, name="contributor", curie=BIOLINK.curie('contributor'),
model_uri=BIOLINK.contributor, domain=InformationContentEntity, range=Optional[Union[Union[str, AgentId], List[Union[str, AgentId]]]])
slots.provider = Slot(uri=BIOLINK.provider, name="provider", curie=BIOLINK.curie('provider'),
model_uri=BIOLINK.provider, domain=InformationContentEntity, range=Optional[Union[Union[str, AgentId], List[Union[str, AgentId]]]])
slots.publisher = Slot(uri=BIOLINK.publisher, name="publisher", curie=BIOLINK.curie('publisher'),
model_uri=BIOLINK.publisher, domain=Publication, range=Optional[Union[Union[str, AgentId], List[Union[str, AgentId]]]])
slots.editor = Slot(uri=BIOLINK.editor, name="editor", curie=BIOLINK.curie('editor'),
model_uri=BIOLINK.editor, domain=Publication, range=Optional[Union[Union[str, AgentId], List[Union[str, AgentId]]]])
slots.author = Slot(uri=BIOLINK.author, name="author", curie=BIOLINK.curie('author'),
model_uri=BIOLINK.author, domain=Publication, range=Optional[Union[Union[str, AgentId], List[Union[str, AgentId]]]])
slots.interacts_with = Slot(uri=BIOLINK.interacts_with, name="interacts with", curie=BIOLINK.curie('interacts_with'),
model_uri=BIOLINK.interacts_with, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.physically_interacts_with = Slot(uri=BIOLINK.physically_interacts_with, name="physically interacts with", curie=BIOLINK.curie('physically_interacts_with'),
model_uri=BIOLINK.physically_interacts_with, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.chemically_interacts_with = Slot(uri=BIOLINK.chemically_interacts_with, name="chemically interacts with", curie=BIOLINK.curie('chemically_interacts_with'),
model_uri=BIOLINK.chemically_interacts_with, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.molecularly_interacts_with = Slot(uri=BIOLINK.molecularly_interacts_with, name="molecularly interacts with", curie=BIOLINK.curie('molecularly_interacts_with'),
model_uri=BIOLINK.molecularly_interacts_with, domain=MolecularEntity, range=Optional[Union[Union[str, MolecularEntityId], List[Union[str, MolecularEntityId]]]])
slots.genetically_interacts_with = Slot(uri=BIOLINK.genetically_interacts_with, name="genetically interacts with", curie=BIOLINK.curie('genetically_interacts_with'),
model_uri=BIOLINK.genetically_interacts_with, domain=Gene, range=Optional[Union[Union[str, GeneId], List[Union[str, GeneId]]]])
slots.affects = Slot(uri=BIOLINK.affects, name="affects", curie=BIOLINK.curie('affects'),
model_uri=BIOLINK.affects, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.affected_by = Slot(uri=BIOLINK.affected_by, name="affected by", curie=BIOLINK.curie('affected_by'),
model_uri=BIOLINK.affected_by, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.increases_amount_or_activity_of = Slot(uri=BIOLINK.increases_amount_or_activity_of, name="increases amount or activity of", curie=BIOLINK.curie('increases_amount_or_activity_of'),
model_uri=BIOLINK.increases_amount_or_activity_of, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.decreases_amount_or_activity_of = Slot(uri=BIOLINK.decreases_amount_or_activity_of, name="decreases amount or activity of", curie=BIOLINK.curie('decreases_amount_or_activity_of'),
model_uri=BIOLINK.decreases_amount_or_activity_of, domain=NamedThing, range=Optional[Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]]])
slots.chemical_role_mixin = Slot(uri=BIOLINK.chemical_role_mixin, name="chemical role mixin", curie=BIOLINK.curie('chemical_role_mixin'),
model_uri=BIOLINK.chemical_role_mixin, domain=None, range=Optional[str])
slots.biological_role_mixin = Slot(uri=BIOLINK.biological_role_mixin, name="biological role mixin", curie=BIOLINK.curie('biological_role_mixin'),
model_uri=BIOLINK.biological_role_mixin, domain=None, range=Optional[str])
slots.affects_abundance_of = Slot(uri=BIOLINK.affects_abundance_of, name="affects abundance of", curie=BIOLINK.curie('affects_abundance_of'),
model_uri=BIOLINK.affects_abundance_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.abundance_affected_by = Slot(uri=BIOLINK.abundance_affected_by, name="abundance affected by", curie=BIOLINK.curie('abundance_affected_by'),
model_uri=BIOLINK.abundance_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_abundance_of = Slot(uri=BIOLINK.increases_abundance_of, name="increases abundance of", curie=BIOLINK.curie('increases_abundance_of'),
model_uri=BIOLINK.increases_abundance_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.abundance_increased_by = Slot(uri=BIOLINK.abundance_increased_by, name="abundance increased by", curie=BIOLINK.curie('abundance_increased_by'),
model_uri=BIOLINK.abundance_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_abundance_of = Slot(uri=BIOLINK.decreases_abundance_of, name="decreases abundance of", curie=BIOLINK.curie('decreases_abundance_of'),
model_uri=BIOLINK.decreases_abundance_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.abundance_decreased_by = Slot(uri=BIOLINK.abundance_decreased_by, name="abundance decreased by", curie=BIOLINK.curie('abundance_decreased_by'),
model_uri=BIOLINK.abundance_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_activity_of = Slot(uri=BIOLINK.increases_activity_of, name="increases activity of", curie=BIOLINK.curie('increases_activity_of'),
model_uri=BIOLINK.increases_activity_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_activity_of = Slot(uri=BIOLINK.affects_activity_of, name="affects activity of", curie=BIOLINK.curie('affects_activity_of'),
model_uri=BIOLINK.affects_activity_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.activity_affected_by = Slot(uri=BIOLINK.activity_affected_by, name="activity affected by", curie=BIOLINK.curie('activity_affected_by'),
model_uri=BIOLINK.activity_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.activity_increased_by = Slot(uri=BIOLINK.activity_increased_by, name="activity increased by", curie=BIOLINK.curie('activity_increased_by'),
model_uri=BIOLINK.activity_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_activity_of = Slot(uri=BIOLINK.decreases_activity_of, name="decreases activity of", curie=BIOLINK.curie('decreases_activity_of'),
model_uri=BIOLINK.decreases_activity_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.activity_decreased_by = Slot(uri=BIOLINK.activity_decreased_by, name="activity decreased by", curie=BIOLINK.curie('activity_decreased_by'),
model_uri=BIOLINK.activity_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_expression_of = Slot(uri=BIOLINK.affects_expression_of, name="affects expression of", curie=BIOLINK.curie('affects_expression_of'),
model_uri=BIOLINK.affects_expression_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.expression_affected_by = Slot(uri=BIOLINK.expression_affected_by, name="expression affected by", curie=BIOLINK.curie('expression_affected_by'),
model_uri=BIOLINK.expression_affected_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_expression_of = Slot(uri=BIOLINK.increases_expression_of, name="increases expression of", curie=BIOLINK.curie('increases_expression_of'),
model_uri=BIOLINK.increases_expression_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.expression_increased_by = Slot(uri=BIOLINK.expression_increased_by, name="expression increased by", curie=BIOLINK.curie('expression_increased_by'),
model_uri=BIOLINK.expression_increased_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_expression_of = Slot(uri=BIOLINK.decreases_expression_of, name="decreases expression of", curie=BIOLINK.curie('decreases_expression_of'),
model_uri=BIOLINK.decreases_expression_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.expression_decreased_by = Slot(uri=BIOLINK.expression_decreased_by, name="expression decreased by", curie=BIOLINK.curie('expression_decreased_by'),
model_uri=BIOLINK.expression_decreased_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_folding_of = Slot(uri=BIOLINK.affects_folding_of, name="affects folding of", curie=BIOLINK.curie('affects_folding_of'),
model_uri=BIOLINK.affects_folding_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.folding_affected_by = Slot(uri=BIOLINK.folding_affected_by, name="folding affected by", curie=BIOLINK.curie('folding_affected_by'),
model_uri=BIOLINK.folding_affected_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_folding_of = Slot(uri=BIOLINK.increases_folding_of, name="increases folding of", curie=BIOLINK.curie('increases_folding_of'),
model_uri=BIOLINK.increases_folding_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.folding_increased_by = Slot(uri=BIOLINK.folding_increased_by, name="folding increased by", curie=BIOLINK.curie('folding_increased_by'),
model_uri=BIOLINK.folding_increased_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_folding_of = Slot(uri=BIOLINK.decreases_folding_of, name="decreases folding of", curie=BIOLINK.curie('decreases_folding_of'),
model_uri=BIOLINK.decreases_folding_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.folding_decreased_by = Slot(uri=BIOLINK.folding_decreased_by, name="folding decreased by", curie=BIOLINK.curie('folding_decreased_by'),
model_uri=BIOLINK.folding_decreased_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_localization_of = Slot(uri=BIOLINK.affects_localization_of, name="affects localization of", curie=BIOLINK.curie('affects_localization_of'),
model_uri=BIOLINK.affects_localization_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.localization_affected_by = Slot(uri=BIOLINK.localization_affected_by, name="localization affected by", curie=BIOLINK.curie('localization_affected_by'),
model_uri=BIOLINK.localization_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_localization_of = Slot(uri=BIOLINK.increases_localization_of, name="increases localization of", curie=BIOLINK.curie('increases_localization_of'),
model_uri=BIOLINK.increases_localization_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.localization_increased_by = Slot(uri=BIOLINK.localization_increased_by, name="localization increased by", curie=BIOLINK.curie('localization_increased_by'),
model_uri=BIOLINK.localization_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_localization_of = Slot(uri=BIOLINK.decreases_localization_of, name="decreases localization of", curie=BIOLINK.curie('decreases_localization_of'),
model_uri=BIOLINK.decreases_localization_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.localization_decreased_by = Slot(uri=BIOLINK.localization_decreased_by, name="localization decreased by", curie=BIOLINK.curie('localization_decreased_by'),
model_uri=BIOLINK.localization_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_metabolic_processing_of = Slot(uri=BIOLINK.affects_metabolic_processing_of, name="affects metabolic processing of", curie=BIOLINK.curie('affects_metabolic_processing_of'),
model_uri=BIOLINK.affects_metabolic_processing_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.metabolic_processing_affected_by = Slot(uri=BIOLINK.metabolic_processing_affected_by, name="metabolic processing affected by", curie=BIOLINK.curie('metabolic_processing_affected_by'),
model_uri=BIOLINK.metabolic_processing_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_metabolic_processing_of = Slot(uri=BIOLINK.increases_metabolic_processing_of, name="increases metabolic processing of", curie=BIOLINK.curie('increases_metabolic_processing_of'),
model_uri=BIOLINK.increases_metabolic_processing_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.metabolic_processing_increased_by = Slot(uri=BIOLINK.metabolic_processing_increased_by, name="metabolic processing increased by", curie=BIOLINK.curie('metabolic_processing_increased_by'),
model_uri=BIOLINK.metabolic_processing_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_metabolic_processing_of = Slot(uri=BIOLINK.decreases_metabolic_processing_of, name="decreases metabolic processing of", curie=BIOLINK.curie('decreases_metabolic_processing_of'),
model_uri=BIOLINK.decreases_metabolic_processing_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.metabolic_processing_decreased_by = Slot(uri=BIOLINK.metabolic_processing_decreased_by, name="metabolic processing decreased by", curie=BIOLINK.curie('metabolic_processing_decreased_by'),
model_uri=BIOLINK.metabolic_processing_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_molecular_modification_of = Slot(uri=BIOLINK.affects_molecular_modification_of, name="affects molecular modification of", curie=BIOLINK.curie('affects_molecular_modification_of'),
model_uri=BIOLINK.affects_molecular_modification_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.molecular_modification_affected_by = Slot(uri=BIOLINK.molecular_modification_affected_by, name="molecular modification affected by", curie=BIOLINK.curie('molecular_modification_affected_by'),
model_uri=BIOLINK.molecular_modification_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_molecular_modification_of = Slot(uri=BIOLINK.increases_molecular_modification_of, name="increases molecular modification of", curie=BIOLINK.curie('increases_molecular_modification_of'),
model_uri=BIOLINK.increases_molecular_modification_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.molecular_modification_increased_by = Slot(uri=BIOLINK.molecular_modification_increased_by, name="molecular modification increased by", curie=BIOLINK.curie('molecular_modification_increased_by'),
model_uri=BIOLINK.molecular_modification_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_molecular_modification_of = Slot(uri=BIOLINK.decreases_molecular_modification_of, name="decreases molecular modification of", curie=BIOLINK.curie('decreases_molecular_modification_of'),
model_uri=BIOLINK.decreases_molecular_modification_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.molecular_modification_decreased_by = Slot(uri=BIOLINK.molecular_modification_decreased_by, name="molecular modification decreased by", curie=BIOLINK.curie('molecular_modification_decreased_by'),
model_uri=BIOLINK.molecular_modification_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_synthesis_of = Slot(uri=BIOLINK.affects_synthesis_of, name="affects synthesis of", curie=BIOLINK.curie('affects_synthesis_of'),
model_uri=BIOLINK.affects_synthesis_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.synthesis_affected_by = Slot(uri=BIOLINK.synthesis_affected_by, name="synthesis affected by", curie=BIOLINK.curie('synthesis_affected_by'),
model_uri=BIOLINK.synthesis_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_synthesis_of = Slot(uri=BIOLINK.increases_synthesis_of, name="increases synthesis of", curie=BIOLINK.curie('increases_synthesis_of'),
model_uri=BIOLINK.increases_synthesis_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.synthesis_increased_by = Slot(uri=BIOLINK.synthesis_increased_by, name="synthesis increased by", curie=BIOLINK.curie('synthesis_increased_by'),
model_uri=BIOLINK.synthesis_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_synthesis_of = Slot(uri=BIOLINK.decreases_synthesis_of, name="decreases synthesis of", curie=BIOLINK.curie('decreases_synthesis_of'),
model_uri=BIOLINK.decreases_synthesis_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.synthesis_decreased_by = Slot(uri=BIOLINK.synthesis_decreased_by, name="synthesis decreased by", curie=BIOLINK.curie('synthesis_decreased_by'),
model_uri=BIOLINK.synthesis_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_degradation_of = Slot(uri=BIOLINK.affects_degradation_of, name="affects degradation of", curie=BIOLINK.curie('affects_degradation_of'),
model_uri=BIOLINK.affects_degradation_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.degradation_affected_by = Slot(uri=BIOLINK.degradation_affected_by, name="degradation affected by", curie=BIOLINK.curie('degradation_affected_by'),
model_uri=BIOLINK.degradation_affected_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.increases_degradation_of = Slot(uri=BIOLINK.increases_degradation_of, name="increases degradation of", curie=BIOLINK.curie('increases_degradation_of'),
model_uri=BIOLINK.increases_degradation_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.degradation_increased_by = Slot(uri=BIOLINK.degradation_increased_by, name="degradation increased by", curie=BIOLINK.curie('degradation_increased_by'),
model_uri=BIOLINK.degradation_increased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.decreases_degradation_of = Slot(uri=BIOLINK.decreases_degradation_of, name="decreases degradation of", curie=BIOLINK.curie('decreases_degradation_of'),
model_uri=BIOLINK.decreases_degradation_of, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.degradation_decreased_by = Slot(uri=BIOLINK.degradation_decreased_by, name="degradation decreased by", curie=BIOLINK.curie('degradation_decreased_by'),
model_uri=BIOLINK.degradation_decreased_by, domain=ChemicalEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
slots.affects_mutation_rate_of = Slot(uri=BIOLINK.affects_mutation_rate_of, name="affects mutation rate of", curie=BIOLINK.curie('affects_mutation_rate_of'),
model_uri=BIOLINK.affects_mutation_rate_of, domain=ChemicalEntity, range=Optional[Union[Union[str, NucleicAcidEntityId], List[Union[str, NucleicAcidEntityId]]]])
slots.mutation_rate_affected_by = Slot(uri=BIOLINK.mutation_rate_affected_by, name="mutation rate affected by", curie=BIOLINK.curie('mutation_rate_affected_by'),
                   model_uri=BIOLINK.mutation_rate_affected_by, domain=NucleicAcidEntity, range=Optional[Union[Union[str, ChemicalEntityId], List[Union[str, ChemicalEntityId]]]])
from tensorhive.models.Task import Task, TaskStatus, try_parse_input_datetime
from tensorhive.models.User import User
from tensorhive.core import task_nursery, ssh
from tensorhive.core.task_nursery import SpawnError, ExitCodeError
from pssh.exceptions import ConnectionErrorException, AuthenticationException, UnknownHostException
from flask_jwt_extended import jwt_required, get_jwt_identity, get_jwt_claims
from sqlalchemy.orm.exc import NoResultFound
from tensorhive.config import API
from functools import wraps
from typing import List, Optional, Callable, Any, Dict, Tuple, Iterator
from datetime import datetime, timedelta
import logging
log = logging.getLogger(__name__)
T = API.RESPONSES['task']
S = API.RESPONSES['screen-sessions']
G = API.RESPONSES['general']
"""
This module contains two kinds of controllers:
- production-ready with authorization and authentication
- unprotected core business logic that can be used anywhere
My goal was to separate authorization from controllers' logic, so that
manual and automatic testing doesn't require patching Flask context
(@jwt_required breaks a lot of things)
Note: some controllers MUST take camelCased arguments in order to match the API spec.
They are aliased to snake_case immediately inside the controller body.
Connexion can do this aliasing under the hood, but it does not always work as it should (only in simple cases).
"""
# Typing aliases
Content = Dict[str, Any]
HttpStatusCode = int
TaskId = int
# TODO May want to move to utils
def is_admin():
claims = get_jwt_claims()
return 'admin' in claims['roles']
def synchronize(task_id: TaskId) -> None:
"""Updates the state of a Task object stored in database.
    It compares the current db record with the list of active screen sessions (their pids, specifically)
    on the node defined by that record (user@host).
If task_nursery is unable to fetch active screen sessions then
the new state is always set to unsynchronized.
If task.pid is not alive (db record is outdated), then it
makes transition from last known state to a new state:
state before sync => state applied after sync
-----------------------------------------------
running => terminated
unsynchronized => not_running
"""
log.debug('Syncing Task {}...'.format(task_id))
try:
task = Task.get(task_id)
assert task.host, 'hostname is empty'
assert task.user, 'user does not exist'
active_sessions_pids = task_nursery.running(host=task.host, user=task.user.username)
except NoResultFound:
# This exception must be handled within try/except block when using Task.get()
# In other words, methods decorated with @synchronize_task_record must handle this case by themselves!
log.warning(
'Task {} could not be found (also synchronized). Failing without taking any action...'.format(task_id))
pass
except (AssertionError, Exception) as e:
        # task_nursery.running pssh exceptions are also caught here
log.error('Unable to synchronize Task {}, reason: {}'.format(task_id, e))
log.debug('Task {} status was: {}'.format(task_id, task.status.name))
task.status = TaskStatus.unsynchronized
task.save()
log.debug('Task {} is now: {}'.format(task_id, task.status.name))
else:
log.debug('[BEFORE SYNC] Task {} status was: {}'.format(task_id, task.status.name))
change_status_msg = '[AFTER SYNC] Task {id} is now: {curr_status}'
if task.pid not in active_sessions_pids:
if task.status is TaskStatus.running:
task.status = TaskStatus.terminated
log.debug(change_status_msg.format(id=task_id, curr_status=task.status.name))
if task.status is TaskStatus.unsynchronized:
task.status = TaskStatus.not_running
log.debug(change_status_msg.format(id=task_id, curr_status=task.status.name))
task.pid = None
task.save()
def synchronize_task_record(func: Callable) -> Callable:
"""Decorated function MUST CONTAIN task id (int), function can take more arguments though.
In case when task.id could not be obtained from wrapped function's arguments,
synchronization will be aborted, but it won't affect wrapped function (silent fail).
"""
@wraps(func)
def sync_wrapper(*args, **kwargs):
try:
task_id = args[0]
except IndexError:
task_id = kwargs.get('id') or kwargs.get('task_id') or kwargs.get('taskId')
if task_id:
synchronize(task_id)
else:
log.critical('Synchronization aborted!')
log.critical('Task id not found in {}(), args: {}, kwargs: {}'.format(func.__name__, args, kwargs))
return func(*args, **kwargs)
return sync_wrapper
# Controllers
# POST /tasks
@jwt_required
def create(task: Dict[str, Any]) -> Tuple[Content, HttpStatusCode]:
try:
        # User is not allowed to create a task for someone else
assert task.get('userId') == get_jwt_identity()
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_create(task)
finally:
return content, status
# GET /tasks/{id}
@jwt_required
def get(id: TaskId) -> Tuple[Content, HttpStatusCode]:
try:
task = Task.get(id)
assert get_jwt_identity() == task.user_id or is_admin()
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_get(id)
finally:
return content, status
# GET /tasks?userId=X?syncAll=1
@jwt_required
def get_all(userId: Optional[int], syncAll: Optional[bool]) -> Tuple[Content, HttpStatusCode]:
user_id, sync_all = userId, syncAll
try:
if user_id:
# Owner or admin can fetch
assert get_jwt_identity() == user_id or is_admin()
else:
# Only admin can fetch all
assert is_admin()
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_get_all(user_id, sync_all)
finally:
return content, status
# PUT /tasks/{id}
@jwt_required
def update(id: TaskId, newValues: Dict[str, Any]) -> Tuple[Content, HttpStatusCode]:
try:
task = Task.get(id)
assert task.user_id == get_jwt_identity(), 'Not an owner'
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_update(id, newValues)
finally:
return content, status
# DELETE /tasks/{id}
@jwt_required
def destroy(id: TaskId) -> Tuple[Content, HttpStatusCode]:
try:
task = Task.get(id)
assert task.user_id == get_jwt_identity(), 'Not an owner'
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_destroy(id)
finally:
return content, status
# GET /tasks/{id}/spawn
@jwt_required
def spawn(id: TaskId) -> Tuple[Content, HttpStatusCode]:
try:
task = Task.get(id)
assert task.user_id == get_jwt_identity(), 'Not an owner'
except NoResultFound as e:
log.error(e)
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_spawn(id)
finally:
return content, status
# GET /tasks/{id}/terminate
@jwt_required
def terminate(id: TaskId, gracefully: Optional[bool] = True) -> Tuple[Content, HttpStatusCode]:
try:
task = Task.get(id)
assert get_jwt_identity() == task.user_id or is_admin()
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_terminate(id, gracefully)
finally:
return content, status
# GET /tasks/{id}/log
@jwt_required
def get_log(id: TaskId, tail: bool) -> Tuple[Content, HttpStatusCode]:
try:
task = Task.get(id)
assert get_jwt_identity() == task.user_id or is_admin()
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except AssertionError:
content, status = {'msg': G['unpriviliged']}, 403
else:
content, status = business_get_log(id, tail)
finally:
return content, status
# Business logic
def business_get_all(user_id: Optional[int], sync_all: Optional[bool]) -> Tuple[Content, HttpStatusCode]:
"""Fetches either all Task records or only those in relation with specific user.
Allows for synchronizing state of each Task out-of-the-box.
In typical scenario API client would want to get all records without sync and
then run sync each records individually.
"""
# TODO Exceptions should never occur, but need to experiment more
if user_id:
# Returns [] if such User with such id does not exist (SQLAlchemy behavior)
tasks = Task.query.filter(Task.user_id == user_id).all()
else:
tasks = Task.all()
# Syncing and dict conversion could be decoupled into two one-liner list
# comprehensions, but this single loop traverses the task list only once
# instead of twice.
results = []
for task in tasks:
if sync_all:
synchronize(task.id)
results.append(task.as_dict)
return {'msg': T['all']['success'], 'tasks': results}, 200
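# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The typical client flow mentioned in the docstring above, written as a
# hypothetical helper: fetch every record without syncing, then synchronize
# the records one by one. Only functions already defined in this module are
# used; the helper name and the 'id' key of the serialized task are
# assumptions.
def _example_fetch_then_sync(user_id):
    content, status = business_get_all(user_id, sync_all=False)
    if status == 200:
        for task_dict in content['tasks']:
            synchronize(task_dict['id'])
    return content, status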
def business_create(task: Dict[str, Any]) -> Tuple[Content, HttpStatusCode]:
"""Creates new Task db record.
Fields which require to be of datetime type are explicitly converted here.
"""
try:
new_task = Task(
user_id=task['userId'],
host=task['hostname'],
command=task['command'],
# TODO Adjust API spec, optional fields
spawn_at=try_parse_input_datetime(task.get('spawnAt')),
terminate_at=try_parse_input_datetime(task.get('terminateAt')))
# assert all(task.values()), 'fields cannot be blank or null'
new_task.save()
except ValueError:
# Invalid string format for datetime
content, status = {'msg': G['bad_request']}, 422
except KeyError:
# At least one of required fields was not present
content, status = {'msg': G['bad_request']}, 422
except AssertionError as e:
content, status = {'msg': T['create']['failure']['invalid'].format(reason=e)}, 422
except Exception as e:
log.critical(e)
content, status = {'msg': G['internal_error']}, 500
else:
content, status = {'msg': T['create']['success'], 'task': new_task.as_dict}, 201
finally:
return content, status
@synchronize_task_record
def business_get(id: TaskId) -> Tuple[Content, HttpStatusCode]:
"""Fetches one Task db record"""
try:
task = Task.get(id)
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except Exception as e:
log.critical(e)
content, status = {'msg': G['internal_error']}, 500
else:
content, status = {'msg': T['get']['success'], 'task': task.as_dict}, 200
finally:
return content, status
# TODO What if task is already running: allow for updating command, hostname, etc.?
def business_update(id: TaskId, new_values: Dict[str, Any]) -> Tuple[Content, HttpStatusCode]:
"""Updates certain fields of a Task db record, see `allowed_fields`."""
allowed_fields = {'command', 'hostname', 'spawnAt', 'terminateAt'}
try:
assert set(new_values.keys()).issubset(allowed_fields), 'invalid field is present'
task = Task.get(id)
for field_name, new_value in new_values.items():
if field_name == 'hostname':
# API client is allowed to use more verbose name here (hostname <=> host)
field_name = 'host'
if field_name in {'spawnAt', 'terminateAt'}:
field_name = field_name.replace('At', '_at')
new_value = try_parse_input_datetime(new_value)
else:
# Check that every other field matches
assert hasattr(task, field_name), 'task has no {} column'.format(field_name)
setattr(task, field_name, new_value)
task.save()
except NoResultFound:
content, status = {'msg': T['not_found']}, 404
except ValueError:
# Invalid string format for datetime
| |
shifts[0]
recipients = [publicity_email]
salutation = 'Hello Publicity Officer,'
if pres.exists():
recipients.append(pres[0].email)
salutation = 'Hello President and Publicity Officer,'
ccs = [leader.get_email() for leader in leaders]
if not needed_coe_event or not edited:
body = COE_EVENT_EMAIL_BODY % {
'salutation': salutation,
'intro_text': ('An event has been created that '
'requires a COE calendar event to be '
'created.\nThe required information is '
'below:'),
'leader_uniq': leader1.uniqname,
'leader_name': leader1.get_firstlast_name(),
'leader_email': leader1.get_email(),
'event_name': self.name,
'blurb': self.announce_text[:200]+(
'...' if len(self.announce_text) > 200 else ''
),
'event_type': unicode(self.event_type),
'date': shift.start_time.strftime('%d %b %Y'),
'start_time': shift.start_time.strftime('%I:%M %p'),
'end_time': shift.end_time.strftime('%I:%M %p'),
'mult_shift': '\n'.join([
shift.start_time.strftime('%d %b %Y %I:%M %p') +
' -- ' +
shift.end_time.strftime('%d %b %Y %I:%M %p')
for shift in shifts
]),
'location': shift.location,
'description': self.description,
'event_link': reverse(
'event_cal:event_detail',
args=(self.id,)
)
}
subject = '[TBP] Event Needs COE Calendar Event.'
email = EmailMessage(
subject,
body,
'<EMAIL>',
recipients,
headers={'Reply-To': leader1.get_email()},
cc=ccs
)
email.send()
elif edited:
body = COE_EVENT_EMAIL_BODY % {
'salutation': salutation,
'intro_text': ('An event has been edited that '
'requires a COE calendar event to be '
'created.\nThe updated information is '
'below:'),
'leader_uniq': leader1.uniqname,
'leader_name': leader1.get_firstlast_name(),
'leader_email': leader1.get_email(),
'event_name': self.name,
'blurb': self.announce_text[:200]+(
'...' if len(self.announce_text) > 200 else ''
),
'event_type': unicode(self.event_type),
'date': shift.start_time.strftime('%d %b %Y'),
'start_time': shift.start_time.strftime('%I:%M %p'),
'end_time': shift.end_time.strftime('%I:%M %p'),
'mult_shift': '\n'.join([
shift.start_time.strftime('%d %b %Y %I:%M %p') +
' -- ' +
shift.end_time.strftime('%d %b %Y %I:%M %p')
for shift in shifts
]),
'location': shift.location,
'description': self.description,
'event_link': reverse(
'event_cal:event_detail',
args=(self.id,)
)
}
subject = '[TBP] Event Needs COE Calendar Event (updated).'
email = EmailMessage(
subject,
body,
'<EMAIL>',
recipients,
headers={'Reply-To': leader1.get_email()},
cc=ccs
)
email.send()
elif needed_coe_event:
leaders = self.leaders.all()
leader1 = leaders[0]
recipients = [publicity_email]
salutation = 'Hello Publicity Officer,'
if pres.exists():
recipients.append(pres[0].email)
salutation = 'Hello President and Publicity Officer,'
ccs = [leader.get_email() for leader in leaders]
body = COE_EVENT_BODY_CANCEL % {
'salutation': salutation,
'event_name': self.name,
'event_link': reverse(
'event_cal:event_detail',
args=(self.id,)
)
}
subject = '[TBP] Event Needs COE Calendar Event (cancelled).'
email = EmailMessage(
subject,
body,
'<EMAIL>',
recipients,
headers={'Reply-To': leader1.get_email()},
cc=ccs
)
email.send()
def email_participants(self, subject, body, sender):
""" Emails the event participants with the included information.
Automatically CCs the event leaders. The email is sent by tbp.mi.g but
the reply-to header is the intended sender's. This is currently due to
the way email is handled by django/the website.
"""
attendees = MemberProfile.objects.filter(event_attendee__event=self)
recipients = [attendee.get_email() for attendee in attendees]
ccs = [leader.get_email() for leader in self.leaders.all()]
email = EmailMessage(subject,
body,
'<EMAIL>',
recipients,
headers={'Reply-To': sender.get_email()},
cc=ccs)
email.send()
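# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A hypothetical caller of email_participants(): the message goes out from the
# site address while Reply-To points at the sender, as described in the
# docstring above. 'event' is assumed to be a saved CalendarEvent and
# 'officer_profile' any object providing get_email().
def _example_send_reminder(event, officer_profile):
    event.email_participants(
        subject='[TBP] Event reminder',
        body='A friendly reminder about the upcoming event.',
        sender=officer_profile,
    )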
def delete_gcal_event(self):
""" Deletes the event from the associated google calendar.
Events on the google calendars are actually event shifts, this goes
through and deletes all of the google calendar events associated with
this event's shifts.
If the website is in debug-mode, does nothing.
"""
if DEBUG:
return
c = get_credentials()
h = get_authorized_http(c)
if h:
service = get_service(h)
for shift in self.eventshift_set.all():
if shift.google_event_id:
try:
service.events().delete(
calendarId=self.google_cal.calendar_id,
eventId=shift.google_event_id
).execute()
except:
    # Deletion is best-effort: the calendar event may already be gone or the
    # API call may fail transiently, so any error is deliberately ignored.
    pass
def add_event_to_gcal(self, previous_cal=None):
""" Adds the event to the associated google calendar.
Events on the google calendars are actually event shifts, this goes
through and adds all of the google calendar events associated with
this event's shifts. If the event previously was associated with
google calendar events, it attempts to move them to the new calendar
(if it changed) or to update them. Otherwise (or that failing) it
creates new events.
If the website is in debug-mode, does nothing.
"""
if DEBUG:
return
c = get_credentials()
h = get_authorized_http(c)
current_error_wait = .1
if h:
service = get_service(h)
gcal = self.google_cal
for shift in self.eventshift_set.all():
new_event = True
successfully_added = False
if shift.google_event_id:
try:
if previous_cal and not (previous_cal == gcal):
service.events().move(
calendarId=previous_cal.calendar_id,
eventId=shift.google_event_id,
destination=gcal.calendar_id
).execute()
gcal_event = service.events().get(
calendarId=gcal.calendar_id,
eventId=shift.google_event_id
).execute()
if gcal_event['status'] == 'cancelled':
gcal_event = {}
new_event = True
else:
gcal_event['sequence'] = gcal_event['sequence']+1
new_event = False
except:
gcal_event = {}
else:
gcal_event = {}
gcal_event['summary'] = self.name
gcal_event['location'] = shift.location
gcal_event['start'] = {
'dateTime': shift.start_time.isoformat('T'),
'timeZone': 'America/Detroit'
}
gcal_event['end'] = {
'dateTime': shift.end_time.isoformat('T'),
'timeZone': 'America/Detroit'
}
gcal_event['recurrence'] = []
gcal_event['description'] = markdown(
force_unicode(self.description),
['nl2br'],
safe_mode=True,
enable_attributes=False
)
while not successfully_added:
try:
if not new_event:
service.events().update(
calendarId=gcal.calendar_id,
eventId=shift.google_event_id,
body=gcal_event
).execute()
else:
submitted_event = service.events().insert(
calendarId=gcal.calendar_id,
body=gcal_event
).execute()
shift.google_event_id = submitted_event['id']
shift.save()
successfully_added = True
except HttpError, err:
if err.resp.status in [403, 500, 503]:
time.sleep(current_error_wait)
current_error_wait = current_error_wait * 2
if 'sequence' in gcal_event:
gcal_event['sequence']=gcal_event['sequence']+1
else:
raise
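# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The while-loop above retries Google Calendar writes with an exponentially
# growing wait after transient errors (HTTP 403/500/503). The same pattern in
# isolation, with hypothetical 'do_request' and 'is_transient' callables:
def _example_retry_with_backoff(do_request, is_transient,
                                initial_wait=0.1, max_tries=6):
    wait = initial_wait
    for attempt in range(max_tries):
        try:
            return do_request()
        except Exception as err:
            if not is_transient(err) or attempt == max_tries - 1:
                raise
            time.sleep(wait)
            wait *= 2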
def can_complete_event(self):
""" Returns True if the event is able to be marked 'complete'.
If the event is already completed or has shifts not yet finished,
it cannot be completed. Otherwise it can be.
"""
s = self.eventshift_set
now = timezone.now()
s_future = s.filter(end_time__gte=now)
if self.completed:
return False
if s_future:
return False
else:
return True
def does_shift_overlap_with_users_other_shifts(self, shift, profile):
""" Checks if the given shift overlaps with the profile's other shifts.
If a user is signed up for multiple shifts, this checks to see if the
given shift overlaps with any of the other shifts. It is used to
prevent users from signing up for shifts that overlap if this behavior
is not intended.
"""
attendee_shifts = self.eventshift_set.filter(attendees__in=[profile])
overlapping_q1 = Q(start_time__lt=shift.start_time,
end_time__lte=shift.end_time)
overlapping_q2 = Q(start_time__gte=shift.end_time,
end_time__gt=shift.end_time)
query = overlapping_q1 | overlapping_q2
overlapping_shifts = attendee_shifts.distinct().exclude(query)
return overlapping_shifts.exists()
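# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The queryset above filters out non-overlapping shifts in the database. The
# underlying interval test is simply "each interval starts before the other
# one ends"; a plain-Python version (with a hypothetical name) reads:
def _example_intervals_overlap(start_a, end_a, start_b, end_b):
    # Touching endpoints are not treated as an overlap here.
    return start_a < end_b and start_b < end_a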
def get_fullness(self):
"""Gives a trinary evaluation of the fullness of the event.
Currently only works for single-shift events. Returns one of:
- Almost empty
- Nearly full
- Not full
depending on how many people have signed up relative to the maximum
attendance listed.
"""
shifts = self.eventshift_set.all()
if shifts.count() > 1:
return ''
shift = shifts[0]
num_attendees = 1.0*shift.attendees.count()
if shift.max_attendance is None:
if num_attendees < 5:
return '(Almost empty)'
else:
return '(Not full)'
elif shift.max_attendance:
frac_full = num_attendees/shift.max_attendance
num_spots = shift.max_attendance - num_attendees
if frac_full > .8 or num_spots <= 1:
return '(Nearly full)'
elif frac_full < .2:
return '(Almost empty)'
else:
return '(Not full)'
else:
return ''
class EventShift(models.Model):
""" An event's shift on the TBP Calendar.
Users sign up for events at the shift level (as an event may have users
sign up for multiple separate fractions of the total event). Shifts
capture the time and location information of the event for each possible
shift and also manage the signing up for the event itself.
"""
event = models.ForeignKey(CalendarEvent)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
location = models.CharField(max_length=100, blank=True, null=True)
on_campus = models.BooleanField(default=False)
google_event_id = models.CharField('ID for gcal event', max_length=64)
max_attendance = models.IntegerField(null=True, blank=True, default=None)
attendees = models.ManyToManyField('mig_main.UserProfile',
related_name="event_attendee",
blank=True, default=None)
electees_only = models.BooleanField(default=False)
actives_only = models.BooleanField(default=False)
grads_only = models.BooleanField(default=False)
ugrads_only = models.BooleanField(default=False)
def __unicode__(self):
""" Gives a string representation of the event shift."""
u = "%(name)s shift from %(start)s--%(end)s" % {
'name': self.event.name,
'start': str(self.start_time),
'end': str(self.end_time),
}
return u
def save(self, *args, **kwargs):
""" Saves the shift (likely to the database).
Also deletes the event's ajax entry from the cache to force a refresh.
"""
super(EventShift, self).save(*args, **kwargs)
cache.delete('EVENT_AJAX'+unicode(self.event.id))
self.event.save()
def delete(self, *args, **kwargs):
""" Deletes the shift from the database.
Also deletes the event's ajax entry from the cache to force a refresh.
"""
cache.delete('EVENT_AJAX'+unicode(self.event.id))
super(EventShift, self).delete(*args, **kwargs)
def is_full(self):
""" Returns True if the shift cannot accept more attendees."""
if self.max_attendance is None:
return False
if self.attendees.count() >= self.max_attendance:
return True
return False
def is_now(self):
""" Returns True if the shift is currently happening.
This takes the before and after grace periods into account.
"""
now = timezone.now()
if now > (self.start_time + self.event.before_grace):
if now < (self.end_time + self.event.after_grace):
return True
return False
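# --- Illustrative sketch (added for clarity; not part of the original module) ---
# is_now() accepts sign-ins inside the window
# (start_time + before_grace, end_time + after_grace); under that convention a
# negative before_grace would open the window early. The same check written
# for bare datetimes (names are hypothetical):
def _example_within_grace_window(now, start, end, before_grace, after_grace):
    return (start + before_grace) < now < (end + after_grace)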
def is_before_start(self):
""" Returns True if the shift has not yet started.
Does NOT account for the grace period.
"""
now = timezone.now()
if now > self.start_time:
return False
return True
def can_sign_in(self):
""" Returns True if the shift is currently accepting sign-ins."""
if (self.event.use_sign_in and
not self.is_full() and
self.is_now()):
return True
return False
def can_sign_up(self):
""" Returns True if the shift is accepting | |
"""Parent class DataN."""
import os
import os.path
from warnings import warn
from typing import Union, NoReturn
from pycifstar import Data, to_data
from cryspy.A_functions_base.function_1_markdown import md_to_html
from cryspy.A_functions_base.function_1_objects import \
get_functions_of_objet, get_table_html_for_variables
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
class DataN(object):
"""Data container of loops and items."""
def __repr__(self):
"""
Magic method print() is redefined.
Returns
-------
TYPE
DESCRIPTION.
"""
ls_out = [f"# Object '{self.get_name():}'"]
for item in self.items:
if isinstance(item, ItemN):
ls_out.append(f"{4*' ':}.{item.get_name():}")
else:
ls_out.append(f"{4*' ':}.{item.get_name():} (loop)")
method = self.methods_html()
if method != "":
ls_out.append(f"\n# Methods:\n{method:}\n")
return "\n".join(ls_out)
def _repr_html_(self):
"""Representation in HTML format."""
ls_html = [f"<h2>Object '{self.get_name():}'</h2>"]
ls_html.append(self.attributes_to_html())
ls_html.append(get_table_html_for_variables(self))
report = self.report_html()
if report != "":
ls_html.append(f"<h2>Description </h2> {report:}")
ls_html.append(f"<h2>Classes and methods</h2>")
try:
names = sorted([obj.__name__ for obj in self.CLASSES_MANDATORY])
if len(names) != 0:
ls_html.append("<b>Mandatory classes: </b>")
ls_html.append(f"{', '.join(names):}.<br>")
except AttributeError:
pass
try:
names = sorted([obj.__name__ for obj in self.CLASSES_OPTIONAL])
if len(names) != 0:
ls_html.append("<b>Optional classes: </b>")
ls_html.append(f"{', '.join(names):}.<br>")
except AttributeError:
pass
method = self.methods_html()
if method != "":
ls_html.append(f"<b>Methods: </b> {method:}")
return " ".join(ls_html)
def methods_html(self):
ls_html = [f".{func_name}" for func_name in
get_functions_of_objet(self)]
return ", ".join(ls_html)+"."
def attributes_to_html(self) -> str:
"""Representation of defined parameters in HTML format.
"""
ls_html = ["<table>"]
ls_html.append("<tr><th>Attribute</th><th> Note </th></tr>")
items_sorted = sorted(self.items, key=lambda item: item.get_name())
for item in items_sorted:
item_type = item.__doc__.strip().split("\n")[0]
ls_html.append(f"<tr><td>.{item.get_name():}</td>\
<td>{item_type:}</td></tr>")
ls_html.append("</table>")
return " ".join(ls_html)
def __str__(self):
"""
Magic method str() is redefined.
Returns
-------
TYPE
DESCRIPTION.
"""
return self.to_cif()
def __getattr__(self, name):
"""
Magic method __getattr__ is slightly changed for special attributes.
Parameters
----------
name : TYPE
DESCRIPTION.
Raises
------
AttributeError
DESCRIPTION.
Returns
-------
res : TYPE
DESCRIPTION.
"""
for item in self.items:
if name.lower() == item.get_name():
return item
raise AttributeError(f"Attribute '{name:}' is not defined")
def is_attribute(self, name):
"""Temporary construction.
Better to use:
try:
obj = self.attribute_name
except AttributeError as e:
obj = ...
"""
for item in self.items:
if name.lower() == item.get_name():
return True
return False
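# --- Illustrative sketch (added for clarity; not part of the original class) ---
# The docstring above recommends the try/except form over is_attribute().
# A hypothetical caller following that advice would look like:
def _example_get_item_or_none(data_obj, attr_name):
    try:
        return getattr(data_obj, attr_name)
    except AttributeError:
        return None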
def __setattr__(self, name, value) -> NoReturn:
"""
Rules to set attribute.
Parameters
----------
name : TYPE
DESCRIPTION.
value : TYPE
DESCRIPTION.
Returns
-------
NoReturn
DESCRIPTION.
"""
flag_items, flag_direct = False, True
if name == "data_name":
flag_direct = False
val_new = str(value).strip()
elif name == "items":
flag_items = True
self.add_items(value)
else:
cls_value = type(value)
if cls_value in self.CLASSES:
l_name = [item.get_name() for item in self.items]
name_new = value.get_name()
if name_new in l_name:
# Replace the existing item that carries the same name as the new value.
self.items.pop(l_name.index(name_new))
self.items.append(value)
flag_items, flag_direct = True, False
if name_new != name:
warn(f"Access to variable by '{name_new:}'.", UserWarning)
if flag_items:
pass
elif flag_direct:
self.__dict__[name] = value
else:
self.__dict__[name] = val_new
def add_items(self, items: list):
"""Add items."""
l_name = [item.get_name() for item in items]
s_name = set(l_name)
if len(s_name) != len(l_name):
warn("Double items were given.", UserWarning)
items_unique = [items[l_name.index(name)] for name in s_name]
else:
items_unique = items
l_ind_del = []
for ind_item, item in enumerate(self.items):
if item.get_name() in s_name:
l_ind_del.append(ind_item)
l_ind_del.reverse()
for ind in l_ind_del:
self.items.pop(ind)
for item in items_unique:
if isinstance(item, self.CLASSES):
self.items.append(item)
@classmethod
def make_container(cls, cls_mandatory, cls_optional, prefix):
"""Create DataN object as a container for items."""
if cls is not DataN:
warn("The method 'make_container' is used only for DataN class.")
return
obj = cls()
obj.__dict__["CLASSES_MANDATORY"] = cls_mandatory
obj.__dict__["CLASSES_OPTIONAL"] = cls_optional
obj.__dict__["CLASSES"] = cls_mandatory+cls_optional
obj.__dict__["PREFIX"] = prefix
obj.__dict__["D_DEFAULT"] = {}
obj.__dict__["items"] = []
obj.__dict__["data_name"] = ""
return obj
@classmethod
def get_mandatory_attributes(cls, separator: str = "_"):
"""Get a list of mandatory attributes from mandatory classes."""
l_res = []
for cls_obj in cls.CLASSES_MANDATORY:
if issubclass(cls_obj, ItemN):
cls_item = cls_obj
else: #LoopN
cls_item = cls_obj.ITEM_CLASS
l_res.extend([f"{cls_item.PREFIX:}{separator:}{name_cif:}"
for name_cif in cls_item.ATTR_MANDATORY_CIF])
return l_res
def __getitem__(self, name: Union[int, str]):
"""
Get item by index or predefined index.
Parameters
----------
name : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
if isinstance(name, int):
return self.items[name]
elif isinstance(name, str):
for item in self.items:
if name.lower() == item.get_name():
return item
return None
def get_name(self) -> str:
"""Name of object."""
name = self.PREFIX
data_name = self.data_name
if data_name is not None:
name = f"{name:}_{data_name:}"
return name.lower()
def get_variable_names(self) -> list:
"""
Get the names of variables as a list.
Each name is a nested tuple of the form
((#prefix, #data_name), (#prefix, #loop_name), (#attribute, #index)).
Returns
-------
list
    List of variable names.
"""
prefix = self.PREFIX
data_name = self.data_name
l_var = []
for item in self.items:
l_var.extend(item.get_variable_names())
l_var_out = [((prefix, data_name), ) + var for var in l_var]
return l_var_out
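# --- Illustrative sketch (added for clarity; not part of the original class) ---
# Every entry returned by get_variable_names() is a nested tuple whose first
# element is this block's own (PREFIX, data_name) pair, followed by the
# item-level parts. A hypothetical pretty-printer for such names:
def _example_format_variable_name(name):
    # 'name' might look like (("data", "phase1"), ("cell", None),
    # ("length_a", 0)); the exact levels depend on the items, so this shape
    # is an assumption used only for illustration.
    return ".".join(str(part[0]) for part in name)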
def is_variables(self) -> bool:
"""Define is there variables or not."""
flag = False
for item in self.items:
if item.is_variables():
flag = True
break
return flag
def get_variable_by_name(self, name: tuple) -> Union[float, int, str]:
"""
Get variable given by name.
Parameters
----------
name : tuple
((#prefix, #data_name), (#prefix, #loop_name),
 (#attribute, #index_item))
Returns
-------
Union[float, int, str]
DESCRIPTION.
"""
prefix = self.PREFIX
data_name = self.data_name
prefix_d, prefix_n = name[0], name[1]
if prefix_d != (prefix, data_name):
return None
name_sh = tuple(name[1:])
for item in self.items:
if isinstance(item, ItemN):
prefix = item.PREFIX
elif isinstance(item, LoopN):
item_cls = item.ITEM_CLASS
if item_cls is ItemN:
prefix = item[0].PREFIX
else:
prefix = item_cls.PREFIX
else:
raise AttributeError(
f"Unknown type object '{type(item).__name__:}'")
if prefix == prefix_n[0]:
res = item.get_variable_by_name(name_sh)
if res is not None:
return res
return None
def set_variable_by_name(self, name: tuple, value) -> NoReturn:
"""
Set value to variable given by name.
Parameters
----------
name : tuple
DESCRIPTION.
value : TYPE
DESCRIPTION.
Returns
-------
NoReturn
DESCRIPTION.
"""
prefix = self.PREFIX
data_name = self.data_name
prefix_d, prefix_n = name[0], name[1]
if prefix_d != (prefix, data_name):
return
name_sh = tuple(name[1:])
for item in self.items:
if isinstance(item, ItemN):
prefix = item.PREFIX
elif isinstance(item, LoopN):
item_cls = item.ITEM_CLASS
if item_cls is ItemN:
prefix = item[0].PREFIX
else:
prefix = item_cls.PREFIX
else:
raise AttributeError(
f"Unknown type object '{type(item).__name__:}'")
if prefix == prefix_n[0]:
item.set_variable_by_name(name_sh, value)
def is_defined(self) -> bool:
"""
If all mandatory attributes is defined.
Returns
-------
bool
DESCRIPTION.
"""
flag = True
for item in self.items:
if not(item.is_defined()):
flag = False
if isinstance(item, ItemN):
warn(f"{item.PREFIX:} is not fully described.",
UserWarning)
break
elif isinstance(item, LoopN):
warn(f"{item.ITEM_CLASS.PREFIX:} is not fully described.",
UserWarning)
break
if flag:
cls_items = [type(item) for item in self.items]
for cls_mand in self.CLASSES_MANDATORY:
if not(cls_mand in cls_items):
flag = False
warn(f"The object of {cls_mand.__name__:} is not defined.",
UserWarning)
break
return flag
def form_object(self):
"""Form object."""
pass
def to_cif(self, separator="_") -> str:
"""Print information about object in string in STAR format.
Arguments
---------
prefix: prefix in front of label of attribute
separator: separator between prefix and attribute ("_" or ".")
flag: for undefined attribute "." will be printed
flag_minimal if it's True the minimal set of object will be printed
Returns
-------
A string in STAR/CIF format
"""
ls_out = []
if self.data_name is None:
ls_out.append("data_\n")
else:
ls_out.append(f"data_{self.data_name:}\n")
l_item = self.items
l_s_itemn = [item.to_cif(separator=separator)+"\n"
for item in l_item if isinstance(item, ItemN)]
l_s_loopn = [item.to_cif(separator=separator)+"\n"
for item in l_item if isinstance(item, LoopN)]
if l_s_loopn != []:
n_max_loop = max([len(_) for _ in l_s_loopn])
if n_max_loop < 1000:
n_max_loop = 1000
else:
n_max_loop = 10000
l_n_max_item = [len(_) for _ in l_s_itemn]
ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item)
if _2 <= n_max_loop])
ls_out.extend([_ for _ in l_s_loopn])
ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item)
if _2 > n_max_loop])
return "\n".join(ls_out)
@classmethod
def from_cif(cls, string: str):
"""Generate object from string of CIF format."""
cif_data = Data()
flag = cif_data.take_from_string(string)
cif_items = cif_data.items
cif_loops = cif_data.loops
items = []
flag = True
n_mandatory = len(cls.CLASSES_MANDATORY)
for i_cls, cls_ in enumerate(cls.CLASSES):
flag = i_cls >= n_mandatory
if issubclass(cls_, ItemN):
prefix_cls = cls_.PREFIX
if cif_items.is_prefix(prefix_cls):
cif_items_prefix = cif_items[prefix_cls]
cif_string = str(cif_items_prefix)
obj_prefix = cls_.from_cif(cif_string)
if obj_prefix is not None:
items.append(obj_prefix)
flag = True
elif issubclass(cls_, LoopN):
prefix_cls = cls_.ITEM_CLASS.PREFIX
for cif_loop in cif_loops:
if cif_loop.is_prefix("_"+prefix_cls):
cif_string = str(cif_loop)
obj_prefix = cls_.from_cif(cif_string)
if obj_prefix is not None:
items.append(obj_prefix)
flag = True
if (not(flag)):
warn(f"Mandatory class: '{cls_.__name__:}' is not given.",
UserWarning)
break
if not(flag):
return None
data_name = cif_data.name
obj = cls(data_name=data_name, items=items)
obj.form_object()
return obj
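# --- Illustrative sketch (added for clarity; not part of the original class) ---
# from_cif() and to_cif() are meant to round-trip a data block. A hypothetical
# usage with some concrete DataN subclass (passed in as 'some_data_cls', an
# assumption) would be:
def _example_round_trip(some_data_cls, cif_string):
    obj = some_data_cls.from_cif(cif_string)
    if obj is None:
        # A mandatory class was missing from the input block.
        return None
    return obj.to_cif(separator="_")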
@classmethod
def from_cif_file(cls, f_name: str):
"""Read from cif file."""
if not(os.path.isfile(f_name)):
raise UserWarning(f"File {f_name:} is not found.")
str_from_cif = str(to_data(f_name))
obj = | |
= res.lineage()['column']
assert len(lineage) == 4
assert sorted(lineage.keys()) == ['id', 'new1', 'new2', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
assert lineage['new1'] == {('PROGRAM', '_XARRAY')}
assert lineage['new2'] == {('PROGRAM', '_XARRAY')}
# add_columns_array_in_place
# TODO test
def test_add_columns_frame(self):
tf1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
tf2 = XFrame({'new1': [3.0, 2.0, 1.0], 'new2': [30.0, 20.0, 10.0]})
res = tf1.add_columns(tf2)
lineage = res.lineage()['column']
assert len(lineage) == 4
assert sorted(lineage.keys()) == ['id', 'new1', 'new2', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
assert lineage['new1'] == {('PROGRAM', 'new1')}
assert lineage['new2'] == {('PROGRAM', 'new2')}
# add_columns_frame_in_place
# TODO test
def test_remove_column(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [3.0, 2.0, 1.0]})
res = t.remove_column('another')
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
# remove_column_in_place
# TODO test
def test_remove_columns(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'new1': [3.0, 2.0, 1.0], 'new2': [30.0, 20.0, 10.0]})
res = t.remove_columns(['new1', 'new2'])
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_swap_columns(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'x': [3.0, 2.0, 1.0]})
res = t.swap_columns('val', 'x')
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['id', 'val', 'x']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
assert lineage['x'] == {('PROGRAM', 'x')}
def test_reorder_columns(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'x': [3.0, 2.0, 1.0]})
res = t.reorder_columns(['val', 'x', 'id'])
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['id', 'val', 'x']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
assert lineage['x'] == {('PROGRAM', 'x')}
# add_column_const_in_place
# TODO test
# replace_column_const_in_place
# TODO test
# replace_single_column_in_place
# TODO test
def test_replace_column(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
a = XArray(['x', 'y', 'z'])
res = t.replace_column('val', a)
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', '_XARRAY')}
# replace_selected_column_in_place
# TODO test
def test_flat_map(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.flat_map(['number', 'letter'],
lambda row: [list(six.itervalues(row)) for _ in range(0, row['id'])],
column_types=[int, str])
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['letter', 'number']
assert lineage['number'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
assert lineage['letter'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
def test_flat_map_use_columns(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [10, 20, 30]})
res = t.flat_map(['number', 'letter'],
lambda row: [list(six.itervalues(row)) for _ in range(0, row['id'])],
column_types=[int, str], use_columns=['id', 'val'])
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['letter', 'number']
assert lineage['number'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
assert lineage['letter'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
def test_filterby_xarray(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
a = XArray([1, 3])
res = t.filterby(a, 'id').sort('id')
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id'), ('PROGRAM', '_XARRAY')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_stack_list(self):
t = XFrame({'id': [1, 2, 3], 'val': [['a1', 'b1', 'c1'], ['a2', 'b2'], ['a3', 'b3', 'c3', None]]})
res = t.stack('val', 'new-val')
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'new-val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['new-val'] == {('PROGRAM', 'val')}
def test_stack_dict(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': [{'a': 3, 'b': 2}, {'a': 2, 'c': 2}, {'c': 1, 'd': 3}, {}]})
res = t.stack('val', ['stack-key', 'stack-val'])
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['id', 'stack-key', 'stack-val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['stack-key'] == {('PROGRAM', 'val')}
assert lineage['stack-val'] == {('PROGRAM', 'val')}
def test_append(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [10, 20, 30], 'val': ['aa', 'bb', 'cc']})
res = t1.append(t2)
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_range_slice(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.range(slice(0, 2))
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_dropna(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.dropna()
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_add_row_number(self):
t = XFrame({'ident': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.add_row_number()
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['id', 'ident', 'val']
assert lineage['id'] == {('INDEX', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
assert lineage['ident'] == {('PROGRAM', 'ident')}
def test_pack_columns(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.pack_columns(columns=['id', 'val'], new_column_name='new')
lineage = res.lineage()['column']
assert len(lineage) == 1
assert sorted(lineage.keys()) == ['new']
assert lineage['new'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
def test_foreach(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t.foreach(lambda row, ini: row['id'] * 2)
def test_apply(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.apply(lambda row: row['id'] * 2)
lineage = res.lineage()['column']
assert len(lineage) == 1
assert sorted(lineage.keys()) == ['_XARRAY']
assert lineage['_XARRAY'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
def test_apply_with_use_columns(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [10, 20, 30]})
res = t.apply(lambda row: row['id'] * 2, use_columns=['id', 'val'])
lineage = res.lineage()['column']
assert len(lineage) == 1
assert sorted(lineage.keys()) == ['_XARRAY']
assert lineage['_XARRAY'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
def test_transform_col(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.transform_col('id', lambda row: row['id'] * 2)
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_transform_col_with_use_cols(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [10, 20, 30]})
res = t.transform_col('id', lambda row: row['id'] * 2, use_columns=['id', 'val'])
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['another', 'id', 'val']
assert lineage['id'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_transform_cols(self):
t = XFrame({'other': ['x', 'y', 'z'], 'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.transform_cols(['id', 'val'], lambda row: [row['id'] * 2, row['val'] + 'x'])
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['id', 'other', 'val']
assert lineage['id'] == {('PROGRAM', 'id'), ('PROGRAM', 'val'), ('PROGRAM', 'other')}
assert lineage['val'] == {('PROGRAM', 'id'), ('PROGRAM', 'val'), ('PROGRAM', 'other')}
def test_transform_cols_with_use_cols(self):
t = XFrame({'other': ['x', 'y', 'z'], 'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.transform_cols(['id', 'val'], lambda row: [row['id'] * 2, row['val'] + 'x'], use_columns=['id', 'val'])
lineage = res.lineage()['column']
assert len(lineage) == 3
assert sorted(lineage.keys()) == ['id', 'other', 'val']
assert lineage['id'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
assert lineage['val'] == {('PROGRAM', 'id'), ('PROGRAM', 'val')}
def test_filterby_int_id(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby(1, 'id').sort('id')
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'val']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['val'] == {('PROGRAM', 'val')}
def test_groupby_count(self):
t = XFrame({'id': [1, 2, 3, 1, 2, 1],
'val': ['a', 'b', 'c', 'd', 'e', 'f'],
'another': [10, 20, 30, 40, 50, 60]})
res = t.groupby('id', {'count': COUNT})
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['count', 'id']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['count'] == {('COUNT', '')}
def test_groupby_sum(self):
t = XFrame({'id': [1, 2, 3, 1, 2, 1],
'val': ['a', 'b', 'c', 'd', 'e', 'f'],
'another': [10, 20, 30, 40, 50, 60]})
res = t.groupby('id', {'sum': SUM('another')})
lineage = res.lineage()['column']
assert len(lineage) == 2
assert sorted(lineage.keys()) == ['id', 'sum']
assert lineage['id'] == {('PROGRAM', 'id')}
assert lineage['sum'] == {('PROGRAM', 'another')}
def test_join(self):
path = 'files/test-frame.csv'
real_path = os.path.realpath(path)
t1 = XFrame(path)
t2 | |
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Tede",
"lat": "8.5534",
"lng": "3.4465",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Oyo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Buni Yadi",
"lat": "11.2744",
"lng": "12.0085",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Lapai",
"lat": "9.0444",
"lng": "6.5709",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Donga",
"lat": "7.7217",
"lng": "10.0453",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kangiwa",
"lat": "12.5534",
"lng": "3.8181",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Maru",
"lat": "12.3336",
"lng": "6.4037",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Zamfara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Geidam",
"lat": "12.8944",
"lng": "11.9265",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Patigi",
"lat": "8.7285",
"lng": "5.7556",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kwara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Damboa",
"lat": "11.1553",
"lng": "12.7564",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Damasak",
"lat": "13.1052",
"lng": "12.5085",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Lissam",
"lat": "7.1961",
"lng": "10.0462",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mafa",
"lat": "11.9242",
"lng": "13.6007",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Agwara",
"lat": "10.7061",
"lng": "4.5813",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Yelwa",
"lat": "10.8350",
"lng": "4.7424",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Sunkani",
"lat": "8.7030",
"lng": "11.2576",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bangi",
"lat": "10.8337",
"lng": "5.8269",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oke-Oyi",
"lat": "8.5826",
"lng": "4.7162",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kwara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bode Saadu",
"lat": "8.9390",
"lng": "4.7823",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kwara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Benisheikh",
"lat": "11.8092",
"lng": "12.4915",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Nasarawa",
"lat": "8.5389",
"lng": "7.7082",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Nasarawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ibi",
"lat": "8.1812",
"lng": "9.7443",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Dapchi",
"lat": "12.4954",
"lng": "11.4998",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Lau",
"lat": "9.2083",
"lng": "11.2754",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Karim Lamido",
"lat": "9.3143",
"lng": "11.1873",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Magumeri",
"lat": "12.1145",
"lng": "12.8262",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Konduga",
"lat": "11.6533",
"lng": "13.4179",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Odukpani",
"lat": "5.1337",
"lng": "8.3381",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Akamkpa",
"lat": "5.3125",
"lng": "8.3552",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Yusufari",
"lat": "13.0661",
"lng": "11.1735",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mallam Fatori",
"lat": "13.6746",
"lng": "13.3395",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Wase",
"lat": "9.0942",
"lng": "9.9561",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Plateau",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bali",
"lat": "7.8553",
"lng": "10.9678",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mashegu",
"lat": "9.9721",
"lng": "5.7789",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Balle",
"lat": "13.4703",
"lng": "4.6812",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Sokoto",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kosubosu",
"lat": "9.5511",
"lng": "3.2284",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kwara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kanamma",
"lat": "13.0997",
"lng": "12.1079",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Baissa",
"lat": "7.2309",
"lng": "10.6244",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Babban Gida",
"lat": "12.1548",
"lng": "11.7709",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Yobe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "New Bussa",
"lat": "9.8864",
"lng": "4.5085",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kaiama",
"lat": "9.6053",
"lng": "3.9410",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kwara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Toungo",
"lat": "8.1173",
"lng": "12.0461",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Adamawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Serti",
"lat": "7.5057",
"lng": "11.3631",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Taraba",
"capital": "minor",
"population": "",
"population_proper": ""
}
]
features = [
{"AC" : "Air Conditioning"},
{"POOL" : "Swimming Pool"},
{"HEAT" : "Central Heating"},
{"LAUNTRY" : "Laundry Room"},
{"GYM" : "Gym"},
{"ALARM" : "Alarm"},
{"PARKING" : "Parking Lot"},
{"EXERCISE" : "Exercise Room"},
{"COOLING" : "Central Cooling"},
{"STORAGE" : "Srorage Room"},
{"WATER" : "Treated Water"},
]
class | |
>= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return N
def func_7034d50ae53245e68625a23b9291daaa(infile):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return B
def func_db34e22e8a64484484be32ac179afe80(infile):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return C
def func_9904e94971a14b1a8b9eb7dd54dd80ab(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return ans
def func_c5b90ae2231b4184bd9d8c5a0b908d2d(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return b
def func_c917279678694ace9ee7df628f006e7f(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return t
def func_258524f459364d7fa64acb78b083a0bc(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return i
def func_58e6c56b2505462caf9bbc9c83317748(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return S
def func_89e275f4037845a6ba76246e9a9b0120(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return B
def func_6e961f1e396d495db1d05d9fbdd34b40(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return C
def func_d15a48030f564715853fc835576ae819(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return p
def func_c82fe9e601a7488e9e6bedcdef19c5b8(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return a
def func_861a2a36e09c4211ad6c1f93882fa3e2(r, s, q, N):
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
| |
<reponame>gappleto97/arguman.org
# -*- coding:utf-8 -*-
import json
from datetime import timedelta
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from markdown2 import markdown
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db.models import Max, Sum
from django.utils.timezone import now
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.generic import DetailView, TemplateView, CreateView, View, RedirectView
from django.views.generic.edit import UpdateView
from django.utils.translation import get_language
from django.db.models import Count
from django.shortcuts import render
from blog.models import Post
from premises.models import Contention, Premise, Report
from premises.forms import (ArgumentCreationForm, PremiseCreationForm,
PremiseEditForm, ReportForm)
from premises.signals import (added_premise_for_premise,
added_premise_for_contention,
reported_as_fallacy,
supported_a_premise)
from premises.templatetags.premise_tags import check_content_deletion
from premises.mixins import PaginationMixin, NextURLMixin
from newsfeed.models import Entry
from profiles.mixins import LoginRequiredMixin
from profiles.models import Profile
from nouns.models import Channel
from i18n.utils import normalize_language_code
def get_ip_address(request):
return (request.META.get('HTTP_X_FORWARDED_FOR') or
request.META.get('REMOTE_ADDR'))
class ContentionDetailView(DetailView):
queryset = (Contention.objects
.select_related('user')
.prefetch_related('premises'))
context_object_name = 'contention'
def get_template_names(self):
view = self.request.GET.get("view")
name = ("list_view" if view == "list" else "tree_view")
return ["premises/%s.html" % name]
def get_parent(self):
premise_id = self.kwargs.get("premise_id")
if premise_id:
return get_object_or_404(Premise, id=premise_id)
def get_premises(self):
contention = self.get_parent() or self.get_object()
return contention.published_children()
def get_context_data(self, **kwargs):
contention = self.get_object()
edit_mode = (
self.request.user.is_superuser or
self.request.user.is_staff or
contention.user == self.request.user)
parent = self.get_parent()
serialized = contention.serialize(self.request.user)
description = contention.title
if parent:
description = parent.text
elif serialized['premises']:
description = serialized['premises'][0]['text']
return super(ContentionDetailView, self).get_context_data(
premises=self.get_premises(),
parent_premise=parent,
description=description,
path=contention.get_absolute_url(),
edit_mode=edit_mode,
serialized=serialized,
**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
host = request.META['HTTP_HOST']
if not host.startswith(settings.AVAILABLE_LANGUAGES):
return redirect(self.object.get_full_url(), permanent=True)
if not normalize_language_code(get_language()) == self.object.language:
return redirect(self.object.get_full_url(), permanent=True)
partial = request.GET.get('partial')
level = request.GET.get('level')
if partial:
contention = self.object
try:
serialized = contention.partial_serialize(int(partial), self.request.user)
except (StopIteration, ValueError):
raise Http404
return render(request, 'premises/tree.html', {
'premises': serialized['premises'],
'serialized': serialized,
'level': int(level)
})
return super(ContentionDetailView, self).get(request, *args, **kwargs)
class ContentionJsonView(DetailView):
model = Contention
def render_to_response(self, context, **response_kwargs):
contention = self.get_object(self.get_queryset())
return HttpResponse(json.dumps({
"nodes": self.build_tree(contention, self.request.user),
}), content_type="application/json")
def build_tree(self, contention, user):
return {
"name": contention.title,
"parent": None,
"pk": contention.pk,
"owner": contention.owner,
"sources": contention.sources,
"is_singular": self.is_singular(contention),
"children": self.get_premises(contention, user)
}
def get_premises(self, contention, user, parent=None):
children = [{
"pk": premise.pk,
"name": premise.text,
"parent": parent.text if parent else None,
"reportable_by_authenticated_user": self.user_can_report(
premise, user),
"report_count": premise.reports.count(),
"user": {
"id": premise.user.id,
"username": premise.user.username,
"absolute_url": reverse("auth_profile",
args=[premise.user.username])
},
"sources": premise.sources,
"premise_type": premise.premise_class(),
"children": (self.get_premises(contention, user, parent=premise)
if premise.published_children().exists() else [])
} for premise in contention.published_premises(parent)]
return children
def user_can_report(self, premise, user):
if user.is_authenticated() and user != premise.user:
return not premise.reported_by(user)
return False
def is_singular(self, contention):
result = contention.premises.all().aggregate(
max_sibling=Max('sibling_count'))
return result['max_sibling'] <= 1
class HomeView(TemplateView, PaginationMixin):
template_name = "index.html"
tab_class = "featured"
paginate_by = 20
def get_context_data(self, **kwargs):
contentions = self.get_contentions()
if self.request.user.is_authenticated():
notifications_qs = self.get_unread_notifications()
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
else:
notifications = None
return super(HomeView, self).get_context_data(
channels=self.get_channels(),
next_page_url=self.get_next_page_url(),
tab_class=self.tab_class,
notifications=notifications,
has_next_page=self.has_next_page(),
announcements=self.get_announcements(),
contentions=contentions, **kwargs)
def get_announcements(self):
return Post.objects.filter(is_announcement=True)
def get_unread_notifications(self):
return (self.request.user
.notifications
.filter(is_read=False)[:5])
def mark_as_read(self, notifications):
pks = notifications.values_list("id", flat=True)
(self.request.user
.notifications
.filter(id__in=pks)
.update(is_read=True))
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.language()
.filter(is_featured=True)
.order_by("-date_modification"))
if paginate:
contentions = (contentions[self.get_offset(): self.get_limit()])
return contentions
def get_channels(self):
return Channel.objects.filter(
language=normalize_language_code(get_language())
).order_by('order')
class NotificationsView(LoginRequiredMixin, HomeView):
template_name = "notifications.html"
def get_context_data(self, **kwargs):
notifications_qs = self.request.user.notifications.all()[:40]
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
return super(HomeView, self).get_context_data(
notifications=notifications,
**kwargs)
class FallaciesView(HomeView, PaginationMixin):
tab_class = "fallacies"
template_name = "fallacies.html"
paginate_by = 10
def get_context_data(self, **kwargs):
language = normalize_language_code(get_language())
fallacies = (Report
.objects
.filter(reason__isnull=False,
contention__language=language)
.order_by('-id')
[self.get_offset():self.get_limit()])
return super(FallaciesView, self).get_context_data(
fallacies=fallacies,
**kwargs)
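# Keyword search over contentions, profiles or premises; the ?type= query parameter picks the target set.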
class SearchView(HomeView):
tab_class = 'search'
template_name = 'search/search.html'
partial_templates = {
'contentions': 'search/contention.html',
'users': 'search/profile.html',
'premises': 'search/premise.html'
}
method_mapping = {'contentions': "get_contentions",
'users': "get_users",
'premises': "get_premises"}
def dispatch(self, request, *args, **kwargs):
self.type = request.GET.get('type', 'contentions')
if not self.method_mapping.get(self.type):
raise Http404()
return super(SearchView, self).dispatch(request, *args, **kwargs)
def get_keywords(self):
return self.request.GET.get('keywords') or ''
def is_json(self):
return (self.request.is_ajax() or
self.request.GET.get('json'))
def has_next_page(self):
method = getattr(self, self.method_mapping[self.type])
total = method().count()
return total > (self.get_offset() + self.paginate_by)
def get_search_bundle(self):
method = getattr(self, self.method_mapping[self.type])
return [{'template': self.partial_templates[self.type],
'object': item} for item in method()]
def get_context_data(self, **kwargs):
return super(SearchView, self).get_context_data(
results=self.get_search_bundle(),
**kwargs)
def get_next_page_url(self):
offset = self.get_offset() + self.paginate_by
return '?offset=%(offset)s&keywords=%(keywords)s&type=%(type)s' % {
"offset": offset,
"type": self.type,
"keywords": self.get_keywords()
}
def get_premises(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 3:
result = Premise.objects.none()
else:
result = (Premise.objects.filter(
argument__language=normalize_language_code(get_language()),
text__contains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_users(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Profile.objects.none()
else:
result = (Profile.objects.filter(
username__icontains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_contentions(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Contention.objects.none()
else:
result = (Contention
.objects
.filter(title__icontains=keywords,
language=normalize_language_code(get_language())))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def render_to_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
if not self.is_json():
return super(SearchView, self).render_to_response(
context, **response_kwargs)
results = [{
"id": result['object'].id,
"label": unicode(result['object'])
} for result in context['results']]
        response_kwargs.setdefault('content_type', 'application/json')
        return HttpResponse(json.dumps(results), **response_kwargs)
class NewsView(HomeView):
tab_class = "news"
def get_contentions(self, paginate=True):
contentions = (
Contention
.objects
.language()
.filter(is_published=True)
.order_by('-date_modification')
)
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class FeaturedJSONView(HomeView):
def render_to_response(self, context, **response_kwargs):
contentions = [contention.get_overview_bundle()
for contention in self.get_contentions()]
return HttpResponse(
json.dumps({"contentions": contentions},
cls=DjangoJSONEncoder),
content_type="application/json"
)
class NewsJSONView(NewsView):
def render_to_response(self, context, **response_kwargs):
contentions = [contention.get_overview_bundle()
for contention in self.get_contentions()]
return HttpResponse(
json.dumps({"contentions": contentions},
cls=DjangoJSONEncoder),
content_type="application/json"
)
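# Statistics tab; the ?what= query parameter selects one of the aggregations in method_mapping.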
class StatsView(HomeView):
tab_class = "stats"
template_name = "stats.html"
partial_templates = {
Profile: "stats/profile.html",
Contention: "stats/contention.html",
Premise: "stats/premise.html",
}
method_mapping = {
"active_users": "get_active_users",
"user_karma": "get_user_karma",
"disgraced_users": "get_disgraced_users",
"supported_premises": "get_supported_premises",
"fallacy_premises": "get_fallacy_premises",
"crowded_contentions": "get_crowded_contentions",
}
time_ranges = [7, 30]
def get_context_data(self, **kwargs):
return super(StatsView, self).get_context_data(
stats=self.get_stats_bundle(),
stats_type=self.get_stats_type(),
days=self.days,
**kwargs)
def get_stats_type(self):
return self.request.GET.get("what")
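    # Builds a {"<date_field>__gt": cutoff} filter from the ?days= parameter; "all" or absent means no time filter.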
def build_time_filters(self, date_field="date_creation"):
days = self.request.GET.get("days")
if not days or days == "all":
self.days = None
return {}
try:
days = int(days)
except (TypeError, ValueError):
days = None
if not days or days not in self.time_ranges:
raise Http404()
self.days = days
field_expression = "%s__gt" % date_field
return {
field_expression: timezone.now() - timedelta(days=days)
}
def get_stats_bundle(self):
stat_type = self.get_stats_type()
if stat_type not in self.method_mapping:
raise Http404()
method = getattr(self, self.method_mapping[stat_type])
return [
{
"template": self.partial_templates[type(item)],
"object": item
} for item in method()
]
def get_active_users(self):
return Profile.objects.annotate(
premise_count=Sum("user_premises"),
).filter(
premise_count__gt=0,
**self.build_time_filters(date_field="user_premises__date_creation")
).order_by("-premise_count")[:10]
def get_user_karma(self):
return Profile.objects.filter(
karma__gt=0,
**self.build_time_filters(date_field="user_premises__date_creation")
).order_by("-karma", "id").distinct()[:10]
def get_disgraced_users(self):
return Profile.objects.annotate(
report_count=Sum("user_premises__reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="user_premises__date_creation")
).order_by("-report_count")[:10]
def get_supported_premises(self):
return Premise.objects.annotate(
supporter_count=Sum("supporters")
).filter(
argument__language=get_language(),
supporter_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-supporter_count")[:50]
def get_fallacy_premises(self):
return Premise.objects.annotate(
report_count=Sum("reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-report_count")[:10]
def get_crowded_contentions(self):
return Contention.objects.annotate(
premise_count=Sum("premises"),
).filter(
language=normalize_language_code(get_language()),
premise_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-premise_count")[:10]
class UpdatedArgumentsView(HomeView):
tab_class = "updated"
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.filter(is_published=True)
.order_by('-date_modification'))
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class ControversialArgumentsView(HomeView):
tab_class = "controversial"
def get_contentions(self, paginate=True):
last_week = now() - timedelta(days=3)
contentions = (Contention
.objects
.annotate(num_children=Count('premises'))
.order_by('-num_children')
.filter(date_modification__gte=last_week))
if paginate:
return contentions[self.get_offset():self.get_limit()]
return contentions
class AboutView(TemplateView):
template_name = "about.html"
def get_text_file(self):
language = get_language()
return render_to_string("about-%s.md" % language)
def get_context_data(self, **kwargs):
content = markdown(self.get_text_file())
return super(AboutView, self).get_context_data(
content=content, **kwargs)
class TosView(TemplateView):
template_name = "tos.html"
def get_context_data(self, **kwargs):
content = markdown(render_to_string("tos.md"))
return super(TosView, self).get_context_data(
content=content, **kwargs)
class ArgumentCreationView(LoginRequiredMixin, CreateView):
template_name = "premises/new_contention.html"
form_class = ArgumentCreationForm
help_texts = {
'title': 'premises/examples/contention.html',
'owner': 'premises/examples/owner.html',
'sources': 'premises/examples/sources.html'
}
def get_form_class(self):
form_class = self.form_class
for key, value in self.help_texts.items():
help_text = render_to_string(value)
form_class.base_fields[key].help_text = help_text
return form_class
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.ip_address = get_ip_address(self.request)
form.instance.language = normalize_language_code(get_language())
form.instance.is_published = True
response = super(ArgumentCreationView, self).form_valid(form)
form.instance.update_sibling_counts()
form.instance.save_nouns()
form.instance.save()
return response
class ArgumentUpdateView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_contention.html"
form_class = ArgumentCreationForm
def get_queryset(self):
contentions = Contention.objects.all()
if self.request.user.is_superuser:
return contentions
return contentions.filter(user=self.request.user)
def form_valid(self, form):
response = super(ArgumentUpdateView, self).form_valid(form)
form.instance.update_sibling_counts()
form.instance.nouns.clear()
form.instance.save_nouns()
form.instance.update_premise_weights()
form.instance.save()
return response
class RandomArgumentView(RedirectView):
permanent = False
def get_redirect_url(self, *args, **kwargs):
argument = Contention.objects.annotate(
premise_count=Count('premises')
).filter(
premise_count__gt=2,
language=normalize_language_code(get_language())
).order_by(
'?'
)[0]
return argument.get_absolute_url()
class ArgumentPublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = True
contention.save()
messages.info(request, u"Argument is published now.")
return redirect(contention)
class ArgumentUnpublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = False
contention.save()
messages.info(request, u"Argüman yayından kaldırıldı.")
return redirect(contention)
class ArgumentDeleteView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
if check_content_deletion(contention):
# remove notification
Entry.objects.delete(contention.get_newsfeed_type(), contention.id)
contention.delete()
messages.info(request, u"Argument has been removed.")
return redirect("home")
_wasm_module_same = dll.wasm_module_same
_wasm_module_same.restype = c_bool
_wasm_module_same.argtypes = [POINTER(wasm_module_t), POINTER(wasm_module_t)]
def wasm_module_same(arg0: Any, arg1: Any) -> c_bool:
    return _wasm_module_same(arg0, arg1) # type: ignore
_wasm_module_get_host_info = dll.wasm_module_get_host_info
_wasm_module_get_host_info.restype = c_void_p
_wasm_module_get_host_info.argtypes = [POINTER(wasm_module_t)]
def wasm_module_get_host_info(arg0: Any) -> pointer:
return _wasm_module_get_host_info(arg0) # type: ignore
_wasm_module_set_host_info = dll.wasm_module_set_host_info
_wasm_module_set_host_info.restype = None
_wasm_module_set_host_info.argtypes = [POINTER(wasm_module_t), c_void_p]
def wasm_module_set_host_info(arg0: Any, arg1: Any) -> None:
return _wasm_module_set_host_info(arg0, arg1) # type: ignore
_wasm_module_set_host_info_with_finalizer = dll.wasm_module_set_host_info_with_finalizer
_wasm_module_set_host_info_with_finalizer.restype = None
_wasm_module_set_host_info_with_finalizer.argtypes = [POINTER(wasm_module_t), c_void_p, CFUNCTYPE(None, c_void_p)]
def wasm_module_set_host_info_with_finalizer(arg0: Any, arg1: Any, arg2: Any) -> None:
return _wasm_module_set_host_info_with_finalizer(arg0, arg1, arg2) # type: ignore
_wasm_module_as_ref = dll.wasm_module_as_ref
_wasm_module_as_ref.restype = POINTER(wasm_ref_t)
_wasm_module_as_ref.argtypes = [POINTER(wasm_module_t)]
def wasm_module_as_ref(arg0: Any) -> pointer:
return _wasm_module_as_ref(arg0) # type: ignore
_wasm_module_as_ref_const = dll.wasm_module_as_ref_const
_wasm_module_as_ref_const.restype = POINTER(wasm_ref_t)
_wasm_module_as_ref_const.argtypes = [POINTER(wasm_module_t)]
def wasm_module_as_ref_const(arg0: Any) -> pointer:
return _wasm_module_as_ref_const(arg0) # type: ignore
class wasm_shared_module_t(Structure):
pass
_wasm_shared_module_delete = dll.wasm_shared_module_delete
_wasm_shared_module_delete.restype = None
_wasm_shared_module_delete.argtypes = [POINTER(wasm_shared_module_t)]
def wasm_shared_module_delete(arg0: Any) -> None:
return _wasm_shared_module_delete(arg0) # type: ignore
_wasm_module_share = dll.wasm_module_share
_wasm_module_share.restype = POINTER(wasm_shared_module_t)
_wasm_module_share.argtypes = [POINTER(wasm_module_t)]
def wasm_module_share(arg0: Any) -> pointer:
return _wasm_module_share(arg0) # type: ignore
_wasm_module_obtain = dll.wasm_module_obtain
_wasm_module_obtain.restype = POINTER(wasm_module_t)
_wasm_module_obtain.argtypes = [POINTER(wasm_store_t), POINTER(wasm_shared_module_t)]
def wasm_module_obtain(arg0: Any, arg1: Any) -> pointer:
return _wasm_module_obtain(arg0, arg1) # type: ignore
_wasm_module_new = dll.wasm_module_new
_wasm_module_new.restype = POINTER(wasm_module_t)
_wasm_module_new.argtypes = [POINTER(wasm_store_t), POINTER(wasm_byte_vec_t)]
def wasm_module_new(arg0: Any, binary: Any) -> pointer:
return _wasm_module_new(arg0, binary) # type: ignore
_wasm_module_validate = dll.wasm_module_validate
_wasm_module_validate.restype = c_bool
_wasm_module_validate.argtypes = [POINTER(wasm_store_t), POINTER(wasm_byte_vec_t)]
def wasm_module_validate(arg0: Any, binary: Any) -> c_bool:
return _wasm_module_validate(arg0, binary) # type: ignore
_wasm_module_imports = dll.wasm_module_imports
_wasm_module_imports.restype = None
_wasm_module_imports.argtypes = [POINTER(wasm_module_t), POINTER(wasm_importtype_vec_t)]
def wasm_module_imports(arg0: Any, out: Any) -> None:
return _wasm_module_imports(arg0, out) # type: ignore
_wasm_module_exports = dll.wasm_module_exports
_wasm_module_exports.restype = None
_wasm_module_exports.argtypes = [POINTER(wasm_module_t), POINTER(wasm_exporttype_vec_t)]
def wasm_module_exports(arg0: Any, out: Any) -> None:
return _wasm_module_exports(arg0, out) # type: ignore
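# Minimal usage sketch for the module bindings above (assumes wasm_engine_new,
# wasm_store_new and the wasm_byte_vec_t helpers are bound earlier in this
# generated module; the names follow the standard wasm C API):
#
#   engine = wasm_engine_new()
#   store = wasm_store_new(engine)
#   binary = wasm_byte_vec_t()
#   wasm_byte_vec_new(byref(binary), len(raw_bytes), raw_bytes)
#   if wasm_module_validate(store, byref(binary)):
#       module = wasm_module_new(store, byref(binary))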
class wasm_func_t(Structure):
pass
_wasm_func_delete = dll.wasm_func_delete
_wasm_func_delete.restype = None
_wasm_func_delete.argtypes = [POINTER(wasm_func_t)]
def wasm_func_delete(arg0: Any) -> None:
return _wasm_func_delete(arg0) # type: ignore
_wasm_func_copy = dll.wasm_func_copy
_wasm_func_copy.restype = POINTER(wasm_func_t)
_wasm_func_copy.argtypes = [POINTER(wasm_func_t)]
def wasm_func_copy(arg0: Any) -> pointer:
return _wasm_func_copy(arg0) # type: ignore
_wasm_func_same = dll.wasm_func_same
_wasm_func_same.restype = c_bool
_wasm_func_same.argtypes = [POINTER(wasm_func_t), POINTER(wasm_func_t)]
def wasm_func_same(arg0: Any, arg1: Any) -> c_bool:
return _wasm_func_same(arg0, arg1) # type: ignore
_wasm_func_get_host_info = dll.wasm_func_get_host_info
_wasm_func_get_host_info.restype = c_void_p
_wasm_func_get_host_info.argtypes = [POINTER(wasm_func_t)]
def wasm_func_get_host_info(arg0: Any) -> pointer:
return _wasm_func_get_host_info(arg0) # type: ignore
_wasm_func_set_host_info = dll.wasm_func_set_host_info
_wasm_func_set_host_info.restype = None
_wasm_func_set_host_info.argtypes = [POINTER(wasm_func_t), c_void_p]
def wasm_func_set_host_info(arg0: Any, arg1: Any) -> None:
return _wasm_func_set_host_info(arg0, arg1) # type: ignore
_wasm_func_set_host_info_with_finalizer = dll.wasm_func_set_host_info_with_finalizer
_wasm_func_set_host_info_with_finalizer.restype = None
_wasm_func_set_host_info_with_finalizer.argtypes = [POINTER(wasm_func_t), c_void_p, CFUNCTYPE(None, c_void_p)]
def wasm_func_set_host_info_with_finalizer(arg0: Any, arg1: Any, arg2: Any) -> None:
return _wasm_func_set_host_info_with_finalizer(arg0, arg1, arg2) # type: ignore
_wasm_func_as_ref = dll.wasm_func_as_ref
_wasm_func_as_ref.restype = POINTER(wasm_ref_t)
_wasm_func_as_ref.argtypes = [POINTER(wasm_func_t)]
def wasm_func_as_ref(arg0: Any) -> pointer:
return _wasm_func_as_ref(arg0) # type: ignore
_wasm_func_as_ref_const = dll.wasm_func_as_ref_const
_wasm_func_as_ref_const.restype = POINTER(wasm_ref_t)
_wasm_func_as_ref_const.argtypes = [POINTER(wasm_func_t)]
def wasm_func_as_ref_const(arg0: Any) -> pointer:
return _wasm_func_as_ref_const(arg0) # type: ignore
wasm_func_callback_t = CFUNCTYPE(c_size_t, POINTER(wasm_val_t), POINTER(wasm_val_t))
wasm_func_callback_with_env_t = CFUNCTYPE(c_size_t, c_void_p, POINTER(wasm_val_t), POINTER(wasm_val_t))
_wasm_func_new = dll.wasm_func_new
_wasm_func_new.restype = POINTER(wasm_func_t)
_wasm_func_new.argtypes = [POINTER(wasm_store_t), POINTER(wasm_functype_t), wasm_func_callback_t]
def wasm_func_new(arg0: Any, arg1: Any, arg2: Any) -> pointer:
return _wasm_func_new(arg0, arg1, arg2) # type: ignore
_wasm_func_new_with_env = dll.wasm_func_new_with_env
_wasm_func_new_with_env.restype = POINTER(wasm_func_t)
_wasm_func_new_with_env.argtypes = [POINTER(wasm_store_t), POINTER(wasm_functype_t), wasm_func_callback_with_env_t, c_void_p, CFUNCTYPE(None, c_void_p)]
def wasm_func_new_with_env(arg0: Any, type: Any, arg2: Any, env: Any, finalizer: Any) -> pointer:
return _wasm_func_new_with_env(arg0, type, arg2, env, finalizer) # type: ignore
_wasm_func_type = dll.wasm_func_type
_wasm_func_type.restype = POINTER(wasm_functype_t)
_wasm_func_type.argtypes = [POINTER(wasm_func_t)]
def wasm_func_type(arg0: Any) -> pointer:
return _wasm_func_type(arg0) # type: ignore
_wasm_func_param_arity = dll.wasm_func_param_arity
_wasm_func_param_arity.restype = c_size_t
_wasm_func_param_arity.argtypes = [POINTER(wasm_func_t)]
def wasm_func_param_arity(arg0: Any) -> int:
return _wasm_func_param_arity(arg0) # type: ignore
_wasm_func_result_arity = dll.wasm_func_result_arity
_wasm_func_result_arity.restype = c_size_t
_wasm_func_result_arity.argtypes = [POINTER(wasm_func_t)]
def wasm_func_result_arity(arg0: Any) -> int:
return _wasm_func_result_arity(arg0) # type: ignore
_wasm_func_call = dll.wasm_func_call
_wasm_func_call.restype = POINTER(wasm_trap_t)
_wasm_func_call.argtypes = [POINTER(wasm_func_t), POINTER(wasm_val_t), POINTER(wasm_val_t)]
def wasm_func_call(arg0: Any, args: Any, results: Any) -> pointer:
return _wasm_func_call(arg0, args, results) # type: ignore
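# Call sketch for an exported function (a 1-parameter / 1-result signature is
# assumed purely for illustration; wasm_val_t is defined earlier in this module):
#
#   params = (wasm_val_t * 1)()
#   results = (wasm_val_t * 1)()
#   trap = wasm_func_call(func, params, results)
#   if trap:  # a non-NULL trap pointer means the call trapped
#       wasm_trap_delete(trap)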
class wasm_global_t(Structure):
pass
_wasm_global_delete = dll.wasm_global_delete
_wasm_global_delete.restype = None
_wasm_global_delete.argtypes = [POINTER(wasm_global_t)]
def wasm_global_delete(arg0: Any) -> None:
return _wasm_global_delete(arg0) # type: ignore
_wasm_global_copy = dll.wasm_global_copy
_wasm_global_copy.restype = POINTER(wasm_global_t)
_wasm_global_copy.argtypes = [POINTER(wasm_global_t)]
def wasm_global_copy(arg0: Any) -> pointer:
return _wasm_global_copy(arg0) # type: ignore
_wasm_global_same = dll.wasm_global_same
_wasm_global_same.restype = c_bool
_wasm_global_same.argtypes = [POINTER(wasm_global_t), POINTER(wasm_global_t)]
def wasm_global_same(arg0: Any, arg1: Any) -> c_bool:
return _wasm_global_same(arg0, arg1) # type: ignore
_wasm_global_get_host_info = dll.wasm_global_get_host_info
_wasm_global_get_host_info.restype = c_void_p
_wasm_global_get_host_info.argtypes = [POINTER(wasm_global_t)]
def wasm_global_get_host_info(arg0: Any) -> pointer:
return _wasm_global_get_host_info(arg0) # type: ignore
_wasm_global_set_host_info = dll.wasm_global_set_host_info
_wasm_global_set_host_info.restype = None
_wasm_global_set_host_info.argtypes = [POINTER(wasm_global_t), c_void_p]
def wasm_global_set_host_info(arg0: Any, arg1: Any) -> None:
return _wasm_global_set_host_info(arg0, arg1) # type: ignore
_wasm_global_set_host_info_with_finalizer = dll.wasm_global_set_host_info_with_finalizer
_wasm_global_set_host_info_with_finalizer.restype = None
_wasm_global_set_host_info_with_finalizer.argtypes = [POINTER(wasm_global_t), c_void_p, CFUNCTYPE(None, c_void_p)]
def wasm_global_set_host_info_with_finalizer(arg0: Any, arg1: Any, arg2: Any) -> None:
return _wasm_global_set_host_info_with_finalizer(arg0, arg1, arg2) # type: ignore
_wasm_global_as_ref = dll.wasm_global_as_ref
_wasm_global_as_ref.restype = POINTER(wasm_ref_t)
_wasm_global_as_ref.argtypes = [POINTER(wasm_global_t)]
def wasm_global_as_ref(arg0: Any) -> pointer:
return _wasm_global_as_ref(arg0) # type: ignore
_wasm_global_as_ref_const = dll.wasm_global_as_ref_const
_wasm_global_as_ref_const.restype = POINTER(wasm_ref_t)
_wasm_global_as_ref_const.argtypes = [POINTER(wasm_global_t)]
def wasm_global_as_ref_const(arg0: Any) -> pointer:
return _wasm_global_as_ref_const(arg0) # type: ignore
_wasm_global_new = dll.wasm_global_new
_wasm_global_new.restype = POINTER(wasm_global_t)
_wasm_global_new.argtypes = [POINTER(wasm_store_t), POINTER(wasm_globaltype_t), POINTER(wasm_val_t)]
def wasm_global_new(arg0: Any, arg1: Any, arg2: Any) -> pointer:
return _wasm_global_new(arg0, arg1, arg2) # type: ignore
_wasm_global_type = dll.wasm_global_type
_wasm_global_type.restype = POINTER(wasm_globaltype_t)
_wasm_global_type.argtypes = [POINTER(wasm_global_t)]
def wasm_global_type(arg0: Any) -> pointer:
return _wasm_global_type(arg0) # type: ignore
_wasm_global_get = dll.wasm_global_get
_wasm_global_get.restype = None
_wasm_global_get.argtypes = [POINTER(wasm_global_t), POINTER(wasm_val_t)]
def wasm_global_get(arg0: Any, out: Any) -> None:
return _wasm_global_get(arg0, out) # type: ignore
_wasm_global_set = dll.wasm_global_set
_wasm_global_set.restype = None
_wasm_global_set.argtypes = [POINTER(wasm_global_t), POINTER(wasm_val_t)]
def wasm_global_set(arg0: Any, arg1: Any) -> None:
return _wasm_global_set(arg0, arg1) # type: ignore
class wasm_table_t(Structure):
pass
_wasm_table_delete = dll.wasm_table_delete
_wasm_table_delete.restype = None
_wasm_table_delete.argtypes = [POINTER(wasm_table_t)]
def wasm_table_delete(arg0: Any) -> None:
return _wasm_table_delete(arg0) # type: ignore
_wasm_table_copy = dll.wasm_table_copy
_wasm_table_copy.restype = POINTER(wasm_table_t)
_wasm_table_copy.argtypes = [POINTER(wasm_table_t)]
def wasm_table_copy(arg0: Any) -> pointer:
return _wasm_table_copy(arg0) # type: ignore
_wasm_table_same = dll.wasm_table_same
_wasm_table_same.restype = c_bool
_wasm_table_same.argtypes = [POINTER(wasm_table_t), POINTER(wasm_table_t)]
def wasm_table_same(arg0: Any, arg1: Any) -> c_bool:
return _wasm_table_same(arg0, arg1) # type: ignore
_wasm_table_get_host_info = dll.wasm_table_get_host_info
_wasm_table_get_host_info.restype = c_void_p
_wasm_table_get_host_info.argtypes = [POINTER(wasm_table_t)]
def wasm_table_get_host_info(arg0: Any) -> pointer:
return _wasm_table_get_host_info(arg0) # type: ignore
_wasm_table_set_host_info = dll.wasm_table_set_host_info
_wasm_table_set_host_info.restype = None
_wasm_table_set_host_info.argtypes = [POINTER(wasm_table_t), c_void_p]
def wasm_table_set_host_info(arg0: Any, arg1: Any) -> None:
return _wasm_table_set_host_info(arg0, arg1) # type: ignore
_wasm_table_set_host_info_with_finalizer = dll.wasm_table_set_host_info_with_finalizer
_wasm_table_set_host_info_with_finalizer.restype = None
_wasm_table_set_host_info_with_finalizer.argtypes = [POINTER(wasm_table_t), c_void_p, CFUNCTYPE(None, c_void_p)]
def wasm_table_set_host_info_with_finalizer(arg0: Any, arg1: Any, arg2: Any) -> None:
return _wasm_table_set_host_info_with_finalizer(arg0, arg1, arg2) # type: ignore
_wasm_table_as_ref = dll.wasm_table_as_ref
_wasm_table_as_ref.restype = POINTER(wasm_ref_t)
_wasm_table_as_ref.argtypes = [POINTER(wasm_table_t)]
def wasm_table_as_ref(arg0: Any) -> pointer:
return _wasm_table_as_ref(arg0) # type: ignore
_wasm_table_as_ref_const = dll.wasm_table_as_ref_const
_wasm_table_as_ref_const.restype = POINTER(wasm_ref_t)
_wasm_table_as_ref_const.argtypes = [POINTER(wasm_table_t)]
def wasm_table_as_ref_const(arg0: Any) -> pointer:
return _wasm_table_as_ref_const(arg0) # type: ignore
wasm_table_size_t = c_uint32
_wasm_table_new = dll.wasm_table_new
_wasm_table_new.restype = POINTER(wasm_table_t)
_wasm_table_new.argtypes = [POINTER(wasm_store_t), POINTER(wasm_tabletype_t), POINTER(wasm_ref_t)]
def wasm_table_new(arg0: Any, arg1: Any, init: Any) -> pointer:
return _wasm_table_new(arg0, arg1, init) # type: ignore
_wasm_table_type = dll.wasm_table_type
_wasm_table_type.restype = POINTER(wasm_tabletype_t)
_wasm_table_type.argtypes = [POINTER(wasm_table_t)]
def wasm_table_type(arg0: Any) -> pointer:
return _wasm_table_type(arg0) # type: ignore
_wasm_table_get = dll.wasm_table_get
_wasm_table_get.restype = POINTER(wasm_ref_t)
_wasm_table_get.argtypes = [POINTER(wasm_table_t), wasm_table_size_t]
def wasm_table_get(arg0: Any, index: Any) -> pointer:
return _wasm_table_get(arg0, index) # type: ignore
_wasm_table_set = dll.wasm_table_set
_wasm_table_set.restype = c_bool
_wasm_table_set.argtypes = [POINTER(wasm_table_t), wasm_table_size_t, POINTER(wasm_ref_t)]
def wasm_table_set(arg0: Any, index: Any, arg2: Any) -> c_bool:
return _wasm_table_set(arg0, index, arg2) # type: ignore
_wasm_table_size = dll.wasm_table_size
_wasm_table_size.restype = wasm_table_size_t
_wasm_table_size.argtypes = [POINTER(wasm_table_t)]
def wasm_table_size(arg0: Any) -> int:
return _wasm_table_size(arg0) # type: ignore
_wasm_table_grow = dll.wasm_table_grow
_wasm_table_grow.restype = c_bool
_wasm_table_grow.argtypes = [POINTER(wasm_table_t), wasm_table_size_t, POINTER(wasm_ref_t)]
def wasm_table_grow(arg0: Any, delta: Any, init: Any) -> c_bool:
return _wasm_table_grow(arg0, delta, init) # type: ignore
class wasm_memory_t(Structure):
pass
_wasm_memory_delete = dll.wasm_memory_delete
_wasm_memory_delete.restype = None
_wasm_memory_delete.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_delete(arg0: Any) -> None:
return _wasm_memory_delete(arg0) # type: ignore
_wasm_memory_copy = dll.wasm_memory_copy
_wasm_memory_copy.restype = POINTER(wasm_memory_t)
_wasm_memory_copy.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_copy(arg0: Any) -> pointer:
return _wasm_memory_copy(arg0) # type: ignore
_wasm_memory_same = dll.wasm_memory_same
_wasm_memory_same.restype = c_bool
_wasm_memory_same.argtypes = [POINTER(wasm_memory_t), POINTER(wasm_memory_t)]
def wasm_memory_same(arg0: Any, arg1: Any) -> c_bool:
return _wasm_memory_same(arg0, arg1) # type: ignore
_wasm_memory_get_host_info = dll.wasm_memory_get_host_info
_wasm_memory_get_host_info.restype = c_void_p
_wasm_memory_get_host_info.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_get_host_info(arg0: Any) -> pointer:
return _wasm_memory_get_host_info(arg0) # type: ignore
_wasm_memory_set_host_info = dll.wasm_memory_set_host_info
_wasm_memory_set_host_info.restype = None
_wasm_memory_set_host_info.argtypes = [POINTER(wasm_memory_t), c_void_p]
def wasm_memory_set_host_info(arg0: Any, arg1: Any) -> None:
return _wasm_memory_set_host_info(arg0, arg1) # type: ignore
_wasm_memory_set_host_info_with_finalizer = dll.wasm_memory_set_host_info_with_finalizer
_wasm_memory_set_host_info_with_finalizer.restype = None
_wasm_memory_set_host_info_with_finalizer.argtypes = [POINTER(wasm_memory_t), c_void_p, CFUNCTYPE(None, c_void_p)]
def wasm_memory_set_host_info_with_finalizer(arg0: Any, arg1: Any, arg2: Any) -> None:
return _wasm_memory_set_host_info_with_finalizer(arg0, arg1, arg2) # type: ignore
_wasm_memory_as_ref = dll.wasm_memory_as_ref
_wasm_memory_as_ref.restype = POINTER(wasm_ref_t)
_wasm_memory_as_ref.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_as_ref(arg0: Any) -> pointer:
return _wasm_memory_as_ref(arg0) # type: ignore
_wasm_memory_as_ref_const = dll.wasm_memory_as_ref_const
_wasm_memory_as_ref_const.restype = POINTER(wasm_ref_t)
_wasm_memory_as_ref_const.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_as_ref_const(arg0: Any) -> pointer:
return _wasm_memory_as_ref_const(arg0) # type: ignore
wasm_memory_pages_t = c_uint32
_wasm_memory_new = dll.wasm_memory_new
_wasm_memory_new.restype = POINTER(wasm_memory_t)
_wasm_memory_new.argtypes = [POINTER(wasm_store_t), POINTER(wasm_memorytype_t)]
def wasm_memory_new(arg0: Any, arg1: Any) -> pointer:
return _wasm_memory_new(arg0, arg1) # type: ignore
_wasm_memory_type = dll.wasm_memory_type
_wasm_memory_type.restype = POINTER(wasm_memorytype_t)
_wasm_memory_type.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_type(arg0: Any) -> pointer:
return _wasm_memory_type(arg0) # type: ignore
_wasm_memory_data = dll.wasm_memory_data
_wasm_memory_data.restype = POINTER(c_ubyte)
_wasm_memory_data.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_data(arg0: Any) -> pointer:
return _wasm_memory_data(arg0) # type: ignore
_wasm_memory_data_size = dll.wasm_memory_data_size
_wasm_memory_data_size.restype = c_size_t
_wasm_memory_data_size.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_data_size(arg0: Any) -> int:
return _wasm_memory_data_size(arg0) # type: ignore
_wasm_memory_size = dll.wasm_memory_size
_wasm_memory_size.restype = wasm_memory_pages_t
_wasm_memory_size.argtypes = [POINTER(wasm_memory_t)]
def wasm_memory_size(arg0: Any) -> int:
return _wasm_memory_size(arg0) # type: ignore
_wasm_memory_grow = dll.wasm_memory_grow
_wasm_memory_grow.restype = c_bool
_wasm_memory_grow.argtypes = [POINTER(wasm_memory_t), wasm_memory_pages_t]
def wasm_memory_grow(arg0: Any, delta: Any) -> c_bool:
return _wasm_memory_grow(arg0, delta) # type: ignore
class wasm_extern_t(Structure):
pass
_wasm_extern_delete = dll.wasm_extern_delete
_wasm_extern_delete.restype = None
_wasm_extern_delete.argtypes = [POINTER(wasm_extern_t)]
def wasm_extern_delete(arg0: Any) -> None:
return _wasm_extern_delete(arg0) # type: ignore
_wasm_extern_copy = dll.wasm_extern_copy
_wasm_extern_copy.restype = POINTER(wasm_extern_t)
_wasm_extern_copy.argtypes = [POINTER(wasm_extern_t)]
def wasm_extern_copy(arg0: Any) -> pointer:
return _wasm_extern_copy(arg0) # type: ignore
_wasm_extern_same = dll.wasm_extern_same
_wasm_extern_same.restype = c_bool
_wasm_extern_same.argtypes = [POINTER(wasm_extern_t), POINTER(wasm_extern_t)]
def wasm_extern_same(arg0: Any, arg1: Any) -> c_bool:
return _wasm_extern_same(arg0, arg1) # type: ignore
_wasm_extern_get_host_info = dll.wasm_extern_get_host_info
_wasm_extern_get_host_info.restype = c_void_p
_wasm_extern_get_host_info.argtypes = [POINTER(wasm_extern_t)]
def wasm_extern_get_host_info(arg0: Any) -> pointer:
return _wasm_extern_get_host_info(arg0) # type: ignore
_wasm_extern_set_host_info = dll.wasm_extern_set_host_info
_wasm_extern_set_host_info.restype = None
_wasm_extern_set_host_info.argtypes = [POINTER(wasm_extern_t), c_void_p]
def wasm_extern_set_host_info(arg0: Any, arg1: Any) -> None:
    return _wasm_extern_set_host_info(arg0, arg1) # type: ignore
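# Grid specification tuples, apparently in GFE createGrids format:
# (database, element, data type, start hour, end hour, value, edit areas).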
"Sky", "SCALAR", 30, 33, 50, "all"),
("Fcst", "Wx", "WEATHER", 30, 33, "SChc:R:--:0SM:^Chc:R:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 30, 33, 60, "all"),
("Fcst", "QPF", "SCALAR", 30, 33, 2.5, "all"),
("Fcst", "SnowAmt", "SCALAR", 30, 33, 0, "all"),
("Fcst", "T", "SCALAR", 33, 36, 41, "all"),
("Fcst", "Td", "SCALAR", 33, 36, 39, "all"),
("Fcst", "Wind", "VECTOR", 33, 36, (10, "S"), "all"),
("Fcst", "Sky", "SCALAR", 33, 36, 90, "all"),
("Fcst", "Wx", "WEATHER", 33, 36, "Lkly:R:m:1/2SM:^Def:R:m:4SM:", "all"),
("Fcst", "PoP", "SCALAR", 33, 36, 70, "all"),
("Fcst", "QPF", "SCALAR", 33, 36, 4, "all"),
("Fcst", "SnowAmt", "SCALAR", 33, 36, 0, "all"),
("Fcst", "T", "SCALAR", 36, 39, 40, "all"),
("Fcst", "Td", "SCALAR", 36, 39, 40, "all"),
("Fcst", "Wind", "VECTOR", 36, 39, (5, "SE"), "all"),
("Fcst", "Sky", "SCALAR", 36, 39, 100, "all"),
("Fcst", "Wx", "WEATHER", 36, 39, "Frq:R:+:<NoVis>:^Brf:R:+:5SM:", "all"),
("Fcst", "PoP", "SCALAR", 36, 39, 100, "all"),
("Fcst", "QPF", "SCALAR", 36, 39, 5, "all"),
("Fcst", "SnowAmt", "SCALAR", 36, 39, 0, "all"),
("Fcst", "T", "SCALAR", 39, 42, 33, "all"),
("Fcst", "Td", "SCALAR", 39, 42, 33, "all"),
("Fcst", "Wind", "VECTOR", 39, 42, (5, "E"), "all"),
("Fcst", "Sky", "SCALAR", 39, 42, 100, "all"),
("Fcst", "Wx", "WEATHER", 39, 42, "Pds:R:m:<NoVis>:^Inter:R:m:2SM:", "all"),
("Fcst", "PoP", "SCALAR", 39, 42, 100, "all"),
("Fcst", "QPF", "SCALAR", 39, 42, 5, "all"),
("Fcst", "SnowAmt", "SCALAR", 39, 42, 0, "all"),
("Fcst", "T", "SCALAR", 42, 45, 35, "all"),
("Fcst", "Td", "SCALAR", 42, 45, 33, "all"),
("Fcst", "Wind", "VECTOR", 42, 45, (2.5, "E"), "all"),
("Fcst", "Sky", "SCALAR", 42, 45, 70, "all"),
("Fcst", "Wx", "WEATHER", 42, 45, "Iso:RW:+:<NoVis>:^Sct:RW:+:3/4SM:", "all"),
("Fcst", "PoP", "SCALAR", 42, 45, 95, "all"),
("Fcst", "QPF", "SCALAR", 42, 45, 5, "all"),
("Fcst", "SnowAmt", "SCALAR", 42, 45, 0, "all"),
("Fcst", "T", "SCALAR", 45, 48, 40, "all"),
("Fcst", "Td", "SCALAR", 45, 48, 35, "all"),
("Fcst", "Wind", "VECTOR", 45, 48, (2, "NE"), "all"),
("Fcst", "Sky", "SCALAR", 45, 48, 85, "all"),
("Fcst", "Wx", "WEATHER", 45, 48, "Num:RW:m:<NoVis>:^Wide:RW:-:1/2SM:", "all"),
("Fcst", "PoP", "SCALAR", 45, 48, 65, "all"),
("Fcst", "QPF", "SCALAR", 45, 48, 1, ["BelowElev"]),
("Fcst", "QPF", "SCALAR", 45, 48, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 45, 48, 0, "all"),
("Fcst", "T", "SCALAR", 48, 51, 45, "all"),
("Fcst", "Td", "SCALAR", 48, 51, 38, "all"),
("Fcst", "Wind", "VECTOR", 48, 51, (5, "N"), "all"),
("Fcst", "Sky", "SCALAR", 48, 51, 70, "all"),
("Fcst", "Wx", "WEATHER", 48, 51, "Ocnl:RW:-:<NoVis>:^SChc:RW:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 48, 51, 60, "all"),
("Fcst", "QPF", "SCALAR", 48, 51, .5, ["BelowElev"]),
("Fcst", "QPF", "SCALAR", 48, 51, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 48, 51, 0, "all"),
("Fcst", "T", "SCALAR", 51, 54, 50, "all"),
("Fcst", "Td", "SCALAR", 51, 54, 40, "all"),
("Fcst", "Wind", "VECTOR", 51, 54, (7, "NW"), "all"),
("Fcst", "Sky", "SCALAR", 51, 54, 35, "all"),
("Fcst", "Wx", "WEATHER", 51, 54, "Chc:RW:--:<NoVis>:^Lkly:RW:--:1SM:", "all"),
("Fcst", "PoP", "SCALAR", 51, 54, 50, "all"),
("Fcst", "QPF", "SCALAR", 51, 54, .5, ["BelowElev"]),
("Fcst", "QPF", "SCALAR", 51, 54, 2, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 51, 54, 0, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 70, "all"),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 50, "all"),
("Fcst", "T", "SCALAR", 54, 57, 50, "all"),
("Fcst", "Td", "SCALAR", 54, 57, 45, "all"),
("Fcst", "Wind", "VECTOR", 54, 57, (10, "W"), "all"),
("Fcst", "Sky", "SCALAR", 54, 57, 30, "all"),
("Fcst", "Wx", "WEATHER", 54, 57, "Def:RW:--:<NoVis>:^Frq:RW:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 54, 57, 55, "all"),
("Fcst", "QPF", "SCALAR", 54, 57, .25, ["BelowElev"]),
("Fcst", "QPF", "SCALAR", 54, 57, 1, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 54, 57, 0, "all"),
("Fcst", "T", "SCALAR", 57, 60, 55, "all"),
("Fcst", "Td", "SCALAR", 57, 60, 47, "all"),
("Fcst", "Wind", "VECTOR", 57, 60, (12, "E"), "all"),
("Fcst", "Sky", "SCALAR", 57, 60, 40, "all"),
("Fcst", "Wx", "WEATHER", 57, 60, "Brf:RW:-:4SM:^Pds:RW:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 57, 60, 40, "all"),
("Fcst", "QPF", "SCALAR", 57, 60, .5, "all"),
("Fcst", "SnowAmt", "SCALAR", 57, 60, 0, "all"),
("Fcst", "T", "SCALAR", 60, 63, 65, "all"),
("Fcst", "Td", "SCALAR", 60, 63, 43, "all"),
("Fcst", "Wind", "VECTOR", 60, 63, (15, "S"), "all"),
("Fcst", "Sky", "SCALAR", 60, 63, 10, "all"),
("Fcst", "Wx", "WEATHER", 60, 63, "Inter:RW:--:<NoVis>:^Wide:L:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 60, 63, 10, "all"),
("Fcst", "QPF", "SCALAR", 60, 63, .01, "all"),
("Fcst", "SnowAmt", "SCALAR", 60, 63, 0, "all"),
("Fcst", "T", "SCALAR", 63, 66, 70, "all"),
("Fcst", "Td", "SCALAR", 63, 66, 40, "all"),
("Fcst", "Wind", "VECTOR", 63, 66, (15, "N"), "all"),
("Fcst", "Sky", "SCALAR", 63, 66, 5, "all"),
("Fcst", "Wx", "WEATHER", 63, 66, "Ocnl:L:--:<NoVis>:^SChc:L:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 63, 66, 10, "all"),
("Fcst", "QPF", "SCALAR", 63, 66, .01, "all"),
("Fcst", "SnowAmt", "SCALAR", 63, 66, 0, "all"),
("Fcst", "T", "SCALAR", 66, 69, 68, "all"),
("Fcst", "Td", "SCALAR", 66, 69, 55, "all"),
("Fcst", "Wind", "VECTOR", 66, 69, (20, "W"), "all"),
("Fcst", "Sky", "SCALAR", 66, 69, 75, "all"),
("Fcst", "Wx", "WEATHER", 66, 69, "Chc:L:-:3SM:^Lkly:L:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 66, 69, 10, "all"),
("Fcst", "QPF", "SCALAR", 66, 69, .01, "all"),
("Fcst", "SnowAmt", "SCALAR", 66, 69, 0, "all"),
("Fcst", "T", "SCALAR", 69, 75, 69, "all"),
("Fcst", "Td", "SCALAR", 69, 75, 56, "all"),
("Fcst", "Wind", "VECTOR", 69, 75, (10, "S"), "all"),
("Fcst", "Sky", "SCALAR", 69, 75, 10, "all"),
("Fcst", "Wx", "WEATHER", 69, 75, "Def:L:--:<NoVis>:^Areas:L:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 69, 75, 10, "all"),
("Fcst", "QPF", "SCALAR", 69, 75, 0, "all"),
("Fcst", "SnowAmt", "SCALAR", 69, 75, 0, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 71, "all"),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 65, "all"),
("Fcst", "T", "SCALAR", 75, 81, 69, "all"),
("Fcst", "Td", "SCALAR", 75, 81, 60, "all"),
("Fcst", "Wind", "VECTOR", 75, 81, (5, "SW"), "all"),
("Fcst", "Sky", "SCALAR", 75, 81, 10, "all"),
("Fcst", "Wx", "WEATHER", 75, 81, "Patchy:L:--:<NoVis>:^Frq:L:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 75, 81, 10, "all"),
("Fcst", "QPF", "SCALAR", 75, 81, 0, "all"),
("Fcst", "SnowAmt", "SCALAR", 75, 81, 0, "all"),
("Fcst", "T", "SCALAR", 81, 87, 70, "all"),
("Fcst", "Td", "SCALAR", 81, 87, 61, "all"),
("Fcst", "Wind", "VECTOR", 81, 87, (20, "SE"), "all"),
("Fcst", "Sky", "SCALAR", 81, 87, 10, "all"),
("Fcst", "Wx", "WEATHER", 81, 87, "Brf:L:--:<NoVis>:^Pds:L:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 81, 87, 10, "all"),
("Fcst", "QPF", "SCALAR", 81, 87, 0, "all"),
("Fcst", "SnowAmt", "SCALAR", 81, 87, 0, "all"),
("Fcst", "T", "SCALAR", 87, 93, 71, "all"),
("Fcst", "Td", "SCALAR", 87, 93, 65, "all"),
("Fcst", "Wind", "VECTOR", 87, 93, (15, "E"), "all"),
("Fcst", "Sky", "SCALAR", 87, 93, 50, "all"),
("Fcst", "Wx", "WEATHER", 87, 93, "Inter:L:-:<NoVis>:^Wide:ZL:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 87, 93, 30, "all"),
("Fcst", "QPF", "SCALAR", 87, 93, .01, "all"),
("Fcst", "SnowAmt", "SCALAR", 87, 93, 0, "all"),
("Fcst", "T", "SCALAR", 93, 99, 65, "all"),
("Fcst", "Td", "SCALAR", 93, 99, 65, "all"),
("Fcst", "Wind", "VECTOR", 93, 99, (23, "N"), "all"),
("Fcst", "Sky", "SCALAR", 93, 99, 50, "all"),
("Fcst", "Wx", "WEATHER", 93, 99, "Ocnl:ZL:-:<NoVis>:^SChc:ZL:-:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 93, 99, 50, "all"),
("Fcst", "QPF", "SCALAR", 93, 99, .01, "all"),
("Fcst", "SnowAmt", "SCALAR", 93, 99, 0, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 75, "all"),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 68, "all"),
("Fcst", "T", "SCALAR", 99, 105, 68, "all"),
("Fcst", "Td", "SCALAR", 99, 105, 65, "all"),
("Fcst", "Wind", "VECTOR", 99, 105, (31, "NE"), "all"),
("Fcst", "Sky", "SCALAR", 99, 105, 10, "all"),
("Fcst", "Wx", "WEATHER", 99, 105, "Chc:ZL:--:<NoVis>:^Lkly:ZL:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 99, 105, 10, "all"),
("Fcst", "QPF", "SCALAR", 99, 105, 0, "all"),
("Fcst", "SnowAmt", "SCALAR", 99, 105, 0, "all"),
("Fcst", "T", "SCALAR", 105, 111, 70, "all"),
("Fcst", "Td", "SCALAR", 105, 111, 65, "all"),
("Fcst", "Wind", "VECTOR", 105, 111, (40, "S"), "all"),
("Fcst", "Sky", "SCALAR", 105, 111, 10, "all"),
("Fcst", "Wx", "WEATHER", 105, 111, "Def:ZL:--:<NoVis>:^Areas:ZL:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 105, 111, 10, "all"),
("Fcst", "QPF", "SCALAR", 105, 111, 0, "all"),
("Fcst", "SnowAmt", "SCALAR", 105, 111, 0, "all"),
("Fcst", "T", "SCALAR", 111, 117, 73, "all"),
("Fcst", "Td", "SCALAR", 111, 117, 65, "all"),
("Fcst", "Wind", "VECTOR", 111, 117, (5, "S"), "all"),
("Fcst", "Sky", "SCALAR", 111, 117, 10, "all"),
("Fcst", "Wx", "WEATHER", 111, 117, "Patchy:ZL:--:<NoVis>:^Frq:ZL:--:<NoVis>:", "all"),
("Fcst", "PoP", "SCALAR", 111, 117, 10, "all"),
("Fcst", "QPF", "SCALAR", 111, 117, 0, | |
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
return self
class GetModelAttributeRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
model_id: str = None,
unique_id: str = None,
):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        # Unique ID of the model that the attribute belongs to
        self.model_id = model_id
        # Unique ID of the attribute to fetch
        self.unique_id = unique_id
def validate(self):
self.validate_required(self.model_id, 'model_id')
self.validate_required(self.unique_id, 'unique_id')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.model_id is not None:
result['model_id'] = self.model_id
if self.unique_id is not None:
result['unique_id'] = self.unique_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('model_id') is not None:
self.model_id = m.get('model_id')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
return self
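# Round-trip sketch (the identifiers below are illustrative only):
#   req = GetModelAttributeRequest(model_id='my_model', unique_id='my_attr')
#   req.validate()
#   payload = req.to_map()
#   clone = GetModelAttributeRequest().from_map(payload)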
class GetModelAttributeResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: Attribute = None,
):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # The attribute that was fetched
        self.data = data
def validate(self):
if self.data:
self.data.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.data is not None:
result['data'] = self.data.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('data') is not None:
temp_model = Attribute()
self.data = temp_model.from_map(m['data'])
return self
class QueryModelAttributeRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
model_id: str = None,
):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        # Unique ID of the owning model
        self.model_id = model_id
def validate(self):
pass
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.model_id is not None:
result['model_id'] = self.model_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('model_id') is not None:
self.model_id = m.get('model_id')
return self
class QueryModelAttributeResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: List[Attribute] = None,
):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # List of model attributes returned by the query
        self.data = data
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = Attribute()
self.data.append(temp_model.from_map(k))
return self
class CreateModelAttributeRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
model_id: str = None,
unique_id: str = None,
name: str = None,
description: str = None,
type: str = None,
readonly: bool = None,
required: bool = None,
priority: int = None,
display_properties: List[MapStringToStringEntry] = None,
group_id: str = None,
):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        # Unique ID of the owning model
        self.model_id = model_id
        # Unique ID (unique within the owning model)
        self.unique_id = unique_id
        # Name (unique within the owning model)
        self.name = name
        # Description
        self.description = description
        # Type (one of: INTEGER, LONG, DOUBLE, FLOAT, BOOLEAN, STRING, DATE, DATETIME, ENUM, ARRAY)
        self.type = type
        # Whether the attribute is read-only
        self.readonly = readonly
        # Whether the attribute is required
        self.required = required
        # Sort order for front-end display; larger numbers rank higher, default 0
        self.priority = priority
        # Extended properties for front-end display
        self.display_properties = display_properties
# group_id
self.group_id = group_id
def validate(self):
self.validate_required(self.model_id, 'model_id')
self.validate_required(self.unique_id, 'unique_id')
self.validate_required(self.name, 'name')
self.validate_required(self.type, 'type')
self.validate_required(self.readonly, 'readonly')
self.validate_required(self.required, 'required')
if self.display_properties:
for k in self.display_properties:
if k:
k.validate()
self.validate_required(self.group_id, 'group_id')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.model_id is not None:
result['model_id'] = self.model_id
if self.unique_id is not None:
result['unique_id'] = self.unique_id
if self.name is not None:
result['name'] = self.name
if self.description is not None:
result['description'] = self.description
if self.type is not None:
result['type'] = self.type
if self.readonly is not None:
result['readonly'] = self.readonly
if self.required is not None:
result['required'] = self.required
if self.priority is not None:
result['priority'] = self.priority
result['display_properties'] = []
if self.display_properties is not None:
for k in self.display_properties:
result['display_properties'].append(k.to_map() if k else None)
if self.group_id is not None:
result['group_id'] = self.group_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('model_id') is not None:
self.model_id = m.get('model_id')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('description') is not None:
self.description = m.get('description')
if m.get('type') is not None:
self.type = m.get('type')
if m.get('readonly') is not None:
self.readonly = m.get('readonly')
if m.get('required') is not None:
self.required = m.get('required')
if m.get('priority') is not None:
self.priority = m.get('priority')
self.display_properties = []
if m.get('display_properties') is not None:
for k in m.get('display_properties'):
temp_model = MapStringToStringEntry()
self.display_properties.append(temp_model.from_map(k))
if m.get('group_id') is not None:
self.group_id = m.get('group_id')
return self
class CreateModelAttributeResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
data: Attribute = None,
):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # The newly created model attribute
        self.data = data
def validate(self):
if self.data:
self.data.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.data is not None:
result['data'] = self.data.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('data') is not None:
temp_model = Attribute()
self.data = temp_model.from_map(m['data'])
return self
class UpdateModelAttributeRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
model_id: str = None,
unique_id: str = None,
name: str = None,
description: str = None,
readonly: bool = None,
required: bool = None,
priority: int = None,
display_properties: List[MapStringToStringEntry] = None,
):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        # Unique ID of the model that owns the attribute being updated
        self.model_id = model_id
        # Unique ID of the attribute being updated
        self.unique_id = unique_id
        # Name (unique within the owning model)
        self.name = name
        # Description
        self.description = description
        # Whether the attribute is read-only
        self.readonly = readonly
        # Whether the attribute is required
        self.required = required
        # Sort order for front-end display; larger numbers rank higher, default 0
        self.priority = priority
        # Extended properties for front-end display
        self.display_properties = display_properties
def validate(self):
self.validate_required(self.model_id, 'model_id')
self.validate_required(self.unique_id, 'unique_id')
self.validate_required(self.name, 'name')
self.validate_required(self.readonly, 'readonly')
self.validate_required(self.required, 'required')
if self.display_properties:
for k in self.display_properties:
if k:
k.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.model_id is not None:
result['model_id'] = self.model_id
if self.unique_id is not None:
result['unique_id'] = self.unique_id
if self.name is not None:
result['name'] = self.name
if self.description is not None:
result['description'] = self.description
if self.readonly is not None:
result['readonly'] = self.readonly
if self.required is not None:
result['required'] = self.required
if self.priority is not None:
result['priority'] = self.priority
result['display_properties'] = []
if self.display_properties is not None:
for k in self.display_properties:
result['display_properties'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('model_id') is not None:
self.model_id = m.get('model_id')
if m.get('unique_id') is not None:
self.unique_id = m.get('unique_id')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('description') is not None:
self.description = m.get('description')
if m.get('readonly') is not None:
self.readonly = m.get('readonly')
if m.get('required') is not None:
self.required = m.get('required')
if m.get('priority') is not None:
self.priority = m.get('priority')
self.display_properties = []
if m.get('display_properties') is not None:
for k in m.get('display_properties'):
temp_model = MapStringToStringEntry()
self.display_properties.append(temp_model.from_map(k))
return self
class UpdateModelAttributeResponse(TeaModel):
def __init__(
self,
req_msg_id: str | |
<gh_stars>0
# ---------------------------------------------------------------------------
# Amazon ElasticTranscoder to AWS Elemental MediaConvert preset converter.
# Version: 2.3
#
# 2.1
# - Added thumbnail preset creation
# - Added -f option to save preset and thumbnail preset to files
# - Corrected interlaced mode
# - Corrected codec level logic
#
# 2.2
# - Corrected "auto" logic for video/frame-capture resolution, sample rate, and bitrate
# - Added validation for audio-only on MP4-only outputs
#
# 2.3
# - Corrected casting logic
# - Added fMP4 support for DASH and Smooth outputs
# - Added more validation around container conversion types
# - Updated supported AAC bitrate range
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ---------------------------------------------------------------------------
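# Example invocation (the preset ID below is hypothetical):
#   python ets_mediaconvert_preset_v2.py -r us-east-1 -p 1351620000001-100070 -c apple -f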
import hashlib
import json
import datetime
import time
import hashlib
import json
import os
import boto3
from botocore.exceptions import ClientError
import argparse
# Options###
parser = argparse.ArgumentParser(
prog="ets_mediaconvert_preset_v2.py",
description="ETS to AWS Elemental MediaConvert preset converter",
add_help=True,
)
parser.add_argument(
"-r",
"--aws-region",
action="store",
dest="region",
help="Valid ETS AWS Region to connect to",
)
parser.add_argument(
"-p", "--preset-id", action="store", dest="etsid", help="ETS Preset ID"
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Verbose debug messages"
)
# arser.add_argument('-l','--preset-list',action='store',dest='listpresets',help='Feed list of presets ID into script')
parser.add_argument(
"-i", "--interactive", action="store_true", help="Interactive Mode for user"
)
parser.add_argument(
"-c",
"--output-type",
action="store",
dest="outputtype",
help="Output group type for preset to move to ex: file, apple, dash, smooth",
)
parser.add_argument("-f", "--save", action="store_true", help="Save presets to file")
args = parser.parse_args()
##Print help if no args
if args.interactive == False and (
args.region == None or args.etsid == None or args.outputtype == None
):
parser.print_help()
exit()
outputtype = ["file", "apple", "dash", "smooth"]
unsupport_container = ['"webm"', '"mp3"', '"ogg"', '"flac"', '"flv"', '"gif"']
unsupport_video_codec = ['"vp8"', '"vp9"', '"gif"']
unsupport_audio_codec = ['"vorbis"', '"flac"', '"wav"']
###Check region support
def validation_region():
if args.interactive == False:
while True:
region = args.region.lower()
if (
(region == "us-east-1")
or (region == "us-west-1")
or (region == "us-west-2")
or (region == "eu-west-1")
or (region == "ap-southeast-1")
or (region == "ap-southeast-2")
or (region == "ap-south-1")
or (region == "ap-northeast-1")
):
return region
else:
print ("Unsupported region selected..exiting")
exit()
else:
while True:
region = input("Please type in a supported ETS region: ").lower()
if (
(region.strip() == "us-east-1")
or (region.strip() == "us-west-1")
or (region.strip() == "us-west-2")
or (region.strip() == "eu-west-1")
or (region.strip() == "ap-southeast-1")
or (region.strip() == "ap-southeast-2")
or (region.strip() == "ap-south-1")
or (region.strip() == "ap-northeast-1")
):
return region
break
def validation_preset():
if args.interactive == False:
while True:
try:
etspresetid = args.etsid.lower()
read_preset_result = etsclient.read_preset(Id=etspresetid)
return etspresetid, read_preset_result
except Exception as e:
print(e)
exit()
else:
while True:
presetid = input("Preset ID: ").lower()
try:
                read_preset_result = etsclient.read_preset(Id=presetid)
return presetid, read_preset_result
except Exception as e:
print(e)
print("Please enter a correct preset id")
def validate_container(ets_preset_payload, unsupported):
if json.dumps(ets_preset_payload["Preset"]["Container"]) in unsupported:
print("Unsupported Container found in preset, please try another preset")
exit()
else:
supported_container = json.dumps(ets_preset_payload["Preset"]["Container"])
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("Supported Container Found!")
print(supported_container)
return supported_container
def validate_video(ets_preset_payload, unsupported):
if ets_preset_payload["Preset"]["Video"]:
if json.dumps(ets_preset_payload["Preset"]["Video"]["Codec"]) in unsupported:
print("Unsupported Video codec found in preset, please try anohter preset")
exit()
else:
supported_video = json.dumps(ets_preset_payload["Preset"]["Video"]["Codec"])
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("Supported Video Codec Found!")
print(supported_video)
return supported_video
else:
supported_video = "none"
return supported_video
def validate_audio(ets_preset_payload, unsupported):
if ets_preset_payload["Preset"]["Audio"]:
if json.dumps(ets_preset_payload["Preset"]["Audio"]["Codec"]) in unsupported:
print("Unsupported Video condec found in preset, please try anohter preset")
exit()
else:
supported_audio = json.dumps(ets_preset_payload["Preset"]["Audio"]["Codec"])
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("Supported Audio Codec Found")
print(supported_audio)
return supported_audio
else:
supported_audio = "none"
return supported_audio
def validate_output(outputtype):
if args.interactive == True:
while True:
            emf_outputgroup = input(
                "Please type in the output group type you want to place this ETS preset into (options: file, apple, dash, smooth): "
            )
if emf_outputgroup.lower() in outputtype:
return emf_outputgroup
else:
if args.outputtype.lower() in outputtype:
emf_outputgroup = args.outputtype
return emf_outputgroup
else:
print("Output group type must be file, apple, dash, or smooth")
exit()
def translate_audio(ets_preset_payload, s_audio):
audiodump = json.dumps(ets_preset_payload["Preset"]["Audio"])
ets_channel_num = json.dumps(ets_preset_payload["Preset"]["Audio"]["Channels"])
if ets_channel_num == '"auto"':
ets_channel_num = '"2"'
else:
ets_channel_num = json.dumps(ets_preset_payload["Preset"]["Audio"]["Channels"])
ets_audio_bitrate = int(
json.dumps(ets_preset_payload["Preset"]["Audio"]["BitRate"]).strip('"')
)
ets_audio_sample = json.dumps(
ets_preset_payload["Preset"]["Audio"]["SampleRate"]
).strip('"')
if ets_audio_sample == "auto":
ets_audio_sample = 48
else:
ets_audio_sample = int(
json.dumps(ets_preset_payload["Preset"]["Audio"]["SampleRate"]).strip('"')
)
###Translate Audio Profile###
###AAC Type
if s_audio == '"AAC"':
etsaudioprofile = json.dumps(
ets_preset_payload["Preset"]["Audio"]["CodecOptions"]["Profile"]
)
aac_range = [64, 84, 96, 112, 128, 192, 224, 256, 288, 320, 384, 448, 512, 576]
if etsaudioprofile == '"AAC-LC"':
audio_profile = "LC"
elif etsaudioprofile == '"HE-AAC"':
audio_profile = "HEV1"
elif etsaudioprofile == '"HE-AACV2"':
audio_profile = "HEV2"
else:
audio_profile = "LC"
print("Warning: No matching profile found, changing to lc \n")
if ets_channel_num == '"2"':
audio_coding = "CODING_MODE_2_0"
        elif ets_channel_num == '"1"':
            audio_coding = "CODING_MODE_1_0"
        else:
            audio_coding = "CODING_MODE_2_0"
emf_bitrate = str(min(aac_range, key=lambda x: abs(x - ets_audio_bitrate)))
emf_bitrate = int(emf_bitrate) * 1000
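        # Illustrative nearest-match (hypothetical ETS value): an ETS audio bitrate of
        # 100 kbit/s maps to 96 from aac_range above, which becomes 96000 bps here.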
emf_sample = ets_preset_payload["Preset"]["Audio"]["SampleRate"]
AudioSettings = {}
AudioSettings = {
"LanguageCodeControl": "FOLLOW_INPUT",
"AudioTypeControl": "FOLLOW_INPUT",
"AudioSourceName": "Audio Selector 1",
"CodecSettings": {
"Codec": "AAC",
"AacSettings": {
"AudioDescriptionBroadcasterMix": "NORMAL",
"Bitrate": emf_bitrate,
"CodecProfile": audio_profile,
"CodingMode": audio_coding,
"RawFormat": "NONE",
"Specification": "MPEG4",
"RateControlMode": "CBR",
},
},
}
if emf_sample != "auto":
AudioSettings["CodecSettings"]["AacSettings"].update(
{"SampleRate": int(emf_sample)}
)
else:
            print(
                "Warning: Auto in setting Sample Rate not supported...defaulting to 48kHz\n"
            )
AudioSettings["CodecSettings"]["AacSettings"].update(
{"SampleRate": int(48000)}
)
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("==================AUDIO SETTINGS AAC==================")
print(json.dumps(AudioSettings))
###PCM/WAV Type
elif s_audio == '"wav"' or s_audio == '"pcm"':
wav_sample = [8, 16, 22.05, 24, 32, 44.1, 48, 88.2, 96, 192]
        emf_sample = min(wav_sample, key=lambda x: abs(x - ets_audio_sample))
        emf_sample = int(round(emf_sample * 1000))
ets_bitdepth = [16, 24]
emf_bitdepth = str(
min(
ets_bitdepth,
key=lambda x: abs(
x
- int(
json.dumps(
ets_preset_payload["Preset"]["Audio"]["CodecOptions"][
"BitDepth"
]
).strip('"')
)
),
)
)
if (
json.dumps(ets_preset_payload["Preset"]["Audio"]["Channels"]) == '"auto"'
or json.dumps(ets_preset_payload["Preset"]["Audio"]["Channels"]) == '"0"'
):
warning = "0 and auto channels not supported...defaulting to 2\n"
emf_channels = "2"
else:
emf_channels = json.dumps(
ets_preset_payload["Preset"]["Audio"]["Channels"]
).strip('"')
AudioSettings = {}
AudioSettings = {
"LanguageCodeControl": "FOLLOW_INPUT",
"AudioTypeControl": "FOLLOW_INPUT",
"AudioSourceName": "Audio Selector 1",
"CodecSettings": {
"Codec": "WAV",
"WavSettings": {
"BitDepth": int(emf_bitdepth),
"Channels": int(emf_channels),
},
},
}
if emf_sample != "auto":
AudioSettings["CodecSettings"]["WavSettings"].update(
{"SampleRate": int(emf_sample)}
)
else:
            print(
                "Warning: Auto in setting Sample Rate not supported...defaulting to 44.1kHz\n"
            )
AudioSettings["CodecSettings"]["WavSettings"].update(
{"SampleRate": int(44100)}
)
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("==================AUDIO SETTINGS WAV==================")
print(json.dumps(AudioSettings))
###Type MP2
elif s_audio == '"mp2"':
mp2_range = [32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384]
mp2_sample_range = [32, 44.1, 48]
        emf_bitrate = min(mp2_range, key=lambda x: abs(x - ets_audio_bitrate)) * 1000
        emf_sample = int(round(min(mp2_sample_range, key=lambda x: abs(x - ets_audio_sample)) * 1000))
AudioSettings = {}
if (
json.dumps(ets_preset_payload["Preset"]["Audio"]["Channels"]) == '"auto"'
or json.dumps(ets_preset_payload["Preset"]["Audio"]["Channels"]) == '"0"'
):
print("Warning = 0 and auto channels not supported...defaulting to 2\n")
emf_channels = "2"
else:
emf_channels = json.dumps(
ets_preset_payload["Preset"]["Audio"]["Channels"]
).strip('"')
AudioSettings = {
"LanguageCodeControl": "FOLLOW_INPUT",
"AudioTypeControl": "FOLLOW_INPUT",
"AudioSourceName": "Audio Selector 1",
"CodecSettings": {
"Codec": "MP2",
"Mp2Settings": {
"Bitrate": int(emf_bitrate),
"Channels": int(emf_channels),
},
},
}
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("==================AUDIO SETTINGS MP2==================")
print(json.dumps(AudioSettings))
if emf_sample != "auto":
AudioSettings["CodecSettings"]["Mp2Settings"].update(
{"SampleRate": int(emf_sample)}
)
else:
            print(
                "Warning: Auto in setting Sample Rate not supported...defaulting to 48 kHz\n"
            )
AudioSettings["CodecSettings"]["Mp2Settings"].update(
{"SampleRate": int(48000)}
)
AudioDescription = {}
AudioDesc1 = {}
AudioDesc1 = {
"LanguageCodeControl": "FOLLOW_INPUT",
"InputTypeControl": "FOLLOW_INPUT",
"AudioSourceName": "Audio Selector 1",
}
AudioExtra = json.dumps(AudioDesc1, indent=4, sort_keys=True)
AudioDescription = {"AudioDescriptions": []}
AudioDescription["AudioDescriptions"].insert(0, AudioSettings)
if args.verbose == True:
print("==================VERBOSE LOGGING==================")
print("==================AUDIO DESCRIPTION==================")
print(json.dumps(AudioDescription, indent=4, sort_keys=True))
return AudioDescription
def translate_video(ets_preset_payload, s_video):
    ##Checks for Profile for h264 - not wrapping this in an h264-only branch so that, if ETS supports h265 in the future, it will be easier to migrate
videodump = json.dumps(ets_preset_payload["Preset"]["Video"])
if "Profile" in videodump and s_video != '"mpeg2"':
emf_codec_profile = ets_preset_payload["Preset"]["Video"]["CodecOptions"][
"Profile"
].upper()
emf_codec_level = ets_preset_payload["Preset"]["Video"]["CodecOptions"]["Level"]
cavlc_profile = [
"HIGH",
"HIGH_10BIT",
"HIGH_422",
"HIGH_422_10BIT",
"MAIN",
"BASELINE",
]
if emf_codec_profile in cavlc_profile:
emf_entropy_encoding = "CAVLC"
else:
emf_entropy_encoding = "CABAC"
##Logic for Level 1b that isn't supported in AWS Elemental MediaConvert
if emf_codec_level == '"1b"':
emf_codec_level = '"AUTO"'
print("WARNING: 1b not supported in AWS Elemental MediaConvert, defaulting to auto, please change to 1 or 1.1 based off bitrate and resolution \n")
else:
emf_codec_level = ets_preset_payload["Preset"]["Video"]["CodecOptions"][
"Level"
]
if emf_codec_level == "1":
emf_codec_level = "LEVEL_1"
elif emf_codec_level == "1.1":
emf_codec_level = "LEVEL_1_1"
elif emf_codec_level | |
<reponame>proguy914629bot/LyricMasterOutdated<filename>cogs/reaction.py
import discord
import typing
import emojis
import os
import asyncio
from discord.ext import commands
import json
from discord.utils import get
os.chdir("/home/gilb/LyricMaster/")
class ReactionRolesNotSetup(Exception):
"""Reaction Roles are not setup for this guild."""
pass
def is_setup():
async def wrap_func(ctx):
data = await ctx.bot.config.find(ctx.guild.id)
if data is None:
await ctx.send("Error! You did not set up Reaction Roles yet!")
#raise ReactionRolesNotSetup({"Error": "You did not set up Reaction Roles yet!"})
if data.get("message_id") is None:
await ctx.send("Error! You did not set up Reaction Roles yet!")
#raise ReactionRolesNotSetup
return True
return commands.check(wrap_func)
class Reactions(commands.Cog, name = "Reaction Roles"):
def __init__(self, bot):
self.bot = bot
async def rebuild_role_embed(self, guild_id):
data = await self.bot.config.find(guild_id)
channel_id = data['channel_id']
message_id = data['message_id']
guild = await self.bot.fetch_guild(guild_id)
channel = await self.bot.fetch_channel(channel_id)
message = await channel.fetch_message(message_id)
with open('BotRecords/reaction.json', 'r') as f:
reaction = json.load(f)
embed = discord.Embed(
title = str(reaction[str(guild_id)]["title"]),
#color = int(str(reaction[str(guild_id)]["color"]).replace("#", "0x")),
#description = str(reaction[str(guild_id)]["desc"])
)
await message.clear_reactions()
await message.edit(embed=embed)
reaction_roles = await self.bot.reaction_roles.get_all()
reaction_roles = list(filter(lambda r: r['guild_id'] == guild_id, reaction_roles))
for item in reaction_roles:
await message.add_reaction(item["_id"])
async def get_current_reactions(self, guild_id):
data = await self.bot.reaction_roles.get_all()
data = filter(lambda r: r['guild_id'] == guild_id, data)
data = map(lambda r: r['_id'], data)
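        # Illustrative transformation (hypothetical stored documents):
        #   [{"_id": "👍", "role": 111, "guild_id": 42}, ...]  ->  ["👍", ...]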
return list(data)
@commands.group(
aliases = ['rr', 'reactionrole'],
invoke_without_command = True
)
@commands.guild_only()
async def reactionroles(self, ctx):
embed = discord.Embed(
title = 'Reaction Roles Commands List:',
color = ctx.author.color
)
embed.add_field(
name = "Channel:",
value = """
```
lm?rr channel <Channel>
```\n\n
""",
inline = False
)
embed.add_field(
name = "Toggle: Toggles the Reaction Role for this guild.",
value = """
```
lm?rr toggle
```\n\n
""",
inline = False
)
embed.add_field(
name = "Add: Adds a new Reaction Role.",
value = """
```
lm?rr add <Emoji> <Role>
```\n\n
""",
inline = False
)
embed.add_field(
name = "Remove: Remove an existing Reaction Role using it's emoji!",
value = """
```
lm?rr rm <Emoji>
```\n\n
""",
inline = False
)
embed.add_field(
name = "Set: Sets a specific item of the Reaction Role Embed.",
value = """
```
lm?rr set <Type (Title, Description, Color)> <Message>
```
""",
inline = False
)
await ctx.send(embed=embed)
@reactionroles.command(name = "channel")
@commands.guild_only()
@commands.has_guild_permissions(manage_channels = True)
async def rr_channel(self, ctx, channel : discord.TextChannel = None):
if channel == None:
await ctx.send(f'Please mention the channel as a second argument! Do it like `lm?rr channel {ctx.channel.mention}` next time!')
return
try:
await ctx.send("Testing Permissions!", delete_after = 0.05)
except discord.HTTPException:
await ctx.send(f'I cannot send messages in {channel.name}! Make me have `Embed Links`, `Send Messages` and `Read Messages` permissions and then try again!')
return
try:
embed = discord.Embed(title = "Testing")
            await channel.send(embed=embed, delete_after = 0.05)
except discord.HTTPException:
await ctx.send(f'I cannot send embeds in {channel.name}! Make me have `Embed Links`, `Send Messages`, and `Read Messages` permissions then try again!')
return
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
questions = [
'What Should Be The Title of the Embed?',
'What Should Be The Description of the Embed? Say `none` if you do not want a description!',
]
answers = []
for i in questions:
await ctx.send(i)
try:
msg = await self.bot.wait_for('message', check=check, timeout = 120.0)
except asyncio.TimeoutError:
await ctx.send('Took Too Long. Try Again Later!')
else:
answers.append(msg.content)
#if str(answers[2]) == "none":
answer = ctx.guild.owner.color
if str(answer).startswith(("#", "0x")):
reaction_roles = await self.bot.reaction_roles.get_all()
reaction_roles = list(filter(lambda r: r['guild_id'] == ctx.guild.id, reaction_roles))
for item in reaction_roles:
role = ctx.guild.get_role(item["role"])
if str(answers[1]) == "none":
desc = f"{item['_id']}: {role.mention}"
else:
desc = f"{str(answers[1])}\n\n{item['_id']}: {role.mention}"
if answer != None:
finalanswer = answer
if str(answers[1]) == 'none':
embed = discord.Embed(
title = str(answers[0]),
color = finalanswer
)
else:
embed = discord.Embed(
title = str(answers[0]),
color = finalanswer,
description = desc
)
else:
if answers[1] == "none":
embed = discord.Embed(
title = str(answers[0])
)
else:
embed = discord.Embed(
title = str(answers[0]),
description = desc
)
with open('BotRecords/reaction.json', 'r') as f:
reaction = json.load(f)
reaction[str(ctx.guild.id)] = {}
reaction[str(ctx.guild.id)]["title"] = str(answers[0])
#reaction[str(ctx.guild,id)]["desc"] = str(desc)
reaction[str(ctx.guild.id)]["color"] = str(finalanswer)
with open('BotRecords/reaction.json', 'w') as f:
json.dump(reaction, f, indent=4)
else:
await ctx.send('That is not a HEX Code. Try again later!')
return
m = await channel.send(embed=embed)
for item in reaction_roles:
await m.add_reaction(item['_id'])
await self.bot.config.upsert(
{
"_id": ctx.guild.id,
"message_id": m.id,
"channel_id": m.channel.id,
"is_enabled": True,
}
)
await ctx.send('That should be all!', delete_after = 30.0)
@reactionroles.command(name = "toggle")
@commands.guild_only()
@commands.has_guild_permissions(administrator=True)
@is_setup()
async def rr_toggle(self, ctx):
"""Toggles the Reaction Role for this guild."""
data = await self.bot.config.find(ctx.guild.id)
data["is_enabled"] = not data["is_enabled"]
await self.bot.config.upsert(data)
is_enabled = "enabled!" if data["is_enabled"] else "disabled!"
await ctx.send(f"I have toggled the Reaction Role for this guild. It is now {is_enabled}")
@reactionroles.command(name = "add", aliases = ['mk', 'make'])
@commands.guild_only()
@commands.has_guild_permissions(administrator=True)
@is_setup()
    async def rr_add(self, ctx, emoji : typing.Union[discord.Emoji, str], *, role : discord.Role = None):
"""Adds a new Reaction Role."""
reacts = await self.get_current_reactions(ctx.guild.id)
if len(reacts) >= 20:
await ctx.send(f"This bot currently does not support more than 20 reactions!")
return
if not isinstance(emoji, discord.Emoji):
emoji = emojis.get(emoji)
emoji = emoji.pop()
elif isinstance(emoji, discord.Emoji):
if not emoji.is_useable():
await ctx.send("I cannot use that Emoji! Try again!")
return
try:
tryrole = ctx.guild.get_role(role.id)
try:
await ctx.author.add_roles(tryrole, reason = "Reaction Role Setup Test #1")
except:
await ctx.author.remove_roles(tryrole, reason = "Reaction Role Setup Test #1")
await ctx.author.add_roles(tryrole, reason = "Added back the role for the Reaction Role Setup Test #1!")
except:
await ctx.send('I am missing some permissons for adding and/or removing the role! Please fix this then Try again later!')
return
emoji = str(emoji)
await self.bot.reaction_roles.upsert(
{
"_id": emoji,
"role": role.id,
"guild_id": ctx.guild.id
}
)
await self.rebuild_role_embed(ctx.guild.id)
await ctx.send("The Reaction Role is ready and good to go!")
@reactionroles.command(name = "remove", aliases = ['rm'])
@commands.guild_only()
@commands.has_guild_permissions(manage_roles = True)
@is_setup()
async def rr_remove(self, ctx, emoji : typing.Union[discord.Emoji, str]):
"""Remove an existing Reaction Role using it's emoji!"""
if not isinstance(emoji, discord.Emoji):
emoji = emojis.get(emoji)
emoji = emoji.pop()
emoji = str(emoji)
try:
await self.bot.reaction_roles.delete(emoji)
except:
await ctx.send(f'{emoji} is not a valid Reaction Role Emoji!')
return
await self.rebuild_role_embed(ctx.guild.id)
await ctx.send("That should be all done and Removed!")
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
data = await self.bot.config.find(payload.guild_id)
if not payload.guild_id or not data or not data.get("is_enabled"):
return
guild_reaction_roles = await self.get_current_reactions(payload.guild_id)
if str(payload.emoji) not in guild_reaction_roles:
return
guild = await self.bot.fetch_guild(payload.guild_id)
emoji_data = await self.bot.reaction_roles.find(str(payload.emoji))
role = guild.get_role(emoji_data["role"])
member = await guild.fetch_member(payload.user_id)
if member.bot:
return
if role not in member.roles:
await member.add_roles(role, reason = f'Reaction Roles!')
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
data = await self.bot.config.find(payload.guild_id)
if not payload.guild_id or not data or not data.get("is_enabled"):
return
guild_reaction_roles = await self.get_current_reactions(payload.guild_id)
if str(payload.emoji) not in guild_reaction_roles:
return
guild = await self.bot.fetch_guild(payload.guild_id)
emoji_data = await self.bot.reaction_roles.find(str(payload.emoji))
role = guild.get_role(emoji_data["role"])
member = await guild.fetch_member(payload.user_id)
if role in member.roles:
await member.remove_roles(role, reason = f'Reaction Roles!')
@reactionroles.command(name = "set")
@commands.guild_only()
@commands.has_guild_permissions(administrator = True)
@is_setup()
async def rr_set(self, ctx, types = None, *, message = None): #Description, Title, Color
"""Sets a specific item of the Reaction Role Embed."""
with open('BotRecords/reaction.json', 'r') as f:
reaction = json.load(f)
data = await self.bot.config.find(ctx.guild.id)
channel_id = data['channel_id']
message_id = data['message_id']
guild_id = ctx.guild.id
channel = await self.bot.fetch_channel(channel_id)
        # Use a separate name so we do not shadow the user-supplied `message` argument.
        rr_message = await channel.fetch_message(message_id)
if types == None:
await ctx.send('Please put a type! Like `Title`, `Description`, and `Color`!')
return
        if message == None:
            await ctx.send('Please put a message for the Embed\'s {}!'.format(str(types)))
            return
finaltype = str(types).lower()
if finaltype == "title":
reaction[str(ctx.guild.id)]["title"] = str(message)
embed = discord.Embed(
title = str(reaction[str(guild_id)]["title"]),
color = int(reaction[str(guild_id)]["color"]),
description = str(reaction[str(guild_id)]["desc"])
)
if finaltype == "description":
reaction[str(ctx.guild.id)]["desc"] = str(message)
embed = discord.Embed(
title = str(reaction[str(guild_id)]["title"]),
color = int(reaction[str(guild_id)]["color"]),
description = str(reaction[str(guild_id)]["desc"])
)
if finaltype == "desc":
reaction[str(ctx.guild.id)]["desc"] = str(message)
embed = discord.Embed(
title = str(reaction[str(guild_id)]["title"]),
color = int(reaction[str(guild_id)]["color"]),
description = str(reaction[str(guild_id)]["desc"])
)
if finaltype == "color":
if str(message).startswith("#"):
str(message).replace("#", "0x")
try:
message = int(message)
except:
await ctx.send('It is not a HEX Code! Try again later!')
return
reaction[str(ctx.guild.id)]["color"] = str(message)
embed = discord.Embed(
title = str(reaction[str(guild_id)]["title"]),
color = int(reaction[str(guild_id)]["color"]),
description = str(reaction[str(guild_id)]["desc"])
)
else:
await ctx.send('It is not a HEX Code! Try again later!')
return
        await rr_message.edit(embed=embed)
@commands.command(aliases = ['react-to', 'reactto', 'react'])
async def react_to(self, ctx, message : discord.Message = None, *, emoji: discord.Emoji = None):
if message == None:
await ctx.send(f"Please put the message ID for the bot to | |
Optional. The type of restore. If unspecified, defaults to
METADATA_ONLY.
"""
class RestoreTypeValueValuesEnum(_messages.Enum):
r"""Optional. The type of restore. If unspecified, defaults to
METADATA_ONLY.
Values:
RESTORE_TYPE_UNSPECIFIED: The restore type is unknown.
FULL: The service's metadata and configuration are restored.
METADATA_ONLY: Only the service's metadata is restored.
"""
RESTORE_TYPE_UNSPECIFIED = 0
FULL = 1
METADATA_ONLY = 2
backup = _messages.StringField(1)
requestId = _messages.StringField(2)
restoreType = _messages.EnumField('RestoreTypeValueValuesEnum', 3)
class Secret(_messages.Message):
r"""A securely stored value.
Fields:
cloudSecret: The relative resource name of a Secret Manager secret
version, in the following form:projects/{project_number}/secrets/{secret
_id}/versions/{version_id}.
"""
cloudSecret = _messages.StringField(1)
class Service(_messages.Message):
r"""A managed metastore service that serves metadata queries.
Enums:
ReleaseChannelValueValuesEnum: Immutable. The release channel of the
service. If unspecified, defaults to STABLE.
StateValueValuesEnum: Output only. The current state of the metastore
service.
TierValueValuesEnum: The tier of the service.
Messages:
LabelsValue: User-defined labels for the metastore service.
Fields:
artifactGcsUri: Output only. A Cloud Storage URI (starting with gs://)
that specifies where artifacts related to the metastore service are
stored.
createTime: Output only. The time when the metastore service was created.
encryptionConfig: Immutable. Information used to configure the Dataproc
Metastore service to encrypt customer data at rest. Cannot be updated.
endpointUri: Output only. The URI of the endpoint used to access the
metastore service.
hiveMetastoreConfig: Configuration information specific to running Hive
metastore software as the metastore service.
labels: User-defined labels for the metastore service.
maintenanceWindow: The one hour maintenance window of the metastore
service. This specifies when the service can be restarted for
maintenance purposes in UTC time. Maintenance window is not needed for
services with the SPANNER database type.
metadataIntegration: The setting that defines how metastore metadata
should be integrated with external services and systems.
metadataManagementActivity: Output only. The metadata management
activities of the metastore service.
name: Immutable. The relative resource name of the metastore service, of
the form:projects/{project_number}/locations/{location_id}/services/{ser
vice_id}.
network: Immutable. The relative resource name of the VPC network on which
the instance can be accessed. It is specified in the following
form:projects/{project_number}/global/networks/{network_id}.
networkConfig: Immutable. The configuration specifying the network
settings for the Dataproc Metastore service.
port: The TCP port at which the metastore service is reached. Default:
9083.
releaseChannel: Immutable. The release channel of the service. If
unspecified, defaults to STABLE.
state: Output only. The current state of the metastore service.
stateMessage: Output only. Additional information about the current state
of the metastore service, if available.
tier: The tier of the service.
uid: Output only. The globally unique resource identifier of the metastore
service.
updateTime: Output only. The time when the metastore service was last
updated.
"""
class ReleaseChannelValueValuesEnum(_messages.Enum):
r"""Immutable. The release channel of the service. If unspecified,
defaults to STABLE.
Values:
RELEASE_CHANNEL_UNSPECIFIED: Release channel is not specified.
CANARY: The CANARY release channel contains the newest features, which
may be unstable and subject to unresolved issues with no known
workarounds. Services using the CANARY release channel are not subject
to any SLAs.
STABLE: The STABLE release channel contains features that are considered
stable and have been validated for production use.
"""
RELEASE_CHANNEL_UNSPECIFIED = 0
CANARY = 1
STABLE = 2
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The current state of the metastore service.
Values:
STATE_UNSPECIFIED: The state of the metastore service is unknown.
CREATING: The metastore service is in the process of being created.
ACTIVE: The metastore service is running and ready to serve queries.
SUSPENDING: The metastore service is entering suspension. Its query-
serving availability may cease unexpectedly.
SUSPENDED: The metastore service is suspended and unable to serve
queries.
UPDATING: The metastore service is being updated. It remains usable but
cannot accept additional update requests or be deleted at this time.
DELETING: The metastore service is undergoing deletion. It cannot be
used.
ERROR: The metastore service has encountered an error and cannot be
used. The metastore service should be deleted.
"""
STATE_UNSPECIFIED = 0
CREATING = 1
ACTIVE = 2
SUSPENDING = 3
SUSPENDED = 4
UPDATING = 5
DELETING = 6
ERROR = 7
class TierValueValuesEnum(_messages.Enum):
r"""The tier of the service.
Values:
TIER_UNSPECIFIED: The tier is not set.
DEVELOPER: The developer tier provides limited scalability and no fault
tolerance. Good for low-cost proof-of-concept.
ENTERPRISE: The enterprise tier provides multi-zone high availability,
and sufficient scalability for enterprise-level Dataproc Metastore
workloads.
"""
TIER_UNSPECIFIED = 0
DEVELOPER = 1
ENTERPRISE = 2
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""User-defined labels for the metastore service.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
artifactGcsUri = _messages.StringField(1)
createTime = _messages.StringField(2)
encryptionConfig = _messages.MessageField('EncryptionConfig', 3)
endpointUri = _messages.StringField(4)
hiveMetastoreConfig = _messages.MessageField('HiveMetastoreConfig', 5)
labels = _messages.MessageField('LabelsValue', 6)
maintenanceWindow = _messages.MessageField('MaintenanceWindow', 7)
metadataIntegration = _messages.MessageField('MetadataIntegration', 8)
metadataManagementActivity = _messages.MessageField('MetadataManagementActivity', 9)
name = _messages.StringField(10)
network = _messages.StringField(11)
networkConfig = _messages.MessageField('NetworkConfig', 12)
port = _messages.IntegerField(13, variant=_messages.Variant.INT32)
releaseChannel = _messages.EnumField('ReleaseChannelValueValuesEnum', 14)
state = _messages.EnumField('StateValueValuesEnum', 15)
stateMessage = _messages.StringField(16)
tier = _messages.EnumField('TierValueValuesEnum', 17)
uid = _messages.StringField(18)
updateTime = _messages.StringField(19)
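# Illustrative construction of the Service message above (placeholder values, not part
# of the generated API surface):
#   Service(name='projects/123/locations/us-central1/services/my-service',
#           port=9083,
#           tier=Service.TierValueValuesEnum.DEVELOPER)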
class SetIamPolicyRequest(_messages.Message):
r"""Request message for SetIamPolicy method.
Fields:
policy: REQUIRED: The complete policy to be applied to the resource. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
updateMask: OPTIONAL: A FieldMask specifying which fields of the policy to
modify. Only the fields in the mask will be modified. If no mask is
provided, the following default mask is used:paths: "bindings, etag"
"""
policy = _messages.MessageField('Policy', 1)
updateMask = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The Status type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by gRPC (https://github.com/grpc). Each Status message contains three
pieces of data: error code, error message, and error details.You can find
out more about this error model and how to work with it in the API Design
Guide (https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in | |
celcius, 250 rpm.",
"ordered_locus_name": "BCE_0159",
"gene_start": 160105,
"gene_end": 162642,
"strand": "F",
"cog": "R",
"species": "Bacillus cereus ATCC 10987",
"ncbi_taxonomy_id": 222523
},
{
"halflife": 4041.87006,
"std": 523.16592,
"std_over_avg": 0.1294366004,
"unit": "s",
"reference": [
{
"doi": "10.1186/s12864-016-3219-8"
}
],
"growth_medium": "TMA",
"ordered_locus_name": "MA0001",
"ar_cog": "arCOG00468",
"cog_class": "L",
"cog": "COG1474",
"species": "Methanosarcina acetivorans",
"ncbi_taxonomy_id": 188937
},
{
"halflife": 1436.4,
"species": "Lactococcus lactis subsp. lactis Il1403",
"ncbi_taxonomy_id": 272623,
"unit": "s",
"reference": [
{
"doi": "10.1371/journal.pone.0059059"
}
],
"doubling_time": {
"value": 6.301338005090412,
"unit": "h"
}
}
],
"ko_number": "K10725",
"protein_names": [
"ORC1-type DNA replication protein 1",
"ORC1-type DNA replication protein 1"
]
}
result = self.src.build_rna_observation(obj)
self.assertEqual(len(result), 23)
self.assertEqual(result[2]["environment"]["replicate"], "a1")
self.assertEqual(result[3]["environment"]["replicate"], "a3")
@unittest.skip("passed")
def test_build_rna_modification_entity(self):
null = None
obj = {
"amino_acid": "Ala",
"aa_code": "A",
"aa_name": "Alanine",
"kegg_orthology_id": "K14218",
"kegg_gene_name": "tRNA-Ala",
"definition": "tRNA Ala",
"kegg_pathway_id": "ko00970 ",
"kegg_pathway_name": "Aminoacyl-tRNA biosynthesis",
"modifications": [
{
"anticodon": "VGC",
"organism": "Escherichia coli",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCUAUAGCUCAGCDGGGAGAGCGCCUGCUUVGCACGCAGGAG7UCUGCGGTPCGAUCCCGCAUAGCUCCACCA",
"sequence_bpforms": "GGGGCUAUAGCUCAGC{8U}GGGAGAGCGCCUGCUU{502U}GCACGCAGGAG{7G}UCUGCGG{5U}{9U}CGAUCCCGCAUAGCUCCACCA",
"sequence_iupac": "GGGGCUAUAGCUCAGCUGGGAGAGCGCCUGCUUUGCACGCAGGAGGUCUGCGGUUCGAUCCCGCAUAGCUCCACCA",
"length": 76,
"number_of_modifications": 5,
"number_of_modified_a": 0,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 4,
"formula": "C726H832N289O538P76",
"molecular_weight": 24568.13291,
"charge": -77,
"canonical_formula": "C722H822N289O535P76",
"canonical_molecular_weight": 24462.01191,
"canonical_charge": -77,
"extra_formula": "C4H10O3",
"extra_molecular_weight": 106.12100000000001,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:09.439Z"
},
"ncbi_taxonomy_id": 562
},
{
"anticodon": "GGC",
"organism": "Escherichia coli",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCUAUAGCUCAGCDGGGAGAGCGCUUGCAUGGCAUGCAAGAG7UCAGCGGTPCGAUCCCGCUUAGCUCCACCA",
"sequence_bpforms": "GGGGCUAUAGCUCAGC{8U}GGGAGAGCGCUUGCAUGGCAUGCAAGAG{7G}UCAGCGG{5U}{9U}CGAUCCCGCUUAGCUCCACCA",
"sequence_iupac": "GGGGCUAUAGCUCAGCUGGGAGAGCGCUUGCAUGGCAUGCAAGAGGUCAGCGGUUCGAUCCCGCUUAGCUCCACCA",
"length": 76,
"number_of_modifications": 4,
"number_of_modified_a": 0,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 3,
"formula": "C726H831N293O533P76",
"molecular_weight": 24543.157909999998,
"charge": -76,
"canonical_formula": "C724H822N293O533P76",
"canonical_molecular_weight": 24510.06391,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:09.559Z"
},
"ncbi_taxonomy_id": 562
},
{
"anticodon": "AGC",
"organism": "Saccharomyces cerevisiae",
"organellum": "cytosolic",
"sequence_modomics": "GGGCGUGUKGCGUAGDCGGDAGCGCRCUCCCUUIGCOPGGGAGAGGDCUCCGGTPCGAUUCCGGACUCGUCCACCA",
"sequence_bpforms": "GGGCGUGU{1G}GCGUAG{8U}CGG{8U}AGCGC{22G}CUCCCUU{9A}GC{19A}{9U}GGGAGAGG{8U}CUCCGG{5U}{9U}CGAUUCCGGACUCGUCCACCA",
"sequence_iupac": "GGGCGUGUGGCGUAGUCGGUAGCGCGCUCCCUUAGCAUGGGAGAGGUCUCCGGUUCGAUUCCGGACUCGUCCACCA",
"length": 76,
"number_of_modifications": 10,
"number_of_modified_a": 2,
"number_of_modified_c": 0,
"number_of_modified_g": 2,
"number_of_modified_u": 6,
"formula": "C726H834N283O542P76",
"molecular_weight": 24550.102909999998,
"charge": -77,
"canonical_formula": "C721H820N285O540P76",
"canonical_molecular_weight": 24471.95191,
"canonical_charge": -77,
"extra_formula": "C5H14N-2O2",
"extra_molecular_weight": 78.15100000000001,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:10.684Z"
},
"ncbi_taxonomy_id": 4932
},
{
"anticodon": "UGC",
"organism": "Bacillus subtilis",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGAGCCUUAGCUCAGCDGGGAGAGCGCCUGCUU5GC=CGCAGGAG7UCAGCGGTPCGAUCCCGCUAGGCUCCACCA",
"sequence_bpforms": "GGAGCCUUAGCUCAGC{8U}GGGAGAGCGCCUGCUU{501U}GC{6A}CGCAGGAG{7G}UCAGCGG{5U}{9U}CGAUCCCGCUAGGCUCCACCA",
"sequence_iupac": "GGAGCCUUAGCUCAGCUGGGAGAGCGCCUGCUUUGCACGCAGGAGGUCAGCGGUUCGAUCCCGCUAGGCUCCACCA",
"length": 76,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 4,
"formula": "C726H836N290O535P76",
"molecular_weight": 24538.174909999998,
"charge": -76,
"canonical_formula": "C722H823N290O534P76",
"canonical_molecular_weight": 24461.02791,
"canonical_charge": -77,
"extra_formula": "C4H13O",
"extra_molecular_weight": 77.14699999999999,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:11.889Z"
},
"ncbi_taxonomy_id": 1423
},
{
"anticodon": "UGC",
"organism": "Mycoplasma capricolum",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCU4AGCUCAGCDGGGAGAGCACCUGCCUUGC=CGCAGGGG7UCGACGGUPCGAUCCCGUUAGGGUCCACCA",
"sequence_bpforms": "GGGCCCU{74U}AGCUCAGC{8U}GGGAGAGCACCUGCCUUGC{6A}CGCAGGGG{7G}UCGACGGU{9U}CGAUCCCGUUAGGGUCCACCA",
"sequence_iupac": "GGGCCCUUAGCUCAGCUGGGAGAGCACCUGCCUUGCACGCAGGGGGUCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"length": 76,
"number_of_modifications": 5,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 3,
"formula": "C724H832N290O534P76S",
"molecular_weight": 24526.18191,
"charge": -76,
"canonical_formula": "C722H823N290O535P76",
"canonical_molecular_weight": 24477.02691,
"canonical_charge": -77,
"extra_formula": "C2H9O-1S",
"extra_molecular_weight": 49.155,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:11.918Z"
},
"ncbi_taxonomy_id": 2095
},
{
"anticodon": "GGC",
"organism": "Bacillus subtilis",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGGCCAUAGCUCAGCDGGGAGAGCGCUACGCUGGCAGCGUAGAG7UCAGGGGTPCGAGCCCCCUUGGCUCCACCA",
"sequence_bpforms": "GGGGCCAUAGCUCAGC{8U}GGGAGAGCGCUACGCUGGCAGCGUAGAG{7G}UCAGGGG{5U}{9U}CGAGCCCCCUUGGCUCCACCA",
"sequence_iupac": "GGGGCCAUAGCUCAGCUGGGAGAGCGCUACGCUGGCAGCGUAGAGGUCAGGGGUUCGAGCCCCCUUGGCUCCACCA",
"length": 76,
"number_of_modifications": 4,
"number_of_modified_a": 0,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 3,
"formula": "C727H834N298O532P76",
"molecular_weight": 24612.228909999998,
"charge": -76,
"canonical_formula": "C725H825N298O532P76",
"canonical_molecular_weight": 24579.13491,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:11.947Z"
},
"ncbi_taxonomy_id": 1423
},
{
"anticodon": "CGC",
"organism": "Halobacterium salinarum",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCUCGUAGAUCAGCGGUAGAUCRCUUCCUUCGCAAGGAAGAGGCC?UGGG]PBOAAUCCCAGCGAGUCCACCA",
"sequence_bpforms": "GGGCUCGUAGAUCAGCGGUAGAUC{22G}CUUCCUUCGCAAGGAAGAGGCC{5C}UGGG{19U}{9U}{0C}{19A}AAUCCCAGCGAGUCCACCA",
"sequence_iupac": "GGGCUCGUAGAUCAGCGGUAGAUCGCUUCCUUCGCAAGGAAGAGGCCCUGGGUUCAAAUCCCAGCGAGUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 2,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C720H823N288O524P75",
"molecular_weight": 24218.02815,
"charge": -76,
"canonical_formula": "C714H812N289O523P75",
"canonical_molecular_weight": 24132.88215,
"canonical_charge": -76,
"extra_formula": "C6H11N-1O",
"extra_molecular_weight": 85.146,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.398Z"
},
"ncbi_taxonomy_id": 2242
},
{
"anticodon": "CGC",
"organism": "Haloferax volcanii",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCUCGUAGAUCAGUGGCAGAUCRCUUCCUUCGCAAGGAAGAGGC??GGGG]PBOAAUCCCCGCGAGUCCACCA",
"sequence_bpforms": "GGGCUCGUAGAUCAGUGGCAGAUC{22G}CUUCCUUCGCAAGGAAGAGGC{5C}{5C}GGGG{19U}{9U}{0C}{19A}AAUCCCCGCGAGUCCACCA",
"sequence_iupac": "GGGCUCGUAGAUCAGUGGCAGAUCGCUUCCUUCGCAAGGAAGAGGCCCGGGGUUCAAAUCCCCGCGAGUCCACCA",
"length": 75,
"number_of_modifications": 7,
"number_of_modified_a": 1,
"number_of_modified_c": 3,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C721H826N289O524P75",
"molecular_weight": 24247.07015,
"charge": -76,
"canonical_formula": "C714H813N290O523P75",
"canonical_molecular_weight": 24147.89715,
"canonical_charge": -76,
"extra_formula": "C7H13N-1O",
"extra_molecular_weight": 99.17299999999999,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.428Z"
},
"ncbi_taxonomy_id": 2246
},
{
"anticodon": "GGC",
"organism": "Haloferax volcanii",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCUCGUAGAUCAGGGGUAGAUCACUCCCUUGGCAUGGGAGAGGC??CGGG]PBOAAUCCCGGCGAGUCCACCA",
"sequence_bpforms": "GGGCUCGUAGAUCAGGGGUAGAUCACUCCCUUGGCAUGGGAGAGGC{5C}{5C}CGGG{19U}{9U}{0C}{19A}AAUCCCGGCGAGUCCACCA",
"sequence_iupac": "GGGCUCGUAGAUCAGGGGUAGAUCACUCCCUUGGCAUGGGAGAGGCCCCGGGUUCAAAUCCCGGCGAGUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 3,
"number_of_modified_g": 0,
"number_of_modified_u": 2,
"formula": "C720H822N291O525P75",
"molecular_weight": 24275.04015,
"charge": -76,
"canonical_formula": "C715H813N292O524P75",
"canonical_molecular_weight": 24203.92115,
"canonical_charge": -76,
"extra_formula": "C5H9N-1O",
"extra_molecular_weight": 71.119,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.457Z"
},
"ncbi_taxonomy_id": 2246
},
{
"anticodon": "UGC",
"organism": "Haloferax volcanii",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCAUAGCUCAGUGGUAGAGULCCUCCUUUGCAAGGAGGAUGC??AGGG]PBGAAUCCCUGUGGGUCCACCA",
"sequence_bpforms": "GGGCCCAUAGCUCAGUGGUAGAGU{2G}CCUCCUUUGCAAGGAGGAUGC{5C}{5C}AGGG{19U}{9U}{0C}GAAUCCCUGUGGGUCCACCA",
"sequence_iupac": "GGGCCCAUAGCUCAGUGGUAGAGUGCCUCCUUUGCAAGGAGGAUGCCCAGGGUUCGAAUCCCUGUGGGUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 0,
"number_of_modified_c": 3,
"number_of_modified_g": 1,
"number_of_modified_u": 2,
"formula": "C718H820N285O528P75",
"molecular_weight": 24212.95715,
"charge": -76,
"canonical_formula": "C713H810N285O528P75",
"canonical_molecular_weight": 24142.82215,
"canonical_charge": -76,
"extra_formula": "C5H10",
"extra_molecular_weight": 70.135,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.483Z"
},
"ncbi_taxonomy_id": 2246
},
{
"anticodon": "UGC",
"organism": "Mycoplasma mycoides",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCUUAGCUCAGCDGGGAGAGCACCUGCCUUGC=CGCAGGGG7UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_bpforms": "GGGCCCUUAGCUCAGC{8U}GGGAGAGCACCUGCCUUGC{6A}CGCAGGGG{7G}UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_iupac": "GGGCCCUUAGCUCAGCUGGGAGAGCACCUGCCUUGCACGCAGGGGGUCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"length": 76,
"number_of_modifications": 3,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 1,
"formula": "C724H832N290O535P76",
"molecular_weight": 24510.120909999998,
"charge": -76,
"canonical_formula": "C722H823N290O535P76",
"canonical_molecular_weight": 24477.02691,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:31:13.512Z"
},
"ncbi_taxonomy_id": 2102
},
{
"anticodon": "UGC",
"organism": "Mycoplasma mycoides",
"organellum": "prokaryotic cytosol",
"sequence_modomics": "GGGCCCUUAGCUCAGCDGGGAGAGCACCUGCCUUGC=CGCAGGGG7UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_bpforms": "GGGCCCUUAGCUCAGC{8U}GGGAGAGCACCUGCCUUGC{6A}CGCAGGGG{7G}UCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"sequence_iupac": "GGGCCCUUAGCUCAGCUGGGAGAGCACCUGCCUUGCACGCAGGGGGUCGACGGUUCGAUCCCGUUAGGGUCCACCA",
"length": 76,
"number_of_modifications": 3,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 1,
"number_of_modified_u": 1,
"formula": "C724H832N290O535P76",
"molecular_weight": 24510.120909999998,
"charge": -76,
"canonical_formula": "C722H823N290O535P76",
"canonical_molecular_weight": 24477.02691,
"canonical_charge": -77,
"extra_formula": "C2H9",
"extra_molecular_weight": 33.094,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.158Z"
},
"ncbi_taxonomy_id": 2102
},
{
"anticodon": "IGC",
"organism": "Pichia jadinii",
"organellum": "cytosolic",
"sequence_modomics": "GGGCGUGUKGCGUAGDDGGDAGCGCRPUCGCUUIGCOPGCGAAAGGDCUCCGGTPCG\"CUCCGGACUCGUCCACCA",
"sequence_bpforms": "GGGCGUGU{1G}GCGUAG{8U}{8U}GG{8U}AGCGC{22G}{9U}UCGCUU{9A}GC{19A}{9U}GCGAAAGG{8U}CUCCGG{5U}{9U}CG{1A}CUCCGGACUCGUCCACCA",
"sequence_iupac": "GGGCGUGUGGCGUAGUUGGUAGCGCGUUCGCUUAGCAUGCGAAAGGUCUCCGGUUCGACUCCGGACUCGUCCACCA",
"length": 76,
"number_of_modifications": 13,
"number_of_modified_a": 3,
"number_of_modified_c": 0,
"number_of_modified_g": 2,
"number_of_modified_u": 8,
"formula": "C727H837N282O542P76",
"molecular_weight": 24551.13091,
"charge": -77,
"canonical_formula": "C721H819N284O540P76",
"canonical_molecular_weight": 24456.93691,
"canonical_charge": -77,
"extra_formula": "C6H18N-2O2",
"extra_molecular_weight": 94.194,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.281Z"
},
"ncbi_taxonomy_id": null
},
{
"anticodon": "IGC",
"organism": "Bombyx mori",
"organellum": "cytosolic",
"sequence_modomics": "GGGGGCGUALCUCAGADGGUAGAGCRCUCGCJUIGCOP#PGAGAG7UA?CGGGAPCG\"UACCCGGCGCCUCCACCA",
"sequence_bpforms": "GGGGGCGUA{2G}CUCAGA{8U}GGUAGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}{9U}GAGAG{7G}UA{5C}CGGGA{9U}CG{1A}UACCCGGCGCCUCCACCA",
"sequence_iupac": "GGGGGCGUAGCUCAGAUGGUAGAGCGCUCGCUUAGCAUGUGAGAGGUACCGGGAUCGAUACCCGGCGCCUCCACCA",
"length": 76,
"number_of_modifications": 13,
"number_of_modified_a": 3,
"number_of_modified_c": 1,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C735H845N297O533P76",
"molecular_weight": 24721.39691,
"charge": -76,
"canonical_formula": "C726H824N299O531P76",
"canonical_molecular_weight": 24588.14591,
"canonical_charge": -77,
"extra_formula": "C9H21N-2O2",
"extra_molecular_weight": 133.251,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.310Z"
},
"ncbi_taxonomy_id": 7091
},
{
"anticodon": "IGC",
"organism": "<NAME>",
"organellum": "cytosolic",
"sequence_modomics": "GGGGGCGUALCUCAGADGGUAGAGCRCUCGCJUIGCOP#PGAGAG7UA?CGGGAPCG\"UACCCGGCGCCUCCACCA",
"sequence_bpforms": "GGGGGCGUA{2G}CUCAGA{8U}GGUAGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}{9U}GAGAG{7G}UA{5C}CGGGA{9U}CG{1A}UACCCGGCGCCUCCACCA",
"sequence_iupac": "GGGGGCGUAGCUCAGAUGGUAGAGCGCUCGCUUAGCAUGUGAGAGGUACCGGGAUCGAUACCCGGCGCCUCCACCA",
"length": 76,
"number_of_modifications": 13,
"number_of_modified_a": 3,
"number_of_modified_c": 1,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C735H845N297O533P76",
"molecular_weight": 24721.39691,
"charge": -76,
"canonical_formula": "C726H824N299O531P76",
"canonical_molecular_weight": 24588.14591,
"canonical_charge": -77,
"extra_formula": "C9H21N-2O2",
"extra_molecular_weight": 133.251,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.345Z"
},
"ncbi_taxonomy_id": 7091
},
{
"anticodon": "IGC",
"organism": "Homo sapiens",
"organellum": "cytosolic",
"sequence_modomics": "GGGGGAUUALCUCAAADGGDAGAGCRCUCGCJUIGCOP#CGAGAG7UAGCGGGAPCG\"UGCCCGCAUCCUCCACCA",
"sequence_bpforms": "GGGGGAUUA{2G}CUCAAA{8U}GG{8U}AGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}CGAGAG{7G}UAGCGGGA{9U}CG{1A}UGCCCGCAUCCUCCACCA",
"sequence_iupac": "GGGGGAUUAGCUCAAAUGGUAGAGCGCUCGCUUAGCAUGCGAGAGGUAGCGGGAUCGAUGCCCGCAUCCUCCACCA",
"length": 76,
"number_of_modifications": 12,
"number_of_modified_a": 3,
"number_of_modified_c": 0,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C734H844N296O532P76",
"molecular_weight": 24678.371909999998,
"charge": -76,
"canonical_formula": "C726H823N298O530P76",
"canonical_molecular_weight": 24557.13191,
"canonical_charge": -77,
"extra_formula": "C8H21N-2O2",
"extra_molecular_weight": 121.24,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.375Z"
},
"ncbi_taxonomy_id": 9606
},
{
"anticodon": "IGC",
"organism": "Homo sapiens",
"organellum": "cytosolic",
"sequence_modomics": "GGGGAAUUALCUCAAADGGDAGAGCRCUCGCJUIGCOP#CGAGAG7UAGCGGGAPCG\"UGCCCGCAUUCUCCACCA",
"sequence_bpforms": "GGGGAAUUA{2G}CUCAAA{8U}GG{8U}AGAGC{22G}CUCGC{0U}U{9A}GC{19A}{9U}{0G}CGAGAG{7G}UAGCGGGA{9U}CG{1A}UGCCCGCAUUCUCCACCA",
"sequence_iupac": "GGGGAAUUAGCUCAAAUGGUAGAGCGCUCGCUUAGCAUGCGAGAGGUAGCGGGAUCGAUGCCCGCAUUCUCCACCA",
"length": 76,
"number_of_modifications": 12,
"number_of_modified_a": 3,
"number_of_modified_c": 0,
"number_of_modified_g": 4,
"number_of_modified_u": 5,
"formula": "C734H843N295O532P76",
"molecular_weight": 24663.35691,
"charge": -76,
"canonical_formula": "C726H822N297O530P76",
"canonical_molecular_weight": 24542.11691,
"canonical_charge": -77,
"extra_formula": "C8H21N-2O2",
"extra_molecular_weight": 121.24,
"extra_charge": 1,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:47.403Z"
},
"ncbi_taxonomy_id": 9606
},
{
"anticodon": "UGC",
"organism": "Neurospora crassa",
"organellum": "mitochondrial",
"sequence_modomics": "GGGGGUAUAGUAUAADUGGDAGUACAGCAAUCUUGCUCANUGCUUGU?AAGGTPCAAAUCCUUGUAUCUCCACCA",
"sequence_bpforms": "GGGGGUAUAGUAUAA{8U}UGG{8U}AGUACAGCAAUCUUGCUCA[id: \"xU\"]UGCUUGU{5C}AAGG{5U}{9U}CAAAUCCUUGUAUCUCCACCA",
"sequence_iupac": "GGGGGUAUAGUAUAAUUGGUAGUACAGCAAUCUUGCUCANUGCUUGUCAAGGUUCAAAUCCUUGUAUCUCCACCA",
"length": 75,
"number_of_modifications": 6,
"number_of_modified_a": 0,
"number_of_modified_c": 1,
"number_of_modified_g": 0,
"number_of_modified_u": 4,
"formula": null,
"molecular_weight": null,
"charge": null,
"canonical_formula": null,
"canonical_molecular_weight": null,
"canonical_charge": null,
"extra_formula": null,
"extra_molecular_weight": null,
"extra_charge": null,
"bpforms_errors": "MODOMICS sequence uses monomeric forms xU",
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:33:56.216Z"
},
"ncbi_taxonomy_id": 5141
},
{
"anticodon": "UGC",
"organism": "Bos taurus",
"organellum": "mitochondrial",
"sequence_modomics": "GAGGAUUU\"LCUUAAUUAAAGULGPUGAUUUGCAUPCAAUUGAUGUAAGGUGPAGUCUUGCAAUCCUUACCA",
"sequence_bpforms": "GAGGAUUU{1A}{2G}CUUAAUUAAAGU{2G}G{9U}UGAUUUGCAU{9U}CAAUUGAUGUAAGGUG{9U}AGUCUUGCAAUCCUUACCA",
"sequence_iupac": "GAGGAUUUAGCUUAAUUAAAGUGGUUGAUUUGCAUUCAAUUGAUGUAAGGUGUAGUCUUGCAAUCCUUACCA",
"length": 72,
"number_of_modifications": 6,
"number_of_modified_a": 1,
"number_of_modified_c": 0,
"number_of_modified_g": 2,
"number_of_modified_u": 3,
"formula": "C687H772N261O512P72",
"molecular_weight": 23107.15886,
"charge": -73,
"canonical_formula": "C684H766N261O512P72",
"canonical_molecular_weight": 23065.077859999998,
"canonical_charge": -73,
"extra_formula": "C3H6",
"extra_molecular_weight": 42.081,
"extra_charge": 0,
"bpforms_errors": null,
"reference": {
"doi": "10.1093/nar/gkx1030"
},
"last_modified": {
"$date": "2020-04-28T23:34:00.410Z"
},
"ncbi_taxonomy_id": 9913
},
{
"anticodon": "UGC",
"organism": "Lactococcus | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Fri Mar 6 15:54:35 2020 by generateDS.py version 2.35.15.
# Python 3.8.1 (v3.8.1:1b293b6006, Dec 18 2019, 14:08:53) [Clang 6.0 (clang-600.0.57)]
#
# Command line options:
# ('--no-namespace-defs', '')
# ('-o', './python/dgds_service_v5.py')
#
# Command line arguments:
# ./schemas/DGDSService_v5.xsd
#
# Command line:
# /Users/danielkobina/Documents/Open/.sandbox/bin/generateDS --no-namespace-defs -o "./python/dgds_service_v5.py" ./schemas/DGDSService_v5.xsd
#
# Current working directory (os.getcwd()):
# 2020-02
#
from six.moves import zip_longest
import os
import sys
import re as re_
import base64
import datetime as datetime_
import decimal as decimal_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
SaveElementTreeNode = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
try:
if isinstance(infile, os.PathLike):
infile = os.path.join(infile)
except AttributeError:
pass
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
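# Usage sketch for the helper above (hypothetical file name):
#   doc = parsexml_('DGDSService_sample.xml')
#   root = doc.getroot()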
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
# Additionally, the generatedsnamespaces module can contain a python
# dictionary named GenerateDSNamespaceTypePrefixes that associates element
# types with the namespace prefixes that are to be added to the
# "xsi:type" attribute value. See the exportAttributes method of
# any generated element type and the generation of "xsi:type" for an
# example of the use of this table.
# An example table:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceTypePrefixes = {
# "ElementtypeC": "aaa:",
# "ElementtypeD": "bbb:",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
try:
from generatedsnamespaces import GenerateDSNamespaceTypePrefixes as GenerateDSNamespaceTypePrefixes_
except ImportError:
GenerateDSNamespaceTypePrefixes_ = {}
#
# You can replace the following class definition by defining an
# importable module named "generatedscollector" containing a class
# named "GdsCollector". See the default class definition below for
# clues about the possible content of that class.
#
try:
from generatedscollector import GdsCollector as GdsCollector_
except ImportError:
class GdsCollector_(object):
def __init__(self, messages=None):
if messages is None:
self.messages = []
else:
self.messages = messages
def add_message(self, msg):
self.messages.append(msg)
def get_messages(self):
return self.messages
def clear_messages(self):
self.messages = []
def print_messages(self):
for msg in self.messages:
print("Warning: {}".format(msg))
def write_messages(self, outstream):
for msg in self.messages:
outstream.write("Warning: {}\n".format(msg))
#
# The super-class for enum types
#
try:
from enum import Enum
except ImportError:
Enum = object
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
__hash__ = object.__hash__
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
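        # The offset pattern above matches trailing UTC offsets such as "+05:30",
        # "-08:00" or "+14:00"; offsets beyond +/-14:00 do not match.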
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_parse_string(self, input_data, node=None, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_parse_integer(self, input_data, node=None, input_name=''):
try:
ival = int(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires integer value: %s' % exp)
return ival
def gds_validate_integer(self, input_data, node=None, input_name=''):
try:
value = int(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires integer value')
return value
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integer values')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_parse_float(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires float or double value: %s' % exp)
return fval_
def gds_validate_float(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires float value')
return value
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of float values')
return values
        def gds_format_decimal(self, input_data, input_name=''):
            # Only strip trailing zeros from a fractional part ('100' must stay '100').
            svalue = '%s' % input_data
            return svalue.rstrip('0').rstrip('.') if '.' in svalue else svalue
def gds_parse_decimal(self, input_data, node=None, input_name=''):
try:
decimal_value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return decimal_value
def gds_validate_decimal(self, input_data, node=None, input_name=''):
try:
value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return value
def gds_format_decimal_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_decimal_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
decimal_.Decimal(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of decimal values')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_parse_double(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires double or float value: %s' % exp)
return fval_
def gds_validate_double(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires double or float value')
return value
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(
node, 'Requires sequence of double or float values')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_parse_boolean(self, input_data, node=None, input_name=''):
if input_data in ('true', '1'):
bval = True
elif input_data in ('false', '0'):
bval = False
else:
raise_parse_error(node, 'Requires boolean value')
return bval
def gds_validate_boolean(self, input_data, node=None, input_name=''):
if input_data not in (True, 1, False, 0, ):
raise_parse_error(
node,
'Requires boolean value '
'(one of True, 1, False, 0)')
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
                if value not in ('true', '1', 'false', '0'):
                    raise_parse_error(
                        node,
                        'Requires sequence of boolean values '
                        '(one of true, 1, false, 0)')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
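        # Illustrative example: a datetime of 2021-03-01 12:30:00 with a UTC tzinfo is
        # rendered as '2021-03-01T12:30:00Z'; a naive datetime gets no offset suffix.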
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
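        # Illustrative example: gds_parse_datetime('2021-03-01T12:30:00+02:00') returns a
        # timezone-aware datetime with a fixed +02:00 offset; a trailing 'Z' maps to UTC.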
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
os.path.dirname(output_dir))
]
found = [os.path.exists(file) for file in lookup_table]
if not any(found):
return None
return lookup_table[found.index(True)]
def _parse_parameter_file(self):
"""
Parses the parameter file and establishes the various
dictionaries.
"""
self._parse_header_file()
# Let's read the file
hfn = os.path.join(self.output_dir, "Header")
self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
# the 'inputs' file is now optional
self._parse_cparams()
self._parse_fparams()
def _parse_cparams(self):
if self.cparam_filename is None:
return
for line in (line.split("#")[0].strip() for line in open(self.cparam_filename)):
try:
param, vals = (s.strip() for s in line.split("="))
except ValueError:
continue
if param == "amr.n_cell":
vals = self.domain_dimensions = np.array(vals.split(), dtype="int32")
# For 1D and 2D simulations in BoxLib usually only the relevant
# dimensions have a specified number of zones, but yt requires
# domain_dimensions to have three elements, with 1 in the additional
# slots if we're not in 3D, so append them as necessary.
if len(vals) == 1:
vals = self.domain_dimensions = np.array([vals[0], 1, 1])
elif len(vals) == 2:
vals = self.domain_dimensions = np.array([vals[0], vals[1], 1])
elif param == "amr.ref_ratio":
vals = self.refine_by = int(vals[0])
elif param == "Prob.lo_bc":
vals = tuple(p == "1" for p in vals.split())
assert len(vals) == self.dimensionality
periodicity = [False, False, False] # default to non periodic
periodicity[: self.dimensionality] = vals # fill in ndim parsed values
self._periodicity = tuple(periodicity)
elif param == "castro.use_comoving":
vals = self.cosmological_simulation = int(vals)
else:
try:
vals = _guess_pcast(vals)
except (IndexError, ValueError):
# hitting an empty string or a comment
vals = None
self.parameters[param] = vals
if getattr(self, "cosmological_simulation", 0) == 1:
self.omega_lambda = self.parameters["comoving_OmL"]
self.omega_matter = self.parameters["comoving_OmM"]
self.hubble_constant = self.parameters["comoving_h"]
a_file = open(os.path.join(self.output_dir, "comoving_a"))
line = a_file.readline().strip()
a_file.close()
self.current_redshift = 1 / float(line) - 1
else:
self.current_redshift = 0.0
self.omega_lambda = 0.0
self.omega_matter = 0.0
self.hubble_constant = 0.0
self.cosmological_simulation = 0
def _parse_fparams(self):
"""
Parses the fortran parameter file for Orion. Most of this will
be useless, but this is where it keeps mu = mass per
particle/m_hydrogen.
"""
if self.fparam_filename is None:
return
for line in (l for l in open(self.fparam_filename) if "=" in l):
param, vals = (v.strip() for v in line.split("="))
# Now, there are a couple different types of parameters.
# Some will be where you only have floating point values, others
# will be where things are specified as string literals.
# Unfortunately, we're also using Fortran values, which will have
# things like 1.d-2 which is pathologically difficult to parse if
# your C library doesn't include 'd' in its locale for strtod.
# So we'll try to determine this.
vals = vals.split()
if any(_scinot_finder.match(v) for v in vals):
vals = [float(v.replace("D", "e").replace("d", "e")) for v in vals]
if len(vals) == 1:
vals = vals[0]
self.parameters[param] = vals
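    # Illustrative example (parameter name hypothetical): a line such as
    # "small_dens = 1.d-2" ends up as self.parameters["small_dens"] = 0.01,
    # since the Fortran 'd'/'D' exponent marker is rewritten to 'e' before float().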
def _parse_header_file(self):
"""
We parse the Boxlib header, which we use as our basis. Anything in the
inputs file will override this, but the inputs file is not strictly
necessary for orientation of the data in space.
"""
# Note: Python uses a read-ahead buffer, so using next(), which would
# be my preferred solution, won't work here. We have to explicitly
# call readline() if we want to end up with an offset at the very end.
# Fortunately, elsewhere we don't care about the offset, so we're fine
# everywhere else using iteration exclusively.
header_file = open(os.path.join(self.output_dir, "Header"))
self.orion_version = header_file.readline().rstrip()
n_fields = int(header_file.readline())
self._field_list = [header_file.readline().strip() for i in range(n_fields)]
self.dimensionality = int(header_file.readline())
self.current_time = float(header_file.readline())
# This is traditionally a index attribute, so we will set it, but
# in a slightly hidden variable.
self._max_level = int(header_file.readline())
for side, init in zip(["left", "right"], [np.zeros, np.ones]):
domain_edge = init(3, dtype="float64")
domain_edge[: self.dimensionality] = header_file.readline().split()
setattr(self, f"domain_{side}_edge", domain_edge)
ref_factors = np.array(header_file.readline().split(), dtype="int64")
if ref_factors.size == 0:
# We use a default of two, as Nyx doesn't always output this value
ref_factors = [2] * (self._max_level + 1)
# We can't vary refinement factors based on dimension, or whatever else
# they are varied on. In one curious thing, I found that some Castro 3D
# data has only two refinement factors, which I don't know how to
# understand.
self.ref_factors = ref_factors
if np.unique(ref_factors).size > 1:
# We want everything to be a multiple of this.
self.refine_by = min(ref_factors)
# Check that they're all multiples of the minimum.
if not all(
float(rf) / self.refine_by == int(float(rf) / self.refine_by)
for rf in ref_factors
):
raise RuntimeError
base_log = np.log2(self.refine_by)
self.level_offsets = [0] # level 0 has to have 0 offset
lo = 0
for rf in self.ref_factors:
lo += int(np.log2(rf) / base_log) - 1
self.level_offsets.append(lo)
# assert(np.unique(ref_factors).size == 1)
else:
self.refine_by = ref_factors[0]
self.level_offsets = [0 for l in range(self._max_level + 1)]
# Now we read the global index space, to get
index_space = header_file.readline()
# This will be of the form:
# ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
# So note that if we split it all up based on spaces, we should be
# fine, as long as we take the first two entries, which correspond to
# the root level. I'm not 100% pleased with this solution.
root_space = index_space.replace("(", "").replace(")", "").split()[:2]
start = np.array(root_space[0].split(","), dtype="int64")
stop = np.array(root_space[1].split(","), dtype="int64")
dd = np.ones(3, dtype="int64")
dd[: self.dimensionality] = stop - start + 1
self.domain_dimensions = dd
# Skip timesteps per level
header_file.readline()
self._header_mesh_start = header_file.tell()
# Skip the cell size information per level - we'll get this later
for _ in range(self._max_level + 1):
header_file.readline()
# Get the geometry
next_line = header_file.readline()
if len(next_line.split()) == 1:
coordinate_type = int(next_line)
else:
coordinate_type = 0
known_types = {0: "cartesian", 1: "cylindrical", 2: "spherical"}
try:
self.geometry = known_types[coordinate_type]
except KeyError as err:
raise ValueError(f"Unknown BoxLib coord_type `{coordinate_type}`.") from err
if self.geometry == "cylindrical":
dre = self.domain_right_edge
dre[2] = 2.0 * np.pi
self.domain_right_edge = dre
def _set_code_unit_attributes(self):
setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
setdefaultattr(self, "velocity_unit", self.quan(1.0, "cm/s"))
@parallel_root_only
def print_key_parameters(self):
for a in [
"current_time",
"domain_dimensions",
"domain_left_edge",
"domain_right_edge",
]:
if not hasattr(self, a):
mylog.error("Missing %s in parameter file definition!", a)
continue
v = getattr(self, a)
mylog.info("Parameters: %-25s = %s", a, v)
def relative_refinement(self, l0, l1):
offset = self.level_offsets[l1] - self.level_offsets[l0]
return self.refine_by ** (l1 - l0 + offset)
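    # Illustrative example: with uniform refinement (all level offsets 0) and
    # refine_by = 2, relative_refinement(0, 2) returns 2 ** 2 = 4.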
class OrionHierarchy(BoxlibHierarchy):
def __init__(self, ds, dataset_type="orion_native"):
BoxlibHierarchy.__init__(self, ds, dataset_type)
self._read_particles()
# self.io = IOHandlerOrion
def _detect_output_fields(self):
# This is all done in _parse_header_file
self.field_list = [("boxlib", f) for f in self.dataset._field_list]
self.field_indexes = {f[1]: i for i, f in enumerate(self.field_list)}
# There are times when field_list may change. We copy it here to
# avoid that possibility.
self.field_order = [f for f in self.field_list]
# look for particle fields
self.particle_filename = None
for particle_filename in ["StarParticles", "SinkParticles"]:
fn = os.path.join(self.ds.output_dir, particle_filename)
if os.path.exists(fn):
self.particle_filename = fn
if self.particle_filename is None:
return
pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
self.field_list.extend(pfield_list)
def _read_particles(self):
"""
Reads in particles and assigns them to grids. Will search for
Star particles, then sink particles if no star particle file
is found, and finally will simply note that no particles are
found if neither works. To add a new Orion particle type,
simply add it to the if/elif/else block.
"""
self.grid_particle_count = np.zeros(len(self.grids))
if self.particle_filename is not None:
self._read_particle_file(self.particle_filename)
def _read_particle_file(self, fn):
"""actually reads the orion particle data file itself."""
if not os.path.exists(fn):
return
with open(fn) as f:
lines = f.readlines()
self.num_stars = int(lines[0].strip()[0])
for num, line in enumerate(lines[1:]):
particle_position_x = float(line.split(" ")[1])
particle_position_y = float(line.split(" ")[2])
particle_position_z = float(line.split(" ")[3])
coord = [particle_position_x, particle_position_y, particle_position_z]
# for each particle, determine which grids contain it
# copied from object_finding_mixin.py
mask = np.ones(self.num_grids)
for i in range(len(coord)):
np.choose(
np.greater(self.grid_left_edge.d[:, i], coord[i]),
(mask, 0),
mask,
)
np.choose(
np.greater(self.grid_right_edge.d[:, i], coord[i]),
(0, mask),
mask,
)
ind = np.where(mask == 1)
selected_grids = self.grids[ind]
# in orion, particles always live on the finest level.
                    # so, we want to assign the particle to
<filename>PyU4V/common.py
# The MIT License (MIT)
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import csv
import logging
import time
import six
from PyU4V.utils import config_handler
from PyU4V.utils import constants
from PyU4V.utils import exception
LOG = logging.getLogger(__name__)
# HTTP constants
GET = constants.GET
POST = constants.POST
PUT = constants.PUT
DELETE = constants.DELETE
# U4V constants
STATUS_200 = constants.STATUS_200
STATUS_201 = constants.STATUS_201
STATUS_202 = constants.STATUS_202
STATUS_204 = constants.STATUS_204
STATUS_401 = constants.STATUS_401
STATUS_404 = constants.STATUS_404
# Job constants
INCOMPLETE_LIST = constants.INCOMPLETE_LIST
CREATED = constants.CREATED
SUCCEEDED = constants.SUCCEEDED
class CommonFunctions(object):
def __init__(self, request, interval, retries, u4v_version):
self.request = request
self.interval = interval
self.retries = retries
self.U4V_VERSION = u4v_version
def wait_for_job_complete(self, job):
"""Given the job wait for it to complete.
:param job: the job dict
:returns: rc -- int, result -- string, status -- string,
task -- list of dicts detailing tasks in the job
:raises: VolumeBackendAPIException
"""
res, tasks = None, None
if job['status'].lower() == SUCCEEDED:
try:
res, tasks = job['result'], job['task']
except KeyError:
pass
return 0, res, job['status'], tasks
def _wait_for_job_complete():
# Called at an interval until the job is finished.
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['wait_for_job_called']:
is_complete, result, rc, status, task = (
self._is_job_finished(job_id))
if is_complete is True:
kwargs['wait_for_job_called'] = True
kwargs['rc'], kwargs['status'] = rc, status
kwargs['result'], kwargs['task'] = result, task
except Exception:
exception_message = "Issue encountered waiting for job."
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
return kwargs
job_id = job['jobId']
kwargs = {'retries': 0, 'wait_for_job_called': False,
'rc': 0, 'result': None}
while not kwargs['wait_for_job_called']:
time.sleep(self.interval)
kwargs = _wait_for_job_complete()
if kwargs['retries'] > self.retries:
LOG.error("_wait_for_job_complete failed after "
"%(retries)d tries.", {'retries': kwargs['retries']})
kwargs['rc'], kwargs['result'] = -1, kwargs['result']
break
LOG.debug("Return code is: %(rc)lu. Result is %(res)s.",
{'rc': kwargs['rc'], 'res': kwargs['result']})
return (kwargs['rc'], kwargs['result'],
kwargs['status'], kwargs['task'])
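    # Illustrative usage: for an async call that returned a job dict,
    #   rc, result, status, task = self.wait_for_job_complete(job)
    # rc is 0 on success and -1 once self.retries is exceeded.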
def get_job_by_id(self, job_id):
"""Get details of a specific job.
:param job_id: the job id
"""
job_url = "/{}/system/job/{}".format(self.U4V_VERSION, job_id)
return self.get_request(job_url, 'job')
def _is_job_finished(self, job_id):
"""Check if the job is finished.
:param job_id: the id of the job
:returns: complete -- bool, result -- string,
rc -- int, status -- string, task -- list of dicts
"""
complete, rc, status, result, task = False, 0, None, None, None
job = self.get_job_by_id(job_id)
if job:
status = job['status']
try:
result, task = job['result'], job['task']
except KeyError:
pass
if status.lower() == SUCCEEDED:
complete = True
elif status.lower() in INCOMPLETE_LIST:
complete = False
else:
rc, complete = -1, True
return complete, result, rc, status, task
@staticmethod
def check_status_code_success(operation, status_code, message):
"""Check if a status code indicates success.
:param operation: the operation
:param status_code: the status code
:param message: the server response
:raises: VolumeBackendAPIException
"""
if status_code not in [STATUS_200, STATUS_201,
STATUS_202, STATUS_204]:
exception_message = (
'Error {operation}. The status code received '
'is {sc} and the message is {message}.'.format(
operation=operation, sc=status_code, message=message))
if status_code == STATUS_404:
raise exception.ResourceNotFoundException(
data=exception_message)
if status_code == STATUS_401:
raise exception.UnauthorizedRequestException()
else:
raise exception.VolumeBackendAPIException(
data=exception_message)
def wait_for_job(self, operation, status_code, job):
"""Check if call is async, wait for it to complete.
:param operation: the operation being performed
:param status_code: the status code
:param job: the job
:returns: task -- list of dicts detailing tasks in the job
:raises: VolumeBackendAPIException
"""
task = None
if status_code == STATUS_202:
rc, result, status, task = self.wait_for_job_complete(job)
if rc != 0:
exception_message = (
"Error {operation}. Status code: {sc}. "
"Error: {error}. Status: {status}.".format(
operation=operation, sc=rc,
error=six.text_type(result),
status=status))
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
return task
def _build_uri(self, *args, **kwargs):
""" Build the target URI.
:param args: optional arguments passed in to form URI
:param kwargs: optional key word arguments passed in to form URI
:return: target URI -- string
"""
target_uri = ''
version = None
# Version control logic
if kwargs.get('version') and not kwargs.get('no_version'):
version = kwargs['version']
        elif kwargs.get('version') and kwargs.get('no_version'):
            LOG.debug("Version has been specified along with no_version "
                      "flag, ignoring no_version flag and using version "
                      "{ver}".format(ver=kwargs.get('version')))
            version = kwargs['version']
        elif not kwargs.get('version') and not kwargs.get('no_version'):
            version = self.U4V_VERSION
elif kwargs['no_version'] and not kwargs.get('version'):
pass
# Old method - has arguments passed which define URI
if args:
if version:
target_uri += ('/{version}'.format(version=version))
array_id = args[0]
category = args[1]
resource_type = args[2]
resource_name = kwargs.get('resource_name')
target_uri += ('/{cat}/symmetrix/{array_id}/{res_type}'.format(
cat=category,
array_id=array_id,
res_type=resource_type))
if resource_name:
target_uri += '/{resource_name}'.format(
resource_name=kwargs.get('resource_name'))
# New method - new method is to have only keyword arguments passed
if not args and kwargs:
if kwargs.get('category') in ['performance', 'common']:
version = None
if version:
target_uri += '/{}'.format(version)
target_uri += '/{category}/{resource_level}'.format(
category=kwargs.get('category'),
resource_level=kwargs.get('resource_level'))
if kwargs.get('resource_level_id'):
target_uri += '/{}'.format(kwargs.get('resource_level_id'))
if kwargs.get('resource_type'):
target_uri += '/{}'.format(kwargs.get('resource_type'))
if kwargs.get('resource_type_id'):
target_uri += '/{}'.format(kwargs.get('resource_type_id'))
if kwargs.get('resource'):
target_uri += '/{}'.format(kwargs.get('resource'))
if kwargs.get('resource_id'):
target_uri += '/{}'.format(kwargs.get('resource_id'))
if kwargs.get('object_type'):
target_uri += '/{}'.format(kwargs.get('object_type'))
if kwargs.get('object_type_id'):
target_uri += '/{}'.format(kwargs.get('object_type_id'))
return target_uri
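    # Illustrative example (keyword style, array ID hypothetical):
    #   _build_uri(version='90', category='sloprovisioning', resource_level='symmetrix',
    #              resource_level_id='000197800123', resource_type='storagegroup')
    # would yield '/90/sloprovisioning/symmetrix/000197800123/storagegroup'.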
def get_request(self, target_uri, resource_type, params=None):
"""Send a GET request to the array.
:param target_uri: the target uri
:param resource_type: the resource type, e.g. maskingview
:param params: optional dict of filter params
:returns: resource_object -- dict or None
:raises: ResourceNotFoundException
"""
message, sc = self.request(target_uri, GET, params=params)
operation = 'get {}'.format(resource_type)
self.check_status_code_success(operation, sc, message)
return message
def get_resource(self, *args, **kwargs):
"""Get resource details from the array. The args passed in are
positional and should be passed in using the order they are listed in
below.
Traditional Method:
:param args:
param0 array_id: the array serial number
param1 category: the resource category e.g. sloprovisioning
param2 resource_type: the resource type e.g. maskingview
:param kwargs:
param version: optional version of Unisphere
param resource_name: optional name of a specific resource
param params: optional dict of filter params
New Method:
:param kwargs:
param version: the version of Unisphere
param no_version: (boolean) if the URI required no version
param category: the resource category e.g. sloprovisioning, system
param resource_level: the resource level e.g. storagegroup, alert
param resource_level_id: the resource level ID
param resource_type: the name of a specific resource
param resource_type_id: the name of a specific resource
param resource: the name of a specific resource
param resource_id: the name of a specific resource
param object_type: the name of a specific resource
param object_type_id: the name of a specific resource
param params: query parameters
:return: resource object -- dict
"""
target_uri = self._build_uri(*args, **kwargs)
if args:
resource_type = args[2]
elif not args and kwargs:
resource_type = kwargs.get('resource_level')
else:
resource_type = None
return self.get_request(target_uri, resource_type, kwargs.get('params'))
def create_resource(self, *args, **kwargs):
"""Create a resource. The args passed in are positional and should be
passed in using the order they are listed in below.
Traditional Method:
:param args:
param0 array_id: the array serial number
param1 category: the resource category e.g. sloprovisioning
param2 resource_type: the resource type e.g. maskingview
:param kwargs:
param version: optional version of Unisphere
param resource_name: optional name of a specific resource
param payload: optional payload dict
New Method:
:param kwargs:
param version: the version of Unisphere
param no_version: (boolean) if the URI required no version
param category: the resource category e.g. sloprovisioning, system
param resource_level: the resource level e.g. storagegroup, alert
param resource_level_id: the resource level ID
param resource_type: the name of a specific resource
param resource_type_id: the name of a specific resource
            param
U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
    'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR
#!/usr/bin/env python
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import sys
import time
import serial
import serial.tools.list_ports
import logging
from logging.handlers import RotatingFileHandler
import argparse
import platform
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mplcursors
import matplotlib.animation as animation
from matplotlib.dates import num2date, MinuteLocator, SecondLocator, DateFormatter
from matplotlib.widgets import Button
from datetime import datetime, timedelta
from threading import Thread
import os
import configparser
import inspect
import easygui
version = '1.0.7'
port = ''
#baud = 115200
#logfile = 'current_viewer.log'
#refresh_interval = 66 # 66ms = 15fps
# controls the window size (and memory usage). 100k samples = 3 minutes
#buffer_max_samples = 100000
# controls how many samples to display in the chart (and CPU usage). Ie 4k display should be ok with 2k samples
#chart_max_samples = 2048
# how many samples to average (median)
#max_supersampling = 16;
# set to true to compute median instead of average (less noise, more CPU)
#median_filter = 0;
#
save_file = None
save_format = None
connected_device = "CurrentRanger"
#########################################################################################################
Config = configparser.ConfigParser()
if sys.platform.startswith('win'):
filename = inspect.getframeinfo(inspect.currentframe()).filename
settingsFilename = os.path.join(os.path.dirname(os.path.abspath(filename)), 'Config.txt')
else:
settingsFilename = os.path.join(sys.path[0], 'Config.txt')
Config.read(settingsFilename)
logger = logging.getLogger("main_logger")
# disable matplotlib logging for fonts, seems to be quite noisy
logging.getLogger('matplotlib.font_manager').disabled = True
#########################################################################################################
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
logger.debug("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
def configure_logging(location, name, level, logger):
logDIR = ConfigSectionMap("Configuration")['log_location']
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
if not os.path.exists(logDIR):
os.makedirs(logDIR)
logname = '{3}/{0}-{1}-{2}.txt'.format(location, name, datetime.strftime(datetime.today(), '%d-%m-%Y'), logDIR)
if os.path.isfile(logname):
newlog = False
else:
newlog = True
if level == 'INFO':
print('got into info area')
fileHandler = logging.FileHandler(logname)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logFormatter)
elif level == 'DEBUG':
fileHandler = logging.FileHandler(logname)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
logging.basicConfig(filename='debug.log', level=logging.DEBUG)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
# !/usr/bin/python3
class ReadLine:
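    """Buffer raw bytes from a pyserial port and return one newline-terminated line per readline() call."""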
def __init__(self, s):
self.buf = bytearray()
self.s = s
def readline(self):
i = self.buf.find(b"\n")
if i >= 0:
r = self.buf[:i + 1]
self.buf = self.buf[i + 1:]
return r
while True:
i = max(1, min(2048, self.s.in_waiting))
data = self.s.read(i)
i = data.find(b"\n")
if i >= 0:
r = self.buf + data[:i + 1]
self.buf[0:] = data[i + 1:]
return r
else:
self.buf.extend(data)
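# Illustrative usage (assumes `ser` is an already-open serial.Serial instance):
#   reader = ReadLine(ser)
#   raw_line = reader.readline()   # bytes ending in b"\n"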
class CRPlot:
def __init__(self, sample_buffer = 100):
self.port = '/dev/ttyACM0'
self.baud = 9600
self.thread = None
self.stream_data = True
self.pause_chart = False
self.sample_count = 0
self.animation_index = 0
self.max_samples = sample_buffer
self.data = collections.deque(maxlen=sample_buffer)
self.timestamps = collections.deque(maxlen=sample_buffer)
self.dataStartTS = None
self.serialConnection = None
self.framerate = 30
def serialStart(self, port, speed = 115200):
self.port = port
self.baud = speed
logging.info("Trying to connect to port='{}' baud='{}'".format(port, speed))
try:
self.serialConnection = serial.Serial(self.port, self.baud, timeout=5)
logging.info("Connected to {} at baud {}".format(port, speed))
except serial.SerialException as e:
logging.error("Error connecting to serial port: {}".format(e))
return False
except:
logging.error("Error connecting to serial port, unexpected exception:{}".format(sys.exc_info()))
return False
if self.thread == None:
self.thread = Thread(target=self.serialStream)
self.thread.start()
print('Initializing data capture:', end='')
wait_timeout = 100
while wait_timeout > 0 and self.sample_count == 0:
print('.', end='', flush=True)
time.sleep(0.01)
wait_timeout -= 1
if (self.sample_count == 0):
logging.error("Error: No data samples received. Aborting")
return False
print("OK\n")
return True
def pauseRefresh(self, state):
logging.debug("pause {}".format(state))
self.pause_chart = not self.pause_chart
if self.pause_chart:
self.ax.set_title('<Paused>', color="yellow")
self.bpause.label.set_text('Resume')
else:
self.ax.set_title(f"Streaming: {connected_device}", color="white")
self.bpause.label.set_text('Pause')
def saveAnimation(self, state):
logging.debug("save {}".format(state))
self.bsave.label.set_text('Saving...')
plt.gcf().canvas.draw()
filename = None
while True:
filename = 'current' + str(self.animation_index) + '.gif'
self.animation_index += 1
            if not os.path.exists(filename):
break
logging.info("Animation saved to '{}'".format(filename))
self.anim.save(filename, writer='imagemagick', fps=self.framerate)
self.bsave.label.set_text('GIF')
def chartSetup(self, refresh_interval=100):
plt.style.use('dark_background')
fig = plt.figure(num=f"Current Viewer {version}", figsize=(10, 6))
self.ax = plt.axes()
ax = self.ax
ax.set_title(f"Streaming: {connected_device}", color="white")
fig.text (0.2, 0.88, f"CurrentViewer {version}", color="yellow", verticalalignment='bottom', horizontalalignment='center', fontsize=9, alpha=0.7)
fig.text (0.89, 0.0, f"github.com/MGX3D/CurrentViewer", color="white", verticalalignment='bottom', horizontalalignment='center', fontsize=9, alpha=0.5)
ax.set_ylabel("Current draw (Amps)")
ax.set_yscale("log", nonpositive='clip')
ax.set_ylim(1e-10, 1e1)
plt.yticks([1.0e-9, 1.0e-8, 1.0e-7, 1.0e-6, 1.0e-5, 1.0e-4, 1.0e-3, 1.0e-2, 1.0e-1, 1.0], ['1nA', '10nA', '100nA', '1\u00B5A', '10\u00B5A', '100\u00B5A', '1mA', '10mA', '100mA', '1A'], rotation=0)
ax.grid(axis="y", which="both", color="yellow", alpha=.3, linewidth=.5)
ax.set_xlabel("Time")
plt.xticks(rotation=20)
ax.set_xlim(datetime.now(), datetime.now() + timedelta(seconds=10))
ax.grid(axis="x", color="green", alpha=.4, linewidth=2, linestyle=":")
#ax.xaxis.set_major_locator(SecondLocator())
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
def on_xlims_change(event_ax):
logging.debug("Interactive zoom: {} .. {}".format(num2date(event_ax.get_xlim()[0]), num2date(event_ax.get_xlim()[1])))
chart_len = num2date(event_ax.get_xlim()[1]) - num2date(event_ax.get_xlim()[0])
if chart_len.total_seconds() < 5:
self.ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S.%f'))
else:
self.ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
self.ax.xaxis.set_minor_formatter(DateFormatter('%H:%M:%S.%f'))
ax.callbacks.connect('xlim_changed', on_xlims_change)
lines = ax.plot([], [], label="Current")[0]
lastText = ax.text(0.50, 0.95, '', transform=ax.transAxes)
statusText = ax.text(0.50, 0.50, '', transform=ax.transAxes)
self.anim = animation.FuncAnimation(fig, self.getSerialData, fargs=(lines, plt.legend(), lastText), interval=refresh_interval)
plt.legend(loc="upper right", framealpha=0.5)
apause = plt.axes([0.91, 0.15, 0.08, 0.07])
self.bpause = Button(apause, label='Pause', color='0.2', hovercolor='0.1')
self.bpause.on_clicked(self.pauseRefresh)
self.bpause.label.set_color('yellow')
aanimation = plt.axes([0.91, 0.25, 0.08, 0.07])
self.bsave = Button(aanimation, 'GIF', color='0.2', hovercolor='0.1')
self.bsave.on_clicked(self.saveAnimation)
self.bsave.label.set_color('yellow')
crs = mplcursors.cursor(ax, hover=True)
@crs.connect("add")
def _(sel):
sel.annotation.arrow_patch.set(arrowstyle="simple", fc="yellow", alpha=.4)
sel.annotation.set_text(self.textAmp(sel.target[1]))
self.framerate = 1000/refresh_interval
plt.gcf().autofmt_xdate()
plt.show()
def serialStream(self):
# set data streaming mode on CR (assuming it was off)
self.serialConnection.write(b'u')
self.serialConnection.reset_input_buffer()
self.sample_count = 0
line_count = 0
error_count = 0
self.dataStartTS = datetime.now()
# data timeout threshold (seconds) - bails out of no samples received
data_timeout_ths = 0.5
line = None
device_data = bytearray()
logging.info("Starting USB streaming loop")
while (self.stream_data):
try:
# get the timestamp before the data string, likely to align better with the actual reading
ts = datetime.now()
chunk_len = device_data.find(b"\n")
if chunk_len >= 0:
line = device_data[:chunk_len]
device_data = device_data[chunk_len+1:]
else:
line = None
while line == None and self.stream_data:
chunk_len = max(1, min(4096, self.serialConnection.in_waiting))
chunk = self.serialConnection.read(chunk_len)
chunk_len = chunk.find(b"\n")
if chunk_len >= 0:
line = device_data + chunk[:chunk_len]
device_data[0:] = chunk[chunk_len+1:]
else:
device_data.extend(chunk)
if line == None:
continue
line = line.decode(encoding="ascii", errors="strict")
# TODO remove this, not needed in the new setup
if (line.startswith("USB_LOGGING")):
if (line.startswith("USB_LOGGING_DISABLED")):
# must have been left open by a different process/instance
logging.info("CR USB Logging was disabled. Re-enabling")
self.serialConnection.write(b'u')
self.serialConnection.flush()
continue
data = float(line)
self.sample_count += 1
line_count += 1
if save_file:
if save_format == 'CSV':
save_file.write(f"{ts},{data}\n")
elif save_format == 'JSON':
save_file.write("{}{{\"time\":\"{}\",\"amps\":\"{}\"}}".format(',\n' if self.sample_count>1 else '', ts, data))
if data < 0.0:
# this happens too often (negative values)
self.timestamps.append(np.datetime64(ts))
self.data.append(1.0e-11)
logging.warning("Unexpected value='{}'".format(line.strip()))
else:
self.timestamps.append(np.datetime64(ts))
self.data.append(data)
logging.debug(f"#{self.sample_count}:{ts}: {data}")
if (self.sample_count % 1000 == 0):
logging.debug("{}: '{}' -> {}".format(ts.strftime("%H:%M:%S.%f"), line.rstrip(), data))
dt = datetime.now() - self.dataStartTS
logging.info("Received {} samples in {:.0f}ms ({:.2f} samples/second)".format(self.sample_count, 1000*dt.total_seconds(), self.sample_count/dt.total_seconds()))
print("Received {} samples in {:.0f}ms ({:.2f} samples/second)".format(self.sample_count, 1000*dt.total_seconds(), self.sample_count/dt.total_seconds()))
except KeyboardInterrupt:
logging.info('Terminated by user')
break
except ValueError:
logging.error("Invalid data format: '{}': {}".format(line, sys.exc_info()))
error_count += 1
last_sample = (np.datetime64(datetime.now()) - (self.timestamps[-1] if self.sample_count else np.datetime64(datetime.now())))/np.timedelta64(1, 's')
if (error_count > 100) and last_sample > data_timeout_ths:
logging.error("Aborting. Error rate is too high {} errors, last valid sample received {} seconds ago".format(error_count, last_sample))
self.stream_data = False
break
pass
except serial.SerialException as e:
logging.error('Serial read error: {}: {}'.format(e.strerror, sys.exc_info()))
self.stream_data = False
break
self.stream_data = False
# stop streaming so the device shuts down if in auto mode
logging.info('Telling CR to stop USB streaming')
try:
# this will throw if the device has failed.disconnected already
self.serialConnection.write(b'u')
except:
logging.warning('Was not able to clean disconnect from the device')
logging.info('Serial streaming terminated')
def textAmp(self, amp):
if (abs(amp) > 1.0):
return "{:.3f} A".format(amp)
if (abs(amp) > 0.001):
return "{:.2f} mA".format(amp*1000)
if (abs(amp) > 0.000001):
return "{:.1f} \u00B5A".format(amp*1000*1000)
return "{:.1f} nA".format(amp*1000*1000*1000)
def getSerialData(self, frame, lines, legend, lastText):
if (self.pause_chart or len(self.data) < 2):
lastText.set_text('')
return
if not self.stream_data:
self.ax.set_title('<Disconnected>', color="red")
lastText.set_text('')
return
dt = datetime.now() - self.dataStartTS
# capped at buffer_max_samples
sample_set_size = len(self.data)
timestamps = []
samples = [] #np.arange(chart_max_samples, dtype="float64")
subsamples = max(1, min(max_supersampling, int(sample_set_size/chart_max_samples)))
# Sub-sampling for longer window views without the redraw perf impact
for i in range(0, chart_max_samples):
sample_index = int(sample_set_size*i/chart_max_samples)
timestamps.append(self.timestamps[sample_index])
supersample = np.array([self.data[i] for i in range(sample_index, sample_index+subsamples)])
samples.append(np.median(supersample) if median_filter else np.average(supersample))
self.ax.set_xlim(timestamps[0], timestamps[-1])
# some machines max out at 100fps, so this should react in 0.5-5 seconds to actual speed
        sps_samples = min(512, sample_set_size)
        dt_sps = (np.datetime64(datetime.now()) - self.timestamps[-sps_samples])/np.timedelta64(1, 's')
        # if more than 1 second since last sample, automatically set SPS to 0 so we don't have to wait until it slowly decays to 0
sps = sps_samples/dt_sps if ((np.datetime64(datetime.now()) - self.timestamps[-1])/np.timedelta64(1, 's')) < 1 else 0.0
lastText.set_text('{:.1f} SPS'.format(sps))
if sps > 500:
lastText.set_color("white")
elif sps > 100:
lastText.set_color("yellow")
else:
lastText.set_color("red")
logging.debug("Drawing chart: range {}@{} .. {}@{}".format(samples[0], timestamps[0], samples[-1], timestamps[-1]))
lines.set_data(timestamps, samples)
        self.ax.legend(labels=['Last: {}\nAvg: {}'.format(
<gh_stars>1-10
#import gv,pygraph, edge
#from pygraph.classes.graph import graph
#from pygraph.classes.digraph import digraph
#from pygraph.algorithms.searching import breadth_first_search
#from pygraph.readwrite.dot import write
#import pygraphviz as pgv
import math
import copy
from operator import itemgetter
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import nltk
from nltk.collocations import *
stop = stopwords.words('english')
class AdjMatrix(object):
"""
A class defined for the nodes in the word graph. Here nodes include
node name, node id etc, and any other information needed. The methods
facilitate the processing specific to text graph
"""
def __init__(self):
        self.gmatrix = {}
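        # gmatrix is a nested dict: gmatrix[from_word][to_word] -> Edge, which tracks
        # a word-distance weight and the number of times it has been updated.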
def createMatrix(self,wordlist, poslist, directed):
poslist=sorted(poslist,key=itemgetter(0))
poslist=sorted(poslist,key=itemgetter(1))
#print (wordlist)
from edge import Edge
for i in wordlist:
tmatrix = {}
for j in wordlist:
temp = Edge()
#if i == len(wordlist)-1:
# temp.updatewt(1.0)
#temp.fromnode = wordlist[i]
#temp.tonode = wordlist[j]
tmatrix[j]=temp
self.gmatrix[i]=tmatrix
i=0
while i < (len(poslist)-1):
#print(poslist[i][0]," : ",poslist[i][1])
size=nltk.word_tokenize(poslist[i][0])
a=i
z=0
#print ("size:",len(size))
c=a+1
flag=0
while z<len(size):
#print ('z:',z)
a=c
c=a+1
#print (a)
if a<len(poslist):
if poslist[a][0] != poslist[i][0]:
temp1=[]
temp2=[]
if poslist[i][1]!=poslist[a][1]:
z+=1
if z>len(size)-1:
temp2.append(a)
else:
temp1.append(a)
print("dwdp", poslist[a][0]," : ",poslist[a][1])
count =0
if c<len(poslist):
if poslist[a][1]==poslist[c][1]:
while poslist[c][1]==poslist[a][1]:
#print("1:",poslist[c][0])
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
count+=1
#print (temp)
if poslist[i][0]==poslist[c][0]:
flag=1
for h in range(count+1):
#temp.pop()
if z>len(size)-1:
temp2.pop()
else:
temp1.pop()
break
c+=1
if c==len(poslist):
break
## for x in temp:
## dist = poslist[x][1]-poslist[i][1]
## if dist<0:
## print ("dist: ",dist,i+1,poslist[i+1][1],i,poslist[i][1])
## #print "found", poslist[i][0], i
## row = poslist[i][0]
## col = poslist[x][0]
## #print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
## if self.gmatrix[row][col].numupdate == 0:
## self.gmatrix[row][col].weight = dist
## else:
## self.gmatrix[row][col].updatewt(dist)
## self.gmatrix[row][col].updatenumber()
## ## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## ## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
## if directed == False:
## if self.gmatrix[col][row].numupdate == 0:
## self.gmatrix[col][row].weight = dist
## else:
## self.gmatrix[col][row].updatewt(dist)
## self.gmatrix[col][row].updatenumber()
if poslist[i][1]==poslist[a][1]:
if z>len(size)-1:
temp2.append(a)
else:
temp1.append(a)
print("dwsp", poslist[a][0]," : ",poslist[a][1])
if c<len(poslist):
if poslist[a][1]==poslist[a+1][1]:
while poslist[c][1]==poslist[a][1]:
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
#print("1:",poslist[c][0])
c+=1
if c==len(poslist):
break
if c<len(poslist):
#temp.append(c)
z+=1
if z>len(size)-1:
temp2.append(c)
#print poslist[c][0]
else:
temp1.append(c)
#print("2:",poslist[c][0])
c+=1
count =0
if c <len(poslist):
if poslist[c-1][1]==poslist[c][1]:
while poslist[c-1][1]==poslist[c][1]:
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
#print("3:",poslist[c][0])
count+=1
if poslist[i][0]==poslist[c-1][0] or poslist[i][0]==poslist[c][0]:
flag=1
for h in range(count+1):
#temp.pop()
if z>len(size)-1:
temp2.pop()
else:
temp1.pop()
break
c+=1
if c==len(poslist):
break
for x in temp1:
dist = 0
## if dist<0:
## print ("dist: ",dist,a,poslist[a][1],i,poslist[i][1])
#print "found", poslist[i][0], i
row = poslist[i][0]
col = poslist[x][0]
#print row,col,dist
#print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
#print(row,col,dist)
#print (row,col)
if self.gmatrix[row][col].numupdate == 0:
self.gmatrix[row][col].weight = dist
else:
self.gmatrix[row][col].updatewt(dist)
self.gmatrix[row][col].updatenumber()
## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
if directed == False:
if self.gmatrix[col][row].numupdate == 0:
self.gmatrix[col][row].weight = dist
else:
self.gmatrix[col][row].updatewt(dist)
self.gmatrix[col][row].updatenumber()
for x in temp2:
dist = poslist[x][1]-poslist[i][1]
## if dist<0:
## print ("dist: ",dist,a,poslist[a][1],i,poslist[i][1])
#print "found", poslist[i][0], i
row = poslist[i][0]
col = poslist[x][0]
#print row,col,dist
#print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
#print(row,col,dist)
#print (row,col)
if self.gmatrix[row][col].numupdate == 0:
self.gmatrix[row][col].weight = dist
else:
self.gmatrix[row][col].updatewt(dist)
self.gmatrix[row][col].updatenumber()
## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
if directed == False:
if self.gmatrix[col][row].numupdate == 0:
self.gmatrix[col][row].weight = dist
else:
self.gmatrix[col][row].updatewt(dist)
self.gmatrix[col][row].updatenumber()
if flag==1:
break
if poslist[a][0] == poslist[i][0]:
print("swdp ", poslist[a][0]," : ",poslist[i][0])
temp1=[]
temp2=[]
if c<len(poslist):
z+=1
while c<len(poslist) and poslist[c][1]==poslist[a][1]:
#print("1:",poslist[c][0])
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
c+=1
for x in temp1:
dist = 0
## if dist<0:
## print ("dist: ",dist,a,poslist[a][1],i,poslist[i][1])
#print "found", poslist[i][0], i
row = poslist[i][0]
col = poslist[x][0]
#print row,col,dist
#print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
#print(row,col,dist)
if self.gmatrix[row][col].numupdate == 0:
self.gmatrix[row][col].weight = dist
else:
self.gmatrix[row][col].updatewt(dist)
self.gmatrix[row][col].updatenumber()
## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
if directed == False:
if self.gmatrix[col][row].numupdate == 0:
self.gmatrix[col][row].weight = dist
else:
self.gmatrix[col][row].updatewt(dist)
self.gmatrix[col][row].updatenumber()
for x in temp2:
dist = poslist[x][1]-poslist[i][1]
## if dist<0:
## print ("dist: ",dist,a,poslist[a][1],i,poslist[i][1])
#print "found", poslist[i][0], i
row = poslist[i][0]
col = poslist[x][0]
#print row,col,dist
#print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
#print(row,col,dist)
if self.gmatrix[row][col].numupdate == 0:
self.gmatrix[row][col].weight = dist
else:
self.gmatrix[row][col].updatewt(dist)
self.gmatrix[row][col].updatenumber()
## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
if directed == False:
if self.gmatrix[col][row].numupdate == 0:
self.gmatrix[col][row].weight = dist
else:
self.gmatrix[col][row].updatewt(dist)
self.gmatrix[col][row].updatenumber()
else:
break
i += 1
return self.gmatrix
def addFilename(self,filename, wordlist):
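        """Append one row of fresh Edge objects to gmatrix for a newly added file node,
        then initialise the weights in row ``len(wordlist)`` to 1.0 (see the loop below).
        Note: ``filename`` is currently unused in this method body."""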
from edge import Edge
initsize = len(self.gmatrix)
tmatrix = list()
for i in range(len(wordlist)):
#print wordlist(i)
#print wordlist[i]
temp = Edge()
#temp.updatewt(1.0)
tmatrix.insert(initsize,temp)
#self.gmatrix[initsize][i].updatewt(1.0)
#self.gmatrix[initsize][i].updatenumber()
self.gmatrix.insert(initsize,tmatrix)
print (wordlist[len(wordlist)-1])
for i in range(len(wordlist)):
self.gmatrix[len(wordlist)][i].weight = 1.0
#self.gmatrix[len(wordlist)][i].updatenumber()
#if self.gmatrix[len(wordlist)][i].weight != float('inf'):
return self.gmatrix
def updateIndMatrix(self, matrix, tok, itxt,txt):
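        """Return a deep copy of ``matrix`` whose edge weights and update counts have this
        object's contributions subtracted; edges left with zero updates get weight ``float('inf')``.
        Note: ``tok`` and ``txt`` are not used in the body shown here."""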
mat=copy.deepcopy(matrix)
itxt=list(set(itxt))
#print(len(itxt))
#print(len(txt))
for i in range(len(itxt)):
for j in range(len(itxt)):
                mat.gmatrix[i][j].weight = matrix.gmatrix[i][j].weight - self.gmatrix[i][j].weight
                mat.gmatrix[i][j].numupdate = matrix.gmatrix[i][j].numupdate - self.gmatrix[i][j].numupdate
                if mat.gmatrix[i][j].numupdate == 0:
                    mat.gmatrix[i][j].weight = float('inf')
return mat
"""
Given the current matrix and a new matrix with new values, the matrix must be updated
"""
def updateMatrix(self, wordlist, poslist, directed):
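        # Hedged usage sketch (calling convention inferred from this method, not documented
        # in the source): poslist holds (token, position) pairs and wordlist the distinct
        # tokens, e.g.
        #   wordlist = ['file', 'size', 'executable']
        #   poslist  = [('file', 0), ('size', 3), ('executable', 7)]
        #   graph.updateMatrix(wordlist, poslist, directed=False)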
poslist=sorted(poslist,key=itemgetter(0))
poslist=sorted(poslist,key=itemgetter(1))
#print (poslist)
#print ("updateMatrix")
from edge import Edge
#print ("matrix initial size", len(wordlist), len(self.gmatrix), len(self.gmatrix[0]))
initsize = len(self.gmatrix)
x=0
#print wordlist
for i in wordlist:
tmatrix = {}
if x>=initsize:
for j in wordlist:
temp = Edge()
tmatrix[j]=temp
self.gmatrix[i]=tmatrix
else:
j = initsize
while j < len(wordlist):
temp = Edge()
self.gmatrix[i][wordlist[j]]=temp
j+=1
x+=1
## while (i<len(wordlist)):
## #print("i:",i)
## tmatrix = {}
## if i>= initsize:
## j=0
## while j < len(wordlist):
## temp = Edge()
## if i == len(wordlist)-1:
## temp.updatewt(1.0)
## tmatrix.insert(j,temp)
## j+=1
## self.gmatrix.insert(i,tmatrix)
## else:
## j = initsize
## while j < len(wordlist):
## temp = Edge()
## self.gmatrix[i].insert(j,temp)
## j+=1
## i+=1
i=0
while i < (len(poslist)-1):
#print(poslist[i][0]," : ",poslist[i][1])
size=nltk.word_tokenize(poslist[i][0])
a=i
z=0
#print ("size:",len(size))
c=a+1
flag=0
while z<len(size):
#print ('z:',z)
a=c
c=a+1
#print (a)
if a<len(poslist):
if poslist[a][0] != poslist[i][0]:
temp1=[]
temp2=[]
if poslist[i][1]!=poslist[a][1]:
z+=1
if z>len(size)-1:
temp2.append(a)
else:
temp1.append(a)
#print("dwdp", poslist[a][0]," : ",poslist[a][1])
count =0
if c<len(poslist):
if poslist[a][1]==poslist[c][1]:
while poslist[c][1]==poslist[a][1]:
#print("1:",poslist[c][0])
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
count+=1
#print (temp)
if poslist[i][0]==poslist[c][0]:
flag=1
for h in range(count+1):
#temp.pop()
if z>len(size)-1:
temp2.pop()
else:
temp1.pop()
break
c+=1
if c==len(poslist):
break
## for x in temp:
## dist = poslist[x][1]-poslist[i][1]
## if dist<0:
## print ("dist: ",dist,i+1,poslist[i+1][1],i,poslist[i][1])
## #print "found", poslist[i][0], i
## row = poslist[i][0]
## col = poslist[x][0]
## #print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
## if self.gmatrix[row][col].numupdate == 0:
## self.gmatrix[row][col].weight = dist
## else:
## self.gmatrix[row][col].updatewt(dist)
## self.gmatrix[row][col].updatenumber()
## ## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## ## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
## if directed == False:
## if self.gmatrix[col][row].numupdate == 0:
## self.gmatrix[col][row].weight = dist
## else:
## self.gmatrix[col][row].updatewt(dist)
## self.gmatrix[col][row].updatenumber()
if poslist[i][1]==poslist[a][1]:
if z>len(size)-1:
temp2.append(a)
else:
temp1.append(a)
#print("dwsp", poslist[a][0]," : ",poslist[a][1])
if c<len(poslist):
if poslist[a][1]==poslist[a+1][1]:
while poslist[c][1]==poslist[a][1]:
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
#print("1:",poslist[c][0])
c+=1
if c==len(poslist):
break
if c<len(poslist):
#temp.append(c)
z+=1
if z>len(size)-1:
temp2.append(c)
#print poslist[c][0]
else:
temp1.append(c)
#print("2:",poslist[c][0])
c+=1
count =0
if c <len(poslist):
if poslist[c-1][1]==poslist[c][1]:
while poslist[c-1][1]==poslist[c][1]:
#temp.append(c)
if z>len(size)-1:
temp2.append(c)
else:
temp1.append(c)
#print("3:",poslist[c][0])
count+=1
if poslist[i][0]==poslist[c-1][0] or poslist[i][0]==poslist[c][0]:
flag=1
for h in range(count+1):
#temp.pop()
if z>len(size)-1:
temp2.pop()
else:
temp1.pop()
break
c+=1
if c==len(poslist):
break
for x in temp1:
dist = 0
## if dist<0:
## print ("dist: ",dist,a,poslist[a][1],i,poslist[i][1])
#print "found", poslist[i][0], i
row = poslist[i][0]
col = poslist[x][0]
#print row,col,dist
#print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
#print(row,col,dist)
#print (row,col)
if self.gmatrix[row][col].numupdate == 0:
self.gmatrix[row][col].weight = dist
else:
self.gmatrix[row][col].updatewt(dist)
self.gmatrix[row][col].updatenumber()
## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
if directed == False:
if self.gmatrix[col][row].numupdate == 0:
self.gmatrix[col][row].weight = dist
else:
self.gmatrix[col][row].updatewt(dist)
self.gmatrix[col][row].updatenumber()
for x in temp2:
dist = poslist[x][1]-poslist[i][1]
## if dist<0:
## print ("dist: ",dist,a,poslist[a][1],i,poslist[i][1])
#print "found", poslist[i][0], i
row = poslist[i][0]
col = poslist[x][0]
#print row,col,dist
#print (self.gmatrix[row][col].weight, poslist[i][0], poslist[i+1][0])
#print(row,col,dist)
#print (row,col)
if self.gmatrix[row][col].numupdate == 0:
self.gmatrix[row][col].weight = dist
else:
self.gmatrix[row][col].updatewt(dist)
self.gmatrix[row][col].updatenumber()
## if poslist[i][0]=='executable' or poslist[i+1][0]=='executable':
## print dist , "......", poslist[i][0], poslist[i+1][0], row, col, self.gmatrix[row][col].weight
if directed == False:
if self.gmatrix[col][row].numupdate == 0:
self.gmatrix[col][row].weight = dist
else:
self.gmatrix[col][row].updatewt(dist)
self.gmatrix[col][row].updatenumber()
if flag==1:
break
if poslist[a][0] == poslist[i][0]:
#print("swdp ", poslist[a][0]," : ",poslist[i][0])
temp1=[]
temp2=[]
if c<len(poslist):
z+=1
while c<len(poslist) and poslist[c][1]==poslist[a][1]:
#print("1:",poslist[c][0])
#temp.append(c)
| |
<gh_stars>0
# encoding=utf-8
# Date: 2018-09-13
# Author: MJUZY
from ExtractMaxFace import extractProcess
from FaceRecognzedProcess import faceRecognzedProcess
import datetime
import dlib
from flask import Flask, url_for
from flask import request
import json
import shutil
from werkzeug.utils import secure_filename
import os
app = Flask(__name__)
os.system("python /var/www/demoapp/Try_Function_2.py")
def prepare_detector(predictor_path, face_rec_model_path):
file = open("/var/www/demoapp/show_hello_running.txt", 'a')
file.write("Prepare_detector !\n")
    # Load the frontal face detector
detector = dlib.get_frontal_face_detector()
file.write("detector prepared !\n")
    # Load the facial landmark (shape) predictor
sp = dlib.shape_predictor(predictor_path)
file.write("sp prepared !\n")
    # Load the face recognition model
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
file.write("facerec prepared !\n")
file.close()
return detector, sp, facerec
def prepare_path_etc():
    # Facial landmark predictor
predictor_path = "/var/www/demoapp/shape_predictor_68_face_landmarks.dat"
    # Face recognition model:
face_rec_model_path = "/var/www/demoapp/dlib_face_recognition_resnet_model_v1.dat"
return predictor_path, face_rec_model_path
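# Both .dat files referenced above are the standard pre-trained dlib models
# (shape_predictor_68_face_landmarks and dlib_face_recognition_resnet_model_v1);
# they can be downloaded from http://dlib.net/files/ if they are missing on the server.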
predictor_path, face_rec_model_path = prepare_path_etc()
detector, sp, facerec = prepare_detector(predictor_path, face_rec_model_path)
"""Attention:
This is just a sample of how the cache could be used
>>>from werkzeug.contrib.cache import SimpleCache
>>>cache = SimpleCache()
>>>cache.set("detector", detector)
>>>cache.set("sp", sp)
>>>cache.set("facerec", facerec)
>>>cache.get("detector")
>>>...
"""
# Method One: curl http://127.0.0.1:5000/hello?name=dongzheng
@app.route('/hello')
def api_hello():
if 'name' in request.args:
return 'Hello ' + request.args['name']
else:
return 'Hello <NAME>'
# Method Two/First: curl http://127.0.0.1:5000/articles/abc
# Output: You are reading abc
# Method Two/Second: curl http://127.0.0.1:5000/articles
# Output: List of /articles
@app.route('/')
def api_root():
return 'Welcome'
@app.route('/articles')
def api_articles():
return 'List of ' + url_for('api_articles')
@app.route('/articles/<articleid>')
def api_article(articleid):
return 'You are reading ' + articleid
# Method Three:
# C:\Users\zheng>curl -X Get http://127.0.0.1:5000/echo
# ECHO: GET
@app.route('/echo', methods=['GET', 'POST', 'PATCH', 'PUT', 'DELETE'])
def api_echo():
if request.method == 'GET':
return "ECHO: GET\n"
elif request.method == 'POST':
return "ECHO: POST\n"
elif request.method == 'PATCH':
return "ECHO: PACTH\n"
elif request.method == 'PUT':
return "ECHO: PUT\n"
elif request.method == 'DELETE':
return "ECHO: DELETE"
# Method Four: see Client\__init__.py
# app.config['UPLOAD_FOLDER'] = 'D:\\PyFlaskLearningProjects\\20180613_Test1\\static\\uploads'
# The route below is used for Ubuntu, upside for Windows
app.config['UPLOAD_FOLDER'] = '/var/www/demoapp/student_photo'
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg', 'gif'])
# For a given file, return whether it's an allowed type or not
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
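# e.g. allowed_file("portrait.jpg") -> True, allowed_file("notes.txt") -> False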
@app.route('/sign_up_photo_upload', methods=['POST'])
def upload():
name = request.form.get('name', 'little apple')
class_name = request.form.get('class_name', 'little apple')
new_path = "/var/www/demoapp/Accounts/" + class_name + "/" + name
if os.path.exists(new_path) == False:
os.makedirs(new_path)
        # Create the directory storing the CSV/JSON files used to record customers' travel coordinates
new_csv_path = new_path + "/" + "coordinations"
os.makedirs(new_csv_path)
new_orijpg_path = new_path + "/" + "OriJPG"
os.makedirs(new_orijpg_path)
dirpath = '/var/www/demoapp'
upload_file = request.files['image01']
if upload_file and allowed_file(upload_file.filename):
filename = secure_filename(upload_file.filename)
upload_file.save(os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], filename))
sign_up_photo_path = dirpath + "/student_photo/" + str(upload_file.filename)
move_to_path = dirpath + "/Accounts/" + class_name + '/' + name + '/' + "OriJPG"
shutil.move(sign_up_photo_path, move_to_path)
"""Description: the orders following are used to check if the scripts have been correctly executed
>>>log_file = open('/var/www/demoapp/log.txt', mode='a')
>>>log_file.write("---shutil.move Ok !---")
"""
IP = request.remote_addr
dir = '/var/www/demoapp/Accounts'
extractProcess(upload_file.filename, dir, class_name, name, detector)
return 'hello, ' + name + ' class_name: ' + class_name + 'IP : ' + IP + ' success'
else:
return 'hello, ' + request.form.get('name', 'little apple') + ' failed'
@app.route('/checkin_photo_upload1', methods=['POST'])
def checkin_upload1():
"""Attention:
        Only two check-in photos are expected in total; this API handles the first one.
:return:
"""
side_rate = 0
result = "This is the Original Value of the result !"
detector_forcheckin = detector
sp_forcheckin = sp
facerec_forcheckin = facerec
dir_path = '/var/www/demoapp/Accounts'
dirpath_forcheckin = "/var/www/demoapp/student_photo"
upload_file = request.files['image01']
if upload_file and allowed_file(upload_file.filename):
filename = secure_filename(upload_file.filename)
upload_file.save(os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], filename))
name = request.form.get('name', 'little apple')
class_name = request.form.get('class_name', 'little apple')
result, side_rate = faceRecognzedProcess(detector_forcheckin, sp_forcheckin, facerec_forcheckin,
dir_path, class_name,name,
dirpath_forcheckin, upload_file.filename)
os.remove(dirpath_forcheckin + '/' + upload_file.filename)
if result == "Good !":
mark_path = dir_path + '/' + class_name + '/' + name + '/' + "mark1.txt"
file_mark = open(mark_path, 'w')
file_mark.write(str(side_rate))
file_mark.close()
return result
@app.route('/checkin_photo_upload2', methods=['POST'])
def checkin_upload2():
"""Attention:
        Only two check-in photos are expected in total; this API handles the second one and
        compares its side rate against the value stored by the first upload.
:return:
"""
side_rate = 0
result = "This is the Original Value of the result !"
detector_forcheckin = detector
sp_forcheckin = sp
facerec_forcheckin = facerec
dir_path = '/var/www/demoapp/Accounts'
dirpath_forcheckin = "/var/www/demoapp/student_photo"
upload_file = request.files['image01']
if upload_file and allowed_file(upload_file.filename):
filename = secure_filename(upload_file.filename)
upload_file.save(os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], filename))
name = request.form.get('name', 'little apple')
class_name = request.form.get('class_name', 'little apple')
result, side_rate = faceRecognzedProcess(detector_forcheckin, sp_forcheckin, facerec_forcheckin,
dir_path, class_name,name,
dirpath_forcheckin, upload_file.filename)
os.remove(dirpath_forcheckin + '/' + upload_file.filename)
if result == "Good !":
mark_path = dir_path + '/' + class_name + '/' + name + '/' + "mark1.txt"
if os.path.exists(mark_path) == True:
file_mark = open(mark_path, 'r')
side_rate_1 = file_mark.read()
if abs(float(side_rate_1) - float(side_rate)) == 0:
result = "Same Side Rate !!!"
file_mark.close()
os.remove(mark_path)
return result
else:
return "Fail !"
"""Sample:
curl http://192.168.3.11/student/create_space/123456/20171000718
"""
@app.route('/student/create_space/<class_name>/<name>')
def api_create_space(class_name, name):
new_path = "/var/www/demoapp/Accounts/" + class_name + "/" + name
if os.path.exists(new_path) == False:
os.makedirs(new_path)
        # Create the directory storing the CSV/JSON files used to record customers' travel coordinates
new_csv_path = new_path + "/" + "coordinations"
os.makedirs(new_csv_path)
new_orijpg_path = new_path + "/" + "OriJPG"
os.makedirs(new_orijpg_path)
return name + "'s space has been created !"
else:
return name + "'s space has been created before ! "
@app.route('/post_function_test1', methods=['POST'])
def post_function_test():
name = request.form.get('name', 'little apple')
class_name = request.form.get('class_name', 'little apple')
IP = request.remote_addr
file = open("/var/www/demoapp/post_function_test.txt", 'w')
file.close()
return 'hello, ' + name + ' class_name: ' + class_name + ' IP : ' + IP + ' success'
@app.route('/student_create_space', methods=['POST'])
def student_create_space():
# name = request.form.get('name', 'little apple')
# class_name = request.form.get('class_name', 'little apple')
# name = request.form['name']
# class_name = request.form['class_name']
IP = request.remote_addr
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
name = json_data.get("name")
class_name = json_data.get("class_name")
new_path = "/var/www/demoapp/Accounts/" + class_name + "/" + name
if os.path.exists(new_path) == False:
os.makedirs(new_path)
        # Create the directory storing the CSV/JSON files used to record customers' travel coordinates
new_csv_path = new_path + "/" + "coordinations"
os.makedirs(new_csv_path)
new_csv_path = new_path + "/" + "coordinations_back_up"
os.makedirs(new_csv_path)
new_orijpg_path = new_path + "/" + "OriJPG"
os.makedirs(new_orijpg_path)
return 'hello, ' + name + ' class_name: ' + class_name + ' IP : ' + IP + ' success'
else:
return name + "'s space has been created before ! "
@app.route('/sign_up_photo_upload/<class_name>/<name>', methods=['POST'])
def sign_up_photo_upload(class_name, name):
new_path = "/var/www/demoapp/Accounts/" + class_name + "/" + name
if os.path.exists(new_path) == False:
os.makedirs(new_path)
        # Create the directory storing the CSV/JSON files used to record customers' travel coordinates
new_csv_path = new_path + "/" + "coordinations"
os.makedirs(new_csv_path)
new_orijpg_path = new_path + "/" + "OriJPG"
os.makedirs(new_orijpg_path)
dirpath = '/var/www/demoapp'
upload_file = request.files['image01']
if upload_file and allowed_file(upload_file.filename):
filename = secure_filename(upload_file.filename)
upload_file.save(os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], filename))
sign_up_photo_path = dirpath + "/student_photo/" + str(upload_file.filename)
move_to_path = dirpath + "/Accounts/" + class_name + '/' + name + '/' + "OriJPG"
shutil.move(sign_up_photo_path, move_to_path)
"""Description: the orders following are used to check if the scripts have been correctly executed
>>>log_file = open('/var/www/demoapp/log.txt', mode='a')
>>>log_file.write("---shutil.move Ok !---")
"""
IP = request.remote_addr
dir = '/var/www/demoapp/Accounts'
extractProcess(upload_file.filename, dir, class_name, name, detector)
return 'hello, ' + name + ' class_name: ' + class_name + 'IP : ' + IP + ' success'
else:
return 'hello, ' + request.form.get('name', 'little apple') + ' failed'
def getDateStringName():
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # current time
construct1 = nowTime.split('-')
year = construct1[0]
month = construct1[1]
day = ((construct1[2]).split(' '))[0]
minPart = ((construct1[2]).split(' '))[1]
construct1 = minPart.split(":")
hour = construct1[0]
mint = construct1[1]
sec = construct1[2]
return year + "_" + month + "_" + day + "_" + hour + "_" + mint + "_" + sec
@app.route('/route_data_upload/<class_name>/<name>/<lng>/<lat>/new', methods=['POST'])
def route_data_upload_new(class_name, name, lng, lat):
dirpath = '/var/www/demoapp'
csv_dirpath = dirpath + "/Accounts/" + class_name + '/' + name + '/' + "coordinations"
dateStr = getDateStringName()
json_filepath = csv_dirpath + '/' + dateStr + ".json"
route_data = {}
route_data["lng"] = lng
route_data["lat"] = lat
a = json.dumps(route_data)
b = str(a) + "\n"
fh = open(json_filepath, mode='w')
fh.write(b)
fh.close()
return class_name + " " + name + " " + "route_data upload success ! "
@app.route('/route_data_upload/<class_name>/<name>/<lng>/<lat>/middle', methods=['POST'])
def route_data_upload_middle(class_name, name, lng, lat):
dirpath = '/var/www/demoapp'
csv_dirpath = dirpath + "/Accounts/" + class_name + '/' + name + '/' + "coordinations"
for dirpath, dirnames, filenames in os.walk(csv_dirpath):
json_filepath = csv_dirpath + '/' + filenames[0] # Attention: There will be only one csv json file
route_data = {}
route_data["lng"] = lng
route_data["lat"] = lat
a = json.dumps(route_data)
b = str(a) + "\n"
fh = open(json_filepath, mode='a')
fh.write(b)
fh.close()
return class_name + " " + name + " " + "route_data upload success ! "
@app.route('/route_data_upload/<class_name>/<name>/<lng>/<lat>/end', methods=['POST'])
def route_data_upload_end(class_name, name, | |
"NETWORK PRODUCTS",
"008021": "Alcatel Canada Inc.",
"008022": "SCAN-OPTICS",
"008023": "INTEGRATED BUSINESS NETWORKS",
"008024": "KALPANA, INC.",
"008025": "STOLLMANN GMBH",
"008026": "NETWORK PRODUCTS CORPORATION",
"008027": "ADAPTIVE SYSTEMS, INC.",
"008028": "TRADPOST (HK) LTD",
"008029": "EAGLE TECHNOLOGY, INC.",
"00802A": "TEST SYSTEMS & SIMULATIONS INC",
"00802B": "INTEGRATED MARKETING CO",
"00802C": "THE SAGE GROUP PLC",
"00802D": "XYLOGICS INC",
"00802E": "CASTLE ROCK COMPUTING",
"00802F": "NATIONAL INSTRUMENTS CORP.",
"008030": "NEXUS ELECTRONICS",
"008031": "BASYS, CORP.",
"008032": "ACCESS CO., LTD.",
"008033": "EMS Aviation, Inc.",
"008034": "SMT GOUPIL",
"008035": "TECHNOLOGY WORKS, INC.",
"008036": "REFLEX MANUFACTURING SYSTEMS",
"008037": "Ericsson Group",
"008038": "DATA RESEARCH & APPLICATIONS",
"008039": "ALCATEL STC AUSTRALIA",
"00803A": "VARITYPER, INC.",
"00803B": "APT COMMUNICATIONS, INC.",
"00803C": "TVS ELECTRONICS LTD",
"00803D": "SURIGIKEN CO., LTD.",
"00803E": "SYNERNETICS",
"00803F": "TATUNG COMPANY",
"008040": "<NAME> MANUFACTURING CO.",
"008041": "VE<NAME>",
"008042": "<NAME>",
"008043": "NETWORLD, INC.",
"008044": "SYSTECH COMPUTER CORP.",
"008045": "MATSUSHITA ELECTRIC IND. CO",
"008046": "Tattile SRL",
"008047": "IN-NET CORP.",
"008048": "COMPEX INCORPORATED",
"008049": "NISSIN ELECTRIC CO., LTD.",
"00804A": "PRO-LOG",
"00804B": "EAGLE TECHNOLOGIES PTY.LTD.",
"00804C": "CONTEC CO., LTD.",
"00804D": "CYCLONE MICROSYSTEMS, INC.",
"00804E": "APEX COMPUTER COMPANY",
"00804F": "DAIKIN INDUSTRIES, LTD.",
"008050": "ZIATECH CORPORATION",
"008051": "FIBERMUX",
"008052": "TECHNICALLY ELITE CONCEPTS",
"008053": "INTELLICOM, INC.",
"008054": "FRONTIER TECHNOLOGIES CORP.",
"008055": "FERMILAB",
"008056": "SPHINX ELEKTRONIK GMBH",
"008057": "ADSOFT, LTD.",
"008058": "PRINTER SYSTEMS CORPORATION",
"008059": "STANLEY ELECTRIC CO., LTD",
"00805A": "TULIP COMPUTERS INTERNAT'L B.V",
"00805B": "CONDOR SYSTEMS, INC.",
"00805C": "AGILIS CORPORATION",
"00805D": "CANSTAR",
"00805E": "LSI LOGIC CORPORATION",
"00805F": "Hewlett-Packard Company",
"008060": "NETWORK INTERFACE CORPORATION",
"008061": "LITTON SYSTEMS, INC.",
"008062": "INTERFACE CO.",
"008063": "Hirschmann Automation and Control GmbH",
"008064": "WYSE TECHNOLOGY LLC",
"008065": "CYBERGRAPHIC SYSTEMS PTY LTD.",
"008066": "ARCOM CONTROL SYSTEMS, LTD.",
"008067": "SQUARE D COMPANY",
"008068": "YAMATECH SCIENTIFIC LTD.",
"008069": "COMPUTONE SYSTEMS",
"00806A": "ERI (EMPAC RESEARCH INC.)",
"00806B": "SCHMID TELECOMMUNICATION",
"00806C": "CEGELEC PROJECTS LTD",
"00806D": "CENTURY SYSTEMS CORP.",
"00806E": "NIPPON STEEL CORPORATION",
"00806F": "ONELAN LTD.",
"008070": "COMPUTADORAS MICRON",
"008071": "SAI TECHNOLOGY",
"008072": "MICROPLEX SYSTEMS LTD.",
"008073": "DWB ASSOCIATES",
"008074": "FISHER CONTROLS",
"008075": "PARSYTEC GMBH",
"008076": "MCNC",
"008077": "BROTHER INDUSTRIES, LTD.",
"008078": "PRACTICAL PERIPHERALS, INC.",
"008079": "MICROBUS DESIGNS LTD.",
"00807A": "AITECH SYSTEMS LTD.",
"00807B": "ARTEL COMMUNICATIONS CORP.",
"00807C": "FIBERCOM, INC.",
"00807D": "EQUINOX SYSTEMS INC.",
"00807E": "SOUTHERN PACIFIC LTD.",
"00807F": "DY-4 INCORPORATED",
"008080": "DATAMEDIA CORPORATION",
"008081": "KENDALL SQUARE RESEARCH CORP.",
"008082": "PEP MODULAR COMPUTERS GMBH",
"008083": "AMDAHL",
"008084": "THE CLOUD INC.",
"008085": "H-THREE SYSTEMS CORPORATION",
"008086": "COMPUTER GENERATION INC.",
"008087": "OKI ELECTRIC INDUSTRY CO., LTD",
"008088": "VICTOR COMPANY OF JAPAN, LTD.",
"008089": "TECNETICS (PTY) LTD.",
"00808A": "SUMMIT MICROSYSTEMS CORP.",
"00808B": "DACOLL LIMITED",
"00808C": "NetScout Systems, Inc.",
"00808D": "WESTCOAST TECHNOLOGY B.V.",
"00808E": "RADSTONE TECHNOLOGY",
"00808F": "C. ITOH ELECTRONICS, INC.",
"008090": "MICROTEK INTERNATIONAL, INC.",
"008091": "TOKYO ELECTRIC CO.,LTD",
"008092": "Silex Technology, Inc.",
"008093": "XYRON CORPORATION",
"008094": "ALFA LAVAL AUTOMATION AB",
"008095": "BASIC MERTON HANDELSGES.M.B.H.",
"008096": "HUMAN DESIGNED SYSTEMS, INC.",
"008097": "CENTRALP AUTOMATISMES",
"008098": "TDK CORPORATION",
"008099": "Eaton Industries GmbH",
"00809A": "NOVUS NETWORKS LTD",
"00809B": "JUSTSYSTEM CORPORATION",
"00809C": "LUXCOM, INC.",
"00809D": "Commscraft Ltd.",
"00809E": "DATUS GMBH",
"00809F": "ALCATEL BUSINESS SYSTEMS",
"0080A0": "EDISA HEWLETT PACKARD S/A",
"0080A1": "MICROTEST, INC.",
"0080A2": "CREATIVE ELECTRONIC SYSTEMS",
"0080A3": "Lantronix",
"0080A4": "LIBERTY ELECTRONICS",
"0080A5": "SPEED INTERNATIONAL",
"0080A6": "REPUBLIC TECHNOLOGY, INC.",
"0080A7": "Honeywell International Inc",
"0080A8": "VITACOM CORPORATION",
"0080A9": "CLEARPOINT RESEARCH",
"0080AA": "MAXPEED",
"0080AB": "DUKANE NETWORK INTEGRATION",
"0080AC": "IMLOGIX, DIVISION OF GENESYS",
"0080AD": "CNET TECHNOLOGY, INC.",
"0080AE": "HUGHES NETWORK SYSTEMS",
"0080AF": "ALLUMER CO., LTD.",
"0080B0": "ADVANCED INFORMATION",
"0080B1": "SOFTCOM A/S",
"0080B2": "NETWORK EQUIPMENT TECHNOLOGIES",
"0080B3": "AVAL DATA CORPORATION",
"0080B4": "SOPHIA SYSTEMS",
"0080B5": "UNITED NETWORKS INC.",
"0080B6": "THEMIS COMPUTER",
"0080B7": "STELLAR COMPUTER",
"0080B8": "B.U.G. MORISEIKI, INCORPORATED",
"0080B9": "ARCHE TECHNOLIGIES INC.",
"0080BA": "SPECIALIX (ASIA) PTE, LTD",
"0080BB": "HUGHES LAN SYSTEMS",
"0080BC": "HITACHI ENGINEERING CO., LTD",
"0080BD": "THE FURUKAWA ELECTRIC CO., LTD",
"0080BE": "ARIES RESEARCH",
"0080BF": "TAKAOKA ELECTRIC MFG. CO. LTD.",
"0080C0": "PENRIL DATACOMM",
"0080C1": "LANEX CORPORATION",
"0080C2": "IEEE 802.1 COMMITTEE",
"0080C3": "BICC INFORMATION SYSTEMS & SVC",
"0080C4": "DOCUMENT TECHNOLOGIES, INC.",
"0080C5": "NOVELLCO DE MEXICO",
"0080C6": "NATIONAL DATACOMM CORPORATION",
"0080C7": "XIRCOM",
"0080C8": "D-LINK SYSTEMS, INC.",
"0080C9": "ALBERTA MICROELECTRONIC CENTRE",
"0080CA": "NETCOM RESEARCH INCORPORATED",
"0080CB": "FALCO DATA PRODUCTS",
"0080CC": "MICROWAVE BYPASS SYSTEMS",
"0080CD": "MICRONICS COMPUTER, INC.",
"0080CE": "BROADCAST TELEVISION SYSTEMS",
"0080CF": "EMBEDDED PERFORMANCE INC.",
"0080D0": "COMPUTER PERIPHERALS, INC.",
"0080D1": "KIMTRON CORPORATION",
"0080D2": "SHINNIHONDENKO CO., LTD.",
"0080D3": "SHIVA CORP.",
"0080D4": "CHASE RESEARCH LTD.",
"0080D5": "CADRE TECHNOLOGIES",
"0080D6": "NUVOTECH, INC.",
"0080D7": "Fantum Engineering",
"0080D8": "NETWORK PERIPHERALS INC.",
"0080D9": "EMK Elektronik GmbH & Co. KG",
"0080DA": "Bruel & Kjaer Sound & Vibration Measurement A/S",
"0080DB": "GRAPHON CORPORATION",
"0080DC": "PICKER INTERNATIONAL",
"0080DD": "GMX INC/GIMIX",
"0080DE": "GIPSI S.A.",
"0080DF": "ADC CODENOLL TECHNOLOGY CORP.",
"0080E0": "XTP SYSTEMS, INC.",
"0080E1": "STMICROELECTRONICS",
"0080E2": "T.D.I. CO., LTD.",
"0080E3": "CORAL NETWORK CORPORATION",
"0080E4": "NORTHWEST DIGITAL SYSTEMS, INC",
"0080E5": "NetApp, Inc",
"0080E6": "PEER NETWORKS, INC.",
"0080E7": "LYNWOOD SCIENTIFIC DEV. LTD.",
"0080E8": "CUMULUS CORPORATIION",
"0080E9": "Madge Ltd.",
"0080EA": "ADVA Optical Networking Ltd.",
"0080EB": "COMPCONTROL B.V.",
"0080EC": "SUPERCOMPUTING SOLUTIONS, INC.",
"0080ED": "IQ TECHNOLOGIES, INC.",
"0080EE": "THOMSON CSF",
"0080EF": "RATIONAL",
"0080F0": "Panasonic Communications Co., Ltd.",
"0080F1": "OPUS SYSTEMS",
"0080F2": "RAYCOM SYSTEMS INC",
"0080F3": "SUN ELECTRONICS CORP.",
"0080F4": "TELEMECANIQUE ELECTRIQUE",
"0080F5": "Quantel Ltd",
"0080F6": "SYNERGY MICROSYSTEMS",
"0080F7": "ZENITH ELECTRONICS",
"0080F8": "MIZAR, INC.",
"0080F9": "HEURIKON CORPORATION",
"0080FA": "RWT GMBH",
"0080FB": "BVM LIMITED",
"0080FC": "AVATAR CORPORATION",
"0080FD": "EXSCEED CORPRATION",
"0080FE": "AZURE TECHNOLOGIES, INC.",
"0080FF": "SOC. DE TELEINFORMATIQUE RTC",
"0086A0": "PRIVATE",
"008865": "Apple",
"008B43": "RFTECH",
"008C10": "Black Box Corp.",
"008C54": "ADB Broadband Italia",
"008CFA": "Inventec Corporation",
"008D4E": "CJSC NII STT",
"008DDA": "Link One Co., Ltd.",
"008EF2": "NETGEAR INC.,",
"009000": "DIAMOND MULTIMEDIA",
"009001": "NISHIMU ELECTRONICS INDUSTRIES CO., LTD.",
"009002": "ALLGON AB",
"009003": "APLIO",
"009004": "3COM EUROPE LTD.",
"009005": "PROTECH SYSTEMS CO., LTD.",
"009006": "HAMAMATSU PHOTONICS K.K.",
"009007": "DOMEX TECHNOLOGY CORP.",
"009008": "HanA Systems Inc.",
"009009": "I Controls, Inc.",
"00900A": "PROTON ELECTRONIC INDUSTRIAL CO., LTD.",
"00900B": "LANNER ELECTRONICS, INC.",
"00900C": "CISCO SYSTEMS, INC.",
"00900D": "Overland Storage Inc.",
"00900E": "HANDLINK TECHNOLOGIES, INC.",
"00900F": "KAWASAKI HEAVY INDUSTRIES, LTD",
"009010": "SIMULATION LABORATORIES, INC.",
"009011": "WAVTrace, Inc.",
"009012": "GLOBESPAN SEMICONDUCTOR, INC.",
"009013": "SAMSAN CORP.",
"009014": "ROTORK INSTRUMENTS, LTD.",
"009015": "CENTIGRAM COMMUNICATIONS CORP.",
"009016": "ZAC",
"009017": "Zypcom, Inc",
"009018": "ITO ELECTRIC INDUSTRY CO, LTD.",
"009019": "HERMES ELECTRONICS CO., LTD.",
"00901A": "UNISPHERE SOLUTIONS",
"00901B": "DIGITAL CONTROLS",
"00901C": "mps Software Gmbh",
"00901D": "PEC (NZ) LTD.",
"00901E": "Selesta Ingegneria S.p.A.",
"00901F": "ADTEC PRODUCTIONS, INC.",
"009020": "PHILIPS ANALYTICAL X-RAY B.V.",
"009021": "CISCO SYSTEMS, INC.",
"009022": "IVEX",
"009023": "ZILOG INC.",
"009024": "PIPELINKS, INC.",
"009025": "BAE Systems Australia (Electronic | |
self.exit)
BindShortcuts(self.txtarea, "Undo", self.undo)
BindShortcuts(self.txtarea,"Redo",self.redo)
BindShortcuts(self.txtarea,"Comment Line",self.CommentBlock)
BindShortcuts(self.txtarea,"Compile",self.Compile)
BindShortcuts(self.txtarea,"LineCol",self.UpdateLineandColumn)
BindShortcuts(self.txtarea,"ShiftLineUp",self.ShiftLineUp)
BindShortcuts(self.txtarea,"ShiftLineDown",self.ShiftLineDown)
BindShortcuts(self.txtarea,"CopyLineUp",self.CopyLineUp)
BindShortcuts(self.txtarea,"CopyLineDown",self.CopyLineDown)
BindShortcuts(self.txtarea,"InsertMultiComment",self.InsertMultiSingleLine)
# BindShortcuts(self.txtarea,"Keywords",self.SyntaxHighlighting) Checking for performance
BindShortcuts(self.txtarea,"Brackets",self.OpenCloseComplete)
BindShortcuts(self.txtarea,"QuotesD",self.SymmetericComplete)
BindShortcuts(self.txtarea,"QuotesS",self.SymmetericComplete)
BindShortcuts(self.txtarea,"SquareB",self.OpenCloseComplete)
BindShortcuts(self.txtarea,"CurlyB",self.OpenCloseComplete)
# BindShortcuts(self.txtarea,"PointyB",self.OpenCloseComplete) These are used in comparision,(can be enabled)
BindShortcuts(self.txtarea,"Change",self.UpdateOnChangeinTextArea)
# BindShortcuts(self.txtarea,"AngleBracketClose",self.CheckCloseCharacter) Not necessary for C/C++
BindShortcuts(self.txtarea,"RoundBracketClose",self.CheckCloseCharacter)
BindShortcuts(self.txtarea,"SquareBracketClose",self.CheckCloseCharacter)
BindShortcuts(self.txtarea,"CurlyBracketClose",self.CheckCloseCharacter)
BindShortcuts(self.txtarea,"EnterConfig",self.ConfigEnter)
BindShortcuts(self.txtarea,"IndentColon",self.AutoIndent)
BindShortcuts(self.txtarea,"Tabs",self.Tab)
#Master/Functions of the editor
BindShortcuts(self.master, "New", self.newfile)
BindShortcuts(self.master, "Open", self.openfile)
BindShortcuts(self.master, "Save", self.savefile)
BindShortcuts(self.master, "SaveAs", self.saveasfile)
BindShortcuts(self.master,"CreateConsole",self.CreateNewConsole)
BindShortcuts(self.master,"Show Line Numbers",self.ToggleLineNumbers)
BindShortcuts(self.master,"Full Screen",self.FullScreen)
BindShortcuts(self.master,"Normal Screen",self.EscapeFullScreen)
BindShortcuts(self.master,"Hide Directory",self.ToggleDirectory)
BindShortcuts(self.master,"Zen Mode",self.ZenMode)
BindShortcuts(self.master,"Logs",self.DisplayLogs)
BindShortcuts(self.master,"Settings",self.Settings)
BindShortcuts(self.master,"Close",self.exit)
def Tab(self,event):
self.txtarea.insert(INSERT,' '*4)
return "break"
def ConfigEnter(self,args):
""" [Remove the tag from single line comments!!] [Some problem in space to tabs]"""
index = self.GetIndexLineColumnTextArea()
line,col = self.GetLineandColumnofTextArea()
# print(self.txtarea.tag_ranges("Token.Comment.Single"))
self.txtarea.tag_remove("Token.Comment.Single","{}.0".format(line),END)
data = self.txtarea.get("{} linestart".format(index),"{} lineend".format(index))
currentIndent = len(data) - len(data.lstrip())
# print(currentIndent)
self.txtarea.insert(INSERT,"\n{}".format(" "*currentIndent))
return "break"
def CheckCloseCharacter(self,args):
""" By mistake, sometimes we press the closing bracket even when autocomplete is on- this is a small patch on it """
if self.OpenCloseGood:
return "break"
def OpenCloseComplete(self,args):
# print(args)
""" [Complete the opening and closing of the symbols that are mirror image(brackets and other)] """
offsetDict={"(":1,"{":2,"[":2,"<":2}
self.OpenCloseGood = 0
index = self.GetIndexLineColumnTextArea()
if not self.txtarea.tag_ranges(SEL):
self.txtarea.edit_separator()
self.txtarea.insert(index,chr(ord(args.char)+offsetDict[args.char])) #Most of open close characters are off by one
self.OpenCloseGood = 1
self.txtarea.mark_set(INSERT,index)
else:
txt = self.txtarea.selection_get()
self.txtarea.edit_separator()
self.txtarea.insert(INSERT,"{}{}{}".format(args.char,txt,chr(ord(args.char)+offsetDict[args.char])))
self.txtarea.edit_separator()
self.txtarea.delete(SEL_FIRST,SEL_LAST)
index = self.GetIndexLineColumnTextArea()
self.txtarea.mark_set(INSERT,"{}-1c".format(index))
return "break"
self.OpenCloseGood = 0
def GetIndexLineColumnTextArea(self):
line,col = self.GetLineandColumnofTextArea()
return "{}.{}".format(line,col)
def SymmetericComplete(self,args):
""" [Complete the opening and closing of the symbols that are similar] """
if not self.txtarea.tag_ranges(SEL):
index = self.GetIndexLineColumnTextArea()
self.txtarea.edit_separator()
self.txtarea.insert(index,args.char)
self.txtarea.mark_set(INSERT,index)
else:
txt = self.txtarea.selection_get()
self.txtarea.edit_separator()
self.txtarea.insert(INSERT,"{}{}{}".format(args.char,txt,args.char))
self.txtarea.edit_separator()
self.txtarea.delete(SEL_FIRST,SEL_LAST)
index = self.GetIndexLineColumnTextArea()
self.txtarea.mark_set(INSERT,"{}-1c".format(index))
return "break"
def CreateNewConsole(self,*args):
if self.filename:
cnl = Console(self.filename)
cnl.NewConsoleLaunch()
else:
messagebox.showerror("No folder opened","Open a folder first")
def ToggleLineNumbers(self,*args):
""" [Show/Hide the line numbers widget] """
if self.IsLineWindowMapped():
self.linenumbers.pack_forget()
else:
if self.IsDirectoryMapped():
self.linenumbers.pack(side = RIGHT,fill = Y,before=self.treeview,pady=(40,0),padx=3)
else:
self.linenumbers.pack(side = RIGHT,fill = Y,after=self.nbook,pady=(40,0),padx=3)
def CommentBlock(self,*args):
""" [Comment a line in the text space.(Control-C)] """
line,col = self.GetLineandColumnofTextArea()
if self.CheckifSelectedText():
self.InsertMultilineComment(SEL_FIRST,SEL_LAST)
else:
self.InsertSingleLineComment(line,col)
def CheckifSelectedText(self):
""" [Check if any text in text area is selected or not] """
#Use tag_ranges(sel.first,sel.last)
try:
self.txtarea.selection_get()
return True
except Exception:
return False
def GetLineandColumnofTextArea(self):
""" [Retuns the index of the current line in the textspace.] """
return self.txtarea.index('insert').split(".")
def InsertSingleLineComment(self,line,col):
""" [Add the language specific(currently C,C++)identifier of the start of a single line comment in a line of textarea.] """
self.txtarea.edit_separator()
string = self.txtarea.get("{}.0".format(line),"{}.{}".format(line,LanguageC.LenSLC))
if self.IsaCommentLine(string.strip()):
self.UncommentLine(line)
else:
self.txtarea.insert("{}.0".format(line),LanguageC.SingleLineCommentsStart)
# ,"{}.0".format(line)
def InsertMultilineComment(self,startIndex:str,endIndex:str):
""" [Add the language specific(currently C,C++)identifier of the start and end of a multi line comment in textarea.] """
self.txtarea.edit_separator()
if self.IsaMultilineComment(self.txtarea.get("{} linestart".format(startIndex),"{} lineend".format(endIndex))):
self.UncommentBlock(startIndex,endIndex)
self.txtarea.tag_delete("Token.Comment.Multiline")
else:
self.txtarea.insert("{} linestart".format(startIndex),LanguageC.MultilineCommentStart)
self.txtarea.insert("{} lineend".format(endIndex),LanguageC.MultilineCommentEnd)
self.TagBindsForTextArea()
def UncommentLine(self,line):
""" [Uncomment the line commented by CommentBlock ] """
self.txtarea.edit_separator()
self.txtarea.delete("{}.0".format(line),"{}.{}".format(line,LanguageC.LenSLC))
self.txtarea.tag_remove("Token.Comment.Single","{}.0".format(line),"{}.0 lineend".format(line))
def IsaMultilineComment(self,string:str):
""" [Check if a block of text is a mutliline comment or not.] """
return string.startswith(LanguageC.MultilineCommentStart) and string.endswith(LanguageC.MultilineCommentEnd)
def UncommentBlock(self,first,last):
""" [Uncomment the previously commented block of program.Edit:Improved version] """
self.txtarea.edit_separator()
self.txtarea.delete("{} linestart".format(first),"{} linestart +{}c".format(first,LanguageC.LenMLCStart))
self.txtarea.delete("{} lineend -{}c".format(last,LanguageC.LenMLCEnd),"{} lineend".format(last))
def InsertMultiSingleLine(self,*args):
index = self.GetIndexLineColumnTextArea()
self.txtarea.edit_separator()
self.txtarea.insert("{} linestart".format(index),LanguageC.MultilineCommentStart)
self.txtarea.insert("{} lineend".format(index),LanguageC.MultilineCommentEnd)
def IsaCommentLine(self,string:str):
""" [Check wheather the string is a comment line or not(used by-Comment/Uncomment).Currently supports C,C++] """
return string.startswith(LanguageC.SingleLineCommentsStart)
def Compile(self,*args):
""" [Comile the code written in the text area(Currently supports C,C++)] """
if self.filename:
self.savefile()
cmplObject = CompilerInterface(self.filename)
Msg = cmplObject.CallCompiler()
self.HighlightErrorLine(Msg)
if not Msg:
self.DisplayErrorMessageinLogs(Msg)
self.UpdateTreeview()
self.TagRemove()
# self.ClearMessagesinLogs()
elif self.IsTextAreaEmpty():
messagebox.showerror(Errors.ErrorsDictErrorHeadings["EmptyTextCompile"],Errors.ErrorsDictMessage["EmptyTextCompile"])
else:
self.savefile()
def CheckMessage(self,Message):
pass
def UpdateTreeview(self):
""" [Update the treeview] """
self.DeleteTreeContents()
self.CallTreeRoot()
def IsTextAreaEmpty(self):
""" [Check if the textarea is empty or not] """
return len(self.txtarea.get("1.0",END))<=1
def DisplayErrorMessageinLogs(self,msg:str):
""" [Display error message in Logs widget in case of compilation error.] """
self.logstext.config(state=NORMAL)
self.logstext.insert(END,"\n{}".format(msg))
self.bell()
self.logstext.config(state=DISABLED,font="bold")
def ClearMessagesinLogs(self):
""" [Clear the error messages in the Logs widget] """
self.logstext.config(state=NORMAL)
self.logstext.delete("2.0",END)
self.logstext.config(state=DISABLED)
def ShiftLineUp(self,*args):
""" [Shift Lines Up(Alt + Uparrow)] """
line,_ = self.GetLineandColumnofTextArea()
curLine = self.txtarea.get("{}.0".format(line),"{}.0 lineend".format(line))
aboveline = self.txtarea.get("{}.0".format(int(line)-1),"{}.0 lineend".format(int(line)-1))
self.txtarea.edit_separator()
self.txtarea.delete("{}.0".format(int(line)-1),"{}.0 lineend".format(int(line)-1))
self.txtarea.insert("{}.0".format(int(line)-1),curLine)
self.txtarea.edit_separator()
self.txtarea.delete("{}.0".format(line),"{}.0 lineend".format(line))
self.txtarea.insert("{}.0".format(line),aboveline)
def ShiftLineDown(self,*args):
""" [Shift Lines Down(Alt + Downarrow)] """
line,_ = self.GetLineandColumnofTextArea()
curLine = self.txtarea.get("{}.0".format(line),"{}.0 lineend".format(line))
belowline = self.txtarea.get("{}.0".format(int(line)+1),"{}.0 lineend".format(int(line)+1))
self.txtarea.delete("{}.0".format(int(line)+1),"{}.0 lineend".format(int(line)+1))
self.txtarea.edit_separator()
self.txtarea.insert("{}.0".format(int(line)+1),curLine)
self.txtarea.delete("{}.0".format(line),"{}.0 lineend".format(line))
self.txtarea.edit_separator()
self.txtarea.insert("{}.0".format(line),belowline)
def CopyLineUp(self,event):
""" [Copy the contents of current line to the line above] """
index = self.GetIndexLineColumnTextArea()
self.CopyLineDown()
self.txtarea.mark_set(INSERT,"{} lineend".format(index))
return "break"
def CopyLineDown(self,*args):
""" [Copy the contents of the current line to the line below] """
line,_ = self.GetLineandColumnofTextArea()
totalLinesbelow = len(self.txtarea.get(self.GetIndexLineColumnTextArea(),END).split("\n"))+1
# print(totalLinesbelow) Debug
self.txtarea.insert(END,"\n")
for i in range(totalLinesbelow,int(line)-1,-1):
currline = self.txtarea.get("{}.0".format(i),"{}.0 lineend".format(i))
self.txtarea.edit_separator()
self.txtarea.delete("{}.0".format(i+1),"{}.0 lineend".format(i+1))
self.txtarea.edit_separator()
self.txtarea.insert("{}.0".format(i+1),currline) #Because the line is deleted
return "break"
def OpenFolder(self):
""" [Binded function of the Button initially placed in treeview.Call the openfile method and destroys itself] """
self.openfile()
self.RemoveButtoninTreeview()
def TraverseDir(self,parent,path):
""" [Populate the treeview after opening a file(Currently nothing happens on selecting them.)] """
for d in os.listdir(path):
fullPath = os.path.join(path,d)
isDir = os.path.isdir(fullPath)
id = self.treeview.insert(parent,"end",text = d,open = False)
if isDir:
self.TraverseDir(id,fullPath)
def DeleteTreeContents(self):
""" [Delete the contents of the treeview.] """
self.treeview.delete(list(self.treeview.get_children()))
def CallTreeRoot(self):
""" [Call the root folder of the treeview and call TraverseDir to populate it] """
if len(self.treeview.get_children())>1:
self.btn.destroy()
if self.filename:
path =os.path.dirname(self.filename)
node = self.treeview.insert("","end",text="Opened Folder", open=True)
self.TraverseDir(node,path)
def IsTreeViewEmpty(self):
""" [Checks if the treeview is empty or not] """
return len(self.treeview.get_children())<1
def RemoveButtoninTreeview(self):
self.btn.destroy()
def TabName(self):
""" [Returns the tab name that is to be displayed on the notebook tab.] """
return os.path.basename(self.filename) if self.filename else "Untitled"
def Refresh(self):
""" [Refresh some functions that might need it(Updating values of some instances).] """
self.TabName()
self.settitle()
self.status.set("Refresh")
def AutoIndent(self,event):
""" [Bounded function for colon(:) for indentation] """
line = self.txtarea.get("insert linestart", "insert lineend")
current_indent = len(line) - len(line.lstrip())
new_indent = current_indent+MasterProperties.TabSize
self.txtarea.insert(INSERT,"{}\n{}".format(event.char," "*new_indent))
return "break"
def SyntaxHighlighting(self,*args):
""" [Syntax highlighting for textarea - called on <<Change>> and runs on different thread(safe)] """
index = self.GetIndexLineColumnTextArea()
self.txtarea.mark_set("range_start", "{} linestart".format(index))
data = self.txtarea.get("{} linestart".format(index), "{} lineend".format(index))
for token, content in lex(data, CLexer()):
self.txtarea.mark_set("range_end", "range_start + {}c".format(len(content)))
if str(token) == "Token.Comment.Single":
self.txtarea.tag_add(str(token), "range_start", "range_start lineend")
self.txtarea.tag_add(str(token), "range_start", "range_end")
self.txtarea.mark_set("range_start", "range_end")
# print(content)
def HighLightOpenedFile(self,*args):
self.txtarea.mark_set("range_start", "1.0")
data = self.txtarea.get("1.0", END)
for token, content in lex(data, CLexer()):
self.txtarea.mark_set("range_end", "range_start + {}c".format(len(content)))
self.txtarea.tag_add(str(token), "range_start", "range_end")
self.txtarea.mark_set("range_start", "range_end")
def HighlightErrorLine(self,Message:str):
try:
emess = self.ExtractErrorMessage(Message)
idxStart = self.txtarea.search(emess,index="1.0",stopindex=END)
if idxStart:
self.txtarea.tag_add("Error",idxStart,idxStart+" lineend")
self.txtarea.tag_configure("Error",foreground="red")
l,_ = idxStart.split(".")
self.linenumbers.tag_add("Glow","{}.0".format(l),"{}.0 lineend".format(l))
self.linenumbers.tag_configure("Glow",foreground="red")
except Exception as e:
print(e)
def ExtractErrorMessage(self,Message):
""" [Find the error message in logs text.] """
lineseq = Message.split("\n")
emess = lineseq[2].strip()
return emess
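    # Assumption: the third line of the compiler output echoes the offending source line,
    # which is what HighlightErrorLine then searches for in the text area; a different
    # compiler/output format would make this index wrong.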
def TagRemove(self):
""" [Remove the highlight text tag from the textarea] """
self.txtarea.tag_delete("Error")
# self.linenumbers.tag_delete("Glow")
def UpdateLineandColumn(self,*args):
""" [Update the line number and column in StatusBar text.] """
line,column = self.GetLineandColumnofTextArea()
self.labeltext.config(text="Line:{} Col:{}".format(line,column))
def SetLanguageinStatus(self,*args):
""" [Set the language currently working on in the textarea(Currently only supports C/C++)] """
        self.extension = self.filename.rsplit(".", 1)[-1].lower()  # rsplit handles paths that contain more than one dot
if self.extension =="c" or self.extension =="cpp":
self.langtext.config(text="C/C++")
self.complietext.config(text="GNU/GCC",font=5)
def Selected(self,event):
""" [Treeview Select bound function.] """
item = self.treeview.selection()[0]
tabName = self.treeview.item(item,"text")
def Open(self,event):
""" [Treeview Open bounded function] """
item = self.treeview.selection()[0]
# print("Opened",self.treeview.item(item,"open"))
#Add tab adding stuff here !!
tabName = self.treeview.item(item,"text")
newTab = TextArea(self.nbook)
# print(self.nbook.children)
self.nbook.add(newTab,text=tabName)
raise NotImplementedError
def Closed(self,event):
""" [Treeview Closed bound function] """
print("Closed",event)
raise NotImplementedError
def TagBindText(self,*args):
""" [Testing function for selection tag] """
self.txtarea.tag_configure(SEL,foreground="red",underline=True)
class FileDialog:
""" Base class for Dialogs """
title = ""
filetypes = (("All Files", "*.*"), ("Text Files", "*.txt"), ("Python Files", "*.py"),("C Files","*.c"),("CPP Files","*.cpp"))
class OpenFileDialog(FileDialog):
""" [Derived class for opening a file dialog] """
title = "Select File"
class SaveasFileDialog(FileDialog):
""" [Derived class for closing a file dialog] """
title = "Save File as"
defaultExt = ".txt"
defaultFile = "Untitled.txt"
class TreeviewSpecs:
""" [Abstract Class containing specifications of Treeview widget(may derive from abc)] """
Style = "Treeview"
Selectmode="extended"
RootHead = "#0"
Anchor = W
Side=RIGHT
Expand=True
Fill=BOTH
PadX=2
class NBSpecs:
""" [Abstract Class containing specifications of Notebook widget(may derive from abc)] """
| |
possible3d
if observed2d.shape in not_seen2d:
not_seen2d.remove(observed2d.shape)
if observed3d.shape in not_seen3d:
not_seen3d.remove(observed3d.shape)
if not not_seen2d and not not_seen3d:
break
assert not not_seen2d
assert not not_seen3d
def test_decrease_size_by_tuples_of_floats__one_per_side(self):
image2d = self.image2d[0:4, 0:4]
image3d = self.image3d[0:4, 0:4, :]
aug = iaa.Resize({"height": (0.76, 1.0), "width": (0.76, 1.0)})
not_seen2d = set()
not_seen3d = set()
for hsize in sm.xrange(3, 4+1):
for wsize in sm.xrange(3, 4+1):
not_seen2d.add((hsize, wsize))
for hsize in sm.xrange(3, 4+1):
for wsize in sm.xrange(3, 4+1):
not_seen3d.add((hsize, wsize, 3))
possible2d = set(list(not_seen2d))
possible3d = set(list(not_seen3d))
for _ in sm.xrange(100):
observed2d = aug.augment_image(image2d)
observed3d = aug.augment_image(image3d)
assert observed2d.shape in possible2d
assert observed3d.shape in possible3d
if observed2d.shape in not_seen2d:
not_seen2d.remove(observed2d.shape)
if observed3d.shape in not_seen3d:
not_seen3d.remove(observed3d.shape)
if not not_seen2d and not not_seen3d:
break
assert not not_seen2d
assert not not_seen3d
def test_bad_datatype_for_size_leads_to_failure(self):
got_exception = False
try:
aug = iaa.Resize("foo")
_ = aug.augment_image(self.image2d)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_get_parameters(self):
aug = iaa.Resize(size=1, interpolation="nearest")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == "nearest"
def test_dtypes_roughly(self):
# most of the dtype testing is done for imresize_many_images()
# so we focus here on a rough test that merely checks if the dtype
# does not change
# these dtypes should be kept in sync with imresize_many_images()
dtypes = [
"uint8",
"uint16",
"int8",
"int16",
"float16",
"float32",
"float64",
"bool"
]
for dt in dtypes:
for ip in ["nearest", "cubic"]:
aug = iaa.Resize({"height": 10, "width": 20}, interpolation=ip)
for is_list in [False, True]:
with self.subTest(dtype=dt, interpolation=ip,
is_list=is_list):
image = np.full((9, 19, 3), 1, dtype=dt)
images = [image, image]
if not is_list:
images = np.array(images, dtype=dt)
images_aug = aug(images=images)
if is_list:
assert isinstance(images_aug, list)
else:
assert ia.is_np_array(images_aug)
assert len(images_aug) == 2
for image_aug in images_aug:
assert image_aug.dtype.name == dt
assert image_aug.shape == (10, 20, 3)
assert np.all(image_aug >= 1 - 1e-4)
def test_pickleable(self):
aug = iaa.Resize({"height": (10, 30), "width": (10, 30)},
interpolation=["nearest", "linear"],
seed=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(50, 50, 1))
class TestPad(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
base_img = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.image])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def psoi(self):
polys = [ia.Polygon([(1, 1), (2, 1), (2, 2)])]
return ia.PolygonsOnImage(polys, shape=self.image.shape)
@property
def lsoi(self):
ls = [ia.LineString([(1, 1), (2, 1), (2, 2)])]
return ia.LineStringsOnImage(ls, shape=self.image.shape)
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
@property
def heatmap(self):
heatmaps_arr = np.float32([[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=self.image.shape)
@property
def segmap(self):
segmaps_arr = np.int32([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=self.image.shape)
def test___init___pad_mode_is_all(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode=ia.ALL,
pad_cval=0,
keep_size=False)
expected = ["constant", "edge", "linear_ramp", "maximum", "mean",
"median", "minimum", "reflect", "symmetric", "wrap"]
assert isinstance(aug.pad_mode, iap.Choice)
assert len(aug.pad_mode.a) == len(expected)
assert np.all([v in aug.pad_mode.a for v in expected])
def test___init___pad_mode_is_list(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode=["constant", "maximum"],
pad_cval=0,
keep_size=False)
expected = ["constant", "maximum"]
assert isinstance(aug.pad_mode, iap.Choice)
assert len(aug.pad_mode.a) == len(expected)
assert np.all([v in aug.pad_mode.a for v in expected])
def test___init___pad_cval_is_list(self):
aug = iaa.Pad(px=(0, 1, 0, 0),
pad_mode="constant",
pad_cval=[50, 100],
keep_size=False)
expected = [50, 100]
assert isinstance(aug.pad_cval, iap.Choice)
assert len(aug.pad_cval.a) == len(expected)
assert np.all([v in aug.pad_cval.a for v in expected])
def test_pad_images_by_1px_each_side_on_its_own(self):
# test pad by 1 pixel on each side
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
base_img_padded = np.pad(
self.image,
((top, bottom), (left, right), (0, 0)),
mode="constant",
constant_values=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, np.array([base_img_padded]))
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [base_img_padded])
def _test_pad_cbaoi_by_1px_each_side_on_its_own(self, cbaoi, augf_name):
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
image_padded_shape = list(self.image.shape)
image_padded_shape[0] += top + bottom
image_padded_shape[1] += left + right
observed = getattr(aug, augf_name)(cbaoi)
expected = cbaoi.shift(x=left, y=top)
expected.shape = tuple(image_padded_shape)
assert_cbaois_equal(observed, expected)
def test_pad_keypoints_by_1px_each_side_on_its_own(self):
self._test_pad_cbaoi_by_1px_each_side_on_its_own(
self.kpsoi, "augment_keypoints")
def test_pad_polygons_by_1px_each_side_on_its_own(self):
self._test_pad_cbaoi_by_1px_each_side_on_its_own(
self.psoi, "augment_polygons")
def test_pad_line_strings_by_1px_each_side_on_its_own(self):
self._test_pad_cbaoi_by_1px_each_side_on_its_own(
self.lsoi, "augment_line_strings")
def test_pad_bounding_boxes_by_1px_each_side_on_its_own(self):
self._test_pad_cbaoi_by_1px_each_side_on_its_own(
self.bbsoi, "augment_bounding_boxes")
def test_pad_heatmaps_by_1px_each_side_on_its_own(self):
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
heatmaps_arr = self.heatmap.get_arr()
heatmaps_arr_padded = np.pad(
heatmaps_arr,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
heatmaps = [ia.HeatmapsOnImage(
heatmaps_arr, shape=self.image.shape)]
image_padded_shape = list(self.image.shape)
image_padded_shape[0] += top + bottom
image_padded_shape[1] += left + right
observed = aug.augment_heatmaps(heatmaps)[0]
assert observed.shape == tuple(image_padded_shape)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), heatmaps_arr_padded)
def test_pad_segmaps_by_1px_each_side_on_its_own(self):
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
top, right, bottom, left = pad
segmaps_arr = self.segmap.get_arr()
segmaps_arr_padded = np.pad(
segmaps_arr,
((top, bottom), (left, right)),
mode="constant",
constant_values=0)
segmaps = [SegmentationMapsOnImage(
segmaps_arr, shape=self.image.shape)]
image_padded_shape = list(self.image.shape)
image_padded_shape[0] += top + bottom
image_padded_shape[1] += left + right
observed = aug.augment_segmentation_maps(segmaps)[0]
assert observed.shape == tuple(image_padded_shape)
assert np.array_equal(observed.get_arr(), segmaps_arr_padded)
# TODO split up, add similar tests for polygons/LS/BBs
def test_pad_each_side_on_its_own_by_tuple_of_ints(self):
def _to_range_tuple(val):
return val if isinstance(val, tuple) else (val, val)
pads = [
((0, 2), 0, 0, 0),
(0, (0, 2), 0, 0),
(0, 0, (0, 2), 0),
(0, 0, 0, (0, 2)),
]
for pad in pads:
with self.subTest(px=pad):
aug = iaa.Pad(px=pad, keep_size=False)
aug_det = aug.to_deterministic()
top, right, bottom, left = pad
images_padded = []
keypoints_padded = []
top_range = _to_range_tuple(top)
right_range = _to_range_tuple(right)
bottom_range = _to_range_tuple(bottom)
left_range = _to_range_tuple(left)
top_values = sm.xrange(top_range[0], top_range[1]+1)
right_values = sm.xrange(right_range[0], right_range[1]+1)
bottom_values = sm.xrange(bottom_range[0], bottom_range[1]+1)
left_values = sm.xrange(left_range[0], left_range[1]+1)
for top_val in top_values:
for right_val in right_values:
for bottom_val in bottom_values:
for left_val in left_values:
images_padded.append(
np.pad(
self.image,
((top_val, bottom_val),
(left_val, right_val),
(0, 0)),
mode="constant",
constant_values=0
)
)
keypoints_padded.append(
self.kpsoi.shift(x=left_val, y=top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded
]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded
]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images([self.image])
assert any([
array_equal_lists(observed, [base_img_padded])
for base_img_padded
in images_padded])
observed = aug.augment_keypoints(self.kpsoi)
assert any([
keypoints_equal(observed, kp)
for kp
in keypoints_padded])
assert len(set(movements)) == 3
assert len(set(movements_det)) == 1
# TODO split up, add similar tests for polygons/LS/BBs
def test_pad_each_side_on_its_own_by_list_of_ints(self):
# test pad by list of exact pixel values
pads = [
([0, 2], 0, 0, 0),
(0, [0, 2], 0, 0),
(0, 0, [0, 2], 0),
(0, 0, 0, [0, 2]),
]
for pad in pads:
top, right, bottom, left = pad
aug = iaa.Pad(px=pad, keep_size=False)
aug_det = aug.to_deterministic()
images_padded = []
keypoints_padded = []
top_range = top if isinstance(top, list) else [top]
right_range = right if isinstance(right, list) else [right]
bottom_range = bottom if isinstance(bottom, list) else [bottom]
left_range = left if isinstance(left, list) else [left]
for top_val in top_range:
for right_val in right_range:
for bottom_val in bottom_range:
for left_val in left_range:
images_padded.append(
np.pad(
self.image,
((top_val, bottom_val),
(left_val, right_val),
(0, 0)),
mode="constant",
constant_values=0
)
)
keypoints_padded.append(
self.kpsoi.shift(x=left_val, y=top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(self.images)
matches = [
(1 if np.array_equal(observed,
np.array([base_img_padded]))
else 0)
for base_img_padded
in images_padded]
movements_det.append(np.argmax(np.array(matches)))
                assert any([val == 1 for val in matches])
import json
import logging
import os
import random
import secrets
import socket
import string
import threading
import time
import urllib.parse
import uuid
from collections import Counter
from collections import defaultdict
from pprint import pprint
import pandas as pd
import requests
from flask import abort
from flask import Flask
from flask import redirect
from flask import request
from flask import session
from flask import url_for
from flask_session import Session
from .checkMedia import checkPayload
from .databaseIntegration import clearCustomerHelperPairing
from .databaseIntegration import createNewCallHistory
from .databaseIntegration import deleteFromDatabase
from .databaseIntegration import fetchData
from .databaseIntegration import fetchHelper
from .databaseIntegration import readActiveCustomer
from .databaseIntegration import readActiveHelper
from .databaseIntegration import readCallHistory
from .databaseIntegration import readNameByNumber
from .databaseIntegration import readNewConnectionInfo
from .databaseIntegration import readZipcodeFromDatabase
from .databaseIntegration import saveCustomerToDatabase
from .databaseIntegration import saveHelperToDatabase
from .databaseIntegration import userExists
from .databaseIntegration import writeActiveCustomer
from .databaseIntegration import writeActiveHelper
from .databaseIntegration import writeCallHistory
from .databaseIntegration import writeCustomerAnalytics
from .databaseIntegration import writeHelperAnalytics
from .schemas import REGISTRATION_SCHEMA
from .schemas import VERIFICATION_SCHEMA
from .text2speech_utils import generateNameSoundByte
from .zipcode_utils import getCity
from .zipcode_utils import getDistanceApart
from .zipcode_utils import getDistrict
from .zipcode_utils import readZipCodeData
app = Flask(__name__, static_folder="../client/build", static_url_path="/")
SESSION_TYPE = "redis"
SECRET_KEY = os.getenv("SECRET_KEY")
app.config.from_object(__name__)
Session(app)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
handler = logging.FileHandler("flask.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
startTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
log.info(f"New log entry {startTime}")
BASE_URL = os.getenv("BASE_URL")
ELK_NUMBER = os.getenv("ELK_NUMBER")
API_USERNAME = os.getenv("API_USERNAME")
API_PASSWORD = os.getenv("API_PASSWORD")
DATABASE = os.getenv("DATABASE")
DATABASE_KEY = os.getenv("DATABASE_KEY")
HOOK_URL = os.getenv("HOOK_URL")
def checkEnv(envVar, envStr):
if envVar is None:
print(f"Warning! An environmental variable is not set {envStr}")
log.warning(f"Warning! An environmental variable is not set {envStr}")
# Checks if the environmental variables are set
checkEnv(BASE_URL, "BASE_URL")
checkEnv(ELK_NUMBER, "ELK_NUMBER")
checkEnv(API_USERNAME, "API_USERNAME")
checkEnv(API_PASSWORD, "API_PASSWORD")
checkEnv(DATABASE, "DATABASE")
checkEnv(DATABASE_KEY, "DATABASE_KEY")
checkEnv(SECRET_KEY, "SECRET_KEY")
checkEnv(HOOK_URL, "HOOK_URL")
ZIPDATA = "SE.txt"
MEDIA_URL = "https://files.telehelp.se/sv"
ELK_BASE = "https://api.46elks.com"
VERIFICATION_EXPIRY_TIME = 5 * 60 # 5 minutes
LOCATION_DICT, DISTRICT_DICT, CITY_DICT = readZipCodeData(ZIPDATA)
print("Site phone number: " + ELK_NUMBER)
def canonicalize_number(phone_number):
if phone_number[0] == "0":
phone_number = "+46" + phone_number[1:]
return phone_number
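# Illustrative usage (added for clarity; the numbers are made up): a Swedish
# number written with a leading zero is rewritten to E.164 form, while
# anything else is returned unchanged.
#
#   canonicalize_number("0701234567")    # -> "+46701234567"
#   canonicalize_number("+46701234567")  # -> "+46701234567"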
@app.route("/")
def index():
return app.send_static_file("index.html")
# ------------------------------ PHONE API ----------------------------------------------------
@app.route("/api/receiveCall", methods=["POST"])
def receiveCall():
callId = request.form.get("callid")
startTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
telehelpCallId = str(uuid.uuid1())
createNewCallHistory(DATABASE, DATABASE_KEY, callId)
from_sender = request.form.get("from")
print(from_sender)
# For registered helpers
if userExists(DATABASE, DATABASE_KEY, from_sender, "helper"):
print("Registered helper")
writeHelperAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["telehelp_callid", "elks_callid", "call_start_time"],
(telehelpCallId, callId, startTime),
)
activeCustomer = readActiveCustomer(DATABASE, DATABASE_KEY, from_sender)
print(activeCustomer)
if activeCustomer is None:
payload = {
"ivr": f"{MEDIA_URL}/ivr/hjalper_ingen.mp3",
"skippable": "true",
"digits": 1,
"2": BASE_URL + "/support",
"1": {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeHelper/%s" % telehelpCallId,
},
"next": BASE_URL + "/api/receiveCall",
}
else:
payload = {
"ivr": MEDIA_URL + "/ivr/registrerad_volontar.mp3",
"digits": 1,
"1": BASE_URL + "/api/handleReturningHelper/%s" % telehelpCallId,
"2": {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeHelper/%s" % telehelpCallId,
},
"3": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
# For registered customers
elif userExists(DATABASE, DATABASE_KEY, from_sender, "customer"):
print("Registered customer")
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["telehelp_callid", "elks_callid", "call_start_time", "new_customer"],
(telehelpCallId, callId, startTime, "False"),
)
# Get name of person to suggest call to from DB
helperNumber = readActiveHelper(DATABASE, DATABASE_KEY, from_sender)
name = readNameByNumber(DATABASE, DATABASE_KEY, helperNumber)
if name is None:
payload = {
"ivr": MEDIA_URL + "/ivr/ensam_gamling.mp3",
"digits": 1,
"1": BASE_URL + "/api/handleLonelyCustomer/%s" % telehelpCallId,
"2": BASE_URL + "/api/removeCustomer",
"3": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
else:
nameEncoded = urllib.parse.quote(name) # åäö etc not handled well as URL -> crash
            # Make sure the name sound byte file already exists (generate it if missing, e.g. for early volunteers)
if not os.path.isfile("/media/name/" + nameEncoded + ".mp3"):
generateNameSoundByte(name)
payload = {
"play": MEDIA_URL + "/ivr/behover_hjalp.mp3",
"next": {
"play": MEDIA_URL + "/name/" + nameEncoded + ".mp3",
"next": {
"ivr": MEDIA_URL + "/ivr/pratade_sist.mp3",
"digits": 1,
"1": BASE_URL + "/api/handleReturningCustomer/%s" % telehelpCallId,
"2": BASE_URL + "/api/handleReturningCustomer/%s" % telehelpCallId,
"3": BASE_URL + "/api/handleReturningCustomer/%s" % telehelpCallId,
"4": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
},
},
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
# New customer
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["telehelp_callid", "elks_callid", "call_start_time", "new_customer"],
(telehelpCallId, callId, startTime, "True"),
)
payload = {
"ivr": MEDIA_URL + "/ivr/info.mp3",
"skippable": "true",
"digits": 1,
"1": BASE_URL + "/api/handleNumberInput/%s" % telehelpCallId,
"2": BASE_URL + "/api/receiveCall",
"3": BASE_URL + "/api/support",
"next": BASE_URL + "/api/receiveCall",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route("/api/customerHangup/<string:telehelpCallId>", methods=["POST", "GET"])
def customerHangup(telehelpCallId):
print("hangup")
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeCustomerAnalytics(
DATABASE, DATABASE_KEY, telehelpCallId, ["call_end_time"], (endTime, telehelpCallId)
)
return ""
@app.route("/api/helperHangup/<string:telehelpCallId>", methods=["POST", "GET"])
def helperHangup(telehelpCallId):
print("hangup")
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeHelperAnalytics(DATABASE, DATABASE_KEY, telehelpCallId, ["call_end_time"], (endTime, telehelpCallId))
return ""
@app.route("/api/handleReturningHelper/<string:telehelpCallId>", methods=["POST"])
def handleReturningHelper(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
if number == 1:
writeHelperAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["contacted_prev_customer", "deregistered"],
("True", "False", telehelpCallId),
)
payload = {
"play": MEDIA_URL + "/ivr/du_kopplas.mp3",
"next": BASE_URL + "/api/callExistingCustomer/%s" % telehelpCallId,
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
elif number == 2:
payload = {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeHelper/%s" % telehelpCallId,
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route("/api/callExistingCustomer/<string:telehelpCallId>", methods=["POST"])
def callExistingCustomer(telehelpCallId):
helperPhone = request.form.get("from")
customerPhone = readActiveCustomer(DATABASE, DATABASE_KEY, helperPhone)
payload = {
"connect": customerPhone,
"callerid": ELK_NUMBER,
"whenhangup": BASE_URL + "/api/helperHangup/%s" % telehelpCallId,
}
return json.dumps(payload)
@app.route("/api/removeHelper/<string:telehelpCallId>", methods=["POST"])
def removeHelper(telehelpCallId):
from_sender = request.form.get("from")
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeHelperAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["call_end_time", "contacted_prev_customer", "deregistered"],
(endTime, "False", "True", telehelpCallId),
)
deleteFromDatabase(DATABASE, DATABASE_KEY, from_sender, "helper")
return ""
@app.route("/api/handleReturningCustomer/<string:telehelpCallId>", methods=["POST"])
def handleReturningCustomer(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
phone = request.form.get("from")
if number == 1:
payload = {
"play": MEDIA_URL + "/ivr/du_kopplas.mp3",
"skippable": "true",
"next": BASE_URL + "/api/callExistingHelper/%s" % telehelpCallId,
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
if number == 2:
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["used_prev_helper", "deregistered"],
("False", "False", telehelpCallId),
)
zipcode = readZipcodeFromDatabase(DATABASE, DATABASE_KEY, phone, "customer")
payload = {
"play": MEDIA_URL + "/ivr/vi_letar.mp3",
"skippable": "true",
"next": BASE_URL + "/api/postcodeInput/%s/%s" % (zipcode, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
if number == 3:
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["used_prev_helper", "deregistered"],
("False", "True", telehelpCallId),
)
payload = {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeCustomer",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
return ""
@app.route("/api/handleLonelyCustomer/<string:telehelpCallId>", methods=["POST"])
def handleLonelyCustomer(telehelpCallId):
print(request.form.get("result"))
number = int(request.form.get("result"))
phone = request.form.get("from")
if number == 1:
zipcode = readZipcodeFromDatabase(DATABASE, DATABASE_KEY, phone, "customer")
payload = {
"play": MEDIA_URL + "/ivr/vi_letar.mp3",
"skippable": "true",
"next": BASE_URL + "/api/postcodeInput/%s/%s" % (zipcode, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
if number == 2:
writeCustomerAnalytics(
DATABASE, DATABASE_KEY, telehelpCallId, ["deregistered"], ("True", telehelpCallId)
)
payload = {
"play": MEDIA_URL + "/ivr/avreg_confirmed.mp3",
"next": BASE_URL + "/api/removeCustomer",
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
return ""
@app.route("/api/callExistingHelper/<string:telehelpCallId>", methods=["POST"])
def callExistingHelper(telehelpCallId):
customerPhone = request.form.get("from")
helperPhone = readActiveHelper(DATABASE, DATABASE_KEY, customerPhone)
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["used_prev_helper", "deregistered"],
("True", "False", telehelpCallId),
)
payload = {
"connect": helperPhone,
"callerid": ELK_NUMBER,
"whenhangup": BASE_URL + "/api/customerHangup/%s" % telehelpCallId,
}
return json.dumps(payload)
@app.route("/api/postcodeInput/<string:zipcode>/<string:telehelpCallId>", methods=["POST"])
def postcodeInput(zipcode, telehelpCallId):
callId = request.form.get("callid")
phone = request.form.get("from")
# TODO: Add sound if zipcode is invalid (n/a)
district = getDistrict(int(zipcode), DISTRICT_DICT)
timestr = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
saveCustomerToDatabase(DATABASE, DATABASE_KEY, phone, str(zipcode), district, timestr)
print("zipcode: ", zipcode)
closestHelpers = fetchHelper(DATABASE, DATABASE_KEY, district, zipcode, LOCATION_DICT)
    # If the customer already has an active helper, remove that helper from closestHelpers,
    # since the customer has chosen to look for a new one.
helperPhone = readActiveHelper(DATABASE, DATABASE_KEY, phone)
print(f"Helperphone: {helperPhone}")
print(f"closestHelpers: {closestHelpers}")
if helperPhone is not None:
if closestHelpers is not None and helperPhone in closestHelpers:
closestHelpers.remove(helperPhone)
writeActiveCustomer(DATABASE, DATABASE_KEY, helperPhone, None)
writeCallHistory(DATABASE, DATABASE_KEY, callId, "closest_helpers", json.dumps(closestHelpers))
if closestHelpers is None:
writeCustomerAnalytics(
DATABASE, DATABASE_KEY, telehelpCallId, ["n_helpers_contacted"], ("0", telehelpCallId)
)
payload = {"play": MEDIA_URL + "/ivr/finns_ingen.mp3"}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
else:
writeCallHistory(DATABASE, DATABASE_KEY, callId, "hangup", "False")
payload = {
"play": MEDIA_URL + "/ivr/ringer_tillbaka.mp3",
"skippable": "true",
"next": BASE_URL + "/api/call/0/%s/%s/%s" % (callId, phone, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
return json.dumps(payload)
@app.route(
"/api/call/<int:helperIndex>/<string:customerCallId>/<string:customerPhone>/<string:telehelpCallId>",
methods=["POST"],
)
def call(helperIndex, customerCallId, customerPhone, telehelpCallId):
# NOTE: When making changes here, also update /callSupport :)
stopCalling = readCallHistory(DATABASE, DATABASE_KEY, customerCallId, "hangup")
if stopCalling == "True":
endTime = time.strftime("%Y-%m-%d:%H-%M-%S", time.gmtime())
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["call_end_time", "n_helpers_contacted"],
(endTime, str(helperIndex), telehelpCallId),
)
return ""
else:
print("helperIndex:", helperIndex)
print("Customer callId: ", customerCallId)
closestHelpers = json.loads(readCallHistory(DATABASE, DATABASE_KEY, customerCallId, "closest_helpers"))
print("closest helpers: ", closestHelpers)
auth = (API_USERNAME, API_PASSWORD)
if helperIndex >= len(closestHelpers):
writeCallHistory(DATABASE, DATABASE_KEY, customerCallId, "hangup", "True")
writeCustomerAnalytics(
DATABASE,
DATABASE_KEY,
telehelpCallId,
["n_helpers_contacted"],
(str(helperIndex), telehelpCallId),
)
return redirect(
url_for("callBackToCustomer", customerPhone=customerPhone, telehelpCallId=telehelpCallId)
)
print(closestHelpers[helperIndex])
print(ELK_NUMBER)
payload = {
"ivr": MEDIA_URL + "/ivr/hjalte.mp3",
"timeout": "30",
"1": BASE_URL + "/api/connectUsers/%s/%s/%s" % (customerPhone, customerCallId, telehelpCallId),
"2": BASE_URL
+ "/api/call/%s/%s/%s/%s" % (str(helperIndex + 1), customerCallId, customerPhone, telehelpCallId),
"next": BASE_URL
+ "/api/call/%s/%s/%s/%s" % (str(helperIndex + 1), customerCallId, customerPhone, telehelpCallId),
}
checkPayload(payload, MEDIA_URL, log=log)
print("Calling: ", closestHelpers[helperIndex])
fields = {
"from": ELK_NUMBER,
"to": closestHelpers[helperIndex],
"voice_start": json.dumps(payload),
"whenhangup": BASE_URL
+ "/api/call/%s/%s/%s/%s" % (str(helperIndex + 1), customerCallId, customerPhone, telehelpCallId),
}
        # The "/a1/calls" endpoint and keyword arguments below are assumed from
        # the public 46elks API; the original line was incomplete here.
        response = requests.post(ELK_BASE + "/a1/calls", data=fields, auth=auth)
<reponame>paulherman/meson
# Copyright 2013-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import ExternalDependency, DependencyException, sort_libpaths, DependencyTypeName
from ..mesonlib import MachineChoice, OptionKey, OrderedSet, PerMachine, Popen_safe
from ..programs import find_external_program, ExternalProgram
from .. import mlog
from pathlib import PurePath
import re
import os
import shlex
import typing as T
if T.TYPE_CHECKING:
from ..environment import Environment
from .._typing import ImmutableListProtocol
class PkgConfigDependency(ExternalDependency):
# The class's copy of the pkg-config path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_pkgbin: PerMachine[T.Union[None, bool, ExternalProgram]] = PerMachine(None, None)
# We cache all pkg-config subprocess invocations to avoid redundant calls
pkgbin_cache: T.Dict[
T.Tuple[ExternalProgram, T.Tuple[str, ...], T.FrozenSet[T.Tuple[str, str]]],
T.Tuple[int, str, str]
] = {}
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any], language: T.Optional[str] = None) -> None:
super().__init__(DependencyTypeName('pkgconfig'), environment, kwargs, language=language)
self.name = name
self.is_libtool = False
# Store a copy of the pkg-config path on the object itself so it is
# stored in the pickled coredata and recovered.
self.pkgbin: T.Union[None, bool, ExternalProgram] = None
# Only search for pkg-config for each machine the first time and store
# the result in the class definition
if PkgConfigDependency.class_pkgbin[self.for_machine] is False:
mlog.debug(f'Pkg-config binary for {self.for_machine} is cached as not found.')
elif PkgConfigDependency.class_pkgbin[self.for_machine] is not None:
mlog.debug(f'Pkg-config binary for {self.for_machine} is cached.')
else:
assert PkgConfigDependency.class_pkgbin[self.for_machine] is None
mlog.debug(f'Pkg-config binary for {self.for_machine} is not cached.')
for potential_pkgbin in find_external_program(
self.env, self.for_machine, 'pkgconfig', 'Pkg-config',
environment.default_pkgconfig, allow_default_for_cross=False):
version_if_ok = self.check_pkgconfig(potential_pkgbin)
if not version_if_ok:
continue
if not self.silent:
mlog.log('Found pkg-config:', mlog.bold(potential_pkgbin.get_path()),
f'({version_if_ok})')
PkgConfigDependency.class_pkgbin[self.for_machine] = potential_pkgbin
break
else:
if not self.silent:
mlog.log('Found Pkg-config:', mlog.red('NO'))
# Set to False instead of None to signify that we've already
# searched for it and not found it
PkgConfigDependency.class_pkgbin[self.for_machine] = False
self.pkgbin = PkgConfigDependency.class_pkgbin[self.for_machine]
if self.pkgbin is False:
self.pkgbin = None
msg = f'Pkg-config binary for machine {self.for_machine} not found. Giving up.'
if self.required:
raise DependencyException(msg)
else:
mlog.debug(msg)
return
assert isinstance(self.pkgbin, ExternalProgram)
mlog.debug('Determining dependency {!r} with pkg-config executable '
'{!r}'.format(name, self.pkgbin.get_path()))
ret, self.version, _ = self._call_pkgbin(['--modversion', name])
if ret != 0:
return
self.is_found = True
try:
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
except DependencyException as e:
mlog.debug(f"pkg-config error with '{name}': {e}")
if self.required:
raise
else:
self.compile_args = []
self.link_args = []
self.is_found = False
self.reason = e
def __repr__(self) -> str:
s = '<{0} {1}: {2} {3}>'
return s.format(self.__class__.__name__, self.name, self.is_found,
self.version_reqs)
def _call_pkgbin_real(self, args: T.List[str], env: T.Dict[str, str]) -> T.Tuple[int, str, str]:
assert isinstance(self.pkgbin, ExternalProgram)
cmd = self.pkgbin.get_command() + args
p, out, err = Popen_safe(cmd, env=env)
rc, out, err = p.returncode, out.strip(), err.strip()
call = ' '.join(cmd)
mlog.debug(f"Called `{call}` -> {rc}\n{out}")
return rc, out, err
@staticmethod
def setup_env(env: T.MutableMapping[str, str], environment: 'Environment', for_machine: MachineChoice,
extra_path: T.Optional[str] = None) -> None:
extra_paths: T.List[str] = environment.coredata.options[OptionKey('pkg_config_path', machine=for_machine)].value[:]
if extra_path and extra_path not in extra_paths:
extra_paths.append(extra_path)
sysroot = environment.properties[for_machine].get_sys_root()
if sysroot:
env['PKG_CONFIG_SYSROOT_DIR'] = sysroot
new_pkg_config_path = ':'.join([p for p in extra_paths])
env['PKG_CONFIG_PATH'] = new_pkg_config_path
pkg_config_libdir_prop = environment.properties[for_machine].get_pkg_config_libdir()
if pkg_config_libdir_prop:
new_pkg_config_libdir = ':'.join([p for p in pkg_config_libdir_prop])
env['PKG_CONFIG_LIBDIR'] = new_pkg_config_libdir
# Dump all PKG_CONFIG environment variables
for key, value in env.items():
if key.startswith('PKG_'):
mlog.debug(f'env[{key}]: {value}')
def _call_pkgbin(self, args: T.List[str], env: T.Optional[T.Dict[str, str]] = None) -> T.Tuple[int, str, str]:
# Always copy the environment since we're going to modify it
# with pkg-config variables
if env is None:
env = os.environ.copy()
else:
env = env.copy()
assert isinstance(self.pkgbin, ExternalProgram)
PkgConfigDependency.setup_env(env, self.env, self.for_machine)
fenv = frozenset(env.items())
targs = tuple(args)
cache = PkgConfigDependency.pkgbin_cache
if (self.pkgbin, targs, fenv) not in cache:
cache[(self.pkgbin, targs, fenv)] = self._call_pkgbin_real(args, env)
return cache[(self.pkgbin, targs, fenv)]
def _convert_mingw_paths(self, args: T.List[str]) -> T.List[str]:
'''
Both MSVC and native Python on Windows cannot handle MinGW-esque /c/foo
paths so convert them to C:/foo. We cannot resolve other paths starting
with / like /home/foo so leave them as-is so that the user gets an
error/warning from the compiler/linker.
'''
if not self.env.machines.build.is_windows():
return args
converted = []
for arg in args:
pargs: T.Tuple[str, ...] = tuple()
# Library search path
if arg.startswith('-L/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-L{}:/{}'
elif arg.startswith('-I/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-I{}:/{}'
# Full path to library or .la file
elif arg.startswith('/'):
pargs = PurePath(arg).parts
tmpl = '{}:/{}'
elif arg.startswith(('-L', '-I')) or (len(arg) > 2 and arg[1] == ':'):
# clean out improper '\\ ' as comes from some Windows pkg-config files
arg = arg.replace('\\ ', ' ')
if len(pargs) > 1 and len(pargs[1]) == 1:
arg = tmpl.format(pargs[1], '/'.join(pargs[2:]))
converted.append(arg)
return converted
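    # Illustrative sketch (added for clarity, not upstream documentation): on a
    # Windows build machine the loop above rewrites MinGW-style arguments such as
    #   '-L/c/foo/bar'      -> '-Lc:/foo/bar'
    #   '/c/lib/libfoo.la'  -> 'c:/lib/libfoo.la'
    # while paths without a single-letter drive component, e.g. '/home/foo',
    # are passed through unchanged so the compiler/linker can report them.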
def _split_args(self, cmd: str) -> T.List[str]:
# pkg-config paths follow Unix conventions, even on Windows; split the
# output using shlex.split rather than mesonlib.split_args
return shlex.split(cmd)
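    # Illustrative example (added for clarity): POSIX-style splitting keeps
    # backslash-escaped spaces inside a single argument, e.g.
    #   shlex.split(r'-I/usr/include/my\ project -lfoo')
    #   -> ['-I/usr/include/my project', '-lfoo']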
def _set_cargs(self) -> None:
env = None
if self.language == 'fortran':
# gfortran doesn't appear to look in system paths for INCLUDE files,
# so don't allow pkg-config to suppress -I flags for system paths
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_CFLAGS'] = '1'
ret, out, err = self._call_pkgbin(['--cflags', self.name], env=env)
if ret != 0:
raise DependencyException(f'Could not generate cargs for {self.name}:\n{err}\n')
self.compile_args = self._convert_mingw_paths(self._split_args(out))
def _search_libs(self, out: str, out_raw: str) -> T.Tuple[T.List[str], T.List[str]]:
'''
@out: PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs
@out_raw: pkg-config --libs
We always look for the file ourselves instead of depending on the
compiler to find it with -lfoo or foo.lib (if possible) because:
1. We want to be able to select static or shared
2. We need the full path of the library to calculate RPATH values
3. De-dup of libraries is easier when we have absolute paths
Libraries that are provided by the toolchain or are not found by
find_library() will be added with -L -l pairs.
'''
# Library paths should be safe to de-dup
#
# First, figure out what library paths to use. Originally, we were
# doing this as part of the loop, but due to differences in the order
# of -L values between pkg-config and pkgconf, we need to do that as
# a separate step. See:
# https://github.com/mesonbuild/meson/issues/3951
# https://github.com/mesonbuild/meson/issues/4023
#
# Separate system and prefix paths, and ensure that prefix paths are
# always searched first.
prefix_libpaths: OrderedSet[str] = OrderedSet()
# We also store this raw_link_args on the object later
raw_link_args = self._convert_mingw_paths(self._split_args(out_raw))
for arg in raw_link_args:
if arg.startswith('-L') and not arg.startswith(('-L-l', '-L-L')):
path = arg[2:]
if not os.path.isabs(path):
# Resolve the path as a compiler in the build directory would
path = os.path.join(self.env.get_build_dir(), path)
prefix_libpaths.add(path)
# Library paths are not always ordered in a meaningful way
#
# Instead of relying on pkg-config or pkgconf to provide -L flags in a
        # specific order, we reorder library paths ourselves, according to the
# order specified in PKG_CONFIG_PATH. See:
# https://github.com/mesonbuild/meson/issues/4271
#
# Only prefix_libpaths are reordered here because there should not be
# too many system_libpaths to cause library version issues.
pkg_config_path: T.List[str] = self.env.coredata.options[OptionKey('pkg_config_path', machine=self.for_machine)].value
pkg_config_path = self._convert_mingw_paths(pkg_config_path)
prefix_libpaths = OrderedSet(sort_libpaths(list(prefix_libpaths), pkg_config_path))
system_libpaths: OrderedSet[str] = OrderedSet()
full_args = self._convert_mingw_paths(self._split_args(out))
for arg in full_args:
if arg.startswith(('-L-l', '-L-L')):
# These are D language arguments, not library paths
continue
if arg.startswith('-L') and arg[2:] not in prefix_libpaths:
system_libpaths.add(arg[2:])
# Use this re-ordered path list for library resolution
libpaths = list(prefix_libpaths) + list(system_libpaths)
# Track -lfoo libraries to avoid duplicate work
libs_found: OrderedSet[str] = OrderedSet()
# Track not-found libraries to know whether to add library paths
libs_notfound = []
# Generate link arguments for this library
link_args = []
for lib in full_args:
if lib.startswith(('-L-l', '-L-L')):
                # These are D language arguments, not library paths
<filename>pypadre/pod/importing/dataset/ds_import.py<gh_stars>1-10
# """
# Module containing import methods from different packages / repositories.
# """
#
# import copy
# import json
# import os.path
# import tempfile
# from multiprocessing import Process
#
# import arff
# import networkx as nx
# import numpy as np
# import openml as oml
# import pandas as pd
# import pypadre.pod.backend.http.protobuffer.proto_organizer as proto
# import requests
# from requests.exceptions import ConnectionError
#
# import pypadre
# from pypadre.core.model.dataset.attribute import Attribute
# from pypadre.core.model.dataset.dataset import Dataset
#
# # def _split_DESCR(s):
# # s = s.strip()
# # k = s.find("\n")
# # return s[0:k], s[k + 1:]
# #
# #
# # def _create_dataset_data(bunch):
# # n_feat = bunch.data.shape[1]
# # if len(bunch.target.shape) == 1:
# # data = np.concatenate([bunch.data[:, :], bunch.target[:, None]], axis=1)
# # else:
# # data = np.concatenate([bunch.data[:, :], bunch.target[:, :]], axis=1)
# # fn = bunch.get("feature_names")
# # atts = []
# # for ix in range(data.shape[1]):
# # if fn is not None and len(fn) > ix:
# # atts.append(Attribute(fn[ix], "Ratio", None, None, n_feat <= ix))
# # else:
# # atts.append(Attribute(str(ix), "Ratio", None, None, n_feat <= ix))
# #
# # return data, atts
# #
# # def _create_dataset(bunch, type,source):
# # meta = dict()
# # meta["id"] = str(uuid.uuid4())
# # meta["name"], meta["description"] = _split_DESCR(bunch["DESCR"])
# # meta["type"] = type
# # meta["originalSource"]=source
# # meta["creator"] = ""
# # meta["version"] = ""
# # meta["context"] = {}
# #
# # dataset = Dataset(meta["id"], **meta)
# # dataset.set_data(lambda: _create_dataset_data(bunch))
# # return dataset
# #
# #
# # @deprecated(reason ="use updated load_csv function")
# # def load_csv_file(path_dataset,path_target=None,target_features=[],originalSource="imported by csv",
# # description="imported form csv",type="multivariate"):
# # """Takes the path of a csv file and a list of the target columns and creates a padre-Dataset.
# #
# # Args:
# # path_dataset (str): The path of the csv-file
# # path_target (list): The column names of the target features of the csv-file.
# #
# # Returns:
# # pypadre.Dataset() A dataset containing the data of the .csv file
# #
# # """
# # assert_condition(condition=os.path.exists(os.path.abspath(path_dataset)), source='ds_import.load_csv',
# # message='Dataset path does not exist')
# #
# # trigger_event('EVENT_WARN', condition=len(target_features)>0, source='ds_import.load_csv',
# # message='No targets defined. Program will crash when used for supervised learning')
# #
# # dataset_path_list = path_dataset.split('/')
# # nameOfDataset = dataset_path_list[-1].split('.csv')[0]
# # data =pd.read_csv(path_dataset)
# #
# # meta =dict()
# # meta["name"]=nameOfDataset
# #
# # meta["description"]=description
# # meta["originalSource"]=originalSource
# # meta["creator"]=""
# # meta["version"]=""
# # meta["type"]=type
# # meta["context"]={}
# #
# # dataset=Dataset(None, **meta)
# # trigger_event('EVENT_WARN', condition=data.applymap(np.isreal).all(1).all() == True, source='ds_import.load_csv',
# # message='Non-numeric data values found. Program may crash if not handled by estimators')
# #
# # targets=None
# # if path_target != None:
# # target = pd.read_csv(path_dataset)
# # data=data.join(target,lsuffix="data",rsuffix="target")
# # targets=list(target.columns.values)
# #
# # else:
# # targets=target_features
# #
# # atts = []
# #
# # for feature in data.columns.values:
# # atts.append(Attribute(feature,None, None, None,feature in targets,None,None))
# #
# # dataset.set_data(data,atts)
# # return dataset
# #
# #
# # def load_csv(csv_path, targets=None, name=None, description="imported form csv", source="csvloaded",
# # type="Multivariat"):
# # """Takes the path of a csv file and a list of the target columns and creates a padre-Dataset.
# #
# # Args:
# # csv_path (str): The path of the csv-file
# # targets (list): The column names of the target features of the csv-file.
# # name(str): Optional name of dataset
# # description(str): Optional description of the dataset
# # source(str): original source - should be url
# # type(str): type of dataset
# #
# # Returns:
# # <class 'pypadre.datasets.Dataset'> A dataset containing the data of the .csv file
# #
# # """
# # assert_condition(condition=os.path.exists(os.path.abspath(csv_path)), source='ds_import.load_csv',
# # message='Dataset path does not exist')
# #
# # if targets is None:
# # targets = []
# # trigger_event('EVENT_WARN', condition=len(targets)>0, source='ds_import.load_csv',
# # message='No targets defined. Program will crash when used for supervised learning')
# #
# # dataset_path_list = csv_path.split('/')
# # if name is None:
# # name = dataset_path_list[-1].split('.csv')[0]
# #
# # data = pd.read_csv(csv_path)
# # meta = dict()
# # meta["id"] = str(uuid.uuid4())
# # meta["name"] = name
# # meta["description"] = description
# # meta["originalSource"]="http://" + source
# # meta["version"] = 1
# # meta["type"] = type
# # meta["published"] = True
# #
# # dataset = Dataset(None, **meta)
# # trigger_event('EVENT_WARN', condition=data.applymap(np.isreal).all(1).all() == True,
# # source='ds_import.load_csv',
# # message='Non-numeric data values found. Program may crash if not handled by estimators')
# #
# # for col_name in targets:
# # data[col_name] = data[col_name].astype('category')
# # data[col_name] = data[col_name].cat.codes
# # atts = []
# # for feature in data.columns.values:
# # atts.append(Attribute(name=feature,
# # measurementLevel="Ratio" if feature in targets else None,
# # defaultTargetAttribute=feature in targets))
# # dataset.set_data(data,atts)
# # return dataset
# #
# #
# # def load_pandas_df(pandas_df,target_features=[]):
# # """
# # Takes a pandas dataframe and a list of the names of target columns and creates a padre-Dataset.
# #
# # Args:
# # pandas_df (str): The pandas dataset.
# # path_target (list): The column names of the target features of the csv-file.
# #
# # Returns:
# # pypadre.Dataset() A dataset containing the data of the .csv file
# #
# # """
# # meta = dict()
# #
# # meta["name"] = "pandas_imported_df"
# # meta["description"]="imported by pandas_df"
# # meta["originalSource"]="https://imported/from/pandas/Dataframe.html"
# # meta["creator"]=""
# # meta["version"]=""
# # meta["context"]={}
# # meta["type"]="multivariate"
# # dataset = Dataset(None, **meta)
# #
# # atts = []
# #
# # if len(target_features) == 0:
# # targets = [0] * len(pandas_df)
# #
# # for feature in pandas_df.columns.values:
# # atts.append(Attribute(name=feature, measurementLevel=None, unit=None, description=None,
# # defaultTargetAttribute=feature in target_features, context=None))
# # dataset.set_data(pandas_df, atts)
# # return dataset
# #
# #
# # def load_numpy_array_multidimensional(features, targets, columns=None, target_features=None):
# # """
# # Takes a multidimensional numpy array and creates a dataset out of it
# # :param features: The input n dimensional numpy array
# # :param targets: The targets corresponding to every feature
# # :param columns: Array of data column names
# # :param target_features: Target features column names
# # :return: A dataset object
# # """
# # meta = dict()
# #
# # meta["name"] = "numpy_imported"
# # meta["description"] = "imported by numpy multidimensional"
# # meta["originalSource"] = ""
# # meta["creator"] = ""
# # meta["version"] = ""
# # meta["context"] = {}
# # meta["type"] = "multivariate"
# # dataset = Dataset(None, **meta)
# # atts = []
# #
# # if len(target_features) == 0:
# # targets = [0] * len(features)
# #
# # for feature in columns:
# # atts.append(Attribute(name=feature, measurementLevel=None, unit=None, description=None,
# # defaultTargetAttribute=feature in target_features, context=None))
# # dataset.set_data_multidimensional(features, targets, atts)
# # return dataset
#
#
# # def load_sklearn_toys():
# # #returns an iterator loading different sklearn datasets
# # loaders = [(ds.load_boston, ("regression", "Multivariat"),"https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html#sklearn.datasets.load_boston"),
# # (ds.load_breast_cancer, ("classification", "Multivariat"),"https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)"),
# # (ds.load_diabetes, ("regression", "Multivariat"),"https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html#sklearn.datasets.load_diabetes"),
# # (ds.load_digits, ("classification", "Multivariat"),"http://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits"),
# # (ds.load_iris, ("classification", "Multivariat"),"https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.html#sklearn.datasets.load_iris"),
# # (ds.load_linnerud, ("mregression", "Multivariat"),"https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_linnerud.html#sklearn.datasets.load_linnerud")]
# #
# # for loader in loaders:
# # yield _create_dataset(loader[0](), loader[1][1],loader[2])
#
# #possible Datatypes of imported open-ml dataset columns
# LEGAL_DATA_TYPES = ['NOMINAL','INTEGER', 'NUMERIC', 'REAL', 'STRING']
#
# DATATYPE_MAP={'INTEGER': np.int64,'NUMERIC':np.float64, 'REAL':np.float64, 'STRING': str}
#
#
#
# def storeDataset(dataset,file_backend):
# if dataset.uid not in file_backend.datasets.list_datasets():
# file_backend._dataset_repository.put_dataset(dataset)
#
# def __check_ds_meta(dataset_meta,check_key_value):
# for key in check_key_value.keys():
# try:
# if dataset_meta[key] != check_key_value[key]:
# return False
# except KeyError:
# return False
# return True
#
#
# def get_did_from_meta(check_dict,auth_token,max_hits=10,url="http://localhost:8080"):
# """Takes a dictionary of dataset attributes to search for. Searches the Server for a Dataset, that matches the
# requirements. Returns the dids of the matches.
#
# Args:
# check_dict (dict): A dictionary, that specifies the required metadata of a dataset.
# auth_token (str): The auth_token for authentication at the server.
# max_hits (int): Amount of maximum results.
# url (str): The url of the server.
#
# Returns:
# pypadre.Dataset() A list of several did, that fulfill the required check_dict.
#
# """
# hed = {'Authorization': auth_token}
#
# morePages=True
# hit_list = []
# page=0
#
# while len(hit_list)<max_hits and morePages:
# url = url+"/api/datasets?page=" + str(page) + "&size=9"
# response = requests.get(url, headers=hed)
# try:
# content = json.loads(response.content,encoding="utf-8")
#
# for dataset_meta in content["_embedded"]["datasets"]:
# if(__check_ds_meta(dataset_meta,check_dict)):
#
# hit_list.append(dataset_meta["uid"])
# except KeyError as err:
# print("invalid name!"+str(err))
#
# page+=1
# if content["page"]["totalPages"]<=page:
# morePages=False
# if len(hit_list)>max_hits:
# hit_list=hit_list[:max_hits]
# return hit_list
#
#
# def getDataset_load_or_cached(did,file_backend,force_download=False,auth_token=None):
# """Fetches the requested Dataset from the Server or (if available) from the local cache. A downloaded dataset gets
# cached. Returns the Dataset as pypadre.Dataset().
#
# Args:
# did (str): id of the requested dataset
# path (str): path of the pypadre directory
# force_download (bool): If set to True, downloads the dataset from the server unconditionally.
# auth_token (str): The Token for identification.
#
# Returns:
# pypadre.Dataset() A dataset containing with the requested data.
#
# """
#
#
# dataset=None
#
# if(force_download or did not in file_backend.datasets.list_datasets(search_metadata="")):
# dataset=requestServerDataset(did,auth_token)
# file_backend.datasets.put_dataset(dataset)
# return dataset
# return file_backend.datasets.get_dataset(did)
#
#
# def sendTop100Datasets_single(auth_token,server_url="http://localhost:8080"):
# """Takes a list of the Top-100 Datasets and downloads all of them from open-ml and uploads them to the Server.
# Those Datasets are not cached locally. The list of Datasets is available under
# /datasets/config/top100datasetIDs.txt and can be customized.
#
# Args:
# auth_token (str): The Token for identification.
#
import greenaddress as gdk
import json
# To install GDK, download the GDK python wheel from:
# https://github.com/Blockstream/gdk/releases
# The 'cp' number refers to the python version you have.
# To install GDK, pip install the .whl file:
# pip install greenaddress-0.0.36-cp37-cp37m-linux_x86_64.whl
# GDK README and reference documentation:
# https://github.com/Blockstream/gdk
# https://gdk.readthedocs.io/en/latest/
def main():
# Our calls to GDK are wrapped in the gdk_wallet class, which should only be
# created using either create_new_wallet, login_with_mnemonic or
# login_with_pin methods. The example uses the live Liquid network.
# Initialize GDK.
gdk.init({})
# Wallet creation and login using Mnemonic
# ========================================
# To create a wallet with a Managed Assets account, pass a mnemonic
# into the following. You can generate a 24 word mnemonic yourself or
# have GDK generate it for you by leaving mnemonic as None.
# You can choose to create a wallet that's covered by 2FA or not.
# 2FA can be activated or deactivated at any point in time.
"""
wallet = gdk_wallet.create_new_wallet(create_with_2fa_enabled=False, mnemonic=None)
print(f'\nMnemonic: {wallet.mnemonic}')
"""
# To login to an existing wallet you can either use the mnemonic or pin.
# Later we'll see how to use a pin, for now we will use the mnemonic.
mnemonic = 'your twenty four word mnemonic goes here with single spaced words'
if not gdk.validate_mnemonic(mnemonic):
raise Exception("Invalid mnemonic.")
# Login to a GDK wallet session using the mnemonic.
wallet = gdk_wallet.login_with_mnemonic(mnemonic)
# We can now perform calls against the session, such as get balance for
# the logged in Blockstream AMP Managed Assets account.
balance = wallet.get_balance()
print(f'\n{json.dumps(balance, indent=4)}')
# Using a pin to encrypt the mnemonic and login
# =============================================
# You can also login using a pin. Setting the pin for the wallet returns
# encrypted data that is saved to file. When you login with the pin, the
# server will give you the key to decrypt the mnemonic which it uses to
# login. If the pin is entered incorrectly 3 times the server will delete
# the key and you must use the mnemonic to login.
"""
# Before setting the pin, login with the wallet's mnemonic.
wallet = gdk_wallet.login_with_mnemonic(mnemonic)
# Then set the pin for the wallet, this saves encrypted data to file.
# Don't use the example value below, set you own.
pin = 123456
# You only need to set the pin data once.
wallet.set_pin(mnemonic, pin)
# After setting the pin you can then login using pin and do not have to
# enter the mnemonic again. The pin is used to decrypt the local file.
wallet.login_with_pin(pin)
"""
# Two factor authorization
# ========================
# You can add Two Factor Authentication (2FA) to a wallet when you create
# it or enable or disable 2FA at a later date.
# Check the current 2FA status for the wallet.
twofactor_status = wallet.get_current_2fa_status()
print(f'\n{json.dumps(twofactor_status, indent=4)}')
# The example below will enable 2FA on an existing wallet and uses email by
# default, which you can amend if you want.
"""
try:
wallet.twofactor_auth_enabled(False)
except RuntimeError as e:
# Will error if 2FA is already enabled
print(f'\nError: {e}\n')
"""
# Getting notification data from GDK to obtain the last block height
# ==================================================================
# The fetch_block_height example shows how to handle notification events
# from Green by processing the notifications queue.
block_height = wallet.fetch_block_height()
print(f'\nCurrent Liquid block height: {block_height}')
# Getting a new address and understanding pointers
# ================================================
# The new address returned will be confidential, whereas GDK transactions
# will show the unconfidential address. For this reason, use the address
# 'pointer' to identify it in transactions. The pointer plus sub account
# index maps to a derivation path so you can use pointers within each
# sub account to link confidential and unconfidential addresses. Be sure
# to note that you must consider which sub account you are using when
# using the pointer as an identifier like this.
address_info = wallet.get_new_address()
print(f'Address: {address_info["address"]}')
print(f'Address pointer: {address_info["pointer"]}')
# Each call creates a new address/pointer pair for the user.
address_info = wallet.get_new_address()
print(f'Address: {address_info["address"]}')
print(f'Address pointer: {address_info["pointer"]}')
# Getting transaction data from Green using GDK
# =============================================
txs = wallet.get_wallet_transactions()
for tx in txs:
print(f'TRANSACTION ID : {tx["txhash"]}')
print(f'CONFIRMATION STATUS : {tx["confirmation_status"]}')
print(f'BLOCK HEIGHT : {tx["block_height"]}')
print(f'TYPE : {tx["type"]}')
print(f'INPUT COUNT : {len(tx["inputs"])}')
print(f'OUTPUT COUNT : {len(tx["outputs"])}\n')
# Sending assets
# ==============
# Please be aware that AMP issued assets are issued with a precision
# that affects how the number of sats sent are converted to the number
# of units of the asset itself. Please refer to the examples under
# 'precision' on the following page for more details and examples:
# https://docs.blockstream.com/blockstream-amp/api-tutorial.html#issuing-an-asset
# If the asset is registered with the Liquid Assets Registry you can
# check the precision using the following link, or check with the
# asset's issuer:
# https://blockstream.info/liquid/assets
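    # Illustrative sketch (an assumption for clarity, not taken from the docs
    # linked above): for an asset registered with precision 8, one user-facing
    # unit corresponds to 10**8 sats, so a display amount would be converted
    # roughly like this before sending:
    #
    #   asset_precision = 8        # assumed; use the precision of your asset
    #   amount_in_units = 1.5      # hypothetical user-facing amount
    #   amount_sat = int(round(amount_in_units * 10 ** asset_precision))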
amount_sat = 1
asset_id = 'asset id here'
address = 'destination address here'
txid = wallet.send_to_address(amount_sat, asset_id, address)
if txid:
print(f'\nTransaction sent. Txid: {txid}')
else:
print(f'\nTransaction failed. See error logging.')
class gdk_wallet:
"""Class method to create and return an instance of gdk_wallet"""
@classmethod
def create_new_wallet(cls, create_with_2fa_enabled, mnemonic=None):
self = cls()
# Create a new wallet with a Managed Assets account.
# You can pass in a mnemonic generated outside GDK if you want, or have
# GDK generate it for you by omitting it. 2FA is enabled if chosen and
# can be enabled/disabled at any point.
if not mnemonic:
self.mnemonic = gdk.generate_mnemonic()
# Set the network name to 'liquid' for the live Liquid network.
# There is currently no test Liquid network.
self.session = gdk.Session({'name': 'liquid'})
self.session.register_user({}, self.mnemonic).resolve()
self.session.login({}, self.mnemonic).resolve()
self.session.create_subaccount({'name': self.SUBACCOUNT_NAME, 'type': self.AMP_ACCOUNT_TYPE}).resolve()
if create_with_2fa_enabled:
self.twofactor_auth_enabled(True)
return self
"""Class method to create and return an instance of gdk_wallet"""
@classmethod
def login_with_mnemonic(cls, mnemonic):
self = cls()
self.mnemonic = mnemonic
self.session = gdk.Session({'name': 'liquid'})
self.session.login({}, self.mnemonic).resolve()
self.fetch_subaccount()
return self
"""Class method to create and return an instance of gdk_wallet"""
@classmethod
def login_with_pin(cls, pin):
self = cls()
pin_data = open(self.PIN_DATA_FILENAME).read()
self.session = gdk.Session({'name': 'liquid'})
self.session.login_with_pin(str(pin), pin_data).resolve()
self.fetch_subaccount()
return self
"""Do not use this to instantiate the object, use create_new_wallet or login_with_*"""
def __init__(self):
# 2of2_no_recovery is the account type used by Blockstream AMP.
# Do not change this value!
self.AMP_ACCOUNT_TYPE = '2of2_no_recovery'
# 'Managed Assets' is the same name as Green mobile and desktop use.
# You can change this if you like, but note that account type and
# name are used to retrieve the correct account and should be unique
# per wallet so you can retrieve the right account when you login.
self.SUBACCOUNT_NAME = 'Managed Assets'
# If you use a pin to login, the encrypted data will be saved and read
# from this file:
self.PIN_DATA_FILENAME = 'pin_data.json'
self.mnemonic = None
self.session = None
self.subaccount_pointer = None
self.gaid = None
self.last_block_height = 0
def set_pin(self, mnemonic, pin):
pin_data = gdk.set_pin(self.session.session_obj, mnemonic, str(pin), str('device_id_1'))
open(self.PIN_DATA_FILENAME, 'w').write(pin_data)
return pin_data
def get_balance(self):
return self.session.get_balance({'subaccount': self.subaccount_pointer, 'num_confs': 0}).resolve()
def get_current_2fa_status(self):
return self.session.get_twofactor_config()
def twofactor_auth_enabled(self, enabled):
# We will use email but others are available ('sms', 'phone', 'gauth').
# https://gdk.readthedocs.io/en/latest/gdk-json.html#twofactor-detail
method = 'email'
if enabled:
print('\nRequesting email authentication is enabled for this account')
email = input('\nPlease enter the email address that you will use to authenticate 2FA requests: ')
details = {'confirmed': False, 'enabled': True, 'data': email}
else:
print('\nRequesting email authentication is disabled for this account')
details = {'confirmed': True, 'enabled': False}
# The following is an example of how to handle the GDK authentication
# state machine as it progresses to completion.
self._gdk_resolve(gdk.change_settings_twofactor(self.session.session_obj, method, json.dumps(details)))
def _gdk_resolve(self, auth_handler):
# Processes and handles the state of calls that need authentication.
# The authentication process works as a state machine and may require
        # input to progress. This example only uses email as an authentication
        # method. If you would like to use a different 2FA method, adjust the
        # details passed to change_settings_twofactor accordingly.
er:
print("\nSqrt failed! Error {}".format(er))
print("Test failed")
## ========================================
print("\n8.2. Square root all negative")
tests_cnt+=1
try:
a = sd_var(-16,-4)
c = a.get_sqrt()
print("\tsqrt({}) = [nan, nan]".format(a))
if c.defined():
print("Test failed, must be undeifined, but {}".format(c))
else:
print("Test passed")
passed+=1
except ValueError as er:
print("\nSqrt failed! Error {}".format(er))
print("Test failed")
## ========================================
print("\n8.3. Square root all negative and positive")
tests_cnt+=1
try:
a = sd_var(-16,16)
c = a.get_sqrt()
d = sd_var(0,4)
print("\tsqrt({}) = {}".format(a,c))
if c == d:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nSqrt failed! Error {}".format(er))
print("Test failed")
## ========================================
print("\n8.4. Square root all negative and zero")
tests_cnt+=1
try:
a = sd_var(-16,0)
c = a.get_sqrt()
d = sd_var(0,0)
print("\tsqrt({}) = {}".format(a,c))
if c == d and c.full_defined():
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nSqrt failed! Error {}".format(er))
print("Test failed")
## ========================================
print("\n9.1. Less than int")
tests_cnt+=1
try:
a = sd_var(-16,0)
b = 1
print("\t{} < {} and {} > {}".format(a,b,b,a))
if a < b and b > a:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nSqrt failed! Error {}".format(er))
print("Test failed")
## ========================================
print("\n9.2. Greater or eq than float")
tests_cnt+=1
try:
a = sd_var(3,20)
b = 1.1
print("\t{} >= {} and {} <= {}".format(a,b,b,a))
if a >= b and b <= a:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nSqrt failed! Error {}".format(er))
print("Test failed")
## ========================================
print("\n10.1. Assign intersected")
tests_cnt+=1
try:
a = sd_var(3,20)
b = sd_var(10, 100)
c = a.assign(b)
d = sd_var(10,20)
print("\t {} : {} = {}".format(a,b,c))
if c == d:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nAssign failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n10.2. Assign not intersected")
tests_cnt+=1
try:
a = sd_var(3,20)
b = sd_var(30, 100)
c = a.assign(b)
d = undefined_sd_var()
print("\t {} : {} = {}".format(a,b,c))
if not c.defined():
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nAssign failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n11.1. Power int")
tests_cnt+=1
try:
a = sd_var(2,4)
b = 2
c = a**b
d = sd_var(4,16)
print("\t {} ^ {} = {}".format(a,b,c))
if c == d:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nPower failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n11.2. Power float")
tests_cnt+=1
try:
a = sd_var(4,16)
b = 1/2
c = a**b
d = sd_var(2,4)
print("\t {} ^ {} = {}".format(a,b,c))
if c == d:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nPower failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n12.1. Join intersected")
tests_cnt+=1
try:
a = sd_var(4,16)
b = sd_var(10,20)
c = a.join(b)
d = sd_var(4,20)
print("\t {} ++ {} = {}".format(a,b,c))
if c == d:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nPower failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n12.2. Join not intersected")
tests_cnt+=1
try:
a = sd_var(4,5)
b = sd_var(10,20)
c = a.join(b)
d = sd_var(4,20)
print("\t {} ++ {} = {}".format(a,b,c))
if c == d:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nPower failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n13.1. Contains float")
tests_cnt+=1
try:
a = sd_var(4,5)
c = 4.5
print("\t {} in {}".format(c, a))
if c in a:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nContains failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n13.2. Not contains int")
tests_cnt+=1
try:
a = sd_var(4,5)
c = 6
print("\t not {} in {}".format(c, a))
if not c in a:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nContains failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n13.3. Contains sd_var")
tests_cnt+=1
try:
a = sd_var(4, 10)
c = sd_var(5, 9)
print("\t {} in {}".format(c, a))
if c in a:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nContains failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n13.4. Not contains sd_var")
tests_cnt+=1
try:
a = sd_var(4, 10)
c = sd_var(5, 12)
print("\t {} in {}".format(c, a))
if not c in a:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(d))
except ValueError as er:
print("\nContains failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.1. Sinus")
tests_cnt+=1
try:
a = sd_var(0, np.pi/6)
b = a.sin()
c = sd_var(0, 0.5)
print("\t sin({}) = {}".format(a, b))
if c.high - b.high <= 0.01:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(c))
except ValueError as er:
print("\tSin failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.2. Arcsinus normal")
tests_cnt+=1
try:
a = sd_var(-0.5, 0.5)
b = a.arcsin()
c = sd_var(-np.pi/6, np.pi/6)
print("\t sin({}) = {}".format(a, b))
#if c.high - b.high <= 0.01 and c.low - b.low <= 0.01:
if c.acc_eq(b, 0.01):
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(c))
except ValueError as er:
print("\tArcsin failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.3. Arcsinus not fully normal")
tests_cnt+=1
try:
a = sd_var(-2, 0.5)
b = a.arcsin()
c = sd_var(-np.pi, np.pi/6)
print("\t sin({}) = {}".format(a, b))
if c.high - b.high <= 0.01 and c.low - b.low <= 0.01:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(c))
except ValueError as er:
print("\tArcsin failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.4. Arcsinus not normal")
tests_cnt+=1
try:
a = sd_var(-3, 3)
b = a.arcsin()
c = sd_var(-np.pi, np.pi)
print("\t sin({}) = {}".format(a, b))
if c.high - b.high <= 0.01 and c.low - b.low <= 0.01:
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(c))
except ValueError as er:
print("\tArcsin failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.5. Arcsinus not normal 2")
tests_cnt+=1
try:
a = sd_var(-3, -2)
b = a.arcsin()
#c = sd_var(-np.pi, np.pi)
print("\t sin({}) = {}".format(a, b))
if not b.defined():
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(undefined_sd_var()))
except ValueError as er:
print("\tArcsin failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.6. Tan 1")
tests_cnt+=1
try:
a = sd_var(-1, 1)
b = sd_var(-1.557,1.557)
c = a.tan()
print("\t tan({}) = {}".format(a, c))
if is_close(b,c):
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(b))
except ValueError as er:
print("\tTan failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n14.7. Tan 2")
tests_cnt+=1
try:
a = sd_var(-1, 2)
b = full_undefined_sd_var()
c = a.tan()
print("\t tan({}) = {}".format(a, c))
if is_close(b,c):
print("Test passed")
passed+=1
else:
print("Test failed, must be {}".format(b))
except ValueError as er:
print("\tTan failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n15.1. Arctan2 x > 0 y > 0")
tests_cnt+=1
try:
a = sd_var(1, 2)
b = sd_var(1, 2)
c = atan2(a,b)
print("\t atan2({},{}) = {}".format(a, b, c[0]))
if len(c) == 1:
print("Test passed")
passed+=1
else:
print("Test failed")
except ValueError as er:
print("\tAtan2 failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n15.2. Arctan2 x < 0 y > 0")
tests_cnt+=1
try:
a = sd_var(-2, -1)
b = sd_var(1, 2)
c = atan2(a,b)
print("\t atan2({},{}) = {}".format(a, b, c[0]))
if len(c) == 1:
print("Test passed")
passed+=1
else:
print("Test failed")
except ValueError as er:
print("\tAtan2 failed! Error {}".format(er))
print("Test failed!")
## ========================================
print("\n15.3. Arctan2 x < 0 y < 0")
tests_cnt+=1
try:
a = sd_var(-2, -1)
b = sd_var(-2, -1)
c = atan2(a,b)
print("\t atan2({},{}) = {}".format(a, b, c[0]))
if len(c) == 1:
print("Test passed")
| |
foo1 -> std::str;
CREATE PROPERTY bar1 -> std::str {
SET default := __source__.foo1;
};
};
""")
async def test_edgeql_ddl_15(self):
await self.con.execute(r"""
CREATE TYPE TestSelfLink2 {
CREATE PROPERTY foo2 -> std::str;
CREATE MULTI PROPERTY bar2 -> std::str {
# NOTE: this is a set of all TestSelfLink2.foo2
SET default := TestSelfLink2.foo2;
};
};
INSERT TestSelfLink2 {
foo2 := 'Alice'
};
INSERT TestSelfLink2 {
foo2 := 'Bob'
};
INSERT TestSelfLink2 {
foo2 := 'Carol'
};
""")
await self.assert_query_result(
r"""
SELECT TestSelfLink2 {
foo2,
bar2,
} ORDER BY TestSelfLink2.foo2;
""",
[
{'bar2': {}, 'foo2': 'Alice'},
{'bar2': {'Alice'}, 'foo2': 'Bob'},
{'bar2': {'Alice', 'Bob'}, 'foo2': 'Carol'}
],
)
async def test_edgeql_ddl_16(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
'possibly more than one element'):
await self.con.execute(r"""
CREATE TYPE TestSelfLink3 {
CREATE PROPERTY foo3 -> std::str;
CREATE PROPERTY bar3 -> std::str {
# NOTE: this is a set of all TestSelfLink3.foo3
SET default := TestSelfLink3.foo3;
};
};
""")
async def test_edgeql_ddl_18(self):
await self.con.execute("""
CREATE MODULE foo;
CREATE MODULE bar;
SET MODULE foo;
SET ALIAS b AS MODULE bar;
CREATE SCALAR TYPE foo_t EXTENDING int64 {
CREATE CONSTRAINT expression ON (__subject__ > 0);
};
CREATE SCALAR TYPE b::bar_t EXTENDING int64;
CREATE TYPE Obj {
CREATE PROPERTY foo -> foo_t {
SET default := <foo::foo_t>20;
};
CREATE PROPERTY bar -> b::bar_t;
};
CREATE TYPE b::Obj2 {
CREATE LINK obj -> Obj;
};
""")
await self.assert_query_result(
r"""
WITH MODULE schema
SELECT ScalarType {
name,
constraints: {
name,
subjectexpr,
}
}
FILTER .name LIKE '%bar%' OR .name LIKE '%foo%'
ORDER BY .name;
""",
[
{'name': 'bar::bar_t', 'constraints': []},
{'name': 'foo::foo_t', 'constraints': [
{
'name': 'std::expression',
'subjectexpr': '(__subject__ > 0)',
},
]},
]
)
await self.con.execute("""
ALTER SCALAR TYPE foo::foo_t RENAME TO foo::baz_t;
""")
await self.con.execute("""
ALTER SCALAR TYPE foo::baz_t RENAME TO bar::quux_t;
""")
await self.con.execute("""
DROP TYPE bar::Obj2;
DROP TYPE foo::Obj;
DROP SCALAR TYPE bar::quux_t;
""")
async def test_edgeql_ddl_19(self):
await self.con.execute("""
CREATE TYPE ActualType {
CREATE REQUIRED PROPERTY foo -> str;
};
CREATE ALIAS Alias1 := ActualType {
bar := 9
};
CREATE ALIAS Alias2 := ActualType {
connected := (SELECT Alias1 ORDER BY Alias1.foo)
};
INSERT ActualType {
foo := 'obj1'
};
INSERT ActualType {
foo := 'obj2'
};
""")
await self.assert_query_result(
r"""
SELECT Alias2 {
foo,
connected: {
foo,
bar
}
}
ORDER BY Alias2.foo;
""",
[
{
'foo': 'obj1',
'connected': [{
'foo': 'obj1',
'bar': 9,
}, {
'foo': 'obj2',
'bar': 9,
}],
},
{
'foo': 'obj2',
'connected': [{
'foo': 'obj1',
'bar': 9,
}, {
'foo': 'obj2',
'bar': 9,
}],
}
]
)
async def test_edgeql_ddl_20(self):
await self.con.execute("""
CREATE TYPE A20 {
CREATE REQUIRED PROPERTY foo -> str;
};
CREATE TYPE B20 {
CREATE LINK l -> A20;
};
""")
await self.assert_query_result(
r"""
WITH MODULE schema
SELECT ObjectType {
links: {
name,
bases: {
name
}
} FILTER .name = 'l'
}
FILTER .name = 'default::B20'
""",
[
{
'links': [{
'name': 'l',
'bases': [{
'name': 'std::link',
}],
}],
},
]
)
await self.con.execute("""
CREATE ABSTRACT LINK l20;
ALTER TYPE B20 {
ALTER LINK l EXTENDING l20;
};
""")
await self.assert_query_result(
r"""
WITH MODULE schema
SELECT ObjectType {
links: {
name,
bases: {
name
}
} FILTER .name = 'l'
}
FILTER .name = 'default::B20'
""",
[
{
'links': [{
'name': 'l',
'bases': [{
'name': 'default::l20',
}],
}],
},
]
)
await self.con.execute("""
ALTER TYPE B20 {
ALTER LINK l DROP EXTENDING l20;
};
""")
await self.assert_query_result(
r"""
WITH MODULE schema
SELECT ObjectType {
links: {
name,
bases: {
name
}
} FILTER .name = 'l'
}
FILTER .name = 'default::B20'
""",
[
{
'links': [{
'name': 'l',
'bases': [{
'name': 'std::link',
}],
}],
},
]
)
async def test_edgeql_ddl_23(self):
# Test that an unqualified reverse link expression
# as an alias pointer target is handled correctly and
# manifests as std::BaseObject.
await self.con.execute("""
CREATE TYPE User;
CREATE TYPE Award {
CREATE LINK user -> User;
};
CREATE ALIAS Alias1 := (SELECT User {
awards := .<user
});
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::Alias1')
SELECT
C.pointers { target: { name } }
FILTER
C.pointers.name = 'awards'
""",
[
{
'target': {
'name': 'std::BaseObject'
}
},
],
)
async def test_edgeql_ddl_24(self):
# Test transition of property from inherited to owned.
await self.con.execute("""
CREATE TYPE Desc;
CREATE TYPE Named {
CREATE PROPERTY name -> str;
CREATE LINK desc -> Desc;
};
CREATE TYPE User EXTENDING Named;
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::User')
SELECT
C {
pointers: { @owned }
FILTER .name IN {'name', 'desc'}
};
""",
[
{
'pointers': [{
'@owned': False,
}, {
'@owned': False,
}],
},
],
)
await self.con.execute("""
ALTER TYPE User {
ALTER PROPERTY name SET OWNED;
ALTER LINK desc SET OWNED;
};
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::User')
SELECT
C {
pointers: { @owned }
FILTER .name IN {'name', 'desc'}
};
""",
[
{
'pointers': [{
'@owned': True,
}, {
'@owned': True,
}],
},
],
)
await self.con.execute("""
ALTER TYPE User {
ALTER PROPERTY name {
SET REQUIRED;
CREATE CONSTRAINT exclusive;
};
ALTER LINK desc {
SET REQUIRED;
CREATE CONSTRAINT exclusive;
};
};
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::User')
SELECT
C {
pointers: {
@owned,
required,
constraints: {
name,
}
}
FILTER .name IN {'name', 'desc'}
};
""",
[
{
'pointers': [{
'@owned': True,
'required': True,
'constraints': [{
'name': 'std::exclusive',
}],
}, {
'@owned': True,
'required': True,
'constraints': [{
'name': 'std::exclusive',
}],
}],
},
],
)
# and drop it again
await self.con.execute("""
ALTER TYPE User {
ALTER PROPERTY name DROP OWNED;
ALTER LINK desc DROP OWNED;
};
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::User')
SELECT
C {
pointers: {
@owned,
required,
constraints: {
name,
}
}
FILTER .name IN {'name', 'desc'}
};
""",
[
{
'pointers': [{
'@owned': False,
'required': False,
'constraints': [],
}, {
'@owned': False,
'required': False,
'constraints': [],
}],
},
],
)
async def test_edgeql_ddl_25(self):
with self.assertRaisesRegex(
edgedb.InvalidDefinitionError,
"cannot drop owned property 'name'.*not inherited",
):
await self.con.execute("""
CREATE TYPE Named {
CREATE PROPERTY name -> str;
};
ALTER TYPE Named ALTER PROPERTY name DROP OWNED;
""")
async def test_edgeql_ddl_26(self):
await self.con.execute("""
CREATE TYPE Target;
CREATE TYPE Source {
CREATE LINK target -> Source;
};
CREATE TYPE Child EXTENDING Source {
ALTER LINK target {
SET REQUIRED;
CREATE PROPERTY foo -> str;
}
};
CREATE TYPE Grandchild EXTENDING Child {
ALTER LINK target {
ALTER PROPERTY foo {
CREATE CONSTRAINT exclusive;
}
}
};
""")
await self.con.execute("""
ALTER TYPE Child ALTER LINK target DROP OWNED;
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::Child')
SELECT
C {
links: {
@owned,
required,
properties: {
name,
} ORDER BY .name
}
FILTER .name = 'target'
};
""",
[
{
'links': [{
'@owned': False,
'required': False,
'properties': [{"name": "source"}, {"name": "target"}],
}],
},
],
)
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::Grandchild')
SELECT
C {
links: {
@owned,
required,
properties: {
name,
@owned,
constraints: {
name,
}
} FILTER .name = 'foo'
}
FILTER .name = 'target'
};
""",
[
{
'links': [{
'@owned': True,
'required': True,
'properties': [{
'name': 'foo',
'@owned': True,
'constraints': [{
'name': 'std::exclusive',
}]
}],
}],
},
],
)
async def test_edgeql_ddl_27(self):
await self.con.execute("""
CREATE TYPE Base {
CREATE PROPERTY foo -> str;
};
CREATE TYPE Derived EXTENDING Base {
ALTER PROPERTY foo SET REQUIRED;
};
""")
await self.assert_query_result(
r"""
WITH
C := (SELECT schema::ObjectType
FILTER .name = 'default::Derived')
SELECT
C {
properties: {
@owned,
required,
inherited_fields,
}
FILTER .name = 'foo'
};
""",
[
{
'properties': [{
'@owned': True,
'required': True,
'inherited_fields': {
'cardinality',
'readonly',
'target',
},
}],
},
],
)
await self.con.execute("""
ALTER TYPE Base DROP PROPERTY foo;
""")
await self.assert_query_result(
| |
#!/usr/bin/env python
"""Connect to Darshan logs.
This connector provides an interface into Darshan logs created by Darshan 3.0 or
higher and represents the counters and data contained therein as a Python
dictionary. This dictionary has the following structure, where ``block``
denotes literal key names.
* ``header`` which contains key-value pairs corresponding to each line in the
header. ``exe`` and ``metadata`` are lists; the other keys correspond to a
single scalar value.
* ``compression``, ``end_time``, ``end_time_string``, ``exe``, etc
* ``counters``
* `modulename` which is ``posix``, ``lustre``, ``stdio``, etc
* `recordname`, which is usually the full path to a file opened by the
profiled application _or_ ``_perf`` (contains performance summary metrics)
or ``_total`` (contains aggregate file statistics)
* ranknum which is a string (``0``, ``1``, etc or ``-1``)
* `counternames`, which depends on the Darshan module defined by
`modulename` above
* ``mounts`` which is the mount table with keys of a path to a mount location
and values of the file system type
The `counternames` are module-specific and have their module name prefix
stripped off. The following counter names are examples of what a Darshan log
may expose through this connector for the ``posix`` module:
* ``BYTES_READ`` and ``BYTES_WRITTEN`` - number of bytes read/written to the file
* ``MAX_BYTE_WRITTEN`` and ``MAX_BYTE_READ`` - highest byte written/read; useful if an application re-reads or re-writes a lot of data
* ``WRITES`` and ``READS`` - number of write and read ops issued
* ``F_WRITE_TIME`` and ``F_READ_TIME`` - amount of time spent inside write and read calls (in seconds)
* ``F_META_TIME`` - amount of time spent in metadata (i.e., non-read/write) calls
Similarly the ``lustre`` module provides the following counter keys:
* ``MDTS`` - number of MDTs in the underlying file system
* ``OSTS`` - number of OSTs in the underlying file system
* ``OST_ID_0`` - the OBD index for the 0th OST over which the file is striped
* ``STRIPE_OFFSET`` - the setting used to define stripe offset when the file was created
* ``STRIPE_SIZE`` - the size, in bytes, of each stripe
* ``STRIPE_WIDTH`` - how many OSTs the file touches
Note:
This connector presently relies on ``darshan-parser`` to convert the binary
logs to ASCII, then convert the ASCII into Python objects. In the future,
we plan on using the Python API provided by darshan-utils to circumvent the
ASCII translation.
"""
import os
import re
import json
import errno
import subprocess
import warnings
from .common import SubprocessOutputDict
from ..common import isstr
DARSHAN_PARSER_BIN = 'darshan-parser'
DARSHAN_FILENAME_REX = re.compile(r'([^_%s]+)_([^%s]*?)_id(\d+)_(\d+)-(\d+)-(\d+)-(\d+)_(\d+).darshan' % (os.path.sep, os.path.sep))
class Darshan(SubprocessOutputDict):
def __init__(self, log_file=None, *args, **kwargs):
"""Initialize the object from either a Darshan log or a cache file.
Configures the object's internal state to operate on a Darshan
log file or a cached JSON representation of a previously processed
Darshan log.
Args:
log_file (str, optional): Path to a Darshan log to be processed
cache_file (str, optional): Path to a cached copy of a previously parsed Darshan log's contents
*args: Passed to tokio.connectors.common.SubprocessOutputDict
**kwargs: Passed to tokio.connectors.common.SubprocessOutputDict
Attributes:
log_file (str): Path to the Darshan log file to load
"""
super(Darshan, self).__init__(*args, **kwargs)
self.log_file = log_file
self._parser_mode = None
self._only_modules = None
self._only_counters = None
self.subprocess_cmd = [DARSHAN_PARSER_BIN]
self.filename_metadata = {}
if log_file is None:
self.load()
else:
self.filename_metadata = parse_filename_metadata(log_file)
def __repr__(self):
"""Serialize self into JSON.
Returns:
str: JSON representation of the object
"""
return json.dumps(list(self.values()))
def load(self):
if self.from_string is not None:
self.load_str(self.from_string)
elif self.cache_file:
self.load_cache()
elif self.log_file is None:
raise Exception("parameters should be provided (at least log_file or cache_file)")
def darshan_parser_base(self, modules=None, counters=None):
"""Populate data produced by ``darshan-parser --base``
Runs the ``darshan-parser --base`` and convert all results into
key-value pairs which are inserted into the object.
Args:
modules (list of str): If specified, only return data from the given
Darshan modules
counters (list of str): If specified, only return data for the
given counters
Returns:
dict: Dictionary containing all key-value pairs generated by running
``darshan-parser --base``. These values are also accessible via the
`BASE` key in the object.
"""
self._parser_mode = "BASE"
self._only_modules = set(modules) if modules else None
self._only_counters = set(counters) if counters else None
return self._darshan_parser()
def darshan_parser_total(self, modules=None, counters=None):
"""Populate data produced by ``darshan-parser --total``
Runs the ``darshan-parser --total`` and convert all results into
key-value pairs which are inserted into the object.
Args:
modules (list of str): If specified, only return data from the given
Darshan modules
counters (list of str): If specified, only return data for the
given counters
Returns:
dict: Dictionary containing all key-value pairs generated by running
``darshan-parser --total``. These values are also accessible via
the `TOTAL` key in the object.
"""
self._parser_mode = "TOTAL"
self._only_modules = set() if not modules else set(modules)
self._only_counters = set() if not counters else set(counters)
return self._darshan_parser()
def darshan_parser_perf(self, modules=None, counters=None):
"""Populate data produced by ``darshan-parser --perf``
Runs the ``darshan-parser --perf`` and convert all results into
key-value pairs which are inserted into the object.
Args:
modules (list of str): If specified, only return data from the given
Darshan modules
counters (list of str): If specified, only return data for the
given counters
Returns:
dict: Dictionary containing all key-value pairs generated by running
``darshan-parser --perf``. These values are also accessible via the
`PERF` key in the object.
"""
self._parser_mode = "PERF"
self._only_modules = set() if not modules else set(modules)
self._only_counters = set() if not counters else set(counters)
return self._darshan_parser()
def _darshan_parser(self):
"""Call darshan-parser to initialize values in self
"""
if self.log_file is None:
return self
if self._parser_mode in ["BASE", "TOTAL", "PERF"]:
darshan_flag = "--" + self._parser_mode.lower()
else:
self._parser_mode = "BASE"
darshan_flag = ""
args = [darshan_flag, self.log_file]
# this loads the entire stdout into memory at once, which is a problem for
# Darshan logs that expand to tens of gigabytes of ascii
#
# self._load_subprocess(*args)
# this loads stdout line-by-line from a pipe and is memory-efficient
# but may suffer from weird buffering effects on some platforms
#
self._load_subprocess_iter(*args)
return self
def _load_subprocess_iter(self, *args):
"""Run a subprocess and pass its stdout to a self-initializing parser
"""
cmd = self.subprocess_cmd
if args:
cmd += args
try:
if self.silent_errors:
dparser = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
else:
dparser = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError as error:
if error.errno == errno.ENOENT:
raise type(error)(error.errno, "%s command not found" % self.subprocess_cmd[0])
raise
# feed darshan-parser's stdout to the parser line by line as it is produced
self.load_str(iter(dparser.stdout.readline, ''))
dparser.stdout.close()
dparser.wait()
retcode = dparser.returncode
if retcode != 0:
warnings.warn("%s returned nonzero exit code (%d)" % (cmd, retcode))
def load_str(self, input_str):
"""Load from either a json cache or the output of darshan-parser
Args:
input_str: Either (1) stdout of the darshan-parser command as a
string, (2) the json-encoded representation of a Darshan
object that can be deserialized to initialize self, or (3)
an iterator that produces the output of darshan-parser
line-by-line
"""
if isstr(input_str):
loaded_data = None
try:
loaded_data = json.loads(input_str)
except ValueError:
pass
if loaded_data:
# if we could successfully load json, store it in self
self.__setitem__(loaded_data)
else:
# otherwise, treat input_str as the raw stdout of darshan-parser
self._parse_darshan_parser(input_str.splitlines())
else:
# treat input_str as an iterator that will produce lines of
# darshan-parser
self._parse_darshan_parser(input_str)
def _parse_darshan_parser(self, lines):
"""Load values from output of darshan-parser
Args:
lines: Any iterable that produces lines of darshan-parser output
"""
def is_valid_counter(counter):
"""
if counter is not None, this line is valid (return True)
if counter is None but we can identify a module section, return it
if counter is None but we cannot identify a module section, return False
"""
if counter is None:
match = module_rex.search(line)
if match is not None:
module_section = match.group(1)
module_section = module_section.replace('-', '') # because of "MPI-IO" and "MPIIO"
module_section = module_section.replace('/', '') # because of "BG/Q" and "BGQ"
return False, module_section
return False, None
return True, None
def insert_record(section, module, file_name, rank, counter, value, counter_prefix=None):
"""
Embed a counter=value pair deep within the darshan_data structure based
on a bunch of nested keys.
"""
# Force the local shadow of 'module' to lowercase
module = module.lower()
# Assert that the counter actually belongs to the current module
if counter_prefix is not None:
if counter.startswith(counter_prefix):
# Strip off the counter_prefix from | |
<reponame>sbenthall/DemARK
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # A Gentle Introduction to HARK: Buffer Stock Saving
#
# This notebook explores the behavior of a consumer identical to the perfect foresight consumer described in [Gentle-Intro-To-HARK-PerfForesightCRRA](https://econ-ark.org/materials/Gentle-Intro-To-HARK-PerfForesightCRRA) except that now the model incorporates income uncertainty.
# %% {"code_folding": []}
# This cell has a bit of initial setup.
# Click the "Run" button immediately above the notebook in order to execute the contents of any cell
# WARNING: Each cell in the notebook relies upon results generated by previous cells
# The most common problem beginners have is to execute a cell before all its predecessors
# If you do this, you can restart the kernel (see the "Kernel" menu above) and start over
import matplotlib.pyplot as plt
import numpy as np
import HARK
from time import process_time as clock  # time.clock was removed in Python 3.8
from copy import deepcopy
mystr = lambda number : "{:.4f}".format(number)
from HARK.utilities import plotFuncs
# %% [markdown]
# ## The Consumer's Problem with Transitory and Permanent Shocks
# ### Mathematical Description
#
# Our new type of consumer receives two income shocks at the beginning of each period. Permanent income would grow by a factor $\Gamma$ in the absence of any shock, but its growth is modified by a shock, $\psi_{t+1}$:
# \begin{align}
# P_{t+1} & = \Gamma P_{t}\psi_{t+1}
# \end{align}
# whose expected (mean) value is $\mathbb{E}_{t}[\psi_{t+1}]=1$. Actual income received $Y$ is equal to permanent income $P$ multiplied by a transitory shock $\theta$:
# \begin{align}
# Y_{t+1} & = \Gamma P_{t+1}\theta_{t+1}
# \end{align}
# where again $\mathbb{E}_{t}[\theta_{t+1}] = 1$.
#
# As with the perfect foresight problem, this model can be rewritten in terms of _normalized_ variables, e.g. the ratio of 'market resources' $M_{t}$ (wealth plus current income) to permanent income is $m_t \equiv M_t/P_t$. (See [here](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/) for the theory). In addition, lenders may set a limit on borrowing: The ratio $a_{t}$ of end-of-period assets to permanent income $A_t/P_t$ must be greater than $\underline{a} \leq 0$. (So, if $\underline{a}=-0.3$, the consumer cannot borrow more than 30 percent of their permanent income).
#
# The consumer's (normalized) problem turns out to be:
# \begin{eqnarray*}
# v_t(m_t) &=& \max_{c_t} ~~u(c_t) + \beta \mathbb{E} [(\Gamma_{t+1}\psi_{t+1})^{1-\rho} v_{t+1}(m_{t+1}) ], \\
# & \text{s.t.} & \\
# a_t &=& m_t - c_t, \\
# a_t &\geq& \underline{a}, \\
# m_{t+1} &=& a_t R/(\Gamma_{t+1} \psi_{t+1}) + \theta_{t+1}.
# \end{eqnarray*}
#
# %% [markdown]
# For present purposes, we assume that the transitory and permanent shocks are independent. The permanent shock is assumed to be (approximately) lognormal, while the transitory shock has two components: A probability $\wp$ that the consumer is unemployed, in which case $\theta^{u}=\underline{\theta}$, and a probability $(1-\wp)$ of a shock that is a lognormal with a mean chosen so that $\mathbb{E}_{t}[\theta_{t+n}]=1$.
#
#
# ### Representing the Income Shocks
#
# Computers are discrete devices; even if somehow we knew with certainty that the transitory and permanent shocks were, say, continuously lognormally distributed, in order to be represented on a computer those distributions would need to be approximated by a finite set of points. A large literature in numerical computation explores ways to construct such approximations; probably the easiest discretization to understand is the equiprobable approximation, in which the continuous distribution is represented by a set of $N$ outcomes that are equally likely to occur.
#
# In the case of a single variable (say, the permanent shock $\psi$), and when the number of equiprobable points is, say, 5, the procedure is to construct a list: $\psi^{0}$ is the mean value of the continuous $\psi$ given that the draw of $\psi$ is in the bottom 20 percent of the distribution of the continuous $\psi$. $\psi^{1}$ is the mean value of $\psi$ given that the draw is between the 20th and 40th percentiles, and so on. Having constructed these, the approximation to the expectation of some expression $g(\psi)$ can be very quickly calculated by:
#
# $$
# \mathbb{E}_{t}[g(\psi)] \equiv \int_{0}^{\infty} g(\psi) dF_{\psi} \approx (1/N) \sum_{i=0}^{N-1} g(\psi^{i}).
# $$
#
# (For a graphical depiction of a particular instance of this, see [SolvingMicroDSOPs/#discreteApprox](http://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/#discreteApprox).)
#
# ## The New Parameters
#
# In addition to the parameters required for the perfect foresight model (like the time preference factor $\beta$), under the assumptions above, we need to choose values for the following extra parameters that describe the income shock distribution and the artificial borrowing constraint.
#
# | Param | Description | Code | Value |
# | :---: | --- | --- | :---: |
# | $\underline{a}$ | Artificial borrowing constraint | $\texttt{BoroCnstArt}$ | 0.0 |
# | $\sigma_\psi$ | Underlying stdev of permanent income shocks | $\texttt{PermShkStd}$ | 0.1 |
# | $\sigma_\theta^{e}$ | Underlying stdev of transitory income shocks | $\texttt{TranShkStd}$ | 0.1 |
# | $N_\psi$ | Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | 7 |
# | $N_\theta$ | Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | 7 |
# | $\wp$ | Unemployment probability | $\texttt{UnempPrb}$ | 0.05 |
# | $\underline{\theta}$ | Transitory shock when unemployed | $\texttt{IncUnemp}$ | 0.3 |
# %% [markdown]
# ## Representation in HARK
#
# HARK agents with this kind of problem are instances of the class $\texttt{IndShockConsumerType}$, which is constructed by "inheriting" the properties of the $\texttt{PerfForesightConsumerType}$ and then adding only the _new_ information required:
# %% {"code_folding": [0, 2]}
# This cell defines a parameter dictionary for making an instance of IndShockConsumerType.
IndShockDictionary = {
'PermShkStd': [0.1], # ... by specifying the new parameters for constructing the income process.
'PermShkCount': 7,
'TranShkStd': [0.1],
'TranShkCount': 7,
'UnempPrb': 0.05,
'IncUnemp': 0.3, # ... and income for unemployed people (30 percent of "permanent" income)
'BoroCnstArt': 0.0, # ... and specifying the location of the borrowing constraint (0 means no borrowing is allowed)
'cycles': 0 # signifies an infinite horizon solution (see below)
}
# %% [markdown]
# ## Other Attributes are Inherited from PerfForesightConsumerType
#
# You can see all the **attributes** of an object in Python by using the `dir()` command. From the output of that command below, you can see that many of the model variables are now attributes of this object, along with many other attributes that are outside the scope of this tutorial.
# %%
from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
pfc = PerfForesightConsumerType()
dir(pfc)
# %% [markdown]
# In Python terminology, `IndShockConsumerType` is a **subclass** of `PerfForesightConsumerType`. This means that it builds on the functionality of its parent type (including, for example, the definition of the utility function). You can find the superclasses of a type in Python using the `__bases__` attribute:
# %%
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
IndShockConsumerType.__bases__
# %%
# So, let's create an instance of the IndShockConsumerType
IndShockExample = IndShockConsumerType(**IndShockDictionary)
# %% [markdown]
# As before, we need to import the relevant subclass of $\texttt{AgentType}$ into our workspace, then create an instance by passing the dictionary to the class as if the class were a function.
# %% [markdown]
# ## The Discretized Probability Distribution
#
# The scatterplot below shows how the discretized probability distribution is represented in HARK: The lognormal distribution is represented by a set of equiprobable point masses.
# %%
# Plot values for equiprobable distribution of permanent shocks
plt.scatter(IndShockExample.PermShkDstn[0].X,
IndShockExample.PermShkDstn[0].pmf)
plt.xlabel("Value")
plt.ylabel("Probability Mass")
plt.show()
# %% [markdown]
# This distribution was created, using the parameters in the dictionary above, when the `IndShockConsumerType` object was initialized.
# %% [markdown]
# ## Solution by Backwards Induction
#
# HARK solves this problem using _backwards induction_: It will derive a solution for each period ($t$) by finding a mapping between specific values of market resources $\{m[0],m[1],...\}$ and the corresponding optimal consumption $\{c[0],c[1],...\}$. The function that "connects the dots" will be stored in a variable named `cFunc`.
#
# Backwards induction requires a "terminal" (last; final) period to work backwards from. `IndShockExample` constructed above did not specify a terminal consumption function, and consequently it uses the default terminal function in which all resources are consumed: $c_{T} = m_{t}$.
# %%
IndShockExample.solution_terminal
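# %% [markdown]
# As a minimal sketch (relying on the default values that HARK fills in for the
# parameters we did not specify), backwards induction is triggered by calling
# `solve()`, after which the converged consumption function can be evaluated
# directly. Solving the infinite-horizon problem may take a few seconds.
# %%
IndShockExample.solve()
IndShockExample.solution[0].cFunc(1.0)  # optimal consumption at m = 1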
# %% [markdown]
# The consumption function `cFunc` is defined by _piecewise linear interpolation_.
#
# It is defined by a series of $(m,c)$ points on a grid; the value of the function for any $m$ is the $c$ determined by the line connecting the nearest defined gridpoints.
#
# You | |
0x44, 0x48, 0x28,
0x00, 0x00, 0x60, 0x44, 0x48, 0x00, 0xe0, 0x82,
0x34, 0x48, 0x49, 0x01, 0x22, 0x44, 0x00, 0x40,
0x18, 0x04, 0x18, 0x48, 0x00, 0x40, 0x08, 0x41,
0x28, 0x40, 0x24, 0x12, 0x88, 0x14, 0x48, 0xf2,
0xcf, 0x21, 0x00, 0x40, 0x01, 0x2c, 0x81, 0x14,
0x81, 0x04, 0x40, 0x02, 0x80, 0x04, 0xc0, 0x42,
0x62, 0x48, 0x10, 0x22, 0x02, 0x60, 0x24, 0x20,
0x04, 0x00, 0x24, 0x00, 0x00, 0x00, 0x28, 0x80,
0xa4, 0x24, 0xb0, 0x22, 0x04, 0x82, 0x80, 0x34,
0x82, 0x8a, 0x04, 0xc0, 0x42, 0x28, 0x80, 0x02,
0x00, 0x21, 0xff, 0x39, 0x0b, 0x18, 0x10, 0x64,
0x24, 0x80, 0x01, 0xc0, 0x42, 0x80, 0x04, 0x11,
0x10, 0x01, 0x00, 0x81, 0x21, 0x00, 0x81, 0x00,
0x42, 0x85, 0x22, 0x04, 0x41, 0x48, 0x22, 0x24,
0x49, 0x84, 0x84, 0x04, 0x41, 0x22, 0x26, 0x02,
0x20, 0x21, 0x34, 0x22, 0x20, 0x81, 0x84, 0x02,
0x52, 0x00, 0x81, 0x00, 0x82, 0x81, 0x82, 0x4f,
0xa6, 0x08, 0x00, 0x00, 0x11, 0x21, 0x00, 0x21,
0x24, 0x40, 0x02, 0x20, 0x02, 0x00, 0x00, 0x00,
0x20, 0x04, 0x00, 0x23, 0x02, 0x42, 0x00, 0x80,
0x02, 0x00, 0x40, 0x82, 0x52, 0x22, 0x22, 0x24,
0x00, 0x00, 0x00, 0x00, 0x10, 0x02, 0x00, 0x28,
0x00, 0xef, 0x7d, 0x0a, 0x27, 0x11, 0x42, 0x11,
0x16, 0x88, 0x21, 0x08, 0x00, 0x84, 0x80, 0x04,
0x25, 0x48, 0x34, 0x22, 0x00, 0x00, 0x44, 0x48,
0x42, 0x40, 0x04, 0x42, 0x20, 0x04, 0x80, 0x26,
0x02, 0x00, 0x28, 0x40, 0x08, 0x80, 0x04, 0x40,
0x88, 0x22, 0x82, 0x42, 0x68, 0x46, 0x23, 0x08,
0x20, 0x22, 0xc8, 0x24, 0x00, 0x2c, 0x72, 0x54,
0x04, 0x82, 0x00, 0x00, 0x12, 0x00, 0x24, 0x50,
0x41, 0x10, 0x04, 0x14, 0x60, 0x24, 0x48, 0x80,
0x04, 0x48, 0x20, 0x04, 0x00, 0x00, 0x42, 0x46,
0x22, 0x24, 0x86, 0x24, 0x84, 0x02, 0x22, 0x28,
0x00, 0x00, 0x2c, 0x04, 0x44, 0xe2, 0x00, 0x48,
0x48, 0x00, 0x20, 0x02, 0x29, 0x04, 0xf0, 0x4c,
0x5a, 0x00, 0x00, 0x10, 0x82, 0x64, 0x14, 0x48,
0x11, 0x00, 0x10, 0x81, 0x06, 0x00, 0x84, 0x00,
0x00, 0x21, 0x40, 0x22, 0x04, 0x40, 0x06, 0x00,
0x24, 0x20, 0x92, 0x42, 0x84, 0x29, 0x04, 0x00,
0x00, 0xa0, 0x48, 0x48, 0xc0, 0x42, 0x8a, 0x02,
0x22, 0x40, 0x08, 0x29, 0x04, 0x90, 0x22, 0xf0,
0xbc, 0x35, 0x40, 0xc4, 0x12, 0x12, 0x18, 0x49,
0x04, 0x41, 0x21, 0x10, 0x04, 0x25, 0x24, 0x84,
0x02, 0x81, 0x00, 0x42, 0x24, 0x84, 0x00, 0xc0,
0x48, 0x10, 0x04, 0x81, 0x10, 0x48, 0x14, 0x08,
0x40, 0x0c, 0x20, 0x14, 0x04, 0x41, 0x29, 0x14,
0x84, 0x21, 0x18, 0xa2, 0x82, 0x4a, 0x21, 0x18,
0x08, 0x12, 0x20, 0x41, 0x08, 0x12, 0xf0, 0x7a,
0xc8, 0x00, 0x84, 0x12, 0x10, 0x01, 0x2c, 0x01,
0x14, 0x28, 0x14, 0x00, 0x00, 0x21, 0x00, 0x20,
0x04, 0x40, 0x02, 0x00, 0x00, 0x10, 0x02, 0x00,
0x00, 0x21, 0x26, 0x02, 0x48, 0x00, 0x00, 0x22,
0x20, 0x92, 0x82, 0x41, 0x80, 0x12, 0x02, 0x80,
0x22, 0x04, 0x22, 0x80, 0xf4, 0xd2, 0xa4, 0x50,
0x81, 0x80, 0x04, 0x40, 0x08, 0x84, 0x48, 0x80,
0xc4, 0x48, 0x22, 0x29, 0x02, 0x40, 0x04, 0x44,
0x40, 0x02, 0x27, 0x44, 0x80, 0x01, 0x88, 0x42,
0x42, 0x00, 0xc0, 0x41, 0x22, 0x60, 0x82, 0x42,
0x48, 0x24, 0x60, 0x82, 0x48, 0x84, 0x20, 0x04,
0x84, 0x90, 0x22, 0x20, 0x68, 0x44, 0x82, 0x4c,
0x14, 0x02, 0xd0, 0x42, 0x82, 0xf2, 0x48, 0xd5,
0x24, 0x60, 0xa9, 0x00, 0x27, 0x44, 0x42, 0x2c,
0x21, 0xe4, 0x14, 0x04, 0x24, 0xa4, 0x14, 0x83,
0x14, 0x91, 0x28, 0x90, 0x42, 0x81, 0x2c, 0x44,
0x02, 0x44, 0x42, 0x43, 0x04, 0x43, 0x34, 0x42,
0x84, 0x43, 0xa4, 0x44, 0x60, 0x26, 0xe0, 0x24,
0x04, 0x28, 0x2b, 0x22, 0x4e, 0x22, 0x26, 0x96,
0x22, 0x28, 0x1e, 0x42, 0x2a, 0x84, 0x2c, 0x62,
0xa6, 0x2e, 0x82, 0xa9, 0x4c, 0xb2, 0xca, 0x44,
0x82, 0xb1, 0x48, 0x32, 0x62, 0x22, 0x68, 0x44,
0x8a, 0xf2, 0x6b, 0xbb, 0x14, 0x14, 0x48, 0x80,
0x65, 0x31, 0x12, 0x17, 0x54, 0x25, 0x4a, 0x52,
0x2a, 0x1d, 0x41, 0x48, 0x51, 0x00, 0x21, 0xa0,
0x24, 0x46, 0x06, 0x45, 0x08, 0x8b, 0x44, 0x21,
0x48, 0x48, 0x80, 0xe6, 0x24, 0x14, 0x42, 0xda,
0x42, 0x04, 0x4b, 0x24, 0x28, 0x4a, 0x54, 0x28,
0xa0, 0x42, 0x29, 0xc4, 0x42, 0x46, 0x74, 0x42,
0x48, 0xb2, 0x4a, 0x01, 0x92, 0x46, 0x24, 0xa4,
0x86, 0x88, 0x10, 0x42, 0x84, 0x51, 0x84, 0xa2,
0x83, 0x92, 0x42, 0xaf, 0x17, 0x0d, 0x1f, 0x41,
0x01, 0x5f, 0x11, 0x71, 0x18, 0x92, 0x11, 0x2d,
0x58, 0x80, 0x14, 0xc1, 0x24, 0x43, 0x34, 0x42,
0x66, 0x92, 0x42, 0x47, 0x32, 0xe0, 0x64, 0x14,
0x88, 0x24, 0x14, 0xa2, 0x64, 0x80, 0x44, 0x3a,
0x42, 0x8d, 0x42, 0x10, 0x84, 0x04, 0x6a, 0xc2,
0x68, 0x2b, 0x42, 0xf0, 0x22, 0x24, 0x62, 0x41,
0x81, 0x28, 0x26, 0xc8, 0x42, 0x22, 0x46, 0x24,
0xfa, 0x44, 0xa2, 0x29, 0xae, 0x68, 0x8a, 0x91,
0x64, 0x32, 0x4e, 0x44, 0x8b, 0x28, 0x26, 0x82,
0x4e, 0x02, 0x38, 0x22, 0x9f, 0x93, 0x4e, 0x52,
0x41, 0x2c, 0x51, 0x41, 0x81, 0x1d, 0x44, 0xa1,
0x1d, 0x44, 0xa1, 0x15, 0x14, 0x1a, 0x11, 0x9a,
0x41, 0xa1, 0x41, 0xa1, 0x41, 0xa1, 0x45, 0x18,
0x52, 0x84, 0x42, 0x45, 0x08, 0x45, 0x0a, 0x45,
0x0a, 0x45, 0x4a, 0x44, 0x4a, 0x44, 0x6a, 0x44,
0x26, 0x42, 0x44, 0x42, 0x54, 0x28, 0x4c, 0x14,
0x4a, 0x94, 0x1a, 0x4c, 0x94, 0x1a, 0x8a, 0x94,
0x1a, 0x90, 0x1a, 0x43, 0x98, 0x1a, 0x41, 0x29,
0x59, 0x84, 0x29, 0x59, 0x84, 0x98, 0x45, 0x88,
0xf8, 0xf5, 0x75, 0x14, 0x5f, 0x71, 0x75, 0x17,
0x57, 0xfb, 0xcf, 0xd1, 0xf1, 0x1f, 0x1f, 0xf5,
0xf7, 0x11, 0x1b, 0x75, 0xff, 0x4f, 0x4f, 0x2f,
0x36, 0xf6, 0x43, 0x4b, 0x37, 0xa4, 0xb5, 0x5b,
0x77, 0xf5, 0x5f, 0xee, 0xcf, 0xa4, 0x54, 0x62,
0x2f, 0xc4, 0x54, 0xec, 0x85, 0x58, 0xc8, 0x4e,
0x4c, 0xcf, 0xe4, 0xd4, 0xaa, 0xd4, 0xee, 0x74,
0x46, 0x5e, 0xee, 0xaf, 0xa4, 0xd4, 0xca, 0x74,
0x46, 0xd6, 0xee, 0xf6, 0x2e, 0x2e, 0xa5, 0xdc,
0x66, 0xf4, 0x66, 0x66, 0x67, 0x62, 0xad, 0x2c,
0xe5, 0x7e, 0x4e, 0xfe, 0x4e, 0x26, 0x2f, 0xa6,
0xf2, 0x26, 0x3e, 0xef, 0xe5, 0xf5, 0x16, 0x16,
0x5e, 0x3a, 0x2f, 0xa6, 0xf3, 0xda, 0xda, 0x6f,
0x61, 0xf5, 0x1e, 0x1e, 0xed, 0xde, 0x4f, 0x2f,
0xff, 0xfa, 0x5e, 0x2f, 0xeb, 0xf5, 0x6e, 0x2e,
0xaf, 0x8b, 0xfb, 0xd8, 0x7c, 0x8e, 0x2c, 0xcf,
0xe6, 0xfe, 0xc7, 0xe8, 0x24, 0x3f, 0x11, 0xf5,
0x51, 0x48, 0x17, 0x71, 0x7f, 0x71, 0xf1, 0x59,
0x5b, 0xa7, 0x34, 0x7f, 0xf5, 0x75, 0x47, 0xfc,
0x4f, 0x4f, 0xbf, 0x96, 0xf6, 0x47, 0x4b, 0x3f,
0x94, 0xf4, 0x6f, 0x6f, 0xa7, 0x94, 0x35, 0x53,
0x66, 0xef, 0xa4, 0x54, 0xca, 0x2f, 0xc4, 0x54,
0xec, 0x45, 0x56, 0x42, 0xcc, 0xf4, 0x1c, 0x5e,
0x4d, 0x46, 0x2f, 0x2c, 0x49, 0xda, 0xee, 0x58,
0x64, 0x27, 0x64, 0xed, 0x4e, 0xbf, 0xb6, 0xe2,
0x26, 0x76, 0x42, 0xf7, 0x4e, 0x4e, 0x2f, 0x24,
0xc6, 0x4a, 0x2f, 0x62, 0xf2, 0x26, 0x46, 0xaf,
0xa6, 0xb2, 0x6a, 0xf2, 0x46, 0x4e, 0x6d, 0x1c,
0xaf, 0xa5, 0xd5, 0xaa, 0xf8, 0xe6, 0xaa, 0x2f,
0x86, 0xf7, 0xfe, 0xfe, 0xaf, 0x84, 0xfc, 0xc2,
0x82, 0x6f, 0x68, 0xf1, 0x7e, 0xfa, 0x2f, 0x4e,
0xf6, 0xe2, 0x6e, 0xef, 0xcf, 0xff, 0x96, 0x96,
0x2f, 0x4f, 0x65, 0xca, 0xcf, 0xe7, 0xfd, 0xfe,
0x61, 0x24, 0x3f, 0x15, 0xb1, 0x11, 0xd5, 0x35,
0xc1, 0x14, 0xbf, 0x34, 0x75, 0x42, 0xf9, 0x55,
0x47, 0xb3, 0xf4, 0x43, 0x43, 0xef, 0xc2, 0xf2,
0x43, 0x47, 0xab, 0x44, 0x3f, 0x34, 0x76, 0x4b,
0x59, 0x33, 0x61, 0x6c, 0xd4, 0x8a, 0xf4, 0x4e,
0x42, 0x4e, 0x4a, 0x6f, 0x44, 0x14, 0x92, 0x4c,
0x4e, 0x5a, 0x4f, 0x64, 0xf4, 0x42, 0x82, 0x87,
0x24, 0x2f, 0xa4, 0xf4, 0x44, 0x46, 0x6b, 0x44,
0x8d, 0x4e, 0x27, 0x34, 0x24, 0x63, 0xe2, 0xe6,
0xf6, 0x62, 0x62, 0x2e, 0x62, 0x61, 0x64, 0xaf,
0x24, 0xf4, 0x62, 0x68, 0x6f, 0x66, 0xb2, 0x7a,
0xf2, 0x62, 0x52, 0xaf, 0x86, 0xe8, 0x6c, 0xbc,
0xfa, 0xf8, 0xc2, 0xf2, 0xaf, 0x84, 0xf8, 0xc2,
0x82, 0x6b, 0x81, 0xae, 0xb6, 0xaf, 0x86, 0xfe,
0x2e, 0x62, 0x2f, 0x8f, 0xfb, 0x26, 0x24, 0x2b,
0xe6, 0xcb, 0x31, 0xee, 0x7a, 0x7f, 0x9d, 0x09,
0x5f, 0x55, 0xf1, 0x18, 0x52, 0x7f, 0x31, 0x51,
0xfa, 0xdf, 0x74, 0x75, 0x42, 0xfa, 0x51, 0x43,
0xe7, 0x44, 0x9f, 0xb4, 0xf4, 0x26, 0x25, 0x1f,
0x74, 0xb4, 0x4b, 0xf4, 0x4d, 0x6b, 0x27, 0x14,
0x55, 0x57, 0xae, 0x8f, 0xe4, 0xb4, 0x42, 0x74,
0x4c, 0x72, 0x44, 0xfe, 0x4a, 0x48, 0x21, 0xcb,
0x44, 0x4f, 0xc4, 0xf5, 0x48, 0x4a, 0x2b, 0xc5,
0xc5, 0xf6, 0xc6, 0x4c, 0x8f, 0xa4, 0xb4, 0x46,
0x74, 0x42, 0xd6, 0xde, 0xf4, 0x48, 0x4a, 0x7b,
0x22, 0x2f, 0xe6, 0xf6, 0x46, 0x64, 0x8d, 0x22,
0x63, 0x72, 0x2a, 0xfe, 0x6e, 0x66, 0x2f, 0x86,
0xe6, 0x26, 0xf6, 0x7e, 0x24, | |
<filename>camkes/runner/NameMangling.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
'''This code manages the name mangling (and reversal of such) that needs to
happen in the templates and follow-on logic in the runner. E.g. based on the
name of a component instance, we need to construct a name of the control TCB.
The logic for performing that translation and (if necessary) reversing it later
is encapsulated here so it can more easily be modified.
Callers should only import and use the Perspective class. When instantiating
one of these, generally as much information as is known should be provided to
give Perspective the opportunity to spot internal inconsistencies. See the
comments in the class itself for further information.'''
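# A rough usage sketch (symbol values are illustrative; the exact derived strings
# are governed by the DERIVATIONS table below):
#
#     p = Perspective(phase=FILTERS, instance='comp', interface='iface',
#                     intra_index='0000')
#     p['tcb']   # something like 'comp_4_iface_5_0000_tcb'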
from __future__ import absolute_import, division, print_function, \
unicode_literals
from camkes.internal.seven import cmp, filter, map, zip
from camkes.internal.dictutils import get_fields
import abc, re, six
class Deriver(six.with_metaclass(abc.ABCMeta, object)):
'''Logic for constructing one symbol from one or more other symbols. This
class itself is never intended to be directly instantiated and is probably
best understood by looking at the subclasses that inherit from it.'''
@abc.abstractmethod
def inputs(self):
raise NotImplementedError
@abc.abstractmethod
def output(self):
raise NotImplementedError
@abc.abstractmethod
def derive(self, perspective):
raise NotImplementedError
class ForwardDeriver(Deriver):
'''Logic for deriving one symbol from several other symbols by way of
concatenation, interspersed with other static text.'''
def __init__(self, format, out):
self.format = format
self.out = out
def inputs(self):
return get_fields(self.format)
def output(self):
return self.out
def derive(self, perspective):
return self.format % perspective
class BackwardDeriver(Deriver):
'''Logic for deriving one symbol from one other symbol by pulling out a
substring of the input.'''
def __init__(self, regex, input, out):
self.regex = re.compile(regex)
self.input = input
self.out = out
def inputs(self):
return set([self.input])
def output(self):
return self.out
def derive(self, perspective):
m = self.regex.match(perspective[self.input])
if m is None:
return None
return m.group(1)
# The remaining derivers are for specific symbols (or qualities) that are not
# strings. These each need slightly inflected logic.
class ControlDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'control'
def derive(self, perspective):
return self.regex.match(perspective[self.input]) is not None
class PoolDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'pool'
def derive(self, perspective):
return self.regex.match(perspective[self.input]) is not None
class PoolIndexDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'pool_index'
def derive(self, perspective):
m = self.regex.match(perspective[self.input])
if m is None:
return None
return int(m.group(1))
class FromControlDeriver(ForwardDeriver):
def derive(self, perspective):
if not perspective.get('control', False):
return None
return self.format % perspective
class DMAFrameIndexDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'dma_frame_index'
def derive(self, perspective):
m = self.regex.match(perspective[self.input])
if m is None:
return None
return int(m.group(1))
class PerThreadDeriver(Deriver):
def __init__(self, name):
self.name = name
def inputs(self):
return set(('instance', 'interface', 'intra_index'))
def output(self):
return self.name
def derive(self, perspective):
return '%s_%d_%s_%d_%s_%s' % (
perspective['instance'],
len(perspective['instance']),
perspective['interface'],
len(perspective['interface']),
perspective['intra_index'],
self.name)
class FromControlPerThreadDeriver(Deriver):
def __init__(self, name):
self.name = name
def inputs(self):
return set(('instance',))
def output(self):
return self.name
def derive(self, perspective):
if not perspective.get('control', False):
return None
return '%s_%d_0_control_%d_%s' % (
perspective['instance'],
len(perspective['instance']),
len('0_control'),
self.name)
class PerThreadInstanceDeriver(Deriver):
def __init__(self, name):
self.name = name
self.outer = re.compile(r'(?P<remainder>.*)_(?P<len>\d+)_\d{4}_(?P<type>.*)$')
self.inner = re.compile(r'(?P<instance>.*)_(?P<len>\d+)_$')
def inputs(self):
return set((self.name,))
def output(self):
return 'instance'
def derive(self, perspective):
m = self.outer.match(perspective[self.name])
if m is None:
return None
l = int(m.group('len'))
assert len(m.group('remainder')) >= l, 'unexpected fault in undoing ' \
'%s name mangling (name mangling and inverse mismatched?)' % self.name
assert m.group('type') == self.name, 'unexpected type suffix deriving instance ' \
'from %s (expected %s, got %s)' % (perspective[self.name], self.name, m.group('type'))
remainder = m.group('remainder')[:-l]
m = self.inner.match(remainder)
if m is None:
return None
l = int(m.group('len'))
assert len(m.group('instance')) == l, 'unexpected fault in undoing ' \
'%s name mangling (name mangling and inverse mismatched?)' % self.name
return m.group('instance')
class FromControlPerThreadInstanceDeriver(Deriver):
def __init__(self, name):
self.name = name
self.regex = re.compile(r'(?P<instance>.*)_(?P<instance_len>\d+)'
r'_0_control_(?P<control_len>\d+)_(?P<type>.*)$')
def inputs(self):
return set((self.name,))
def output(self):
return 'instance'
def derive(self, perspective):
m = self.regex.match(perspective[self.name])
if m is None:
return None
assert m.group('type') == self.name, 'unexpected type suffix deriving instance ' \
'from %s (expected %s, got %s)' % (perspective[self.name], self.name, m.group('type'))
control_len = int(m.group('control_len'))
if control_len != len('0_control'):
return None
instance_len = int(m.group('instance_len'))
if instance_len != len(m.group('instance')):
return None
return m.group('instance')
class PerThreadInterfaceDeriver(Deriver):
def __init__(self, name):
self.name = name
self.prefix = re.compile(r'(?P<interface>.*)_(?P<len>\d+)_\d{4}_(?P<type>.*)$')
def inputs(self):
return set((self.name,))
def output(self):
return 'interface'
def derive(self, perspective):
m = self.prefix.match(perspective[self.name])
if m is None:
return None
l = int(m.group('len'))
assert len(m.group('interface')) >= l, 'unexpected fault in undoing ' \
'%s name mangling (name mangling and inverse mismatched?)' % self.name
assert m.group('type') == self.name, 'unexpected type suffix deriving interface ' \
'from %s (expected %s, got %s)' % (perspective[self.name], self.name, m.group('type'))
return m.group('interface')[-l:]
class PerThreadIntraindexDeriver(Deriver):
def __init__(self, name):
self.name = name
self.regex = re.compile(r'.*_(?P<intra_index>\d{4})_(?P<type>.*)$')
def inputs(self):
return set((self.name,))
def output(self):
return 'intra_index'
def derive(self, perspective):
m = self.regex.match(perspective[self.name])
if m is None:
return None
assert m.group('type') == self.name, 'unexpected type suffix deriving intra index ' \
'from %s (expected %s, got %s)' % (perspective[self.name], self.name, m.group('type'))
return m.group('intra_index')
class ToControlPerThreadDeriver(Deriver):
def __init__(self, name):
self.name = name
self.regex = re.compile(r'.*_0_control_(?P<len>\d+)_(?P<type>.*)$')
def inputs(self):
return set((self.name,))
def output(self):
return 'control'
def derive(self, perspective):
m = self.regex.match(perspective[self.name])
if m is None:
return False
assert m.group('type') == self.name, 'unexpected type suffix deriving control ' \
'from %s (expected %s, got %s)' % (perspective[self.name], self.name, m.group('type'))
return int(m.group('len')) == len('0_control')
# Phases.
RUNNER, TEMPLATES, FILTERS = list(range(3))
# Instantiate the derivers to describe how name mangling happens in CAmkES. If
# you want to modify the name mangling scheme, this is the place to do it.
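# For example, the RUNNER pair
#   ForwardDeriver('%(group)s_cnode', 'cnode')
#   BackwardDeriver(r'(.+)_cnode$', 'cnode', 'group')
# lets a Perspective that knows {'group': 'g0'} derive the CNode name 'g0_cnode',
# and lets one that only knows the CNode name recover the group by regex match.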
DERIVATIONS = {
RUNNER:[
ForwardDeriver('%(group)s_group_bin_pd', 'pd'),
ForwardDeriver('%(elf_name)s_pd', 'pd'),
BackwardDeriver(r'(.+)_pd$', 'pd', 'elf_name'),
BackwardDeriver(r'(.+)_group_bin_pd$', 'pd', 'group'),
ForwardDeriver('%(group)s_cnode', 'cnode'),
BackwardDeriver(r'(.+)_cnode$', 'cnode', 'group'),
], TEMPLATES:[
ForwardDeriver('dma_frame_%(dma_frame_index)04d', 'dma_frame_symbol'),
DMAFrameIndexDeriver(r'dma_frame_([0-9]+)$', 'dma_frame_symbol'),
ForwardDeriver('_camkes_ipc_buffer_%(safe_instance)s_%(interface)s_%(intra_index)04d', 'ipc_buffer_symbol'),
FromControlDeriver('_camkes_ipc_buffer_%(safe_instance)s_0_control', 'ipc_buffer_symbol'),
ControlDeriver(r'_camkes_ipc_buffer_.+_0_control$', 'ipc_buffer_symbol'),
ForwardDeriver('_camkes_stack_%(safe_instance)s_%(interface)s_%(intra_index)04d', 'stack_symbol'),
FromControlDeriver('_camkes_stack_%(safe_instance)s_0_control', 'stack_symbol'),
ControlDeriver(r'_camkes_stack_.+_0_control$', 'stack_symbol'),
ForwardDeriver('%(to_interface)s_cached', 'hardware_cached'),
BackwardDeriver(r'^(.+)_cached', 'hardware_cached', 'to_interface'),
ForwardDeriver('%(group)s_group_bin', 'elf_name'),
BackwardDeriver(r'(.+)_group_bin', 'elf_name', 'group'),
ForwardDeriver('camkes_dma_pool', 'dma_pool_symbol'),
BackwardDeriver(r'.*?\.?([a-zA-Z_]\w*)$', 'instance', 'safe_instance'),
ControlDeriver(r'_passive$', 'passive_attribute'),
FromControlDeriver('_passive', 'passive_attribute'),
ForwardDeriver('%(interface)s_passive', 'passive_attribute'),
BackwardDeriver(r'([^_].*)_passive$', 'passive_attribute', 'interface'),
], FILTERS:[
PerThreadDeriver('tcb'),
FromControlPerThreadDeriver('tcb'),
PerThreadInstanceDeriver('tcb'),
FromControlPerThreadInstanceDeriver('tcb'),
PerThreadInterfaceDeriver('tcb'),
PerThreadIntraindexDeriver('tcb'),
ToControlPerThreadDeriver('tcb'),
ForwardDeriver('_camkes_ipc_buffer_%(safe_instance)s_%(interface)s_%(intra_index)s', 'ipc_buffer_symbol'),
FromControlDeriver('_camkes_ipc_buffer_%(safe_instance)s_0_control', 'ipc_buffer_symbol'),
ControlDeriver(r'_camkes_ipc_buffer_.+_0_control$', 'ipc_buffer_symbol'),
ForwardDeriver('_camkes_stack_%(safe_instance)s_%(interface)s_%(intra_index)s', 'stack_symbol'),
FromControlDeriver('_camkes_stack_%(safe_instance)s_0_control', 'stack_symbol'),
ControlDeriver(r'_camkes_stack_.+_0_control$', 'stack_symbol'),
ForwardDeriver(r'camkes %(instance)s _camkes_start', 'entry_symbol'),
BackwardDeriver(r'camkes ([a-zA-Z_]\w*) _camkes_start$', 'entry_symbol', 'instance'),
ForwardDeriver('%(group)s_group_bin', 'elf_name'),
BackwardDeriver(r'(.+)_group_bin', 'elf_name', 'group'),
PoolDeriver(r'.+_tcb_pool_[0-9]+$', 'tcb'),
PoolIndexDeriver(r'.+_tcb_pool_([0-9]+)$', 'tcb'),
ForwardDeriver('%(group)s_group_bin_pd', 'pd'),
ForwardDeriver('%(elf_name)s_pd', 'pd'),
BackwardDeriver(r'(.+)_pd$', 'pd', 'elf_name'),
BackwardDeriver(r'(.+)_group_bin_pd$', 'pd', 'group'),
ForwardDeriver('%(to_interface)s_cached', 'hardware_cached'),
BackwardDeriver(r'^(.+)_cached', 'hardware_cached', 'to_interface'),
ForwardDeriver('camkes %(instance)s_dma_pool', 'dma_pool_symbol'),
BackwardDeriver(r'camkes (.+)_dma_pool$', 'dma_pool_symbol', 'instance'),
ForwardDeriver('%(instance)s_dma_frame_%(dma_frame_index)04d', 'dma_frame_symbol'),
BackwardDeriver(r'(.+)_dma_frame_[0-9]+$', 'dma_frame_symbol', 'instance'),
DMAFrameIndexDeriver(r'.+_dma_frame_([0-9]+)$', 'dma_frame_symbol'),
ControlDeriver(r'_priority$', 'priority_attribute'),
FromControlDeriver('_priority', 'priority_attribute'),
ForwardDeriver('%(interface)s_priority', 'priority_attribute'),
BackwardDeriver(r'([a-zA-Z_]\w*)_priority$', 'priority_attribute', 'interface'),
ControlDeriver(r'_affinity$', 'affinity_attribute'),
FromControlDeriver('_affinity', 'affinity_attribute'),
ForwardDeriver('%(interface)s_affinity', 'affinity_attribute'),
BackwardDeriver(r'([a-zA-Z_]\w*)_affinity$', 'affinity_attribute', 'interface'),
ControlDeriver(r'_domain$', 'domain_attribute'),
FromControlDeriver('_domain', 'domain_attribute'),
ForwardDeriver('%(interface)s_domain', 'domain_attribute'),
BackwardDeriver(r'([a-zA-Z_]\w*)_domain$', 'domain_attribute', 'interface'),
ForwardDeriver('%(group)s_cnode', 'cnode'),
BackwardDeriver(r'(.+)_cnode$', 'cnode', 'group'),
BackwardDeriver(r'.*?\.?([a-zA-Z_]\w*)$', 'instance', 'safe_instance'),
PerThreadDeriver('sc'),
FromControlPerThreadDeriver('sc'),
PerThreadInstanceDeriver('sc'),
FromControlPerThreadInstanceDeriver('sc'),
PerThreadInterfaceDeriver('sc'),
PerThreadIntraindexDeriver('sc'),
ToControlPerThreadDeriver('sc'),
FromControlPerThreadInstanceDeriver('passive_init_sc'),
FromControlPerThreadDeriver('passive_init_sc'),
ToControlPerThreadDeriver('passive_init_sc'),
ControlDeriver(r'^_max_priority$', 'max_priority_attribute'),
FromControlDeriver('_max_priority', 'max_priority_attribute'),
ForwardDeriver('%(interface)s_max_priority', 'max_priority_attribute'),
BackwardDeriver(r'^([^_].*)_max_priority$', 'max_priority_attribute', 'interface'),
ControlDeriver(r'^_period$', 'period_attribute'),
FromControlDeriver('_period', 'period_attribute'),
ForwardDeriver('%(interface)s_period', 'period_attribute'),
BackwardDeriver(r'^([^_].*)_period$', 'period_attribute', 'interface'),
ControlDeriver(r'^_budget$', 'budget_attribute'),
FromControlDeriver('_budget', 'budget_attribute'),
ForwardDeriver('%(interface)s_budget', 'budget_attribute'),
BackwardDeriver(r'^([^_].*)_budget$', 'budget_attribute', 'interface'),
ControlDeriver(r'^_data$', 'data_attribute'),
FromControlDeriver('_data', 'data_attribute'),
ForwardDeriver('%(interface)s_data', 'data_attribute'),
BackwardDeriver(r'^([^_].*)_data$', 'data_attribute', 'interface'),
ControlDeriver(r'^_sc_size_bits$', 'size_bits_attribute'),
FromControlDeriver('_sc_size_bits', 'size_bits_attribute'),
ForwardDeriver('%(interface)s_sc_size_bits', 'size_bits_attribute'),
BackwardDeriver(r'^([^_].*)_sc_size_bits$', 'size_bits_attribute', 'interface'),
],
}
class Perspective(object):
'''A partial state from which to mangle symbols. That may make no sense,
but consider this as a collection of *some* of the symbols we need from
which *all* the symbols we need can be derived. You need to pass some
initial symbols in to the constructor. These may not be sufficient to
derive all other known symbols, but they must be sufficient to derive any
you need. The known symbols can be updated at any point via __setitem__. A
more appropriate name for this class would be 'context', but I didn't want
to cause confusion by introducing yet another 'context' into this code
base.'''
def __init__(self, phase=FILTERS, **kwargs):
self.kwargs = kwargs
self.derivations = DERIVATIONS[phase]
if __debug__:
# When optimisations are not enabled, infer everything possible
# upfront (not | |
<gh_stars>0
import asyncio
import re
from collections import Counter
from distutils.version import LooseVersion
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union, cast
import msgpack
import nats
import validators
from asgiref.sync import sync_to_async
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.cache import cache
from django.db import models
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from packaging import version as pyver
from core.models import TZ_CHOICES
from core.utils import get_core_settings
from logs.models import BaseAuditModel, DebugLog
from tacticalrmm.constants import ONLINE_AGENTS, CheckType, CheckStatus, DebugLogType
from tacticalrmm.models import PermissionQuerySet
if TYPE_CHECKING:
from alerts.models import Alert, AlertTemplate
from automation.models import Policy
from autotasks.models import AutomatedTask
from checks.models import Check
from clients.models import Client
from winupdate.models import WinUpdatePolicy
# type helpers
Disk = Union[Dict[str, Any], str]
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat = models.CharField(max_length=255, default="windows")
goarch = models.CharField(max_length=255, null=True, blank=True)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(max_length=30)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
@property
def client(self) -> "Client":
return self.site.client
@property
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
else:
return get_core_settings().default_time_zone
@property
def is_posix(self) -> bool:
return self.plat == "linux" or self.plat == "darwin"
@property
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
@property
def winagent_dl(self) -> Optional[str]:
if self.arch == "64":
return settings.DL_64
elif self.arch == "32":
return settings.DL_32
return None
@property
def win_inno_exe(self) -> Optional[str]:
if self.arch == "64":
return f"winagent-v{settings.LATEST_AGENT_VER}.exe"
elif self.arch == "32":
return f"winagent-v{settings.LATEST_AGENT_VER}-x86.exe"
return None
@property
def status(self) -> str:
offline = djangotime.now() - djangotime.timedelta(minutes=self.offline_time)
overdue = djangotime.now() - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return "offline"
elif (self.last_seen < offline) and (self.last_seen < overdue):
return "overdue"
else:
return "online"
else:
return "offline"
@property
def checks(self) -> Dict[str, Any]:
from checks.models import CheckResult
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else check.alert_severity
)
if alert_severity == "error":
failing += 1
elif alert_severity == "warning":
warning += 1
elif alert_severity == "info":
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
@property
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
ret.append([x["Name"] for x in cpu if "Name" in x][0])
return ret
except:
return ["unknown cpu model"]
@property
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
@property
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
else:
return ", ".join(ret) if ret else "error getting local ips"
@property
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
try:
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
except:
pass
try:
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
except:
pass
return "unknown make/model"
@property
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
@classmethod
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == "online"
]
return [i for i in cls.objects.only(*ONLINE_AGENTS) if i.status == "online"]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in [self.policy, site_policy, client_policy, default_policy]
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None,
"site_policy": site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None,
"client_policy": client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None,
"default_policy": default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None,
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
                interval = check.run_interval if check.run_interval >= 15 else 15  # editor's completion (assumed); the source row is truncated here
        return interval
<reponame>ivmfnal/metacat<filename>src/old/mql5.py
from dbobjects import DBDataset, DBFile, DBNamedQuery, DBFileSet
import json, time
from lark import Lark
from lark import Transformer, Tree, Token
from lark.visitors import Interpreter
import pprint
CMP_OPS = [">" , "<" , ">=" , "<=" , "==" , "=" , "!="]
MQL_Grammar = """
exp: add_exp -> f_
add_exp : add_exp "+" mul_exp -> add
| add_exp "-" mul_exp -> subtract
| mul_exp -> f_
mul_exp : mul_exp "*" term_with_params -> mult
| term_with_params -> f_
term_with_params : with_clause term2
| term2 -> f_
with_clause : "with" param_def ("," param_def)*
param_def: CNAME "=" constant
term2 : term -> f_
| filterable_term "where" meta_exp -> meta_filter
?term : dataset_exp -> f_
| filterable_term -> f_
?filterable_term: union -> f_
| join -> f_
| "filter" CNAME "(" filter_params ")" "(" exp_list ")" -> filter
| "parents" "(" exp ")" -> parents_of
| "children" "(" exp ")" -> children_of
| "query" namespace_name -> named_query
| "(" exp ")" -> f_
union: "union" "(" exp_list ")"
| "[" exp_list "]"
join: "join" "(" exp_list ")"
| "{" exp_list "}"
exp_list: exp ("," exp)*
filter_params: ( constant ("," constant)* )? -> filter_params
dataset_exp: "dataset" namespace_name ("where" meta_exp)? -> dataset
namespace_name: (FNAME ":")? FNAME
?meta_exp: meta_or -> f_
meta_or: meta_and ( "or" meta_and )*
meta_and: term_meta ( "and" term_meta )*
term_meta: ANAME CMPOP constant -> cmp_op
| constant "in"i ANAME -> in_op
| "(" meta_exp ")" -> f_
| "!" term_meta -> meta_not
constant : SIGNED_FLOAT -> float_constant
| STRING -> string_constant
| SIGNED_INT -> int_constant
| BOOL -> bool_constant
ANAME: WORD ("." WORD)*
FNAME: LETTER ("_"|"-"|"."|LETTER|DIGIT)*
WORD: LETTER ("_"|LETTER|DIGIT)*
CMPOP: ">" | "<" | ">=" | "<=" | "==" | "=" | "!="
BOOL: "true"i | "false"i
%import common.CNAME
%import common.SIGNED_INT
%import common.SIGNED_FLOAT
%import common.ESCAPED_STRING -> STRING
%import common.WS
%import common.LETTER
%import common.DIGIT
%ignore WS
"""
class Node(object):
def __init__(self, typ, children=[], meta=None):
self.T = typ
self.M = meta
self.C = children[:]
def __str__(self):
return "<Node %s meta=%s c:%d>" % (self.T, self.M, len(self.C))
__repr__ = __str__
def __add__(self, lst):
return Node(self.T, self.C + lst, self.M)
def as_list(self):
out = [self.T, self.M]
for c in self.C:
if isinstance(c, Node):
out.append(c.as_list())
else:
out.append(c)
return out
def _pretty(self, indent=0):
out = []
out.append("%s%s %s" % (" "*indent, self.T, '' if self.M is None else self.M))
for c in self.C:
if isinstance(c, Node):
out += c._pretty(indent+2)
else:
out.append("%s%s" % (" "*(indent+2), repr(c)))
return out
def pretty(self):
return "\n".join(self._pretty())
def jsonable(self):
d = dict(T=self.T, M=self.M, C=[c.jsonable() if isinstance(c, Node) else c
for c in self.C]
)
d["///class///"] = "node"
return d
def to_json(self):
return json.dumps(self.jsonable())
@staticmethod
def from_jsonable(data):
if isinstance(data, dict) and data.get("///class///") == "node":
return Node(data["T"],
children = [Node.from_jsonable(c) for c in data.get("C", [])],
meta = data.get("M")
)
else:
return data
@staticmethod
def from_json(text):
return Node.from_jsonable(json.loads(text))
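# Editor's sketch (not in the original source): Node trees round-trip through plain
# JSON via to_json()/from_json(), which makes a parsed query easy to cache or ship
# between processes.
def _example_node_roundtrip():
    tree = Node("union", children=[Node("dataset", meta=["ns", "files"])])
    restored = Node.from_json(tree.to_json())
    assert restored.T == "union" and restored.C[0].M == ["ns", "files"]
    return restored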
def pass_node(method):
def decorated(self, *params, **args):
return method(self, *params, **args)
decorated.__pass_node__ = True
return decorated
class Visitor(object):
def visit(self, node, context):
if not isinstance(node, Node):
return
node_type, children = node.T, node.C
if hasattr(self, node_type):
method = getattr(self, node_type)
visit_children = method(node, context)
else:
visit_children = self.__default(node, context)
if visit_children:
for c in children:
self.visit(c, context)
def __default(self, node, context):
return True
class Ascender(object):
def __init__(self):
self.Indent = ""
def walk(self, node):
if not isinstance(node, Node):
return node
node_type, children = node.T, node.C
#print("Ascender.walk:", node_type, children)
assert isinstance(node_type, str)
#print("walk: in:", node.pretty())
saved = self.Indent
self.Indent += " "
children = [self.walk(c) for c in children]
self.Indent = saved
#print("walk: children->", children)
if hasattr(self, node_type):
method = getattr(self, node_type)
if hasattr(method, "__pass_node__") and getattr(method, "__pass_node__"):
out = method(node)
else:
out = method(children, node.M)
else:
out = self.__default(node, children)
return out
def __default(self, node, children):
return Node(node.T, children=children, meta=node.M)
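# Editor's sketch (not in the original source): Ascender dispatches on Node.T, so a
# bottom-up reducer is just a subclass with one method per node type of interest.
class _ExampleDatasetLister(Ascender):
    # Rebuilds the tree unchanged while recording each dataset's (namespace, name).
    def __init__(self):
        Ascender.__init__(self)
        self.Found = []
    def dataset(self, children, meta):
        if children and isinstance(children[0], Node):
            self.Found.append(tuple(children[0].M))
        return Node("dataset", children=children, meta=meta)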
class _Converter(Transformer):
def convert(self, tree, default_namespace):
tree = self.transform(tree)
return self._apply_params({"namespace":default_namespace}, tree)
def f_(self, args):
assert len(args) == 1
return args[0]
def int_constant(self, args):
return int(args[0].value)
def float_constant(self, args):
return float(args[0].value)
def bool_constant(self, args):
#print("bool_constant:", args, args[0].value)
return args[0].value.lower() == "true"
def string_constant(self, args):
s = args[0].value
if s[0] == '"':
s = s[1:-1]
return s
def named_query(self, args):
assert len(args) == 1
return Node("named_query", meta = args[0].M) # value = (namespace, name) - tuple
def exp_list(self, args):
return args
def __default__(self, data, children, meta):
#print("__default__:", data, children)
return Node(data, children)
def param_def(self, args):
return (args[0].value, args[1])
def _apply_params(self, params, node):
if isinstance(node, Node):
#print("_apply_params:", node)
if node.T == "namespace_name":
assert len(node.M) == 2
if node.M[0] is None and "namespace" in params:
node.M[0] = params["namespace"]
#print("_apply_params: applied namespace:", params["namespace"])
else:
for n in node.C:
self._apply_params(params, n)
return node
def term_with_params(self, args):
assert len(args) == 2
params, term = args
return self._apply_params(params, term)
def with_clause(self, args):
return dict(args)
def parents_of(self, args):
assert len(args) == 1
return Node("parents_of", args)
def children_of(self, args):
assert len(args) == 1
return Node("children_of", args)
def add(self, args):
assert len(args) == 2
left, right = args
if isinstance(left, Node) and left.T == "union":
return left + [right]
else:
return Node("union", [left, right])
def union(self, args):
assert len(args) == 1
args = args[0]
if len(args) == 1: return args[0]
unions = []
others = []
for a in args:
if isinstance(a, Node) and a.T == "union":
unions += a[1:]
else:
others.append(a)
return Node("union", unions + others)
def mult(self, args):
assert len(args) == 2
left, right = args
if isinstance(left, Node) and left.T == "join":
return left + [right]
else:
return Node("join", [left, right])
def join(self, args):
assert len(args) == 1
args = args[0]
if len(args) == 1: return args[0]
joins = []
others = []
for a in args:
if isinstance(a, Node) and a.T == "join":
joins += a.C
else:
others.append(a)
return Node("join", joins + others)
def subtract(self, args):
assert len(args) == 2
left, right = args
return Node("minus", [left, right])
def namespace_name(self, args):
assert len(args) in (1,2)
if len(args) == 1:
return Node("namespace_name", meta=[None, args[0].value]) # no namespace
else:
return Node("namespace_name", meta=[args[0].value, args[1].value])
def dataset(self, args):
assert len(args) in (1,2)
if len(args) == 1:
return Node("dataset", [args[0], None]) # dataset without meta_filter
else:
return Node("dataset", [args[0], args[1]])
def filter(self, args):
assert len(args) == 3
return Node("filter", args[2], meta = (args[0].value, args[1]))
#def metafilter_exp(self, args):
# assert len(args) == 2
# return Node("meta_filter", args)
def filter_params(self, args):
#print("filter_params:", args)
return args
def cmp_op(self, args):
return Node(args[1].value, [args[0].value, args[2]])
def in_op(self, args):
return Node("in", [args[1].value, args[0]])
#def meta_not(self, args):
# assert len(args) == 1
# #print("meta_not: arg:", args[0])
# return Node("meta_not", [args[0]])
def meta_and(self, args):
if len(args) == 1:
return args[0]
children = []
for a in args:
if a.T == "meta_and":
children += a.C
else:
children.append(a)
return Node("meta_and", children)
def meta_or(self, args):
if len(args) == 1:
return args[0]
children = []
for a in args:
if a.T == "meta_or":
children += a.C
else:
children.append(a)
return Node("meta_or", children)
def _apply_not(self, node):
if node.T == "meta_and":
return Node("meta_or", [self._apply_not(c) for c in node.C])
elif node.T == "meta_or":
return Node("meta_and", [self._apply_not(c) for c in node.C])
elif node.T == "meta_not":
return node.C[0]
elif node.T in CMP_OPS:
new_op = {
">": "<=",
"<": ">=",
">=": "<",
"<=": ">",
"=": "!=",
"==": "!=",
"!=": "=="
}[node.T]
return Node(new_op, node.C)
def meta_not(self, children):
assert len(children) == 1
return self._apply_not(children[0])
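# Editor's sketch (not in the original source): meta_not() above pushes negation down
# with De Morgan's laws instead of keeping a "meta_not" node in the converted tree.
def _example_apply_not():
    conv = _Converter()
    negated = conv._apply_not(Node("meta_and", [Node("<", ["a", 1]), Node(">=", ["b", 2])]))
    # expected shape: Node("meta_or", [Node(">=", ["a", 1]), Node("<", ["b", 2])])
    return negated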
class _Assembler(Ascender):
def __init__(self, db, default_namespace):
Ascender.__init__(self)
self.DB = db
self.DefaultNamespace = default_namespace
def walk(self, inp):
print("Assembler.walk(): in:", inp.pretty() if isinstance(inp, Node) else repr(inp))
out = Ascender.walk(self, inp)
print("Assembler.walk(): out:", out.pretty() if isinstance(out, Node) else repr(out))
return out
def named_query(self, children, query_name):
namespace, name = query_name
namespace = namespace or self.DefaultNamespace
return Query.from_db(self.DB, namespace, name).parse()
class _MetaFlagPusher(Visitor):
def filter(self, node, keep_meta):
for c in node.C:
self.visit(c, True)
return False
def meta_filter(self, node, keep_meta):
assert len(node.C) == 2
self.visit(node.C[0], True)
return False
def dataset(self, node, keep_meta):
if isinstance(node.M, dict):
node.M["keep_meta"] = keep_meta
elif node.M is None:
node.M = {"keep_meta":keep_meta}
else:
raise ValueError("Unknown type of pre-existing metadata for dataset node: %s" % (node.pretty(),))
        return
thread.get()
:param async_req bool
:param int device_group_id: (required)
:param DeviceClusterAlertConfig body: (required)
:return: DeviceClusterAlertConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_device_group_cluster_alert_conf_with_http_info(device_group_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_device_group_cluster_alert_conf_with_http_info(device_group_id, body, **kwargs) # noqa: E501
return data
def add_device_group_cluster_alert_conf_with_http_info(self, device_group_id, body, **kwargs): # noqa: E501
"""Add cluster alert configuration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_group_cluster_alert_conf_with_http_info(device_group_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_group_id: (required)
:param DeviceClusterAlertConfig body: (required)
:return: DeviceClusterAlertConfig
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_group_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_device_group_cluster_alert_conf" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_group_id' is set
if ('device_group_id' not in params or
params['device_group_id'] is None):
raise ValueError("Missing the required parameter `device_group_id` when calling `add_device_group_cluster_alert_conf`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_device_group_cluster_alert_conf`") # noqa: E501
        if 'device_group_id' in params and not re.search(r'\d+', params['device_group_id'] if type(params['device_group_id']) is str else str(params['device_group_id'])): # noqa: E501
            raise ValueError(r"Invalid value for parameter `device_group_id` when calling `add_device_group_cluster_alert_conf`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_group_id' in params:
path_params['deviceGroupId'] = params['device_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/groups/{deviceGroupId}/clusterAlertConf', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceClusterAlertConfig', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_device_group_property(self, gid, body, **kwargs): # noqa: E501
"""add device group property # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_group_property(gid, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int gid: group ID (required)
:param EntityProperty body: (required)
:return: EntityProperty
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_device_group_property_with_http_info(gid, body, **kwargs) # noqa: E501
else:
(data) = self.add_device_group_property_with_http_info(gid, body, **kwargs) # noqa: E501
return data
def add_device_group_property_with_http_info(self, gid, body, **kwargs): # noqa: E501
"""add device group property # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_group_property_with_http_info(gid, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int gid: group ID (required)
:param EntityProperty body: (required)
:return: EntityProperty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['gid', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_device_group_property" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'gid' is set
if ('gid' not in params or
params['gid'] is None):
raise ValueError("Missing the required parameter `gid` when calling `add_device_group_property`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_device_group_property`") # noqa: E501
        if 'gid' in params and not re.search(r'\d+', params['gid'] if type(params['gid']) is str else str(params['gid'])): # noqa: E501
            raise ValueError(r"Invalid value for parameter `gid` when calling `add_device_group_property`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'gid' in params:
path_params['gid'] = params['gid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/groups/{gid}/properties', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityProperty', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_device_property(self, device_id, body, **kwargs): # noqa: E501
"""add device property # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_property(device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param EntityProperty body: (required)
:return: EntityProperty
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_device_property_with_http_info(device_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_device_property_with_http_info(device_id, body, **kwargs) # noqa: E501
return data
def add_device_property_with_http_info(self, device_id, body, **kwargs): # noqa: E501
"""add device property # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_device_property_with_http_info(device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param EntityProperty body: (required)
:return: EntityProperty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_device_property" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `add_device_property`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_device_property`") # noqa: E501
        if 'device_id' in params and not re.search(r'\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
            raise ValueError(r"Invalid value for parameter `device_id` when calling `add_device_property`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/properties', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityProperty', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_escalation_chain(self, body, **kwargs): # noqa: E501
"""add escalation chain # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_escalation_chain(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EscalatingChain body: (required)
:return: EscalatingChain
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_escalation_chain_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_escalation_chain_with_http_info(body, **kwargs) # noqa: E501
return data
def add_escalation_chain_with_http_info(self, body, **kwargs): # noqa: E501
"""add escalation chain # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_escalation_chain_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EscalatingChain body: (required)
:return: EscalatingChain
If the method is called asynchronously,
        returns the request thread.
        """
<gh_stars>0
import logging
import xml.etree.ElementTree as ET
import dateutil.parser as DU
from typing import List, Union
from .address import Address
from .affiliation import Affiliation
from .biblio_version import BibliographicItemVersion
from .bibliographic_date import BibliographicDate, BibliographicDateType
from .bibliographic_item import BibliographicItem, BibliographicItemType
from .biblio_note import BiblioNoteCollection, BiblioNote
from .bib_item_locality import Locality, BibItemLocality, LocalityStack, \
SourceLocalityStack, SourceLocality
from .biblio_note import BiblioNote
from .contact import Contact, ContactType
from .classification import Classification
from .contribution_info import ContributionInfo, ContributorRole
from .copyright_association import CopyrightAssociation
from .document_identifier import DocumentIdentifier
from .document_relation import DocumentRelation
from .document_status import DocumentStatus
from .editorial_group import EditorialGroup
from .formatted_ref import FormattedRef
from .formatted_string import FormattedString, FormattedStringFormat
from .ics import ICS
from .localized_string import LocalizedString
from .person import Person, FullName, PersonIdentifier
from .place import Place
from .series import Series, SeriesType
from .structured_identifier import StructuredIdentifier, \
StructuredIdentifierCollection
from .technical_committee import TechnicalCommittee
from .typed_title_string import TypedTitleString, TypedTitleStringCollection
from .typed_uri import TypedUri
from .organization import Organization, OrgIdentifier
from .medium import Medium
from .validity import Validity
from .workgroup import WorkGroup
def from_xml(xml: Union[ET.ElementTree, ET.Element]) -> BibliographicItem:
bibitem = xml.getroot() if isinstance(xml, ET.ElementTree) else xml
if bibitem.tag in ["bibitem", "bibdata"]:
return _fetch_bibliographic_item(bibitem)
else:
logging.warning(
"[relaton-bib] WARNING: "
"can't find bibitem or bibdata element in the XML")
def _fetch_bibliographic_item(bibitem: ET.Element):
fetched = bibitem.find("./fetched")
if fetched is not None:
fetched = DU.parse(fetched.text)
docnumber = bibitem.find("./docnumber")
if docnumber is not None:
docnumber = docnumber.text
edition = bibitem.find("./edition")
if edition is not None:
edition = edition.text
ext = bibitem.find("./ext")
doctype = None
subdoctype = None
if ext is not None:
doctype = ext.find("doctype")
if doctype is not None:
doctype = doctype.text
subdoctype = ext.find("subdoctype")
if subdoctype is not None:
subdoctype = subdoctype.text
return BibliographicItem(
id=bibitem.get("id", None),
type=bibitem.get("type", None),
fetched=fetched,
title=_fetch_titles(bibitem),
formattedref=_fref(bibitem),
link=_fetch_link(bibitem),
docidentifier=_fetch_docid(bibitem),
docnumber=docnumber,
date=_fetch_dates(bibitem),
contributor=_fetch_contributors(bibitem),
edition=edition,
version=_fetch_version(bibitem),
biblionote=_fetch_note(bibitem),
language=_fetch_list(bibitem, "./language"),
script=_fetch_list(bibitem, "./script"),
abstract=_fetch_abstract(bibitem),
status=_fetch_status(bibitem),
copyright=_fetch_copyright(bibitem),
relation=_fetch_relations(bibitem),
series=_fetch_series(bibitem),
medium=_fetch_medium(bibitem),
place=_fetch_place(bibitem),
extent=_fetch_extent(bibitem),
accesslocation=_fetch_list(bibitem, "./accesslocation"),
classification=_fetch_classification(bibitem),
keyword=_fetch_list(bibitem, "./keyword"),
license=_fetch_list(bibitem, "./license"),
validity=_fetch_validity(bibitem),
doctype=doctype,
subdoctype=subdoctype,
editorialgroup=_fetch_editorialgroup(ext),
ics=_fetch_ics(ext),
structuredidentifier=_fetch_structuredidentifier(ext),
)
def _fetch_titles(item: ET.Element) -> TypedTitleStringCollection:
return TypedTitleStringCollection(list(map(
lambda t: _ttitle(t), item.findall("./title"))))
def _fetch_version(item: ET.Element) -> BibliographicItemVersion:
version = item.find("./version")
    if version is None:
return
rev_date = version.find("revision-date")
if rev_date is not None:
rev_date = rev_date.text
draft = _fetch_list(version, "draft")
return BibliographicItemVersion(revision_date=rev_date, draft=draft)
def _fetch_place(item: ET.Element) -> List[Place]:
return list(map(
lambda pl: Place(name=pl.text,
uri=pl.get("uri"),
region=pl.get("region")),
item.findall("./place")))
def _fetch_note(item: ET.Element) -> BiblioNoteCollection:
return BiblioNoteCollection(list(map(
lambda n: BiblioNote(content=n.text, **n.attrib),
item.findall("./note"))))
def _fetch_list(item: ET.Element, xpath: str) -> List[str]:
if item is None:
return []
return list(map(
lambda l: l.text,
item.findall(xpath)))
def _fetch_series(item: ET.Element) -> List[Series]:
result = []
for sr in item.findall("./series"):
abbr = sr.find("abbreviation")
if abbr is not None:
abbr = _localized_str(abbr)
formattedref = _fref(sr)
title = _ttitle(sr.find("title"))
if not (formattedref or title):
continue
props = {p: n.text
for p in ["place",
"organization",
"from",
"to",
"number",
"partnumber"] if (n := sr.find(p)) is not None}
result.append(Series(
type=sr.get("type"),
formattedref=formattedref,
title=title,
place=props.get("place"),
organization=props.get("organization"),
abbreviation=abbr,
from_=props.get("from"),
to=props.get("to"),
number=props.get("number"),
partnumber=props.get("partnumber")))
return result
def _fetch_medium(item: ET.Element) -> Medium:
medium = item.find("./medium")
    if medium is None:
return
props = {p: n.text
for p in ["form", "size", "scale"]
if (n := medium.find(p)) is not None}
return Medium(**props)
def _fetch_extent(item: ET.Element) -> List[BibItemLocality]:
result = []
for ext in item.findall("./extent"):
type = ext.get("type")
reference_from = ext.find("referenceFrom")
if reference_from is not None:
reference_from = reference_from.text
reference_to = ext.find("referenceTo")
if reference_to is not None:
reference_to = reference_to.text
result.append(BibItemLocality(
type=type,
reference_from=reference_from,
reference_to=reference_to))
return result
def _fetch_classification(item: ET.Element) -> List[Classification]:
return list(map(
lambda cls: Classification(type=cls.get("type"),
value=cls.text),
item.findall("./classification")))
def _fetch_validity(item: ET.Element) -> Validity:
validity = item.find("./validity")
if validity is None:
return
begins = validity.find("validityBegins")
if begins is not None:
begins = DU.parse(begins.text)
ends = validity.find("validityEnds")
if ends is not None:
ends = DU.parse(ends.text)
revision = validity.find("revision")
if revision is not None:
revision = DU.parse(revision.text)
props = {p: DU.parse(n.text)
for t, p in {"validityBegins": "begins",
"validityEnds": "ends",
"revision": "revision"}.items()
if (n := validity.find(t)) is not None}
return Validity(**props)
def _fetch_docid(item: ET.Element) -> List[DocumentIdentifier]:
return list(map(
lambda did: DocumentIdentifier(id=did.text,
type=did.get("type"),
scope=did.get("scope")),
item.findall("./docidentifier")))
def _ttitle(title: ET.Element) -> TypedTitleString:
if title is None:
return []
content = _localized_strs(title, "./variant")
if not any(content):
content = title.text
props = title.attrib.copy()
props["content"] = content
return TypedTitleString(**props)
def _localized_strs(node: ET.Element, xpath: str) -> List[LocalizedString]:
return list(map(
lambda v: _localized_str(v),
node.findall(xpath)))
def _fetch_status(item: ET.Element) -> DocumentStatus:
status = item.find("./status")
if status is None:
return
stg = status.find("stage")
iter = status.find("iteration")
if iter is not None:
iter = iter.text
return DocumentStatus(
stage=status.text if stg is None else _stage(stg),
substage=_stage(status.find("substage")),
iteration=iter,
)
def _stage(node: ET.Element) -> DocumentStatus.Stage:
if node is None:
return
return DocumentStatus.Stage(
value=node.text,
abbreviation=node.get("abbreviation"))
def _fetch_dates(item: ET.Element) -> List[BibliographicDate]:
result = []
for d in item.findall("./date"):
props = {p: n.text
for p in ["on", "from", "to"]
if (n := d.find(p)) is not None}
props["type"] = d.get("type", BibliographicDateType.PUBLISHED)
if "from" in props:
props["from_"] = props.pop("from")
elif "on" not in props:
continue
result.append(BibliographicDate(**props))
return result
def _get_org(org: ET.Element) -> Organization:
props = {p: n.text
for p in ["abbreviation", "uri"]
if (n := org.find(p)) is not None}
props["name"] = list(map(
lambda n: _localized_str(n),
org.findall(f"./name")))
props["identifier"] = list(map(
lambda i: OrgIdentifier(value=i.text,
type=i.get("type")),
org.findall(f"./identifier")))
props["subdivision"] = _fetch_list(org, "subdivision")
return Organization(**props)
def _get_person(person: ET.Element) -> Person:
affiliations = []
for a in person.findall("./affiliation"):
desc = list(map(
lambda d: _formatted_str(d), a.findall("./description")))
affiliations.append(
Affiliation(
organization=_get_org(a.find("./organization")),
description=desc
)
)
contact = []
for c in list(person):
if c.tag == ContactType.ADDRESS:
props = {p: n.text
for p in ["city", "state", "country", "postcode"]
if (n := c.find(p)) is not None}
props["street"] = _fetch_list(c, "./street")
contact.append(Address(**props))
elif c.tag in [ContactType.PHONE,
ContactType.EMAIL,
ContactType.URI]:
contact.append(Contact(type=c.tag, value=c.text))
identifier = list(map(
lambda pi: PersonIdentifier(type=pi.get("type"), value=pi.text),
person.findall("./identifier")))
fullname_props = dict(
initial=_name_part(person, "initial"),
forename=_name_part(person, "forename"),
addition=_name_part(person, "addition"),
prefix=_name_part(person, "prefix"))
if (cname := person.find("./name/completename")) is not None:
fullname_props["completename"] = _localized_str(cname)
if (sname := person.find("./name/surname")) is not None:
fullname_props["surname"] = _localized_str(sname)
name = FullName(**fullname_props)
return Person(
name=name,
affiliation=affiliations,
contact=contact,
identifier=identifier)
def _name_part(person: ET.Element, part: str) -> List[LocalizedString]:
return list(map(
lambda v: _localized_str(v),
person.findall(f"./name/{part}")))
def _fetch_contributors(item: ET.Element) -> List[ContributionInfo]:
result = []
for c in item.findall("./contributor"):
entity = None
if (org := c.find("./organization")) is not None:
entity = _get_org(org)
elif (person := c.find("./person")) is not None:
entity = _get_person(person)
role = list(map(
lambda r: ContributorRole(
type=r.get("type"),
description=_localized_strs(r, "./description")),
c.findall("./role")))
result.append(ContributionInfo(entity=entity, role=role))
return result
def _fetch_abstract(item: ET.Element) -> List[FormattedString]:
return list(map(lambda a: _formatted_str(a), item.findall("./abstract")))
def _fetch_copyright(item: ET.Element) -> List[TypedUri]:
result = []
for cp in item.findall("./copyright"):
props = {p: n.text
for p in ["from", "to", "scope"]
if (n := cp.find(p)) is not None}
props["from_"] = props.pop("from")
props["owner"] = list(map(
lambda o: ContributionInfo(
entity=_get_org(o.find("organization"))),
cp.findall("owner")))
result.append(CopyrightAssociation(**props))
return result
def _fetch_link(item: ET.Element) -> List[TypedUri]:
return list(map(
lambda l: TypedUri(type=l.get("type"), content=l.text),
item.findall("./uri")))
def _fetch_relations(item: ET.Element, klass=DocumentRelation):
result = []
for rel in item.findall("./relation"):
result.append(klass(
type=rel.get("type"),
description=_relation_description(rel),
bibitem=_fetch_bibliographic_item(rel.find("./bibitem")),
locality=_localities(rel),
source_locality=_source_localities(rel),
))
return result
def _relation_description(rel: ET.Element) -> FormattedString:
d = rel.find("./description")
if d is None:
return
return _formatted_str(d)
def _formatted_str(node: ET.Element) -> FormattedString:
return FormattedString(content=node.text,
language=node.get("language", []),
script=node.get("script", []),
format=node.get("format",
FormattedStringFormat.TEXT_PLAIN))
def _localized_str(node: ET.Element) -> LocalizedString:
return LocalizedString(content=node.text,
language=node.get("language", []),
script=node.get("script", []))
def _localities(loc: ET.Element) -> List[LocalityStack]:
result = []
for lc in list(loc):
if lc.tag not in ["locality", "localityStack"]:
continue
lcs = None
if lc.get("type"):
lcs = [_locality(lc)]
else:
lcs = list(filter(None, map(
lambda l: _locality(l), lc.findall("./locality"))))
result.append(LocalityStack(lcs))
return result
def _locality(loc: ET.Element, klass=Locality):
to = None
if (rt := loc.find("./referenceTo")) is not None:
to = LocalizedString(rt.text)
fr0m = None
if (rf := loc.find("./referenceFrom")) is not None:
fr0m = LocalizedString(rf.text)
return klass(
type=loc.get("type"),
reference_from=fr0m,
reference_to=to
)
def _source_localities(rel: ET.Element) -> List[SourceLocalityStack]:
result = []
for lc in list(rel):
if lc.tag not in ["sourceLocality", "sourceLocalityStack"]:
continue
sls = None
if lc.get("type"):
sls = [_locality(lc, SourceLocality)]
else:
sls = list(filter(None, map(
lambda l: _locality(l, SourceLocality),
lc.findall("./sourceLocality"))))
result.append(SourceLocalityStack(sls))
return result
def _fref(item: ET.Element) -> FormattedRef:
    if item is None:
return
ident = item.find("./formattedref")
if ident is None:
return
return FormattedRef(
content=ident.text,
format=ident.get("format", FormattedStringFormat.TEXT_PLAIN),
language=ident.get("language", []),
script=ident.get("script", []))
def _fetch_editorialgroup(ext: ET.Element) -> EditorialGroup:
if ext is None:
return
eg = ext.find("editorialgroup")
if eg is None:
return
return EditorialGroup(list(map(
lambda tc: TechnicalCommittee(
WorkGroup(name=tc.text,
number=int(tc.get("number")),
type=tc.get("type"),
identifier=tc.get("identifier"),
prefix=tc.get("prefix"))),
eg.findall("./technical-committee"))))
def _fetch_ics(ext: ET.Element) -> List[ICS]:
if ext is None:
return []
result = []
for ics in ext.findall("ics"):
props = {p: n.text
for p in ["code", "text"]
if (n := ics.find(p)) is not None}
result.append(ICS(**props))
return result
def _fetch_structuredidentifier(ext: ET.Element) -> StructuredIdentifier:
if ext is None:
return
sids = []
for si in ext.findall("structuredidentifier"):
agency = _fetch_list(si, "agency")
class_ = si.find("class")
        if class_ is not None:
            class_ = class_.text  # editor's completion following the file's own pattern; the source row is truncated here
mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/proprietary" mounted correctly: FAILED', tid)
if "/dev/mapper/proprietary" in file_structure:
log_green('Verify that "/proprietary" is encrypted: PASSED', tid)
else:
log_red('Verify that "/proprietary" is encrypted: FAILED', tid)
if "/proprietary type ext4" in file_structure:
log_green(
'Verify that "/proprietary" is ext4 type file structure: PASSED', tid)
else:
log_red(
'Verify that "/proprietary" is ext4 type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /proprietary/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_green(
'Verify that "/proprietary" is Read Only type file structure: PASSED', tid)
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
else:
log_red(
'Verify that "/proprietary" is Read Only type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command(
'rm /proprietary/test')
# Testing "/autonet"
# if "/dev/mapper/autonet on /autonet type ext4 (rw,noatime,data=ordered)" in file_structure:
if "/dev/mapper/autonet on /autonet type ext4 (rw,noatime,block_validity,delalloc,barrier,user_xattr,acl)" in file_structure:
log_green(
'Verify that the file structure has "/autonet" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/autonet" mounted correctly: FAILED', tid)
if "/dev/mapper/autonet" in file_structure:
log_green('Verify that "/autonet" is encrypted: PASSED', tid)
else:
log_red('Verify that "/autonet" is encrypted: FAILED', tid)
if "/autonet type ext4" in file_structure:
log_green(
'Verify that "/autonet" is ext4 type file structure: PASSED', tid)
else:
log_red(
'Verify that "/autonet" is ext4 type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /autonet/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that "/autonet" is Read/Write type file structure: FAILED', tid)
else:
log_green(
'Verify that "/autonet" is Read/Write type file structure: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /autonet/test')
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
# Testing "/user"
if "/dev/mapper/user on /user type ext4 (rw,noatime,data=ordered)" in file_structure:
log_green(
'Verify that the file structure has "/user" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/user" mounted correctly: FAILED', tid)
if "/dev/mapper/user" in file_structure:
log_green('Verify that "/user" is encrypted: PASSED', tid)
else:
log_red('Verify that "/user" is encrypted: FAILED', tid)
if "/user type ext4" in file_structure:
log_green(
'Verify that "/user" is ext4 type file structure: PASSED', tid)
else:
log_red(
'Verify that "/user" is ext4 type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /user/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that "/user" is Read/Write type file structure: FAILED', tid)
else:
log_green(
'Verify that "/user" is Read/Write type file structure: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /user/test')
# log_red("error in executing the test command encountered" + result + ": FAILED", tid)
# Testing "/tmp"
# if "tmpfs on /tmp type tmpfs (rw,noatime)" in file_structure:
if "tmpfs on /tmp type tmpfs (rw,relatime)" in file_structure:
log_green(
'Verify that the file structure has "/tmp" mounted correctly: PASSED', tid)
else:
log_red(
'Verify that the file structure has "/tmp" mounted correctly: FAILED', tid)
if "tmpfs on /tmp" in file_structure:
log_green('Verify that "/tmp" is NOT encrypted: PASSED', tid)
else:
log_red('Verify that "/tmp" is encrypted: FAILED', tid)
if "/tmp type tmpfs" in file_structure:
log_green(
'Verify that "/tmp" is tmpfs type file structure: PASSED', tid)
else:
log_red(
'Verify that "/tmp" is tmpfs type file structure: FAILED', tid)
stdin, stdout, stderr = ssh.exec_command('touch /tmp/test')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that "/tmp" is Read/Write type file structure: FAILED', tid)
else:
log_green(
'Verify that "/tmp" is Read/Write type file structure: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rm /tmp/test')
# cryptsetup to check if password is required to mount the partitions
stdin, stdout, stderr = ssh.exec_command(
'echo wrongpw|cryptsetup open --type luks /dev/mmcblk2p11 test --tries=1')
result = stderr.read().decode('UTF-8').replace("\n", "")
if "No key available with this passphrase" not in result:
log_red(
'Verify that a password is required to mount a device(/dev/mmcblk2p11) using cryptsetup: FAILED', tid)
else:
log_green(
'Verify that a password is required to mount a device(/dev/mmcblk2p11) using cryptsetup: PASSED', tid)
# cryptsetup to check if password is required to mount the partitions: successful
stdin, stdout, stderr = ssh.exec_command(
'echo connexus|cryptsetup open --type luks /dev/mmcblk2p11 test --tries=1')
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that a device(/dev/mmcblk2p11) can be mounted if correct password used: FAILED', tid)
else:
log_green(
'Verify that a device(/dev/mmcblk2p11) can be mounted if correct password used: PASSED', tid)
# Check if device is active using cyptsetup: successful
stdin, stdout, stderr = ssh.exec_command('cryptsetup status test')
output = stdout.read().decode('UTF-8').replace("\n", "")
if "/dev/mapper/test is active" not in output:
log_red(
'Verify that a device(/dev/mmcblk2p11) is active to be mounted using cryptsetup: FAILED', tid)
else:
log_green(
'Verify that a device(/dev/mmcblk2p11) is active to be mounted using cryptsetup: PASSED', tid)
# Check /tmp/test directory is created: successful
stdin, stdout, stderr = ssh.exec_command('mkdir /tmp/test')
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that the creation of directory /tmp/test is successful: FAILED', tid)
else:
log_green(
'Verify that the creation of directory /tmp/test is successful: PASSED', tid)
# Check if /tmp/test can be mounted successfully: successful
stdin, stdout, stderr = ssh.exec_command(
'mount /dev/mapper/test /tmp/test')
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that the /tmp/test can be mounted /dev/mapper/test successfully: FAILED', tid)
else:
log_green(
'Verify that the /tmp/test can be mounted /dev/mapper/test successfully: PASSED', tid)
# Check if /tmp/test has all the essential files: successful
stdin, stdout, stderr = ssh.exec_command('ls /tmp/test/etc')
result = stderr.read().decode('UTF-8').replace("\n", "")
output = stdout.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red('Verify that a /tmp/test/etc exist: FAILED', tid)
else:
if "local_features" and "features" and "autonet.conf" in output:
log_green(
'Verify that the /tmp/test/etc has essential files present: PASSED', tid)
else:
log_red(
'Verify that the /tmp/test/etc has essential files present: FAILED', tid)
# Check if /tmp/test can be unmounted successfully: successful
stdin, stdout, stderr = ssh.exec_command('umount /tmp/test')
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that a /tmp/test can be unmounted /dev/mapper/test successfully: FAILED', tid)
else:
log_green(
'Verify that a /tmp/test can be unmounted /dev/mapper/test successfully: PASSED', tid)
# cryptsetup to check if password is required to close the partitions: successful
stdin, stdout, stderr = ssh.exec_command('cryptsetup close test')
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that a device(/dev/mmcblk2p11) can be closed successfully using cryptsetup: FAILED', tid)
else:
log_green(
'Verify that a device(/dev/mmcblk2p11) can be closed successfully using cryptsetup: PASSED', tid)
stdin, stdout, stderr = ssh.exec_command('rmdir /tmp/test')
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red(
'Verify that the /tmp/test is deleted successfully: FAILED', tid)
else:
log_green(
'Verify that the /tmp/test is deleted successfully: PASSED', tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
def test_REFP_018_Respawner_Process(self):
'''Verify the successful implementation, running and functionality of Respawner process'''
tid = 'REFP_018'
print('[Test Case ID ]: %s' % tid)
print('[Test Case Name ]: %s' % inspect.stack()[0].function)
print('[Title ]: Verify the successful implementation and functionality of Respawner process')
print('[Product Requirement ]: EINS-41')
print('[Development Task ]: CONLAREINS-403')
print('[Test Automation Task ]: CONLAREINS-427')
log_blue('[================================================================================================================]')
ssh = self.ssh # handle
# TBA
try:
stdin, stdout, stderr = ssh.exec_command(
'pidof respawner > /dev/null; echo $?')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red("error in executing the command encountered" +
result + ": FAILED", tid)
Respawner = stdout.read().decode('UTF-8').replace("\n", "")
if Respawner == '0':
log_green(
"Verify that the Respawner process is successfully running: PASSED", tid)
else:
log_red(
"Verify that the Respawner process is successfully running: FAILED", tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
output = runr(self, 'pkill unitcomm >/dev/null; echo $?', tid)
if output == '0':
log_green("Verify that unitcom process is successfully stopped: PASSED", tid)
else:
log_red("Verify that unitcom process | |
data types present in file
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
return topo_freq_df, pd.DataFrame()
def current_view_topo_freq_chart(basic_stats_topo_freqs, template, color_mapping):
"""Return pie chart figure object for local topology frequencies
:param basic_stats_topo_freqs: Dataframe of topology frequencies
:type basic_stats_topo_freqs: DataFrame
:return: Plotly express pie chart
:rtype: Figure object
"""
if "Other" in basic_stats_topo_freqs["TopologyID"].to_list():
fig = px.bar(
basic_stats_topo_freqs,
x='TopologyID',
y="Frequency",
color="TopologyID",
color_discrete_map=color_mapping,
text="Frequency",
)
fig.update_layout(
template=template,
uniformtext_minsize=12,
uniformtext_mode='hide',
)
fig.update_traces(textposition='outside')
return fig
else:
fig = px.pie(
basic_stats_topo_freqs,
values="Frequency",
names="TopologyID",
color="TopologyID",
color_discrete_map=color_mapping,
template=template,
title="Current View Topology Frequencies",
)
fig.update_layout(
legend=dict(itemclick=False, itemdoubleclick=False),
margin=dict(l=120, r=20, t=40, b=10),
uniformtext_minsize=12,
uniformtext_mode='hide',
title_x=0.5,
)
fig.update_traces(textposition='inside')
return fig
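# Editor's sketch (not part of the original module): the chart helper only needs the
# two-column frequency frame built above plus a TopologyID -> color map; the values
# below are invented for illustration.
def _example_topo_freq_chart():
    freqs = pd.DataFrame({"TopologyID": ["T1", "T2"], "Frequency": [0.7, 0.3]})
    colors = {"T1": "#1f77b4", "T2": "#ff7f0e"}
    return current_view_topo_freq_chart(freqs, template="plotly_white", color_mapping=colors)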
def whole_genome_datatable(tv_df):
valid_cols = _get_valid_cols(tv_df[4:])
for i in tv_df.columns.to_list()[4:]:
if i in valid_cols:
continue
else:
tv_df.drop(labels=i, axis=1, inplace=True)
df_group = tv_df.groupby(by="TopologyID")
out_df = pd.DataFrame(columns=["TopologyID", "Additional Data", "Num. Windows", "Average", "Std Dev"])
idx = 0
for topology, data in df_group:
additional_datatypes = [i for i in data.columns[4:]]
for datatype in additional_datatypes:
dt_data = data[datatype]
mean = dt_data.mean()
stdev = dt_data.std()
out_df.at[idx, "TopologyID"] = topology
out_df.at[idx, "Additional Data"] = datatype
out_df.at[idx, "Num. Windows"] = len(dt_data)
out_df.at[idx, "Average"] = mean
out_df.at[idx, "Std Dev"] = stdev
idx += 1
continue
columns = [{'id': c, 'name': ["Per-Topology Whole Genome Comparison", c], 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal)} for c in out_df.columns]
data = out_df.to_dict('records')
return data, columns
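# Illustrative sketch (not part of the original module): the core aggregation that
# whole_genome_datatable() performs, shown on a tiny hand-made frame. The column
# name "RF" below is a hypothetical additional-data column, not one from the source.
def _demo_per_topology_summary():
    import pandas as pd
    toy = pd.DataFrame({
        "TopologyID": ["T1", "T1", "T2", "T2"],
        "RF": [0.1, 0.3, 0.2, 0.4],
    })
    # groupby + count/mean/std is what fills Num. Windows / Average / Std Dev
    return toy.groupby("TopologyID")["RF"].agg(["count", "mean", "std"])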
# --- post-hoc tests ---
def mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment):
return sp.posthoc_mannwhitney(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)
def dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment):
return sp.posthoc_dunn(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)
def tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha):
return sp.posthoc_tukey_hsd(tv_df[additional_data_type], tv_df["TopologyID"], alpha=alpha)
# --- Significance tests ---
def kruskal_wallis_H_test(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
"""Return dataframe with Kruskal-Wallis H test information for each topology
"""
d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
H, p = ss.kruskal(*d, nan_policy='omit')
if posthoc_type == "Mann-Whitney rank test":
posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "Dunn's test":
posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "TukeyHSD":
posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
else:
pass
return posthoc, data, columns, H, p
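# Illustrative sketch (not part of the original module): how the branches above turn
# a symmetric post-hoc p-value matrix into the long "A vs B" table, dropping the
# self-comparisons on the diagonal. The matrix values here are made up.
def _demo_posthoc_reshape():
    import pandas as pd
    posthoc = pd.DataFrame(
        [[1.00, 0.03, 0.20],
         [0.03, 1.00, 0.45],
         [0.20, 0.45, 1.00]],
        index=["T1", "T2", "T3"],
        columns=["T1", "T2", "T3"],
    )
    rows = []
    for c1 in posthoc.columns:
        for c2, pval in zip(posthoc.index, posthoc[c1]):
            if c1 == c2:  # remove self-self comparisons
                continue
            rows.append({"comparison": f"{c1} vs {c2}", "p-value": float(pval)})
    return pd.DataFrame(rows)  # six rows for three topologies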
def one_way_anova(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
F, p = ss.f_oneway(*d)
if posthoc_type == "Mann-Whitney rank test":
posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "Dunn's test":
posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "TukeyHSD":
posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
else:
pass
return posthoc, data, columns, F, p
def stats_test_heatmap(posthoc, template):
fig = go.Figure(data=go.Heatmap(
z=posthoc.values,
x=posthoc.columns,
y=posthoc.index,
zmin=0,
zmax=1,
colorscale='Viridis',
colorbar=dict(title='p-value'),
hovertemplate = 'p-value: %{z}<extra></extra>',
))
fig.update_layout(
template=template,
coloraxis_colorbar=dict(title="log(p-value)"),
margin=dict(
t=60,
),
)
return fig
def frequency_distribution(data, name, template):
"""Return frequency density distribution"""
fig = px.histogram(data, x=name, histnorm='density')
fig.update_layout(template=template, margin=dict(t=20, pad=30))
return fig
def mean_frequency_of_alt_data_per_topology(tv_df, topologies, additional_data_type):
out_df = pd.DataFrame(columns=["TopologyID", "Total Windows", f"Mean ({additional_data_type})"])
idx = 1
for i in topologies:
topo_df = tv_df[tv_df["TopologyID"] == i]
additional_data_mean = topo_df[f"{additional_data_type}"].mean()
out_df.at[idx, "TopologyID"] = i
out_df.at[idx, "Total Windows"] = len(topo_df)
out_df.at[idx, f"Mean ({additional_data_type})"] = additional_data_mean
idx += 1
continue
return out_df.to_dict('records')
# ---------------------------------------------------------------------------------
# ------------------------- Graph Customization Functions -------------------------
def set_topology_colors(data, color):
df = pd.read_json(data)
# Set colors to current_topologies
sorted_topologies = df.assign(freq=df.groupby('TopologyID')['TopologyID'].transform('count')).sort_values(by=['freq','TopologyID'],ascending=[False,True]).loc[:,['TopologyID']]
unique_topos = sorted_topologies["TopologyID"].unique()
color_list = (color * ((len(unique_topos) // len(color))))+ color[:len(unique_topos) % len(color)]
output_dict = dict()
for s, c in zip(unique_topos, color_list):
output_dict[s] = c
return output_dict
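# Worked example (illustrative, not from the source) of the palette-cycling arithmetic
# in set_topology_colors(): with 5 topologies and a 3-colour palette, the palette is
# repeated 5 // 3 == 1 time and topped up with the first 5 % 3 == 2 colours.
# The hex values are hypothetical.
#
#   palette = ["#1b9e77", "#d95f02", "#7570b3"]
#   cycled = (palette * (5 // len(palette))) + palette[:5 % len(palette)]
#   # cycled == ["#1b9e77", "#d95f02", "#7570b3", "#1b9e77", "#d95f02"]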
def get_RFxpos(hoverdata, df):
hoverdata = hoverdata['points'][0]
if ('customdata' in hoverdata.keys()) or ('marker.color' in hoverdata.keys()):
return int(hoverdata['x'])
else:
return df.loc[hoverdata['binNumber']]['Window']
def get_Treexpos(hoverdata, df):
hoverdata = hoverdata['points'][0]
if ('customdata' in hoverdata.keys()) or ('marker.color' in hoverdata.keys()):
return int(hoverdata['x'])
else:
return int(hoverdata['x'])
# ---------------------------------------------------------------------------------
# ------------------------- Init + Empty Graph Functions --------------------------
def no_data_graph(template):
"""This function returns a blank figure with a "NO DATA" watermark"""
fig = go.Figure()
fig.update_layout(
template=template,
title='',
annotations=[
dict(
name="draft watermark",
text="NO DATA",
textangle=0,
opacity=0.5,
font=dict(color="white", size=50),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def init_data_graph(template):
"""
This function returns a blank figure with a "NO DATA LOADED" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="NO DATA LOADED",
textangle=0,
opacity=0.9,
font=dict(color="white", size=50),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
fig.update_yaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
return fig
def init_stats_graph(template):
"""
This function returns a blank figure with a "NO DATA" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="NO DATA",
textangle=0,
opacity=0.9,
font=dict(color="white", size=35),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
fig.update_yaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
return fig
def loading_data_graph(template):
"""
This function returns a blank figure with a "GATHERING DATA..." watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="GATHERING DATA...",
textangle=0,
opacity=0.9,
font=dict(color="white", size=100),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def init_RF_graph(template):
"""
This function returns a blank figure with a "Hover Over Data to Activate" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="Hover Over Data to Activate",
textangle=0,
opacity=0.9,
font=dict(color="white", size=100),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def no_tree_data(template, msg):
"""
This function returns a blank figure with a caller-supplied message as a watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text=msg,
textangle=0,
opacity=0.9,
font=dict(size=25),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def zoom_in_gff(template):
"""
This function returns a blank figure with a "Zoom in to minimum 5Mb to view" watermark.
"""
fig = go.Figure()
fig.update_layout(
height=300,
template=template,
annotations=[
dict(
name="draft watermark",
text="Zoom in to minimum 5Mb to view",
textangle=0,
opacity=0.9,
font=dict(color="white", size=25),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
# ---------------------------------------------------------------------------------
# --------------------------- Input File Verification -----------------------------
def validate_chrom_lengths(chromDF, tvDF):
"""Ensure all chromosomes in chromDF are present in tvDF.
Chromosome length file can contain for chromosomes than TV file,
but not the other way around.
Return True if all are found, False if not."""
chrom_names = chromDF['Chromosome'].unique()
tv_chrom_names = tvDF['Chromosome'].unique()
missing_chromosomes = []
for chrom in tv_chrom_names:
    if chrom not in chrom_names:
        missing_chromosomes.append(chrom)
valid = len(missing_chromosomes) == 0
return valid
thresholdWidget = qt.QWidget()
thresholdWidget.setLayout(form)
layout.addWidget(thresholdWidget)
layout.addStretch(1)
self.thresholdGroup = qt.QGroupBox('Threshold')
self.thresholdGroup.setLayout(layout)
return self.thresholdGroup
# Handle mask refresh on the plot
def _updatePlotMask(self):
"""Update mask image in plot"""
mask = self.getSelectionMask(copy=False)
if len(mask):
self.plot.addImage(mask, legend=self._maskName,
colormap=self._colormap,
origin=self._origin,
scale=self._scale,
z=self._z,
replace=False, resetzoom=False)
elif self.plot.getImage(self._maskName):
self.plot.remove(self._maskName, kind='image')
# track widget visibility and plot active image changes
def changeEvent(self, event):
"""Reset drawing action when disabling widget"""
if (event.type() == qt.QEvent.EnabledChange and
not self.isEnabled() and
not self.browseAction.isChecked()):
self.browseAction.trigger() # Disable drawing tool
def showEvent(self, event):
try:
self.plot.sigActiveImageChanged.disconnect(
self._activeImageChangedAfterCare)
except (RuntimeError, TypeError):
pass
self._activeImageChanged() # Init mask + enable/disable widget
self.plot.sigActiveImageChanged.connect(self._activeImageChanged)
def hideEvent(self, event):
self.plot.sigActiveImageChanged.disconnect(self._activeImageChanged)
if not self.browseAction.isChecked():
self.browseAction.trigger() # Disable drawing tool
if len(self.getSelectionMask(copy=False)):
self.plot.sigActiveImageChanged.connect(
self._activeImageChangedAfterCare)
def _activeImageChangedAfterCare(self, *args):
"""Check synchro of active image and mask when mask widget is hidden.
If active image has no more the same size as the mask, the mask is
removed, otherwise it is adjusted to origin, scale and z.
"""
activeImage = self.plot.getActiveImage()
if activeImage is None or activeImage.getLegend() == self._maskName:
# No active image or active image is the mask...
self.plot.sigActiveImageChanged.disconnect(
self._activeImageChangedAfterCare)
else:
colormap = activeImage.getColormap()
self._defaultOverlayColor = rgba(cursorColorForColormap(colormap['name']))
self._setMaskColors(self.levelSpinBox.value(),
self.transparencySlider.value() /
self.transparencySlider.maximum())
self._origin = activeImage.getOrigin()
self._scale = activeImage.getScale()
self._z = activeImage.getZValue() + 1
self._data = activeImage.getData(copy=False)
if self._data.shape != self.getSelectionMask(copy=False).shape:
# Image has not the same size, remove mask and stop listening
if self.plot.getImage(self._maskName):
self.plot.remove(self._maskName, kind='image')
self.plot.sigActiveImageChanged.disconnect(
self._activeImageChangedAfterCare)
else:
# Refresh in case origin, scale, z changed
self._updatePlotMask()
def _activeImageChanged(self, *args):
"""Update widget and mask according to active image changes"""
activeImage = self.plot.getActiveImage()
if activeImage is None or activeImage.getLegend() == self._maskName:
# No active image or active image is the mask...
self.setEnabled(False)
self._data = numpy.zeros((0, 0), dtype=numpy.uint8)
self._mask.reset()
self._mask.commit()
else: # There is an active image
self.setEnabled(True)
colormap = activeImage.getColormap()
self._defaultOverlayColor = rgba(cursorColorForColormap(colormap['name']))
self._setMaskColors(self.levelSpinBox.value(),
self.transparencySlider.value() /
self.transparencySlider.maximum())
self._origin = activeImage.getOrigin()
self._scale = activeImage.getScale()
self._z = activeImage.getZValue() + 1
self._data = activeImage.getData(copy=False)
if self._data.shape != self.getSelectionMask(copy=False).shape:
self._mask.reset(self._data.shape)
self._mask.commit()
else:
# Refresh in case origin, scale, z changed
self._updatePlotMask()
self._updateInteractiveMode()
# Handle whole mask operations
def load(self, filename):
"""Load a mask from an image file.
:param str filename: File name from which to load the mask
:raise Exception: An exception in case of failure
:raise RuntimeWarning: In case the mask was applied but with some
important changes to notice
"""
_, extension = os.path.splitext(filename)
extension = extension.lower()[1:]
if extension == "npy":
try:
mask = numpy.load(filename)
except IOError:
_logger.error("Can't load filename '%s'", filename)
_logger.debug("Backtrace", exc_info=True)
raise RuntimeError('File "%s" is not a numpy file.', filename)
elif extension == "edf":
try:
mask = EdfFile(filename, access='r').GetData(0)
except Exception as e:
_logger.error("Can't load filename %s", filename)
_logger.debug("Backtrace", exc_info=True)
raise e
elif extension == "msk":
if fabio is None:
raise ImportError("Fit2d mask files can't be read: Fabio module is not available")
try:
mask = fabio.open(filename).data
except Exception as e:
_logger.error("Can't load fit2d mask file")
_logger.debug("Backtrace", exc_info=True)
raise e
else:
msg = "Extension '%s' is not supported."
raise RuntimeError(msg % extension)
effectiveMaskShape = self.setSelectionMask(mask, copy=False)
if effectiveMaskShape is None:
return
if mask.shape != effectiveMaskShape:
msg = 'Mask was resized from %s to %s'
msg = msg % (str(mask.shape), str(effectiveMaskShape))
raise RuntimeWarning(msg)
def _loadMask(self):
"""Open load mask dialog"""
dialog = qt.QFileDialog(self)
dialog.setWindowTitle("Load Mask")
dialog.setModal(1)
filters = [
'EDF (*.edf)',
'TIFF (*.tif)',
'NumPy binary file (*.npy)',
# Fit2D mask is displayed anyway fabio is here or not
# to show to the user that the option exists
'Fit2D mask (*.msk)',
]
dialog.setNameFilters(filters)
dialog.setFileMode(qt.QFileDialog.ExistingFile)
dialog.setDirectory(self.maskFileDir)
if not dialog.exec_():
dialog.close()
return
filename = dialog.selectedFiles()[0]
dialog.close()
self.maskFileDir = os.path.dirname(filename)
try:
self.load(filename)
except RuntimeWarning as e:
message = e.args[0]
msg = qt.QMessageBox(self)
msg.setIcon(qt.QMessageBox.Warning)
msg.setText("Mask loaded but an operation was applied.\n" + message)
msg.exec_()
except Exception as e:
message = e.args[0]
msg = qt.QMessageBox(self)
msg.setIcon(qt.QMessageBox.Critical)
msg.setText("Cannot load mask from file. " + message)
msg.exec_()
def save(self, filename, kind):
"""Save current mask in a file
:param str filename: The file where to save the mask
:param str kind: The kind of file to save in 'edf', 'tif', 'npy'
:raise Exception: Raised if the process fails
"""
self._mask.save(filename, kind)
def _saveMask(self):
"""Open Save mask dialog"""
dialog = qt.QFileDialog(self)
dialog.setWindowTitle("Save Mask")
dialog.setModal(1)
filters = [
'EDF (*.edf)',
'TIFF (*.tif)',
'NumPy binary file (*.npy)',
# Fit2D mask is displayed anyway fabio is here or not
# to show to the user that the option exists
'Fit2D mask (*.msk)',
]
dialog.setNameFilters(filters)
dialog.setFileMode(qt.QFileDialog.AnyFile)
dialog.setAcceptMode(qt.QFileDialog.AcceptSave)
dialog.setDirectory(self.maskFileDir)
if not dialog.exec_():
dialog.close()
return
# convert filter name to extension name with the .
extension = dialog.selectedNameFilter().split()[-1][2:-1]
filename = dialog.selectedFiles()[0]
dialog.close()
if not filename.lower().endswith(extension):
filename += extension
if os.path.exists(filename):
try:
os.remove(filename)
except IOError:
msg = qt.QMessageBox(self)
msg.setIcon(qt.QMessageBox.Critical)
msg.setText("Cannot save.\n"
"Input Output Error: %s" % (sys.exc_info()[1]))
msg.exec_()
return
self.maskFileDir = os.path.dirname(filename)
try:
self.save(filename, extension[1:])
except Exception as e:
msg = qt.QMessageBox(self)
msg.setIcon(qt.QMessageBox.Critical)
msg.setText("Cannot save file %s\n%s" % (filename, e.args[0]))
msg.exec_()
def getCurrentMaskColor(self):
"""Returns the color of the current selected level.
:rtype: A tuple or a python array
"""
currentLevel = self.levelSpinBox.value()
if self._defaultColors[currentLevel]:
return self._defaultOverlayColor
else:
return self._overlayColors[currentLevel].tolist()
def _setMaskColors(self, level, alpha):
"""Set-up the mask colormap to highlight current mask level.
:param int level: The mask level to highlight
:param float alpha: Alpha level of mask in [0., 1.]
"""
assert 0 < level <= self._maxLevelNumber
colors = numpy.empty((self._maxLevelNumber + 1, 4), dtype=numpy.float32)
# Set color
colors[:, :3] = self._defaultOverlayColor[:3]
# check if some colors have been directly set by the user
mask = numpy.equal(self._defaultColors, False)
colors[mask, :3] = self._overlayColors[mask, :3]
# Set alpha
colors[:, -1] = alpha / 2.
# Set highlighted level color
colors[level, 3] = alpha
# Set no mask level
colors[0] = (0., 0., 0., 0.)
self._colormap['colors'] = colors
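# Worked example (illustrative, not from the source): with level=2 and alpha=0.8,
# every level gets the overlay colour with alpha/2 == 0.4, level 2 is highlighted at
# the full 0.8, and level 0 (the "no mask" level) stays fully transparent because
# colors[0] is forced to (0., 0., 0., 0.).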
def resetMaskColors(self, level=None):
"""Reset the mask color at the given level to be defaultColors
:param level:
The index of the mask for which we want to reset the color.
If none we will reset color for all masks.
"""
if level is None:
    self._defaultColors[:] = True
else:
    self._defaultColors[level] = True
self._updateColors()
def setMaskColors(self, rgb, level=None):
"""Set the masks color
:param rgb: The rgb color
:param level:
The index of the mask for which we want to change the color.
If none set this color for all the masks
"""
if level is None:
self._overlayColors[:] = rgb
self._defaultColors[:] = False
else:
self._overlayColors[level] = rgb
self._defaultColors[level] = False
self._updateColors()
def getMaskColors(self):
"""masks colors getter"""
return self._overlayColors
def _updateColors(self, *args):
"""Rebuild mask colormap when selected level or transparency change"""
self._setMaskColors(self.levelSpinBox.value(),
self.transparencySlider.value() /
self.transparencySlider.maximum())
self._updatePlotMask()
self._updateInteractiveMode()
def _pencilWidthChanged(self, width):
old = self.pencilSpinBox.blockSignals(True)
try:
self.pencilSpinBox.setValue(width)
finally:
self.pencilSpinBox.blockSignals(old)
old = self.pencilSlider.blockSignals(True)
try:
self.pencilSlider.setValue(width)
finally:
self.pencilSlider.blockSignals(old)
self._updateInteractiveMode()
def _updateInteractiveMode(self):
"""Update the current mode to the same if some cached data have to be
updated. It is the case for the color for example.
"""
if self._drawingMode == 'rectangle':
self._activeRectMode()
elif self._drawingMode == 'polygon':
self._activePolygonMode()
elif self._drawingMode == 'pencil':
self._activePencilMode()
def _handleClearMask(self):
"""Handle clear button clicked: reset current level mask"""
self._mask.clear(self.levelSpinBox.value())
self._mask.commit()
def resetSelectionMask(self):
"""Reset the mask"""
self._mask.reset(shape=self._data.shape)
self._mask.commit()
def _handleInvertMask(self):
"""Invert the current mask level selection."""
self._mask.invert(self.levelSpinBox.value())
self._mask.commit()
# Handle drawing tools UI events
def _interactiveModeChanged(self, source):
"""Handle plot interactive mode changed:
If changed from elsewhere, disable drawing tool
"""
if source is not self:
# Do not trigger browseAction to avoid to call
# self.plot.setInteractiveMode
self.browseAction.setChecked(True)
self._releaseDrawingMode()
def _releaseDrawingMode(self):
"""Release the drawing mode if is was used"""
if self._drawingMode is None:
return
self.plot.sigPlotSignal.disconnect(self._plotDrawEvent)
self._drawingMode = None
def _activeBrowseMode(self):
"""Handle browse action mode triggered by user.
Set plot interactive mode only when
the user is triggering the browse action.
"""
self._releaseDrawingMode()
self.plot.setInteractiveMode('zoom', source=self)
self._updateDrawingModeWidgets()
def _activeRectMode(self):
"""Handle rect action mode triggering"""
self._releaseDrawingMode()
self._drawingMode = 'rectangle'
self.plot.sigPlotSignal.connect(self._plotDrawEvent)
color = self.getCurrentMaskColor()
self.plot.setInteractiveMode(
'draw', shape='rectangle', source=self, color=color)
self._updateDrawingModeWidgets()
def _activePolygonMode(self):
"""Handle polygon action mode triggering"""
self._releaseDrawingMode()
self._drawingMode = 'polygon'
self.plot.sigPlotSignal.connect(self._plotDrawEvent)
color = self.getCurrentMaskColor()
self.plot.setInteractiveMode('draw', shape='polygon', source=self, color=color)
self._updateDrawingModeWidgets()
def _activePencilMode(self):
"""Handle pencil action mode triggering"""
self._releaseDrawingMode()
self._drawingMode = 'pencil'
self.plot.sigPlotSignal.connect(self._plotDrawEvent)
color = self.getCurrentMaskColor()
width = self.pencilSpinBox.value()
self.plot.setInteractiveMode(
'draw', shape='pencil', source=self, color=color, width=width)
break
except NotFittedError as e:
continue
if random.random() < dieProb:
person.dead = True
deaths[person.classRank] += 1
person.house.occupants.remove(person)
if len(person.house.occupants) == 0:
self.map.occupiedHouses.remove(person.house)
if (self.p['interactiveGraphics']):
self.canvas.itemconfig(person.house.icon, state='hidden')
if person.partner != None:
person.partner.partner = None
if person.house == self.displayHouse:
messageString = str(self.year) + ": #" + str(person.id) + " died aged " + str(age) + "."
self.textUpdateList.append(messageString)
self.pop.livingPeople[:] = [x for x in self.pop.livingPeople if x.dead == False]
postDeath = len(self.pop.livingPeople)
# print('the number of people who died is: ' + str(preDeath - postDeath))
def relocateOrphans(self):
toRelocate = [list(h.occupants) for h in self.map.occupiedHouses if len([i for i in h.occupants if i.independentStatus == True]) == 0]
for household in toRelocate:
relocatingNetwork = self.orphansNetwork(household)
potentialHosts = list(relocatingNetwork.neighbors(household[0]))
if len(potentialHosts) > 0:
weights = []
hosts = []
for i in relocatingNetwork.neighbors(household[0]):
distance = relocatingNetwork[household[0]][i]['distance']
hosts.append(i)
weights.append(self.weightedIncome(i, distance))
probs = [x/sum(weights) for x in weights]
if len(hosts) < 1:
print 'Error in relocateOrphans: the list hosts is empty!'
host = np.random.choice(hosts, p = probs)
self.movePeopleIntoChosenHouse(host.house, household[0].house, household, 'relocateOrphans')
else:
# if household[0].partner == None:
adoptiveMothers = [x for x in self.pop.livingPeople if x.sex == 'female' and x.partner != None and x.independentStatus == True and self.householdIncome(x.house.occupants) > 0]
if len(adoptiveMothers) < 1:
print 'Error in relocateOrphans: the list adoptiveMothers is empty!'
adoptiveMother = random.choice(adoptiveMothers)
for person in household:
if person.status == 'child' or person.status == 'teenager':
person.mother = adoptiveMother
adoptiveMother.children.append(person)
person.father = adoptiveMother.partner
adoptiveMother.partner.children.append(person)
if adoptiveMother.house == self.displayHouse:
self.textUpdateList.append(str(self.year) + ": #" + str(person.id) +
" and brothers have been newly adopted by " + str(adoptiveMother.id) + "." )
self.movePeopleIntoChosenHouse(adoptiveMother.house, household[0].house, household, 'relocateOrphans')
def orphansNetwork(self, household):
H = nx.Graph()
households = []
firstNode = household[0]
households.append(household[0].house)
H.add_node(firstNode)
for member in household:
if member.father != None:
income = self.householdIncome(member.father.house.occupants)
if member.father.dead == False and member.father.house not in households and member.father.independentStatus == True and income > 0:
H.add_edge(firstNode, member.father, distance = 1)
households.append(member.father.house)
income = self.householdIncome(member.mother.house.occupants)
if member.mother.dead == False and member.mother.house not in households and member.mother.independentStatus == True and income > 0:
H.add_edge(firstNode, member.mother, distance = 1)
households.append(member.mother.house)
# Grandparents
if member.father != None and member.father.father != None:
income = self.householdIncome(member.father.father.house.occupants)
if member.father.father.dead == False and member.father.father.house not in households and member.father.father.independentStatus == True and income > 0:
H.add_edge(firstNode, member.father.father, distance = 2)
households.append(member.father.father.house)
income = self.householdIncome(member.father.mother.house.occupants)
if member.father.mother.dead == False and member.father.mother.house not in households and member.father.mother.independentStatus == True and income > 0:
H.add_edge(firstNode, member.father.mother, distance = 2)
households.append(member.father.mother.house)
if member.father != None and member.mother.father != None:
income = self.householdIncome(member.mother.father.house.occupants)
if member.mother.father.dead == False and member.mother.father.house not in households and member.mother.father.independentStatus == True and income > 0:
H.add_edge(firstNode, member.mother.father, distance = 2)
households.append(member.mother.father.house)
income = self.householdIncome(member.mother.mother.house.occupants)
if member.mother.mother.dead == False and member.mother.mother.house not in households and member.mother.mother.independentStatus == True and income > 0:
H.add_edge(firstNode, member.mother.mother, distance = 2)
households.append(member.mother.mother.house)
# Indipendent children
for child in member.children:
income = self.householdIncome(child.house.occupants)
if child.dead == False and child.house not in households and child.independentStatus == True and income > 0:
H.add_edge(firstNode, child, distance = 1)
households.append(child.house)
# Independent grandchildren
for child in member.children:
for grandson in child.children:
income = self.householdIncome(grandson.house.occupants)
if grandson.dead == False and grandson.house not in households and grandson.independentStatus == True and income > 0:
H.add_edge(firstNode, grandson, distance = 2)
households.append(grandson.house)
# Indipendent brothers and sisters
if member.father != None:
brothers = list(set(member.father.children+member.mother.children))
brothers = [x for x in brothers if x.dead == False]
brothers.remove(member)
for brother in brothers:
income = self.householdIncome(brother.house.occupants)
if brother.dead == False and brother.house not in households and brother.independentStatus == True and income > 0:
H.add_edge(firstNode, brother, distance = 2)
households.append(brother.house)
for child in brother.children:
income = self.householdIncome(child.house.occupants)
if child.dead == False and child.house not in households and child.independentStatus == True and income > 0:
H.add_edge(firstNode, child, distance = 3)
households.append(child.house)
# Uncles and aunts
uncles = []
maternalUncles = []
paternalUncles = []
if member.father != None and member.father.father != None:
paternalUncles = list(set(member.father.father.children + member.father.mother.children))
paternalUncles.remove(member.father)
if member.father != None and member.mother.father != None:
maternalUncles = list(set(member.mother.father.children + member.mother.mother.children))
maternalUncles.remove(member.mother)
unclesList = list(set(maternalUncles+paternalUncles))
unclesList = [x for x in unclesList if x.dead == False]
for uncle in unclesList:
income = self.householdIncome(uncle.house.occupants)
if uncle.dead == False and uncle.house not in households and uncle.independentStatus == True and income > 0:
H.add_edge(firstNode, uncle, distance = 3)
households.append(uncle.house)
return H
def weightedIncome(self, person, distance):
sizeHousehold = float(len(person.house.occupants))
totalIncome = sum([x.income for x in person.house.occupants])
averageIncome = totalIncome/sizeHousehold
weightedIncome = averageIncome/math.exp(self.p['orphansRelocationParam']*(distance-1))
return weightedIncome
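# Worked example (illustrative; the parameter value is assumed): with an average
# household income of 100, orphansRelocationParam = 0.5 and kinship distance 2,
# weightedIncome = 100 / exp(0.5 * (2 - 1)) ~= 60.65, so closer relatives with the
# same income are proportionally more likely to be drawn as hosts.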
def doRegressions(self):
if self.year == self.p['implementPoliciesFromYear'] and self.p['noPolicySim'] == False:
for k in range(2):
for i in range(int(self.p['numberClasses'])):
for j in range(int(self.p['numCareLevels'])):
# print('cat: ' + str(k) + ' ' + str(i) + ' ' + str(j))
# print(len(self.inputsMortality[k][i][j]))
# print(len(self.outputMortality[k][i][j]))
# self.regressionModels_M[k][i][j] = LinearRegression()
if len(self.inputsMortality[k][i][j]) == 0:
print('Warning: RandomForestRegressor instance not fitted yet')
if len(self.inputsMortality[k][i][j]) > 0:
self.regressionModels_M[k][i][j].fit(self.inputsMortality[k][i][j], self.outputMortality[k][i][j])
# mr_predict = self.regressionModels_M[k][i][j].predict(self.inputsMortality[k][i][j])
# self.plotRegressions(self.outputMortality[k][i][j], mr_predict)
# print(self.regressionModels_M[k][i][j].score(self.inputsMortality[k][i][j], self.outputMortality[k][i][j]))
for i in range(int(self.p['numberClasses'])):
# self.regressionModels_F[i] = LinearRegression()
self.regressionModels_F[i].fit(self.inputsFertility[i], self.outputFertility[i])
def plotRegressions(self, mr, prediction):
plt.scatter(mr, prediction)
plt.show()
def doBirths(self):
preBirth = len(self.pop.livingPeople)
marriedLadies = 0
adultLadies = 0
births = [0, 0, 0, 0, 0]
marriedPercentage = []
notMarriedReproductiveWomen = [x for x in self.pop.livingPeople
if x.sex == 'female'
and x.age >= self.p['minPregnancyAge']
and x.age <= self.p['maxPregnancyAge']
and x.careNeedLevel < 3]
womenOfReproductiveAge = [x for x in self.pop.livingPeople
if x.sex == 'female'
and x.age >= self.p['minPregnancyAge']
and x.age <= self.p['maxPregnancyAge']
and x.partner != None and x.careNeedLevel < 3]
adultLadies_1 = [x for x in notMarriedReproductiveWomen if x.classRank == 0]
marriedLadies_1 = len([x for x in adultLadies_1 if x.partner != None])
if len(adultLadies_1) > 0:
marriedPercentage.append(marriedLadies_1/float(len(adultLadies_1)))
else:
marriedPercentage.append(0)
adultLadies_2 = [x for x in notMarriedReproductiveWomen if x.classRank == 1]
marriedLadies_2 = len([x for x in adultLadies_2 if x.partner != None])
if len(adultLadies_2) > 0:
marriedPercentage.append(marriedLadies_2/float(len(adultLadies_2)))
else:
marriedPercentage.append(0)
adultLadies_3 = [x for x in notMarriedReproductiveWomen if x.classRank == 2]
marriedLadies_3 = len([x for x in adultLadies_3 if x.partner != None])
if len(adultLadies_3) > 0:
marriedPercentage.append(marriedLadies_3/float(len(adultLadies_3)))
else:
marriedPercentage.append(0)
adultLadies_4 = [x for x in notMarriedReproductiveWomen if x.classRank == 3]
marriedLadies_4 = len([x for x in adultLadies_4 if x.partner != None])
if len(adultLadies_4) > 0:
marriedPercentage.append(marriedLadies_4/float(len(adultLadies_4)))
else:
marriedPercentage.append(0)
adultLadies_5 = [x for x in notMarriedReproductiveWomen if x.classRank == 4]
marriedLadies_5 = len([x for x in adultLadies_5 if x.partner != None])
if len(adultLadies_5) > 0:
marriedPercentage.append(marriedLadies_5/float(len(adultLadies_5)))
else:
marriedPercentage.append(0)
# print(marriedPercentage)
# for person in self.pop.livingPeople:
#
# if person.sex == 'female' and person.age >= self.p['minPregnancyAge']:
# adultLadies += 1
# if person.partner != None:
# marriedLadies += 1
# marriedPercentage = float(marriedLadies)/float(adultLadies)
for woman in womenOfReproductiveAge:
if self.year < 1951:
rawRate = self.p['growingPopBirthProb']
birthProb = self.computeBirthProb(self.p['fertilityBias'], rawRate, woman.classRank)
else:
rawRate = self.fert_data[(self.year - woman.birthdate)-16, self.year-1950]
birthProb = self.computeBirthProb(self.p['fertilityBias'], rawRate, woman.classRank)/marriedPercentage[woman.classRank]
# birthProb = self.computeBirthProb(self.p['fertilityBias'], rawRate, woman.classRank)
if self.p['noPolicySim'] == False:
if self.year < self.p['implementPoliciesFromYear'] and self.year >= self.p['regressionCollectFrom']:
age = woman.age-16
year = self.year-self.p['regressionCollectFrom']
regressors = [age, math.log(age), year, math.log(year+1)]
self.inputsFertility[woman.classRank].append(regressors)
dependentVariable = birthProb # [birthProb]
self.outputFertility[woman.classRank].append(dependentVariable)
elif self.year >= self.p['implementPoliciesFromYear']:
age = woman.age-16
year = self.year-self.p['regressionCollectFrom']
regressors = [age, math.log(age), year, math.log(year+1)]
r = woman.classRank
try:
birthProb = self.regressionModels_F[r].predict([regressors])
except NotFittedError as e:
for i in reversed(xrange(r)):
try:
birthProb = self.regressionModels_F[i].predict([regressors])
break
except NotFittedError as e:
continue
#baseRate = self.baseRate(self.socialClassShares, self.p['fertilityBias'], rawRate)
#fertilityCorrector = (self.socialClassShares[woman.classRank] - self.p['initialClassShares'][woman.classRank])/self.p['initialClassShares'][woman.classRank]
#baseRate *= 1/math.exp(self.p['fertilityCorrector']*fertilityCorrector)
#birthProb = baseRate*math.pow(self.p['fertilityBias'], woman.classRank)
if random.random() < birthProb: #*0.85
# (self, mother, father, age, birthYear, sex, status, house,
# classRank, sec, edu, wage, income, finalIncome):
sex = random.choice(['male', 'female'])
baby = Person(woman, woman.partner, 0, self.year, sex,
'child', woman.house, woman.classRank, woman.sec, None, 0, 0, 0, 0, 0, 0, 0, 0)
births[woman.classRank] += 1
self.pop.allPeople.append(baby)
self.pop.livingPeople.append(baby)
woman.house.occupants.append(baby)
woman.children.append(baby)
woman.partner.children.append(baby)
if woman.house == self.displayHouse:
messageString = str(self.year) + ": #" + str(woman.id) + " had a baby: #" + str(baby.id) + "."
self.textUpdateList.append(messageString)
m.x688 + m.x695 + m.x1233 == 0)
m.c1676 = Constraint(expr= - m.x360 + m.x388 + m.x395 + m.x402 - m.x458 + m.x486 + m.x493 + m.x500 - m.x556 + m.x584
+ m.x591 + m.x598 - m.x654 + m.x682 + m.x689 + m.x696 + m.x1234 == 0)
m.c1677 = Constraint(expr= - m.x361 + m.x389 + m.x396 + m.x403 - m.x459 + m.x487 + m.x494 + m.x501 - m.x557 + m.x585
+ m.x592 + m.x599 - m.x655 + m.x683 + m.x690 + m.x697 + m.x1235 == 0)
m.c1678 = Constraint(expr= - m.x362 + m.x390 + m.x397 + m.x404 - m.x460 + m.x488 + m.x495 + m.x502 - m.x558 + m.x586
+ m.x593 + m.x600 - m.x656 + m.x684 + m.x691 + m.x698 + m.x1236 == 0)
m.c1679 = Constraint(expr= - m.x363 + m.x391 + m.x398 + m.x405 - m.x461 + m.x489 + m.x496 + m.x503 - m.x559 + m.x587
+ m.x594 + m.x601 - m.x657 + m.x685 + m.x692 + m.x699 + m.x1237 == 20)
m.c1680 = Constraint(expr= - m.x364 + m.x392 + m.x399 + m.x406 - m.x462 + m.x490 + m.x497 + m.x504 - m.x560 + m.x588
+ m.x595 + m.x602 - m.x658 + m.x686 + m.x693 + m.x700 + m.x1238 == 0)
m.c1681 = Constraint(expr= - m.x365 + m.x393 + m.x400 + m.x407 - m.x463 + m.x491 + m.x498 + m.x505 - m.x561 + m.x589
+ m.x596 + m.x603 - m.x659 + m.x687 + m.x694 + m.x701 + m.x1239 == 0)
m.c1682 = Constraint(expr= - m.x366 + m.x408 + m.x415 - m.x464 + m.x506 + m.x513 - m.x562 + m.x604 + m.x611 - m.x660
+ m.x702 + m.x709 + m.x1240 == 0)
m.c1683 = Constraint(expr= - m.x367 + m.x409 + m.x416 - m.x465 + m.x507 + m.x514 - m.x563 + m.x605 + m.x612 - m.x661
+ m.x703 + m.x710 + m.x1241 == 0)
m.c1684 = Constraint(expr= - m.x368 + m.x410 + m.x417 - m.x466 + m.x508 + m.x515 - m.x564 + m.x606 + m.x613 - m.x662
+ m.x704 + m.x711 + m.x1242 == 0)
m.c1685 = Constraint(expr= - m.x369 + m.x411 + m.x418 - m.x467 + m.x509 + m.x516 - m.x565 + m.x607 + m.x614 - m.x663
+ m.x705 + m.x712 + m.x1243 == 0)
m.c1686 = Constraint(expr= - m.x370 + m.x412 + m.x419 - m.x468 + m.x510 + m.x517 - m.x566 + m.x608 + m.x615 - m.x664
+ m.x706 + m.x713 + m.x1244 == 0)
m.c1687 = Constraint(expr= - m.x371 + m.x413 + m.x420 - m.x469 + m.x511 + m.x518 - m.x567 + m.x609 + m.x616 - m.x665
+ m.x707 + m.x714 + m.x1245 == 20)
m.c1688 = Constraint(expr= - m.x372 + m.x414 + m.x421 - m.x470 + m.x512 + m.x519 - m.x568 + m.x610 + m.x617 - m.x666
+ m.x708 + m.x715 + m.x1246 == 0)
m.c1689 = Constraint(expr= - m.x373 - m.x387 + m.x422 - m.x471 - m.x485 + m.x520 - m.x569 - m.x583 + m.x618 - m.x667
- m.x681 + m.x716 + m.x1247 == 0)
m.c1690 = Constraint(expr= - m.x374 - m.x388 + m.x423 - m.x472 - m.x486 + m.x521 - m.x570 - m.x584 + m.x619 - m.x668
- m.x682 + m.x717 + m.x1248 == 0)
m.c1691 = Constraint(expr= - m.x375 - m.x389 + m.x424 - m.x473 - m.x487 + m.x522 - m.x571 - m.x585 + m.x620 - m.x669
- m.x683 + m.x718 + m.x1249 == 0)
m.c1692 = Constraint(expr= - m.x376 - m.x390 + m.x425 - m.x474 - m.x488 + m.x523 - m.x572 - m.x586 + m.x621 - m.x670
- m.x684 + m.x719 + m.x1250 == 0)
m.c1693 = Constraint(expr= - m.x377 - m.x391 + m.x426 - m.x475 - m.x489 + m.x524 - m.x573 - m.x587 + m.x622 - m.x671
- m.x685 + m.x720 + m.x1251 == 0)
m.c1694 = Constraint(expr= - m.x378 - m.x392 + m.x427 - m.x476 - m.x490 + m.x525 - m.x574 - m.x588 + m.x623 - m.x672
- m.x686 + m.x721 + m.x1252 == 0)
m.c1695 = Constraint(expr= - m.x379 - m.x393 + m.x428 - m.x477 - m.x491 + m.x526 - m.x575 - m.x589 + m.x624 - m.x673
- m.x687 + m.x722 + m.x1253 == 30)
m.c1696 = Constraint(expr= - m.x380 - m.x394 - m.x408 + m.x429 + m.x436 - m.x478 - m.x492 - m.x506 + m.x527 + m.x534
- m.x576 - m.x590 - m.x604 + m.x625 + m.x632 - m.x674 - m.x688 - m.x702 + m.x723 + m.x730
+ m.x1254 == 0)
m.c1697 = Constraint(expr= - m.x381 - m.x395 - m.x409 + m.x430 + m.x437 - m.x479 - m.x493 - m.x507 + m.x528 + m.x535
- m.x577 - m.x591 - m.x605 + m.x626 + m.x633 - m.x675 - m.x689 - m.x703 + m.x724 + m.x731
+ m.x1255 == 0)
m.c1698 = Constraint(expr= - m.x382 - m.x396 - m.x410 + m.x431 + m.x438 - m.x480 - m.x494 - m.x508 + m.x529 + m.x536
- m.x578 - m.x592 - m.x606 + m.x627 + m.x634 - m.x676 - m.x690 - m.x704 + m.x725 + m.x732
+ m.x1256 == 0)
m.c1699 = Constraint(expr= - m.x383 - m.x397 - m.x411 + m.x432 + m.x439 - m.x481 - m.x495 - m.x509 + m.x530 + m.x537
- m.x579 - m.x593 - m.x607 + m.x628 + m.x635 - m.x677 - m.x691 - m.x705 + m.x726 + m.x733
+ m.x1257 == 0)
m.c1700 = Constraint(expr= - m.x384 - m.x398 - m.x412 + m.x433 + m.x440 - m.x482 - m.x496 - m.x510 + m.x531 + m.x538
- m.x580 - m.x594 - m.x608 + m.x629 + m.x636 - m.x678 - m.x692 - m.x706 + m.x727 + m.x734
+ m.x1258 == 50)
m.c1701 = Constraint(expr= - m.x385 - m.x399 - m.x413 + m.x434 + m.x441 - m.x483 - m.x497 - m.x511 + m.x532 + m.x539
- m.x581 - m.x595 - m.x609 + m.x630 + m.x637 - m.x679 - m.x693 - m.x707 + m.x728 + m.x735
+ m.x1259 == 0)
m.c1702 = Constraint(expr= - m.x386 - m.x400 - m.x414 + m.x435 + m.x442 - m.x484 - m.x498 - m.x512 + m.x533 + m.x540
- m.x582 - m.x596 - m.x610 + m.x631 + m.x638 - m.x680 - m.x694 - m.x708 + m.x729 + m.x736
+ m.x1260 == 0)
m.c1703 = Constraint(expr= - m.x401 - m.x415 + m.x443 - m.x499 - m.x513 + m.x541 - m.x597 - m.x611 + m.x639 - m.x695
- m.x709 + m.x737 + m.x1261 == 0)
m.c1704 = Constraint(expr= - m.x402 - m.x416 + m.x444 - m.x500 - m.x514 + m.x542 - m.x598 - m.x612 + m.x640 - m.x696
- m.x710 + m.x738 + m.x1262 == 0)
m.c1705 = Constraint(expr= - m.x403 - m.x417 + m.x445 - m.x501 - m.x515 + m.x543 - m.x599 - m.x613 + m.x641 - m.x697
- m.x711 + m.x739 + m.x1263 == 0)
m.c1706 = Constraint(expr= - m.x404 - m.x418 + m.x446 - m.x502 - m.x516 + m.x544 - m.x600 - m.x614 + m.x642 - m.x698
- m.x712 + m.x740 + m.x1264 == 0)
m.c1707 = Constraint(expr= - m.x405 - m.x419 + m.x447 - m.x503 - m.x517 + m.x545 - m.x601 - m.x615 + m.x643 - m.x699
- m.x713 + m.x741 + m.x1265 == 0)
m.c1708 = Constraint(expr= - m.x406 - m.x420 + m.x448 - m.x504 - m.x518 + m.x546 - m.x602 - m.x616 + m.x644 - m.x700
- m.x714 + m.x742 + m.x1266 == 30)
m.c1709 = Constraint(expr= - m.x407 - m.x421 + m.x449 - m.x505 - m.x519 + m.x547 - m.x603 - m.x617 + m.x645 - m.x701
- m.x715 + m.x743 + m.x1267 == 0)
m.c1710 = Constraint(expr= - m.x422 - m.x429 - m.x520 - m.x527 - m.x618 - m.x625 - m.x716 - m.x723 + m.x1268 == 0)
m.c1711 = Constraint(expr= - m.x423 - m.x430 - m.x521 - m.x528 - m.x619 - m.x626 - m.x717 - m.x724 + m.x1269 == 0)
m.c1712 = Constraint(expr= - m.x424 - m.x431 - m.x522 - m.x529 - m.x620 - m.x627 - m.x718 - m.x725 + m.x1270 == 0)
m.c1713 = Constraint(expr= - m.x425 - m.x432 - m.x523 - m.x530 - m.x621 - m.x628 - m.x719 - m.x726 + m.x1271 == 0)
m.c1714 = Constraint(expr= - m.x426 -
"""
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ...
Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ...
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]),
... 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1])
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
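# Worked example (illustrative, not part of pandas): for x spanning [0, 10] with
# bins=4 and right=True, np.linspace(0, 10, 5) gives edges [0, 2.5, 5, 7.5, 10] and
# the first edge is lowered by (10 - 0) * 0.001 to -0.01, so the minimum value 0
# still falls inside the first half-open bin (-0.01, 2.5].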
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins)))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
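# Worked example (illustrative): with bins = [1, 2, 3, 4] and right=True the search
# side is 'left', so x = 2.0 gets searchsorted index 1 and label (1, 2], while
# x = 2.5 gets index 2 and label (2, 3]; values outside [1, 4] yield ids of 0 or
# len(bins) and are turned into NaN through na_mask above.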
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
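# Illustrative sketch (not part of pandas): the datetime branch above views the
# parsed timestamps as int64 nanoseconds since the epoch, e.g.
#
#   to_datetime(["1970-01-01", "1970-01-02"]).view(np.int64)
#   # -> array([0, 86400000000000])
#
# so the binning arithmetic can treat them as plain integers.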
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
| |
"top-secret",
"roma",
"surplus",
"judo",
"say-",
"doctrine",
"accessible",
"crazed",
"audi",
"purposely",
"sleet",
"yugoslavia",
"ostrich",
"no-no",
"gunning",
"estelle",
"oversee",
"atrocities",
"landmark",
"holdings",
"generators",
"cheerio",
"unimaginable",
"stimulate",
"what`s",
"incriminating",
"jap",
"clangs",
"fanatic",
"surreal",
"sewage",
"dehydrated",
"acoustic",
"ante",
"afloat",
"kapoor",
"genevieve",
"charisma",
"sadistic",
"zeo",
"iconic",
"enormously",
"constructive",
"elk",
"exhaling",
"admissions",
"inexperienced",
"misjudged",
"mast",
"clubhouse",
"meade",
"illustrious",
"prepping",
"declaring",
"nucleus",
"whomever",
"councillor",
"curls",
"camden",
"cowl",
"townsend",
"a-ha",
"unsure",
"mag",
"colton",
"prentiss",
"costello",
"chul",
"henrietta",
"cures",
"marla",
"swanson",
"autographs",
"borderline",
"provocation",
"perverse",
"nutrients",
"occult",
"cadets",
"socket",
"shawl",
"mio",
"markham",
"framing",
"junkyard",
"cabot",
"twitch",
"influences",
"browning",
"jfk",
"upward",
"cooped",
"rooted",
"deserving",
"'li",
"goro",
"dost",
"burps",
"ludicrous",
"demonstrations",
"biography",
"cy",
"concludes",
"retreating",
"satin",
"bridesmaid",
"penicillin",
"murmur",
"martinis",
"deployment",
"pence",
"predecessor",
"feeny",
"canon",
"tian",
"underlying",
"avon",
"accessed",
"blowjob",
"ashton",
"transfusion",
"blockade",
"mischievous",
"doreen",
"jiffy",
"repressed",
"driscoll",
"kaplan",
"showered",
"biff",
"yarn",
"astonished",
"her-",
"catchy",
"titanium",
"maguire",
"deserter",
"dah",
"documentation",
"guineas",
"hotch",
"explored",
"bom",
"dunham",
"playoffs",
"faraday",
"feisty",
"explorers",
"w-we",
"blanca",
"imam",
"plainly",
"federico",
"theaters",
"inadequate",
"caddy",
"astounding",
"highlands",
"hustler",
"clans",
"paternity",
"washer",
"borne",
"gilles",
"disclosure",
"negotiated",
"attracting",
"andie",
"indigestion",
"spur",
"cabe",
"mayer",
"schnapps",
"hagen",
"renegade",
"barley",
"koji",
"spiked",
"complains",
"repairing",
"cheapest",
"foxes",
"barf",
"sox",
"magicians",
"inquest",
"humidity",
"featured",
"keiko",
"midwest",
"reptile",
"priscilla",
"mythical",
"paramount",
"bitching",
"unpaid",
"neatly",
"abbie",
"volvo",
"hippo",
"smacked",
"smokin",
"brigitte",
"dodger",
"fallin",
"rigby",
"shepherds",
"swamped",
"hailey",
"prepped",
"frivolous",
"copyright",
"twenty-two",
"witnessing",
"duly",
"glide",
"l`ll",
"enclosed",
"fai",
"nameless",
"rai",
"armando",
"electrons",
"constellation",
"howe",
"inhabited",
"sketchy",
"implore",
"malice",
"no-good",
"u0",
"agreements",
"johanna",
"deformed",
"billing",
"cornwall",
"crusher",
"strains",
"orchids",
"riker",
"outdated",
"reinforced",
"yamada",
"newark",
"trainers",
"janis",
"provocative",
"hemingway",
"ci",
"complimentary",
"bethlehem",
"suppressed",
"bordeaux",
"goodies",
"geeks",
"festivities",
"checkers",
"migration",
"brig",
"bloodstream",
"distraught",
"grasshopper",
"sir.",
"ashok",
"ah-ha",
"hardships",
"tickling",
"desolate",
"masturbation",
"fiasco",
"jem",
"rudi",
"kung-fu",
"sufficiently",
"hereditary",
"psychologically",
"orient",
"gullible",
"falsely",
"friar",
"tuvok",
"underwood",
"abed",
"full-on",
"17-year-old",
"gunpoint",
"jim-bob",
"jiro",
"luo",
"vern",
"realtor",
"attire",
"clientele",
"passages",
"dreaded",
"hatched",
"hardened",
"navid",
"innovative",
"kroner",
"patched",
"guidelines",
"3d",
"juices",
"lockup",
"delgado",
"slopes",
"conclusive",
"latex",
"chennai",
"rey",
"deathbed",
"collapses",
"schoolgirl",
"fuzz",
"imposing",
"settings",
"nationwide",
"schoolteacher",
"dealership",
"kei",
"hearings",
"negotiator",
"drills",
"brightly",
"dames",
"neutralize",
"rosario",
"ninjas",
"manicure",
"meditate",
"eavesdropping",
"viewer",
"mormon",
"wake-up",
"dipping",
"splits",
"muzzle",
"redneck",
"seatbelt",
"richter",
"atone",
"envoy",
"sentiments",
"poorer",
"tia",
"sweeps",
"forgives",
"upside-down",
"improper",
"niagara",
"highland",
"hardworking",
"enlist",
"wilcox",
"medically",
"chittering",
"pappy",
"scarred",
"claps",
"gia",
"outcast",
"faucet",
"prepares",
"taboo",
"weaponry",
"consensus",
"breathed",
"maneuvers",
"archaeologist",
"tolls",
"gobble",
"gibbering",
"rican",
"maud",
"darby",
"hernandez",
"smitty",
"rica",
"widespread",
"scraped",
"boutique",
"quake",
"ignite",
"provider",
"sprinkle",
"serbia",
"contradict",
"realities",
"valentina",
"shaping",
"powdered",
"indestructible",
"electrocuted",
"silverware",
"disorders",
"everytime",
"terence",
"albany",
"armory",
"delegates",
"pathology",
"inspires",
"poked",
"scumbags",
"cyclops",
"floated",
"intrigue",
"auditorium",
"kashmir",
"inspirational",
"doodle",
"raisins",
"sibling",
"emery",
"anomalies",
"forrester",
"honorary",
"concession",
"midway",
"reload",
"sweaters",
"dormitory",
"accelerated",
"fluke",
"mmm-mmm",
"larkin",
"dwarves",
"nutrition",
"kendra",
"warlock",
"exploiting",
"bloated",
"proclaim",
"middleton",
"choreography",
"patriotism",
"sash",
"detached",
"niki",
"fest",
"intensely",
"raced",
"1950s",
"sprint",
"endorsement",
"gage",
"carlisle",
"hobo",
"gauntlet",
"crowbar",
"progressing",
"groin",
"prudence",
"inseparable",
"chopsticks",
"guthrie",
"foo",
"cavanaugh",
"saxophone",
"fiat",
"drains",
"thrusters",
"tink",
"visibility",
"passionately",
"wannabe",
"klara",
"tiresome",
"complexity",
"homicidal",
"l.a.p.d.",
"succeeds",
"rom",
"scolding",
"indicted",
"prix",
"alexa",
"uninvited",
"insider",
"kamal",
"moz",
"bondage",
"chao",
"taser",
"indifference",
"tum",
"fscx100",
"slang",
"tending",
"compton",
"gunn",
"campaigns",
"resistant",
"baines",
"urged",
"reacts",
"hard-working",
"snag",
"fidelity",
"mcnally",
"rattled",
"stalls",
"quickest",
"copycat",
"wilde",
"damnation",
"burma",
"cashed",
"dodging",
"canopy",
"ridley",
"ilana",
"ogden",
"goof",
"twister",
"rolex",
"seriousness",
"toothless",
"reckoned",
"macdonald",
"retching",
"immensely",
"overlooking",
"airway",
"lm",
"rickshaw",
"esposito",
"propulsion",
"elsie",
"butchered",
"orbiting",
"herds",
"comprehensive",
"reckoning",
"shelton",
"israelis",
"thursdays",
"fabian",
"journeys",
"confiscate",
"coworkers",
"checkup",
"wad",
"aeroplane",
"innocents",
"endeavor",
"shootings",
"daybreak",
"mandate",
"just--i",
"estimates",
"inflict",
"drapes",
"bellamy",
"wolverine",
"socrates",
"subspace",
"notebooks",
"let`s",
"unrest",
"annika",
"son-of-a-bitch",
"hyuk",
"p.s.",
"shiro",
"lala",
"cram",
"salts",
"infinitely",
"exemplary",
"bearings",
"replicate",
"perfected",
"cao",
"olympus",
"baboon",
"reproach",
"completing",
"medusa",
"'lady",
"sewed",
"teased",
"karev",
"adversary",
"captions",
"doyou",
"stitched",
"abruptly",
"stacks",
"sioux",
"rapes",
"ether",
"hoodie",
"namaste",
"extradition",
"vincenzo",
"ange",
"cambodia",
"clicked",
"tallest",
"jeb",
"unison",
"ion",
"bozo",
"undertaking",
"relating",
"enrolled",
"sprouts",
"secretaries",
"projected",
"turkeys",
"irwin",
"fink",
"good-for-nothing",
"zedd",
"sipping",
"africans",
"macarthur",
"twenty-one",
"visionary",
"borden",
"talkative",
"alyssa",
"sie",
"kemp",
"jeffries",
"unkind",
"pegged",
"conservation",
"veto",
"blondes",
"heartfelt",
"outlook",
"wrinkled",
"valeria",
"teleport",
"interrogating",
"diabolical",
"breather",
"desks",
"thirty-five",
"eyebrow",
"tut",
"sightseeing",
"unwilling",
"profoundly",
"barclay",
"dearie",
"dae",
"nearing",
"hester",
"franks",
"rashid",
"auditioning",
"dyed",
"pharmaceuticals",
"cantonese",
"pika",
"martina",
"clarkson",
"toki",
"outright",
"ito",
"coincidences",
"downside",
"oats",
"ortiz",
"administered",
"downward",
"okinawa",
"graduates",
"palate",
"jihad",
"nakamura",
"emmet",
"lucrezia",
"martians",
"nasal",
"enduring",
"winged",
"thrashing",
"dumber",
"faggots",
"bernice",
"vibration",
"musketeers",
"tumour",
"stature",
"petrified",
"erich",
"ieast",
"hiss",
"baht",
"rematch",
"lakshmi",
"lightweight",
"rah",
"'neil",
"eunice",
"pharrell",
"attentive",
"dunbar",
"negatives",
"ge",
"go-",
"fates",
"equity",
"ringer",
"tvs",
"brooch",
"delinquent",
"schizophrenia",
"bangles",
"sculptor",
"netherlands",
"inca",
"ado",
"odessa",
"marisol",
"neurological",
"slums",
"comets",
"strife",
"coordinator",
"chokes",
"santana",
"guerrilla",
"charismatic",
"xev",
"garret",
"startling",
"historically",
"funnel",
"curator",
"improvements",
"campfire",
"castillo",
"himalayas",
"storeroom",
"jez",
"tulip",
"looting",
"pinball",
"decaf",
"marshmallow",
"guitars",
"sharpen",
"darla",
"refreshments",
"tensions",
"mckinley",
"hathaway",
"leaping",
"reconciliation",
"vou",
"greenwich",
"waterloo",
"isaiah",
"ofyou",
"idols",
"induce",
"cosmetic",
"grooming",
"dalek",
"rejecting",
"cortez",
"schooling",
"wasteland",
"blueprint",
"biz",
"smokey",
"nostalgic",
"lon",
"leprechaun",
"zulu",
"dinging",
"hoof",
"timetable",
"transmissions",
"barkley",
"analogy",
"stomping",
"judgmental",
"bagels",
"levine",
"meowing",
"wallets",
"nightgown",
"demolished",
"bobbie",
"lafayette",
"electron",
"cones",
"motherfuckin",
"calculation",
"dreamy",
"vigilant",
"asgard",
"suppliers",
"vogel",
"companionship",
"dangling",
"pic",
"rushes",
"paired",
"withholding",
"clones",
"burnett",
"bolivia",
"commitments",
"airs",
"tubbs",
"demented",
"apprehend",
"learner",
"minions",
"toro",
"scenarios",
"sandro",
"fda",
"overreacted",
"contradiction",
"gregg",
"vat",
"nia",
"bellies",
"mardi",
"horizons",
"rev",
"puny",
"nominee",
"insolence",
"wartime",
"fm",
"c.i.a.",
"shing",
"grapefruit",
"deodorant",
"concede",
"homosexuals",
"hierarchy",
"remnants",
"detectors",
"sever",
"hendricks",
"nachos",
"jens",
"munitions",
"gyu",
"trailing",
"rochelle",
"folds",
"attitudes",
"twain",
"develops",
"stomachs",
"emerges",
"chilli",
"hotline",
"dries",
"acceleration",
"gruber",
"surya",
"homicides",
"scrubbed",
"murph",
"seduction",
"trait",
"ezekiel",
"translating",
"chemotherapy",
"hazardous",
"taelon",
"qué",
"hackers",
"pancho",
"asteroids",
"defines",
"meatloaf",
"nephews",
"boobies",
"collide",
"mohan",
"iphone",
"flores",
"salaries",
"porky",
"coulda",
"sheltered",
"revoked",
"cinematography",
"variation",
"maia",
"railways",
"burglars",
"asa",
"milkshake",
"newbie",
"undertake",
"fergus",
"stellar",
"dud",
"abode",
"isles",
"elmer",
"tiniest",
"latitude",
"crucify",
"that.",
"salazar",
"clarice",
"sledge",
"shrewd",
"blurred",
"'is",
"spatter",
"menus",
"nostrils",
"filet",
"lexx",
"locally",
"consistently",
"u-turn",
"synchronized",
"tres",
"j.t.",
"bumblebee",
"mccall",
"thrashed",
"annabel",
"tutoring",
"ingenuity",
"scriptures",
"visualize",
"juggling",
"beige",
"oslo",
"hypnotized",
"enigma",
"lanterns",
"marseilles",
"memento",
"bedford",
"uphill",
"prosecuting",
"volumes",
"contraband",
"claudio",
"z.",
"debating",
"bagged",
"albanian",
"staten",
"cahill",
"ravine",
"protesters",
"writ",
"stalked",
"heroism",
"prolonged",
"benevolent",
"squashed",
"baloney",
"e-mailed",
"sweethearts",
"sweatshirt",
"nutcase",
"lecturing",
"developer",
"1970s",
"deity",
"quebec",
"unni",
"jailed",
"pokémon",
"attain",
"distributor",
"hype",
"shackles",
"one-eyed",
"portions",
"housework",
"ferret",
"mami",
"snort",
"kiev",
"ghostly",
"chandelier",
"dubious",
"mstoll",
"mammal",
"suspecting",
"bubbly",
"jacky",
"follower",
"starling",
"sup",
"albuquerque",
"smugglers",
"consuming",
"burp",
"jams",
"neighboring",
"mathias",
"explicit",
"titty",
"dual",
"anwar",
"plastered",
"gums",
"extends",
"win-win",
"bitchy",
"nicotine",
"edmond",
"alumni",
"boop",
"tremendously",
"madhouse",
"hearse",
"snickering",
"wizards",
"oaks",
"humankind",
"belfast",
"partisans",
"recollection",
"holdup",
"mantis",
"martyrs",
"micki",
"birthmark",
"baldy",
"supporter",
"fundamentally",
"exclude",
"traveller",
"ichi",
"vp",
"unravel",
"ratted",
"tucson",
"stabilized",
"banter",
"drilled",
"malloy",
"good.",
"red-handed",
"architects",
"taxis",
"consecutive",
"untouchable",
"trough",
"señorita",
"nisha",
"wither",
"pew",
"pesetas",
"accelerating",
"brides",
"inventing",
"embark",
"stalled",
"chameleon",
"spawn",
"selves",
"nicholson",
"taggart",
"quad",
"sellers",
"stevenson",
"walkie-talkie",
"fatter",
"tendencies",
"rodent",
"soaring",
"bolton",
"résumé",
"commits",
"measles",
"schwartz",
"slid",
"yanks",
"tyrone",
"kaylie",
"charities",
"vamos",
"benign",
"charmer",
"aligned",
"topped",
"gnome",
"wholesale",
"sur",
"didn`t",
"plunged",
"paddles",
"laddie",
"blitz",
"dole",
"barging",
"mildly",
"reflecting",
"tended",
"staked",
"cylons",
"boldly",
"vocation",
"na-na",
"leung",
"memoirs",
"infidelity",
"nugget",
"cecile",
"operators",
"aki",
"swells",
"securities",
"rejects",
"dialysis",
"lineage",
"notions",
"wharf",
"'argo",
"comedians",
"polygraph",
"recipient",
"atta",
"beetles",
"nests",
"puttin",
"recommendations",
"adopting",
"complication",
"hemorrhage",
"annalise",
"reconstruct",
"anesthetic",
"consort",
"revived",
"mats",
"chaz",
"owing",
"unorthodox",
"slop",
"meticulous",
"seagull",
"damnit",
"forthcoming",
"dion",
"carnegie",
"cobbler",
"quark",
"freezes",
"simpsons",
"honours",
"manually",
"resigning",
"polishing",
"vibes",
"attila",
"cranberry",
"gigolo",
"persist",
"puffy",
"swiped",
"fir",
"hawke",
"startle",
"wingman",
"submarines",
"daffy",
"erect",
"mutually",
"roadside",
"prodigy",
"horton",
"kerosene",
"editors",
"governess",
"integrated",
"canine",
"reformed",
"lifeline",
"lockers",
"hors",
"mondays",
"world-class",
"brighten",
"chavez",
"achieving",
"scottie",
"optical",
"-i",
"lug",
"lag",
"spirited",
"14-year-old",
"hark",
"hopelessly",
"bouncy",
"respecting",
"bueno",
"beirut",
"mounting",
"hurried",
"kimchi",
"jong",
"moretti",
"slaughterhouse",
"davina",
"shilling",
"tarts",
"overslept",
"grinning",
"collars",
"yuen",
"kneeling",
"billings",
"selfishness",
"unloading",
"mets",
"sanderson",
"straps",
"eviction",
"rog",
"eliminating",
"bae",
"manhunt",
"whirlwind",
"dodo",
"munna",
"lobsters",
"riviera",
"millimeter",
"jimi",
"fished",
"wield",
"detachment",
"mps",
"legions",
"hasan",
"hooper",
"crabtree",
"fragrant",
"marek",
"glands",
"palaces",
"delaware",
"hurley",
"faction",
"ive",
"rooney",
"nominees",
"sleepwalking",
"hairstyle",
"cryin",
"pastries",
"eloise",
"casts",
"tijuana",
"marple",
"cabs",
"synchro",
"spotless",
"sobriety",
"beka",
"gulp",
"intensifies",
"theres",
"heirloom",
"merrily",
"unified",
"fixes",
"briefs",
"aesthetic",
"swarming",
"campers",
"directory",
card.controller = card.owner
else:
remoteCall(card.controller, 'passControl', [card, card.owner])
else: #skip detachment since the card is not involved in attachments
return ""
setGlobalVariable('cattach', str(cattach))
return text
def autopersist(card):
if card.group.name == "Graveyard":
stackData = autoCast(card)
resolveData = autoResolve(card)
etbData = autoTrigger(card, 'etb', cost = resolveData['cost'], x = resolveData['x'])
card.markers[counters['m1m1']] += 1
return ", persisting"
else:
return ""
def autoundying(card):
if card.group.name == "Graveyard":
stackData = autoCast(card)
resolveData = autoResolve(card)
etbData = autoTrigger(card, 'etb', cost = resolveData['cost'], x = resolveData['x'])
card.markers[counters['p1p1']] += 1
return ", undying"
else:
return ""
def automoveto(card, pile):
n = rnd(0, len(card.owner.Library)) #we need to delay scripts here, might as well find n for shuffle
try:
pos = int(pile)
card.moveTo(card.owner.Library, pos)
if pos == 0:
text = "top of Library"
else:
text = "{} from top of Library".format(pos)
except:
if re.search(r'bottom', pile):
card.moveToBottom(card.owner.Library)
text = "bottom of library"
elif re.search(r'shuffle', pile):
card.moveTo(card.owner.Library, n)
shuffle(card.owner.Library, silence = True)
text = "shuffled into Library"
elif re.search(r'exile', pile):
stackData = autoTrigger(card, 'exile')
if stackData == "BREAK":
return ''
card.moveTo(card.owner.piles['Exiled Zone'])
text = "exile" + stackData['text']
elif re.search(r'hand', pile):
stackData = autoTrigger(card, 'hand')
if stackData == "BREAK":
return ''
card.moveTo(card.owner.hand)
text = "hand" + stackData['text']
elif re.search(r'graveyard', pile):
stackData = autoTrigger(card, 'destroy')
if stackData == "BREAK":
return ''
card.moveTo(card.owner.Graveyard)
text = "graveyard" + stackData['text']
elif re.search(r'stack', pile):
stackData = autoCast(card)
if stackData == "BREAK":
return ''
text = "stack" + stackData['text']
elif re.search(r'table', pile):
text = 'battlefield'
alternate = card.alternate
stackData = autoCast(card)
if stackData == "BREAK":
return ''
text += stackData['text']
card.alternate = alternate
resolveData = autoResolve(card)
if resolveData == "BREAK":
return ''
text += resolveData['text']
etbData = autoTrigger(card, 'etb', cost = resolveData['cost'], x = resolveData['x'])
return ", moving to {}".format(text)
def autocounter(card, stackData, tag):
name = tag[0]
if name not in me.counters:
        return ""
if len(tag) > 1:
qty = tag[1]
else:
qty = 1
quantity = cardcount(card, stackData, qty)
if quantity == 0: return ""
me.counters[name].value += quantity
return ", {} {}".format(quantity, name)
def autotransform(card, tag):
if tag == "no":
return ""
if tag == "ask":
if not confirm("Transform {}?".format(card.Name)):
return ""
if 'transform' in card.alternates:
if card.alternate == '':
card.alternate = 'transform'
else:
card.alternate = ''
elif 'meld' in card.alternates:
if card.alternate == '':
card.alternate = 'meld'
else:
card.alternate = ''
elif 'modal_dfc' in card.alternates:
if card.alternate == '':
card.alternate = 'modal_dfc'
else:
card.alternate = ''
else:
whisper("Oops, transform cards aren't ready yet!")
return ", transforming to {}".format(card)
def autotoken(card, stackData, tag):
name = tag[0]
if len(tag) > 1:
qty = tag[1]
else:
qty = 1
if len(tag) > 2: #since the modifiers are optional
modifiers = tag[2:]
else:
modifiers = []
quantity = cardcount(card, stackData, qty)
if quantity > 0:
for x in range(0, quantity):
token = tokenArtSelector(name)
for modtag in modifiers:
if modtag == 'attack':
token.highlight = AttackColor
elif modtag == 'tap':
token.orientation = Rot90
elif re.search(r'marker', modtag):
(marker, type, qty) = modtag.split('_', 2)
token.markers[counters[type]] += cardcount(token, stackData, qty)
elif modtag == 'attach':
autoattach(card, token)
return ", creating {} {}/{} {} {} token{}".format(quantity, token.Power, token.Toughness, token.Color, token.name, "" if quantity == 1 else "s")
else:
return ""
def automarker(card, stackData, tag):
markername = tag[0]
if len(tag) > 1:
qty = tag[1]
else:
qty = 1
quantity = cardcount(card, stackData, qty)
originalquantity = int(str(quantity))
if markername not in counters: ## make sure the marker exists in the dict
addmarker = (markername, "d9eb829e-55ad-4376-b109-884b0dad3d4b")
else:
addmarker = counters[markername]
while markername == "p1p1" and counters["m1m1"] in card.markers and quantity > 0:
card.markers[counters["m1m1"]] -= 1
quantity -= 1
while markername == "m1m1" and counters["p1p1"] in card.markers and quantity > 0:
card.markers[counters["p1p1"]] -= 1
quantity -= 1
card.markers[addmarker] += quantity
if originalquantity > 0:
sign = "+"
else:
sign = ""
return ", {}{} {}{}".format(sign, originalquantity, addmarker[0], "" if quantity == 1 else "s")
def autohighlight(card, color):
if color == "nountap":
if card.highlight == AttackColor:
card.highlight = AttackDoesntUntapColor
elif card.highlight == BlockColor:
card.highlight = BlockDoesntUntapColor
else:
card.highlight = DoesntUntapColor
text = "does not untap"
elif color == "attack":
if card.highlight == DoesntUntapColor:
card.highlight = AttackDoesntUntapColor
else:
card.highlight = AttackColor
text = "attacking"
elif color == "block":
if card.highlight == DoesntUntapColor:
card.highlight = BlockDoesntUntapColor
else:
card.highlight = BlockColor
text = "blocking"
else:
text = ""
return ", {}".format(text)
def autotapped(card):
card.orientation = Rot90
return ", tapped"
def autountapped(card):
card.orientation = Rot0
return ", untapped"
def autosmartmarker(card, marker):
if marker in counters:
setGlobalVariable("smartmarker", marker)
notify("{} sets the Smart Counter to {}.".format(me, counters[marker][0]))
return ""
############################
# Alignment Stuff
############################
def attach(card, x = 0, y = 0):
mute()
if autoscriptCheck():
target = [cards for cards in table if cards.targetedBy]
if len(target) == 0 or (len(target) == 1 and card in target):
text = autodetach(card)
elif len(target) == 1:
text = autoattach(card, target[0])
else:
whisper("Incorrect targets, select up to 1 target.")
return
if text == "":
return
notify("{} {}.".format(me, text))
cardalign()
else:
whisper("Autoscripts must be enabled to use this feature")
def align(group, x = 0, y = 0): ## This is the menu groupaction for aligning ALL your cards.
mute()
global alignIgnore
alignIgnore = [] ## forces all cards to realign, even ones previously anchored to the board
if cardalign(force = True) != "BREAK": ## Don't show the notify message if cardalign fails
notify("{} re-aligns his cards on the table".format(me))
def alignCard(cards, x = 0, y = 0): ##This is the menu cardaction for reactivating alignment on a card
mute()
if not alignCheck():
whisper("Cannot align card: You must enable auto-alignment.")
return
global alignIgnore
for card in cards:
if card in alignIgnore:
alignIgnore.remove(card)
cardalign()
def isStack(card): ## Checks to see if the card is on the stack
mute()
for marker in scriptMarkers.values():
if marker in card.markers:
return True
return False
def playerSide(): ## Initializes the player's top/bottom side of table variables
mute()
global playerside
if playerside == None: ## script skips this if playerside has already been determined
if Table.isTwoSided():
if me.isInverted:
playerside = -1 # inverted (negative) side of the table
else:
playerside = 1
else: ## If two-sided table is disabled, assume the player is on the normal side.
playerside = 1
return playerside
def sideFlip(): ## Initializes the player's left/right side of table variables
mute()
global sideflip
if sideflip == None: ##Initialize sideflip
        playersort = sorted(getPlayers(), key=lambda player: player._id) ##makes a sorted players list so it's consistent between all players
playercount = [p for p in playersort if me.isInverted == p.isInverted] ##counts the number of players on your side of the table
if len(playercount) > 2: ##since alignment only works with a maximum of two players on each side
whisper("Cannot set sideflip: Too many players on your side of the table.")
sideflip = 0 ##disables alignment for the rest of the play session
elif playercount[0] == me: ##if you're the 'first' player on this side, you go on the positive (right) side
sideflip = 1
else:
sideflip = -1
return sideflip
def cardalign(force = False):
mute()
timer = time.clock()
if sideFlip() == 0: ## the 'disabled' state for alignment so the alignment positioning doesn't have to process each time
return
if not Table.isTwoSided(): ## the case where two-sided table is disabled
whisper("Cannot align: Two-sided table is required for card alignment.")
global sideflip
sideflip = 0 ## disables alignment for the rest of the play session
return
alignQueue = {}
## align the stack
stackcount = 0
lastCard = None ## Contains the last card to be aligned, so remoteCall knows which card to tuck the aligned cards behind
for card in table:
if isStack(card): ## check to see if the card is on the stack
if card.controller == me: ## It's safe to move your own cards on the stack
card.moveToTable(0, 10 * stackcount)
card.index = stackcount
else: ## you must send a remoteCall to the other players to align their stack cards
position = (card._id, 0, 10 * stackcount, lastCard)
controller = card.controller
if controller not in alignQueue:
alignQueue[controller] = []
alignQueue[controller].append(position)
            lastCard = card
    2D equal-angle stereographic projection onto the Y-Z plane."""
Y = y/(1+x)
Z = z/(1+x)
return Y,Z
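# Illustrative sanity check for the projection defined above (a sketch only, not
# part of the plotting pipeline): with Y = y/(1+x) and Z = z/(1+x), the point
# (1, 0, 0) on the unit sphere maps to the origin of the Y-Z plane, and any
# point on the great circle x = 0 lands on the unit circle.
def _demo_stereographic_projection():
    assert equal_angle_stereographic_projection_conv_YZ_plane(1., 0., 0.) == (0., 0.)
    Y, Z = equal_angle_stereographic_projection_conv_YZ_plane(0., 0.6, 0.8)
    assert abs(Y ** 2 + Z ** 2 - 1.0) < 1e-12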
def plot_Lune(MTs, MTp, six_MT_max_prob=[], frac_to_sample=0.1, figure_filename=[], plot_max_prob_on_Lune=False):
"""Function to plot Lune plot for certain inversions (if Lune plot is relevent, i.e. not DC constrained or single-force constrained).
Will plot sampled MT solutions on Lune, binned. Will also fit gaussian to this and return the maximum location of the gaussian and the contour coordinates. Also outputs saved figure."""
# Get sample of MT solutions for fitting Gaussian to:
MTs_sample, MTp_sample = get_frac_of_MTs_using_MT_probs(MTs, MTp, frac_to_sample, return_MTp_samples_switch=True)
# Get bin values for delta-gamma space (for plotting Lune):
bin_value_labels_delta, bin_value_labels_gamma, bins_delta_gamma, max_prob_bins_delta_gamma, num_samples_in_bins_delta_gamma = get_binned_MT_solutions_by_delta_gamma_dict(MTs_sample, MTp_sample)
# And plot:
print("Plotting Lune with fitted Gaussian")
# Set up figure:
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
# Plot major gridlines:
for phi in [-np.pi/6., np.pi/6.]:
theta_range = np.linspace(0.0,np.pi,180)
phi_range = np.ones(len(theta_range))*phi
r_range = np.ones(len(theta_range))
# And convert to 2D projection:
x,y,z = convert_spherical_coords_to_cartesian_coords(r_range,theta_range,phi_range)
Y_range,Z_range = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
ax.plot(Y_range,Z_range, color="black")
# Plot horizontal minor grid lines:
minor_horiz_interval = np.pi/12.
for theta in np.arange(0.+minor_horiz_interval, np.pi+minor_horiz_interval, minor_horiz_interval):
phi_range = np.linspace(-np.pi/6,np.pi/6,90)
theta_range = np.ones(len(phi_range))*theta
r_range = np.ones(len(theta_range))
# And convert to 2D projection:
x,y,z = convert_spherical_coords_to_cartesian_coords(r_range,theta_range,phi_range)
Y_range,Z_range = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
ax.plot(Y_range,Z_range, color="black", linestyle="--", alpha=0.5)
# Plot vertical minor gridlines:
minor_vert_interval = np.pi/24.
for phi in np.arange(-np.pi/6+minor_vert_interval, np.pi/6, minor_vert_interval):
theta_range = np.linspace(0.0,np.pi,180)
phi_range = np.ones(len(theta_range))*phi
r_range = np.ones(len(theta_range))
# And convert to 2D projection:
x,y,z = convert_spherical_coords_to_cartesian_coords(r_range,theta_range,phi_range)
Y_range,Z_range = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
ax.plot(Y_range,Z_range, color="black", linestyle="--", alpha=0.5)
# And plot binned data, colored by bin value:
# Flatten data with respect to biased sampling due to flat distribution in spherical space rather than Lune space:
# bins_delta_gamma = bins_delta_gamma/num_samples_in_bins_delta_gamma
    # Normalise data:
if plot_max_prob_on_Lune:
bins_delta_gamma_normallised = max_prob_bins_delta_gamma/np.max(max_prob_bins_delta_gamma)
# Remove zero values:
bins_delta_gamma_normallised[bins_delta_gamma_normallised==0.] = np.nan
# bins_delta_gamma_normallised = (bins_delta_gamma_normallised-np.min(np.isfinite(bins_delta_gamma_normallised)))/(np.max(np.isfinite(bins_delta_gamma_normallised)) - np.min(np.isfinite(bins_delta_gamma_normallised)))
else:
bins_delta_gamma_normallised = bins_delta_gamma/np.max(bins_delta_gamma) # Normalise data
# Loop over binned data points:
Y_all = []
Z_all = []
c_all = []
for i in range(len(bin_value_labels_delta)):
for j in range(len(bin_value_labels_gamma)):
delta = bin_value_labels_delta[i]
gamma = bin_value_labels_gamma[j]
# And plot data coord (if bin greater than 0):
if bins_delta_gamma_normallised[i,j]>0.:
x,y,z = convert_spherical_coords_to_cartesian_coords(1.,(np.pi/2.) - delta,gamma)
Y,Z = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
# ax.scatter(Y,Z, color = matplotlib.cm.inferno(int(bins_delta_gamma_normallised[i,j]*256)), alpha=0.6,s=50)
Y_all.append(Y)
Z_all.append(Z)
c_all.append(bins_delta_gamma_normallised[i,j])
ax.scatter(Y_all,Z_all, c=c_all, cmap="inferno", alpha=0.6,s=50)
# # Plot maximum location and associated contours associated with Guassian fit:
# # Plot maximum location:
# delta = max_bin_delta_gamma_values[0]
# gamma = max_bin_delta_gamma_values[1]
# x,y,z = convert_spherical_coords_to_cartesian_coords(1.,(np.pi/2.) - delta,gamma)
# Y,Z = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
# ax.scatter(Y,Z, color = "green", alpha=1.0,s=50, marker="X")
# # And plot 1 stdev contour:
# contour_bin_delta_values_sorted = []
# contour_bin_gamma_values_sorted = []
# for i in range(len(contour_bin_delta_gamma_values_sorted)):
# contour_bin_delta_values_sorted.append(contour_bin_delta_gamma_values_sorted[i][0])
# contour_bin_gamma_values_sorted.append(contour_bin_delta_gamma_values_sorted[i][1])
# delta = np.array(contour_bin_delta_values_sorted)
# gamma = np.array(contour_bin_gamma_values_sorted)
# x,y,z = convert_spherical_coords_to_cartesian_coords(1.,(np.pi/2.) - delta,gamma)
# Y,Z = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
# ax.plot(Y,Z, color = "green", alpha=0.5)
# Plot location of maximum probability single MT solution (passed as argument):
if len(six_MT_max_prob)>0:
delta, gamma = find_delta_gamm_values_from_sixMT(six_MT_max_prob)
# And plot data coord:
x,y,z = convert_spherical_coords_to_cartesian_coords(1.,(np.pi/2.) - delta,gamma)
Y,Z = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
ax.scatter(Y,Z, c="gold", alpha=0.8,s=250, marker="*")
# And Finish plot:
# Plot labels for various defined locations (locations from Tape and Tape 2012, table 1):
plt.scatter(0.,1.,s=50,color="black")
plt.text(0.,1.,"Explosion", fontsize=12, horizontalalignment="center", verticalalignment='bottom')
plt.scatter(0.,-1.,s=50,color="black")
plt.text(0.,-1.,"Implosion", fontsize=12, horizontalalignment="center", verticalalignment='top')
x,y,z = convert_spherical_coords_to_cartesian_coords(1.,(np.pi/2.) - np.arcsin(5/np.sqrt(33)),-np.pi/6.)
Y,Z = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
plt.scatter(Y,Z,s=50,color="red")
plt.text(Y,Z,"TC$^+$",color="red", fontsize=12, horizontalalignment="right", verticalalignment='bottom')
x,y,z = convert_spherical_coords_to_cartesian_coords(1.,(np.pi/2.) + np.arcsin(5/np.sqrt(33)),np.pi/6.)
Y,Z = equal_angle_stereographic_projection_conv_YZ_plane(x,y,z)
plt.scatter(Y,Z,s=50,color="red")
plt.text(Y,Z,"TC$^-$",color="red", fontsize=12, horizontalalignment="left", verticalalignment='top')
plt.scatter(0.,0.,s=50,color="red")
plt.text(0.,0.,"DC",color="red", fontsize=12, horizontalalignment="center", verticalalignment='top')
# Various tidying:
ax.set_xlim(-1.,1.)
ax.set_ylim(-1.,1.)
plt.axis('off')
# And save figure if given figure filename:
if not len(figure_filename) == 0:
plt.savefig(figure_filename, dpi=600)
else:
plt.show()
# # And return MT data at maximum (and mts within contour?!):
# # Get all solutions associated with bins inside contour on Lune plot:
# gamma_delta_binned_MT_store = get_binned_MT_solutions_by_delta_gamma_dict(MTs_sample) # Returns dictionary of all MTs binned by gamma, delta value
# # And get all values associated with gaussian maximum on Lune plot:
# max_bin_delta_gamma_indices = np.where(bins_delta_gamma_gau_fitted==np.max(bins_delta_gamma_gau_fitted))
# max_bin_delta_gamma_values = [bin_value_labels_delta[max_bin_delta_gamma_indices[0][0]], bin_value_labels_gamma[max_bin_delta_gamma_indices[1][0]]]
# delta = max_bin_delta_gamma_values[0]
# gamma = max_bin_delta_gamma_values[1]
# MTs_max_gau_loc = gamma_delta_binned_MT_store["delta="+str(delta)]["gamma="+str(gamma)]["MTs"] # MT solutions associated with gaussian maximum (note: may be different to maximum value due to max value being fit rather than real value)
#
# return MTs_max_gau_loc
def sort_wfs_components_current_station(wfs_component_labels_current_station, real_wfs_current_station, synth_wfs_current_station):
"""Function to sort current waveform components."""
wfs_component_labels_current_station_sorted = list(wfs_component_labels_current_station)
wfs_component_labels_current_station_sorted.sort()
if wfs_component_labels_current_station_sorted == ['R','T','Z']:
real_wfs_current_station_unsorted = list(real_wfs_current_station)
synth_wfs_current_station_unsorted = list(synth_wfs_current_station)
idx_tmp = wfs_component_labels_current_station.index("R")
real_wfs_current_station[0] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[0] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("T")
real_wfs_current_station[1] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[1] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("Z")
real_wfs_current_station[2] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[2] = synth_wfs_current_station_unsorted[idx_tmp]
wfs_component_labels_current_station = wfs_component_labels_current_station_sorted
elif wfs_component_labels_current_station_sorted == ['L','Q','T']:
real_wfs_current_station_unsorted = list(real_wfs_current_station)
synth_wfs_current_station_unsorted = list(synth_wfs_current_station)
idx_tmp = wfs_component_labels_current_station.index("L")
real_wfs_current_station[0] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[0] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("Q")
real_wfs_current_station[1] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[1] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("T")
real_wfs_current_station[2] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[2] = synth_wfs_current_station_unsorted[idx_tmp]
wfs_component_labels_current_station = wfs_component_labels_current_station_sorted
elif wfs_component_labels_current_station_sorted == ['R-P', 'R-S', 'T-P', 'T-S', 'Z-P', 'Z-S']:
real_wfs_current_station_unsorted = list(real_wfs_current_station)
synth_wfs_current_station_unsorted = list(synth_wfs_current_station)
idx_tmp = wfs_component_labels_current_station.index("R-P")
real_wfs_current_station[0] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[0] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("R-S")
real_wfs_current_station[1] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[1] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("T-P")
real_wfs_current_station[2] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[2] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("T-S")
real_wfs_current_station[3] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[3] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("Z-P")
real_wfs_current_station[4] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[4] = synth_wfs_current_station_unsorted[idx_tmp]
idx_tmp = wfs_component_labels_current_station.index("Z-S")
real_wfs_current_station[5] = real_wfs_current_station_unsorted[idx_tmp]
synth_wfs_current_station[5] = synth_wfs_current_station_unsorted[idx_tmp]
wfs_component_labels_current_station = wfs_component_labels_current_station_sorted
    elif wfs_component_labels_current_station_sorted == ['R-P', 'Z-P']:
        real_wfs_current_station_unsorted = list(real_wfs_current_station)
        synth_wfs_current_station_unsorted = list(synth_wfs_current_station)
        idx_tmp = wfs_component_labels_current_station.index("R-P")
        real_wfs_current_station[0] = real_wfs_current_station_unsorted[idx_tmp]
        synth_wfs_current_station[0] = synth_wfs_current_station_unsorted[idx_tmp]
        idx_tmp = wfs_component_labels_current_station.index("Z-P")
        real_wfs_current_station[1] = real_wfs_current_station_unsorted[idx_tmp]
        synth_wfs_current_station[1] = synth_wfs_current_station_unsorted[idx_tmp]
        wfs_component_labels_current_station = wfs_component_labels_current_station_sorted
return wfs_component_labels_current_station_sorted, real_wfs_current_station, synth_wfs_current_station
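# Illustrative alternative (a sketch, not used by the plotting functions below):
# every branch above permutes the waveforms into sorted-label order, so the same
# result can be obtained with a single table-driven reorder. No new component
# labels are assumed beyond the ones already handled above.
def _reorder_wfs_by_sorted_labels(labels, real_wfs, synth_wfs):
    order = sorted(range(len(labels)), key=lambda k: labels[k])
    sorted_labels = [labels[k] for k in order]
    return sorted_labels, [real_wfs[k] for k in order], [synth_wfs[k] for k in order]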
def plot_wfs_of_most_likely_soln_separate_plot(stations, wfs_dict, plot_fname):
"""Function to plot waveforms for the most likely inversion solution and save as separate plot."""
# Setup figure:
fig = plt.figure(figsize=(8, 3*len(stations)))
outer_plot_obj = gridspec.GridSpec(len(stations), 1, wspace=0.2, hspace=0.2)
# Loop over each station, plotting waveforms:
i=0
for station in stations:
station_name = station[0][0]
# Get current real and synthetic waveforms:
# Note: Will get all components for current station
real_wfs_current_station = []
synth_wfs_current_station = []
wfs_component_labels_current_station = []
for wfs_key in list(wfs_dict.keys()):
if station_name in wfs_key:
real_wfs_current_station.append(wfs_dict[wfs_key]['real_wf']) # Append current real waveforms to wfs for current station
synth_wfs_current_station.append(wfs_dict[wfs_key]['synth_wf']) # Append current synth waveforms to wfs for current station
wfs_component_labels_current_station.append(wfs_key.split(", ")[1]) # Get current component label
# and reorder if have Z,R and T components:
wfs_component_labels_current_station_sorted, real_wfs_current_station, synth_wfs_current_station = sort_wfs_components_current_station(wfs_component_labels_current_station, real_wfs_current_station, synth_wfs_current_station)
# And plot:
if len(real_wfs_current_station) > 0:
# Setup inner plot for current station:
inner_plot_obj = gridspec.GridSpecFromSubplotSpec(len(real_wfs_current_station), 1, subplot_spec=outer_plot_obj[i], wspace=0.1, hspace=0.1)
for j in range(len(real_wfs_current_station)):
ax_curr = plt.Subplot(fig, inner_plot_obj[j])
if j==0:
ax_curr.set_title(station_name)
ax_curr.plot(real_wfs_current_station[j],c='k', alpha=0.75, linewidth=2.5) # Plot real data
ax_curr.plot(synth_wfs_current_station[j],c='#E83313',linestyle="--", alpha=0.75, linewidth=2.0) # Plot synth data
ax_curr.set_ylabel(wfs_component_labels_current_station_sorted[j])
ax_curr.spines['top'].set_visible(False)
ax_curr.spines['right'].set_visible(False)
ax_curr.spines['bottom'].set_visible(False)
ax_curr.spines['left'].set_visible(False)
ax_curr.get_xaxis().set_ticks([])
ax_curr.get_yaxis().set_ticks([])
fig.add_subplot(ax_curr)
i+=1
# And save figure:
plt.savefig(plot_fname, dpi=300)
def plot_wfs_of_most_likely_soln_separate_plot_das(wfs_dict, plot_fname, fs=1000.):
"""Function to plot waveforms for the most likely inversion solution for DAS data and save as separate plot."""
# Get real and synth waveform data:
stations_to_plot = list(wfs_dict.keys())
print(stations_to_plot)
real_wfs = np.zeros( (len(wfs_dict[stations_to_plot[0]]['real_wf']), len(stations_to_plot)) )
for i in range(len(stations_to_plot)):
real_wfs[:,i] = wfs_dict[stations_to_plot[i]]['real_wf']
synth_wfs = np.zeros( (len(wfs_dict[stations_to_plot[0]]['synth_wf']), len(stations_to_plot)) )
for i in range(len(stations_to_plot)):
synth_wfs[:,i] = wfs_dict[stations_to_plot[i]]['synth_wf']
# Setup figure:
fig, axes = plt.subplots(ncols=3, figsize=(12,6), sharey=True)
# Get spatial and time gridded coords:
X, T = np.meshgrid( 10.0*np.arange(real_wfs.shape[1]), np.arange(real_wfs.shape[0])/fs )
# Find max. value:
max_amp = np.max(np.array([np.max(np.abs(real_wfs)), np.max(np.abs(synth_wfs))]))
# And plot data:
axes[0].pcolormesh(X, T, real_wfs, cmap='RdBu', vmin=-max_amp, vmax=max_amp)
axes[1].pcolormesh(X, T, synth_wfs, cmap='RdBu', vmin=-max_amp, vmax=max_amp)
axes[2].pcolormesh(X, T, real_wfs - synth_wfs, cmap='RdBu', vmin=-max_amp, vmax=max_amp)
    # And invert y-axis (for consistency with other DAS studies):
axes[0].invert_yaxis()
axes[1].invert_yaxis()
axes[2].invert_yaxis()
# Do additional labelling:
for i in range(3):
axes[i].set_xlabel('Channel no.')
axes[0].set_ylabel('Time (s)')
axes[0].set_title('Obs.')
axes[1].set_title('Model')
axes[2].set_title('Difference')
# And save figure:
plt.savefig(plot_fname, dpi=600)
def plot_slip_vector_distribution(MTs, MTp, six_MT_max_prob=[], frac_to_sample=0.1, figure_filename=[]):
"""Function to plot the slip vector distribution in terms of the spherical coordinates theta and phi."""
# Get highest sample of MTs to plot for:
MTs_sample, MTp_sample = get_frac_of_MTs_using_MT_probs(MTs, MTp, frac_to_sample, return_MTp_samples_switch=True)
# Loop over solutions, finding binned and maximum probabilities:
theta_bin_vals = np.arange(0.,np.pi,np.pi/100)
phi_bin_vals = np.arange(0.,2.*np.pi,np.pi/100)
theta_phi_bins = np.zeros((len(theta_bin_vals), len(phi_bin_vals)), dtype=float)
theta_phi_bins_num_samples = np.zeros((len(theta_bin_vals), len(phi_bin_vals)), dtype=float)
theta_phi_bins_max_prob_vals = np.zeros((len(theta_bin_vals), len(phi_bin_vals)), dtype=float)
    for ii
uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface_74a3aa08a315ca50732e853e3e8cdc43 import PimV6Interface
if self._properties.get('PimV6Interface', None) is not None:
return self._properties.get('PimV6Interface')
else:
return PimV6Interface(self)
@property
def Tag(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
if self._properties.get('Tag', None) is not None:
return self._properties.get('Tag')
else:
return Tag(self)
@property
def TlvProfile(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c.TlvProfile): An instance of the TlvProfile class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
if self._properties.get('TlvProfile', None) is not None:
return self._properties.get('TlvProfile')
else:
return TlvProfile(self)
@property
def Vxlanv6(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6_c18187deccae3db44b9e9de30ad538ec.Vxlanv6): An instance of the Vxlanv6 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6_c18187deccae3db44b9e9de30ad538ec import Vxlanv6
if self._properties.get('Vxlanv6', None) is not None:
return self._properties.get('Vxlanv6')
else:
return Vxlanv6(self)
@property
def Vxlanv6gpe(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6gpe_c816572194cd020274b16a0978c849fa.Vxlanv6gpe): An instance of the Vxlanv6gpe class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6gpe_c816572194cd020274b16a0978c849fa import Vxlanv6gpe
if self._properties.get('Vxlanv6gpe', None) is not None:
return self._properties.get('Vxlanv6gpe')
else:
return Vxlanv6gpe(self)
@property
def ComputedIapdAddresses(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The computed IPv6 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['ComputedIapdAddresses'])
@property
def ConnectedVia(self):
# type: () -> List[str]
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def CustomLinkLocalAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configures the Manual Link-Local IPv6 Address for the DHCPv6 Client.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CustomLinkLocalAddress']))
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Dhcp6DuidEnterpriseId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The enterprise-number is the vendor's registered Private Enterprise Number as maintained by IANA.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6DuidEnterpriseId']))
@property
def Dhcp6DuidType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): DHCP Unique Identifier Type.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6DuidType']))
@property
def Dhcp6DuidVendorId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
        - obj(ixnetwork_restpy.multivalue.Multivalue): The vendor-assigned unique ID for this range. This ID is incremented automatically for each DHCP client.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6DuidVendorId']))
@property
def Dhcp6GatewayAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configures the Manual Gateway IPv6 Address for the DHCPv6 Client.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6GatewayAddress']))
@property
def Dhcp6GatewayMac(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configures the Manual Gateway MAC corresponding to the configured Manual Gateway IP of the DHCPv6 Client session.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6GatewayMac']))
@property
def Dhcp6IANACount(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Number of IANA options to be included in a negotiation. This value must be smaller than Maximum Leases per Client.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IANACount']))
@property
def Dhcp6IAPDCount(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Number of IAPD options to be included in a negotiation. This value must be smaller than Maximum Leases per Client.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IAPDCount']))
@property
def Dhcp6IaId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The identity association unique ID for this range.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IaId']))
@property
def Dhcp6IaIdInc(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Increment step for each IAID in a multiple IANA/IAPD case.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IaIdInc']))
@property
def Dhcp6IaT1(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The suggested time at which the client contacts the server from which the addresses were obtained to extend the lifetimes of the addresses assigned.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IaT1']))
@property
def Dhcp6IaT2(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The suggested time at which the client contacts any available server to extend the lifetimes of the addresses assigned.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IaT2']))
@property
def Dhcp6IaType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Identity Association Type.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6IaType']))
@property
def Dhcp6UsePDGlobalAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Use DHCPc6-PD global addressing.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp6UsePDGlobalAddress']))
@property
def DiscoveredAddresses(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The discovered IPv6 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredAddresses'])
@property
def DiscoveredGateways(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The discovered gateway IPv6 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredGateways'])
@property
def DiscoveredPrefix(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The discovered IPv6 prefix.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredPrefix'])
@property
def DiscoveredPrefixLength(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): The length of the discovered IPv6 prefix.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredPrefixLength'])
@property
def EnableStateless(self):
# type: () -> bool
"""
Returns
-------
- bool: Enables DHCP stateless.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableStateless'])
@EnableStateless.setter
def EnableStateless(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableStateless'], value)
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def MaxNoPerClient(self):
# type: () -> int
"""
Returns
-------
- number: Maximum number of Addresses/Prefixes accepted by a Client in a negotiation.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxNoPerClient'])
@MaxNoPerClient.setter
def MaxNoPerClient(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['MaxNoPerClient'], value)
@property
def Multiplier(self):
# type: () -> int
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NoOfAddresses(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): Number of Negotiated Addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfAddresses'])
@property
def NoOfPrefixes(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): Number of Negotiated Addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfPrefixes'])
@property
def RenewTimer(self):
# type: () -> 'Multivalue'
"""
Returns
-------
        - obj(ixnetwork_restpy.multivalue.Multivalue): The user-defined lease renewal timer. The value is estimated in seconds and will override the lease renewal timer if it is not zero and is smaller than the server-defined value.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RenewTimer']))
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[duidNak | excessiveTlvs | noAddrsAvail | noAddrsBelow | none | noPrefixAvail | nsFailed | partiallyNegotiated | rebindTimeout | relayDown | renewTimeout | requestTimeout | solicitTimeout]): Logs additional information about the session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
@property
def SessionStatus(self):
# type: () -> List[str]
"""
Returns
-------
        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
        - dict(total:number,notStarted:number,down:number,up:number): A list of
import sys
import os
import json
import pickle
import argparse
import glob
import math
import numpy as np
import time
import traceback
from tqdm import tqdm
from collections import defaultdict
from graphviz import Digraph
import bisect
from denoising_event_lm.utils.utils import read_data
def get_all_doc_spans(doc_len, eiid2events, events_edges, unmatchedsrleiids, unmatchedsrl_eiid2events, mentions, tokens):
e_in_graph = set([eiid for eiid in events_edges.keys()]) | set([eiid for ends in events_edges.values() for eiid in ends])
# temporal events
obj_spans = [[e['tok_start'], e['tok_end'], ["in_graph" if eiid in e_in_graph else "not_in_graph", eiid]]
for eiid, e in eiid2events.items()]
# unmatched srl events
obj_spans += [[unmatchedsrl_eiid2events[eiid]['tok_start'], unmatchedsrl_eiid2events[eiid]['tok_end'], ["srl", eiid]] for eiid in unmatchedsrleiids]
# mentions, some mentions may be a predicate so we check here (eg: UDS-T dev #113)
span2idx = {(s, e): i for i, (s, e, tags) in enumerate(obj_spans)}
for m in mentions:
if (m['span'][0], m['span'][1]) in span2idx:
obj_spans[span2idx[(m['span'][0], m['span'][1])]][2][1] = ", entity"
else:
obj_spans.append([m['span'][0], m['span'][1], ["mention", "entity"]])
obj_spans = sorted(obj_spans)
#print(json.dumps(obj_spans, indent=4))
# check non-overlap
i = 0
while i < len(obj_spans)-1:
prev_s, prev_e, prev_tags = obj_spans[i]
s, e, tags = obj_spans[i+1]
if not s > prev_e:
if not (tags[0] == "mention" or prev_tags[0] == "mention"):
if e >= prev_e + 1: # s1 s2 e1 e2 -> (s1 e1)(e1+1, e2)
if i+2 == len(obj_spans) or not [prev_e+1, e] == [obj_spans[i+2][0], obj_spans[i+2][1]]: # prevent [e1+1, e2] already exists
obj_spans[i+1][0] = prev_e + 1
obj_spans = sorted(obj_spans) # when modify i+1, need to re-sort
else:
if tags[0] == "in_graph" or (tags[0] == "not_in_graph" and not obj_spans[i+2][2][0] == 'in_graph') or (tags[0] == "srl" and not obj_spans[i+2][2][0] == 'in_graph'):
obj_spans[i+2][2] = tags
obj_spans = obj_spans[:i+1] + obj_spans[i+2:]
else:
# s1 s2 e2 e1 -> (s1, s2-1)(s2, e2)(e2, e1)
obj_spans[i][1] = s - 1
if s == prev_s:
print(tokens[prev_s:prev_e+1], tokens[s:e+1])
print((prev_s, prev_e), (s, e))
print(prev_tags, tags)
assert not s == prev_s
if prev_e > e+1: # prevent s1 s2 e2==e1
insert_sp = [e+1, prev_e, prev_tags]
insert_pos = bisect.bisect_left([(ele[0], ele[1]) for ele in obj_spans], (e+1, prev_e), lo=i+2) # get insert pos only by (s, e) or the already existed (e2+1, e1) may be at insert_pos-1 instead of insert_pos
if insert_pos == len(obj_spans) or not [e+1, prev_e] == [obj_spans[insert_pos][0], obj_spans[insert_pos][1]]: # prevent [e2+1, e1] already exists
obj_spans = obj_spans[:insert_pos] + [insert_sp] + obj_spans[insert_pos:]
else:
if prev_tags[0] == "mention":
if e >= prev_e + 1: # s1 s2 e1 e2 -> (s1 e1)(e1+1, e2)
if i+2 == len(obj_spans) or not [prev_e+1, e] == [obj_spans[i+2][0], obj_spans[i+2][1]]: # prevent [e1+1, e2] already exists
obj_spans[i+1][0] = prev_e + 1
obj_spans = sorted(obj_spans) # when modify i+1, need to re-sort
else:
if tags[0] == "in_graph" or (tags[0] == "not_in_graph" and not obj_spans[i+2][2][0] == 'in_graph') or (tags[0] == "srl" and not obj_spans[i+2][2][0] == 'in_graph'):
obj_spans[i+2][2] = tags
obj_spans = obj_spans[:i+1] + obj_spans[i+2:]
else:
# s1 s2 e2 e1 -> (s1, s2-1)(s2, e2)(e2, e1)
obj_spans[i][1] = s - 1
if s == prev_s:
print(tokens[prev_s:prev_e+1], tokens[s:e+1])
print((prev_s, prev_e), (s, e))
print(prev_tags, tags)
assert not s == prev_s
if prev_e >= e+1: # prevent s1 s2 e2==e1
insert_sp = [e+1, prev_e, ["mention", "entity"]]
insert_pos = bisect.bisect_left([(ele[0], ele[1]) for ele in obj_spans], (e+1, prev_e), lo=i+2) # get insert pos only by (s, e) or the already existed (e2+1, e1) may be at insert_pos-1 instead of insert_pos
if insert_pos == len(obj_spans) or not [e+1, prev_e] == [obj_spans[insert_pos][0], obj_spans[insert_pos][1]]: # prevent [e2+1, e1] already exists
obj_spans = obj_spans[:insert_pos] + [insert_sp] + obj_spans[insert_pos:]
elif tags[0] == "mention":
if s - 1 >= prev_s: # s1 s2 e1 e2 or s1 s2 e2 e1 -> (s1, s2-1)(s2, e2)
obj_spans[i][1] = s - 1
else:
# s1==s2 e1 e2 -> (s1, e1)(e1+1, e2)
if i+2 == len(obj_spans) or not [prev_e+1, e] == [obj_spans[i+2][0], obj_spans[i+2][1]]: # prevent [e1+1, e2] already exists
obj_spans[i+1][0] = prev_e + 1
obj_spans = sorted(obj_spans) # when modify i+1, need to re-sort
else:
if tags[0] == "in_graph" or (tags[0] == "not_in_graph" and not obj_spans[i+2][2][0] == 'in_graph') or (tags[0] == "srl" and not obj_spans[i+2][2][0] == 'in_graph'):
obj_spans[i+2][2] = tags
obj_spans = obj_spans[:i+1] + obj_spans[i+2:]
if not e >= prev_e + 1:
print(span2idx)
print((prev_s, prev_e), (s, e))
print(prev_tags, tags)
exit()
i += 1
# check results
assert all(obj_spans[i][0] > obj_spans[i-1][1] for i in range(1, len(obj_spans)))
assert all(e >= s for s, e, tags in obj_spans)
all_spans = []
sp2tags = []
last_end = -1
for s, e, tags in obj_spans:
if s > last_end+1:
all_spans.append((last_end+1, s-1))
sp2tags.append(["", ""])
all_spans.append((s, e))
sp2tags.append(tags)
last_end = e
if doc_len > last_end+1:
all_spans.append((last_end+1, doc_len-1))
sp2tags.append(["", ""])
return all_spans, sp2tags
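# Small standalone check of the invariants get_all_doc_spans establishes above
# (illustrative/testing aid only; it is not called anywhere in this module):
# spans come back sorted, non-overlapping, contiguous, and covering every token.
def _check_span_cover(doc_len, all_spans, sp2tags):
    assert len(all_spans) == len(sp2tags)
    assert all(s <= e for s, e in all_spans)
    assert all(all_spans[i][0] == all_spans[i - 1][1] + 1 for i in range(1, len(all_spans)))
    assert all_spans[0][0] == 0 and all_spans[-1][1] == doc_len - 1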
def get_digraph_template(eiid2events, events_edges):
g = Digraph()
for start, ends in events_edges.items():
for end in ends:
g.edge(("[%s]\n" % start)+eiid2events[start]['event'],
("[%s]\n" % end)+eiid2events[end]['event'])
return g.source
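# Minimal usage sketch for get_digraph_template (illustrative only; the eiids and
# event strings below are made up): it returns Graphviz DOT source with one edge
# per temporal link, labelling each node as "[eiid]\n<event text>".
def _demo_digraph_template():
    eiid2events = {'ei1': {'event': 'bought a car'}, 'ei2': {'event': 'drove home'}}
    events_edges = {'ei1': ['ei2']}
    print(get_digraph_template(eiid2events, events_edges))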
def get_instance_for_render(d_nlp, d_graphs):
assert d_nlp['doc_id'] == d_graphs['doc_id']
doc_text = d_nlp['text']
doc_toks = d_nlp['tokens']
sents_tok_offset = d_nlp['sents_tok_offset'] + [len(doc_toks)]
eiid2srlvid = d_graphs['eiid2srlvid']
unmatchedsrl_eiid2events = d_graphs['unmatchedsrl_eiid2events']
clusterid2graph = d_graphs['clusterid2graph']
clusterid2unmatchedsrleiids = d_graphs['clusterid2unmatchedsrleiids']
# get coref
coref_clusters = d_nlp['pred_coref']
clusterid2mentions = defaultdict(list)
for cluster in coref_clusters:
for m in cluster:
offset = sents_tok_offset[m['sent_id']]
start, end = m['span']
m['span'] = [start+offset, end+offset]
clusterid2mentions[m['cluster_id']].append(m)
# get render instance for each entity
entity_objs = []
for c_id in clusterid2graph:
eiid2events = clusterid2graph[c_id]['eiid2events']
events_edges = clusterid2graph[c_id]['events_edges']
unmatchedsrleiids = clusterid2unmatchedsrleiids.get(c_id, [])
mentions = clusterid2mentions[int(c_id)]
all_doc_spans, doc_sp2tags = get_all_doc_spans(len(doc_toks), eiid2events, events_edges, unmatchedsrleiids, unmatchedsrl_eiid2events, mentions, doc_toks)
graph_template = get_digraph_template(eiid2events, events_edges)
obj = {"doc_tokens": doc_toks,
"all_doc_spans": all_doc_spans,
"doc_sp2tags": doc_sp2tags,
"graph_template": graph_template,
"doc_id": d_nlp['doc_id'],
}
entity_objs.append(obj)
return entity_objs
def render_token(tok, tags):
style = ""
if tags[0] == "in_graph":
style = "background-color: rgba(0, 0, 255, 0.5); border-radius: 7px; padding-left: 3px; padding-right: 3px; border-style: solid; border-color: rgba(0, 0, 255, 0.6); border-width: 1.5px"
elif tags[0] == "not_in_graph":
style = "background-color: rgba(0, 0, 255, 0.2); border-radius: 7px; padding-left: 3px; padding-right: 3px; border-style: dashed; border-color: rgba(0, 0, 255, 0.3); border-width: 1.5px"
elif tags[0] == "srl":
style = "background-color: rgba(0, 0, 255, 0.2); border-radius: 7px; padding-left: 3px; padding-right: 3px; border-style: dashed; border-color: rgba(0, 0, 255, 0.3); border-width: 1.5px"
elif tags[0] == "mention":
style = "background-color: rgba(0, 179, 179, 0.4); border-radius: 7px; padding-left: 3px; padding-right: 3px;"
style = repr(style)
tip = repr(tags[1])
br_splits = tok.split('<br />\n')
block = "".join("<span>{:s}</span>".format(br_split if i == len(br_splits)-1 else f"<span>{br_split}</span><br/><br/>")
for i, br_split in enumerate(br_splits))
return \
f"""<span><span data-toggle="tooltip" data-placement="auto top" title={tip} style={style}>{block}</span><span> </span></span>"""
def render_doc(entity_obj, c_id, last=False):
"""render documents with each special spans being highlighted, also add divs for graphviz rendering"""
doc_id = entity_obj['doc_id'].replace('.', '_')
tokens = entity_obj['doc_tokens']
spans = entity_obj['all_doc_spans']
sp2tags = entity_obj['doc_sp2tags']
doc_block = "".join(render_token(" ".join(tokens[s:e+1]), sp2tags[i]) for i, (s, e) in enumerate(spans))
hr = """<hr style="height: 1px" />""" if not last else ""
return f"""
<div class="form__field">
<div class="doc">
<h4>Doc #{doc_id} - Entity #{c_id}</h4>
{doc_block}
</div>
<div id="graph_{doc_id}_{c_id}" style="text-align: center;" class="doc">
</div>
{hr}
</div>
"""
def render_entity_events_graphs(ins):
"""render documents with each special spans being highlighted, also add divs for graphviz rendering"""
block = "".join(render_doc(entity_obj, c_id, c_id == len(ins)-1) for c_id, entity_obj in enumerate(ins))
return f"""
<div>
{block}
<br/>
<br/>
<br/>
<hr style="height: 2px; border: none; background-color: #b3b3b3;" />
</div>
"""
def render_graphviz_objects(ins):
"""render graphviz object for each instance, put into the script part"""
block = "\n".join('d3.select("#graph_{:s}_{:s}").graphviz().zoom(false).renderDot({:s});'.format(obj['doc_id'].replace('.', '_'), str(c_id), repr(obj['graph_template'])) for c_id, obj in enumerate(ins))
return block
def render_index_html(html_body, script_body):
"""get index.html"""
return f"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link href="https://fonts.googleapis.com/css?family=Roboto+Mono&display=swap" rel="stylesheet">
<link href='https://fonts.googleapis.com/css?family=Source+Sans+Pro' rel='stylesheet' type='text/css'>
<script src="https://d3js.org/d3.v5.min.js"></script>
<script src="https://unpkg.com/@hpcc-js/[email protected]/dist/index.min.js"></script>
<script src="https://unpkg.com/[email protected]/build/d3-graphviz.js"></script>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
<style>
body,
html {{
min-width: 48em;
font-size: 16px;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}}
* {{
font-family: 'Source Sans Pro', sans-serif;
color: #232323;
}}
.model__content {{
padding: 0.6em 2em 0.875em 2em;
margin: auto;
-webkit-transition: padding .2s ease, margin .2s ease;
transition: padding .2s ease, margin .2s ease;
}}
.form__field {{
-webkit-transition: margin .2s ease;
transition: margin .2s ease;
}}
div.doc {{
color:black;
font-size: 16px;
padding-left: 5px;
padding-top: 5px;
padding-bottom: 5px;
padding-right: 5px;
margin-bottom: 10px;
line-height: 40px;
}}
</style>
</head>
<body>
<div class="model__content">
{html_body}
</div>
<script>
{script_body}
$(document).ready(function(){{
$('[data-toggle="tooltip"]').tooltip();
}});
</script>
</body>
</html>
"""
def main(args):
graphs_data, _ = read_data(args.graphs_input, args)
nlp_data, _ = read_data(args.nlp_input, args)
if args.num_splits is None or args.num_splits | |
<reponame>mattdangerw/gnn
"""GraphTensor adjacency types.
"""
from typing import Dict, Mapping, Optional, Tuple, Union
import tensorflow as tf
from tensorflow_gnn.graph import graph_constants as const
from tensorflow_gnn.graph import graph_piece as gp
from tensorflow_gnn.graph import tensor_utils as utils
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import type_spec
# pylint: enable=g-direct-tensorflow-import
Field = const.Field
FieldSpec = const.FieldSpec
IncidentNodeTag = const.IncidentNodeTag
NodeSetName = const.NodeSetName
Index = Tuple[NodeSetName, Field]
Indices = Mapping[IncidentNodeTag, Index]
class HyperAdjacency(gp.GraphPieceBase):
"""Stores edges as indices of nodes in node sets.
Node adjacency is represented as a mapping of unique node tags to pairs of
(node set names, index tensors) into them. The tags are `SOURCE` and
`TARGET` for ordinary graphs but there can be more of them for hypergraphs
(e.g., edges linking more than two nodes, also known as "hyper-edges"). All
index tensors must agree in their type (`tf.Tensor` or `tf.RaggedTensor`),
integer dtype, and shape. Corresponding values are the indices of nodes that
belong to the same hyper-edge.
"""
# TODO(b/210004712): Replace `*_` by more Pythonic `*`.
@classmethod
@tf.__internal__.dispatch.add_dispatch_support
def from_indices(cls,
indices: Indices,
*_,
validate: bool = True) -> 'HyperAdjacency':
"""Constructs a new instance from the `indices` tensors.
Example 1. Single graph (rank is 0). Connects pairs of nodes (a.0, b.2),
(a.1, b.1), (a.2, b.0) from node sets a and b:
gnn.HyperAdjacency.from_indices({
gnn.SOURCE: ('a', [0, 1, 2]),
gnn.TARGET: ('b', [2, 1, 0])
})
Example 2. Single hypergraph (rank is 0). Connects triplets of nodes
(a.0, b.2, c.1), (a.1, b.1, c.0) from the node sets a, b and c:
gnn.HyperAdjacency.from_indices({
0: ('a', [0, 1]),
1: ('b', [2, 1]),
2: ('c', [1, 0]),
})
Example 3. Batch of two graphs (rank is 1). Connects pairs of nodes
graph 0: (a.0, b.2), (a.1, b.1); graph 1: (a.2, b.0):
gnn.HyperAdjacency.from_indices({
gnn.SOURCE: ('a', tf.ragged.constant([[0, 1], [2]])),
gnn.TARGET: ('b', tf.ragged.constant([[2, 1], [0]])),
})
Args:
indices: Mapping from node tags to tuples of node set names and integer
Tensors or RaggedTensors with the indices of nodes in the respective
node set. All tensors must have shape = graph_shape + [num_edges], where
num_edges is a number of edges in each graph. If graph_shape.rank > 0
and num_edges has variable size, the tensors are ragged.
validate: if set, checks that node indices have the same shapes.
Returns:
A `HyperAdjacency` tensor with its shape and indices_dtype inferred from
the `indices` values.
"""
if _:
raise TypeError('Positional arguments are not supported:', _)
indices = {
key: (name, gp.convert_to_tensor_or_ragged(index))
for key, (name, index) in indices.items()
}
if validate or const.validate_internal_results:
indices = _validate_indices(indices)
data = {
_node_tag_to_index_key(tag): index
for tag, (_, index) in indices.items()
}
metadata = {
_node_tag_to_index_key(tag): name for tag, (name, _) in indices.items()
}
indicative_index_tensor = _get_indicative_index(data)
return cls._from_data(
data,
shape=indicative_index_tensor.shape[:-1],
indices_dtype=indicative_index_tensor.dtype,
metadata=metadata)
def __getitem__(self, node_set_tag: IncidentNodeTag) -> Field:
"""Returns index tensor for a given node set tag."""
return self._data[_node_tag_to_index_key(node_set_tag)]
def node_set_name(self, node_set_tag: IncidentNodeTag) -> NodeSetName:
"""Returns node set name for a given node set tag."""
return self.spec.node_set_name(node_set_tag)
def get_indices_dict(
self) -> Dict[IncidentNodeTag, Tuple[NodeSetName, Field]]:
"""Returns copy of indices tensor."""
return {
_index_key_to_node_tag(key):
(self.node_set_name(_index_key_to_node_tag(key)), index)
for key, index in self._data.items()
}
def _merge_batch_to_components(
self, num_edges_per_example: Field,
num_nodes_per_example: Mapping[NodeSetName, Field]) -> 'HyperAdjacency':
if self.rank == 0:
return self
flat_adj = super()._merge_batch_to_components(
num_edges_per_example=num_edges_per_example,
num_nodes_per_example=num_nodes_per_example)
assert isinstance(flat_adj, HyperAdjacency)
def flatten_indices(node_tag_key, index: Field) -> Field:
node_set_name = self.spec._metadata[node_tag_key] # pylint: disable=protected-access
return utils.flatten_indices(index, num_edges_per_example,
num_nodes_per_example[node_set_name])
new_data = {
node_tag_key: flatten_indices(node_tag_key, index)
for node_tag_key, index in flat_adj._data.items() # pylint: disable=protected-access
}
return self.__class__(new_data, flat_adj.spec)
@staticmethod
def _type_spec_cls():
return HyperAdjacencySpec
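# Usage sketch, following the `from_indices` docstring examples above:
#
#   adj = HyperAdjacency.from_indices({
#       const.SOURCE: ('a', [0, 1, 2]),
#       const.TARGET: ('b', [2, 1, 0]),
#   })
#   adj[const.SOURCE]                 # index tensor into node set 'a'
#   adj.node_set_name(const.TARGET)   # -> 'b'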
@type_spec.register('tensorflow_gnn.HyperAdjacencySpec')
class HyperAdjacencySpec(gp.GraphPieceSpecBase):
"""TypeSpec for HyperAdjacency."""
@classmethod
def from_incident_node_sets(
cls,
incident_node_sets: Mapping[IncidentNodeTag, NodeSetName],
index_spec: FieldSpec = tf.TensorSpec((None,),
const.default_indices_dtype)
) -> 'HyperAdjacencySpec':
"""Constructs a new instance from the `incident_node_sets`.
Args:
incident_node_sets: mapping from incident node tags to node set names.
index_spec: type spec for all index tensors. Its shape must be graph_shape
+ [num_edges], where num_edges is the number of edges in each graph. If
graph_shape.rank > 0 and num_edges has variable size, the spec should be
an instance of tf.RaggedTensorSpec.
Returns:
A `HyperAdjacencySpec` TypeSpec.
"""
if not (index_spec.shape.rank > 0 and
index_spec.dtype in (tf.int32, tf.int64)):
raise ValueError(
'Index spec must have rank > 0 and dtype in (tf.int32, tf.int64),'
f' got {index_spec}')
data_spec = {
_node_tag_to_index_key(tag): index_spec for tag in incident_node_sets
}
metadata = {
_node_tag_to_index_key(tag): name
for tag, name in incident_node_sets.items()
}
return cls._from_data_spec(
data_spec,
shape=index_spec.shape[:-1],
indices_dtype=index_spec.dtype,
metadata=metadata)
@property
def value_type(self):
return HyperAdjacency
def __getitem__(self, node_set_tag: IncidentNodeTag) -> FieldSpec:
"""Returns index tensor type spec for a given node set tag."""
return self._data_spec[_node_tag_to_index_key(node_set_tag)]
def get_index_specs_dict(
self) -> Dict[IncidentNodeTag, Tuple[NodeSetName, FieldSpec]]:
"""Returns copy of index type specs."""
return {
_index_key_to_node_tag(key):
(self.node_set_name(_index_key_to_node_tag(key)), index)
for key, index in self._data_spec.items()
}
def node_set_name(self, node_set_tag: IncidentNodeTag) -> NodeSetName:
"""Returns node set name for a given node set tag."""
return self._metadata[_node_tag_to_index_key(node_set_tag)]
@property
def total_size(self) -> Optional[int]:
"""Returns the total number of edges across dimensions if known."""
ind_spec = _get_indicative_index(self._data_spec)
assert ind_spec is not None
return ind_spec.shape[:(self.rank + 1)].num_elements()
class Adjacency(HyperAdjacency):
"""Stores simple binary edges with a source and target.
Node adjacency is represented as mapping of source and target edge endpoints
to pairs of (node set names, index tensors) into them. All index tensors must
agree in their type (`tf.Tensor` or `tf.RaggedTensor`), integer dtype, and
shape. Corresponding values are the indices of nodes that belong to the same
edge.
"""
# TODO(b/210004712): Replace `*_` by more Pythonic `*`.
@classmethod
@tf.__internal__.dispatch.add_dispatch_support
def from_indices(cls,
source: Index,
target: Index,
*_,
validate: bool = True) -> 'Adjacency':
"""Constructs a new instance from the `indices` tensors.
Example 1. Single graph (rank is 0). Connects pairs of nodes (a.0, b.2),
(a.1, b.1), (a.2, b.0) from node sets a and b:
gnn.Adjacency.from_indices(('a', [0, 1, 2]),
('b', [2, 1, 0]))
Example 2. Batch of two graphs (rank is 1). Connects pairs of nodes
graph 0: (a.0, b.2), (a.1, b.1); graph 1: (a.2, b.0):
gnn.Adjacency.from_indices(('a', tf.ragged.constant([[0, 1], [2]])),
('b', tf.ragged.constant([[2, 1], [0]])))
Args:
source: Tuple of (node set name, integer Tensors or RaggedTensors with the
indices of nodes in the respective node set). Must have shape =
graph_shape + [num_edges], where num_edges is a number of edges in each
graph. If graph_shape.rank > 0 and num_edges has variable size, the
tensors are ragged.
target: Like `source` field, but for target edge endpoint.
validate: if set, checks that node indices have the same shapes.
Returns:
An `Adjacency` tensor with its shape and indices_dtype inferred from
the `indices` values.
"""
if _:
raise TypeError('Positional arguments are not supported:', _)
return super().from_indices({const.SOURCE: source, const.TARGET: target})
@property
def source(self) -> Field:
return self[const.SOURCE]
@property
def target(self) -> Field:
return self[const.TARGET]
@property
def source_name(self) -> NodeSetName:
"""Returns the node set name for source nodes."""
return self.node_set_name(const.SOURCE)
@property
def target_name(self) -> NodeSetName:
"""Returns the node set name for target nodes."""
return self.node_set_name(const.TARGET)
@staticmethod
def _type_spec_cls():
return AdjacencySpec
@type_spec.register('tensorflow_gnn.AdjacencySpec')
class AdjacencySpec(HyperAdjacencySpec):
"""TypeSpec for Adjacency."""
@classmethod
def from_incident_node_sets(
cls,
source_node_set: NodeSetName,
target_node_set: NodeSetName,
index_spec: FieldSpec = tf.TensorSpec((None,),
const.default_indices_dtype)
) -> 'AdjacencySpec':
"""Constructs a new instance from the `incident_node_sets`.
Args:
source_node_set: A string, the name of the source node set.
target_node_set: A string, the name of the target node set.
index_spec: type spec for all index tensors. Its shape must be graph_shape
+ [num_edges], where num_edges is the number of edges in each graph. If
graph_shape.rank > 0 and num_edges has variable size, the spec should be
an instance of tf.RaggedTensorSpec.
Returns:
An `AdjacencySpec` TypeSpec.
"""
return super().from_incident_node_sets(
{const.SOURCE: source_node_set,
const.TARGET: target_node_set}, index_spec)
@property
def value_type(self):
return Adjacency
@property
def source(self) -> FieldSpec:
return self[const.SOURCE]
@property
def target(self) -> FieldSpec:
return self[const.TARGET]
@property
def source_name(self) -> NodeSetName:
"""Returns the node set name for source nodes."""
return self.node_set_name(const.SOURCE)
@property
def target_name(self) -> NodeSetName:
"""Returns the node set name for target nodes."""
return self.node_set_name(const.TARGET)
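# Usage sketch (hedged): a spec for a batch of graphs with a variable number of
# edges per graph, in the spirit of Example 2 in `Adjacency.from_indices`:
#
#   spec = AdjacencySpec.from_incident_node_sets(
#       'a', 'b',
#       index_spec=tf.RaggedTensorSpec([None, None], dtype=tf.int64,
#                                      ragged_rank=1))
#   spec.source_name   # -> 'a'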
def _validate_indices(indices: Indices) -> Indices:
"""Checks that indices have compatible shapes."""
if not indices:
raise ValueError('`indices` must contain at least one entry.')
assert_ops = []
def check_index(tag, name, index):
if index.dtype not in (tf.int32, tf.int64):
raise ValueError((f'Adjacency indices ({tag}, {name}) must have '
f'tf.int32 or tf.int64 dtype, got {index.dtype}'))
if | |
<reponame>abhaikollara/tensorflow<filename>tensorflow/python/grappler/hierarchical_controller.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HierarchicalController Class.
The HierarchicalController encompasses the entire lifecycle of training the
device placement policy, including generating op embeddings, getting groups for
each op, placing those groups and running the predicted placements.
Different assignment models can inherit from this class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.grappler.controller import Controller
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training_util
class PlacerParams(object):
"""Class to hold a set of placement parameters as name-value pairs.
A typical usage is as follows:
```python
# Create a PlacerParams object specifying names and values of the model
# parameters:
params = PlacerParams(hidden_size=128, decay_steps=50)
# The parameters are available as attributes of the PlacerParams object:
params.hidden_size ==> 128
params.decay_steps ==> 50
```
"""
def __init__(self, **kwargs):
"""Create an instance of `PlacerParams` from keyword arguments.
The keyword arguments specify name-values pairs for the parameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `PlacerParams` object,
and they can be accessed directly with the dot notation `params._name_`.
Example:
```python
# Define 1 parameter: 'hidden_size'
params = PlacerParams(hidden_size=128)
params.hidden_size ==> 128
```
Args:
**kwargs: Key-value pairs where the key is the parameter name and
the value is the value for the parameter.
"""
for name, value in six.iteritems(kwargs):
self.add_param(name, value)
def add_param(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# parameter name.
if getattr(self, name, None) is not None:
raise ValueError("Parameter name is reserved: %s" % name)
setattr(self, name, value)
def hierarchical_controller_hparams():
"""Hyperparameters for hierarchical planner."""
return PlacerParams(
hidden_size=512,
forget_bias_init=1.0,
temperature=1.0,
logits_std_noise=0.5,
stop_noise_step=750,
decay_steps=50,
max_num_outputs=5,
max_output_size=5,
tanh_constant=1.0,
adj_embed_dim=20,
grouping_hidden_size=64,
num_groups=None,
bi_lstm=True,
failing_signal=100,
stop_sampling=500,
start_with_failing_signal=True,
always_update_baseline=False,
bl_dec=0.9,
grad_bound=1.0,
lr=0.1,
lr_dec=0.95,
start_decay_step=400,
optimizer_type="adam",
stop_updating_after_steps=1000,
name="hierarchical_controller",
keep_prob=1.0,
reward_function="sqrt",
seed=1234,
# distributed training params
num_children=1)
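# Usage sketch: defaults can be tweaked after construction because PlacerParams
# stores every parameter as a plain attribute, while add_param only accepts
# names that are not already set.
#
#   hparams = hierarchical_controller_hparams()
#   hparams.lr = 0.05                    # override an existing parameter
#   hparams.add_param("new_flag", True)  # add a brand-new parameter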
class HierarchicalController(Controller):
"""HierarchicalController class."""
def __init__(self, hparams, item, cluster, controller_id=0):
"""HierarchicalController class initializer.
Args:
hparams: All hyper-parameters.
item: The metagraph to place.
cluster: The cluster of hardware devices to optimize for.
controller_id: the id of the controller in a multi-controller setup.
"""
super(HierarchicalController, self).__init__(item, cluster)
self.ctrl_id = controller_id
self.hparams = hparams
if self.hparams.num_groups is None:
self.num_groups = min(256, 20 * self.num_devices)
else:
self.num_groups = self.hparams.num_groups
# creates self.op_embeddings and self.type_dict
self.create_op_embeddings(verbose=False)
# TODO(azalia) clean up embedding/group_embedding_size names
self.group_emb_size = (
2 * self.num_groups + len(self.type_dict) +
self.hparams.max_num_outputs * self.hparams.max_output_size)
self.embedding_size = self.group_emb_size
self.initializer = init_ops.glorot_uniform_initializer(
seed=self.hparams.seed)
with variable_scope.variable_scope(
self.hparams.name,
initializer=self.initializer,
reuse=variable_scope.AUTO_REUSE):
# define parameters of feedforward
variable_scope.get_variable("w_grouping_ff", [
1 + self.hparams.max_num_outputs * self.hparams.max_output_size +
self.hparams.adj_embed_dim, self.hparams.grouping_hidden_size
])
variable_scope.get_variable(
"w_grouping_softmax",
[self.hparams.grouping_hidden_size, self.num_groups])
if self.hparams.bi_lstm:
variable_scope.get_variable("encoder_lstm_forward", [
self.embedding_size + self.hparams.hidden_size / 2,
2 * self.hparams.hidden_size
])
variable_scope.get_variable("encoder_lstm_backward", [
self.embedding_size + self.hparams.hidden_size / 2,
2 * self.hparams.hidden_size
])
variable_scope.get_variable(
"device_embeddings", [self.num_devices, self.hparams.hidden_size])
variable_scope.get_variable(
"decoder_lstm",
[2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size])
variable_scope.get_variable(
"device_softmax", [2 * self.hparams.hidden_size, self.num_devices])
variable_scope.get_variable("device_go_embedding",
[1, self.hparams.hidden_size])
variable_scope.get_variable(
"encoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"decoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable(
"attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1])
else:
variable_scope.get_variable("encoder_lstm", [
self.embedding_size + self.hparams.hidden_size,
4 * self.hparams.hidden_size
])
variable_scope.get_variable(
"device_embeddings", [self.num_devices, self.hparams.hidden_size])
variable_scope.get_variable(
"decoder_lstm",
[2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size])
variable_scope.get_variable(
"device_softmax", [2 * self.hparams.hidden_size, self.num_devices])
variable_scope.get_variable("device_go_embedding",
[1, self.hparams.hidden_size])
variable_scope.get_variable(
"encoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"decoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable(
"attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1])
seq2seq_input_layer = array_ops.placeholder_with_default(
array_ops.zeros([self.hparams.num_children,
self.num_groups,
self.group_emb_size],
dtypes.float32),
shape=(self.hparams.num_children, self.num_groups, self.group_emb_size))
self.seq2seq_input_layer = seq2seq_input_layer
def compute_reward(self, run_time):
if self.hparams.reward_function == "id":
reward = run_time
elif self.hparams.reward_function == "sqrt":
reward = math.sqrt(run_time)
elif self.hparams.reward_function == "log":
reward = math.log1p(run_time)
else:
raise NotImplementedError(
"Unrecognized reward function '%s', consider your "
"--reward_function flag value." % self.hparams.reward_function)
return reward
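# Worked example: with reward_function="sqrt", a measured run_time of 100.0
# yields self.compute_reward(100.0) == math.sqrt(100.0) == 10.0.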
def build_controller(self):
"""RL optimization interface.
Returns:
ops: A dictionary holding handles of the model used for training.
"""
self._global_step = training_util.get_or_create_global_step()
ops = {}
ops["loss"] = 0
failing_signal = self.compute_reward(self.hparams.failing_signal)
ctr = {}
with tf_ops.name_scope("controller_{}".format(self.ctrl_id)):
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
ctr["reward"] = {"value": [], "ph": [], "update": []}
ctr["ready"] = {"value": [], "ph": [], "update": []}
ctr["best_reward"] = {"value": [], "update": []}
for i in range(self.hparams.num_children):
reward_value = variable_scope.get_local_variable(
"reward_{}".format(i),
initializer=0.0,
dtype=dtypes.float32,
trainable=False)
reward_ph = array_ops.placeholder(
dtypes.float32, shape=(), name="reward_ph_{}".format(i))
reward_update = state_ops.assign(
reward_value, reward_ph, use_locking=True)
ctr["reward"]["value"].append(reward_value)
ctr["reward"]["ph"].append(reward_ph)
ctr["reward"]["update"].append(reward_update)
best_reward = variable_scope.get_local_variable(
"best_reward_{}".format(i),
initializer=failing_signal,
dtype=dtypes.float32,
trainable=False)
ctr["best_reward"]["value"].append(best_reward)
ctr["best_reward"]["update"].append(
state_ops.assign(best_reward,
math_ops.minimum(best_reward, reward_update)))
ready_value = variable_scope.get_local_variable(
"ready_{}".format(i),
initializer=True,
dtype=dtypes.bool,
trainable=False)
ready_ph = array_ops.placeholder(
dtypes.bool, shape=(), name="ready_ph_{}".format(i))
ready_update = state_ops.assign(
ready_value, ready_ph, use_locking=True)
ctr["ready"]["value"].append(ready_value)
ctr["ready"]["ph"].append(ready_ph)
ctr["ready"]["update"].append(ready_update)
ctr["grouping_y_preds"], ctr["grouping_log_probs"] = self.get_groupings()
summary.histogram(
"grouping_actions",
array_ops.slice(ctr["grouping_y_preds"]["sample"], [0, 0],
[1, array_ops.shape(self.op_embeddings)[0]]))
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
ctr["baseline"] = variable_scope.get_local_variable(
"baseline",
initializer=failing_signal
if self.hparams.start_with_failing_signal else 0.0,
dtype=dtypes.float32,
trainable=False)
new_baseline = self.hparams.bl_dec * ctr["baseline"] + (
1 - self.hparams.bl_dec) * math_ops.reduce_mean(
ctr["reward"]["value"])
if not self.hparams.always_update_baseline:
baseline_mask = math_ops.less(ctr["reward"]["value"], failing_signal)
selected_reward = array_ops.boolean_mask(ctr["reward"]["value"],
baseline_mask)
selected_baseline = control_flow_ops.cond(
math_ops.reduce_any(baseline_mask),
lambda: math_ops.reduce_mean(selected_reward),
lambda: constant_op.constant(0, dtype=dtypes.float32))
ctr["pos_reward"] = selected_baseline
pos_ = math_ops.less(
constant_op.constant(0, dtype=dtypes.float32), selected_baseline)
selected_baseline = self.hparams.bl_dec * ctr["baseline"] + (
1 - self.hparams.bl_dec) * selected_baseline
selected_baseline = control_flow_ops.cond(
pos_, lambda: selected_baseline, lambda: ctr["baseline"])
new_baseline = control_flow_ops.cond(
math_ops.less(self.global_step,
self.hparams.stop_updating_after_steps),
lambda: new_baseline, lambda: selected_baseline)
ctr["baseline_update"] = state_ops.assign(
ctr["baseline"], new_baseline, use_locking=True)
ctr["y_preds"], ctr["log_probs"] = self.get_placements()
summary.histogram("actions", ctr["y_preds"]["sample"])
mask = math_ops.less(ctr["reward"]["value"], failing_signal)
ctr["loss"] = ctr["reward"]["value"] - ctr["baseline"]
ctr["loss"] *= (
ctr["log_probs"]["sample"] + ctr["grouping_log_probs"]["sample"])
selected_loss = array_ops.boolean_mask(ctr["loss"], mask)
selected_loss = control_flow_ops.cond(
math_ops.reduce_any(mask),
lambda: math_ops.reduce_mean(-selected_loss),
lambda: constant_op.constant(0, dtype=dtypes.float32))
ctr["loss"] = control_flow_ops.cond(
math_ops.less(self.global_step,
self.hparams.stop_updating_after_steps),
lambda: math_ops.reduce_mean(-ctr["loss"]), lambda: selected_loss)
ctr["reward_s"] = math_ops.reduce_mean(ctr["reward"]["value"])
summary.scalar("loss", ctr["loss"])
summary.scalar("avg_reward", ctr["reward_s"])
summary.scalar("best_reward_so_far", best_reward)
summary.scalar(
"advantage",
math_ops.reduce_mean(ctr["reward"]["value"] - ctr["baseline"]))
with variable_scope.variable_scope(
"optimizer", reuse=variable_scope.AUTO_REUSE):
(ctr["train_op"], ctr["lr"], ctr["grad_norm"],
ctr["grad_norms"]) = self._get_train_ops(
ctr["loss"],
tf_ops.get_collection(tf_ops.GraphKeys.TRAINABLE_VARIABLES),
self.global_step,
grad_bound=self.hparams.grad_bound,
lr_init=self.hparams.lr,
lr_dec=self.hparams.lr_dec,
start_decay_step=self.hparams.start_decay_step,
decay_steps=self.hparams.decay_steps,
optimizer_type=self.hparams.optimizer_type)
summary.scalar("gradnorm", ctr["grad_norm"])
summary.scalar("lr", ctr["lr"])
ctr["summary"] = summary.merge_all()
ops["controller"] = ctr
self.ops = ops
return ops
@property
def global_step(self):
return self._global_step
def create_op_embeddings(self, verbose=False):
if verbose:
print("process input graph for op embeddings")
self.num_ops = len(self.important_ops)
# topological sort of important nodes
topo_order = [op.name for op in self.important_ops]
# create index to name for topologicaly sorted important nodes
name_to_topo_order_index = {}
for idx, x in enumerate(topo_order):
name_to_topo_order_index[x] = idx
self.name_to_topo_order_index = name_to_topo_order_index
# create adj matrix
adj_dict = {}
for idx, op in enumerate(self.important_ops):
for output_op in self.get_node_fanout(op):
output_op_name = output_op.name
if output_op_name in self.important_op_names:
if name_to_topo_order_index[op.name] not in adj_dict:
adj_dict[name_to_topo_order_index[op.name]] = []
adj_dict[name_to_topo_order_index[op.name]].extend(
[name_to_topo_order_index[output_op_name], 1])
if output_op_name not in adj_dict:
adj_dict[name_to_topo_order_index[output_op_name]] = []
adj_dict[name_to_topo_order_index[output_op_name]].extend(
[name_to_topo_order_index[op.name], -1])
# get op_type op_output_shape, and adj info
output_embed_dim = (self.hparams.max_num_outputs *
self.hparams.max_output_size)
# TODO(bsteiner): don't filter based on used ops so that we can generalize
# to models that use other types of ops.
used_ops = set()
for node in self.important_ops:
op_type = str(node.op)
used_ops.add(op_type)
self.type_dict = {}
for op_type in self.cluster.ListAvailableOps():
if op_type in used_ops:
self.type_dict[op_type] = len(self.type_dict)
op_types = np.zeros([self.num_ops], dtype=np.int32)
op_output_shapes = np.full(
[self.num_ops, output_embed_dim], -1.0, dtype=np.float32)
for idx, node in enumerate(self.important_ops):
op_types[idx] = self.type_dict[node.op]
| |
<reponame>SSK-14/Covid19-Search-Engine
import pickle
a = """1H – 1st Half
24/7 - 24 hours a day, seven days a week
80/20 – for many events, roughly 80% of the effects come from 20% of the causes
ADR - American Depositary Receipt
AI - Artificial Intelligence
AM – Account manager
AOP – Adjusted Operating Profit
AOP – Annual Operating Plan
AP – Accounts payable
ARPU – Average revenue per user
ASP – Average selling price
ASAP – "As soon as possible"
agcy. – Agency
agt. – Agent
asst. – Assistant
a/c. – Account
afaik. – As far as I know
BAU – Business As Usual
BEP - Break Even Point
BIC - Bank Identifier Code
bldg. – Building
BLS - Balance Sheet
BMC - Business Model Canvas
BOM - Bill of Materials
BPO - Business Process Outsourcing
BPR - Brief Project Report
BPV - Bank Payment Voucher
BRD – Business Requirements Document
BRU – Business Recovery Unit
BRV - Bank Receipt Voucher
BTW - By the way
B2B – Business-to-business
B2C – Business to Consumer
B2G – Business-to-government
BU – Business Unit
BUSI – Business
bus. – Business
CAGR – Compound annual growth rate
CAO – Chief Administrative Officer or Chief Accounting Officer
CAPEX – Capital Expenditure
CAPM – Capital asset pricing model
CBOE – Chicago Board Options Exchange
CBOT – Chicago Board of Trade
CDO – Collateralized debt obligation or Chief Data Officer
CDM – Change and Data Management
CDS – Credit default swap
CEO – Chief executive officer
COA – Chart of Account
CFA – Chartered Financial Analyst
CFD – Contract for difference
CFC – Consumption of fixed capital
CFCT – Cash Flow Cycle Time
CFM – Certified Financial Manager
CFO – Chief Financial Officer
CFS – Consolidated Financial Statement
CIA – Certified Internal Auditor
CIF – Cost Insurance With Freight
CIMA – Chartered Institute of Management Accountants
CIO – Chief Information Officer, Chief Innovation Officer or Chief Investment Officer
CIP - Carriage and Insurance Paid
CISA – Certified Information Systems Auditor
CISO - Chief Information Security Officer
CLO - Chief Legal Officer
CMA – Certified Management Accountant
CMFB – Committee on monetary, finance and balance of payments statistics
CMO – Chief Marketing Officer
COB – Close of Business
COGS – Cost of Goods Sold
Corp. – Corporation
COO – Chief Operating Officer
CPA – Certified Public Accountant
CPI – Consumer Price Index
CPO – Chief People Officer also Chief Procurement Officer
CPQ – Configure, Price, Quote
CPU - Central Processing Unit
CSI - Corporate Social Investment
CSO – Chief Security Officer
CSR - Corporate social responsibility
CRM – Customer Relationship Management
CVP – Cost Volume Profit
CTA - Call to action
CTO – Chief Technology Officer
CX - Customer Experience
CXO - Any chief officer(s), x being a placeholder.
C2B - Consumer-to-business
C&F – Cost With Freight
CKM - Customer Knowledge Management
CTC - Cost to company
CUSIP number - Committee on Uniform Security Identification Procedures number
Cr - Credit
CA - Current account
DDA – Depletion Depreciation Amortization
Dept. – Department
DI – Dispatch information
DIFOT – Delivery in full on time, a variant of On Time In Full
Dir - Director
disc. – Discount
DMA – Direct market access
DPO - Days Payable Outstanding
DR – Depositary receipt
DSO - Days Sales Outstanding
DSP - Delivery service provider
DTP- Desktop Publishing
DVP - Delivery versus payment
EAR – Effective annual rate
EAY – Effective Annual Yield
EBITA – Earnings before interest and taxes and amortization
EBITDA – Earnings before Interest, Taxes, Depreciation, and Amortization
ECB - European Central Bank
ECS — Electronic Clearing Service or Electronic Clearing System
EDI – Electronic Data Interchange
EFSM - European Financial Stabilisation Mechanism
EFTPOS – Electronic Funds Transfer at Point of Sale
EPS – Earnings per share
EXP – Export
EOB – End of business
EOD – End of day
EOM – End of Message
ERP - Enterprise Resource Planning
ETA - Estimated Time of Arrival
ETD - Estimated Time of Departure or Estimated Time of Delivery
EMI - Equated Monthly Installment
EPC - Export Promotion Council
ECGC - Export Credit Guarantee Corporation of India
EXW - Ex Works
FAB - Feature Advantage Benefits
FDP – Finance Department
FOB – Freight On Board
FIFO – First In, First Out
FinMin - Finance Minister
Fin Min - Finance Minister
FL – Financial leverage
FOMC - Federal Open Market Committee
FP&A – Financial Planning & Analysis
FPO – Follow on public offer
FIX – Financial Information Exchange
FSA - Financial Services Authority
FTE- Full Time Equivalent
FV – Future Value
FX – Foreign exchange market
FY - Fiscal year or Financial year
FYA- For Your Action
FYI- For Your Information
FOC- Free Of Cost
F/U- Follow-Up
GAAP – Generally Accepted Accounting Principles
GAAS – Generally Accepted Audit Standards
GDP – Gross Domestic Product
GDPR – General Data Protection Regulation
GDR – Global depository receipt
GFCF – Gross fixed capital formation
GL – General Ledger
GMV – Gross Merchandise Volume
GP – Gross Profit
GPO – Group purchasing organization
GRN – Goods Receipt Note
GRNI - Goods Receipt Not Invoiced
GSV – Gross Sales Value
GVC – Global Value Chain
GMROII- Gross Margin Return on Inventory Investment
G&A – General and Administration expense: expenditures related to the day-to-day operations of a business.
HMRC - Her Majesty's Revenue and Customs
HP - Hire purchase
HQ – Headquarters
HR – Human Resources
HRD – Human Resource Development
HS Code - Harmonized Commodity Description and Coding System
IAS – International Accounting Standards
IBAN - International Bank Account Number
ICB – Industry Classification Benchmark
ICRM – Innovative Customer Relationship Management
IE – Interest expense
IFRS – International Financial Reporting Standard
ILCLP – IdentLogic Systems Customer Loyalty Program
IMF – International Monetary Fund
IMP – Import
Inc. – Incorporated
IoT – Internet of Things
IPO – Initial Public Offering
IPT - Item Per Transaction
IR – Interest Rate - typically referring to an IR derivative product
IRS – Internal Revenue Service
IRR – Internal Rate of Return
ISIN – International Securities Identification Number
ISM – Institute of Supply Management
ITT - Invitation to Tender
IYR – In Year Revenue
J – Journal
JIT - Just in time
JIS - Just in sequence
JST - Joint Supervisory Team
LBO – Leveraged Buyout
LC – Letter of credit
LIBOR – London Interbank Offered Rate
LE – Latest Estimate
LIFFE – London International Financial Futures and Options Exchange
LIFO – Last In, First Out
LLC – Limited Liability Company
LME – London Metal Exchange
LMS – Learning Management System
Ltd. – Limited Company
LTV - Loan to Value
LoU - Letters of Undertaking
MBS – mortgage-backed security
mfg. – Manufacturing
MGMT - Management
MIC - Market Identifier Code
MiFID - Markets in Financial Instruments Directive
MoM - Month on Month / Month over Month
MOQ – Minimum Order Quantity
MOU - Memorandum of understanding
MPC – marginal propensity to consume
MRO – Maintenance, Repair, and Operations
MRP - Maximum Retail Price
MSOD – Monthly Statement of Select Operational Data
MSRP - Manufacturer's Suggested Retail Price
MTD – Month-to-date
MWC – Managerial Working Capital
MPR - Monthly Progress Report
NAV – Net asset value
NCBO – No Change of Beneficial Ownership
NCND – Non-Circumvent and Non-Disclosure
NDA – Non-Disclosure Agreement
NII - Net Interest Income
NIM – Net Interest Margin
NOA – Net Operating Assets
NOI – Net Operating Income
NOPAT – Net Operating Profit After Tax
NPA - Non Performing Asset
NPL – Non-performing loan
NPV – Net Present Value
NTE - Not To Exceed
NYMEX- New York Mercantile Exchange
NYSE - New York Stock Exchange
OC – Opportunity Cost
OCF – Operating cash flow
OECD - Organisation for Economic Co-operation and Development
OEM - Original Equipment Manufacturer
OIBDA - Operating Income Before Depreciation And Amortization
OKR - Objectives and key results
OOO - Out of Office
OPEX – Operating Expenditure or Operational Expenditure
OTIF - On Time In Full
OTC – Over-the-counter (finance)
P&L – Profit and Loss
PA - Purchasing agent or Personal Assistant
PAT – Profit After Tax
PBT – Profit Before Tax
P/E – Price-to-earnings ratio
PE – Private Equity
PEG – Price-to-earnings growth ratio
PHEK – Planherstellungskosten (Product Planning cost)
PFI - Private Finance Initiative
PI or PII - Professional Indemnity (insurance coverage)
pip - Percentage in point
PMAC – Period Moving Average Cost
PO – Profit Objective or Purchase Order
POA – Plan Of Action
POS – Point of sale
PP&E – Property, plant, and equipment
PPP - Public-private partnership
PPP – Purchasing power parity
PPT - Powerpoint presentation
PR – Purchase Requisition
PTD - Project to Date
QC – Quality control or Quality costs
QoQ - Quarter on quarter
q/q – Quarter on quarter
QTD – Quarter-to-date
RAQSCI - Regulatory, Assurance of Supply, Quality, Service, Cost, Innovation
RBI - Reserve Bank of India
RBA - Reserve Bank of Australia
RE – Retained Earnings
RFI – Request for information
RFP – Request for Proposal
RFQ – Request for Quotation
RFX – Generic name for a Request for Information, Proposal or Quotation
ROA – Return on assets
ROC - Registration Of Company
ROCE – Return on Capital Employed
ROE – Return on Equity
ROI – Return on Investment
ROIC – Return on Invested Capital
RONA – Return on net assets
ROS – Return on Sales
RR – Resource rent
RSP – Retail selling price
RWA - Risk-weighted asset
R&D – Research and Development
RC – Retail | |
import pymbar
from fe import endpoint_correction
from collections import namedtuple
import pickle
import dataclasses
import time
import functools
import copy
import jax
import numpy as np
from md import minimizer
from typing import Tuple, List, Any
import os
from fe import standard_state
from fe.utils import sanitize_energies, extract_delta_Us_from_U_knk
from timemachine.lib import potentials, custom_ops
@dataclasses.dataclass
class SimulationResult:
xs: np.array
boxes: np.array
du_dps: np.array
lambda_us: np.array
def flatten(v):
return tuple(), (v.xs, v.boxes, v.du_dps, v.lambda_us)
def unflatten(aux_data, children):
xs, boxes, du_dps, lambda_us = aux_data
return SimulationResult(xs, boxes, du_dps, lambda_us)
jax.tree_util.register_pytree_node(SimulationResult, flatten, unflatten)
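# Registering SimulationResult as a JAX pytree lets instances pass through JAX
# utilities (e.g. jax.tree_util.tree_map and the custom_vjp machinery below)
# like any other container of arrays.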
def run_model_simulations(model, sys_params):
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
all_args = []
for lamb_idx, lamb in enumerate(model.lambda_schedule):
subsample_interval = 1000
all_args.append(
(
lamb,
model.box,
model.x0,
model.v0,
bound_potentials,
model.integrator,
model.barostat,
model.equil_steps,
model.prod_steps,
subsample_interval,
subsample_interval,
model.lambda_schedule,
)
)
if model.endpoint_correct:
assert isinstance(bound_potentials[-1], potentials.HarmonicBond)
all_args.append(
(
1.0,
model.box,
model.x0,
model.v0,
bound_potentials[:-1], # strip out the restraints
model.integrator,
model.barostat,
model.equil_steps,
model.prod_steps,
subsample_interval,
subsample_interval,
[], # no need to evaluate Us for the endpoint correction
)
)
results = []
if model.client is None:
for args in all_args:
results.append(simulate(*args))
else:
futures = []
for args in all_args:
futures.append(model.client.submit(simulate, *args))
for future in futures:
results.append(future.result())
return results
def simulate(
lamb,
box,
x0,
v0,
final_potentials,
integrator,
barostat,
equil_steps,
prod_steps,
x_interval,
u_interval,
lambda_windows,
):
"""
Run a simulation and collect relevant statistics for this simulation.
Parameters
----------
lamb: float
lambda value used for the equilibrium simulation
box: np.array
3x3 numpy array of the box, dtype should be np.float64
x0: np.array
Nx3 numpy array of the coordinates
v0: np.array
Nx3 numpy array of the velocities
final_potentials: list
list of unbound potentials
integrator: timemachine.Integrator
integrator to be used for dynamics
barostat: timemachine.Barostat
barostat to be used for equilibration
equil_steps: int
number of equilibration steps
prod_steps: int
number of production steps
x_interval: int
how often we store coordinates. If x_interval == 0 then
no frames are returned.
u_interval: int
how often we store energies. If u_interval == 0 then
no energies are returned
lambda_windows: list of float
lambda windows we evaluate energies at.
Returns
-------
SimulationResult
Results of the simulation.
"""
all_impls = []
# set up observables for du_dps here as well.
du_dp_obs = []
for bp in final_potentials:
impl = bp.bound_impl(np.float32)
all_impls.append(impl)
du_dp_obs.append(custom_ops.AvgPartialUPartialParam(impl, 25))
# fire minimize once again, needed for parameter interpolation
x0 = minimizer.fire_minimize(x0, all_impls, box, np.ones(100, dtype=np.float64) * lamb)
# sanity check that forces are well behaved
for bp in all_impls:
du_dx, du_dl, u = bp.execute(x0, box, lamb)
norm_forces = np.linalg.norm(du_dx, axis=1)
assert np.all(norm_forces < 25000), "Forces much greater than expected after minimization"
if integrator.seed == 0:
# this deepcopy is needed when we're running with client == None
integrator = copy.deepcopy(integrator)
integrator.seed = np.random.randint(np.iinfo(np.int32).max)
if barostat.seed == 0:
barostat = copy.deepcopy(barostat)
barostat.seed = np.random.randint(np.iinfo(np.int32).max)
intg_impl = integrator.impl()
# technically we need to only pass in the nonbonded impl
barostat_impl = barostat.impl(all_impls)
# context components: positions, velocities, box, integrator, energy fxns
ctxt = custom_ops.Context(x0, v0, box, intg_impl, all_impls, barostat_impl)
# equilibration
equil_schedule = np.ones(equil_steps) * lamb
ctxt.multiple_steps(equil_schedule)
# (ytz): intentionally hard-coded, I'd rather the end-user *not*
# muck with this unless they have a good reason to.
barostat_impl.set_interval(25)
for obs in du_dp_obs:
ctxt.add_observable(obs)
full_us, xs, boxes = ctxt.multiple_steps_U(lamb, prod_steps, np.array(lambda_windows), u_interval, x_interval)
# keep the structure of grads the same as that of final_potentials so we can properly
# form their vjps.
grads = []
for obs in du_dp_obs:
grads.append(obs.avg_du_dp())
result = SimulationResult(
xs=xs.astype("float32"),
boxes=boxes.astype("float32"),
du_dps=grads,
lambda_us=full_us,
)
return result
FreeEnergyModel = namedtuple(
"FreeEnergyModel",
[
"unbound_potentials",
"endpoint_correct",
"client",
"box",
"x0",
"v0",
"integrator",
"barostat",
"lambda_schedule",
"equil_steps",
"prod_steps",
"beta",
"prefix",
],
)
gradient = List[Any] # TODO: make this more descriptive of dG_grad structure
def _deltaG_from_results(model, results, sys_params) -> Tuple[Tuple[float, List], np.array]:
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
if model.endpoint_correct:
sim_results = results[:-1]
else:
sim_results = results
U_knk = []
N_k = []
for result in sim_results:
U_knk.append(result.lambda_us)
N_k.append(len(result.lambda_us)) # number of frames
U_knk = np.array(U_knk)
bar_dG = 0
bar_dG_err = 0
delta_Us = extract_delta_Us_from_U_knk(U_knk)
for lambda_idx in range(len(model.lambda_schedule) - 1):
fwd_delta_u = model.beta * delta_Us[lambda_idx][0]
rev_delta_u = model.beta * delta_Us[lambda_idx][1]
dG_exact, exact_bar_err = pymbar.BAR(fwd_delta_u, rev_delta_u)
bar_dG += dG_exact / model.beta
exact_bar_overlap = endpoint_correction.overlap_from_cdf(fwd_delta_u, rev_delta_u)
# probably off by a factor of two since we re-use samples.
bar_dG_err += (exact_bar_err / model.beta) ** 2
lamb_start = model.lambda_schedule[lambda_idx]
lamb_end = model.lambda_schedule[lambda_idx + 1]
print(
f"{model.prefix}_BAR: lambda {lamb_start:.3f} -> {lamb_end:.3f} dG: {dG_exact/model.beta:.3f} dG_err: {exact_bar_err/model.beta:.3f} overlap: {exact_bar_overlap:.3f}"
)
# for MBAR we need to sanitize the energies
clean_U_knks = [] # [K, F, K]
for lambda_idx, full_us in enumerate(U_knk):
clean_U_knks.append(sanitize_energies(full_us, lambda_idx))
print(
model.prefix,
" MBAR: amin",
np.amin(clean_U_knks),
"median",
np.median(clean_U_knks),
"max",
np.amax(clean_U_knks),
)
K = len(model.lambda_schedule)
clean_U_knks = np.array(clean_U_knks) # [K, F, K]
U_kn = np.reshape(clean_U_knks, (-1, K)).transpose() # [K, F*K]
u_kn = U_kn * model.beta
np.save(model.prefix + "_U_kn.npy", U_kn)
mbar = pymbar.MBAR(u_kn, N_k)
differences, error_estimates = mbar.getFreeEnergyDifferences()
f_k, error_k = differences[0], error_estimates[0]
mbar_dG = f_k[-1] / model.beta
mbar_dG_err = error_k[-1] / model.beta
bar_dG_err = np.sqrt(bar_dG_err)
dG = bar_dG # use the exact answer
dG_grad = []
# (ytz): results[-1].du_dps contain system parameter derivatives for the
# independent, gas phase simulation. They're usually ordered as:
# [Bonds, Angles, Torsions, Nonbonded]
#
# results[0].du_dps contain system parameter derivatives for the core
# restrained state. If we're doing the endpoint correction during
# decoupling stages, the derivatives are ordered as:
# [Bonds, Angles, Torsions, Nonbonded, RestraintBonds]
# Otherwise, in stages like conversion where the endpoint correction
# is turned off, the derivatives are ordered as :
# [Bonds, Angles, Torsions, Nonbonded]
# Note that this zip will always loop over only the
# [Bonds, Angles, Torsions, Nonbonded] terms, since it only
# enumerates over the smaller of the two lists.
for rhs, lhs in zip(results[-1].du_dps, results[0].du_dps):
dG_grad.append(rhs - lhs)
if model.endpoint_correct:
assert len(results[0].du_dps) - len(results[-1].du_dps) == 1
# (ytz): Fill in missing derivatives since zip() from above loops
# over the shorter array.
lhs = results[0].du_dps[-1]
rhs = 0 # zero as the energies do not depend the core restraints.
dG_grad.append(rhs - lhs)
core_restr = bound_potentials[-1]
# (ytz): tbd, automatically find optimal k_translation/k_rotation such that
# standard deviation and/or overlap is maximized
k_translation = 200.0
k_rotation = 100.0
start = time.time()
lhs_du, rhs_du, rotation_samples, translation_samples = endpoint_correction.estimate_delta_us(
k_translation=k_translation,
k_rotation=k_rotation,
core_idxs=core_restr.get_idxs(),
core_params=core_restr.params.reshape((-1, 2)),
beta=model.beta,
lhs_xs=results[-2].xs,
rhs_xs=results[-1].xs,
seed=2021,
)
dG_endpoint, endpoint_err = pymbar.BAR(model.beta * lhs_du, model.beta * np.array(rhs_du))
dG_endpoint = dG_endpoint / model.beta
endpoint_err = endpoint_err / model.beta
# compute standard state corrections for translation and rotation
dG_ssc_translation, dG_ssc_rotation = standard_state.release_orientational_restraints(
k_translation, k_rotation, model.beta
)
overlap = endpoint_correction.overlap_from_cdf(lhs_du, rhs_du)
lhs_mean = np.mean(lhs_du)
rhs_mean = np.mean(rhs_du)
print(
f"{model.prefix} bar (A) {bar_dG:.3f} bar_err {bar_dG_err:.3f} mbar (A) {mbar_dG:.3f} mbar_err {mbar_dG_err:.3f} dG_endpoint (E) {dG_endpoint:.3f} dG_endpoint_err {endpoint_err:.3f} dG_ssc_translation {dG_ssc_translation:.3f} dG_ssc_rotation {dG_ssc_rotation:.3f} overlap {overlap:.3f} lhs_mean {lhs_mean:.3f} rhs_mean {rhs_mean:.3f} lhs_n {len(lhs_du)} rhs_n {len(rhs_du)} | time: {time.time()-start:.3f}s"
)
dG += dG_endpoint + dG_ssc_translation + dG_ssc_rotation
bar_dG_err = np.sqrt(bar_dG_err ** 2 + endpoint_err ** 2)
else:
print(
f"{model.prefix} bar (A) {bar_dG:.3f} bar_err {bar_dG_err:.3f} mbar (A) {mbar_dG:.3f} mbar_err {mbar_dG_err:.3f} "
)
return (dG, bar_dG_err, results), dG_grad
@functools.partial(
jax.custom_vjp,
nondiff_argnums=(
0,
1,
),
)
def deltaG_from_results(model, results, sys_params) -> Tuple[float, List]:
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)[0]
def deltaG_from_results_fwd(model, results, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as DeltaG_from_results, but returns the full tuple"""
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)
def deltaG_from_results_bwd(model, results, residual, grad) -> Tuple[np.array]:
"""Note: nondiff args must appear first here, even though one of them appears last in the original function's signature!"""
# residual are the partial dG / partial dparams for each term
# grad[0] is the adjoint of dG w.r.t. loss: partial L/partial dG
# grad[1] is the adjoint of dG_err w.r.t. loss: which we don't use
# grad[2] is the adjoint of simulation results w.r.t. loss: which we don't use
return ([grad[0] * r for r in residual],)
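# Note (editor sketch): jax.custom_vjp needs the forward/backward pair to be
# registered before the function is differentiated; if that registration is not
# done elsewhere in this module, it would look like:
#
#   deltaG_from_results.defvjp(deltaG_from_results_fwd, deltaG_from_results_bwd)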
@functools.partial(jax.custom_vjp, nondiff_argnums=(0,))
def deltaG(model, sys_params) -> Tuple[float, List]:
results = run_model_simulations(model, sys_params)
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)[0]
def deltaG_fwd(model, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as DeltaG_from_results, but returns the full | |
The example below shows a box, and the same box
scaled by 2 in the x direction, scaled by 2 in the y direction,
and scaled by 2 in the z direction.
.. figure:: ../examples/images/example_scale1_128.png
:target: ../examples/images/example_scale1_512.png
.. literalinclude:: ../examples/example_scale1.py
:lines: 9-15
"""
directions = vector( x, y, z )
return modifier( lambda subject :
apply( "scale( %s )" % str( directions ), subject ) )
def _hull():
return modifier( lambda subject :
apply( "hull()", subject ) )
hull = _hull()
"""convex hull
This manipulator creates the convex hull around its subject,
which can be 2D or 3D.
.. figure:: ../examples/images/example_hull1_128.png
:target: ../examples/images/example_hull1_512.png
.. literalinclude:: ../examples/example_hull1.py
:lines: 11, 14-15
"""
def resize(
x: _float_or_vector,
y: float = None,
z: float = None
):
"""resize operator: resize an object
:param x: the x of the vector, or the full vector
:param y: (optional) the y of the vector
:param z: (optional) the z of the vector
The vector is either specified as a single vector
parameter, or as separate x, y and z values.
This manipulator resizes its subject to the sizes
indicated for the x, y and z direction.
A size of 0 keeps the size (in that direction) unchanged.
A size of None scales the size in that direction
with another non-0 non-None size.
The example below shows a text, and
the same text scaled to fit in a 30 by 10 rectangle.
.. figure:: ../examples/images/example_resize1_128.png
:target: ../examples/images/example_resize1_512.png
.. literalinclude:: ../examples/example_resize1.py
:lines: 9, 11
The example below shows a sphere, and
the same sphere scaled to size 40 in the x direction,
unchanged (size 10) in the y direction,
and z matching the x direction (scaled to 40).
.. figure:: ../examples/images/example_resize2_128.png
:target: ../examples/images/example_resize2_512.png
.. literalinclude:: ../examples/example_resize2.py
:lines: 9, 11
"""
amounts = vector( x, y, z )
auto = str( [ x is None for x in amounts._list() ] ).lower()
return modifier( lambda subject :
apply(
"resize( %s, auto=%s )" % ( str( amounts ), auto ),
subject ) )
def _negative():
return modifier( lambda subject :
shape( "", str( subject ) ) )
negative = _negative()
"""makes its subject a dominant negative
This manipulator makes its subject a dominant negative:
something that will not be filled.
"""
def _positive():
return modifier(
lambda subject :
shape( str( subject ), "" ) )
positive = _positive()
"""removes dominant negatives
The positive manipulator subtracts and removes
the dominant emptinesses in its subject, so an
solid can be placed in the space of what was a dominant emptiness.
"""
def repeat2(
x: _float_or_vector,
y: float = None,
z: float = None
):
"""repeat at two positions
:param x: the x of the vector, or the full vector
:param y: (optional) the y of the vector
:param z: (optional) the z of the vector
The vector is either specified as a single vector
parameter, or as separate x, y and z values.
This manipulator repeats its subject twice:
once at its original location,
and once shifted by the specified vector.
.. figure:: ../examples/images/example_repeat2_128.png
:target: ../examples/images/example_repeat2_512.png
.. literalinclude:: ../examples/example_repeat2.py
:lines: 10
"""
v = vector( x, y, z )
return modifier(
lambda subject :
subject + ( v ** subject ) )
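# In other words: applying repeat2(v) to a subject s yields s + (v ** s), the
# union of s and a copy of s shifted over the vector v.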
def repeat4(
x: _float_or_vector,
y: float = None
):
"""repeat at four positions
:param x: the x of the vector, or the full vector
:param y: (optional) the y of the vector
The vector is either specified as a single vector
parameter, or as separate x and y values.
This manipulator repeats its subject at the
four corners of the rectangle specified by the parameters.
.. figure:: ../examples/images/example_repeat4_128.png
:target: ../examples/images/example_repeat4_512.png
.. literalinclude:: ../examples/example_repeat4.py
:lines: 10
"""
v = vector( x, y )
return modifier(
lambda subject :
vector( 0, 0 ) ** subject +
vector( v.x, 0 ) ** subject +
vector( 0, v.y ) ** subject +
vector( v.x, v.y ) ** subject
)
def repeat8(
x: _float_or_vector,
y: float = None,
z: float = None
):
"""repeat at eight positions
:param x: the x of the vector, or the full vector
:param y: (optional) the y of the vector
:param z: (optional) the z of the vector
The vector is either specified as a single vector
parameter, or as separate x, y and z values.
This manipulator repeats its subject at the corners
of the box specified by the parameters.
.. figure:: ../examples/images/example_repeat8_128.png
:target: ../examples/images/example_repeat8_512.png
.. literalinclude:: ../examples/example_repeat8.py
:lines: 10
"""
v = vector( x, y, z )
return modifier(
lambda subject :
vector( 0, 0, 0 ) ** subject +
vector( v.x, 0, 0 ) ** subject +
vector( 0, v.y, 0 ) ** subject +
vector( v.x, v.y, 0 ) ** subject +
vector( 0, 0, v.z ) ** subject +
vector( v.x, 0, v.z ) ** subject +
vector( 0, v.y, v.z ) ** subject +
vector( v.x, v.y, v.z ) ** subject
)
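# Illustrative note (plain Python, standalone sketch): the eight shifted
# copies above are exactly the corners of the box, i.e. the Cartesian product
# {0, x} x {0, y} x {0, z}; the sizes below are arbitrary example values.
def _repeat8_corners_demo( x = 30, y = 20, z = 10 ):
    from itertools import product
    corners = list( product( ( 0, x ), ( 0, y ), ( 0, z ) ) )
    assert len( corners ) == 8
    return corners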
#============================================================================
#
# colors
#
#============================================================================
_colors = [
"Lavender", "Thistle", "Plum", "Violet", "Orchid", "Fuchsia", "Magenta",
"MediumOrchid", "MediumPurple", "BlueViolet", "DarkViolet", "DarkOrchid",
"DarkMagenta", "Purple", "Indigo", "DarkSlateBlue", "SlateBlue",
"MediumSlateBlue", "Pink", "LightPink", "HotPink", "DeepPink",
"MediumVioletRed", "PaleVioletRed", "Aqua", "Cyan", "LightCyan",
"PaleTurquoise", "Aquamarine", "Turquoise", "MediumTurquoise",
"DarkTurquoise", "CadetBlue", "SteelBlue", "LightSteelBlue",
"PowderBlue", "LightBlue", "SkyBlue", "LightSkyBlue", "DeepSkyBlue",
"DodgerBlue", "CornflowerBlue", "RoyalBlue", "Blue", "MediumBlue",
"DarkBlue", "Navy", "MidnightBlue", "IndianRed", "LightCoral", "Salmon",
"DarkSalmon", "LightSalmon", "Red", "Crimson", "FireBrick", "DarkRed",
"GreenYellow", "Chartreuse", "LawnGreen", "Lime", "LimeGreen",
"PaleGreen", "LightGreen", "MediumSpringGreen", "SpringGreen",
"MediumSeaGreen", "SeaGreen", "ForestGreen", "Green", "DarkGreen",
"YellowGreen", "OliveDrab", "Olive", "DarkOliveGreen", "MediumAquamarine",
"DarkSeaGreen", "LightSeaGreen", "DarkCyan", "Teal", "LightSalmon",
"Coral", "Tomato", "OrangeRed", "DarkOrange", "Orange", "Gold", "Yellow",
"LightYellow", "LemonChiffon", "LightGoldenrodYellow", "PapayaWhip",
"Moccasin", "PeachPuff", "PaleGoldenrod", "Khaki", "DarkKhaki",
"Cornsilk", "BlanchedAlmond", "Bisque", "NavajoWhite", "Wheat",
"BurlyWood", "Tan", "RosyBrown", "SandyBrown", "Goldenrod",
"DarkGoldenrod", "Peru", "Chocolate", "SaddleBrown", "Sienna", "Brown",
"Maroon", "White", "Snow", "Honeydew", "MintCream", "Azure", "AliceBlue",
"GhostWhite", "WhiteSmoke", "Seashell", "Beige", "OldLace", "FloralWhite",
"Ivory", "AntiqueWhite", "Linen", "LavenderBlush", "MistyRose",
"Gainsboro", "LightGrey", "Silver", "DarkGray", "Gray", "DimGray",
"LightSlateGray", "SlateGray", "DarkSlateGray", "Black" ]
_current_module = __import__(__name__)
for c in _colors:
# c_copy binds the current value of c as a default argument;
# otherwise the *variable* c would be captured (and all colors would end up Black).
f = modifier( lambda s, c_copy = c: apply( 'color( "%s" )' % c_copy, s ))
setattr( _current_module, c, f )
setattr( _current_module, c.lower(), f )
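# Illustrative sketch (standalone, not part of the library): why the c_copy
# default argument above is needed. Python closures are late-binding, so
# without the default every generated modifier would see the final loop value.
def _closure_capture_demo():
    late = [ lambda: name for name in ( "Red", "Green", "Black" ) ]
    bound = [ lambda name = name: name for name in ( "Red", "Green", "Black" ) ]
    assert [ f() for f in late ] == [ "Black", "Black", "Black" ]
    assert [ f() for f in bound ] == [ "Red", "Green", "Black" ]
    return bound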
def color(
r: _float_or_vector,
g: float = None,
b: float = None,
alpha: float = 1.0
):
"""a color in RGB format
:param r: the r of the vector, or the full color vector
:param g: (optional) the g of the color
:param b: (optional) the b of the color
:param alpha: (optional) the alpha value (opacity)
The color is either specified as a single vector
parameter, or as separate r, g and b values.
When a single vector is specified, an alpha value
(if present) must be passed as a named parameter.
The individual color values must be in the range 0..255.
An alpha of 0 is full transparency, a value of 1 is a solid color.
A lower alpha makes the object more faintly colored;
it does not make it transparent (translucent).
Colors are visible in the OpenSCAD preview, but NOT after
rendering. Hence the examples below show previews, unlike
the other examples, which show the result of rendering.
.. figure:: ../examples/images/example_color1_128.png
:target: ../examples/images/example_color1_512.png
.. literalinclude:: ../examples/example_color1.py
:lines: 10-12
The color names in the World Wide Web
iI1I , OooOoOo , III1I1Iii1iiI )
if 78 - 78: iII111i + I11i . ooOoO0o - iII111i . Ii1I
if 30 - 30: I1IiiI + OoO0O00 % Ii1I * iII111i / Oo0Ooo - I11i
if 64 - 64: iIii1I11I1II1
if 21 - 21: Oo0Ooo . II111iiii
ooo000o000 = "none" if len ( iiIiIIi . registered_rlocs ) == 0 else ""
if ( ooo000o000 != "" ) : ooo000o000 = lisp . lisp_print_cour ( ooo000o000 )
i1I1i111Ii += "<br>Registered RLOC-set ({}): {}<br>" . format ( "merge-semantics" if ( iiIiIIi . merge_register_requested ) else "replacement-semantics" ,
# iIii1I11I1II1 * i1IIi * iII111i % OOooOOo % I1ii11iIi11i + II111iiii
ooo000o000 )
if ( ooo000o000 == "" ) :
for OooO0OO in iiIiIIi . registered_rlocs :
IiIiII1 = lisp . lisp_print_cour ( OooO0OO . rloc . print_address ( ) )
Iii1iiIi1II = lisp . lisp_print_cour ( OooO0OO . print_state ( ) )
OO0O00oOo = lisp . lisp_print_cour ( str ( OooO0OO . priority ) )
ii1II = lisp . lisp_print_cour ( str ( OooO0OO . weight ) )
iI1I = lisp . lisp_print_cour ( str ( OooO0OO . mpriority ) )
OooOoOo = lisp . lisp_print_cour ( str ( OooO0OO . mweight ) )
III1I1Iii1iiI = OooO0OO . print_rloc_name ( True )
if ( III1I1Iii1iiI != "" ) : III1I1Iii1iiI = ", " + III1I1Iii1iiI
if 42 - 42: IiII - o0oOOo0O0Ooo . II111iiii
i1I1i111Ii += '''{}{}, state: {}, up/uw/mp/mw: {}/{}/{}/{}{}<br>''' . format ( o0oooOO00 , IiIiII1 , Iii1iiIi1II , OO0O00oOo , ii1II , iI1I , OooOoOo , III1I1Iii1iiI )
if 94 - 94: I1IiiI * Ii1I . I11i
if ( OooO0OO . geo ) :
oOoOoOoo0 = lisp . lisp_print_cour ( OooO0OO . geo . print_geo_url ( ) )
i1I1i111Ii += "{}geo: {}<br>" . format ( o00o , oOoOoOoo0 )
if 34 - 34: OoOoOO00 - OOooOOo + O0 . Ii1I
if ( OooO0OO . elp ) :
iIi1i1iIi1iI = lisp . lisp_print_cour ( OooO0OO . elp . print_elp ( False ) )
i1I1i111Ii += "{}elp: {}<br>" . format ( o00o , iIi1i1iIi1iI )
if 26 - 26: OoooooooOO * I1IiiI + OOooOOo
if ( OooO0OO . rle ) :
IiIii1i111 = lisp . lisp_print_cour ( OooO0OO . rle . print_rle ( True , True ) )
i1I1i111Ii += "{}rle: {}<br>" . format ( o00o , IiIii1i111 )
if 43 - 43: O0
if ( OooO0OO . json ) :
Ii1 = lisp . lisp_print_cour ( OooO0OO . json . print_json ( True ) )
i1I1i111Ii += "{}json: {}<br>" . format ( o00o , Ii1 )
if 14 - 14: iIii1I11I1II1 % iIii1I11I1II1 * i11iIiiIii - OoO0O00 - I11i
if 63 - 63: OoO0O00
if 69 - 69: iIii1I11I1II1 . I1ii11iIi11i % ooOoO0o + iIii1I11I1II1 / O0 / I1ii11iIi11i
if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
ooo000o000 = "none" if len ( iiIiIIi . individual_registrations ) == 0 else ""
if ( ooo000o000 == "none" ) :
ooo000o000 = lisp . lisp_print_cour ( ooo000o000 )
elif ( iiIiIIi . inconsistent_registration ) :
ooo000o000 = lisp . red ( "inconsistent registrations" , True )
ooo000o000 = lisp . lisp_print_cour ( ooo000o000 )
if 75 - 75: IiII . ooOoO0o
i1I1i111Ii += "<br>Individual registrations: {}<br>" . format ( ooo000o000 )
if 50 - 50: OoOoOO00
if 60 - 60: ooOoO0o * iIii1I11I1II1 * I1ii11iIi11i * Oo0Ooo
if 69 - 69: Ii1I * O0 . i11iIiiIii / Ii1I . o0oOOo0O0Ooo
if 63 - 63: I11i + o0oOOo0O0Ooo . II111iiii - I1IiiI
if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
Oo000ooOOO = [ ]
for Ii11i1I11i in list ( iiIiIIi . individual_registrations . values ( ) ) :
if ( Ii11i1I11i . registered == False ) : continue
o000ooooO0o = old_div ( Ii11i1I11i . register_ttl , 2 )
if ( time . time ( ) - Ii11i1I11i . last_registered >= o000ooooO0o ) : continue
Oo000ooOOO . append ( Ii11i1I11i )
if 13 - 13: IiII / i11iIiiIii % II111iiii % I11i . I1ii11iIi11i
for Ii11i1I11i in list ( iiIiIIi . individual_registrations . values ( ) ) :
if ( Ii11i1I11i . registered == False ) : continue
o000ooooO0o = old_div ( Ii11i1I11i . register_ttl , 2 )
if ( time . time ( ) - Ii11i1I11i . last_registered >= o000ooooO0o ) : Oo000ooOOO . append ( Ii11i1I11i )
if 8 - 8: OoOoOO00 + Oo0Ooo - II111iiii
for Ii11i1I11i in list ( iiIiIIi . individual_registrations . values ( ) ) :
if ( Ii11i1I11i . registered == False ) : Oo000ooOOO . append ( Ii11i1I11i )
if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
if 39 - 39: I1Ii111
for iiIiIIi in Oo000ooOOO :
iiI1IIIi = lisp . green ( "yes" , True ) if iiIiIIi . registered else lisp . red ( "no" , True )
if 86 - 86: I11i * I1IiiI + I11i + II111iiii
OooO0OOo0OOo0o0O0O = "sha1" if ( iiIiIIi . auth_sha1_or_sha2 ) else "sha2"
OooO0OOo0OOo0o0O0O = lisp . lisp_print_cour ( OooO0OOo0OOo0o0O0O )
o000ooooO0o = str ( old_div ( iiIiIIi . register_ttl , 60 ) ) + " mins"
o000ooooO0o = lisp . lisp_print_cour ( o000ooooO0o )
OOOooo = iiIiIIi . print_flags ( False )
OOOooo = lisp . lisp_print_cour ( OOOooo )
IiIiII1 = lisp . lisp_print_cour ( iiIiIIi . last_registerer . print_address_no_iid ( ) )
if 8 - 8: I1Ii111 - iII111i / ooOoO0o
o0ooooO0o0O = lisp . lisp_print_elapsed ( iiIiIIi . first_registered )
o0ooooO0o0O = lisp . lisp_print_cour ( o0ooooO0o0O )
iiIi11iI1iii = lisp . lisp_print_elapsed ( iiIiIIi . last_registered )
if ( time . time ( ) - iiIiIIi . last_registered >=
( old_div ( iiIiIIi . register_ttl , 2 ) ) and iiIi11iI1iii != "never" ) :
iiIi11iI1iii = lisp . red ( iiIi11iI1iii , True )
if 96 - 96: OoOoOO00
iiIi11iI1iii = lisp . lisp_print_cour ( iiIi11iI1iii )
ii = lisp . lisp_print_cour ( str ( iiIiIIi . site_id ) )
O0oOo00o = lisp . lisp_print_cour ( lisp . lisp_hex_string ( iiIiIIi . xtr_id ) )
if 29 - 29: I1ii11iIi11i / i1IIi . I1IiiI - OoOoOO00 - OoOoOO00 - Ii1I
i1I1i111Ii += '''
{}Registerer: {}, xTR-ID: 0x{}, site-id: {}, registered: {}<br>
{}First registered: {}, last registered: {}, registration TTL: {},
auth-type: {}, registration flags: {}<br>
''' . format ( o0oooOO00 , IiIiII1 , O0oOo00o , ii , iiI1IIIi , o0oooOO00 , o0ooooO0o0O , iiIi11iI1iii , o000ooooO0o , OooO0OOo0OOo0o0O0O ,
OOOooo )
if 20 - 20: i1IIi % OoO0O00 . I1IiiI / IiII * i11iIiiIii * OOooOOo
ooo000o000 = "none" if len ( iiIiIIi . registered_rlocs ) == 0 else ""
ooo000o000 = lisp . lisp_print_cour ( ooo000o000 )
i1I1i111Ii += "{}Registered RLOC-set: {}<br>" . format ( o0oooOO00 , ooo000o000 )
if 85 - 85: o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . O0 % I1Ii111
for OooO0OO in iiIiIIi . registered_rlocs :
IiIiII1 = lisp . lisp_print_cour ( OooO0OO . rloc . print_address ( ) )
Iii1iiIi1II = lisp . lisp_print_cour ( OooO0OO . print_state ( ) )
OO0O00oOo = lisp . lisp_print_cour ( str ( OooO0OO . priority ) )
ii1II = lisp . lisp_print_cour ( str ( OooO0OO . weight ) )
iI1I = lisp . lisp_print_cour ( str ( OooO0OO . mpriority ) )
OooOoOo = lisp . lisp_print_cour ( str ( OooO0OO . mweight ) )
III1I1Iii1iiI = OooO0OO . print_rloc_name ( True )
if ( III1I1Iii1iiI != "" ) : III1I1Iii1iiI = ", " + III1I1Iii1iiI
if 90 - 90: Oo0Ooo % O0 * iIii1I11I1II1 . iII111i
i1I1i111Ii += '''{}{}, state: {}, up/uw/mp/mw: {}/{}/{}/{}{}<br>''' . format ( iiIiii1IIIII , IiIiII1 , Iii1iiIi1II , OO0O00oOo , ii1II , iI1I , OooOoOo , III1I1Iii1iiI )
if 8 - 8: ooOoO0o + II111iiii / iII111i / I11i
if ( | |
<gh_stars>1-10
import os
from os.path import join as pjoin, exists as pexists
import bz2
import numpy as np
import pandas as pd
import gzip
import shutil
import torch
import random
import warnings
from sklearn.model_selection import train_test_split
from .utils import download, Timer
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import QuantileTransformer, StandardScaler
from sklearn.compose import ColumnTransformer
from category_encoders import LeaveOneOutEncoder
from zipfile import ZipFile
import requests
class MyPreprocessor:
def __init__(self, random_state=1377, cat_features=None, normalize=False,
y_normalize=False, quantile_transform=False,
output_distribution='normal', n_quantiles=1000,
quantile_noise=0, **kwargs):
"""
Preprocessor is a helper class that bundles the feature/target preprocessing used for an experiment
:param dataset: a pre-defined dataset name (see DATASETS) or a custom dataset
Your dataset should be at (or will be downloaded into) {data_path}/{dataset}
:param random_state: global random seed for an experiment
:param data_path: a shared data folder path where the dataset is stored (or will be downloaded into)
:param normalize: standardize features by removing the mean and scaling to unit variance
:param quantile_transform: transforms the features to follow a normal distribution.
:param output_distribution: if quantile_transform == True, data is projected onto this distribution
See the same param of sklearn QuantileTransformer
:param quantile_noise: if specified, fits QuantileTransformer on data with added gaussian noise
with std = :quantile_noise: * data.std ; this will cause discrete values to be more separable
Please note that this transformation does NOT apply gaussian noise to the resulting data,
the noise is only applied for QuantileTransformer
:param kwargs: depending on the dataset, you may select train size, test size or other params
If dataset is not in DATASETS, provide six keys: X_train, y_train, X_valid, y_valid, X_test and y_test
"""
self.random_state = random_state
self.cat_features = cat_features
self.normalize = normalize
self.y_normalize = y_normalize
self.quantile_transform = quantile_transform
self.output_distribution = output_distribution
self.quantile_noise = quantile_noise
self.n_quantiles = n_quantiles
self.transformers = []
self.y_mu, self.y_std = None, None
self.feature_names = None
def fit(self, X, y=None):
assert isinstance(X, pd.DataFrame), 'X is not a dataframe! %s' % type(X)
self.feature_names = X.columns
if self.cat_features is not None:
cat_encoder = LeaveOneOutEncoder(cols=self.cat_features)
cat_encoder.fit(X, y)
self.transformers.append(cat_encoder)
if self.normalize:
scaler = StandardScaler(copy=False)
scaler.fit(X)
self.transformers.append(scaler)
if self.quantile_transform:
quantile_train = X.copy()
if self.cat_features is not None:
quantile_train = cat_encoder.transform(quantile_train)
if self.quantile_noise:
r = np.random.RandomState(self.random_state)
stds = np.std(quantile_train.values, axis=0, keepdims=True)
noise_std = self.quantile_noise / np.maximum(stds, self.quantile_noise)
quantile_train += noise_std * r.randn(*quantile_train.shape)
qt = QuantileTransformer(random_state=self.random_state,
n_quantiles=self.n_quantiles,
output_distribution=self.output_distribution,
copy=False)
# if self.cat_features is not None:
# conti_fs = [f for f in self.feature_names if f not in self.cat_features]
# qt = ColumnTransformer(transformers=[("quantile", qt, conti_fs)],
# remainder='passthrough')
qt.fit(quantile_train)
self.transformers.append(qt)
if y is not None and self.y_normalize:
self.y_mu, self.y_std = y.mean(axis=0), y.std(axis=0)
print("Normalize y. mean = {}, std = {}".format(self.y_mu, self.y_std))
def transform(self, *args):
assert len(args) <= 2
X = args[0]
if len(self.transformers) > 0:
X = X.copy()
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.feature_names)
for i, t in enumerate(self.transformers):
# Leave one out transform when it's training set
X = t.transform(X)
# The LeaveOneOutEncoder outputs np.float64, so cast back to float32
X = X.astype(np.float32)
if len(args) == 1:
return X
y = args[1]
if y is None:
return X, None
if self.y_normalize and self.y_mu is not None and self.y_std is not None:
y = (y - self.y_mu) / self.y_std
return X, y
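# Illustrative usage sketch (hypothetical data, not part of the original
# module): the intended fit/transform flow of MyPreprocessor on a tiny
# synthetic frame with one categorical column; column names are made up.
def _my_preprocessor_demo():
    X = pd.DataFrame({"f0": [0.1, 0.4, 0.9, 0.3],
                      "city": ["a", "b", "a", "b"]})
    y = np.array([1.0, 2.0, 3.0, 4.0])
    prep = MyPreprocessor(cat_features=["city"], y_normalize=True)
    prep.fit(X, y)
    # "city" is leave-one-out encoded, features cast to float32, y standardized
    X_t, y_t = prep.transform(X, y)
    return X_t.dtypes, y_t.mean()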
def download_file_from_google_drive(id, destination):
'''
https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
'''
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
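# Hedged usage sketch: the Google Drive file id and destination path below are
# hypothetical placeholders, not real project artifacts.
# download_file_from_google_drive("<drive-file-id>", pjoin("./data", "archive.zip"))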
def fetch_A9A(path='./data/', train_size=None, valid_size=None, test_size=None, fold=0):
path = pjoin(path, 'A9A')
train_path = pjoin(path, 'a9a')
test_path = pjoin(path, 'a9a.t')
if not all(pexists(fname) for fname in (train_path, test_path)):
os.makedirs(path, exist_ok=True)
download("https://www.dropbox.com/s/9cqdx166iwonrj9/a9a?dl=1", train_path)
download("https://www.dropbox.com/s/sa0ds895c0v4xc6/a9a.t?dl=1", test_path)
X_train, y_train = load_svmlight_file(train_path, dtype=np.float32, n_features=123)
X_test, y_test = load_svmlight_file(test_path, dtype=np.float32, n_features=123)
X_train, X_test = X_train.toarray(), X_test.toarray()
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
y_train, y_test = y_train.astype(np.int), y_test.astype(np.int)
if all(sizes is None for sizes in (train_size, valid_size, test_size)):
train_idx_path = pjoin(path, 'stratified_train_idx.txt')
valid_idx_path = pjoin(path, 'stratified_valid_idx.txt')
if not all(pexists(fname) for fname in (train_idx_path, valid_idx_path)):
download("https://www.dropbox.com/s/xy4wwvutwikmtha/stratified_train_idx.txt?dl=1", train_idx_path)
download("https://www.dropbox.com/s/nthpxofymrais5s/stratified_test_idx.txt?dl=1", valid_idx_path)
train_idx = pd.read_csv(train_idx_path, header=None)[0].values
valid_idx = pd.read_csv(valid_idx_path, header=None)[0].values
else:
assert train_size, "please provide either train_size or none of sizes"
if valid_size is None:
valid_size = len(X_train) - train_size
assert valid_size > 0
if train_size + valid_size > len(X_train):
warnings.warn('train_size + valid_size = {} exceeds dataset size: {}.'.format(
train_size + valid_size, len(X_train)), Warning)
if test_size is not None:
warnings.warn('Test set is fixed for this dataset.', Warning)
shuffled_indices = np.random.permutation(np.arange(len(X_train)))
train_idx = shuffled_indices[:train_size]
valid_idx = shuffled_indices[train_size: train_size + valid_size]
return dict(
X_train=X_train[train_idx], y_train=y_train[train_idx],
X_valid=X_train[valid_idx], y_valid=y_train[valid_idx],
X_test=X_test, y_test=y_test,
problem='classification',
)
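# Illustrative driver sketch (hypothetical; downloads data on first use).
# fetch_A9A above and the other fetch_* helpers below return the same
# X_/y_ split keys, so callers can unpack the dictionary uniformly.
def _load_a9a_splits(path='./data/'):
    data = fetch_A9A(path=path)
    X_train, y_train = data['X_train'], data['y_train']
    X_valid, y_valid = data['X_valid'], data['y_valid']
    X_test, y_test = data['X_test'], data['y_test']
    return X_train.shape, X_valid.shape, X_test.shape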
def fetch_EPSILON(path='./data/', train_size=None, valid_size=None, test_size=None, fold=0):
path = pjoin(path, 'EPSILON')
train_path = pjoin(path, 'epsilon_normalized')
test_path = pjoin(path, 'epsilon_normalized.t')
if not all(pexists(fname) for fname in (train_path, test_path)):
os.makedirs(path, exist_ok=True)
train_archive_path = pjoin(path, 'epsilon_normalized.bz2')
test_archive_path = pjoin(path, 'epsilon_normalized.t.bz2')
if not all(pexists(fname) for fname in (train_archive_path, test_archive_path)):
download("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/epsilon_normalized.bz2", train_archive_path)
download("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/epsilon_normalized.t.bz2", test_archive_path)
print("unpacking dataset")
for file_name, archive_name in zip((train_path, test_path), (train_archive_path, test_archive_path)):
zipfile = bz2.BZ2File(archive_name)
with open(file_name, 'wb') as f:
f.write(zipfile.read())
with Timer("reading dataset (it may take a long time)"):
X_train, y_train = load_svmlight_file(train_path, dtype=np.float32, n_features=2000)
X_test, y_test = load_svmlight_file(test_path, dtype=np.float32, n_features=2000)
X_train, X_test = X_train.toarray(), X_test.toarray()
y_train, y_test = y_train.astype(np.int), y_test.astype(np.int)
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
if all(sizes is None for sizes in (train_size, valid_size, test_size)):
train_idx_path = pjoin(path, 'stratified_train_idx.txt')
valid_idx_path = pjoin(path, 'stratified_valid_idx.txt')
if not all(pexists(fname) for fname in (train_idx_path, valid_idx_path)):
download("https://www.dropbox.com/s/wxgm94gvm6d3xn5/stratified_train_idx.txt?dl=1", train_idx_path)
download("https://www.dropbox.com/s/fm4llo5uucdglti/stratified_valid_idx.txt?dl=1", valid_idx_path)
train_idx = pd.read_csv(train_idx_path, header=None)[0].values
valid_idx = pd.read_csv(valid_idx_path, header=None)[0].values
else:
assert train_size, "please provide either train_size or none of sizes"
if valid_size is None:
valid_size = len(X_train) - train_size
assert valid_size > 0
if train_size + valid_size > len(X_train):
warnings.warn('train_size + valid_size = {} exceeds dataset size: {}.'.format(
train_size + valid_size, len(X_train)), Warning)
if test_size is not None:
warnings.warn('Test set is fixed for this dataset.', Warning)
shuffled_indices = np.random.permutation(np.arange(len(X_train)))
train_idx = shuffled_indices[:train_size]
valid_idx = shuffled_indices[train_size: train_size + valid_size]
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
return dict(
X_train=X_train.iloc[train_idx], y_train=y_train[train_idx],
X_valid=X_train.iloc[valid_idx], y_valid=y_train[valid_idx],
X_test=X_test, y_test=y_test,
problem='classification',
)
def fetch_PROTEIN(path='./data/', train_size=None, valid_size=None, test_size=None, fold=0):
"""
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#protein
"""
path = pjoin(path, 'PROTEIN')
train_path = pjoin(path, 'protein')
test_path = pjoin(path, 'protein.t')
if not all(pexists(fname) for fname in (train_path, test_path)):
os.makedirs(path, exist_ok=True)
download("https://www.dropbox.com/s/pflp4vftdj3qzbj/protein.tr?dl=1", train_path)
download("https://www.dropbox.com/s/z7i5n0xdcw57weh/protein.t?dl=1", test_path)
for fname in (train_path, test_path):
raw = open(fname).read().replace(' .', '0.')
with open(fname, 'w') as f:
f.write(raw)
X_train, y_train = load_svmlight_file(train_path, dtype=np.float32, n_features=357)
X_test, y_test = load_svmlight_file(test_path, dtype=np.float32, n_features=357)
X_train, X_test = X_train.toarray(), X_test.toarray()
y_train, y_test = y_train.astype(np.int), y_test.astype(np.int)
if all(sizes is None for sizes in (train_size, valid_size, test_size)):
train_idx_path = pjoin(path, 'stratified_train_idx.txt')
valid_idx_path = pjoin(path, 'stratified_valid_idx.txt')
if not all(pexists(fname) for fname in (train_idx_path, valid_idx_path)):
download("https://www.dropbox.com/s/wq2v9hl1wxfufs3/small_stratified_train_idx.txt?dl=1", train_idx_path)
download("https://www.dropbox.com/s/7o9el8pp1bvyy22/small_stratified_valid_idx.txt?dl=1", valid_idx_path)
train_idx = pd.read_csv(train_idx_path, header=None)[0].values
valid_idx = pd.read_csv(valid_idx_path, header=None)[0].values
else:
assert train_size, "please provide either train_size or none of sizes"
if valid_size is None:
valid_size = len(X_train) - train_size
assert valid_size > 0
if train_size + valid_size > len(X_train):
warnings.warn('train_size + valid_size = {} exceeds dataset size: {}.'.format(
train_size + valid_size, len(X_train)), Warning)
if test_size is not None:
warnings.warn('Test set is fixed for this dataset.', Warning)
shuffled_indices = np.random.permutation(np.arange(len(X_train)))
train_idx = shuffled_indices[:train_size]
valid_idx = shuffled_indices[train_size: train_size + valid_size]
X_train = pd.DataFrame(X_train)
X_test = pd.DataFrame(X_test)
return dict(
X_train=X_train.iloc[train_idx], y_train=y_train[train_idx],
X_valid=X_train.iloc[valid_idx], y_valid=y_train[valid_idx],
X_test=X_test, y_test=y_test
)
def fetch_YEAR(path='./data/', train_size=None, valid_size=None, test_size=51630, fold=0):
path = pjoin(path, 'YEAR')
data_path = pjoin(path, 'data.csv')
if not pexists(data_path):
os.makedirs(path, exist_ok=True)
download('https://www.dropbox.com/s/l09pug0ywaqsy0e/YearPredictionMSD.txt?dl=1', data_path)
n_features = 91
types = {i: (np.float32 if i != 0 else np.int) for i in range(n_features)}
data = pd.read_csv(data_path, header=None, dtype=types)
data_train, data_test = data.iloc[:-test_size], data.iloc[-test_size:]
X_train, y_train = data_train.iloc[:, 1:].values, data_train.iloc[:, 0].values
X_test, y_test = data_test.iloc[:, 1:].values, data_test.iloc[:, 0].values
if all(sizes is None for sizes in (train_size, valid_size)):
train_idx_path = pjoin(path, 'stratified_train_idx.txt')
valid_idx_path = pjoin(path, 'stratified_valid_idx.txt')
if not all(pexists(fname) for fname in (train_idx_path, valid_idx_path)):
download("https://www.dropbox.com/s/00u6cnj9mthvzj1/stratified_train_idx.txt?dl=1", train_idx_path)
download("https://www.dropbox.com/s/420uhjvjab1bt7k/stratified_valid_idx.txt?dl=1", valid_idx_path)
| |
JA': None,
'BUGINESE LETTER KA': None,
'BUGINESE LETTER LA': None,
'BUGINESE LETTER MA': None,
'BUGINESE LETTER MPA': None,
'BUGINESE LETTER NA': None,
'BUGINESE LETTER NGA': None,
'BUGINESE LETTER NGKA': None,
'BUGINESE LETTER NRA': None,
'BUGINESE LETTER NYA': None,
'BUGINESE LETTER NYCA': None,
'BUGINESE LETTER PA': None,
'BUGINESE LETTER RA': None,
'BUGINESE LETTER SA': None,
'BUGINESE LETTER TA': None,
'BUGINESE LETTER VA': None,
'BUGINESE LETTER YA': None,
'BUGINESE PALLAWA': None,
'BUGINESE VOWEL SIGN AE': None,
'BUGINESE VOWEL SIGN E': None,
'BUGINESE VOWEL SIGN I': None,
'BUGINESE VOWEL SIGN O': None,
'BUGINESE VOWEL SIGN U': None,
'BYZANTINE MUSICAL SYMBOL FTHORA SKLIRON CHROMA VASIS': None,
'CANADIAN SYLLABICS AAY': None,
'CANADIAN SYLLABICS AY': None,
'CANADIAN SYLLABICS BEAVER DENE L': None,
'CANADIAN SYLLABICS BEAVER DENE R': None,
'CANADIAN SYLLABICS BLACKFOOT W': None,
'CANADIAN SYLLABICS CARRIER DENE GEE': None,
'CANADIAN SYLLABICS CARRIER DENTAL S': None,
'CANADIAN SYLLABICS CARRIER GAA': None,
'CANADIAN SYLLABICS CARRIER GWA': None,
'CANADIAN SYLLABICS CARRIER GWU': None,
'CANADIAN SYLLABICS CARRIER JWA': None,
'CANADIAN SYLLABICS EASTERN W': None,
'CANADIAN SYLLABICS FINAL RAISED DOT': None,
'CANADIAN SYLLABICS FINAL SMALL RING': None,
'CANADIAN SYLLABICS HYPHEN': None,
'CANADIAN SYLLABICS KAY': None,
'CANADIAN SYLLABICS KWAY': None,
'CANADIAN SYLLABICS LAY': None,
'CANADIAN SYLLABICS MAY': None,
'CANADIAN SYLLABICS NAY': None,
'CANADIAN SYLLABICS NOY': None,
'CANADIAN SYLLABICS NWI': None,
'CANADIAN SYLLABICS NWII': None,
'CANADIAN SYLLABICS NWO': None,
'CANADIAN SYLLABICS NWOO': None,
'CANADIAN SYLLABICS OJIBWAY C': None,
'CANADIAN SYLLABICS OJIBWAY K': None,
'CANADIAN SYLLABICS OJIBWAY M': None,
'CANADIAN SYLLABICS OJIBWAY N': None,
'CANADIAN SYLLABICS OJIBWAY NWI': None,
'CANADIAN SYLLABICS OJIBWAY NWII': None,
'CANADIAN SYLLABICS OJIBWAY NWO': None,
'CANADIAN SYLLABICS OJIBWAY NWOO': None,
'CANADIAN SYLLABICS OJIBWAY P': None,
'CANADIAN SYLLABICS OJIBWAY S': None,
'CANADIAN SYLLABICS OJIBWAY SH': None,
'CANADIAN SYLLABICS OJIBWAY T': None,
'CANADIAN SYLLABICS OY': None,
'CANADIAN SYLLABICS PAY': None,
'CANADIAN SYLLABICS POY': None,
'CANADIAN SYLLABICS PWOY': None,
'CANADIAN SYLLABICS R-CREE RWE': None,
'CANADIAN SYLLABICS RAY': None,
'CANADIAN SYLLABICS RWA': None,
'CANADIAN SYLLABICS RWEE': None,
'CANADIAN SYLLABICS RWI': None,
'CANADIAN SYLLABICS RWII': None,
'CANADIAN SYLLABICS RWO': None,
'CANADIAN SYLLABICS RWOO': None,
'CANADIAN SYLLABICS SAY': None,
'CANADIAN SYLLABICS SAYISI HOO': None,
'CANADIAN SYLLABICS SAYISI JUU': None,
'CANADIAN SYLLABICS SAYISI SHOO': None,
'CANADIAN SYLLABICS SAYISI SHWE': None,
'CANADIAN SYLLABICS SHAY': None,
'CANADIAN SYLLABICS SHOY': None,
'CANADIAN SYLLABICS SHWOY': None,
'CANADIAN SYLLABICS SOY': None,
'CANADIAN SYLLABICS TAY': None,
'CANADIAN SYLLABICS THWA': None,
'CANADIAN SYLLABICS THWE': None,
'CANADIAN SYLLABICS TLHOO': None,
'CANADIAN SYLLABICS TLHWE': None,
'CANADIAN SYLLABICS TTHAA': None,
'CANADIAN SYLLABICS TTHOO': None,
'CANADIAN SYLLABICS TTHWE': None,
'CANADIAN SYLLABICS WAY': None,
'CANADIAN SYLLABICS WEST-CREE LAA': None,
'CANADIAN SYLLABICS WEST-CREE LOO': None,
'CANADIAN SYLLABICS WESTERN W': None,
'CANADIAN SYLLABICS WOODS-CREE FINAL TH': None,
'CANADIAN SYLLABICS WOODS-CREE THWA': None,
'CANADIAN SYLLABICS WOODS-CREE THWAA': None,
'CANADIAN SYLLABICS WOODS-CREE THWEE': None,
'CANADIAN SYLLABICS WOODS-CREE THWI': None,
'CANADIAN SYLLABICS WOODS-CREE THWII': None,
'CANADIAN SYLLABICS WOODS-CREE THWO': None,
'CANADIAN SYLLABICS WOODS-CREE THWOO': None,
'CANADIAN SYLLABICS YAY': None,
'CANADIAN SYLLABICS YOY': None,
'CAR SLIDING': None,
'CARIAN LETTER A': None,
'CARIAN LETTER A2': None,
'CARIAN LETTER B': None,
'CARIAN LETTER C-18': None,
'CARIAN LETTER C-39': None,
'CARIAN LETTER D': None,
'CARIAN LETTER D2': None,
'CARIAN LETTER E': None,
'CARIAN LETTER E2': None,
'CARIAN LETTER G': None,
'CARIAN LETTER G2': None,
'CARIAN LETTER I': None,
'CARIAN LETTER II': None,
'CARIAN LETTER K': None,
'CARIAN LETTER K2': None,
'CARIAN LETTER L': None,
'CARIAN LETTER LD': None,
'CARIAN LETTER LD2': None,
'CARIAN LETTER M': None,
'CARIAN LETTER MB': None,
'CARIAN LETTER MB2': None,
'CARIAN LETTER MB3': None,
'CARIAN LETTER MB4': None,
'CARIAN LETTER N': None,
'CARIAN LETTER ND': None,
'CARIAN LETTER NG': None,
'CARIAN LETTER NN': None,
'CARIAN LETTER O': None,
'CARIAN LETTER P': None,
'CARIAN LETTER P2': None,
'CARIAN LETTER Q': None,
'CARIAN LETTER R': None,
'CARIAN LETTER RR': None,
'CARIAN LETTER S': None,
'CARIAN LETTER SH': None,
'CARIAN LETTER SH2': None,
'CARIAN LETTER SS': None,
'CARIAN LETTER ST': None,
'CARIAN LETTER ST2': None,
'CARIAN LETTER T': None,
'CARIAN LETTER TT': None,
'CARIAN LETTER TT2': None,
'CARIAN LETTER U': None,
'CARIAN LETTER UU': None,
'CARIAN LETTER UUU': None,
'CARIAN LETTER UUU2': None,
'CARIAN LETTER UUU3': None,
'CARIAN LETTER UUUU': None,
'CARIAN LETTER X': None,
'CASTLE': None,
'CEDI SIGN': None,
'CERES': None,
'CHAINS': None,
'CHAM CONSONANT SIGN FINAL H': None,
'CHAM CONSONANT SIGN FINAL M': None,
'CHAM CONSONANT SIGN FINAL NG': None,
'CHAM CONSONANT SIGN LA': None,
'CHAM CONSONANT SIGN RA': None,
'CHAM CONSONANT SIGN WA': None,
'CHAM CONSONANT SIGN YA': None,
'CHAM DIGIT EIGHT': None,
'CHAM DIGIT FIVE': None,
'CHAM DIGIT FOUR': None,
'CHAM DIGIT NINE': None,
'CHAM DIGIT ONE': None,
'CHAM DIGIT SEVEN': None,
'CHAM DIGIT SIX': None,
'CHAM DIGIT THREE': None,
'CHAM DIGIT TWO': None,
'CHAM DIGIT ZERO': None,
'CHAM LETTER A': None,
'CHAM LETTER AI': None,
'CHAM LETTER BA': None,
'CHAM LETTER BBA': None,
'CHAM LETTER BHA': None,
'CHAM LETTER CHA': None,
'CHAM LETTER CHHA': None,
'CHAM LETTER DA': None,
'CHAM LETTER DDA': None,
'CHAM LETTER DHA': None,
'CHAM LETTER E': None,
'CHAM LETTER FINAL CH': None,
'CHAM LETTER FINAL G': None,
'CHAM LETTER FINAL K': None,
'CHAM LETTER FINAL L': None,
'CHAM LETTER FINAL N': None,
'CHAM LETTER FINAL NG': None,
'CHAM LETTER FINAL P': None,
'CHAM LETTER FINAL R': None,
'CHAM LETTER FINAL SS': None,
'CHAM LETTER FINAL T': None,
'CHAM LETTER FINAL Y': None,
'CHAM LETTER GA': None,
'CHAM LETTER GHA': None,
'CHAM LETTER HA': None,
'CHAM LETTER I': None,
'CHAM LETTER JA': None,
'CHAM LETTER JHA': None,
'CHAM LETTER KA': None,
'CHAM LETTER KHA': None,
'CHAM LETTER LA': None,
'CHAM LETTER MA': None,
'CHAM LETTER MUE': None,
'CHAM LETTER NA': None,
'CHAM LETTER NGA': None,
'CHAM LETTER NGUE': None,
'CHAM LETTER NHA': None,
'CHAM LETTER NHJA': None,
'CHAM LETTER NHUE': None,
'CHAM LETTER NUE': None,
'CHAM LETTER O': None,
'CHAM LETTER PA': None,
'CHAM LETTER PHA': None,
'CHAM LETTER PPA': None,
'CHAM LETTER RA': None,
'CHAM LETTER SA': None,
'CHAM LETTER SSA': None,
'CHAM LETTER TA': None,
'CHAM LETTER THA': None,
'CHAM LETTER U': None,
'CHAM LETTER VA': None,
'CHAM LETTER YA': None,
'CHAM PUNCTUATION DANDA': None,
'CHAM PUNCTUATION DOUBLE DANDA': None,
'CHAM PUNCTUATION SPIRAL': None,
'CHAM PUNCTUATION TRIPLE DANDA': None,
'CHAM VOWEL SIGN AA': None,
'CHAM VOWEL SIGN AI': None,
'CHAM VOWEL SIGN AU': None,
'CHAM VOWEL SIGN EI': None,
'CHAM VOWEL SIGN I': None,
'CHAM VOWEL SIGN II': None,
'CHAM VOWEL SIGN O': None,
'CHAM VOWEL SIGN OE': None,
'CHAM VOWEL SIGN U': None,
'CHAM VOWEL SIGN UE': None,
'CHIRON': None,
'CHURCH': None,
'CIRCLED CD': None,
'CIRCLED CROSSING LANES': None,
'CIRCLED HANGUL IEUNG U': None,
'CIRCLED IDEOGRAPH KINDERGARTEN': None,
'CIRCLED IDEOGRAPH KOTO': None,
'CIRCLED IDEOGRAPH QUESTION': None,
'CIRCLED IDEOGRAPH SCHOOL': None,
'CIRCLED ITALIC LATIN CAPITAL LETTER C': None,
'CIRCLED ITALIC LATIN CAPITAL LETTER R': None,
'CIRCLED KOREAN CHARACTER CHAMKO': None,
'CIRCLED KOREAN CHARACTER JUEUI': None,
'CIRCLED NUMBER EIGHTY ON BLACK SQUARE': None,
'CIRCLED NUMBER FIFTY ON BLACK SQUARE': None,
'CIRCLED NUMBER FORTY ON BLACK SQUARE': None,
'CIRCLED NUMBER SEVENTY ON BLACK SQUARE': None,
'CIRCLED NUMBER SIXTY ON BLACK SQUARE': None,
'CIRCLED NUMBER TEN ON BLACK SQUARE': None,
'CIRCLED NUMBER THIRTY ON BLACK SQUARE': None,
'CIRCLED NUMBER TWENTY ON BLACK SQUARE': None,
'CIRCLED WZ': None,
'CJK COMPATIBILITY IDEOGRAPH-FA6B': None,
'CJK COMPATIBILITY IDEOGRAPH-FA6C': None,
'CJK COMPATIBILITY IDEOGRAPH-FA6D': None,
'CJK COMPATIBILITY IDEOGRAPH-FA70': None,
'CJK COMPATIBILITY IDEOGRAPH-FA71': None,
'CJK COMPATIBILITY IDEOGRAPH-FA72': None,
'CJK COMPATIBILITY IDEOGRAPH-FA73': None,
'CJK COMPATIBILITY IDEOGRAPH-FA74': None,
'CJK COMPATIBILITY IDEOGRAPH-FA75': None,
'CJK COMPATIBILITY IDEOGRAPH-FA76': None,
'CJK COMPATIBILITY IDEOGRAPH-FA77': None,
'CJK COMPATIBILITY IDEOGRAPH-FA78': None,
'CJK COMPATIBILITY IDEOGRAPH-FA79': None,
'CJK COMPATIBILITY IDEOGRAPH-FA7A': None,
'CJK COMPATIBILITY IDEOGRAPH-FA7B': None,
'CJK COMPATIBILITY IDEOGRAPH-FA7C': None,
'CJK COMPATIBILITY IDEOGRAPH-FA7D': None,
'CJK COMPATIBILITY IDEOGRAPH-FA7E': None,
'CJK COMPATIBILITY IDEOGRAPH-FA7F': None,
'CJK COMPATIBILITY IDEOGRAPH-FA80': None,
'CJK COMPATIBILITY IDEOGRAPH-FA81': None,
'CJK COMPATIBILITY IDEOGRAPH-FA82': None,
'CJK COMPATIBILITY IDEOGRAPH-FA83': None,
'CJK COMPATIBILITY IDEOGRAPH-FA84': None,
'CJK COMPATIBILITY IDEOGRAPH-FA85': None,
'CJK COMPATIBILITY IDEOGRAPH-FA86': None,
'CJK COMPATIBILITY IDEOGRAPH-FA87': None,
'CJK COMPATIBILITY IDEOGRAPH-FA88': None,
'CJK COMPATIBILITY IDEOGRAPH-FA89': None,
'CJK COMPATIBILITY IDEOGRAPH-FA8A': None,
'CJK COMPATIBILITY IDEOGRAPH-FA8B': None,
'CJK COMPATIBILITY IDEOGRAPH-FA8C': None,
'CJK COMPATIBILITY IDEOGRAPH-FA8D': None,
'CJK COMPATIBILITY IDEOGRAPH-FA8E': None,
'CJK COMPATIBILITY IDEOGRAPH-FA8F': None,
'CJK COMPATIBILITY IDEOGRAPH-FA90': None,
'CJK COMPATIBILITY IDEOGRAPH-FA91': None,
'CJK COMPATIBILITY IDEOGRAPH-FA92': None,
'CJK COMPATIBILITY IDEOGRAPH-FA93': None,
'CJK COMPATIBILITY IDEOGRAPH-FA94': None,
'CJK COMPATIBILITY IDEOGRAPH-FA95': None,
'CJK COMPATIBILITY IDEOGRAPH-FA96': None,
'CJK COMPATIBILITY IDEOGRAPH-FA97': None,
'CJK COMPATIBILITY IDEOGRAPH-FA98': None,
'CJK COMPATIBILITY IDEOGRAPH-FA99': None,
'CJK COMPATIBILITY IDEOGRAPH-FA9A': None,
'CJK COMPATIBILITY IDEOGRAPH-FA9B': None,
'CJK COMPATIBILITY IDEOGRAPH-FA9C': None,
'CJK COMPATIBILITY IDEOGRAPH-FA9D': None,
'CJK COMPATIBILITY IDEOGRAPH-FA9E': None,
'CJK COMPATIBILITY IDEOGRAPH-FA9F': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA0': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA1': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA2': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA3': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA4': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA5': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA6': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA7': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA8': None,
'CJK COMPATIBILITY IDEOGRAPH-FAA9': None,
'CJK COMPATIBILITY IDEOGRAPH-FAAA': None,
'CJK COMPATIBILITY IDEOGRAPH-FAAB': None,
'CJK COMPATIBILITY IDEOGRAPH-FAAC': None,
'CJK COMPATIBILITY IDEOGRAPH-FAAD': None,
'CJK COMPATIBILITY IDEOGRAPH-FAAE': None,
'CJK COMPATIBILITY IDEOGRAPH-FAAF': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB0': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB1': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB2': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB3': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB4': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB5': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB6': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB7': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB8': None,
'CJK COMPATIBILITY IDEOGRAPH-FAB9': None,
'CJK COMPATIBILITY IDEOGRAPH-FABA': None,
'CJK COMPATIBILITY IDEOGRAPH-FABB': None,
'CJK COMPATIBILITY IDEOGRAPH-FABC': None,
'CJK COMPATIBILITY IDEOGRAPH-FABD': None,
'CJK COMPATIBILITY IDEOGRAPH-FABE': None,
'CJK COMPATIBILITY IDEOGRAPH-FABF': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC0': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC1': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC2': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC3': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC4': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC5': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC6': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC7': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC8': None,
'CJK COMPATIBILITY IDEOGRAPH-FAC9': None,
'CJK COMPATIBILITY IDEOGRAPH-FACA': None,
'CJK COMPATIBILITY IDEOGRAPH-FACB': None,
'CJK COMPATIBILITY IDEOGRAPH-FACC': None,
'CJK COMPATIBILITY IDEOGRAPH-FACD': None,
'CJK COMPATIBILITY IDEOGRAPH-FACE': None,
'CJK COMPATIBILITY IDEOGRAPH-FACF': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD0': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD1': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD2': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD3': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD4': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD5': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD6': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD7': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD8': None,
'CJK COMPATIBILITY IDEOGRAPH-FAD9': None,
'CJK STROKE BXG': None,
'CJK STROKE D': None,
'CJK STROKE H': None,
'CJK STROKE HG': None,
'CJK STROKE HP': None,
'CJK STROKE HPWG': None,
'CJK STROKE HXWG': None,
'CJK STROKE HZ': None,
'CJK STROKE HZG': None,
'CJK STROKE HZT': None,
'CJK STROKE HZW': None,
'CJK STROKE HZWG': None,
'CJK STROKE HZZ': None,
'CJK STROKE HZZP': None,
'CJK STROKE HZZZ': None,
'CJK STROKE HZZZG': None,
'CJK STROKE N': None,
'CJK STROKE P': None,
'CJK STROKE PD': None,
'CJK STROKE | |
Rational Field
sage: Algebras(QQ).Commutative().WithBasis()._without_axioms(named=True)
Category of algebras over Rational Field
"""
return self
_flatten_categories = _flatten_categories
@staticmethod
def _sort(categories):
"""
Return the categories after sorting them decreasingly according
to their comparison key.
.. SEEALSO:: :meth:`_cmp_key`
INPUT:
- ``categories`` -- a list (or iterable) of non-join categories
OUTPUT:
A sorted tuple of categories, possibly with repeats.
.. NOTE::
The auxiliary function `_flatten_categories` used in the test
below expects a second argument, which is a type such that
instances of that type will be replaced by its super
categories. Usually, this type is :class:`JoinCategory`.
EXAMPLES::
sage: Category._sort([Sets(), Objects(), Coalgebras(QQ), Monoids(), Sets().Finite()])
(Category of monoids,
Category of coalgebras over Rational Field,
Category of finite sets,
Category of sets,
Category of objects)
sage: Category._sort([Sets().Finite(), Semigroups().Finite(), Sets().Facade(),Magmas().Commutative()])
(Category of finite semigroups,
Category of commutative magmas,
Category of finite sets,
Category of facade sets)
sage: Category._sort(Category._flatten_categories([Sets().Finite(), Algebras(QQ).WithBasis(), Semigroups().Finite(), Sets().Facade(),Algebras(QQ).Commutative(), Algebras(QQ).Graded().WithBasis()], sage.categories.category.JoinCategory))
(Category of algebras with basis over Rational Field,
Category of algebras with basis over Rational Field,
Category of graded algebras over Rational Field,
Category of commutative algebras over Rational Field,
Category of finite semigroups,
Category of finite sets,
Category of facade sets)
"""
return tuple(sorted(categories, key=category_sort_key, reverse=True))
_sort_uniq = _sort_uniq # a cythonised helper
def __and__(self, other):
"""
Return the intersection of two categories.
This is just a shortcut for :meth:`join`.
EXAMPLES::
sage: Sets().Finite() & Rings().Commutative()
Category of finite commutative rings
sage: Monoids() & CommutativeAdditiveMonoids()
Join of Category of monoids and Category of commutative additive monoids
"""
return Category.join([self, other])
def __or__(self, other):
"""
Return the smallest category containing the two categories.
This is just a shortcut for :meth:`meet`.
EXAMPLES::
sage: Algebras(QQ) | Groups()
Category of monoids
"""
return Category.meet([self, other])
_join_cache = _join_cache
@staticmethod
def join(categories, as_list=False, ignore_axioms=(), axioms=()):
"""
Return the join of the input categories in the lattice of categories.
At the level of objects and morphisms, this operation
corresponds to intersection: the objects and morphisms of a
join category are those that belong to all its super
categories.
INPUT:
- ``categories`` -- a list (or iterable) of categories
- ``as_list`` -- a boolean (default: ``False``);
whether the result should be returned as a list
- ``axioms`` -- a tuple of strings; the names of some
supplementary axioms
.. SEEALSO:: :meth:`__and__` for a shortcut
EXAMPLES::
sage: J = Category.join((Groups(), CommutativeAdditiveMonoids())); J
Join of Category of groups and Category of commutative additive monoids
sage: J.super_categories()
[Category of groups, Category of commutative additive monoids]
sage: J.all_super_categories(proper=True)
[Category of groups, ..., Category of magmas,
Category of commutative additive monoids, ..., Category of additive magmas,
Category of sets, ...]
As a shorthand, one can use::
sage: Groups() & CommutativeAdditiveMonoids()
Join of Category of groups and Category of commutative additive monoids
This is a commutative and associative operation::
sage: Groups() & Posets()
Join of Category of groups and Category of posets
sage: Posets() & Groups()
Join of Category of groups and Category of posets
sage: Groups() & (CommutativeAdditiveMonoids() & Posets())
Join of Category of groups
and Category of commutative additive monoids
and Category of posets
sage: (Groups() & CommutativeAdditiveMonoids()) & Posets()
Join of Category of groups
and Category of commutative additive monoids
and Category of posets
The join of a single category is the category itself::
sage: Category.join([Monoids()])
Category of monoids
Similarly, the join of several mutually comparable categories is
the smallest one::
sage: Category.join((Sets(), Rings(), Monoids()))
Category of rings
In particular, the unit is the top category :class:`Objects`::
sage: Groups() & Objects()
Category of groups
If the optional parameter ``as_list`` is ``True``, this
returns the super categories of the join as a list, without
constructing the join category itself::
sage: Category.join((Groups(), CommutativeAdditiveMonoids()), as_list=True)
[Category of groups, Category of commutative additive monoids]
sage: Category.join((Sets(), Rings(), Monoids()), as_list=True)
[Category of rings]
sage: Category.join((Modules(ZZ), FiniteFields()), as_list=True)
[Category of finite fields, Category of modules over Integer Ring]
sage: Category.join([], as_list=True)
[]
sage: Category.join([Groups()], as_list=True)
[Category of groups]
sage: Category.join([Groups() & Posets()], as_list=True)
[Category of groups, Category of posets]
Support for axiom categories (TODO: put meaningful examples here)::
sage: Sets().Facade() & Sets().Infinite()
Category of facade infinite sets
sage: Magmas().Infinite() & Sets().Facade()
Category of facade infinite magmas
sage: FiniteSets() & Monoids()
Category of finite monoids
sage: Rings().Commutative() & Sets().Finite()
Category of finite commutative rings
Note that several of the above examples are actually join
categories; they are just nicely displayed::
sage: AlgebrasWithBasis(QQ) & FiniteSets().Algebras(QQ)
Join of Category of finite dimensional algebras with basis over Rational Field
and Category of finite set algebras over Rational Field
sage: UniqueFactorizationDomains() & Algebras(QQ)
Join of Category of unique factorization domains
and Category of commutative algebras over Rational Field
TESTS::
sage: Magmas().Unital().Commutative().Finite() is Magmas().Finite().Commutative().Unital()
True
sage: from sage.categories.category_with_axiom import TestObjects
sage: T = TestObjects()
sage: TCF = T.Commutative().Facade(); TCF
Category of facade commutative test objects
sage: TCF is T.Facade().Commutative()
True
sage: TCF is (T.Facade() & T.Commutative())
True
sage: TCF.axioms()
frozenset({'Commutative', 'Facade'})
sage: type(TCF)
<class 'sage.categories.category_with_axiom.TestObjects.Commutative.Facade_with_category'>
sage: TCF = T.Commutative().FiniteDimensional()
sage: TCF is T.FiniteDimensional().Commutative()
True
sage: TCF is T.Commutative() & T.FiniteDimensional()
True
sage: TCF is T.FiniteDimensional() & T.Commutative()
True
sage: type(TCF)
<class 'sage.categories.category_with_axiom.TestObjects.Commutative.FiniteDimensional_with_category'>
sage: TCU = T.Commutative().Unital()
sage: TCU is T.Unital().Commutative()
True
sage: TCU is T.Commutative() & T.Unital()
True
sage: TCU is T.Unital() & T.Commutative()
True
sage: TUCF = T.Unital().Commutative().FiniteDimensional(); TUCF
Category of finite dimensional commutative unital test objects
sage: type(TUCF)
<class 'sage.categories.category_with_axiom.TestObjects.FiniteDimensional.Unital.Commutative_with_category'>
sage: TFFC = T.Facade().FiniteDimensional().Commutative(); TFFC
Category of facade finite dimensional commutative test objects
sage: type(TFFC)
<class 'sage.categories.category.JoinCategory_with_category'>
sage: TFFC.super_categories()
[Category of facade commutative test objects,
Category of finite dimensional commutative test objects]
"""
# Get the list of categories and deal with some trivial cases
categories = list(categories)
if not categories:
if as_list:
return []
else:
# Since Objects() is the top category, it is the neutral element of join
from objects import Objects
return Objects()
elif len(categories) == 1:
category = categories[0]
if as_list:
if isinstance(category, JoinCategory):
return category.super_categories()
else:
return categories
else:
return category
# Get the cache key, and look into the cache
# Ensure associativity and commutativity by flattening
# TODO:
# - Do we want to store the cache after or before the mangling of the categories?
# - Caching with ignore_axioms?
# JoinCategory's sorting, and removing duplicates
cache_key = _sort_uniq(_flatten_categories(categories, JoinCategory))
if not ignore_axioms:
try:
out = _join_cache[cache_key]
if as_list:
if isinstance(out, JoinCategory):
return out._super_categories
return [out]
return out
except KeyError:
pass
# Handle axioms
result = join_as_tuple(cache_key, axioms, ignore_axioms)
if as_list:
return list(result)
if len(result) == 1:
result = result[0]
else:
result = JoinCategory(result)
if not ignore_axioms:
_join_cache[cache_key] = result
return result
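# Illustrative sketch only (plain Python, not Sage machinery): flattening
# nested inputs and sorting them by a canonical key is what makes the join
# cache key above independent of argument order and parenthesisation.
@staticmethod
def _canonical_join_key_demo():
    def canonical(*groups):
        flat = [name for group in groups for name in group]
        return tuple(sorted(set(flat)))
    assert canonical(("Groups",), ("Posets", "Monoids")) == \
        canonical(("Monoids", "Posets"), ("Groups",))
    return canonical(("Groups",), ("Posets", "Monoids"))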
def category(self):
"""
Return the category of this category. So far, all categories
are in the category of objects.
EXAMPLES::
sage: Sets().category()
Category of objects
sage: VectorSpaces(QQ).category()
Category of objects
"""
from objects import Objects
return Objects()
def example(self, *args, **keywords):
"""
Returns an object in this category. Most of the time, this is a parent.
This serves three purposes:
- Give a typical example to better explain what the category is all about.
(and by the way prove that the category is non empty :-) )
- Provide a minimal template for implementing other objects in this category
- Provide an object on which to test generic code implemented by the category
For all those applications, the implementation of the object
shall be kept to a strict minimum. The object is therefore not
meant to be used for other applications; most of the time a
full-featured version is available elsewhere in Sage, and
should be used instead.
Technical note: by default ``FooBar(...).example()`` is
constructed by looking up
``sage.categories.examples.foo_bar.Example`` and calling it as
``Example()``. Extra positional or named parameters are also
passed down. For a category over base ring, the base ring is
further passed down as an optional argument.
Categories are welcome to override this default implementation.
EXAMPLES::
sage: Semigroups().example()
An example of | |
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import messagebox, filedialog
from collections import OrderedDict
import logging
import os
import xlsxwriter
from tkintertable.TableModels import TableModel
from sqlalchemy.sql import literal
from sqlalchemy.sql.expression import and_, union_all
from cep_price_console.utils.log_utils import CustomAdapter, debug
# TODO: Add an "OK" button (or proceed)
# TODO: Last sold date?
# TODO: Get rid of shipping information
# TODO: Get rid of most columns
# TODO: Write validation formula for typed data
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
check_btn_style_template = "{}.dflt.TCheckbutton"
@debug(lvl=logging.DEBUG, prefix='')
def vendor_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.vend_main_01_current.__table__.c.Vend_Num,
ARW_PRF_Mapping.vend_main_01_current.__table__.c.Vend_Name,
ARW_PRF_Mapping.vend_main_01_current.__table__.c.Status)
@debug(lvl=logging.DEBUG, prefix='')
def product_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.prod_main_01_current.__table__.c.Prod_Num,
ARW_PRF_Mapping.prod_main_01_current.__table__.c.Desc_Full,
ARW_PRF_Mapping.prod_main_01_current.__table__.c.Status)
@debug(lvl=logging.DEBUG, prefix='')
def product_line_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.prod_line_main_01_current.__table__.c.Code,
ARW_PRF_Mapping.prod_line_main_01_current.__table__.c.Desc,
ARW_PRF_Mapping.prod_line_main_01_current.__table__.c.Major_Group)
@debug(lvl=logging.DEBUG, prefix='')
def major_group_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.major_group_main_01_current.__table__.c.Code,
ARW_PRF_Mapping.major_group_main_01_current.__table__.c.Desc)
@debug(lvl=logging.DEBUG, prefix='')
def customer_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
cust_shipto_union = union_all(
session.query(
ARW_PRF_Mapping.shipto_main_01_current.Cust_Num_ShipTo_Combo.label("Cust_Num_ShipTo_Combo"),
ARW_PRF_Mapping.shipto_main_01_current.__table__.c.Cust_Num.label("Cust_Num"),
ARW_PRF_Mapping.shipto_cust_01_current.__table__.c.Cust_Name.label("Cust_Name"),
ARW_PRF_Mapping.shipto_main_01_current.__table__.c.Ship_To_Code.label("Ship_To_Code"),
ARW_PRF_Mapping.shipto_main_01_current.__table__.c.ShipTo_Name.label("ShipTo_Name")
).join(
ARW_PRF_Mapping.shipto_cust_01_current.__table__,
ARW_PRF_Mapping.shipto_cust_01_current.Cust_Num_ShipTo_Combo ==
ARW_PRF_Mapping.shipto_main_01_current.Cust_Num_ShipTo_Combo
),
session.query(
ARW_PRF_Mapping.cust_master_01_current.Cust_Num_ShipTo_Combo.label("Cust_Num_ShipTo_Combo"),
ARW_PRF_Mapping.cust_master_01_current.__table__.c.Cust_Num.label("Cust_Num"),
ARW_PRF_Mapping.cust_master_01_current.__table__.c.Cust_Name.label("Cust_Name"),
literal("_All").label("Ship_To_Code"),
literal("N/A").label("ShipTo_Name")
)
).alias()
return session.query(cust_shipto_union)
@debug(lvl=logging.DEBUG, prefix='')
def contract_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.cntr_header_01_current.__table__.c.Cntr_Num,
ARW_PRF_Mapping.cntr_header_01_current.__table__.c.Desc,
ARW_PRF_Mapping.cntr_header_01_current.__table__.c.Vend_Num,
ARW_PRF_Mapping.cntr_header_01_current.__table__.c.Vend_Cntr_Num,
ARW_PRF_Mapping.cntr_header_01_current.__table__.c.All_Cust_Flag)
@debug(lvl=logging.DEBUG, prefix='')
def customer_category_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.cust_master_01_current.__table__.c.Cust_Cat).distinct()
@debug(lvl=logging.DEBUG, prefix='')
def price_group_dataset(session):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
return session.query(ARW_PRF_Mapping.prod_main_01_current.__table__.c.Price_Group_Code).distinct()
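# Illustrative sketch (hypothetical helper, assumes a live SQLAlchemy session
# such as the one built by mysql_session_maker inside PriceList below): each
# *_dataset helper above returns an un-executed Query, so callers decide when
# to fetch rows.
def _preview_vendors(session, limit=5):
    return vendor_dataset(session).limit(limit).all()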
class PriceList(tk.Toplevel):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
@debug(lvl=logging.DEBUG, prefix='')
def __init__(self, master, *args, **kwargs):
import cep_price_console.db_management.ARW_PRF_Mapping as ARW_PRF_Mapping
from cep_price_console.db_management.server_utils import mysql_session_maker
from cep_price_console.utils import config
from cep_price_console.utils.gui_utils import center_window
self.session = mysql_session_maker()
self.master = master
self.name = str(PriceList.__name__).lower()
super().__init__(name=self.name, *args, **kwargs)
Factor.reset(root=self)
PanedFrame.reset(root=self)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.title("Filtering Criteria")
self.iconbitmap(config.FAVICON)
self.protocol("WM_DELETE_WINDOW", self.close)
self.paned_outer = tk.PanedWindow(self,
orient=tk.HORIZONTAL,
name="paned_outer",
sashrelief=tk.RAISED,
sashwidth=7)
self.paned_outer.grid(row=0, column=0, sticky=tk.NSEW)
self.factor_selection_frame = ttk.Frame(self.paned_outer,
name="factor_selection_canvas_frame",
style="even.group.TFrame",
padding=5)
self.factor_selection_frame.columnconfigure(0, weight=1)
# self.column_selection_frame = ttk.Frame(self.paned_outer,
# style="odd.group.TFrame",
# padding=5)
self.paned_factor_criteria = tk.PanedWindow(self.paned_outer,
orient=tk.HORIZONTAL,
name="paned_factor_criteria",
sashrelief=tk.RAISED,
sashwidth=7)
self.factor_contract = Factor(dataset_query=contract_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.cntr_header_01_current.__table__.c.Cntr_Num,
orient=tk.VERTICAL,
lbl_str="Contract")
self.factor_customer = Factor(dataset_query=customer_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.shipto_main_01_current.Cust_Num_ShipTo_Combo,
orient=tk.VERTICAL,
lbl_str="Customer")
self.factor_cust_category = Factor(dataset_query=customer_category_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.cust_master_01_current.__table__.c.Cust_Cat,
orient=tk.VERTICAL,
lbl_str="Customer Category")
self.factor_product = Factor(dataset_query=product_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.prod_main_01_current.__table__.c.Prod_Num,
orient=tk.VERTICAL,
lbl_str="Product")
self.factor_product_line = Factor(dataset_query=product_line_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.prod_line_main_01_current.__table__.c.Code,
orient=tk.VERTICAL,
lbl_str="Product Line")
self.factor_price_group = Factor(
dataset_query=price_group_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.prod_main_01_current.__table__.c.Price_Group_Code,
orient=tk.VERTICAL,
lbl_str="Price Group")
self.factor_primary_vendor = Factor(dataset_query=vendor_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.vend_main_01_current.__table__.c.Vend_Num,
orient=tk.VERTICAL,
lbl_str="Primary Vendor")
self.factor_secondary_vendor = Factor(dataset_query=vendor_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.vend_main_01_current.__table__.c.Vend_Num,
orient=tk.VERTICAL,
lbl_str="Secondary Vendor")
self.factor_major_group = Factor(dataset_query=major_group_dataset(self.session),
dataset_key_col=ARW_PRF_Mapping.major_group_main_01_current.__table__.c.Code,
orient=tk.VERTICAL,
lbl_str="Major Group")
self.test_btn = ttk.Button(self.factor_selection_frame,
text="Run Price List",
command=self.make_a_list)
self.test_btn.grid(row=0, column=0)
self.reporting_options = ReportingOptions(
self.factor_selection_frame, relief=tk.GROOVE, padding=5, borderwidth=8
)
self.reporting_options.grid(row=1, column=0, sticky=tk.NW + tk.E)
self.factor_selection = FactorSelection(
self.factor_selection_frame, relief=tk.GROOVE, padding=5, borderwidth=8
)
self.factor_selection.grid(row=2, column=0, sticky=tk.NW + tk.N)
Factor.factor_selection_populate(self.factor_selection, row=1)
self.factor_selection_instructions = FactorSelectionInstructions(
self.factor_selection_frame, relief=tk.GROOVE, padding=5, borderwidth=8
)
self.factor_selection_instructions.grid(row=3, column=0, sticky=tk.NW + tk.E)
self.factor_selection_frame.update_idletasks()
self.paned_outer.add(self.factor_selection_frame,
width=480,
pady=5,
padx=5,
stretch="never")
# self.paned_outer.add(self.column_selection_frame,
# width=300,
# pady=5,
# padx=5,
# stretch="never")
self.paned_outer.add(self.paned_factor_criteria,
width=0,
pady=5,
padx=5,
stretch="always")
self.paned_outer.bind("<Configure>", self.test_config)
self.factor_customer.sel_check_btn.invoke()
# self.factor_customer.crit_obj.add_btn.invoke()
# self.factor_customer.crit_obj.add_btn.invoke()
# self.factor_customer.crit_obj.add_btn.invoke()
# value_list = ["0001229_All", "0001052_00000001", "0001219_00000013"]
self.factor_customer.crit_obj.add_btn.invoke()
value_list = ["0001229_All"]
for entry_dict, value in zip(self.factor_customer.crit_obj.entry_dict.values(), value_list):
entry_obj = entry_dict["entry_obj"]
entry_obj.value = value
# self.factor_product.sel_check_btn.invoke()
# self.factor_product.crit_obj.add_btn.invoke()
# for entry_dict in self.factor_product.crit_obj.entry_dict.values():
# entry_obj = entry_dict["entry_obj"]
# entry_obj.value = "HOSS-HG12-S000"
self.factor_product_line.sel_check_btn.invoke()
self.factor_product_line.crit_obj.add_btn.invoke()
for entry_dict in self.factor_product_line.crit_obj.entry_dict.values():
entry_obj = entry_dict["entry_obj"]
# entry_obj.value = "PLSoloTest"
entry_obj.value = "JANI"
center_window(win_obj=self, width=1200, height=900)
@debug(lvl=logging.DEBUG, prefix='')
def make_a_list(self):
# worksheet2.repeat_rows(0, 1) to repeat rows at top while printing
# for headers/footers
# https://xlsxwriter.readthedocs.io/example_headers_footers.html?highlight=header
workbook = self.get_workbook()
self.write_worksheet(
workbook=workbook,
output_query=self.get_sherpa(
min_level=self.reporting_options.min_level_chkbtn.instate(['selected']),
expired=self.reporting_options.expired_chkbtn.instate(['selected']),
current=self.reporting_options.current_chkbtn.instate(['selected']),
future=self.reporting_options.future_chkbtn.instate(['selected']),
return_mode=self.reporting_options.return_mode_combobox.get()
),
sheet_name="price_list") # TODO: Change this
workbook.close()
@debug(lvl=logging.DEBUG, prefix='')
def get_workbook(self):
filename_options = dict(
title='Save Output',
initialdir=str(os.path.expanduser('~')).replace('\\', '/'),
initialfile=None,
parent=self,
filetypes=[('Workbook', '.xlsx')])
fullpath_var = str(filedialog.asksaveasfilename(**filename_options)).replace("/", "\\")
# fullpath_var = os.path.join(os.path.expanduser('~'), "Desktop", "{test_name}")
filename, _ = os.path.splitext(fullpath_var)
return xlsxwriter.Workbook('{}.xlsx'.format(filename))
@debug(lvl=logging.DEBUG, prefix='')
def write_worksheet(self, workbook, output_query, sheet_name=None):
if sheet_name is not None:
worksheet = workbook.add_worksheet(sheet_name)
else:
worksheet = workbook.add_worksheet()
header_format = workbook.add_format({'bold': True,
'align': 'center',
'valign': 'vcenter',
'fg_color': '#D7E4BC',
'border': 1})
worksheet.freeze_panes(1, 0)
integer_format = workbook.add_format({'num_format': '#,##0'})
currency_format = workbook.add_format({'num_format': '#,##0.00'})
date_format = workbook.add_format({'num_format': 'mm/dd/yy'})
col_number = 0
row_number = 0
col_list = []
for desc in output_query.column_descriptions:
self.__class__.logger.log(logging.DEBUG, str(desc))
name = desc.get('name').replace("'", "").replace('"', "")
if name in (
# "Cust_Num_ShipTo_Combo",
"Cust_Num",
"Ship_To_Code",
# "Cust_Cat",
"Prod_Num",
# "Prod_Line",
# "Price_Group_Code",
# "C1_Cost",
# "C2_Cost",
# "C3_Cost",
# "C4_Cost",
# "C5_Cost",
# "C6_Cost",
# "C7_Cost",
# "L1_Price",
# "L2_Price",
# "L3_Price",
# "L4_Price",
# "Fut_Price",
# "Fut_Price_Column",
# "Fut_Price_Date",
"Days_Since_Last_Purch",
# "Price_Matrix_Combo_ID",
"Price_Level_Num",
"Price_Eff_Date",
"Price_Exp_Date",
"Net_Price",
# "Cost_Matrix_Combo_ID",
"Cost_Level_Num",
"Cost_Eff_Date",
"Cost_Exp_Date",
"Net_Cost"
):
col_list.append(name)
worksheet.write(row_number, col_number, name, header_format)
col_number += 1
row_number += 1
for row in output_query.all():
col_number = 0
for col_name in col_list:
# noinspection PyProtectedMember
value = row._asdict().get(col_name)
if isinstance(value, str):
                    value = value.replace("{", "").replace("}", "")
if col_name in (
"C1_Cost",
"C2_Cost",
"C3_Cost",
"C4_Cost",
"C5_Cost",
"C6_Cost",
"C7_Cost",
"L1_Price",
"L2_Price",
"L3_Price",
"L4_Price",
"Fut_Price",
"Net_Cost",
"Net_Price"
):
worksheet.write(row_number, col_number, value, currency_format)
elif col_name in (
"Days_Since_Last_Purch",
"Price_Level_Num",
"Cost_Level_Num"
):
worksheet.write(row_number, col_number, value, integer_format)
elif col_name in (
"Fut_Price_Date",
"Price_Eff_Date",
"Price_Exp_Date",
"Cost_Eff_Date",
"Cost_Exp_Date"
):
if value != "0000-00-00":
worksheet.write(row_number, col_number, value, date_format)
else:
worksheet.write(row_number, col_number, value)
col_number += 1
row_number += 1
        worksheet.autofilter(0, 0, row_number - 1, col_number - 1)
@debug(lvl=logging.DEBUG, prefix='')
def test_it(self, workbook):
option_list = []
for min_level in True, False:
for expired in True, False:
for current in True, False:
for future in True, False:
# for return_mode in ("all", "sales", "matrix", "sales_or_matrix"):
option_list.append(
{
"min_level": min_level,
"expired": expired,
"current": current,
"future": future,
"return_mode": "matrix"
}
)
for option in option_list:
if option["return_mode"] == "sales_or_matrix":
ws_ret_mode = "s_or_m"
else:
ws_ret_mode = option["return_mode"]
self.write_worksheet(
workbook=workbook,
output_query=self.get_sherpa(**option),
sheet_name="Lvl-{min_level}_Exp-{expired}_Cur-{current}_Fut-{future}_{return_mode}".format(
min_level=int(option["min_level"]),
expired=int(option["expired"]),
current=int(option["current"]),
future=int(option["future"]),
return_mode=ws_ret_mode)
)
workbook.close()
@debug(lvl=logging.DEBUG, prefix='')
def get_sherpa(self,
min_level=False,
expired=True,
current=True,
future=True,
return_mode="all"):
from cep_price_console.db_management.price_matrix_utils import MatrixSherpa
matrix_sherpa = MatrixSherpa(
min_level=min_level,
expired=expired,
current=current,
future=future,
return_mode=return_mode,
cntr_num_list=self.factor_contract.get_values(),
cust_num_shipto_combo_list=self.factor_customer.get_values(),
cust_cat_list=self.factor_cust_category.get_values(),
prod_num_list=self.factor_product.get_values(),
prod_line_list=self.factor_product_line.get_values(),
price_group_code_list=self.factor_price_group.get_values(),
prim_vend_num_list=self.factor_primary_vendor.get_values(),
secondary_vend_num_list=self.factor_secondary_vendor.get_values(),
major_group_list=self.factor_major_group.get_values()
)
return matrix_sherpa.final_return_query()
@debug(lvl=logging.DEBUG, prefix='')
def close(self):
msgbox = messagebox.askokcancel("Quit", "Do you want to quit?", parent=self)
if msgbox:
self.destroy()
# noinspection PyUnusedLocal
# @debug(lvl=logging.DEBUG, prefix='')
def test_config(self, event):
self.factor_selection_frame.event_generate("<Configure>")
class ReportingOptions(ttk.Labelframe):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
# @debug(lvl=logging.DEBUG, prefix='')
def __init__(self, parent, *args, **kwargs):
self.parent = parent
self.labelwidget = ttk.Label(
self.parent,
text="1) Report Options",
wraplength=20,
style="heading3.TLabel",
)
# noinspection PyArgumentList
super().__init__(self.parent, *args, labelwidget=self.labelwidget, **kwargs)
self.bind("<Configure>", self.on_resize)
self.columnconfigure(0, weight=1)
self.row_count = 0
self.option_instructions = ttk.Label(
self,
text=(" a) Minimum Level Only: If there are multiple 'levels' in the matrix for a product/customer match, "
"return only the lowest level \n b) Time Period Options: Return expired, current, or future entries "
"based on today's date\n c) Return Mode: \n - all: Return all combinations \n - sales: "
"Return only combinations that have a recorded sale\n - matrix: Return only combinations that "
"have a matrix entry \n - sales or matrix: Return combinations that have either a recorded sale "
"or a matrix entry"),
wraplength=self.winfo_width(),
style="even.notes.TLabel"
)
self.option_instructions.grid(row=self.row_count, column=0, columnspan=2, sticky=tk.NW + tk.E)
self.row_count += 1
self.min_level_chkbtn = self.add_checkbutton(self, "Minimum Level Only", columnspan=2)
self.period_frame_label = ttk.Label(
self,
text="Time Period Options:",
style="dflt.TLabel"
)
self.period_label_frame = ttk.Labelframe(
self,
borderwidth=12,
labelwidget=self.period_frame_label
)
self.period_label_frame.grid(row=self.row_count, column=0, columnspan=2, sticky=tk.NW + tk.E)
self.period_label_frame.columnconfigure(0, weight=1)
self.row_count += 1
self.expired_chkbtn = self.add_checkbutton(self.period_label_frame, "Expired Entries")
self.current_chkbtn = self.add_checkbutton(self.period_label_frame, "Current Entries")
self.future_chkbtn = self.add_checkbutton(self.period_label_frame, "Future Entries")
self.return_mode_label = ttk.Label(
self,
text="Return Mode",
style="even.dflt.TLabel",
)
self.return_mode_label.grid(row=self.row_count, column=0, sticky=tk.EW)
self.return_mode_combobox = ttk.Combobox(self,
state="readonly",
values=["all", "sales", "matrix", "sales_or_matrix"],
)
self.return_mode_combobox.set("all")
self.return_mode_combobox.grid(row=self.row_count, column=1, sticky=tk.W)
def add_checkbutton(self, parent, text, columnspan=None):
if self.row_count % 2 == 0:
style_string = check_btn_style_template.format("even")
elif self.row_count % 2 == 1:
style_string = check_btn_style_template.format("odd")
else:
raise ValueError
checkbutton = ttk.Checkbutton(parent,
text=text,
# Command?
style="{}".format(style_string))
checkbutton.state(['!alternate', 'selected'])
if columnspan is not None:
checkbutton.grid(row=self.row_count, column=0, sticky=tk.EW, columnspan=columnspan)
else:
checkbutton.grid(row=self.row_count, column=0, sticky=tk.EW)
self.row_count += 1
return checkbutton
# noinspection PyUnusedLocal
# @debug(lvl=logging.DEBUG, prefix='')
def on_resize(self, event):
self.labelwidget.configure(wraplength=self.winfo_width())
self.option_instructions.configure(wraplength=self.winfo_width())
class FactorSelection(ttk.Labelframe):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
# @debug(lvl=logging.DEBUG, prefix='')
def __init__(self, parent, *args, **kwargs):
self.parent = parent
self.labelwidget = ttk.Label(
self.parent,
text="2) Factor Selection",
wraplength=20,
style="heading3.TLabel",
)
# noinspection PyArgumentList
super().__init__(self.parent, *args, labelwidget=self.labelwidget, **kwargs)
self.bind("<Configure>", self.on_resize)
self.columnconfigure(0, weight=1)
self.factor_check_box_instr = ttk.Label(
self,
text=(" a) Check the box next to the entity you want to filter by. A selection dialogue will appear to "
"the right.\n"
" b) See instructions for the selection dialogue below"),
wraplength=self.winfo_width(),
style="even.notes.TLabel"
)
self.factor_check_box_instr.grid(row=0, column=0, sticky=tk.NW + tk.E)
# noinspection PyUnusedLocal
# @debug(lvl=logging.DEBUG, prefix='')
def on_resize(self, event):
self.labelwidget.configure(wraplength=self.winfo_width())
self.factor_check_box_instr.configure(wraplength=self.winfo_width())
class FactorSelectionInstructions(ttk.Labelframe):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
# @debug(lvl=logging.DEBUG, prefix='')
def __init__(self, parent, *args, **kwargs):
self.parent = parent
self.labelwidget = ttk.Label(
self.parent,
text="3) Selection Instructions",
wraplength=20,
style="heading3.TLabel"
)
# noinspection PyArgumentList
super().__init__(self.parent, *args, labelwidget=self.labelwidget, **kwargs)
<reponame>stillmatic/mizani<filename>mizani/breaks.py
"""
All scales have a means by which the values that are mapped
onto the scale are interpreted. Numeric digital scales put
out numbers for direct interpretation, but most scales
cannot do this. What they offer is named markers/ticks that
aid in assessing the values, e.g. the common speedometer will
have ticks and values to help gauge the speed of the vehicle.
The named markers are what we call breaks. Properly calculated
breaks make interpretation straightforward. These functions
provide ways to calculate good (hopefully) breaks.
"""
from __future__ import division
import numpy as np
import pandas as pd
from matplotlib.dates import MinuteLocator, HourLocator, DayLocator
from matplotlib.dates import WeekdayLocator, MonthLocator, YearLocator
from matplotlib.dates import AutoDateLocator
from matplotlib.dates import num2date, YEARLY
from matplotlib.ticker import MaxNLocator
from .utils import min_max, SECONDS, NANOSECONDS
from .utils import same_log10_order_of_magnitude
__all__ = ['mpl_breaks', 'log_breaks', 'minor_breaks',
'trans_minor_breaks', 'date_breaks',
'timedelta_breaks', 'extended_breaks']
# The break calculations rely on MPL locators to do
# the heavy lifting. It may be more convenient to lift
# the calculations out of MPL.
class DateLocator(AutoDateLocator):
def __init__(self):
AutoDateLocator.__init__(self, minticks=5,
interval_multiples=True)
# Remove 4 and 400
self.intervald[YEARLY] = [
1, 2, 5, 10, 20, 50, 100, 200, 500,
1000, 2000, 5000, 10000]
self.create_dummy_axis()
def tick_values(self, vmin, vmax):
# get locator
# if yearlocator
# change the vmin to turn of decade or half-decade
ticks = AutoDateLocator.tick_values(self, vmin, vmax)
return ticks
class mpl_breaks(object):
"""
Compute breaks using MPL's default locator
See :class:`~matplotlib.ticker.MaxNLocator` for the
parameter descriptions
Examples
--------
>>> x = range(10)
>>> limits = (0, 9)
>>> mpl_breaks()(limits)
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> mpl_breaks(nbins=2)(limits)
array([ 0., 5., 10.])
"""
def __init__(self, *args, **kwargs):
self.locator = MaxNLocator(*args, **kwargs)
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum values
Returns
-------
out : array_like
Sequence of breaks points
"""
if any(np.isinf(limits)):
return []
if limits[0] == limits[1]:
return np.array([limits[0]])
return self.locator.tick_values(limits[0], limits[1])
class log_breaks(object):
"""
Integer breaks on log transformed scales
Parameters
----------
n : int
Desired number of breaks
base : int
Base of logarithm
Examples
--------
>>> x = np.logspace(3, 7)
>>> limits = min(x), max(x)
>>> log_breaks()(limits)
array([ 100, 10000, 1000000])
>>> log_breaks(2)(limits)
array([ 100, 100000])
"""
def __init__(self, n=5, base=10):
self.n = n
self.base = base
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum values
Returns
-------
out : array_like
Sequence of breaks points
"""
n = self.n
base = self.base
if any(np.isinf(limits)):
return []
rng = np.log(limits)/np.log(base)
if base == 10 and same_log10_order_of_magnitude(rng):
return extended_breaks(n=4)(limits)
_min = int(np.floor(rng[0]))
_max = int(np.ceil(rng[1]))
if _max == _min:
return base ** _min
step = (_max-_min)//n + 1
dtype = float if (_min < 0) else int
return base ** np.arange(_min, _max+1, step, dtype=dtype)
class minor_breaks(object):
"""
Compute minor breaks
Parameters
----------
n : int
Number of minor breaks between the major
breaks.
Examples
--------
>>> major = [1, 2, 3, 4]
>>> limits = [0, 5]
>>> minor_breaks()(major, limits)
array([0.5, 1.5, 2.5, 3.5, 4.5])
"""
def __init__(self, n=1):
self.n = n
def __call__(self, major, limits=None):
"""
Minor breaks
Parameters
----------
major : array_like
Major breaks
limits : array_like | None
Limits of the scale. If *array_like*, must be
of size 2. If **None**, then the minimum and
maximum of the major breaks are used.
Returns
-------
out : array_like
            Minor breaks
"""
n = self.n
if len(major) < 2:
return np.array([])
if limits is None:
limits = min_max(major)
# Try to infer additional major breaks so that
# minor breaks can be generated beyond the first
# and last major breaks
diff = np.diff(major)
step = diff[0]
if len(diff) > 1 and all(diff == step):
major = np.hstack([major[0]-step,
major,
major[-1]+step])
mbreaks = []
factors = np.arange(1, n+1)
for lhs, rhs in zip(major[:-1], major[1:]):
sep = (rhs - lhs)/(n+1)
mbreaks.append(lhs + factors * sep)
minor = np.hstack(mbreaks)
minor = minor.compress((limits[0] <= minor) &
(minor <= limits[1]))
return minor
class trans_minor_breaks(object):
"""
Compute minor breaks for transformed scales
The minor breaks are computed in data space.
This together with major breaks computed in
    transform space reveals the non-linearity
    of a scale. See the log transforms created
with :func:`log_trans` like :class:`log10_trans`.
Parameters
----------
trans : trans or type
Trans object or trans class.
n : int
Number of minor breaks between the major
breaks.
Examples
--------
>>> from mizani.transforms import sqrt_trans
>>> major = [1, 2, 3, 4]
>>> limits = [0, 5]
>>> sqrt_trans().minor_breaks(major, limits)
array([0.5, 1.5, 2.5, 3.5, 4.5])
>>> class sqrt_trans2(sqrt_trans):
... def __init__(self):
... self.minor_breaks = trans_minor_breaks(sqrt_trans2)
>>> sqrt_trans2().minor_breaks(major, limits)
array([1.58113883, 2.54950976, 3.53553391])
"""
def __init__(self, trans, n=1):
self.trans = trans
self.n = n
def __call__(self, major, limits=None):
"""
Minor breaks for transformed scales
Parameters
----------
major : array_like
Major breaks
limits : array_like | None
Limits of the scale. If *array_like*, must be
of size 2. If **None**, then the minimum and
maximum of the major breaks are used.
Returns
-------
out : array_like
Minor breaks
"""
if not self.trans.dataspace_is_numerical:
raise TypeError(
"trans_minor_breaks can only be used for data "
"whose format is numerical.")
if limits is None:
limits = min_max(major)
major = self._extend_breaks(major)
major = self.trans.inverse(major)
limits = self.trans.inverse(limits)
minor = minor_breaks(self.n)(major, limits)
return self.trans.transform(minor)
def _extend_breaks(self, major):
"""
Append 2 extra breaks at either end of major
If breaks of transform space are non-equidistant,
:func:`minor_breaks` add minor breaks beyond the first
        and last major breaks. The solution is to extend those
        breaks (in transformed space) before the minor break call
        is made. How the breaks are extended depends on the type
        of transform.
"""
trans = self.trans
trans = trans if isinstance(trans, type) else trans.__class__
# so far we are only certain about this extending stuff
# making sense for log transform
is_log = trans.__name__.startswith('log')
diff = np.diff(major)
step = diff[0]
if is_log and all(diff == step):
major = np.hstack([major[0]-step, major, major[-1]+step])
return major
# Matplotlib's YearLocator uses different named
# arguments than the others
LOCATORS = {
'minute': MinuteLocator,
'hour': HourLocator,
'day': DayLocator,
'week': WeekdayLocator,
'month': MonthLocator,
'year': lambda interval: YearLocator(base=interval)
}
class date_breaks(object):
"""
Regularly spaced dates
Parameters
----------
width : str | None
An interval specification. Must be one of
[minute, hour, day, week, month, year]
        If ``None``, the interval is determined automatically.
Examples
--------
>>> from datetime import datetime
>>> x = [datetime(year, 1, 1) for year in [2010, 2026, 2015]]
Default breaks will be regularly spaced but the spacing
is automatically determined
>>> limits = min(x), max(x)
>>> breaks = date_breaks()
>>> [d.year for d in breaks(limits)]
[2010, 2012, 2014, 2016, 2018, 2020, 2022, 2024, 2026]
Breaks at 4 year intervals
>>> breaks = date_breaks('4 year')
>>> [d.year for d in breaks(limits)]
[2008, 2012, 2016, 2020, 2024, 2028]
"""
def __init__(self, width=None):
if not width:
locator = DateLocator()
else:
# Parse the width specification
# e.g. '10 weeks' => (10, week)
_n, units = width.strip().lower().split()
interval, units = int(_n), units.rstrip('s')
locator = LOCATORS[units](interval=interval)
self.locator = locator
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum :class:`datetime.datetime` values.
Returns
-------
out : array_like
Sequence of break points.
"""
if any(pd.isnull(x) for x in limits):
return []
ret = self.locator.tick_values(*limits)
# MPL returns the tick_values in ordinal format,
# but we return them in the same space as the
# inputs.
return [num2date(val) for val in ret]
class timedelta_breaks(object):
"""
Timedelta breaks
Returns
-------
out : callable ``f(limits)``
A function that takes a sequence of two
:class:`datetime.timedelta` values and returns
a sequence of break points.
Examples
--------
>>> from datetime import timedelta
>>> breaks = timedelta_breaks()
>>> x = [timedelta(days=i*365) for i in range(25)]
>>> limits = min(x), max(x)
>>> major = breaks(limits)
>>> [val.total_seconds()/(365*24*60*60)for val in major]
[0.0, 5.0, 10.0, 15.0, 20.0, 25.0]
"""
def __init__(self, n=5, Q=(1, 2, 5, 10)):
self._breaks_func = extended_breaks(n=n, Q=Q)
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum :class:`datetime.timedelta` values.
Returns
-------
out : array_like
Sequence of break points.
"""
        if any(pd.isnull(x) for x in limits):
            return []
<filename>digic_aligner/doc.py<gh_stars>0
import sklearn.feature_extraction
import sklearn.metrics
import numpy
import sys
import os
import yaml
import ufal.udpipe as udpipe
from laserembeddings import Laser
import datetime
import torch
import transformers
import requests
from digic_aligner.variables import METHOD, THRESHOLD
PORT=os.getenv("PORT", "6000")
#METHOD=os.getenv("METHOD", "tfidf")
if METHOD=="bert":
bert_model = transformers.BertModel.from_pretrained("TurkuNLP/bert-base-finnish-cased-v1")
bert_model.eval()
if torch.cuda.is_available():
bert_model = bert_model.cuda()
bert_tokenizer = transformers.BertTokenizer.from_pretrained("TurkuNLP/bert-base-finnish-cased-v1")
elif METHOD=="laser":
laser = Laser()
def embed(data,bert_model,how_to_pool="CLS"):
with torch.no_grad(): #tell the model not to gather gradients
mask=data.clone().float() #
mask[data>0]=1.0
emb=bert_model(data.cuda(),attention_mask=mask.cuda()) #runs BERT and returns several things, we care about the first
#emb[0] # batch x word x embedding
if how_to_pool=="AVG":
pooled=emb[0]*(mask.unsqueeze(-1)) #multiply everything by the mask
pooled=pooled.sum(1)/mask.sum(-1).unsqueeze(-1) #sum and divide by non-zero elements in mask to get masked average
elif how_to_pool=="CLS":
pooled=emb[0][:,0,:].squeeze() #Pick the first token as the embedding
else:
assert False, "how_to_pool should be CLS or AVG"
print("Pooled shape:",pooled.shape)
return pooled.cpu().numpy() #done! move data back to CPU and extract the numpy array
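# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, self-contained view of the tokenize -> pad -> embed pipeline that
# Doc.__init__ runs when METHOD == "bert". It assumes the module-level
# bert_tokenizer and bert_model above are loaded (METHOD set to "bert") and,
# like embed() itself, that a CUDA device is available.
def _embed_sentences_sketch(sentences, how_to_pool="CLS"):
    # sentences: list of whitespace-tokenized strings, as in Doc.lines_and_tokens
    tokenized_ids = [bert_tokenizer.encode(txt, add_special_tokens=True) for txt in sentences]
    tokenized_ids_t = [torch.tensor(ids, dtype=torch.long) for ids in tokenized_ids]
    batch = torch.nn.utils.rnn.pad_sequence(tokenized_ids_t, batch_first=True)
    return embed(batch, bert_model, how_to_pool=how_to_pool)  # shape: (n_sentences, hidden_dim)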
class Doc:
def __init__(self,doc_dict):
global laser, bert_model, bert_tokenizer
self.doc_dict=doc_dict #this dictionary can have anything the user ever wants but must have "text" field and "id" field
self.text=doc_dict["text"]
self.id=doc_dict["id"]
self.preproc_udpipe()
if METHOD=="laser":
self.laser_emb=laser.embed_sentences(self.lines_and_tokens,lang="fi")
elif METHOD=="bert":
tokenized_ids=[bert_tokenizer.encode(txt,add_special_tokens=True) for txt in self.lines_and_tokens] #this runs the BERT tokenizer, returns list of lists of integers
tokenized_ids_t=[torch.tensor(ids,dtype=torch.long) for ids in tokenized_ids] #turn lists of integers into torch tensors
tokenized_single_batch=torch.nn.utils.rnn.pad_sequence(tokenized_ids_t,batch_first=True)
self.bert_embedded=embed(tokenized_single_batch,bert_model)
if len(self.lines_and_tokens)==1:
self.bert_embedded=self.bert_embedded.reshape(1, -1)
def get_segmentation(self):
#Tells the user how this document is segmented
#TODO: modify this to tell character offsets rather than the actual sentences that have been destroyed by now by udpipe's tokenization
#Whatever this returns should have enough information for the user to know what we mean when we say "segment index 5 is aligned with something"
return {"segmented":self.sentences}
def preproc_udpipe(self):
#This runs whatever preprocessing we need
# Download UDPipe model from:
# https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3131
#
# TODO: lemmatization/stemming/something would likely be quite useful for Finnish
# TODO: store the result in some sort of Document object
r = requests.get('http://127.0.0.1:'+PORT+'/process', params={'data': self.text, 'tokenizer': ''})
self.sentences=[line[9:] for line in r.json()['result'].split('\n') if line.startswith('# text = ')]
r = requests.get('http://127.0.0.1:'+PORT+'/process', params={'data': self.text, 'tokenizer': '', 'output': 'horizontal'})
self.lines_and_tokens=[line.strip() for line in r.json()['result'].split("\n") if line.strip()]
assert len(self.sentences)==len(self.lines_and_tokens)
        #list of strings, each string is one whitespace-tokenized sentence, I don't split into tokens here on purpose
#this is also a place to get lemmas and what have you
class DocCollection:
def __init__(self,doc_dicts,vectorizer=None):
self.docs=[Doc(doc_dict) for doc_dict in doc_dicts]
#1) Pre-compute the doc2doc sim matrix
self.doc_doc_sim_matrix_tfidf,self.vectorizer=doc_sim_matrix_tfidf(self.docs,vectorizer) #if vectorizer is None, this function makes one, let's store it
        self.doc_doc_sim_matrix_tfids_margin=margin_doc_sim(self.doc_doc_sim_matrix_tfidf) #calculate also the margin-method based matrix (I don't think this has ever been done before!)
def query_by_doc_id(self,docid,method,margin_cutoff):
#Which doc?
print("LOOKING FOR",repr(docid))
print("IDS",list(doc.id for doc in self.docs))
qdoc=[doc for doc in self.docs if doc.id==docid][0]
return self.query(qdoc=qdoc,method=method,margin_cutoff=margin_cutoff)
def get_doc_ids(self):
return list(doc.id for doc in self.docs)
def query(self,text=None,qdoc=None,method="tfidf",margin_cutoff=1.05): #margin cutoff on sentences, anything below that is not considered
"""Given a query text, find hitting documents and align them. Prepares a dictionary which can be returned to the user"""
if qdoc is None:
qdoc=Doc({"text":text,"id":"qry"}) #turn the query into a fake doc
doc_hits=[] #this will be a list of the hits
for d in self.docs: #and compare against all docs (I don't think we should use a doc-embedding approach here since queries will be short, so we really want the alignment)
if method=="tfidf":
swise_sim=sentence_wise_sim_tfidf(qdoc,d,self.vectorizer)
elif method=="laser":
swise_sim=sentence_wise_sim_laser(qdoc,d)
elif method=="bert":
swise_sim=sentence_wise_sim_bert(qdoc,d)
overlaps=overlapping_segments(swise_sim)
segment_pairs=[]
for qry_sent_idx,d_sent_idx,margin in zip(*overlaps): #here we have the actual alignments of query sentences with d's sentences
print("MARGIN",margin,file=sys.stderr)
if margin<margin_cutoff:
break
# numpy numbers cannot be jsonified later, convert type first
segment_pairs.append((int(qry_sent_idx),int(d_sent_idx),float(margin))) #store these indices and margin so we can give them back to the user
if len(segment_pairs)>0:
doc_avg=float(sum(margin for i1,i2,margin in segment_pairs)/len(segment_pairs))
else:
continue #this one doesnt make the result
doc_result={}
doc_result["target_id"]=d.id
doc_result["target_segmentation"]=d.get_segmentation()
doc_result["matching_segments"]=segment_pairs
doc_result["avg_match"]=doc_avg
if qdoc.id!=d.id: # put this here so I have the least number of lines to change the indentation
doc_hits.append(doc_result)
doc_hits.sort(key=lambda dhit:dhit["avg_match"],reverse=True) #sort so that best docs come first
#now yet give some sort of info about the query
result={"qry_segmentation":qdoc.get_segmentation(),"hits":doc_hits} #this can basically be returned as a json
return result
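# --- Illustrative usage sketch (not part of the original module) -------------
# How the classes above are typically wired together: build a DocCollection
# from doc dicts (each needs "text" and "id") and query it with free text.
# The example documents and query string are made up, and a UDPipe server must
# be reachable on PORT for the preprocessing step to work.
def _doc_collection_usage_sketch():
    docs = [
        {"id": "d1", "text": "Kissa istuu matolla."},
        {"id": "d2", "text": "Koira juoksee puistossa."},
    ]
    collection = DocCollection(docs)
    result = collection.query(text="kissa matolla", method=METHOD)
    # result["hits"] lists matching documents best-first; each hit carries
    # (query_segment_idx, doc_segment_idx, margin) tuples.
    return result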
####### HERE's then the generic function for sim calc etc
# I don't think this needs a BERT version
# all the docs will be similar to each other, so I think individual words matter
# so maybe tfidf is just fine
# Returns doc x doc similarity matrix
def doc_sim_matrix_tfidf(segmented_docs,vectorizer=None):
# segmented_docs is a list of Doc() that must have preproc_udpipe() ran on them in init
# This is a document-by-document work, so we rejoin the sentences at least for the tfidf work
docs_as_one_text=[" ".join(doc.lines_and_tokens) for doc in segmented_docs]
if vectorizer is None: #TODO I really think we should have a vectorizer trained on a bunch of normal Finnish
vectorizer=sklearn.feature_extraction.text.TfidfVectorizer(analyzer="char_wb",ngram_range=(2,5)) #TODO: should TF or IDF be somehow log-squeezed? This often helps. #TODO: rather give this a vectorizer from outside
vectorizer.fit(docs_as_one_text)
doc_by_term_M=vectorizer.transform(docs_as_one_text)
doc_sim_matrix=sklearn.metrics.pairwise.cosine_similarity(doc_by_term_M)
return doc_sim_matrix,vectorizer
def margin_doc_sim(doc_sim_matrix):
# This takes any doc sim square matrix
# and does the margin method on it
M=doc_sim_matrix
means12=M.mean(axis=-1)
means21=M.T.mean(axis=-1)
means=(means12+means21)/2.0
#print("Means:",means)
margins=M/means #This is again the margin method, bidirectional, and done on documents
margins=(margins+margins.T)/2 #make it symmetric yet by averaging the two triangles
return margins
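# --- Illustrative sketch (not part of the original module) -------------------
# A tiny worked example of margin_doc_sim on a made-up 3x3 cosine-similarity
# matrix: each similarity is divided by the average of its row/column means, so
# values well above 1.0 mark document pairs that stand out from the background
# similarity of the collection.
def _margin_doc_sim_sketch():
    toy_sim = numpy.array([[1.00, 0.80, 0.10],
                           [0.80, 1.00, 0.15],
                           [0.10, 0.15, 1.00]])
    margins = margin_doc_sim(toy_sim)
    # margins[0, 1] comes out well above 1.0: docs 0 and 1 are more similar to
    # each other than either is to the collection on average.
    return margins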
#TODO maybe someone could do a LASER version of this?
#TODO BERT version would be nice, but needs some effort to get the positions right and embedd the documents correctly
#TODO if we do Laser/BERT then probably these should be pre-embedded when reading the documents in, it doesnt matter for tfidf all that much, so that doesnt cache anything
def sentence_wise_sim_tfidf(d1_segm,d2_segm,vectorizer):
"""
This gets two documents (one of these could be a query even)
It runs a comparison of segments in d1 against segments in d2
"""
#TODO: In future we could use some fancy machine translation aligner, but let's for now do the obvious
#TODO: would using a vectorizer fitted on the current two documents, using a sentence as pseudo-document be a good idea?
d1_vectorized=vectorizer.transform(d1_segm.lines_and_tokens) #sentence-by-term
d2_vectorized=vectorizer.transform(d2_segm.lines_and_tokens) #sentence-by-term
segment_wise_matrix=sklearn.metrics.pairwise.cosine_similarity(d1_vectorized,d2_vectorized,dense_output=True)
#this is now d1 segments by d2 segments similarity matrix
return segment_wise_matrix
def sentence_wise_sim_laser(d1_segm,d2_segm):
#can this be any simpler? :)
embeddings_d1 = d1_segm.laser_emb
embeddings_d2 = d2_segm.laser_emb
segment_wise_matrix=sklearn.metrics.pairwise.cosine_similarity(embeddings_d1,embeddings_d2,dense_output=True)
#this is now d1 segments by d2 segments similarity matrix
return segment_wise_matrix
def sentence_wise_sim_bert(d1_segm,d2_segm):
embeddings_d1 = d1_segm.bert_embedded
embeddings_d2 = d2_segm.bert_embedded
segment_wise_matrix=sklearn.metrics.pairwise.cosine_similarity(embeddings_d1,embeddings_d2,dense_output=True)
#this is now d1 segments by d2 segments similarity matrix
return segment_wise_matrix
# This takes the result of sentence_wise_sim() is agnostic to how that was done, right now we only have the tfidf version, but should work with anything
def overlapping_segments(segment_wise_matrix):
M=segment_wise_matrix #just make this shorter in name
#assumption: M is a similarity matrix of N1 x N2 elements, where N1 are segments from doc1 and N2 are segments from doc2
#note: right now these are sentences, but I think whatever overlapping segments should be doable too
# like phrases etc, so there is no deeply inbuilt assumption that these are sentences
# the document lengths can differ considerably (especially if the other is a simple query)
# I think it's good to compare all segments of the shorter doc against all segments of the longer doc and not the other way around
d1_len,d2_len=M.shape
if d1_len>d2_len:
M=M.T
#get the coefficients for the margin method
#TODO: bidirectional margin, now only one-directional
#https://arxiv.org/pdf/1811.01136
sorted_M=-numpy.sort(-M,axis=-1) #funny how numpy sort does not have order parameter; this sorts target similarities (second dimension) for every source doc (first dimension)
means_of_nearest=sorted_M[:,:10].mean(axis=-1)+0.00001 #10 neighbors seems a decent consensus, add a little epsilon not to divide by zero
margin=sorted_M[:,0]/means_of_nearest #the sim of nearest doc divided by avg of sims of 10 nearest docs: the (c) method from the paper above
targets=(-M).argsort(axis=-1)[:,0] #for every shorter document segment, this is the corresponding longer document segment
sources=numpy.arange(M.shape[0]) #indices into the shorter document basically (0,1,2,3....) so we can sort later
#So now winner target segments and their margin are the results of the comparison, for each source segment
best_sorting_indices=(-margin).argsort(axis=-1) #this sorts the hits from best to worst by margin
final_sources=sources[best_sorting_indices] #indices into source
final_targets=targets[best_sorting_indices] #indices into target
final_margins=margin[best_sorting_indices] #margins
    #the three above are now sorted from best-to-worst
    return final_sources, final_targets, final_margins
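# --- Illustrative sketch (not part of the original module) -------------------
# How the three aligned arrays returned above are typically consumed (mirroring
# DocCollection.query): walk the pairs best-first and keep those whose margin
# clears a cutoff. The toy similarity matrix is made up.
def _overlapping_segments_sketch(margin_cutoff=1.05):
    toy = numpy.array([[0.9, 0.2, 0.1],
                       [0.1, 0.8, 0.2]])
    kept = []
    for src_idx, tgt_idx, margin in zip(*overlapping_segments(toy)):
        if margin < margin_cutoff:
            break
        kept.append((int(src_idx), int(tgt_idx), float(margin)))
    return kept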
<filename>hivwholeseq/sequencing/build_consensus.py<gh_stars>1-10
#!/usr/bin/env python
# vim: fdm=marker
'''
author: <NAME>
date: 27/03/14
content: Build consensus after premap, trim and divide. Thanks to the premap
we have an easy time here: we collect reads a bit from all over the
fragment and make local consensi (100 bp), which we then chain.
This script is not working in case of unexpected insertions/deletions,
e.g. the 1kb+ plasmid insertion in SF162. De novo assembly is needed
for that, which we could script but for now looks quite useless: the
divided reads are remapped to initial patient references anyway.
'''
# Modules
import os
import argparse
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet.IUPAC import ambiguous_dna
from Bio import AlignIO
from hivwholeseq.sequencing.samples import load_sequencing_run, SampleSeq
from hivwholeseq.utils.mapping import align_muscle
from hivwholeseq.sequencing.filenames import get_divided_filename, \
get_premapped_filename, \
get_reference_premap_filename, \
get_consensus_filename, \
get_allele_counts_filename, \
get_build_consensus_summary_filename, \
get_reference_consensus_ali_filename
from hivwholeseq.cluster.fork_cluster import fork_build_consensus as fork_self
from hivwholeseq.utils.sequence import build_local_consensus
# Functions
def build_consensus(bamfilename, len_reference, VERBOSE=0,
block_len_initial=100,
reads_per_alignment=31,
accept_holes=False,
store_allele_counts=False):
'''Build a consensus from premapped and divided reads'''
if VERBOSE:
print 'Build consensus'
import numpy as np
import pysam
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet.IUPAC import ambiguous_dna
from hivwholeseq.utils.mapping import align_muscle
# Three steps:
# 1. collect reads uniformly across the fragment
# 2. make local consensi
# 3. join into fragmentwide consensus
consensus = None
consensi_local = []
if store_allele_counts:
allcounts_local = []
pos_ref = 0
block_len = block_len_initial
with pysam.Samfile(bamfilename, 'rb') as bamfile:
# Initial block
if VERBOSE >= 2:
print 'Block n', len(consensi_local) + 1,
for pos_first_block in xrange(len_reference):
bamfile.reset()
# The first block has to make a consensus for the FIRST base, this needs
# at least ONE read starting exactly at the first position. Otherwise,
# the same is repeated for position 2, and so on.
reads = [read for read in bamfile if (read.is_proper_pair) and (read.pos == pos_first_block)]
if not len(reads):
continue
np.random.shuffle(reads)
            reads = reads[:reads_per_alignment]
seqs = [SeqRecord(Seq(read.seq[:block_len], ambiguous_dna), id=read.qname)
for read in reads]
cons_local = build_local_consensus(seqs, VERBOSE=VERBOSE, store_allele_counts=store_allele_counts)
if store_allele_counts:
(cons_local, allcount_local) = cons_local
allcounts_local.append(allcount_local)
consensi_local.append(cons_local)
pos_ref += (block_len_initial // 2) * (1 + pos_first_block // (block_len_initial // 2))
if VERBOSE >= 2:
print 'pos', pos_first_block, 'to', pos_first_block + block_len, 'block len', block_len
break
# Start consensus
if len(consensi_local) == 1:
consensus = [consensi_local[0]]
if store_allele_counts:
allcounts = [allcounts_local[0]]
# Divide reads by block (more efficient than scrolling the file every time)
# FIXME: extract random subsample, assign to blocks, and only complete the missing blocks!
reads_by_block = [[] for n_block in xrange((len_reference - pos_ref) // (block_len_initial // 2))]
bamfile.reset()
for read in bamfile:
if not read.is_proper_pair:
continue
pos_ref_tmp = pos_ref
n_block = 1
while (pos_ref_tmp < len_reference):
block_len_tmp = min(block_len, len_reference - pos_ref)
read_start = read.pos
read_end = read.pos + sum(bl for (bt, bl) in read.cigar if bt in (0, 2))
if (pos_ref_tmp - 100 < read_start <= pos_ref_tmp) and \
(read_end >= pos_ref_tmp + block_len_tmp):
reads_by_block[n_block - 1].append(read)
break
pos_ref_tmp += block_len_initial // 2
n_block += 1
# Stack local consensi on top of the first one
n_block = 1
while (pos_ref < len_reference):
block_len = min(block_len, len_reference - pos_ref)
if block_len < block_len_initial // 2:
break
if VERBOSE >= 2:
print 'Block n', len(consensi_local) + 1, 'pos', pos_ref, 'to', pos_ref + block_len, 'block len', block_len
# Get reads that cover the whole block
reads = reads_by_block[n_block - 1]
n_block += 1
#FIXME
#if n_block >= 2:
# print pos_ref, pos_ref + block_len
# import ipdb; ipdb.set_trace()
# Internal coverage holes are not tolerated, but the last block
# is allowed to be missing. However, we should try to squeeze out
# all the bases by rescanning the reads a last time with less strict
# criteria: if it has even one base more than what we have, add it
            if len(reads):
                full_cover = True
            else:
                full_cover = False
bamfile.reset()
reads = []
for read in bamfile:
if not read.is_proper_pair:
continue
read_start = read.pos
read_end = read.pos + sum(bl for (bt, bl) in read.cigar if bt in (0, 2))
if (read_start <= pos_ref) and (read_end > pos_ref + block_len_initial // 2):
reads.append(read)
if not len(reads):
if pos_ref + block_len < len_reference:
if VERBOSE >= 2:
print 'WARNING: consensus looks interrupted in mid-way'
break
# Take a random subsample of reads. If it's a problematic block, not
# fully covered, take more reads than usual
if full_cover:
np.random.shuffle(reads)
                reads = reads[:reads_per_alignment]
else:
# Trim all, then take longest
pass
# Trim reads from the left to start all at the block start
# NOTE: reads have been selected to start @ or before the block start!
seqs = []
for read in reads:
pos_reft = read.pos
# Find start of the block in the read
start_found = False
pos_read_start = 0
pos_read_end = 0
for (bt, bl) in read.cigar:
if bt == 1:
if not start_found:
pos_read_start += bl
pos_read_end += bl
elif bt == 2:
if (not start_found) and (pos_reft + bl > pos_ref):
start_found = True
if pos_reft + bl > pos_ref + block_len:
break
pos_reft += bl
else:
if (not start_found) and (pos_reft + bl > pos_ref):
pos_read_start += pos_ref - pos_reft
start_found = True
if pos_reft + bl > pos_ref + block_len:
pos_read_end += pos_ref + block_len - pos_reft
break
if not start_found:
pos_read_start += bl
pos_read_end += bl
pos_reft += bl
seq = SeqRecord(Seq(read.seq[pos_read_start: pos_read_end],
ambiguous_dna), id=read.qname)
seqs.append(seq)
# If it's a problematic block, take longest reads
if not full_cover:
seqs.sort(key=len, reverse=True)
                seqs = seqs[:reads_per_alignment]
#FIXME
#if n_block >= 2:
# print pos_ref, pos_ref + block_len
# import ipdb; ipdb.set_trace()
# Make local consensus using a multiple sequence alignment
# --------------
# ----- ------
# -------- ---
#---------------
cons_local = build_local_consensus(seqs, VERBOSE=VERBOSE,
store_allele_counts=store_allele_counts,
full_cover=full_cover)
if store_allele_counts:
(cons_local, allcount_local) = cons_local
allcounts_local.append(allcount_local)
consensi_local.append(cons_local)
pos_ref += block_len_initial // 2
# Join block <-- to the stack, like this:
# ---------------------------
# --------------------
if consensus is None:
consensus = [consensi_local[0]]
if store_allele_counts:
allcounts = [allcounts_local[0]]
else:
cons = cons_local
seed = consensus[-1][-20:]
sl = len(seed)
pos_start = cons.find(seed)
# Allow imperfect matches
if pos_start == -1:
consm = np.fromstring(cons, 'S1')
seedm = np.fromstring(seed, 'S1')
n_matches = [(consm[i: i + sl] == seedm).sum()
for i in xrange(len(cons) + 1 - len(seed))]
pos_start = np.argmax(n_matches)
# Try to only add non-bogus stuff
if n_matches[pos_start] < 0.66 * sl:
pos_start = -1
if VERBOSE >= 4:
                    print 'Block n.', len(consensi_local), ': cannot stack to previous one!'
if pos_start != -1:
consensus.append(cons[pos_start + sl:])
if store_allele_counts:
allcounts.append(allcounts_local[-1][:, pos_start + sl:])
elif accept_holes:
consensus.append('N' * 10)
consensus.append(cons)
if store_allele_counts:
tmpall = np.zeros((allcounts_local[-1].shape[0], 10), int)
tmpall[-1] = 1
allcounts.append(tmpall)
allcounts.append(allcounts_local[-1])
if consensus is None:
raise ValueError('Consensus is still None: unable to build!')
consensus = ''.join(consensus)
if store_allele_counts:
allcounts = np.concatenate(allcounts, axis=1)
return (consensus, allcounts)
return consensus
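# --- Illustrative sketch (not part of the original script) ------------------
# A minimal, stand-alone view of the seed-matching step used above to stack a
# new local consensus onto the growing fragment consensus: take the last few
# bases of the stack as a seed, find it in the new block (allowing mismatches),
# and append only the non-overlapping tail. Toy arguments; the short seed
# length is for brevity only.
def _stack_blocks_sketch(stack, block, seed_len=6, min_match_frac=0.66):
    import numpy as np
    seed = stack[-seed_len:]
    pos_start = block.find(seed)
    if pos_start == -1:
        # Imperfect matching: slide the seed along the block, take the best hit
        blockm = np.fromstring(block, 'S1')
        seedm = np.fromstring(seed, 'S1')
        n_matches = [(blockm[i: i + seed_len] == seedm).sum()
                     for i in xrange(len(block) + 1 - seed_len)]
        pos_start = int(np.argmax(n_matches))
        if n_matches[pos_start] < min_match_frac * seed_len:
            return stack # cannot stack this block onto the previous one
    return stack + block[pos_start + seed_len:]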
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Build consensus by mapping-assisted assembly',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--run', required=True,
help='Seq run to analyze (e.g. Tue28)')
parser.add_argument('--adaIDs', nargs='*',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--fragments', nargs='*',
help='Fragment to map (e.g. F1 F6 genomewide)')
parser.add_argument('--block-length', type=int, default=150, dest='block_len',
help='Length of each local consensus block')
parser.add_argument('--reads-per-alignment', type=int, default=31,
dest='reads_per_alignment',
help='Number of (random) reads used for the local consensi')
parser.add_argument('--verbose', type=int, default=0,
help=('Verbosity level [0-3]'))
parser.add_argument('--submit', action='store_true',
help='Execute the script in parallel on the cluster')
parser.add_argument('--no-summary', action='store_false', dest='summary',
help='Do not save results in a summary file')
parser.add_argument('--allele-counts', action='store_true', dest='allele_counts',
help='Also create rough allele frequencies')
args = parser.parse_args()
seq_run = args.run
adaIDs = args.adaIDs
fragments = args.fragments
VERBOSE = args.verbose
submit = args.submit
summary = args.summary
n_reads_per_ali = args.reads_per_alignment
block_len_initial = args.block_len
store_allele_counts = args.allele_counts
# Specify the dataset
dataset = load_sequencing_run(seq_run)
data_folder = dataset.folder
# If the script is called with no adaID, iterate over all
dataset.discard_nondivided_samples()
samples = dataset.samples
if adaIDs is not None:
samples = samples.loc[samples.adapter.isin(adaIDs)]
if VERBOSE >= 3:
        print 'adaIDs', samples.adapter
    # Iterate over all
import unittest
from time import time
from mock import patch
from appscale.common.service_stats import stats_manager, samples
def request_simulator(service_stats, time_mock=None):
""" Builds quick request reported for simulating
start and finalization of request with optionally specified latency.
Args:
service_stats: an instance of stats_manager.ServiceStats
time_mock: a mock of time.time() function
Returns:
a function for reporting requests
"""
def parametrized(latency=None, end_time=None, app=None, status=None,
resource=None):
""" Util function for quick reporting request with wanted latency.
Args:
latency: an integer number representing wanted latency in ms.
end_time: unix epoch time in ms.
app: a string representing application name.
      status: an integer representing request status.
      resource: a string representing the requested resource path.
    """
if latency:
if end_time:
end_time_s = float(end_time)/1000
start_time = end_time_s - float(latency)/1000
else:
start_time = time()
end_time_s = start_time + float(latency)/1000
time_mock.return_value = start_time
request_info = service_stats.start_request()
request_info.app = app
request_info.resource = resource
time_mock.return_value = end_time_s
request_info.status = status
request_info.finalize()
else:
request_info = service_stats.start_request()
request_info.finalize()
return parametrized
class TestDefaultCumulativeCounters(unittest.TestCase):
def setUp(self):
self.time_patcher = patch.object(stats_manager.time, 'time')
self.time_mock = self.time_patcher.start()
# Initialize ServiceStats
current_time = time()
self.time_mock.return_value = current_time - 0.001
self.start_time = int((current_time - 0.001) * 1000)
self.stats = stats_manager.ServiceStats("my_service")
self.time_mock.return_value = current_time
self.request_simulation = request_simulator(self.stats, self.time_mock)
def tearDown(self):
self.time_patcher.stop()
def test_default_configs(self):
in_mock_time = self.time_mock()
# Test cumulative counters in zero state
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": int(in_mock_time * 1000),
"all": 0,
"4xx": 0,
"5xx": 0,
"latency": 0,
"by_app": {
}
})
# Starting new request shouldn't affect counters
req_info = self.stats.start_request()
req_info.app = "guestbook"
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": int(in_mock_time * 1000),
"all": 0,
"4xx": 0,
"5xx": 0,
"latency": 0,
"by_app": {
}
})
# Only finishing request should change things
req_info.status = 200
req_info.finalize()
self.time_mock.return_value = in_mock_time = int(time() * 1000)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": int(in_mock_time * 1000),
"all": 1,
"4xx": 0,
"5xx": 0,
"latency": 0,
"by_app": {
"guestbook": {"all": 1, "4xx": 0, "5xx": 0, "latency": 0}
}
})
# Reporting client error in guestbook
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.status = 401 # You can fill request fields manually
req_info.finalize()
self.time_mock.return_value = in_mock_time = int(time() * 1000)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": int(in_mock_time * 1000),
"all": 2,
"4xx": 1,
"5xx": 0,
"latency": 0,
"by_app": {
"guestbook": {"all": 2, "4xx": 1, "5xx": 0, "latency": 0}
}
})
# Reporting client and then server errors in new application ghostbook
req_info = self.stats.start_request()
req_info.app = "ghostbook"
req_info.status=404
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "ghostbook"
req_info.status = 503
req_info.finalize()
self.time_mock.return_value = in_mock_time = int(time() * 1000)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": int(in_mock_time * 1000),
"all": 4,
"4xx": 2,
"5xx": 1,
"latency": 0,
"by_app": {
"guestbook": {"all": 2, "4xx": 1, "5xx": 0, "latency": 0},
"ghostbook": {"all": 2, "4xx": 1, "5xx": 1, "latency": 0}
}
})
# Testing latency using request_simulator function
self.request_simulation(latency=100, app="guestbook", status=200,
end_time=1515595821111)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": 1515595821111,
"all": 5,
"4xx": 2,
"5xx": 1,
"latency": 100,
"by_app": {
"guestbook": {"all": 3, "4xx": 1, "5xx": 0, "latency": 100},
"ghostbook": {"all": 2, "4xx": 1, "5xx": 1, "latency": 0}
}
})
self.request_simulation(latency=150, app="guestbook", status=200,
end_time=1515595821111)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": 1515595821111,
"all": 6,
"4xx": 2,
"5xx": 1,
"latency": 250,
"by_app": {
"guestbook": {"all": 4, "4xx": 1, "5xx": 0, "latency": 250},
"ghostbook": {"all": 2, "4xx": 1, "5xx": 1, "latency": 0}
}
})
self.request_simulation(latency=200, app="guestbook", status=404,
end_time=1515595821111)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": 1515595821111,
"all": 7,
"4xx": 3,
"5xx": 1,
"latency": 450,
"by_app": {
"guestbook": {"all": 5, "4xx": 2, "5xx": 0, "latency": 450},
"ghostbook": {"all": 2, "4xx": 1, "5xx": 1, "latency": 0}
}
})
self.request_simulation(latency=200, app="ghostbook", status=503,
end_time=1515595821111)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": 1515595821111,
"all": 8,
"4xx": 3,
"5xx": 2,
"latency": 650,
"by_app": {
"guestbook": {"all": 5, "4xx": 2, "5xx": 0, "latency": 450},
"ghostbook": {"all": 3, "4xx": 1, "5xx": 2, "latency": 200}
}
})
self.request_simulation(latency=350, app="mybook", status=404,
end_time=1515595821111)
self.assertEqual(self.stats.get_cumulative_counters(), {
"from": self.start_time,
"to": 1515595821111,
"all": 9,
"4xx": 4,
"5xx": 2,
"latency": 1000,
"by_app": {
"guestbook": {"all": 5, "4xx": 2, "5xx": 0, "latency": 450},
"ghostbook": {"all": 3, "4xx": 1, "5xx": 2, "latency": 200},
"mybook": {"all": 1, "4xx": 1, "5xx": 0, "latency": 350},
}
})
class TestCustomCumulativeCounters(unittest.TestCase):
def setUp(self):
request_fields = ["app", "namespace", "status", "method",
"preproc_time", "postproc_time"]
def data_proc_summarizer(request_info):
return request_info.preproc_time + request_info.postproc_time
counters_config = {
"all": samples.summarize_all,
"total": data_proc_summarizer,
("by_app", samples.categorize_by_app): {
"all": samples.summarize_all,
"default_ns": lambda req_info: req_info.namespace == "default",
("by_ns", lambda req_info: req_info.namespace): {
"all": samples.summarize_all,
"4xx": samples.summarize_client_error,
"5xx": samples.summarize_server_error,
},
("by_status", samples.categorize_by_status): samples.summarize_all,
("by_method", samples.categorize_by_method): data_proc_summarizer
}
}
self.stats = stats_manager.ServiceStats(
"my_service", cumulative_counters=counters_config,
request_fields=request_fields
)
def test_custom_configs(self):
# Check initial state of counters
counters = self.stats.get_cumulative_counters()
self.assertEqual(counters, {
"from": counters["from"], # it's not an object of the test
"to": counters["to"], # it's not an object of the test
"all": 0,
"total": 0,
"by_app": {
}
})
# Report requests
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.namespace = "friends"
req_info.method = "POST"
req_info.preproc_time = 6
req_info.status = 500
req_info.postproc_time = 0
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.namespace = "friends"
req_info.method = "GET"
req_info.preproc_time = 12
req_info.status = 200
req_info.postproc_time = 12
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.namespace = "default"
req_info.method = "GET"
req_info.preproc_time = 12
req_info.status = 400
req_info.postproc_time = 0
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.namespace = "default"
req_info.method = "GET"
req_info.preproc_time = 10
req_info.status = 201
req_info.postproc_time = 10
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.namespace = "default"
req_info.method = "POST"
req_info.preproc_time = 5
req_info.status = 201
req_info.postproc_time = 10
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "other"
req_info.namespace = "ghosts"
req_info.method = "POST"
req_info.preproc_time = 20
req_info.status = 200
req_info.postproc_time = 10
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "other"
req_info.namespace = "ghosts"
req_info.method = "GET"
req_info.preproc_time = 10
req_info.status = 200
req_info.postproc_time = 10
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "other"
req_info.namespace = "ghosts"
req_info.method = "GET"
req_info.preproc_time = 15
req_info.status = 200
req_info.postproc_time = 10
req_info.finalize()
req_info = self.stats.start_request()
req_info.app = "guestbook"
req_info.namespace = "friends"
req_info.method = "POST"
req_info.preproc_time = 10
req_info.status = 200
req_info.postproc_time = 10
req_info.finalize()
# Check counters
counters = self.stats.get_cumulative_counters()
self.maxDiff = None
self.assertEqual(counters, {
"from": counters["from"], # it's not an object of the test
"to": counters["to"], # it's not an object of the test
"all": 9,
"total": 172,
"by_app": {
"guestbook": {
"all": 6,
"default_ns": 3,
"by_ns": {
"friends": {"all": 3, "4xx": 0, "5xx": 1},
"default": {"all": 3, "4xx": 1, "5xx": 0},
},
"by_status": {200: 2, 201: 2, 400: 1, 500: 1},
"by_method": {"GET": 56, "POST": 41}
},
"other": {
"all": 3,
"default_ns": 0,
"by_ns": {
"ghosts": {"all": 3, "4xx": 0, "5xx": 0},
},
"by_status": {200: 3},
"by_method": {"GET": 45, "POST": 30}
}
}
})
class TestRecentStatsFor0Requests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.time_patcher = patch.object(stats_manager.time, 'time')
cls.time_mock = cls.time_patcher.start()
# Initialize ServiceStats
cls.time_mock.return_value = time()
cls.stats = stats_manager.ServiceStats("my_service")
@classmethod
def tearDownClass(cls):
cls.time_patcher.stop()
def test_default_metrics(self):
in_mock_time = self.time_mock()
# Check recent stats for 0 requests
self.assertEqual(self.stats.get_recent(), {
"from": int(in_mock_time * 1000),
"to": int(in_mock_time * 1000),
"all": 0,
"4xx": 0,
"5xx": 0,
"avg_latency": None
})
def test_detailed_metrics(self):
in_mock_time = self.time_mock()
# Check recent stats for 0 requests using detailed metrics
detailed_metrics = stats_manager.PER_APP_DETAILED_METRICS_MAP
self.assertEqual(self.stats.get_recent(metrics_map=detailed_metrics), {
"from": int(in_mock_time * 1000),
"to": int(in_mock_time * 1000),
"all": 0,
"4xx": 0,
"5xx": 0,
"avg_latency": None,
"by_app": {}
})
class TestMetricsConfigs(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.time_patcher = patch.object(stats_manager.time, 'time')
cls.time_mock = cls.time_patcher.start()
# Initialize ServiceStats with default metrics for recent requests
cls.time_mock.return_value = time()
cls.stats = stats_manager.ServiceStats("my_service")
# Initialize request_simulator for shorter form of start-finalize calls
request = request_simulator(cls.stats, cls.time_mock)
# Start and finalize requests using request_simulator
request(latency=100, app="guestbook", resource="/", status=200,
end_time=1515595821111)
request(latency=150, app="guestbook", resource="/", status=200)
request(latency=200, app="guestbook", resource="/api/foo", status=200)
request(latency=250, app="guestbook", resource="/api/v2", status=200)
request(latency=300, app="guestbook", resource="/api/v2", status=403)
request(latency=350, app="guestbook", resource="/api/v2", status=403)
request(latency=400, app="guestbook", resource="/api/v3/z", status=404)
request(latency=450, app="guestbook", resource="/api/v3/z", status=502)
request(latency=500, app="other", resource="/foo/bar", status=200)
request(latency=550, app="other", resource="/foo/bar", status=200)
request(latency=600, app="other", resource="/path", status=200)
request(latency=650, app="other", resource="/path", status=200)
request(latency=701, app="other", resource="/path", status=401,
end_time=1515595824444)
@classmethod
def tearDownClass(cls):
cls.time_patcher.stop()
def test_default_metrics(self):
# Check recent stats for 13 requests using default main metrics
self.assertEqual(self.stats.get_recent(), {
"from": 1515595821111,
"to": 1515595824444,
"all": 13,
"4xx": 4,
"5xx": 1,
"avg_latency": 400
})
def test_detailed_metrics(self):
    # Check recent stats for 13 requests using detailed metrics
<filename>tensorflow/python/util/deprecation.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# Allow deprecation warnings to be silenced temporarily with a context manager.
_PRINT_DEPRECATION_WARNINGS = True
# Remember which deprecation warnings have been printed already.
_PRINTED_WARNING = {}
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
return decorator_utils.add_notice_to_docstring(
doc, instructions,
'DEPRECATED FUNCTION',
'(deprecated)', [
'THIS FUNCTION IS DEPRECATED. It will be removed %s.' % (
'in a future version' if date is None else ('after %s' % date)),
'Instructions for updating:'])
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
return decorator_utils.add_notice_to_docstring(
doc, instructions,
'DEPRECATED FUNCTION ARGUMENTS',
'(deprecated arguments)', [
'SOME ARGUMENTS ARE DEPRECATED. '
'They will be removed %s.' % (
'in a future version' if date is None else ('after %s' % date)),
'Instructions for updating:'])
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location():
"""Returns call location given level up from current call."""
frame = tf_inspect.currentframe()
if frame:
# CPython internals are available, use them for performance.
# walk back two frames to get to deprecated function caller.
first_frame = frame.f_back
second_frame = first_frame.f_back
frame = second_frame if second_frame else first_frame
return '%s:%d' % (frame.f_code.co_filename, frame.f_lineno)
else:
# Slow fallback path
stack = tf_inspect.stack(0) # 0 avoids generating unused context
entry = stack[2]
return '%s:%d' % (entry[1], entry[2])
def deprecated(date, instructions, warn_once=True):
"""Decorator for marking functions or methods deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is deprecated and will be removed after <date>.
Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated)' is appended
to the first line of the docstring and a deprecation notice is prepended
to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: Boolean. Set to `True` to warn only the first time the decorated
function is called. Otherwise, every call will log a warning.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None and not in ISO 8601 format (YYYY-MM-DD),
  or if instructions are empty.
"""
_validate_deprecation_args(date, instructions)
def deprecated_wrapper(func):
"""Deprecation wrapper."""
decorator_utils.validate_callable(func, 'deprecated')
@functools.wraps(func)
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
if func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[func] = True
logging.warning(
'From %s: %s (from %s) is deprecated and will be removed %s.\n'
'Instructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
return tf_decorator.make_decorator(
func, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(func.__doc__, date,
instructions))
return deprecated_wrapper
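# Illustrative usage sketch (not part of the original module; the decorated
# name `old_multiply` is hypothetical):
#
#   @deprecated('2024-01-01', 'Use `tf.math.multiply` instead.')
#   def old_multiply(a, b):
#     """Multiplies two values."""
#     return a * b
#
# The first call to `old_multiply` logs a single deprecation warning
# (warn_once=True by default) and its docstring gains a '(deprecated)' notice.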
DeprecatedArgSpec = collections.namedtuple(
'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
**kwargs):
"""Decorator for marking specific function arguments as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument. It has the following format:
Calling <function> (from <module>) with <arg> is deprecated and will be
removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> includes the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
*deprecated_arg_names_or_tuples: String or 2-Tuple(String,
[ok_vals]). The string is the deprecated argument name.
Optionally, an ok-value may be provided. If the user provided
argument equals this value, the warning is suppressed.
**kwargs: If `warn_once=False` is passed, every call with a deprecated
argument will log a warning. The default behavior is to only warn the
first time the function is called with any given deprecated argument.
All other kwargs raise `ValueError`.
Returns:
Decorated function or method.
Raises:
ValueError: If date is not None and not in ISO 8601 format, if instructions
  are empty, if the deprecated arguments are not present in the function
  signature, if the second element of a deprecated_tuple is not a list,
  or if a kwarg other than `warn_once` is passed.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_arg_names_or_tuples:
raise ValueError('Specify which argument is deprecated.')
if kwargs and list(kwargs.keys()) != ['warn_once']:
kwargs.pop('warn_once', None)
raise ValueError('Illegal argument to deprecated_args: %s' % kwargs)
warn_once = kwargs.get('warn_once', True)
def _get_arg_names_to_ok_vals():
"""Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
d = {}
for name_or_tuple in deprecated_arg_names_or_tuples:
if isinstance(name_or_tuple, tuple):
d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
else:
d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
return d
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
"""Builds a dictionary from deprecated arguments to their spec.
Returned dict is keyed by argument name.
Each value is a DeprecatedArgSpec with the following fields:
position: The zero-based argument position of the argument
within the signature. None if the argument isn't found in
the signature.
ok_values: Values of this argument for which warning will be
suppressed.
Args:
names_to_ok_vals: dict from string arg_name to a list of values,
possibly empty, which should not elicit a warning.
arg_spec: Output from tf_inspect.getargspec on the called function.
Returns:
Dictionary from arg_name to DeprecatedArgSpec.
"""
arg_name_to_pos = dict(
(name, pos) for (pos, name) in enumerate(arg_spec.args))
deprecated_positional_args = {}
for arg_name, spec in iter(names_to_ok_vals.items()):
if arg_name in arg_name_to_pos:
pos = arg_name_to_pos[arg_name]
deprecated_positional_args[arg_name] = DeprecatedArgSpec(
pos, spec.has_ok_value, spec.ok_value)
return deprecated_positional_args
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_args')
deprecated_arg_names = _get_arg_names_to_ok_vals()
arg_spec = tf_inspect.getargspec(func)
deprecated_positions = _get_deprecated_positional_arguments(
deprecated_arg_names, arg_spec)
is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
is_kwargs_deprecated = arg_spec.keywords in deprecated_arg_names
if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
!= len(deprecated_arg_names_or_tuples)):
known_args = arg_spec.args + [arg_spec.varargs, arg_spec.keywords]
missing_args = [arg_name for arg_name in deprecated_arg_names
if arg_name not in known_args]
raise ValueError('The following deprecated arguments are not present '
'in the function signature: %s. '
'Found next arguments: %s.' % (missing_args, known_args))
def _same_value(a, b):
"""A comparison operation that works for multiple object types.
Returns True for two empty lists, two numeric values with the
same value, etc.
Returns False for (pd.DataFrame, None), and other pairs which
should not be considered equivalent.
Args:
a: value one of the comparison.
b: value two of the comparison.
Returns:
A boolean indicating whether the two inputs are the same value
for the purposes of deprecation.
"""
if a is b:
return True
try:
equality = a == b
if isinstance(equality, bool):
return equality
except TypeError:
return False
return False
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
if _PRINT_DEPRECATION_WARNINGS:
invalid_args = []
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, spec in iter(deprecated_positions.items()):
if (spec.position < len(args) and
not (spec.has_ok_value and
_same_value(named_args[arg_name], spec.ok_value))):
invalid_args.append(arg_name)
if is_varargs_deprecated and len(args) > len(arg_spec.args):
invalid_args.append(arg_spec.varargs)
if is_kwargs_deprecated and kwargs:
invalid_args.append(arg_spec.keywords)
for arg_name in deprecated_arg_names:
if (arg_name in kwargs and
not (deprecated_positions[arg_name].has_ok_value and
_same_value(named_args[arg_name],
deprecated_positions[arg_name].ok_value))):
invalid_args.append(arg_name)
for arg_name in invalid_args:
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
logging.warning(
'From %s: calling %s (from %s) with %s is deprecated and will '
'be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
return tf_decorator.make_decorator(
func, new_func, 'deprecated_args',
_add_deprecated_arg_notice_to_docstring(func.__doc__, date, instructions))
return deprecated_wrapper
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors = True
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = False # initialize
def depart_authors(self, node):
self.depart_docinfo_item()
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append('</blockquote>\n')
def check_simple_list(self, node):
"""Check for a simple list that can be rendered compactly."""
visitor = SimpleListChecker(self.document)
try:
node.walk(visitor)
except nodes.NodeFound:
return None
else:
return 1
def is_compactable(self, node):
return ('compact' in node['classes']
or (self.settings.compact_lists
and 'open' not in node['classes']
and (self.compact_simple
or self.topic_classes == ['contents']
or self.check_simple_list(node))))
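# Illustrative example (assumed settings): with settings.compact_lists enabled,
# a plain two-item bullet list
#
#   * one
#   * two
#
# passes check_simple_list() and is rendered as <ul class="simple">, while a
# list carrying the 'open' class (or one whose items hold multiple paragraphs)
# keeps the regular, non-compact spacing.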
def visit_bullet_list(self, node):
atts = {}
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = 'simple'
self.body.append(self.starttag(node, 'ul', **atts))
def depart_bullet_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ul>\n')
def visit_caption(self, node):
# ??? It would be appropriate to use <figcaption> here but we
# would need to ensure a parent <figure>.
self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
def depart_caption(self, node):
self.body.append('</p>\n')
def visit_citation(self, node):
# @@@ Evil use of table for layout. If only <dl compact> worked...
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label"><col></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
href = '#'
if 'refid' in node:
href += node['refid']
elif 'refname' in node:
href += self.document.nameids[node['refname']]
# else: # TODO system message (or already in the transform)?
# 'Citation reference missing.'
self.body.append(self.starttag(
node, 'a', '[', CLASS='citation-reference', href=href))
def depart_citation_reference(self, node):
self.body.append(']</a>')
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
def depart_classifier(self, node):
self.body.append('</span>')
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
def write_colspecs(self):
width = 0
for node in self.colspecs:
width += node['colwidth']
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.colspecs = []
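# Worked example: two colspec nodes with colwidth 1 and 3 give width = 4, so
# the emitted <col> elements are width="25%" and width="75%" (the +0.5 term
# just rounds to the nearest integer percentage).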
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
"""Escape double-dashes in comment text."""
self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
# Content already processed:
raise nodes.SkipNode
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div', CLASS='compound'))
if len(node) > 1:
node[0]['classes'].append('compound-first')
node[-1]['classes'].append('compound-last')
for child in node[1:-1]:
child['classes'].append('compound-middle')
def depart_compound(self, node):
self.body.append('</div>\n')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div', CLASS='container'))
def depart_container(self, node):
self.body.append('</div>\n')
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact', meta=False)
def depart_contact(self, node):
self.depart_docinfo_item()
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item()
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item()
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
def depart_definition(self, node):
self.body.append('</dd>\n')
def visit_definition_list(self, node):
# ??? Gratuitous class?
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
def depart_definition_list(self, node):
self.body.append('</dl>\n')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
def visit_docinfo(self, node):
# @@@ Evil use of table for layout. If only <dl compact> worked...
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name">\n'
'<col class="docinfo-content">\n'
'<tbody valign="top">\n')
self.in_docinfo = True
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = False
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=True):
if meta:
meta_tag = '<meta name="%s" content="%s">\n' \
% (name, self.attval(node.astext()))
self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
node[0]['classes'].append('first')
if isinstance(node[-1], nodes.Element):
node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
def depart_doctest_block(self, node):
self.body.append('\n</pre>\n')
def visit_document(self, node):
self.head.append('<title>%s</title>\n'
% self.encode(node.get('title', '')))
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.charset_decl % self.settings.output_encoding)
self.head.insert(0, self.charset_decl % self.settings.output_encoding)
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
else:
self.stylesheet.extend(self.math_header)
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.body_prefix.append(self.starttag(node, 'main'))
self.body_suffix.insert(0, '</main>\n')
self.fragment.extend(self.body) # self.fragment is the "naked" body
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
assert not self.context, 'len(context) = %s' % len(self.context)
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em', ''))
def depart_emphasis(self, node):
self.body.append('</em>')
def visit_entry(self, node):
atts = {'class': []}
if isinstance(node.parent.parent, nodes.thead):
atts['class'].append('head')
if node.parent.parent.parent.stubs[node.parent.column]:
# "stubs" list is an attribute of the tgroup element
atts['class'].append('stub')
if atts['class']:
tagname = 'th'
atts['class'] = ' '.join(atts['class'])
else:
tagname = 'td'
del atts['class']
node.parent.column += 1
if 'morerows' in node:
atts['rowspan'] = node['morerows'] + 1
if 'morecols' in node:
atts['colspan'] = node['morecols'] + 1
node.parent.column += node['morecols']
self.body.append(self.starttag(node, tagname, '', **atts))
self.context.append('</%s>\n' % tagname.lower())
if len(node) == 0: # empty cell
self.body.append(' ')
self.set_first_last(node)
def depart_entry(self, node):
self.body.append(self.context.pop())
def visit_enumerated_list(self, node):
atts = {}
if 'start' in node:
# 'start' was restored to acceptability by HTML5 after
# cooler heads realized that the numbering of ordered
# lists is, in fact, part of the content. It must be an
# integer, though.
atts['start'] = str(int(node['start']))
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
# @@@ Evil use of table for layout. If only <dl compact> worked...
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = True
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = True
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = False
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name">\n'
'<col class="field-body">\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n'
+ self.starttag(node.parent, 'tr', '',
CLASS='field')
+ '<td> </td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
def visit_figure(self, node):
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %s' % node['width']
if node.get('align'):
atts['class'] += " align-" + node['align']
self.body.append(self.starttag(node, 'figure', **atts))
def depart_figure(self, node):
self.body.append('</figure>\n')
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = [self.starttag(node, 'footer')]
footer.extend(self.body[start:])
footer.append('\n</footer>\n')
self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
# @@@ Evil use of table for layout. Not clear why in this instance.
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label"><col></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
backlinks = []
backrefs = node['backrefs']
if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
self.context.append('</a>')
self.context.append('<a class="fn-backref" href="#%s">'
% backrefs[0])
else:
i = 1
for backref in backrefs:
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i))
i += 1
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context += ['', '']
else:
self.context.append('')
self.context += ['', '']
# If the node does not only consist of a label.
if len(node) > 1:
# If there are preceding backlinks, we do not set class
# 'first', because we need to retain the top-margin.
if not backlinks:
node[1]['classes'].append('first')
node[-1]['classes'].append('last')
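# Example of the context entries pushed above (assumed rendering by the
# matching visit_label/depart_label methods, which are not in this fragment):
# for a single backref 'id1' the footnote label itself becomes the backlink
# (<a class="fn-backref" href="#id1">...</a>), while for backrefs
# ['id1', 'id2'] the label is left plain and a numbered
# <em>(1, 2)</em> backlink list is emitted instead.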
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
header = [self.starttag(node, 'header')]
header.extend(self.body[start:])
header.append('\n</header>\n')
self.body_prefix.extend(header)
self.header.extend(header)
del self.body[start:]
def visit_image(self, node):
atts = {}
uri = node['uri']
# SVG works in <img> now
# place SWF images in an <object> element
types = {'.swf': 'application/x-shockwave-flash'}
ext = os.path.splitext(uri)[1].lower()
if ext == '.swf':
atts['data'] = uri
atts['type'] = types[ext]
<filename>scripts/cat_rules.py
from difflib import SequenceMatcher
from string import punctuation
import spacy.parts_of_speech as spos
# Contractions
#conts = {"'d", "'ll", "'m", "n't", "'re", "'s", "'ve"}
# Rare POS tags that make uninformative error categories
rare_tags = {"INTJ", "NUM", "SYM", "X"}
# Special auxiliaries in contractions.
#special_aux1 = ({"ca", "can"}, {"sha", "shall"}, {"wo", "will"})
#special_aux2 = {"ca", "sha", "wo"}
# Open class spacy POS tag objects
open_pos = (spos.ADJ, spos.ADV, spos.NOUN, spos.VERB)
# Open class POS tags
open_tags = {"ADJ", "ADV", "NOUN", "VERB"}
# Some dep labels that map to pos tags.
dep_map = { "acomp": "ADJ",
"amod": "ADJ",
"advmod": "ADV",
"det": "DET",
"prep": "PREP",
"prt": "PART",
"punct": "PUNCT" }
# Input 1: An edit list. [orig_start, orig_end, cat, cor, cor_start, cor_end]
# Input 2: An original SpaCy sentence.
# Input 3: A corrected SpaCy sentence.
# Input 4: A set of valid GB English words.
# Input 5: A dictionary to map PTB tags to Stanford Universal Dependency tags.
# Input 6: A preloaded spacy processing object.
# Input 7: The Lancaster stemmer in NLTK.
# Output: The input edit with new error tag, in M2 edit format.
def autoTypeEdit(edit, orig_sent, cor_sent, gb_spell, tag_map, nlp, stemmer):
# Get the tokens in the edit.
orig_toks = orig_sent[edit[0]:edit[1]]
cor_toks = cor_sent[edit[4]:edit[5]]
# Nothing to nothing is a detected, but not corrected edit.
if not orig_toks and not cor_toks:
return "UNK"
# Missing
elif not orig_toks and cor_toks:
op = "M:"
cat = getOneSidedType(cor_toks, tag_map)
# Unnecessary
elif orig_toks and not cor_toks:
op = "U:"
cat = getOneSidedType(orig_toks, tag_map)
# Replacement and special cases
else:
# Same to same is a detected, but not corrected edit.
if orig_toks.text == cor_toks.text:
return "UNK"
# Special: Orthographic errors at the end of multi-token edits are ignored.
# E.g. [Doctor -> The doctor], [The doctor -> Dcotor], [, since -> . Since]
# Classify the edit as if the last token weren't there.
elif orig_toks[-1].lower_ == cor_toks[-1].lower_ and \
(len(orig_toks) > 1 or len(cor_toks) > 1):
min_edit = edit[:]
min_edit[1] -= 1
min_edit[5] -= 1
return autoTypeEdit(min_edit, orig_sent, cor_sent, gb_spell, tag_map, nlp, stemmer)
# Replacement
else:
op = "R:"
cat = getTwoSidedType(orig_toks, cor_toks, gb_spell, tag_map, nlp, stemmer)
return op+cat
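# Illustrative outputs (hypothetical edits): a missing word yields a tag like
# "M:DET", an unnecessary word "U:PREP", and a replacement "R:VERB:TENSE";
# detected-but-uncorrected edits come back as "UNK".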
# Input 1: Spacy tokens
# Input 2: A map dict from PTB to universal dependency pos tags.
# Output: A list of token, pos and dep tag strings.
def getEditInfo(toks, tag_map):
text = []
pos = []
dep = []
for tok in toks:
text.append(tok.text)
tag = tok.tag_.split('__')[0]
if tag == 'PROPN':
tag = 'NOUN'
pos.append(tag)
dep.append(tok.dep_)
return text, pos, dep
# Input 1: Spacy tokens.
# Input 2: A map dict from PTB to universal dependency pos tags.
# Output: An error type string.
# When one side of the edit is null, we can only use the other side.
def getOneSidedType(toks, tag_map):
# Extract strings, pos tags and parse info from the toks.
str_list, pos_list, dep_list = getEditInfo(toks, tag_map)
# Special cases.
if len(toks) == 1:
# Possessive noun suffixes; e.g. ' -> 's
if toks[0].tag_ == "POS":
return "NOUN:POSS"
# Contraction. Rule must come after possessive.
# if toks[0].lower_ in conts:
# return "CONTR"
# Infinitival "to" is treated as part of a verb form.
if toks[0].lower_ == "to" and toks[0].pos_ == "PART" and toks[0].dep_ != "prep":
return "VERB:FORM"
# Auxiliary verbs.
if set(dep_list).issubset({"aux", "auxpass"}):
return "VERB:TENSE"
# POS-based tags. Ignores rare, uninformative categories.
if len(set(pos_list)) == 1 and pos_list[0] not in rare_tags:
return pos_list[0]
# More POS-based tags using special dependency labels.
if len(set(dep_list)) == 1 and dep_list[0] in dep_map.keys():
return dep_map[dep_list[0]]
# To-infinitives and phrasal verbs.
if set(pos_list) == {"PART", "VERB"}:
return "VERB"
# Tricky cases
else:
return "OTHER"
# Input 1: Original text spacy tokens.
# Input 2: Corrected text spacy tokens.
# Input 3: A set of valid GB English words.
# Input 4: A map from PTB to universal dependency pos tags.
# Input 5: A preloaded spacy processing object.
# Input 6: The Lancaster stemmer in NLTK.
# Output: An error type string.
def getTwoSidedType(orig_toks, cor_toks, gb_spell, tag_map, nlp, stemmer):
# Extract strings, pos tags and parse info from the toks.
orig_str, orig_pos, orig_dep = getEditInfo(orig_toks, tag_map)
cor_str, cor_pos, cor_dep = getEditInfo(cor_toks, tag_map)
# Orthography; i.e. whitespace and/or case errors.
if onlyOrthChange(orig_str, cor_str):
return "ORTH"
# Word Order; only matches exact reordering.
if exactReordering(orig_str, cor_str):
return "WO"
# 1:1 replacements (very common)
if len(orig_str) == len(cor_str) == 1:
# 1. SPECIAL CASES
# Possessive noun suffixes; e.g. ' -> 's
# if orig_toks[0].tag_ == "POS" or cor_toks[0].tag_ == "POS":
# return "NOUN:POSS"
# Contraction. Rule must come after possessive.
# if (orig_str[0].lower() in conts or cor_str[0].lower() in conts) and orig_pos == cor_pos:
# return "CONTR"
# Special auxiliaries in contractions (1); e.g. ca -> can
# if set(orig_str[0].lower()+cor_str[0].lower()) in special_aux1:
# return "CONTR"
# Special auxiliaries in contractions (2); e.g. ca -> could
# if orig_str[0].lower() in special_aux2 or cor_str[0].lower() in special_aux2:
# return "VERB:TENSE"
# Special: "was" and "were" are the only past tense SVA.
# if {orig_str[0].lower(), cor_str[0].lower()} == {"was", "were"}:
# return "VERB:SVA"
# 2. SPELLING AND INFLECTION
# Only check alphabetical strings on the original side.
# Spelling errors take precedence over POS errors so this rule is ordered.
if orig_str[0].isalpha():
# Check a GB English dict for both orig and lower case.
# "cat" is in the dict, but "Cat" is not.
if orig_str[0] not in gb_spell and orig_str[0].lower() not in gb_spell:
# Check if both sides have a common lemma
if sameLemma(orig_toks[0], cor_toks[0], nlp):
# Inflection; Usually count vs mass nouns or e.g. got vs getted
if orig_pos == cor_pos and orig_pos[0] in {"NOUN", "VERB"}:
return orig_pos[0]+":INFL"
# Unknown morphology; i.e. we cannot be more specific.
else:
return "MORPH"
# Use string similarity to detect true spelling errors.
else:
char_ratio = SequenceMatcher(None, orig_str[0], cor_str[0]).ratio()
# Ratio > 0.5 means both sides share at least half the same chars.
# WARNING: THIS IS AN APPROXIMATION.
if char_ratio > 0.5:
return "SPELL"
# If ratio is <= 0.5, this may be a spelling+other error; e.g. tolk -> say
else:
# If POS is the same, this takes precedence over spelling.
if orig_pos == cor_pos and orig_pos[0] not in rare_tags:
return orig_pos[0]
# Tricky cases.
else:
return "OTHER"
# 3. MORPHOLOGY
# Only ADJ, ADV, NOUN and VERB with same lemma can have inflectional changes.
if sameLemma(orig_toks[0], cor_toks[0], nlp) and \
orig_pos[0] in open_tags and cor_pos[0] in open_tags:
# Same POS on both sides
if orig_pos == cor_pos:
# Adjective form; e.g. comparatives
if orig_pos[0] == "ADJ":
return "ADJ:FORM"
# Noun number
if orig_pos[0] == "NOUN":
# return "NOUN:NUM"
return "NOUN:FORM"
# Verbs - various types
if orig_pos[0] == "VERB":
# NOTE: These rules are carefully ordered.
# Use the dep parse to find some form errors.
# Main verbs preceded by aux cannot be tense or SVA.
if precededByAux(orig_toks, cor_toks):
return "VERB:FORM"
# Use fine PTB tags to find various errors.
# FORM errors normally involve VBG or VBN.
if orig_toks[0].tag_ in {"VBG", "VBN"} or cor_toks[0].tag_ in {"VBG", "VBN"}:
return "VERB:FORM"
# Of what's left, TENSE errors normally involved VBD.
if orig_toks[0].tag_ == "VBD" or cor_toks[0].tag_ == "VBD":
return "VERB:TENSE"
# Of what's left, SVA errors normally involve VBZ.
if orig_toks[0].tag_ == "VBZ" or cor_toks[0].tag_ == "VBZ":
return "VERB:SVA"
# Any remaining aux verbs are called TENSE.
if orig_dep[0].startswith("aux") and cor_dep[0].startswith("aux"):
return "VERB:TENSE"
# Use dep labels to find some more ADJ:FORM
if set(orig_dep+cor_dep).issubset({"acomp", "amod"}):
return "ADJ:FORM"
# Adj to plural noun is usually a noun number error; e.g. musical -> musicals.
if orig_pos[0] == "ADJ" and cor_toks[0].tag_ == "NNS":
return "NOUN:NUM"
# For remaining verb errors (rare), rely on cor_pos
if cor_toks[0].tag_ in {"VBG", "VBN"}:
return "VERB:FORM"
# Cor VBD = TENSE
if cor_toks[0].tag_ == "VBD":
return "VERB:TENSE"
# Cor VBZ = SVA
if cor_toks[0].tag_ == "VBZ":
return "VERB:SVA"
# Tricky cases that all have the same lemma.
else:
return "MORPH"
# Derivational morphology.
if stemmer.stem(orig_str[0]) == stemmer.stem(cor_str[0]) and \
orig_pos[0] in open_tags and cor_pos[0] in open_tags:
return "MORPH"
# 4. GENERAL
# Auxiliaries with different lemmas
if orig_dep[0].startswith("aux") and cor_dep[0].startswith("aux"):
return "VERB:TENSE"
# POS-based tags. Some of these are context sensitive mispellings.
if orig_pos == cor_pos and orig_pos[0] not in rare_tags:
return orig_pos[0]
# Some dep labels map to POS-based tags.
if orig_dep == cor_dep and orig_dep[0] in dep_map.keys():
return dep_map[orig_dep[0]]
# Phrasal verb particles.
if set(orig_pos+cor_pos) == {"PART", "PREP"} or set(orig_dep+cor_dep) == {"prt", "prep"}:
return "PART"
# Can use dep labels to resolve DET + PRON combinations.
if set(orig_pos+cor_pos) == {"DET", "PRON"}:
# DET cannot be a subject or object.
if cor_dep[0] in {"nsubj", "nsubjpass", "dobj", "pobj"}:
return "PRON"
# "poss" indicates possessive determiner
if cor_dep[0] == "poss":
return "DET"
# Tricky cases.
else:
return "OTHER"
# Multi-token replacements (uncommon)
# All auxiliaries
if set(orig_dep+cor_dep).issubset({"aux", "auxpass"}):
return "VERB:TENSE"
# All same POS
if len(set(orig_pos+cor_pos)) == 1:
# Final verbs with the same lemma are tense; e.g. eat -> has eaten
if orig_pos[0] == "VERB" and sameLemma(orig_toks[-1], cor_toks[-1], nlp):
return "VERB:TENSE"
# POS-based tags.
elif orig_pos[0] not in rare_tags:
return orig_pos[0]
# All same special dep labels.
if len(set(orig_dep+cor_dep)) == 1 and orig_dep[0] in dep_map.keys():
return dep_map[orig_dep[0]]
# Infinitives, gerunds, phrasal verbs.
if set(orig_pos+cor_pos) == {"PART", "VERB"}:
# Final verbs with the same lemma are form; e.g. to eat -> eating
if sameLemma(orig_toks[-1], cor_toks[-1], nlp):
return "VERB:FORM"
# Remaining edits are often verb; e.g. to eat -> consuming, look at -> see
else:
return "VERB"
# Possessive nouns
<reponame>byron123t/faceoff
## lp_attack.py -- attack a network optimizing for l_2 distance
##
## Copyright (C) 2016, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
import math
import tensorflow as tf
import numpy as np
from backend import Config
BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary search
MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent
ABORT_EARLY = False # if we stop improving, abort gradient descent early
LEARNING_RATE = 1e-2 # larger values converge faster to less accurate results
TARGET_FLAG = False # should we target one specific class? or just be wrong?
CONFIDENCE = 0 # how strong the adversarial example should be
INITIAL_CONST = 1e-2 # the initial constant c to pick as a first guess
MARGIN = 0
TV_FLAG = False
LARGE = 1e10
#needed for l_inf attack
LARGEST_CONST = 2e+1 # the largest value of c to go up to before giving up
REDUCE_CONST = True # try to lower c each iteration; faster to set to false
CONST_FACTOR = 4.0 # f>1, rate at which we increase constant, smaller better
DECREASE_FACTOR = 0.5 # 0<f<1, rate at which we shrink tau; larger is more accurate
class CW:
def __init__(self,
sess,
model,
params,
num_base = 1,
num_src = 1,
num_target = 1,
confidence = CONFIDENCE,
margin = MARGIN,
abort_early = ABORT_EARLY,
hinge_loss = True,
largest_const = LARGEST_CONST,
reduce_const = REDUCE_CONST,
decrease_factor = DECREASE_FACTOR,
const_factor = CONST_FACTOR):
"""
This attack is the most efficient and should be used as the primary
attack to evaluate potential defenses.
Returns adversarial examples for the supplied model.
confidence: Confidence of adversarial examples: higher produces examples
that are farther away, but more strongly classified as adversarial.
batch_size: Number of attacks to run simultaneously.
targeted: True if we should perform a targeted attack, False otherwise.
learning_rate: The learning rate for the attack algorithm. Smaller values
produce better results but are slower to converge.
binary_search_steps: The number of times we perform binary search to
find the optimal tradeoff-constant between distance and confidence.
max_iterations: The maximum number of iterations. Larger values are more
accurate; setting too small will require a large learning rate and will
produce poor results.
abort_early: If true, allows early aborts if gradient descent gets stuck.
initial_const: The initial tradeoff-constant to use to tune the relative
importance of distance and confidence. If binary_search_steps is large,
the initial constant is not important.
boxmin: Minimum pixel value (default -0.5).
boxmax: Maximum pixel value (default 0.5).
"""
#above: missing several parameter descriptions
image_height, image_width, num_channels = model.image_height, model.image_width, model.num_channels
self.sess = sess
self.model = model
self.model_type = params['model_type']
self.loss_type = params['loss_type']
self.TARGET_FLAG = params['targeted_flag']
self.LEARNING_RATE = params['learning_rate']
self.MAX_ITERATIONS = params['iterations']
#params['iterations']
self.BINARY_SEARCH_STEPS = params['binary_steps']
self.ABORT_EARLY = abort_early
self.CONFIDENCE = confidence
self.MARGIN = margin
if params['batch_size'] <= 0:
self.batch_size = num_base
else:
self.batch_size = min(params['batch_size'], num_base)
self.num_target = num_target
self.num_src = num_src
self.is_hinge_loss = params['hinge_flag']
self.p_norm = params['norm']
if self.p_norm != '2':
self.batch_size = 1
self.INITIAL_CONST = [params['init_const']] * self.batch_size
self.TV_FLAG = params['tv_flag']
self.COS_FLAG = params['cos_flag']
self.LOSS_IMPL = params['mean_loss']
self.boxmin = params['pixel_min']
self.boxmax = params['pixel_max']
#needed for l_inf attack
self.LARGEST_CONST = largest_const
self.DECREASE_FACTOR = decrease_factor
self.REDUCE_CONST = reduce_const
self.const_factor = const_factor
self.repeat = self.BINARY_SEARCH_STEPS >= 10
print('Batch size: {}'.format(self.batch_size))
print('Margin: {}'.format(self.MARGIN))
if self.model_type == 'large':
shape = (self.batch_size, image_height, image_width, num_channels)
target_db_shape = (num_target, image_height, image_width, num_channels)
self_db_shape = (num_src, image_height, image_width, num_channels)
else:
shape = (self.batch_size, num_channels, image_width, image_height)
target_db_shape = (num_target, num_channels, image_width, image_height)
self_db_shape = (num_src, num_channels, image_width, image_height)
print("shape:", shape)
modifier = tf.Variable(tf.random_uniform(shape, minval=-0.1, maxval=0.1, dtype=tf.float32))
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.const = tf.Variable(np.zeros(self.batch_size), dtype=tf.float32)
self.tau = tf.Variable(np.zeros(1), dtype=tf.float32)
self.assign_timg = tf.placeholder(tf.float32, shape)
self.assign_const = tf.placeholder(tf.float32, [self.batch_size])
self.assign_tau = tf.placeholder(tf.float32, [1])
self.targetdb = tf.Variable(np.zeros(target_db_shape), dtype=tf.float32)
self.selfdb = tf.Variable(np.zeros(self_db_shape), dtype=tf.float32)
self.assign_targetdb = tf.placeholder(tf.float32, target_db_shape)
self.assign_selfdb = tf.placeholder(tf.float32, self_db_shape)
# boxmul/boxplus map the tanh-space variable (range [-1, 1]) back to the
# pixel range [boxmin, boxmax]: image = tanh(w) * boxmul + boxplus.
self.boxmul = (self.boxmax - self.boxmin) / 2.
self.boxplus = (self.boxmin + self.boxmax) / 2.
# this condition is different from carlini's original implementation
self.newimg = tf.tanh(modifier + self.timg)
self.newimg = self.newimg * self.boxmul + self.boxplus
self.targetdb_bounded = tf.tanh(self.targetdb)
self.targetdb_bounded = self.targetdb_bounded * self.boxmul + self.boxplus
self.selfdb_bounded = tf.tanh(self.selfdb)
self.selfdb_bounded = self.selfdb_bounded * self.boxmul + self.boxplus
self.outputNew = model.predict(self.newimg)
self.outputOld = model.predict(self.timg)
self.outputTarg = model.predict(self.targetdb_bounded)
self.outputSelf = model.predict(self.selfdb_bounded)
if self.LOSS_IMPL == 'embeddingmean':
if self.p_norm == '2':
self.lpdist = tf.sqrt(tf.reduce_sum(tf.square(self.newimg - (tf.tanh(self.timg) * self.boxmul + self.boxplus)), [1,2,3]))
else: #check this line below
self.lpdist = tf.reduce_sum(tf.maximum(0.0, tf.abs(self.newimg - (tf.tanh(self.timg) * self.boxmul + self.boxplus)) - self.tau))
else: #chuhan's loss function
if self.p_norm == '2':
self.lpdist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3])
else:
self.lpdist = tf.reduce_sum(tf.maximum(0.0, tf.abs(self.newimg - (tf.tanh(self.timg) * self.boxmul + self.boxplus)) - self.tau))
self.modifier_bounded = self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)
self.outputTargMean = tf.reduce_mean(self.outputTarg, axis=0)
self.outputSelfMean = tf.reduce_mean(self.outputSelf, axis=0)
def ZERO():
return np.asarray(0., dtype=np.dtype('float32'))
self.target_loss = tf.sqrt(tf.reduce_sum(tf.square(self.outputNew - self.outputTargMean), [1]))
self.src_loss = tf.sqrt(tf.reduce_sum(tf.square(self.outputNew - self.outputSelfMean), [1]))
self.orig_loss = tf.sqrt(tf.reduce_sum(tf.square(self.outputOld - self.outputSelfMean), [1]))
if self.COS_FLAG:
self.cosTargMean = tf.multiply(self.outputTargMean, np.ones(self.outputNew.shape))
self.cosSelfMean = tf.multiply(self.outputSelfMean, np.ones(self.outputNew.shape))
dot_targ = tf.reduce_sum(tf.multiply(self.outputNew, self.cosTargMean), [1])
dot_src = tf.reduce_sum(tf.multiply(self.outputNew, self.cosSelfMean), [1])
norm_targ = tf.sqrt(tf.reduce_sum(tf.square(self.outputNew), [1])) * tf.sqrt(tf.reduce_sum(tf.square(self.cosTargMean), [1]))
norm_src = tf.sqrt(tf.reduce_sum(tf.square(self.outputNew), [1])) * tf.sqrt(tf.reduce_sum(tf.square(self.cosSelfMean), [1]))
self.target_loss_cos = tf.acos(dot_targ / norm_targ) / math.pi * 180
self.src_loss_cos = tf.acos(dot_src / norm_src) / math.pi * 180
if self.TARGET_FLAG:
if self.is_hinge_loss:
self.hinge_loss_cos = self.target_loss_cos - self.src_loss_cos + (self.CONFIDENCE * 6)
self.hinge_loss_cos = tf.maximum(self.hinge_loss_cos, ZERO())
self.loss4 = self.hinge_loss_cos
else:
self.loss4 = self.target_loss_cos
else:
self.loss4 = 0
if self.LOSS_IMPL == 'embeddingmean':
if self.TARGET_FLAG:
if self.is_hinge_loss:
self.hinge_loss = self.target_loss - self.src_loss + self.CONFIDENCE
self.hinge_loss = tf.maximum(self.hinge_loss, ZERO())
self.loss1 = self.hinge_loss
else:
self.loss1 = self.target_loss
else:
# Untargeted: increase the distance to the source identity until it
# exceeds the original distance by CONFIDENCE (hinged at zero).
self.loss1 = self.orig_loss - self.src_loss + self.CONFIDENCE
self.loss1 = tf.maximum(self.loss1, ZERO())
else: #chuhan's implementation
if self.TARGET_FLAG:
self.target_loss = tf.reduce_mean(tf.reduce_sum(tf.square(self.outputNew - self.outputTarg),1), axis=0)
self.src_loss = tf.reduce_mean(tf.reduce_sum(tf.square(self.outputNew - self.outputSelf),1), axis=0)
if self.is_hinge_loss:
self.hinge_loss = self.target_loss - self.src_loss + self.CONFIDENCE
self.hinge_loss = tf.maximum(self.hinge_loss, ZERO())
self.loss1 = self.hinge_loss
else:
self.loss1 = -tf.reduce_sum(tf.square(self.outputNew - self.outputTarg),1)
#add condition to check if smoothing term is needed/not
self.loss1 = tf.reduce_sum(self.const * self.loss1)
self.loss2 = tf.reduce_sum(self.lpdist)
self.loss4 = tf.reduce_sum(self.const * self.loss4)
self.loss = self.loss1 + self.loss2 + self.loss4
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
self.train = optimizer.minimize(self.loss, var_list=[modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.const.assign(self.assign_const))
self.setup.append(self.targetdb.assign(self.assign_targetdb))
self.setup.append(self.selfdb.assign(self.assign_selfdb))
self.setup.append(self.tau.assign(self.assign_tau))
self.init = tf.variables_initializer(var_list=[modifier]+new_vars)
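# Sketch of the params dict this constructor reads (keys taken from the
# __init__ above; the example values are illustrative assumptions, not the
# authors' settings):
#
#   params = {
#       'model_type': 'large', 'loss_type': 'triplet', 'targeted_flag': True,
#       'learning_rate': LEARNING_RATE, 'iterations': MAX_ITERATIONS,
#       'binary_steps': BINARY_SEARCH_STEPS, 'batch_size': 0,
#       'hinge_flag': True, 'norm': '2', 'init_const': INITIAL_CONST,
#       'tv_flag': TV_FLAG, 'cos_flag': False, 'mean_loss': 'embeddingmean',
#       'pixel_min': -0.5, 'pixel_max': 0.5,
#   }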
def attack(self,
imgs,
target_imgs,
src_imgs,
params):
"""
Perform the L_2 attack on the given images for the given targets.
If self.targeted is true, then the targets represents the target labels.
If self.targeted is false, then targets are the original class labels.
"""
lp_list = []
const_list = []
adv_list = []
delta_list = []
batch_size = self.batch_size
for i in range(0,len(imgs),batch_size):
cur_batch = len(imgs) - i
if self.batch_size > cur_batch:
self.__init__(sess=self.sess,
model=self.model,
params=params,
num_base=cur_batch,
num_src=self.num_src,
num_target=self.num_target,
confidence=self.MARGIN,
margin=self.MARGIN)
lp, const, adv, delta = self.attack_batch_l2(imgs[i:i+self.batch_size], target_imgs, src_imgs)
print(len(lp), len(const), len(adv), len(delta))
lp_list.extend(lp)
const_list.extend(const)
adv_list.extend(adv)
delta_list.extend(delta)
r = np.squeeze(np.array([(lp_list, const_list, adv_list, delta_list)]))
return r
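# Hedged usage sketch (sess, model and the image arrays are assumed to exist
# and are not defined in this file):
#
#   cw = CW(sess, model, params, num_base=len(base_imgs),
#           num_src=len(src_imgs), num_target=len(target_imgs))
#   result = cw.attack(base_imgs, target_imgs, src_imgs, params)
#
# `result` bundles the lp distances, the final constants, the adversarial
# images and the perturbations returned by attack_batch_l2.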
def attack_batch_l2(self,
imgs,
target_imgs,
src_imgs):
"""
Run the attack on a batch of images and labels.
"""
batch_size = self.batch_size
imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)
face_stack_target = np.arctanh((target_imgs - self.boxplus) / self.boxmul * 0.999999)
face_stack_self = np.arctanh((src_imgs - self.boxplus) / self.boxmul * 0.999999)
CONST = np.ones(batch_size)*self.INITIAL_CONST
const_high = [1e3] * batch_size
const_low = [0.0] * batch_size
best_lp = [9999.0] * batch_size
best_adv = [None] * batch_size
best_delta = [None] * batch_size
best_const = [None] * batch_size
for outer_step in range(self.BINARY_SEARCH_STEPS):
self.sess.run(self.init)
best_loss_inner = [1e10] * batch_size
best_adv_inner = [None] * batch_size
best_delta_inner = [None] * batch_size
best_dist_src = [None] * batch_size
best_dist_target = [None] * batch_size
# On the last iteration (if we run many steps), repeat the search once.
(cross and self.cross_info.has_host() and self.cross_info.config['host_machine']['system'] == 'windows'):
self.exe_suffix = 'exe'
self.object_suffix = 'obj'
self.win_libdir_layout = True
elif (not cross and mesonlib.is_cygwin()) \
or (cross and self.cross_info.has_host() and self.cross_info.config['host_machine']['system'] == 'cygwin'):
self.exe_suffix = 'exe'
self.object_suffix = 'o'
self.win_libdir_layout = True
else:
self.exe_suffix = ''
self.object_suffix = 'o'
self.win_libdir_layout = False
if 'STRIP' in os.environ:
self.native_strip_bin = shlex.split(os.environ['STRIP'])
else:
self.native_strip_bin = ['strip']
def is_cross_build(self):
return self.cross_info is not None
def dump_coredata(self):
return coredata.save(self.coredata, self.get_build_dir())
def get_script_dir(self):
import mesonbuild.scripts
return os.path.dirname(mesonbuild.scripts.__file__)
def get_log_dir(self):
return self.log_dir
def get_coredata(self):
return self.coredata
def get_build_command(self, unbuffered=False):
cmd = mesonlib.meson_command[:]
if unbuffered and 'python' in cmd[0]:
cmd.insert(1, '-u')
return cmd
def is_header(self, fname):
return is_header(fname)
def is_source(self, fname):
return is_source(fname)
def is_assembly(self, fname):
return is_assembly(fname)
def is_llvm_ir(self, fname):
return is_llvm_ir(fname)
def is_object(self, fname):
return is_object(fname)
def is_library(self, fname):
return is_library(fname)
@staticmethod
def get_gnu_compiler_defines(compiler):
"""
Detect GNU compiler platform type (Apple, MinGW, Unix)
"""
# Arguments to output compiler pre-processor defines to stdout
# gcc, g++, and gfortran all support these arguments
args = compiler + ['-E', '-dM', '-']
p, output, error = Popen_safe(args, write='', stdin=subprocess.PIPE)
if p.returncode != 0:
raise EnvironmentException('Unable to detect GNU compiler type:\n' + output + error)
# Parse several lines of the type:
# `#define ___SOME_DEF some_value`
# and extract `___SOME_DEF`
defines = {}
for line in output.split('\n'):
if not line:
continue
d, *rest = line.split(' ', 2)
if d != '#define':
continue
if len(rest) == 1:
defines[rest[0]] = True
if len(rest) == 2:
defines[rest[0]] = rest[1]
return defines
@staticmethod
def get_gnu_version_from_defines(defines):
dot = '.'
major = defines.get('__GNUC__', '0')
minor = defines.get('__GNUC_MINOR__', '0')
patch = defines.get('__GNUC_PATCHLEVEL__', '0')
return dot.join((major, minor, patch))
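# Example (illustrative): defines {'__GNUC__': '7', '__GNUC_MINOR__': '3',
# '__GNUC_PATCHLEVEL__': '0'} produce the version string '7.3.0'; missing
# keys fall back to '0'.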
@staticmethod
def get_lcc_version_from_defines(defines):
dot = '.'
generation_and_major = defines.get('__LCC__', '100')
generation = generation_and_major[:1]
major = generation_and_major[1:]
minor = defines.get('__LCC_MINOR__', '0')
return dot.join((generation, major, minor))
@staticmethod
def get_gnu_compiler_type(defines):
# Detect GCC type (Apple, MinGW, Cygwin, Unix)
if '__APPLE__' in defines:
return GCC_OSX
elif '__MINGW32__' in defines or '__MINGW64__' in defines:
return GCC_MINGW
elif '__CYGWIN__' in defines:
return GCC_CYGWIN
return GCC_STANDARD
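# Example (illustrative): a define set containing '__APPLE__' is classified
# as GCC_OSX, '__MINGW32__'/'__MINGW64__' as GCC_MINGW, '__CYGWIN__' as
# GCC_CYGWIN, and anything else falls back to GCC_STANDARD.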
def warn_about_lang_pointing_to_cross(self, compiler_exe, evar):
evar_str = os.environ.get(evar, 'WHO_WOULD_CALL_THEIR_COMPILER_WITH_THIS_NAME')
if evar_str == compiler_exe:
mlog.warning('''Env var %s seems to point to the cross compiler.
This is probably wrong, it should always point to the native compiler.''' % evar)
def _get_compilers(self, lang, evar, want_cross):
'''
The list of compilers is detected in the exact same way for
C, C++, ObjC, ObjC++, Fortran, CS so consolidate it here.
'''
if self.is_cross_build() and want_cross:
compilers = mesonlib.stringlistify(self.cross_info.config['binaries'][lang])
# Ensure ccache exists and remove it if it doesn't
if compilers[0] == 'ccache':
compilers = compilers[1:]
ccache = self.detect_ccache()
else:
ccache = []
self.warn_about_lang_pointing_to_cross(compilers[0], evar)
# Return value has to be a list of compiler 'choices'
compilers = [compilers]
is_cross = True
if self.cross_info.need_exe_wrapper():
exe_wrap = self.cross_info.config['binaries'].get('exe_wrapper', None)
else:
exe_wrap = []
elif evar in os.environ:
compilers = shlex.split(os.environ[evar])
# Ensure ccache exists and remove it if it doesn't
if compilers[0] == 'ccache':
compilers = compilers[1:]
ccache = self.detect_ccache()
else:
ccache = []
# Return value has to be a list of compiler 'choices'
compilers = [compilers]
is_cross = False
exe_wrap = None
else:
compilers = getattr(self, 'default_' + lang)
ccache = self.detect_ccache()
is_cross = False
exe_wrap = None
return compilers, ccache, is_cross, exe_wrap
def _handle_exceptions(self, exceptions, binaries, bintype='compiler'):
errmsg = 'Unknown {}(s): {}'.format(bintype, binaries)
if exceptions:
errmsg += '\nThe follow exceptions were encountered:'
for (c, e) in exceptions.items():
errmsg += '\nRunning "{0}" gave "{1}"'.format(c, e)
raise EnvironmentException(errmsg)
def _detect_c_or_cpp_compiler(self, lang, evar, want_cross):
popen_exceptions = {}
compilers, ccache, is_cross, exe_wrap = self._get_compilers(lang, evar, want_cross)
for compiler in compilers:
if isinstance(compiler, str):
compiler = [compiler]
if 'cl' in compiler or 'cl.exe' in compiler:
# Watcom C provides its own cl.exe clone that mimics an older
# version of Microsoft's compiler. Since Watcom's cl.exe is
# just a wrapper, we skip using it if we detect its presence
# so as not to confuse Meson when configuring for MSVC.
#
# Additionally the help text of Watcom's cl.exe is paged, and
# the binary will not exit without human intervention. In
# practice, Meson will block waiting for Watcom's cl.exe to
# exit, which requires user input and thus will never exit.
if 'WATCOM' in os.environ:
def sanitize(p):
return os.path.normcase(os.path.abspath(p))
watcom_cls = [sanitize(os.path.join(os.environ['WATCOM'], 'BINNT', 'cl')),
sanitize(os.path.join(os.environ['WATCOM'], 'BINNT', 'cl.exe'))]
found_cl = sanitize(shutil.which('cl'))
if found_cl in watcom_cls:
continue
arg = '/?'
elif 'armcc' in compiler[0]:
arg = '--vsn'
else:
arg = '--version'
try:
p, out, err = Popen_safe(compiler + [arg])
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
continue
version = search_version(out)
full_version = out.split('\n', 1)[0]
guess_gcc_or_lcc = False
if 'Free Software Foundation' in out:
guess_gcc_or_lcc = 'gcc'
if 'e2k' in out and 'lcc' in out:
guess_gcc_or_lcc = 'lcc'
if guess_gcc_or_lcc:
defines = self.get_gnu_compiler_defines(compiler)
if not defines:
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'
continue
gtype = self.get_gnu_compiler_type(defines)
if guess_gcc_or_lcc == 'lcc':
version = self.get_lcc_version_from_defines(defines)
cls = ElbrusCCompiler if lang == 'c' else ElbrusCPPCompiler
else:
version = self.get_gnu_version_from_defines(defines)
cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler
return cls(ccache + compiler, version, gtype, is_cross, exe_wrap, defines, full_version=full_version)
if 'armclang' in out:
# The compiler version is not on the first line of output; it appears on
# the second line, which starts with 'Component:'. We still search the
# whole output for 'Component', since the exact format may change in
# future versions.
arm_ver_str = re.search('.*Component.*', out)
if arm_ver_str is None:
popen_exceptions[' '.join(compiler)] = 'version string not found'
continue
arm_ver_str = arm_ver_str.group(0)
# Override previous values
version = search_version(arm_ver_str)
full_version = arm_ver_str
cls = ArmclangCCompiler if lang == 'c' else ArmclangCPPCompiler
return cls(ccache + compiler, version, is_cross, exe_wrap, full_version=full_version)
if 'clang' in out:
if 'Apple' in out or mesonlib.for_darwin(want_cross, self):
cltype = CLANG_OSX
elif 'windows' in out or mesonlib.for_windows(want_cross, self):
cltype = CLANG_WIN
else:
cltype = CLANG_STANDARD
cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler
return cls(ccache + compiler, version, cltype, is_cross, exe_wrap, full_version=full_version)
if 'Microsoft' in out or 'Microsoft' in err:
# Latest versions of Visual Studio print version
# number to stderr but earlier ones print version
# on stdout. Why? Lord only knows.
# Check both outputs to figure out version.
version = search_version(err)
if version == 'unknown version':
version = search_version(out)
if version == 'unknown version':
m = 'Failed to detect MSVC compiler arch: stderr was\n{!r}'
raise EnvironmentException(m.format(err))
is_64 = err.split('\n')[0].endswith(' x64')
cls = VisualStudioCCompiler if lang == 'c' else VisualStudioCPPCompiler
return cls(compiler, version, is_cross, exe_wrap, is_64)
if '(ICC)' in out:
# TODO: add Microsoft (ICC on Windows) and OSX checks
inteltype = ICC_STANDARD
cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler
return cls(ccache + compiler, version, inteltype, is_cross, exe_wrap, full_version=full_version)
if 'ARM' in out:
cls = ArmCCompiler if lang == 'c' else ArmCPPCompiler
return cls(ccache + compiler, version, is_cross, exe_wrap, full_version=full_version)
self._handle_exceptions(popen_exceptions, compilers)
def detect_c_compiler(self, want_cross):
return self._detect_c_or_cpp_compiler('c', 'CC', want_cross)
def detect_cpp_compiler(self, want_cross):
return self._detect_c_or_cpp_compiler('cpp', 'CXX', want_cross)
def detect_fortran_compiler(self, want_cross):
popen_exceptions = {}
compilers, ccache, is_cross, exe_wrap = self._get_compilers('fortran', 'FC', want_cross)
for compiler in compilers:
if isinstance(compiler, str):
compiler = [compiler]
for arg in ['--version', '-V']:
try:
p, out, err = Popen_safe(compiler + [arg])
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
continue
version = search_version(out)
full_version = out.split('\n', 1)[0]
guess_gcc_or_lcc = False
if 'GNU Fortran' in out:
guess_gcc_or_lcc = 'gcc'
if 'e2k' in out and 'lcc' in out:
guess_gcc_or_lcc = 'lcc'
if guess_gcc_or_lcc:
defines = self.get_gnu_compiler_defines(compiler)
if not defines:
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'
continue
gtype = self.get_gnu_compiler_type(defines)
if guess_gcc_or_lcc == 'lcc':
version = self.get_lcc_version_from_defines(defines)
cls = ElbrusFortranCompiler
else:
version = self.get_gnu_version_from_defines(defines)
cls = GnuFortranCompiler
return cls(compiler, version, gtype, is_cross, exe_wrap, defines, full_version=full_version)
if 'G95' in out:
return G95FortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version)
if 'Sun Fortran' in err:
version = search_version(err)
return SunFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version)
if 'ifort (IFORT)' in out:
return IntelFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version)