id stringlengths 1-265 | text stringlengths 6-5.19M | dataset_id stringclasses 7 values |
---|---|---|
/MTGProxyPrinter-0.25.0.tar.gz/MTGProxyPrinter-0.25.0/mtg_proxy_printer/model/imagedb.py |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dataclasses
import errno
import functools
import io
import itertools
import pathlib
import shutil
import socket
import string
import typing
import urllib.error
from PyQt5.QtCore import QObject, pyqtSignal as Signal, pyqtSlot as Slot, QThread, QSize, QModelIndex, Qt
from PyQt5.QtGui import QPixmap, QColor
from mtg_proxy_printer.document_controller.card_actions import ActionAddCard
from mtg_proxy_printer.document_controller.replace_card import ActionReplaceCard
from mtg_proxy_printer.document_controller.import_deck_list import ActionImportDeckList
from mtg_proxy_printer.document_controller import DocumentAction
import mtg_proxy_printer.app_dirs
import mtg_proxy_printer.downloader_base
import mtg_proxy_printer.http_file
from mtg_proxy_printer.model.carddb import Card, CheckCard, AnyCardType
from mtg_proxy_printer.stop_thread import stop_thread
from mtg_proxy_printer.logger import get_logger
logger = get_logger(__name__)
del get_logger
ItemDataRole = Qt.ItemDataRole
DEFAULT_DATABASE_LOCATION = mtg_proxy_printer.app_dirs.data_directories.user_cache_path / "CardImages"
__all__ = [
"ImageDatabase",
"ImageDownloader",
"CacheContent",
"ImageKey",
]
@dataclasses.dataclass(frozen=True)
class ImageKey:
scryfall_id: str
is_front: bool
is_high_resolution: bool
def format_relative_path(self) -> pathlib.Path:
"""Returns the file system path of the associated image relative to the image database root path."""
level1 = self.format_level_1_directory_name(self.is_front, self.is_high_resolution)
return pathlib.Path(level1, self.scryfall_id[:2], f"{self.scryfall_id}.png")
@staticmethod
def format_level_1_directory_name(is_front: bool, is_high_resolution: bool) -> str:
side = "front" if is_front else "back"
res = "highres" if is_high_resolution else "lowres"
return f"{res}_{side}"
@dataclasses.dataclass(frozen=True)
class CacheContent(ImageKey):
absolute_path: pathlib.Path
def as_key(self):
return ImageKey(self.scryfall_id, self.is_front, self.is_high_resolution)
PathSizeList = typing.List[typing.Tuple[pathlib.Path, int]]
IMAGE_SIZE = QSize(745, 1040)
class ImageDatabase(QObject):
"""
This class manages the on-disk PNG image cache. It can asynchronously fetch images from disk or from the Scryfall
servers as needed, provides an in-memory cache, and allows deletion of images on disk.
"""
card_download_starting = Signal(int, str)
card_download_finished = Signal()
card_download_progress = Signal(int)
request_action = Signal(DocumentAction)
missing_images_obtained = Signal()
"""
Messages if the internal ImageDownloader instance performs a batch operation when it processes image requests for
a deck list. It signals if such a long-running process starts or finishes.
"""
batch_processing_state_changed = Signal(bool)
request_batch_state_change = Signal(bool)
network_error_occurred = Signal(str) # Emitted when downloading failed due to network issues.
def __init__(self, db_path: pathlib.Path = DEFAULT_DATABASE_LOCATION, parent: QObject = None):
super(ImageDatabase, self).__init__(parent)
self.db_path = db_path
_migrate_database(db_path)
# Caches loaded images in a map from ImageKey to image. If a file is already loaded, use the loaded instance
# instead of loading it from disk again. This prevents loading the same file into multiple, distinct QPixmap
# instances, saving memory.
self.loaded_images: typing.Dict[ImageKey, QPixmap] = {}
self.images_on_disk: typing.Set[ImageKey] = set()
self.download_thread = QThread()
self.download_thread.setObjectName(f"{self.__class__.__name__} background worker")
self.download_thread.finished.connect(lambda: logger.debug(f"{self.download_thread.objectName()} stopped."))
self.download_worker = ImageDownloader(self)
self.download_worker.moveToThread(self.download_thread)
self.request_batch_state_change.connect(self.download_worker.request_batch_processing_state_change)
self.download_worker.download_begins.connect(self.card_download_starting)
self.download_worker.download_finished.connect(self.card_download_finished)
self.download_worker.download_progress.connect(self.card_download_progress)
self.download_worker.batch_processing_state_changed.connect(self.batch_processing_state_changed)
self.download_worker.request_action.connect(self.request_action)
self.download_worker.missing_images_obtained.connect(self.missing_images_obtained)
self.download_worker.network_error_occurred.connect(self.network_error_occurred)
self.download_thread.started.connect(self.download_worker.scan_disk_image_cache)
self.download_thread.start()
logger.info(f"Created {self.__class__.__name__} instance.")
@property
@functools.lru_cache(maxsize=1)
def blank_image(self):
"""Returns a static, empty QPixmap in the size of a regular magic card."""
pixmap = QPixmap(IMAGE_SIZE)
pixmap.fill(QColor("white"))
return pixmap
def quit_background_thread(self):
logger.info(f"Quitting {self.__class__.__name__} background worker thread")
self.download_worker.should_run = False
try:
self.download_worker.currently_opened_file_monitor.close()
self.download_worker.currently_opened_file.close()
except AttributeError:
# Ignore the error caused by a possible race condition, when the download worker thread clears the
# currently opened file while this runs.
pass
stop_thread(self.download_thread, logger)
def filter_already_downloaded(self, possible_matches: typing.List[Card]) -> typing.List[Card]:
"""
Takes a list of cards and returns a new list containing all cards from the source list whose images have
already been downloaded. The order of cards is preserved.
"""
return [
card for card in possible_matches
if ImageKey(card.scryfall_id, card.is_front, card.highres_image) in self.images_on_disk
]
def read_disk_cache_content(self) -> typing.List[CacheContent]:
"""
Returns all entries currently in the hard disk image cache.
:returns: List of CacheContent entries (scryfall_id: str, is_front: bool, is_high_resolution: bool, absolute_path: pathlib.Path)
"""
result: typing.List[CacheContent] = []
data: typing.Iterable[typing.Tuple[pathlib.Path, bool, bool]] = (
(self.db_path/CacheContent.format_level_1_directory_name(is_front, is_high_resolution),
is_front, is_high_resolution)
for is_front, is_high_resolution in itertools.product([True, False], repeat=2)
)
for directory, is_front, is_high_resolution in data:
result += (
CacheContent(path.stem, is_front, is_high_resolution, path)
for path in directory.glob("[0-9a-z][0-9a-z]/*.png"))
return result
def delete_disk_cache_entries(self, images: typing.Iterable[ImageKey]) -> PathSizeList:
"""
Remove the given images from the hard disk cache.
:returns: List of (removed path, size in bytes) tuples.
"""
removed: PathSizeList = []
for image in images:
path = self.db_path/image.format_relative_path()
if path.is_file():
logger.debug(f"Removing image: {path}")
size_bytes = path.stat().st_size
path.unlink()
removed.append((path, size_bytes))
self.images_on_disk.remove(image)
self._delete_image_parent_directory_if_empty(path)
else:
logger.warning(f"Trying to remove image not in the cache. Not present: {image}")
logger.info(f"Removed {len(removed)} images from the card cache")
return removed
@staticmethod
def _delete_image_parent_directory_if_empty(image_path: pathlib.Path):
try:
image_path.parent.rmdir()
except OSError as e:
if e.errno != errno.ENOTEMPTY:
raise e
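# Hypothetical usage sketch (assumption: a QApplication/event loop is running and candidate_cards is an
# existing list of Card objects; neither is part of this module):
# >>> image_db = ImageDatabase()  # uses DEFAULT_DATABASE_LOCATION
# >>> image_db.card_download_progress.connect(print)
# >>> cached_cards = image_db.filter_already_downloaded(candidate_cards)
# >>> image_db.quit_background_thread()  # stop the background worker before application shutdown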
class ImageDownloader(mtg_proxy_printer.downloader_base.DownloaderBase):
"""
This class performs image downloads from Scryfall. It is designed to be used as an asynchronous worker inside
a QThread. To perform its tasks, it offers multiple Qt Signals that broadcast its state changes
over thread-safe signal connections.
It can also be used synchronously, if precise sequencing of small operations is required.
"""
request_action = Signal(DocumentAction)
missing_images_obtained = Signal()
missing_image_obtained = Signal(QModelIndex)
"""
Messages if the instance performs a batch operation when it processes image requests for
a deck list. It signals if such a long-running process starts or finishes.
"""
request_batch_processing_state_change = Signal(bool)
batch_processing_state_changed = Signal(bool)
def __init__(self, image_db: ImageDatabase, parent: QObject = None):
super(ImageDownloader, self).__init__(parent)
self.request_batch_processing_state_change.connect(self.update_batch_processing_state)
self.image_database = image_db
self.should_run = True
self.batch_processing_state: bool = False
self.last_error_message = ""
# Reference to the currently opened file. Used here to be able to force close it in case the user wants to quit
# or cancel the download process.
self.currently_opened_file: typing.Optional[io.BytesIO] = None
self.currently_opened_file_monitor: typing.Optional[mtg_proxy_printer.http_file.MeteredSeekableHTTPFile] = None
logger.info(f"Created {self.__class__.__name__} instance.")
def scan_disk_image_cache(self):
"""
Performs two tasks in order: Scans the image cache on disk, then starts to process the download request queue.
This is done to perform both tasks asynchronously and not block the application GUI/startup.
"""
logger.info("Reading all image IDs of images stored on disk.")
self.image_database.images_on_disk.update(
image.as_key() for image in self.image_database.read_disk_cache_content()
)
@Slot(ActionReplaceCard)
@Slot(ActionAddCard)
def fill_document_action_image(self, action: typing.Union[ActionAddCard, ActionReplaceCard]):
logger.info("Got DocumentAction, filling card")
self.get_image_synchronous(action.card)
logger.info("Obtained image, requesting apply()")
self.request_action.emit(action)
@Slot(ActionImportDeckList)
def fill_batch_document_action_images(self, action: ActionImportDeckList):
logger.info("Got batch DocumentAction, filling cards")
self.update_batch_processing_state(True)
for card in action.cards:
self.get_image_synchronous(card)
logger.info(f"Obtained images for {len(action.cards)} cards.")
self.request_action.emit(action)
self.update_batch_processing_state(False)
@Slot(list)
def obtain_missing_images(self, card_indices: typing.List[QModelIndex]):
logger.debug(f"Requesting {len(card_indices)} missing images")
blank = self.image_database.blank_image
self.update_batch_processing_state(True)
for index in card_indices:
card = index.data(ItemDataRole.UserRole)
self.get_image_synchronous(card)
if card.image_file is not blank:
self.missing_image_obtained.emit(index)
self.update_batch_processing_state(False)
logger.debug("Done fetching missing images.")
self.missing_images_obtained.emit()
@Slot(bool)
def update_batch_processing_state(self, value: bool):
self.batch_processing_state = value
if not self.batch_processing_state and self.last_error_message:
self.network_error_occurred.emit(self.last_error_message)
# Unconditionally forget any previously stored error messages when changing the batch processing state.
# This prevents re-reporting previously reported errors when a new batch starts.
self.last_error_message = ""
self.batch_processing_state_changed.emit(value)
def _handle_network_error_during_download(self, card: Card, reason_str: str):
card.set_image_file(self.image_database.blank_image)
logger.warning(
f"Image download failed for card {card}, reason is \"{reason_str}\". Using blank replacement image.")
# Outside of batch processing, report the error immediately. The returned message is stored by the caller
# so that, during batch processing, it is reported once when the batch finishes.
if not self.batch_processing_state:
self.network_error_occurred.emit(reason_str)
return reason_str
def get_image_synchronous(self, card: AnyCardType):
try:
if isinstance(card, CheckCard):
self._get_image_synchronous(card.front)
self._get_image_synchronous(card.back)
else:
self._get_image_synchronous(card)
except urllib.error.URLError as e:
self.last_error_message = self._handle_network_error_during_download(
card, str(e.reason))
except socket.timeout as e:
self.last_error_message = self._handle_network_error_during_download(
card, f"Reading from socket failed: {e}")
finally:
self.download_finished.emit()
def _get_image_synchronous(self, card: Card):
key = ImageKey(card.scryfall_id, card.is_front, card.highres_image)
try:
pixmap = self.image_database.loaded_images[key]
except KeyError:
logger.debug("Image not in memory, requesting from disk")
pixmap = self._fetch_image(card)
self.image_database.loaded_images[key] = pixmap
self.image_database.images_on_disk.add(key)
logger.debug("Image loaded")
card.set_image_file(pixmap)
def _fetch_image(self, card: Card) -> QPixmap:
key = ImageKey(card.scryfall_id, card.is_front, card.highres_image)
cache_file_path = self.image_database.db_path / key.format_relative_path()
cache_file_path.parent.mkdir(parents=True, exist_ok=True)
pixmap = None
if cache_file_path.exists():
pixmap = QPixmap(str(cache_file_path))
if pixmap.isNull():
logger.warning(f'Failed to load image from "{cache_file_path}", deleting file.')
cache_file_path.unlink()
if not cache_file_path.exists():
logger.debug("Image not in disk cache, downloading from Scryfall")
self._download_image_from_scryfall(card, cache_file_path)
pixmap = QPixmap(str(cache_file_path))
if card.highres_image:
self._remove_outdated_low_resolution_image(card)
return pixmap
def _remove_outdated_low_resolution_image(self, card):
low_resolution_image_path = self.image_database.db_path / ImageKey(
card.scryfall_id, card.is_front, False).format_relative_path()
if low_resolution_image_path.exists():
logger.info("Removing outdated low-resolution image")
low_resolution_image_path.unlink()
def _download_image_from_scryfall(self, card: Card, target_path: pathlib.Path):
if not self.should_run:
return
download_uri = card.image_uri
download_path = self.image_database.db_path / target_path.name
self.currently_opened_file, self.currently_opened_file_monitor = self.read_from_url(
download_uri, f"Downloading image for card '{card.name}'")
self.currently_opened_file_monitor.total_bytes_processed.connect(self.download_progress)
# Download to the root of the cache first. Move to the target only after the download has finished.
# This prevents inserting damaged files into the cache if the download aborts due to an application crash,
# termination by the user, a mid-transfer network outage, a full disk or any other failure condition.
try:
with self.currently_opened_file, download_path.open("wb") as file_in_cache:
shutil.copyfileobj(self.currently_opened_file, file_in_cache)
except Exception as e:
logger.exception(e)
# raise e
finally:
if self.should_run:
logger.debug(f"Moving downloaded image into the image cache at {target_path}")
shutil.move(download_path, target_path)
else:
logger.info("Download aborted, not moving potentially incomplete download into the cache.")
self.currently_opened_file = None
if download_path.is_file():
download_path.unlink()
self.download_finished.emit()
def _migrate_database(db_path: pathlib.Path):
if not db_path.exists():
db_path.mkdir(parents=True)
version_file = db_path/"version.txt"
if not version_file.exists():
for possible_dir in map("".join, itertools.product(string.hexdigits, string.hexdigits)):
if (path := db_path/possible_dir).exists():
shutil.rmtree(path)
version_file.write_text("2")
if version_file.read_text() == "2":
old_front = db_path/"front"
old_back = db_path/"back"
high_res_front = db_path/ImageKey.format_level_1_directory_name(True, True)
low_res_front = db_path/ImageKey.format_level_1_directory_name(True, False)
high_res_back = db_path/ImageKey.format_level_1_directory_name(False, True)
low_res_back = db_path/ImageKey.format_level_1_directory_name(False, False)
if old_front.exists():
old_front.rename(low_res_front)
else:
low_res_front.mkdir(exist_ok=True)
if old_back.exists():
old_back.rename(low_res_back)
else:
low_res_back.mkdir(exist_ok=True)
high_res_front.mkdir(exist_ok=True)
high_res_back.mkdir(exist_ok=True)
version_file.write_text("3") | PypiClean |
/DuHast-1.0.7-py3-none-any.whl/duHast/APISamples/RevitFamilyBaseDataAnalysisCircularReferencing.py |
import threading
import os
import RevitFamilyBaseDataUtils as rFamBaseDataUtils
from timer import Timer
import Result as res
def _ExtractParentFamilies(currentParent, treePath):
'''
Find the index of the match in the root tree; any entries in the root tree list with a lower index are parents
Note: Changes currentParent.parent property of the currentParent variable!
:param currentParent: A tuple containing family root data
:type currentParent: named tuple rootFamily
:param treePath: list of family names describing the nesting tree of a family
:type treePath: [str]
:return: Nothing
:rtype: None
'''
indexMatch = treePath.index(currentParent.name)
# double check...it exists and it is not root itself
if(indexMatch > 0):
# add all parents
for i in range (indexMatch):
if(treePath[i] not in currentParent.parent):
currentParent.parent.append(treePath[i])
def _ExtractChildFamilies(currentParent, treePath):
'''
Find the index of the match in the root tree; any entries in the root tree list with a higher index are children
Note: Changes currentParent.child property of the currentParent variable!
:param currentParent: A tuple containing family root data
:type currentParent: named tuple rootFamily
:param treePath: list of family names describing the nesting tree of a family
:type treePath: [str]
:return: Nothing
:rtype: None
'''
indexMatch = treePath.index(currentParent.name)
# double check...it exists, it is not the root itself, and it is not the last item in the tree path
if(indexMatch > 0 and indexMatch != len(treePath)):
# add all children
for i in range (indexMatch + 1, len(treePath)):
if(treePath[i] not in currentParent.child):
currentParent.child.append(treePath[i])
def _CheckDataBlocksForOverLap(blockOne, blockTwo):
'''
Checks whether the root path of families in the first block overlaps with the root path of any family in the second block.
Overlap is checked from the start of the root path. Any families from block one which are not overlapping any family in\
block two are returned.
:param blockOne: List of family tuples of type nestedFamily
:type blockOne: [nestedFamily]
:param blockTwo: List of family tuples of type nestedFamily
:type blockTwo: [nestedFamily]
:return: List of family tuples of type nestedFamily
:rtype: [nestedFamily]
'''
uniqueTreeNodes = []
for fam in blockOne:
match = False
for famUp in blockTwo:
if(' :: '.join(famUp.rootPath).startswith(' :: '.join(fam.rootPath))):
match = True
break
if(match == False):
uniqueTreeNodes.append(fam)
return uniqueTreeNodes
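# Illustrative doctest-style sketch of the overlap rule above (hypothetical tuples; real nestedFamily
# tuples come from RevitFamilyBaseDataUtils and carry additional fields):
# >>> from collections import namedtuple
# >>> Fam = namedtuple('Fam', ['rootPath'])
# >>> blockOne = [Fam(['famA', 'famB']), Fam(['famA', 'famX'])]
# >>> blockTwo = [Fam(['famA', 'famB', 'famC'])]
# >>> [f.rootPath for f in _CheckDataBlocksForOverLap(blockOne, blockTwo)]
# [['famA', 'famX']]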
def _CullDataBlock(familyBaseNestedDataBlock):
'''
Sorts family data blocks into a dictionary where key, from 1 onwards, is the level of nesting indicated by number of '::' in root path string.
After sorting it compares adjacent blocks in the dictionary (key and key + 1) for overlaps in the root path string. Only unique families will be returned.
:param familyBaseNestedDataBlock: A list containing all nested families belonging to a single root host family.
:type familyBaseNestedDataBlock: [nestedFamily]
:return: A list of unique families in terms of root path.
:rtype: [nestedFamily]
'''
culledFamilyBaseNestedDataBlocks = []
dataBlocksByLength = {}
# build dic by root path length
# start at 1 for nesting level (1 based rather than 0 based)
for family in familyBaseNestedDataBlock:
if(len(family.rootPath) -1 in dataBlocksByLength):
dataBlocksByLength[len(family.rootPath) -1 ].append(family)
else:
dataBlocksByLength[len(family.rootPath)- 1 ] = [family]
# loop over dictionary and check block entries against next entry up blocks
for i in range(1, len(dataBlocksByLength) + 1):
# last block get automatically added
if(i == len(dataBlocksByLength)):
culledFamilyBaseNestedDataBlocks = culledFamilyBaseNestedDataBlocks + dataBlocksByLength[i]
else:
# check for matches in next one up
uniqueNodes = _CheckDataBlocksForOverLap(dataBlocksByLength[i], dataBlocksByLength[i + 1])
# only add non overlapping blocks
culledFamilyBaseNestedDataBlocks = culledFamilyBaseNestedDataBlocks + uniqueNodes
return culledFamilyBaseNestedDataBlocks
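# Illustrative doctest-style sketch of the culling above (hypothetical tuples as in the previous sketch):
# the shorter root path 'famA :: famB' overlaps the longer 'famA :: famB :: famC' and is dropped.
# >>> from collections import namedtuple
# >>> Fam = namedtuple('Fam', ['rootPath'])
# >>> [f.rootPath for f in _CullDataBlock([Fam(['famA', 'famB']), Fam(['famA', 'famB', 'famC'])])]
# [['famA', 'famB', 'famC']]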
def _CullNestedBaseDataBlocks(overallFamilyBaseNestedData):
'''
Reduce base data families for parent / child finding purposes. Keep only the nodes with the longest root path branch.
Sample:
famA :: famB :: famC
famA :: famB
The second of the above examples can be culled since the first contains the same information.
:param overallFamilyBaseNestedData: A list containing all nested families with the longest nesting levels per branch per host family.
:type overallFamilyBaseNestedData: [nestedFamily]
'''
currentRootFamName = ''
familyBlocks = []
block = []
# read families into blocks
for nested in overallFamilyBaseNestedData:
if(nested.rootPath[0] != currentRootFamName):
# read family block
if(len(block) > 0):
familyBlocks.append(block)
# reset block
block = []
block.append(nested)
currentRootFamName = nested.rootPath[0]
else:
block.append(nested)
currentRootFamName = nested.rootPath[0]
else:
block.append(nested)
retainedFamilyBaseNestedData = []
# cull data per block
for familyBlock in familyBlocks:
d = _CullDataBlock(familyBlock)
retainedFamilyBaseNestedData = retainedFamilyBaseNestedData + d
return retainedFamilyBaseNestedData
def FindParentsAndChildren(overallFamilyBaseRootData, overallFamilyBaseNestedData):
'''
Loop over all root families and check if they exist in the root path of any nested families.
If so, extract families higher up the root path tree as parents and families further down the root path tree as children.
:param overallFamilyBaseRootData: List of tuples containing root family data.
:type overallFamilyBaseRootData: [rootFamily]
:param overallFamilyBaseNestedData: List of tuples containing nested family data.
:type overallFamilyBaseNestedData: [nestedFamily]
:return: List of tuples containing root family data.
:rtype: [rootFamily]
'''
for i in range(len(overallFamilyBaseRootData)):
#print ('checking family :' , i, ' ', overallFamilyBaseRootData[i].name)
for nestedFam in overallFamilyBaseNestedData:
try:
# get the index of the match
indexMatch = nestedFam.rootPath.index(overallFamilyBaseRootData[i].name)
if(indexMatch > 0):
#print('found ', overallFamilyBaseRootData[i].name ,' in ', nestedFam.rootPath)
_ExtractParentFamilies(overallFamilyBaseRootData[i], nestedFam.rootPath)
_ExtractChildFamilies(overallFamilyBaseRootData[i], nestedFam.rootPath)
#print('after: ', overallFamilyBaseRootData[i].child)
except:
pass
return overallFamilyBaseRootData
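# Illustrative doctest-style sketch (hypothetical tuples; real rootFamily tuples come from
# RevitFamilyBaseDataUtils and carry additional fields):
# >>> from collections import namedtuple
# >>> Root = namedtuple('Root', ['name', 'parent', 'child'])
# >>> Nested = namedtuple('Nested', ['rootPath'])
# >>> roots = FindParentsAndChildren([Root('famB', [], [])], [Nested(['famA', 'famB', 'famC'])])
# >>> roots[0].parent, roots[0].child
# (['famA'], ['famC'])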
def FindCircularReferences(overallFamilyBaseRootData):
'''
Loops over family data and returns any families which appear in circular references,
i.e. a family that appears in both its parent and its child collection.
:param overallFamilyBaseRootData: List of tuples containing root family data.
:type overallFamilyBaseRootData: [rootFamily]
:return: List of tuples containing root family data.
:rtype: [rootFamily]
'''
circularReferences = []
# loop over all families and check whether there are any families in both the parent as well as child collection
for family in overallFamilyBaseRootData:
for parentFamily in family.parent:
if (parentFamily in family.child):
circularReferences.append(family)
return circularReferences
def CheckFamiliesHaveCircularReferences(familyBaseDataReportFilePath):
'''
Processes a family base data report and identifies any families which contain a circular reference.
Makes use of multithreading when more than 2 cores are present.
:param familyBaseDataReportFilePath: Fully qualified file path to family base data report file.
:type familyBaseDataReportFilePath: str
:return:
Result class instance.
- result.status. True if the circular reference check completed successfully, otherwise False.
- result.message will contain the summary messages of the process including time stamps.
- result.result will contain any root families with circular references (empty list if none were found).
On exception:
- result.status (bool) will be False.
- result.message will contain generic exception message.
- result.result will be empty
:rtype: :class:`.Result`
'''
# set up a timer
tProcess = Timer()
tProcess.start()
returnValue = res.Result()
# read overall family base data and nested data from file
overallFamilyBaseRootData, overallFamilyBaseNestedData = rFamBaseDataUtils.ReadOverallFamilyDataList(familyBaseDataReportFilePath)
returnValue.AppendMessage(tProcess.stop() + ' Read overall family base data report. ' + str(len(overallFamilyBaseRootData)) + ' root entries found and '\
+ str(len(overallFamilyBaseNestedData)) + ' nested entries found.')
tProcess.start()
before = len(overallFamilyBaseNestedData)
# reduce workload by culling not needed nested family data
overallFamilyBaseNestedData = _CullNestedBaseDataBlocks(overallFamilyBaseNestedData)
returnValue.AppendMessage(tProcess.stop() + ' culled nested family base data from : ' + str(before) +' to: ' + str(len(overallFamilyBaseNestedData)) + ' families.' )
tProcess.start()
# set up some multithreading
coreCount = int(os.environ['NUMBER_OF_PROCESSORS'])
if (coreCount > 2):
returnValue.AppendMessage('cores: ' + str(coreCount))
# leave some room for other processes
coreCount = coreCount - 1
chunkSize = len(overallFamilyBaseRootData)/coreCount
threads = []
# set up threads
for i in range(coreCount):
t = threading.Thread(target=FindParentsAndChildren, args=(overallFamilyBaseRootData[i*chunkSize:(i+1) * chunkSize],overallFamilyBaseNestedData))
threads.append(t)
# start up threads
for t in threads:
t.start()
# wait for results
for t in threads:
t.join()
else:
# find parents and children
overallFamilyBaseRootData = FindParentsAndChildren(overallFamilyBaseRootData, overallFamilyBaseNestedData)
returnValue.AppendMessage(tProcess.stop() + ' Populated parents and children properties of: ' + str(len(overallFamilyBaseRootData)) +' root families.' )
tProcess.start()
# identify circular references
circularReferences = FindCircularReferences(overallFamilyBaseRootData)
returnValue.AppendMessage(tProcess.stop() + ' Found ' + str(len(circularReferences)) +' circular references in families.' )
if(len(circularReferences) > 0):
returnValue.result = circularReferences
return returnValue | PypiClean |
/EagleVision-0.0.5.tar.gz/EagleVision-0.0.5/eaglevision/similarity_eagle.py |
import os
import os.path
import datetime
import time
from pathlib import Path
import pandas as pd
from functiondefextractor import core_extractor
from functiondefextractor import condition_checker
from similarity.similarity_io import SimilarityIO
from eaglevision.base_eagle import BaseEagle
class SimilarityEagle(BaseEagle):
""" Class which conducts the Code extraction, Pattern check in the code and similarity analysis """
def __init__(self):
""" Constructor for the class """
super(SimilarityEagle)
super().__init__()
self.dataframe = None
self.report_path = None
def __code_extraction__(self):
""" Function to extract code from the folder"""
val = True
self.dataframe = core_extractor.extractor(self.get_proj_path(), annot=self.get_annotation(),
delta=self.get_delta(),
exclude=r"%s" % self.get_exclude_extraction())
if self.dataframe.empty:
print("No functions are extracted. Data frame is empty. Recheck your input arguments")
val = False
return val
@staticmethod
def get_timestamp():
""" Function to get timestamp"""
return str(datetime.datetime.fromtimestamp(time.time()).strftime('%H-%M-%S_%d_%m_%Y')) # pragma: no mutate
def __code_pattern_analyzer__(self):
"""" Function to extract patterns from the source code fetched in to the dataframe """
if self.get_pattern() is not None and len(self.get_pattern()) == len(self.get_pattern_seperator()):
for i in range(len(self.get_pattern())):
pattern_sep = str(self.get_pattern_seperator()[i]) if self.get_pattern_seperator()[i] else None
data, pattern = condition_checker.check_condition(str(self.get_pattern()[i]), self.dataframe,
pattern_sep)
if self.get_run_pattern_match():
self.__report_xlsx__(data, "%s_pattern" % self.get_pattern()[i])
pattern.to_html("%s.html" % os.path.join(self.report_path, self.get_pattern()[i] + "Pivot_" +
self.get_timestamp()))
else:
print("The pattern input is expected to be list and should be of same length as pattern separators")
def __code_similarity__(self):
""" Function to conduct the similarity analysis """
similarity_io_obj = SimilarityIO(None, None, None)
similarity_io_obj.file_path = self.report_path # where to put the report
similarity_io_obj.data_frame = self.dataframe
if self.get_similarity_range():
similarity_io_obj.filter_range = self.get_similarity_range()
mapping = {similarity_io_obj.data_frame.columns[0]: 'Uniq ID',
similarity_io_obj.data_frame.columns[1]: 'Steps'}
similarity_io_obj.data_frame.rename(columns=mapping, inplace=True)
similarity_io_obj.uniq_header = "Uniq ID" # Unique header of the input data frame
processed_similarity = similarity_io_obj.process_cos_match()
similarity_io_obj.report(processed_similarity)
def __report_xlsx__(self, data_f, name):
""" Function which write the dataframe to xlsx """
file_path = os.path.join(self.report_path, name)
# Github open ticket for the abstract method
writer = pd.ExcelWriter("%s_%s.xlsx" % (file_path, self.get_timestamp()), engine="xlsxwriter")
data_f.to_excel(writer, sheet_name=name)
writer.save()
def __set_report_path__(self):
""" Function to set the report path"""
self.report_path = os.path.join(self.get_report_path(), "pattern_and_similarity_report")
Path(self.report_path).mkdir(parents=True, exist_ok=True)
def orchestrate_similarity(self, json):
""" Function which orchestrate the similarity execution"""
self.populate_data(json)
print("\n\n=================================") # pragma: no mutate
print("Please wait while input is processed") # pragma: no mutate
self.__set_report_path__()
if self.__code_extraction__():
print("Please wait while [Pattern matching tool] process your inputs") # pragma: no mutate
self.__code_pattern_analyzer__()
print("[Pattern matching tool] have completed extracting the pattern check") # pragma: no mutate
if self.get_run_similarity():
print("Please wait while [Code Similarity Tool]"
" process your inputs, This will take a while") # pragma: no mutate
self.__code_similarity__()
print("\n[Code Similarity Tool] have completed Similarity analysis, " # pragma: no mutate
"reports @ %s" % self.report_path) # pragma: no mutate
print("=================================") # pragma: no mutate | PypiClean |
/LEPL-5.1.3.zip/LEPL-5.1.3/src/lepl/bin/bits.py | # pylint: disable-msg=R0903
# using __ methods
if bytes is str:
print('Binary parsing unsupported in this Python version')
else:
STRICT = 'strict'
class Int(int):
'''
An integer with a length (the number of bits). This extends Python's type
system so that we can distinguish between different integer types, which
may have different encodings.
'''
def __new__(cls, value, length):
return super(Int, cls).__new__(cls, str(value), 0)
def __init__(self, value, length):
super(Int, self).__init__()
self.__length = length
def __len__(self):
return self.__length
def __repr__(self):
return 'Int({0},{1})'.format(super(Int, self).__str__(),
self.__length)
def swap_table():
'''
Table of reversed bit patterns for 8 bits.
'''
# pylint: disable-msg=C0103
table = [0] * 256
power = [1 << n for n in range(8)]
for n in range(8):
table[1 << n] = 1 << (7 - n)
for i in range(256):
if not table[i]:
for p in power:
if i & p:
table[i] |= table[p]
table[table[i]] = i
return table
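# Illustrative check of the table (not part of the original source): reversing the bit order of a byte
# maps 0b00000001 to 0b10000000 and 0x0f to 0xf0.
# >>> table = swap_table()
# >>> table[0b00000001] == 0b10000000
# True
# >>> table[0x0f] == 0xf0
# True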
class BitString(object):
'''
A sequence of bits, of arbitrary length. Has similar semantics to
strings, in that a single index is itself a BitString (of unit length).
This is intended as a standard format for arbitrary binary data, to help with conversion between other types. In other words, convert to and from
with conversion between other types. In other words, convert to and from
this, and then chain conversions.
Bits are stored as a contiguous sequence in an array of bytes. Both bits
and bytes are "little endian" - this allows arbitrary lengths of bits,
at arbitrary offsets, to be given values without worrying about
alignment.
The bit sequence starts at bit 'offset' in the first byte and there are
a total of 'length' bits. The number of bytes stored is the minimum
implied by those two values, with zero padding.
'''
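# Illustrative doctest-style sketch of typical use (example values only, not from the original docs):
# >>> bs = BitString.from_byte(0x0f) + BitString.from_byte(0xff)
# >>> len(bs)
# 16
# >>> str(bs)
# '11110000 11111111b0/16'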
__swap = swap_table()
def __init__(self, value=None, length=0, offset=0):
'''
value is a bytes() instance that contains the data.
length is the number of valid bits. If given as a float it is in bytes.bits
form: the integer part is the number of whole bytes and the first decimal digit
is the number of extra bits (e.g. 3.1 means 3 bytes plus 1 bit).
offset is the index of the first valid bit in the value.
'''
if value is None:
value = bytes()
if not isinstance(value, bytes):
raise TypeError('BitString wraps bytes: {0!r}'.format(value))
if length < 0:
raise ValueError('Negative length: {0!r}'.format(length))
if not 0 <= offset < 8 :
raise ValueError('Non-byte offset: {0!r}'.format(offset))
self.__bytes = value
self.__length = unpack_length(length)
self.__offset = offset
if len(value) != bytes_for_bits(self.__length, self.__offset):
raise ValueError('Inconsistent length: {0!r}/{1!r}'
.format(value, length))
def bytes(self, offset=0):
'''
Return a series of bytes values, which encode the data for len(self)
bits when offset=0 (with final padding in the last byte if necessary).
It is the caller's responsibility to discard any trailing bits.
When 0 < offset < 8 then the data are zero-padded by offset bits first.
'''
# if self.__offset and offset == 0:
# # normalize our own value
# self.__bytes = \
# bytes(ByteIterator(self.__bytes, self.__length,
# self.__offset, offset))
# self.__offset = 0
return ByteIterator(self.__bytes, self.__length,
self.__offset, offset)
def bits(self):
'''
Return a series of bits (encoded as single-bit BitString values) that contain the contents.
'''
return BitIterator(self.__bytes, 0, self.__length, 1, self.__offset)
def __str__(self):
'''
For 64 bits or less, show bits grouped by byte (octet), with bytes
and bits running from left to right. This is a "picture" of the bits.
For more than 64 bits, give a hex encoding of bytes (right padded
with zeros), shown in big-endian format.
In both cases, the length in bits is given after a trailing slash.
Whatever the internal offset, values are displayed with no initial
padding.
'''
if self.__length > 64:
hex_ = ''.join(hex(x)[2:] for x in self.bytes())
return '{0}x0/{1}'.format(hex_, self.__length)
else:
chars = []
byte = []
count = 0
for bit in self.bits():
if not count % 8:
chars.extend(byte)
byte = []
if count:
chars.append(' ')
if bit.zero():
byte.append('0')
else:
byte.append('1')
count += 1
chars.extend(byte)
return '{0}b0/{1}'.format(''.join(chars), self.__length)
def __repr__(self):
'''
An explicit display of internal state, including padding and offset.
'''
return 'BitString({0!r}, {1!r}, {2!r})' \
.format(self.__bytes, self.__length, self.__offset)
def __len__(self):
return self.__length
def zero(self):
'''
Are all bits zero?
'''
for byte in self.__bytes:
if byte != 0:
return False
return True
def offset(self):
'''
The internal offset. This is not useful as an external API, but
helps with debugging.
'''
return self.__offset
def __iter__(self):
return self.bits()
def __add__(self, other):
'''
Combine two sequences, appending then together.
'''
bbs = bytearray(self.to_bytes())
matching_offset = self.__length % 8
for byte in other.bytes(matching_offset):
if matching_offset:
bbs[-1] |= byte
matching_offset = False
else:
bbs.append(byte)
return BitString(bytes(bbs), self.__length + len(other))
def to_bytes(self, offset=0):
'''
Return a bytes() object, right-padded with zero bits if necessary.
'''
if self.__offset == offset:
return self.__bytes
else:
return bytes(self.bytes(offset))
def to_int(self, big_endian=False):
'''
Convert the entire bit sequence (of any size) to an integer.
Big endian conversion is only possible if the bits form a whole number
of bytes.
'''
if big_endian and self.__length % 8:
raise ValueError('Length is not a multiple of 8 bits, so big '
'endian integer poorly defined: {0}'
.format(self.__length))
bbs = self.bytes()
if not big_endian:
bbs = reversed(list(bbs))
value = 0
for byte in bbs:
value = (value << 8) + byte
return Int(value, self.__length)
def to_str(self, encoding=None, errors='strict'):
'''
Convert to string.
'''
# do we really need to do this in two separate calls?
if encoding:
return bytes(self.bytes()).decode(encoding=encoding,
errors=errors)
else:
return bytes(self.bytes()).decode(errors=errors)
def __int__(self):
return self.to_int()
def __index__(self):
return self.to_int()
def __invert__(self):
inv = bytearray([0xff ^ b for b in self.bytes()])
if self.__length % 8:
inv[-1] &= 0xff >> self.__length % 8
return BitString(bytes(inv), self.__length)
def __getitem__(self, index):
if not isinstance(index, slice):
index = slice(index, index+1, None)
(start, stop, step) = index.indices(self.__length)
if step == 1:
start += self.__offset
stop += self.__offset
bbs = bytearray(self.__bytes[start // 8:bytes_for_bits(stop)])
if start % 8:
bbs[0] &= 0xff << start % 8
if stop % 8:
bbs[-1] &= 0xff >> 8 - stop % 8
return BitString(bytes(bbs), stop - start, start % 8)
else:
acc = BitString()
for byte in BitIterator(self.__bytes, start, stop, step,
self.__offset):
acc += byte
return acc
def __eq__(self, other):
# pylint: disable-msg=W0212
# (we check the type)
if not isinstance(other, BitString) \
or self.__length != other.__length:
return False
for (bb1, bb2) in zip(self.bytes(), other.bytes()):
if bb1 != bb2:
return False
return True
def __hash__(self):
return hash(self.__bytes) ^ self.__length
@staticmethod
def from_byte(value):
'''
Create a BitString from a byte.
'''
return BitString.from_int(value, 8)
@staticmethod
def from_int32(value, big_endian=None):
'''
Create a BitString from a 32 bit integer.
'''
return BitString.from_int(value, 32, big_endian)
@staticmethod
def from_int64(value, big_endian=None):
'''
Create a BitString from a 64 bit integer.
'''
return BitString.from_int(value, 64, big_endian)
@staticmethod
def from_int(value, length=None, big_endian=None):
'''
Value can be an int, or a string with a leading or trailing tag.
A plain int, or no tag, or leading tag, is byte little-endian by
default.
Length and big-endianness are inferred from the format for values
given as strings, but explicit parameters override these.
If no length is given, and none can be inferred, 32 bits is assumed
(bit length cannot be inferred for decimal values, even as strings).
The interpretation of big-endian values depends on the base and is
either very intuitive and useful, or completely stupid. Use at your
own risk.
Big-endian hex values must specify an exact number of bytes (even
number of hex digits). Each separate byte is assigned a value
according to big-endian semantics, but within a byte small-endian
order is used. This is consistent with the standard conventions for
network data. So, for example, 1234x0 gives two bytes. The first
contains the value 0x12, the second the value 0x34.
Big-endian binary values are taken to be a "picture" of the bits,
with the array reading from left to right. So 0011b0 specifies
four bits, starting with two zeroes.
Big-endian decimal and octal values are treated as hex values.
'''
# order is very important below - edit with extreme care
bits = None
if isinstance(value, str):
value.strip()
# move postfix to prefix, saving endian hint
if value.endswith('0') and len(value) > 1 and \
not value[-2].isdigit() \
and not (len(value) == 3 and value.startswith('0')):
value = '0' + value[-2] + value[0:-2]
if big_endian is None:
big_endian = True
# drop 0d for decimal
if value.startswith('0d') or value.startswith('0D'):
value = value[2:]
# infer implicit length
if len(value) > 1 and not value[1].isdigit() and length is None:
bits = {'b':1, 'o':3, 'x':4}.get(value[1].lower(), None)
if not bits:
raise ValueError('Unexpected base: {0!r}'.format(value))
length = bits * (len(value) - 2)
if big_endian and bits == 1:
# binary value is backwards!
value = value[0:2] + value[-1:1:-1]
value = int(value, 0)
if length is None:
try:
# support round-tripping of sized integers
length = len(value)
except TypeError:
# assume 32 bits if nothing else defined
length = 32
length = unpack_length(length)
if length % 8 and big_endian and bits != 1:
raise ValueError('A big-endian int with a length that '
'is not an integer number of bytes cannot be '
'encoded as a stream of bits: {0!r}/{1!r}'
.format(value, length))
bbs, val = bytearray(), value
for _index in range(bytes_for_bits(length)):
bbs.append(val & 0xff)
val >>= 8
if val > 0:
raise ValueError('Value contains more bits than length: %r/%r' %
(value, length))
# binary was swapped earlier
if big_endian and bits != 1:
bbs = reversed(bbs)
return BitString(bytes(bbs), length)
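# Illustrative doctest-style sketch of the conventions described above (example values only):
# >>> str(BitString.from_int('1101b0'))  # big-endian binary is a left-to-right "picture" of the bits
# '1101b0/4'
# >>> BitString.from_int('1234x0').to_int(big_endian=True) == 0x1234
# True
# >>> len(BitString.from_int(0))  # no inferable length, so 32 bits are assumed
# 32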
@staticmethod
def from_sequence(value, unpack=lambda x: x):
'''
Unpack is called for each item in turn (so should be, say, from_byte).
'''
accumulator = BitString()
for item in value:
accumulator += unpack(item)
return accumulator
@staticmethod
def from_bytearray(value):
'''
Create a BitString from a bytearray.
'''
if not isinstance(value, bytes):
value = bytes(value)
return BitString(value, len(value) * 8)
@staticmethod
def from_str(value, encoding=None, errors=STRICT):
'''
Create a BitString from a string.
'''
if encoding:
return BitString.from_bytearray(value.encode(encoding=encoding,
errors=errors))
else:
return BitString.from_bytearray(value.encode(errors=errors))
def unpack_length(length):
'''
Length is in bits, unless a decimal is specified, in which case it
has the structure bytes.bits. Obviously this is ambiguous with float
values (eg 3.1 or 3.10), but since we only care about bits 0-7 we can
avoid any issues by requiring that range.
'''
if isinstance(length, str):
try:
length = int(length, 0)
except ValueError:
length = float(length)
if isinstance(length, int):
return length
if isinstance(length, float):
nbytes = int(length)
bits = int(10 * (length - nbytes) + 0.5)
if bits < 0 or bits > 7:
raise ValueError('BitStr specification must be between 0 and 7')
return nbytes * 8 + bits
raise TypeError('Cannot infer length from %r' % length)
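# Illustrative doctest-style sketch of the bytes.bits convention (example values only):
# >>> unpack_length(12)      # plain int: a count of bits
# 12
# >>> unpack_length(3.1)     # 3 bytes plus 1 bit
# 25
# >>> unpack_length('0x10')  # strings are parsed as ints (or floats) first
# 16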
def bytes_for_bits(bits, offset=0):
'''
The number of bytes required to specify the given number of bits.
'''
return (bits + 7 + offset) // 8
class BitIterator(object):
'''
A sequence of bits (used by BitString).
'''
def __init__(self, value, start, stop, step, offset):
assert 0 <= offset < 8
self.__bytes = value
self.__start = start
self.__stop = stop
self.__step = step
self.__offset = offset
self.__index = start
def __iter__(self):
return self
def __next__(self):
if (self.__step > 0 and self.__index < self.__stop) \
or (self.__step < 0 and self.__index > self.__stop):
index = self.__index + self.__offset
byte = self.__bytes[index // 8] >> index % 8
self.__index += self.__step
return ONE if byte & 0x1 else ZERO
else:
raise StopIteration()
class ByteIterator(object):
'''
A sequence of bytes (used by BitString).
'''
def __init__(self, value, length, existing, required):
assert 0 <= required < 8
assert 0 <= existing < 8
self.__bytes = value
self.__length = length
self.__required = required
self.__existing = existing
if self.__required > self.__existing:
self.__index = -1
else:
self.__index = 0
self.__total = 0
def __iter__(self):
return self
def __next__(self):
if self.__required == self.__existing:
return self.__byte_aligned()
elif self.__required > self.__existing:
return self.__add_offset()
else:
return self.__correct_offset()
def __byte_aligned(self):
'''
Already aligned, so return next byte.
'''
if self.__index < len(self.__bytes):
byte = self.__bytes[self.__index]
self.__index += 1
return byte
else:
raise StopIteration()
def __add_offset(self):
'''
No longer understand this. Replace with BitStream project?
'''
if self.__index < 0:
if self.__total < self.__length:
# initial offset chunk
byte = 0xff & (self.__bytes[0] <<
(self.__required - self.__existing))
self.__index = 0
self.__total = 8 - self.__required
return byte
else:
raise StopIteration()
else:
if self.__total < self.__length:
byte = 0xff & (self.__bytes[self.__index] >>
(8 - self.__required + self.__existing))
self.__index += 1
self.__total += self.__required
else:
raise StopIteration()
if self.__total < self.__length:
byte |= 0xff & (self.__bytes[self.__index] <<
(self.__required - self.__existing))
self.__total += 8 - self.__required
return byte
def __correct_offset(self):
'''
No longer understand this. Replace with BitStream project?
'''
if self.__total < self.__length:
byte = 0xff & (self.__bytes[self.__index] >>
(self.__existing - self.__required))
self.__index += 1
self.__total += 8 - self.__existing + self.__required
else:
raise StopIteration()
if self.__total < self.__length:
byte |= 0xff & (self.__bytes[self.__index] <<
(8 - self.__existing + self.__required))
self.__total += self.__existing - self.__required
return byte
ONE = BitString(b'\x01', 1)
ZERO = BitString(b'\x00', 1) | PypiClean |
/Flask-DebugToolbar-0.13.1.tar.gz/Flask-DebugToolbar-0.13.1/src/flask_debugtoolbar/static/codemirror/util/closetag.js | (function() {
/** Option that allows tag closing behavior to be toggled. Default is true. */
CodeMirror.defaults['closeTagEnabled'] = true;
/** Array of tag names to add indentation after the start tag for. Default is the list of block-level html tags. */
CodeMirror.defaults['closeTagIndent'] = ['applet', 'blockquote', 'body', 'button', 'div', 'dl', 'fieldset', 'form', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'html', 'iframe', 'layer', 'legend', 'object', 'ol', 'p', 'select', 'table', 'ul'];
/**
* Call during key processing to close tags. Handles the key event if the tag is closed, otherwise throws CodeMirror.Pass.
* - cm: The editor instance.
* - ch: The character being processed.
* - indent: Optional. Omit or pass true to use the default indentation tag list defined in the 'closeTagIndent' option.
* Pass false to disable indentation. Pass an array to override the default list of tag names.
*/
CodeMirror.defineExtension("closeTag", function(cm, ch, indent) {
if (!cm.getOption('closeTagEnabled')) {
throw CodeMirror.Pass;
}
var mode = cm.getOption('mode');
if (mode == 'text/html') {
/*
* Relevant structure of token:
*
* htmlmixed
* className
* state
* htmlState
* type
* context
* tagName
* mode
*
* xml
* className
* state
* tagName
* type
*/
var pos = cm.getCursor();
var tok = cm.getTokenAt(pos);
var state = tok.state.base || tok.state;
if (state.mode && state.mode != 'html') {
throw CodeMirror.Pass; // With htmlmixed, we only care about the html sub-mode.
}
if (ch == '>') {
var type = state.htmlState ? state.htmlState.type : state.type; // htmlmixed : xml
if (tok.className == 'tag' && type == 'closeTag') {
throw CodeMirror.Pass; // Don't process the '>' at the end of an end-tag.
}
cm.replaceSelection('>'); // Mode state won't update until we finish the tag.
pos = {line: pos.line, ch: pos.ch + 1};
cm.setCursor(pos);
tok = cm.getTokenAt(cm.getCursor());
state = tok.state.base || tok.state;
type = state.htmlState ? state.htmlState.type : state.type; // htmlmixed : xml
if (tok.className == 'tag' && type != 'selfcloseTag') {
var tagName = state.htmlState ? state.htmlState.context.tagName : state.tagName; // htmlmixed : xml
if (tagName.length > 0) {
insertEndTag(cm, indent, pos, tagName);
}
return;
}
// Undo the '>' insert and allow cm to handle the key instead.
cm.setSelection({line: pos.line, ch: pos.ch - 1}, pos);
cm.replaceSelection("");
} else if (ch == '/') {
if (tok.className == 'tag' && tok.string == '<') {
var tagName = state.htmlState ? (state.htmlState.context ? state.htmlState.context.tagName : '') : state.context.tagName; // htmlmixed : xml # extra htmlmixed check is for '</' edge case
if (tagName.length > 0) {
completeEndTag(cm, pos, tagName);
return;
}
}
}
} else if (mode == 'xmlpure') {
var pos = cm.getCursor();
var tok = cm.getTokenAt(pos);
var tagName = tok.state.context.tagName;
if (ch == '>') {
// <foo> tagName=foo, string=foo
// <foo /> tagName=foo, string=/ # ignore
// <foo></foo> tagName=foo, string=/foo # ignore
if (tok.string == tagName) {
cm.replaceSelection('>'); // parity w/html modes
pos = {line: pos.line, ch: pos.ch + 1};
cm.setCursor(pos);
insertEndTag(cm, indent, pos, tagName);
return;
}
} else if (ch == '/') {
// <foo / tagName=foo, string= # ignore
// <foo></ tagName=foo, string=<
if (tok.string == '<') {
completeEndTag(cm, pos, tagName);
return;
}
}
}
throw CodeMirror.Pass; // Bubble if not handled
});
function insertEndTag(cm, indent, pos, tagName) {
if (shouldIndent(cm, indent, tagName)) {
cm.replaceSelection('\n\n</' + tagName + '>', 'end');
cm.indentLine(pos.line + 1);
cm.indentLine(pos.line + 2);
cm.setCursor({line: pos.line + 1, ch: cm.getLine(pos.line + 1).length});
} else {
cm.replaceSelection('</' + tagName + '>');
cm.setCursor(pos);
}
}
function shouldIndent(cm, indent, tagName) {
if (typeof indent == 'undefined' || indent == null || indent == true) {
indent = cm.getOption('closeTagIndent');
}
if (!indent) {
indent = [];
}
return indexOf(indent, tagName.toLowerCase()) != -1;
}
// C&P from codemirror.js...would be nice if this were visible to utilities.
function indexOf(collection, elt) {
if (collection.indexOf) return collection.indexOf(elt);
for (var i = 0, e = collection.length; i < e; ++i)
if (collection[i] == elt) return i;
return -1;
}
function completeEndTag(cm, pos, tagName) {
cm.replaceSelection('/' + tagName + '>');
cm.setCursor({line: pos.line, ch: pos.ch + tagName.length + 2 });
}
})(); | PypiClean |
/Montreal-Forced-Aligner-3.0.0a3.tar.gz/Montreal-Forced-Aligner-3.0.0a3/montreal_forced_aligner/g2p/generator.py | from __future__ import annotations
import csv
import functools
import itertools
import logging
import os
import queue
import statistics
import threading
import time
import typing
from multiprocessing.pool import ThreadPool
from pathlib import Path
from queue import Queue
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
import pynini
import pywrapfst
from praatio import textgrid
from pynini import Fst, TokenType
from pynini.lib import rewrite
from pywrapfst import SymbolTable
from sqlalchemy.orm import selectinload
from tqdm.rich import tqdm
from montreal_forced_aligner import config
from montreal_forced_aligner.abc import DatabaseMixin, KaldiFunction, TopLevelMfaWorker
from montreal_forced_aligner.corpus.text_corpus import DictionaryTextCorpusMixin, TextCorpusMixin
from montreal_forced_aligner.data import MfaArguments, TextgridFormats, WordType, WorkflowType
from montreal_forced_aligner.db import File, Utterance, Word, bulk_update
from montreal_forced_aligner.exceptions import PyniniGenerationError
from montreal_forced_aligner.g2p.mixins import G2PTopLevelMixin
from montreal_forced_aligner.helper import comma_join, mfa_open, score_g2p
from montreal_forced_aligner.models import G2PModel
from montreal_forced_aligner.textgrid import construct_output_path
from montreal_forced_aligner.utils import run_kaldi_function
if TYPE_CHECKING:
from dataclasses import dataclass
SpeakerCharacterType = Union[str, int]
else:
from dataclassy import dataclass
__all__ = [
"Rewriter",
"RewriterWorker",
"PyniniGenerator",
"PyniniCorpusGenerator",
"PyniniWordListGenerator",
"PyniniValidator",
]
logger = logging.getLogger("mfa")
def threshold_lattice_to_dfa(
lattice: pynini.Fst, threshold: float = 1.0, state_multiplier: int = 2
) -> pynini.Fst:
"""Constructs a (possibly pruned) weighted DFA of output strings.
Given an epsilon-free lattice of output strings (such as produced by
rewrite_lattice), attempts to determinize it, pruning non-optimal paths if
optimal_only is true. This is valid only in a semiring with the path property.
To prevent unexpected blowup during determinization, a state threshold is
also used and a warning is logged if this exact threshold is reached. The
threshold is a multiplier of the size of input lattice (by default, 4), plus
a small constant factor. This is intended by a sensible default and is not an
inherently meaningful value in and of itself.
Parameters
----------
lattice: :class:`~pynini.Fst`
Epsilon-free non-deterministic finite acceptor.
threshold: float
Threshold for weights; 1.0 is optimal only, 0 is for all paths, and values greater than 1
prune the lattice to include paths with costs less than the optimal path's score times the threshold.
state_multiplier: int
Max ratio for the number of states in the DFA lattice to the NFA lattice; if exceeded, a warning is logged.
Returns
-------
:class:`~pynini.Fst`
Epsilon-free deterministic finite acceptor.
"""
weight_type = lattice.weight_type()
weight_threshold = pynini.Weight(weight_type, threshold)
state_threshold = 256 + state_multiplier * lattice.num_states()
lattice = pynini.determinize(lattice, nstate=state_threshold, weight=weight_threshold)
return lattice
def optimal_rewrites(
string: pynini.FstLike,
rule: pynini.Fst,
input_token_type: Optional[TokenType] = None,
output_token_type: Optional[TokenType] = None,
threshold: float = 1,
) -> List[str]:
"""Returns all optimal rewrites.
Args:
string: Input string or FST.
rule: Input rule WFST.
input_token_type: Optional input token type, or symbol table.
output_token_type: Optional output token type, or symbol table.
threshold: Threshold for weights (1 is optimal only, 0 is for all paths)
Returns:
A tuple of output strings.
"""
lattice = rewrite.rewrite_lattice(string, rule, input_token_type)
lattice = threshold_lattice_to_dfa(lattice, threshold, 4)
return rewrite.lattice_to_strings(lattice, output_token_type)
class Rewriter:
"""
Helper object for rewriting
Parameters
----------
fst: pynini.Fst
G2P FST model
input_token_type: pynini.TokenType
Grapheme symbol table or "utf8"
output_token_type: pynini.SymbolTable
Phone symbol table
num_pronunciations: int
Number of pronunciations, defaults to 0. If this is 0, thresholding is used
threshold: float
Threshold to use for pruning the rewrite lattice, defaults to 1; only used if num_pronunciations is 0
"""
def __init__(
self,
fst: Fst,
input_token_type: TokenType,
output_token_type: SymbolTable,
num_pronunciations: int = 0,
threshold: float = 1,
graphemes: Set[str] = None,
):
self.graphemes = graphemes
self.input_token_type = input_token_type
if num_pronunciations > 0:
self.rewrite = functools.partial(
rewrite.top_rewrites,
nshortest=num_pronunciations,
rule=fst,
input_token_type=None,
output_token_type=output_token_type,
)
else:
self.rewrite = functools.partial(
optimal_rewrites,
threshold=threshold,
rule=fst,
input_token_type=None,
output_token_type=output_token_type,
)
def create_word_fst(self, word: str) -> pynini.Fst:
if self.graphemes is not None:
word = "".join([x for x in word if x in self.graphemes])
fst = pynini.accep(word, token_type=self.input_token_type)
return fst
def __call__(self, graphemes: str) -> List[str]: # pragma: no cover
"""Call the rewrite function"""
if " " in graphemes:
words = graphemes.split()
hypotheses = []
for w in words:
w_fst = self.create_word_fst(w)
if not w_fst:
continue
hypotheses.append(self.rewrite(w_fst))
hypotheses = sorted(set(" ".join(x) for x in itertools.product(*hypotheses)))
else:
fst = self.create_word_fst(graphemes)
if not fst:
return []
hypotheses = self.rewrite(fst)
return [x for x in hypotheses if x]
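# Hypothetical usage sketch (file names, the phone symbol table and the returned pronunciation below are
# made up; a trained G2P FST is assumed):
# >>> fst = pynini.Fst.read("g2p_model.fst")
# >>> phone_symbols = pywrapfst.SymbolTable.read_text("phones.txt")
# >>> rewriter = Rewriter(fst, "utf8", phone_symbols, num_pronunciations=1)
# >>> rewriter("word")
# ['w er d']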
class PhonetisaurusRewriter:
"""
Helper object for rewriting
Parameters
----------
fst: pynini.Fst
G2P FST model
input_token_type: pynini.SymbolTable
Grapheme symbol table
output_token_type: pynini.SymbolTable
Phone symbol table
num_pronunciations: int
Number of pronunciations, defaults to 0. If this is 0, thresholding is used
threshold: float
Threshold to use for pruning rewrite lattice, defaults to 1.5, only used if num_pronunciations is 0
grapheme_order: int
Maximum number of graphemes to consider single segment
seq_sep: str
Separator to use between grapheme symbols
"""
def __init__(
self,
fst: Fst,
input_token_type: SymbolTable,
output_token_type: SymbolTable,
num_pronunciations: int = 0,
threshold: float = 1.5,
grapheme_order: int = 2,
seq_sep: str = "|",
graphemes: Set[str] = None,
):
self.fst = fst
self.seq_sep = seq_sep
self.input_token_type = input_token_type
self.output_token_type = output_token_type
self.grapheme_order = grapheme_order
self.graphemes = graphemes
if num_pronunciations > 0:
self.rewrite = functools.partial(
rewrite.top_rewrites,
nshortest=num_pronunciations,
rule=fst,
input_token_type=None,
output_token_type=output_token_type,
)
else:
self.rewrite = functools.partial(
optimal_rewrites,
threshold=threshold,
rule=fst,
input_token_type=None,
output_token_type=output_token_type,
)
def create_word_fst(self, word: str) -> typing.Optional[pynini.Fst]:
if self.graphemes is not None:
word = [x for x in word if x in self.graphemes]
if not word:
return None
fst = pynini.Fst()
one = pywrapfst.Weight.one(fst.weight_type())
max_state = 0
for i in range(len(word)):
start_state = fst.add_state()
for j in range(1, self.grapheme_order + 1):
if i + j <= len(word):
substring = self.seq_sep.join(word[i : i + j])
ilabel = self.input_token_type.find(substring)
if ilabel != pywrapfst.NO_LABEL:
fst.add_arc(start_state, pywrapfst.Arc(ilabel, ilabel, one, i + j))
if i + j >= max_state:
max_state = i + j
for _ in range(fst.num_states(), max_state + 1):
fst.add_state()
fst.set_start(0)
fst.set_final(len(word), one)
fst.set_input_symbols(self.input_token_type)
fst.set_output_symbols(self.input_token_type)
return fst
def __call__(self, graphemes: str) -> List[str]: # pragma: no cover
"""Call the rewrite function"""
if " " in graphemes:
words = graphemes.split()
hypotheses = []
for w in words:
w_fst = self.create_word_fst(w)
if not w_fst:
continue
hypotheses.append(self.rewrite(w_fst))
hypotheses = sorted(set(" ".join(x) for x in itertools.product(*hypotheses)))
else:
fst = self.create_word_fst(graphemes)
if not fst:
return []
hypotheses = self.rewrite(fst)
hypotheses = [x.replace(self.seq_sep, " ") for x in hypotheses if x]
return hypotheses
class RewriterWorker(threading.Thread):
"""
Rewriter process
Parameters
----------
job_queue: :class:`~multiprocessing.Queue`
Queue to pull words from
return_queue: :class:`~multiprocessing.Queue`
Queue to put pronunciations
rewriter: :class:`~montreal_forced_aligner.g2p.generator.Rewriter`
Function to generate pronunciations of words
stopped: :class:`~threading.Event`
Stop check
"""
def __init__(
self,
job_queue: Queue,
return_queue: Queue,
rewriter: Rewriter,
stopped: threading.Event,
):
super().__init__()
self.job_queue = job_queue
self.return_queue = return_queue
self.rewriter = rewriter
self.stopped = stopped
self.finished = threading.Event()
def run(self) -> None:
"""Run the rewriting function"""
while True:
try:
word = self.job_queue.get(timeout=1)
except queue.Empty:
break
if self.stopped.is_set():
continue
try:
rep = self.rewriter(word)
self.return_queue.put((word, rep))
except rewrite.Error:
pass
except Exception as e: # noqa
self.stopped.set()
self.return_queue.put(e)
raise
self.finished.set()
return
@dataclass
class G2PArguments(MfaArguments):
"""
Arguments for :class:`~montreal_forced_aligner.g2p.generator.G2PFunction`
Parameters
----------
job_name: int
Integer ID of the job
db_string: str
String for database connections
log_path: :class:`~pathlib.Path`
Path to save logging information during the run
rewriter: :class:`~montreal_forced_aligner.g2p.generator.Rewriter`
Rewriter used to generate pronunciations for utterance text
"""
rewriter: Rewriter
class G2PFunction(KaldiFunction):
def __init__(self, args: G2PArguments):
super().__init__(args)
self.rewriter = args.rewriter
def _run(self):
"""Run the function"""
with mfa_open(self.log_path, "w") as log_file, self.session() as session:
query = (
session.query(Utterance.id, Utterance.normalized_text)
.filter(Utterance.job_id == self.job_name)
.filter(Utterance.normalized_text != "")
)
for id, text in query:
try:
pronunciation_text = self.rewriter(text)[0]
self.callback((id, pronunciation_text))
except pynini.lib.rewrite.Error:
log_file.write(f"Error on generating pronunciation for {text}\n")
def clean_up_word(word: str, graphemes: Set[str]) -> Tuple[str, Set[str]]:
"""
Clean up word by removing graphemes not in a specified set
Parameters
----------
word : str
Input string
graphemes: set[str]
Set of allowable graphemes
Returns
-------
str
Cleaned up word
Set[str]
Graphemes excluded
"""
new_word = []
missing_graphemes = set()
for c in word:
if c not in graphemes:
missing_graphemes.add(c)
else:
new_word.append(c)
return "".join(new_word), missing_graphemes
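# Illustrative example: characters outside the grapheme set are dropped from the
# word and reported separately, e.g.
#
#     clean_up_word("naïve", {"n", "a", "i", "v", "e"})  # -> ("nave", {"ï"})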
class OrthographyGenerator(G2PTopLevelMixin):
"""
Abstract mixin class for generating "pronunciations" based off the orthographic word
See Also
--------
:class:`~montreal_forced_aligner.g2p.mixins.G2PTopLevelMixin`
For top level G2P generation parameters
"""
def generate_pronunciations(self) -> Dict[str, List[str]]:
"""
Generate pronunciations for the word set
Returns
-------
dict[str, list[str]]
Mapping of words to their "pronunciation"
"""
pronunciations = {}
for word in self.words_to_g2p:
pronunciations[word] = [" ".join(word)]
return pronunciations
class PyniniGenerator(G2PTopLevelMixin):
"""
Class for generating pronunciations from a Pynini G2P model
Parameters
----------
g2p_model_path: str
Path to G2P model
strict_graphemes: bool
Flag for whether to be strict with missing graphemes and skip words containing new graphemes
See Also
--------
:class:`~montreal_forced_aligner.g2p.mixins.G2PTopLevelMixin`
For top level G2P generation parameters
Attributes
----------
g2p_model: G2PModel
G2P model
"""
def __init__(self, g2p_model_path: Path = None, strict_graphemes: bool = False, **kwargs):
self.strict_graphemes = strict_graphemes
super().__init__(**kwargs)
self.g2p_model = G2PModel(
g2p_model_path, root_directory=getattr(self, "workflow_directory", None)
)
self.output_token_type = "utf8"
self.input_token_type = "utf8"
self.rewriter = None
def setup(self):
self.fst = pynini.Fst.read(self.g2p_model.fst_path)
if self.g2p_model.meta["architecture"] == "phonetisaurus":
self.output_token_type = pynini.SymbolTable.read_text(self.g2p_model.sym_path)
self.input_token_type = pynini.SymbolTable.read_text(self.g2p_model.grapheme_sym_path)
self.fst.set_input_symbols(self.input_token_type)
self.fst.set_output_symbols(self.output_token_type)
self.rewriter = PhonetisaurusRewriter(
self.fst,
self.input_token_type,
self.output_token_type,
num_pronunciations=self.num_pronunciations,
threshold=self.g2p_threshold,
grapheme_order=self.g2p_model.meta["grapheme_order"],
graphemes=self.g2p_model.meta["graphemes"],
)
else:
if self.g2p_model.sym_path is not None and os.path.exists(self.g2p_model.sym_path):
self.output_token_type = pynini.SymbolTable.read_text(self.g2p_model.sym_path)
self.rewriter = Rewriter(
self.fst,
self.input_token_type,
self.output_token_type,
num_pronunciations=self.num_pronunciations,
threshold=self.g2p_threshold,
graphemes=self.g2p_model.meta["graphemes"],
)
def generate_pronunciations(self) -> Dict[str, List[str]]:
"""
Generate pronunciations
Returns
-------
dict[str, list[str]]
Mappings of keys to their generated pronunciations
"""
num_words = len(self.words_to_g2p)
begin = time.time()
missing_graphemes = set()
if self.rewriter is None:
self.setup()
logger.info("Generating pronunciations...")
to_return = {}
skipped_words = 0
if num_words < 30 or config.NUM_JOBS == 1:
with tqdm(total=num_words, disable=config.QUIET) as pbar:
for word in self.words_to_g2p:
w, m = clean_up_word(word, self.g2p_model.meta["graphemes"])
pbar.update(1)
missing_graphemes = missing_graphemes | m
if self.strict_graphemes and m:
skipped_words += 1
continue
if not w:
skipped_words += 1
continue
try:
prons = self.rewriter(w)
except rewrite.Error:
continue
to_return[word] = prons
logger.debug(
f"Skipping {skipped_words} words for containing the following graphemes: "
f"{comma_join(sorted(missing_graphemes))}"
)
else:
stopped = threading.Event()
job_queue = Queue()
for word in self.words_to_g2p:
w, m = clean_up_word(word, self.g2p_model.meta["graphemes"])
missing_graphemes = missing_graphemes | m
if self.strict_graphemes and m:
skipped_words += 1
continue
if not w:
skipped_words += 1
continue
job_queue.put(w)
logger.debug(
f"Skipping {skipped_words} words for containing the following graphemes: "
f"{comma_join(sorted(missing_graphemes))}"
)
error_dict = {}
return_queue = Queue()
procs = []
for _ in range(config.NUM_JOBS):
p = RewriterWorker(
job_queue,
return_queue,
self.rewriter,
stopped,
)
procs.append(p)
p.start()
num_words -= skipped_words
with tqdm(total=num_words, disable=config.QUIET) as pbar:
while True:
try:
word, result = return_queue.get(timeout=1)
if stopped.is_set():
continue
except queue.Empty:
for proc in procs:
if not proc.finished.is_set():
break
else:
break
continue
pbar.update(1)
if isinstance(result, Exception):
error_dict[word] = result
continue
to_return[word] = result
for p in procs:
p.join()
if error_dict:
raise PyniniGenerationError(error_dict)
logger.debug(f"Processed {num_words} words in {time.time() - begin:.3f} seconds")
return to_return
class PyniniConsoleGenerator(PyniniGenerator):
@property
def data_directory(self) -> Path:
return Path("-")
@property
def working_directory(self) -> Path:
return config.TEMPORARY_DIRECTORY.joinpath("g2p_stdin")
def cleanup(self) -> None:
pass
class PyniniValidator(PyniniGenerator, TopLevelMfaWorker):
"""
Class for running validation for G2P model training
Parameters
----------
word_list: list[str]
List of words to generate pronunciations
See Also
--------
:class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`
For parameters to generate pronunciations
"""
def __init__(self, word_list: List[str] = None, **kwargs):
super().__init__(**kwargs)
if word_list is None:
word_list = []
self.word_list = word_list
@property
def words_to_g2p(self) -> List[str]:
"""Words to produce pronunciations"""
return self.word_list
@property
def data_source_identifier(self) -> str:
"""Dummy "validation" data source"""
return "validation"
@property
def data_directory(self) -> Path:
"""Data directory"""
return self.working_directory
@property
def evaluation_csv_path(self) -> Path:
"""Path to working directory's CSV file"""
return self.working_directory.joinpath("pronunciation_evaluation.csv")
def setup(self) -> None:
"""Set up the G2P validator"""
TopLevelMfaWorker.setup(self)
if self.initialized:
return
self._current_workflow = "validation"
os.makedirs(self.working_log_directory, exist_ok=True)
self.g2p_model.validate(self.words_to_g2p)
PyniniGenerator.setup(self)
self.initialized = True
self.wer = None
self.ler = None
def compute_validation_errors(
self,
gold_values: Dict[str, Set[str]],
hypothesis_values: Dict[str, List[str]],
):
"""
Computes validation errors
Parameters
----------
gold_values: dict[str, set[str]]
Gold pronunciations
hypothesis_values: dict[str, list[str]]
Hypothesis pronunciations
"""
begin = time.time()
# Word-level measures.
correct = 0
incorrect = 0
# Label-level measures.
total_edits = 0
total_length = 0
# Since the edit distance algorithm is quadratic, let's do this with
# multiprocessing.
logger.debug(f"Processing results for {len(hypothesis_values)} hypotheses")
to_comp = []
indices = []
hyp_pron_count = 0
gold_pron_count = 0
output = []
for word, gold_pronunciations in gold_values.items():
if word not in hypothesis_values:
incorrect += 1
gold_length = statistics.mean(len(x.split()) for x in gold_pronunciations)
total_edits += gold_length
total_length += gold_length
output.append(
{
"Word": word,
"Gold pronunciations": ", ".join(gold_pronunciations),
"Hypothesis pronunciations": "",
"Accuracy": 0,
"Error rate": 1.0,
"Length": gold_length,
}
)
continue
hyp = hypothesis_values[word]
if not isinstance(hyp, list):
hyp = [hyp]
for h in hyp:
if h in gold_pronunciations:
correct += 1
total_length += len(h)
output.append(
{
"Word": word,
"Gold pronunciations": ", ".join(gold_pronunciations),
"Hypothesis pronunciations": ", ".join(hyp),
"Accuracy": 1,
"Error rate": 0.0,
"Length": len(h),
}
)
break
else:
incorrect += 1
indices.append(word)
to_comp.append((gold_pronunciations, hyp)) # Multiple hypotheses to compare
logger.debug(
f"For the word {word}: gold is {gold_pronunciations}, hypothesized are: {hyp}"
)
hyp_pron_count += len(hyp)
gold_pron_count += len(gold_pronunciations)
logger.debug(
f"Generated an average of {hyp_pron_count /len(hypothesis_values)} variants "
f"The gold set had an average of {gold_pron_count/len(hypothesis_values)} variants."
)
with ThreadPool(config.NUM_JOBS) as pool:
gen = pool.starmap(score_g2p, to_comp)
for i, (edits, length) in enumerate(gen):
word = indices[i]
gold_pronunciations = gold_values[word]
hyp = hypothesis_values[word]
output.append(
{
"Word": word,
"Gold pronunciations": ", ".join(gold_pronunciations),
"Hypothesis pronunciations": ", ".join(hyp),
"Accuracy": 1,
"Error rate": edits / length,
"Length": length,
}
)
total_edits += edits
total_length += length
with mfa_open(self.evaluation_csv_path, "w") as f:
writer = csv.DictWriter(
f,
fieldnames=[
"Word",
"Gold pronunciations",
"Hypothesis pronunciations",
"Accuracy",
"Error rate",
"Length",
],
)
writer.writeheader()
for line in output:
writer.writerow(line)
self.wer = 100 * incorrect / (correct + incorrect)
self.ler = 100 * total_edits / total_length
logger.info(f"WER:\t{self.wer:.2f}")
logger.info(f"LER:\t{self.ler:.2f}")
logger.debug(
f"Computation of errors for {len(gold_values)} words took {time.time() - begin:.3f} seconds"
)
def evaluate_g2p_model(self, gold_pronunciations: Dict[str, Set[str]]) -> None:
"""
Evaluate a G2P model on the word list
Parameters
----------
gold_pronunciations: dict[str, set[str]]
Gold pronunciations
"""
output = self.generate_pronunciations()
self.compute_validation_errors(gold_pronunciations, output)
class PyniniWordListGenerator(PyniniValidator, DatabaseMixin):
"""
Top-level worker for generating pronunciations from a word list and a Pynini G2P model
Parameters
----------
word_list_path: :class:`~pathlib.Path`
Path to word list file
See Also
--------
:class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`
For Pynini G2P generation parameters
:class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`
For top-level parameters
Attributes
----------
word_list: list[str]
Word list to generate pronunciations
"""
def __init__(self, word_list_path: Path, **kwargs):
self.word_list_path = word_list_path
super().__init__(**kwargs)
@property
def data_directory(self) -> Path:
"""Data directory"""
return self.working_directory
@property
def data_source_identifier(self) -> str:
"""Name of the word list file"""
return os.path.splitext(os.path.basename(self.word_list_path))[0]
def setup(self) -> None:
"""Set up the G2P generator"""
if self.initialized:
return
with mfa_open(self.word_list_path, "r") as f:
for line in f:
self.word_list.extend(line.strip().split())
if not self.include_bracketed:
self.word_list = [x for x in self.word_list if not self.check_bracketed(x)]
super().setup()
self.g2p_model.validate(self.words_to_g2p)
self.initialized = True
class PyniniCorpusGenerator(PyniniGenerator, TextCorpusMixin, TopLevelMfaWorker):
"""
Top-level worker for generating pronunciations from a corpus and a Pynini G2P model
See Also
--------
:class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`
For Pynini G2P generation parameters
:class:`~montreal_forced_aligner.corpus.text_corpus.TextCorpusMixin`
For corpus parsing parameters
:class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`
For top-level parameters
"""
def __init__(self, per_utterance: bool = False, **kwargs):
super().__init__(**kwargs)
self.per_utterance = per_utterance
def setup(self) -> None:
"""Set up the pronunciation generator"""
if self.initialized:
return
self._load_corpus()
self.initialize_jobs()
super().setup()
self._create_dummy_dictionary()
self.normalize_text()
self.create_new_current_workflow(WorkflowType.g2p)
self.g2p_model.validate(self.words_to_g2p)
self.initialized = True
def g2p_arguments(self) -> List[G2PArguments]:
return [
G2PArguments(
j.id,
getattr(self, "session", ""),
self.working_log_directory.joinpath(f"g2p_utterances.{j.id}.log"),
self.rewriter,
)
for j in self.jobs
]
def export_file_pronunciations(self, output_file_path: Path):
"""
Generate and export per-utterance G2P
Parameters
----------
output_file_path: :class:`~pathlib.Path`
Output directory to save utterance pronunciations
"""
output_file_path.mkdir(parents=True, exist_ok=True)
if self.num_pronunciations != 1:
logger.warning(
"Number of pronunciations is hard-coded to 1 for generating per-utterance pronunciations"
)
self.num_pronunciations = 1
begin = time.time()
if self.rewriter is None:
self.setup()
logger.info("Generating pronunciations...")
with tqdm(total=self.num_utterances, disable=config.QUIET) as pbar:
update_mapping = []
for utt_id, pronunciation in run_kaldi_function(
G2PFunction, self.g2p_arguments(), pbar.update
):
update_mapping.append({"id": utt_id, "transcription_text": pronunciation})
with self.session() as session:
bulk_update(session, Utterance, update_mapping)
session.commit()
logger.debug(f"Processed {self.num_utterances} utterances in {time.time() - begin:.3f} seconds")
logger.info("Exporting files...")
with self.session() as session:
files = session.query(File).options(
selectinload(File.utterances), selectinload(File.speakers)
)
for file in files:
utterance_count = len(file.utterances)
if file.sound_file is not None:
duration = file.sound_file.duration
else:
duration = file.utterances[-1].end
if utterance_count == 0:
logger.debug(f"Could not find any utterances for {file.name}")
elif (
utterance_count == 1
and file.utterances[0].begin == 0
and file.utterances[0].end == duration
):
output_format = "lab"
else:
output_format = TextgridFormats.SHORT_TEXTGRID
output_path = construct_output_path(
file.name,
file.relative_path,
output_file_path,
output_format=output_format,
)
data = file.construct_transcription_tiers()
if output_format == "lab":
for intervals in data.values():
with mfa_open(output_path, "w") as f:
f.write(intervals["transcription"][0].label)
else:
tg = textgrid.Textgrid()
tg.minTimestamp = 0
tg.maxTimestamp = round(duration, 5)
for speaker in file.speakers:
speaker = speaker.name
intervals = data[speaker]["transcription"]
tier = textgrid.IntervalTier(
speaker,
[x.to_tg_interval() for x in intervals],
minT=0,
maxT=round(duration, 5),
)
tg.addTier(tier)
tg.save(output_path, includeBlankSpaces=True, format=output_format)
@property
def words_to_g2p(self) -> List[str]:
"""Words to produce pronunciations"""
word_list = self.corpus_word_set
if not self.include_bracketed:
word_list = [x for x in word_list if not self.check_bracketed(x)]
return word_list
def export_pronunciations(self, output_file_path: typing.Union[str, Path]) -> None:
if self.per_utterance:
self.export_file_pronunciations(output_file_path)
else:
super().export_pronunciations(output_file_path)
class PyniniDictionaryCorpusGenerator(
PyniniGenerator, DictionaryTextCorpusMixin, TopLevelMfaWorker
):
"""
Top-level worker for generating pronunciations from a corpus and a Pynini G2P model
See Also
--------
:class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`
For Pynini G2P generation parameters
:class:`~montreal_forced_aligner.corpus.text_corpus.TextCorpusMixin`
For corpus parsing parameters
:class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`
For top-level parameters
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._word_list = None
def setup(self) -> None:
"""Set up the pronunciation generator"""
if self.initialized:
return
self.load_corpus()
super().setup()
self.g2p_model.validate(self.words_to_g2p)
self.initialized = True
@property
def words_to_g2p(self) -> List[str]:
"""Words to produce pronunciations"""
if self._word_list is None:
with self.session() as session:
query = (
session.query(Word.word)
.filter(Word.word_type == WordType.oov, Word.word != self.oov_word)
.order_by(Word.word)
)
self._word_list = [x for x, in query]
return self._word_list
# /GradAttack-0.1.2.tar.gz/GradAttack-0.1.2/gradattack/models/covidmodel.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class PEXP(nn.Module):
def __init__(self, n_input, n_out):
super(PEXP, self).__init__()
"""
• First-stage Projection: 1×1 convolutions for projecting input features to a lower dimension,
• Expansion: 1×1 convolutions for expanding features
to a higher dimension that is different than that of the
input features,
• Depth-wise Representation: efficient 3×3 depthwise convolutions for learning spatial characteristics to
minimize computational complexity while preserving
representational capacity,
• Second-stage Projection: 1×1 convolutions for projecting features back to a lower dimension, and
• Extension: 1×1 convolutions that finally extend channel dimensionality to a higher dimension to produce
the final features.
"""
self.network = nn.Sequential(
nn.Conv2d(in_channels=n_input,
out_channels=n_input // 2,
kernel_size=1),
nn.Conv2d(
in_channels=n_input // 2,
out_channels=int(3 * n_input / 4),
kernel_size=1,
),
nn.Conv2d(
in_channels=int(3 * n_input / 4),
out_channels=int(3 * n_input / 4),
kernel_size=3,
groups=int(3 * n_input / 4),
padding=1,
),
nn.Conv2d(
in_channels=int(3 * n_input / 4),
out_channels=n_input // 2,
kernel_size=1,
),
nn.Conv2d(in_channels=n_input // 2,
out_channels=n_out,
kernel_size=1),
)
def forward(self, x):
return self.network(x)
class CovidNet(nn.Module):
def __init__(self, model="small", num_classes=3):
super(CovidNet, self).__init__()
filters = {
"pexp1_1": [64, 256],
"pexp1_2": [256, 256],
"pexp1_3": [256, 256],
"pexp2_1": [256, 512],
"pexp2_2": [512, 512],
"pexp2_3": [512, 512],
"pexp2_4": [512, 512],
"pexp3_1": [512, 1024],
"pexp3_2": [1024, 1024],
"pexp3_3": [1024, 1024],
"pexp3_4": [1024, 1024],
"pexp3_5": [1024, 1024],
"pexp3_6": [1024, 1024],
"pexp4_1": [1024, 2048],
"pexp4_2": [2048, 2048],
"pexp4_3": [2048, 2048],
}
self.add_module(
"conv1",
nn.Conv2d(in_channels=3,
out_channels=64,
kernel_size=7,
stride=2,
padding=3),
)
for key in filters:
if "pool" in key:
self.add_module(key,
nn.MaxPool2d(filters[key][0], filters[key][1]))
else:
self.add_module(key, PEXP(filters[key][0], filters[key][1]))
if model == "large":
self.add_module(
"conv1_1x1",
nn.Conv2d(in_channels=64, out_channels=256, kernel_size=1))
self.add_module(
"conv2_1x1",
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1))
self.add_module(
"conv3_1x1",
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1),
)
self.add_module(
"conv4_1x1",
nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=1),
)
self.__forward__ = self.forward_large_net
else:
self.__forward__ = self.forward_small_net
self.add_module("flatten", Flatten())
self.add_module("fc1", nn.Linear(7 * 7 * 2048, 1024))
self.add_module("fc2", nn.Linear(1024, 256))
self.add_module("classifier", nn.Linear(256, num_classes))
def forward(self, x):
return self.__forward__(x)
def forward_large_net(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
out_conv1_1x1 = self.conv1_1x1(x)
pepx11 = self.pexp1_1(x)
pepx12 = self.pexp1_2(pepx11 + out_conv1_1x1)
pepx13 = self.pexp1_3(pepx12 + pepx11 + out_conv1_1x1)
out_conv2_1x1 = F.max_pool2d(
self.conv2_1x1(pepx12 + pepx11 + pepx13 + out_conv1_1x1), 2)
pepx21 = self.pexp2_1(
F.max_pool2d(pepx13, 2) + F.max_pool2d(pepx11, 2) +
F.max_pool2d(pepx12, 2) + F.max_pool2d(out_conv1_1x1, 2))
pepx22 = self.pexp2_2(pepx21 + out_conv2_1x1)
pepx23 = self.pexp2_3(pepx22 + pepx21 + out_conv2_1x1)
pepx24 = self.pexp2_4(pepx23 + pepx21 + pepx22 + out_conv2_1x1)
out_conv3_1x1 = F.max_pool2d(
self.conv3_1x1(pepx22 + pepx21 + pepx23 + pepx24 + out_conv2_1x1),
2)
pepx31 = self.pexp3_1(
F.max_pool2d(pepx24, 2) + F.max_pool2d(pepx21, 2) +
F.max_pool2d(pepx22, 2) + F.max_pool2d(pepx23, 2) +
F.max_pool2d(out_conv2_1x1, 2))
pepx32 = self.pexp3_2(pepx31 + out_conv3_1x1)
pepx33 = self.pexp3_3(pepx31 + pepx32 + out_conv3_1x1)
pepx34 = self.pexp3_4(pepx31 + pepx32 + pepx33 + out_conv3_1x1)
pepx35 = self.pexp3_5(pepx31 + pepx32 + pepx33 + pepx34 +
out_conv3_1x1)
pepx36 = self.pexp3_6(pepx31 + pepx32 + pepx33 + pepx34 + pepx35 +
out_conv3_1x1)
out_conv4_1x1 = F.max_pool2d(
self.conv4_1x1(pepx31 + pepx32 + pepx33 + pepx34 + pepx35 +
pepx36 + out_conv3_1x1),
2,
)
pepx41 = self.pexp4_1(
F.max_pool2d(pepx31, 2) + F.max_pool2d(pepx32, 2) +
F.max_pool2d(pepx33, 2) + F.max_pool2d(pepx34, 2) +  # pepx33: fixes a duplicated pepx32 in the skip-connection sum
F.max_pool2d(pepx35, 2) + F.max_pool2d(pepx36, 2) +
F.max_pool2d(out_conv3_1x1, 2))
pepx42 = self.pexp4_2(pepx41 + out_conv4_1x1)
pepx43 = self.pexp4_3(pepx41 + pepx42 + out_conv4_1x1)
flattened = self.flatten(pepx41 + pepx42 + pepx43 + out_conv4_1x1)
fc1out = F.relu(self.fc1(flattened))
fc2out = F.relu(self.fc2(fc1out))
logits = self.classifier(fc2out)
return logits
def forward_small_net(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
pepx11 = self.pexp1_1(x)
pepx12 = self.pexp1_2(pepx11)
pepx13 = self.pexp1_3(pepx12 + pepx11)
pepx21 = self.pexp2_1(
F.max_pool2d(pepx13, 2) + F.max_pool2d(pepx11, 2) +
F.max_pool2d(pepx12, 2))
pepx22 = self.pexp2_2(pepx21)
pepx23 = self.pexp2_3(pepx22 + pepx21)
pepx24 = self.pexp2_4(pepx23 + pepx21 + pepx22)
pepx31 = self.pexp3_1(
F.max_pool2d(pepx24, 2) + F.max_pool2d(pepx21, 2) +
F.max_pool2d(pepx22, 2) + F.max_pool2d(pepx23, 2))
pepx32 = self.pexp3_2(pepx31)
pepx33 = self.pexp3_3(pepx31 + pepx32)
pepx34 = self.pexp3_4(pepx31 + pepx32 + pepx33)
pepx35 = self.pexp3_5(pepx31 + pepx32 + pepx33 + pepx34)
pepx36 = self.pexp3_6(pepx31 + pepx32 + pepx33 + pepx34 + pepx35)
pepx41 = self.pexp4_1(
F.max_pool2d(pepx31, 2) + F.max_pool2d(pepx32, 2) +
F.max_pool2d(pepx33, 2) + F.max_pool2d(pepx34, 2) +  # pepx33: fixes a duplicated pepx32 in the skip-connection sum
F.max_pool2d(pepx35, 2) + F.max_pool2d(pepx36, 2))
pepx42 = self.pexp4_2(pepx41)
pepx43 = self.pexp4_3(pepx41 + pepx42)
flattened = self.flatten(pepx41 + pepx42 + pepx43)
fc1out = F.relu(self.fc1(flattened))
fc2out = F.relu(self.fc2(fc1out))
logits = self.classifier(fc2out)
return logits
def COVIDNet(num_classes=3):
return CovidNet(num_classes=num_classes)
def ResNet18_COVID(num_classes=3):
net = models.resnet18(pretrained=True)
net.fc = nn.Linear(512, num_classes)
return net
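# Minimal usage sketch. The 224x224 input size is an assumption derived from the
# fc1 layer (7 * 7 * 2048 features after five /2 downsampling stages):
#
#     model = CovidNet(model="small", num_classes=3)
#     logits = model(torch.randn(1, 3, 224, 224))  # -> tensor of shape (1, 3)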
# /MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ats/model/url_request.py
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.ats.model.url_type_enum import UrlTypeEnum
globals()['UrlTypeEnum'] = UrlTypeEnum
class UrlRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('value',): {
'max_length': 2000,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'value': (str, none_type, none_type,), # noqa: E501
'url_type': (UrlTypeEnum, str, none_type,),
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'value': 'value', # noqa: E501
'url_type': 'url_type', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""UrlRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
value (str, none_type): The site's url. [optional] # noqa: E501
url_type (bool, dict, float, int, list, str, none_type): The type of site. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = kwargs.get("value", None)
self.url_type = kwargs.get("url_type", None)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UrlRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
value (str, none_type): The site's url. [optional] # noqa: E501
url_type (bool, dict, float, int, list, str, none_type): The type of site. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value: Union[str, none_type] = kwargs.get("value", None)
self.url_type: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("url_type", None)
# /BiblioPixel-3.4.46.tar.gz/BiblioPixel-3.4.46/bibliopixel/util/deprecated.py
import os, sys
CHOICES = 'ignore', 'fail', 'warn', 'warn_once'
DEFAULT = 'warn_once'
ACTION = None
HELP = """
Specify what to do when a project uses deprecated features:
ignore: do nothing
warn: print warning messages for each feature
warn_once: print a warning message, but only once for each type of feature
fail: throw an exception
"""
DEPRECATED = set()
FLAG = '--deprecated'
V4_FLAG = '--v4'
ENVIRONMENT_VARIABLE = 'BP_DEPRECATED'
V4_HELP = """\
Run BiblioPixel in v4 compatibility mode, to see if it will work with
future releases v4.x
"""
def add_arguments(parser):
parser.add_argument(V4_FLAG, action='store_true', help=V4_HELP)
def allowed():
_compute_action()
return ACTION != 'fail'
def deprecated(msg, *args, **kwds):
_compute_action()
if ACTION == 'ignore':
return
if ACTION == 'warn_once' and msg in DEPRECATED:
return
formatted = msg.format(*args, **kwds)
if ACTION == 'fail':
raise ValueError(formatted)
DEPRECATED.add(msg)
from . import log
log.warning(formatted)
def _compute_action():
global ACTION
if ACTION:
return
if FLAG in sys.argv:
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
if V4_FLAG in sys.argv:
ACTION = 'fail'
d = [i for i, v in enumerate(sys.argv) if v.startswith(FLAG + '=')]
if len(d) > 1:
raise ValueError('Only one %s argument can be used' % FLAG)
if not d:
ACTION = os.getenv(ENVIRONMENT_VARIABLE, ACTION or DEFAULT)
else:
arg = sys.argv.pop(d[0])
_, *rest = arg.split('=')
if len(rest) > 1:
raise ValueError('Extra = in flag %s' % arg)
if not (rest and rest[0].strip()):
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
ACTION = rest[0]
if ACTION not in CHOICES:
ACTION = None
raise ValueError('Unknown deprecation value (must be one of %s)' %
', '.join(CHOICES))
.. /Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/docs/gw/graphite.rst

.. _graphite:
Graphite
========
You can export statistics to a ``Graphite`` server (time series server).
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[graphite]
host=localhost
port=2003
# Prefix will be added for all measurement name
# Ex: prefix=foo
# => foo.cpu
# => foo.mem
# You can also use dynamic values
#prefix=`hostname`
prefix=glances
and run Glances with:
.. code-block:: console
$ glances --export graphite
Note 1: the port defines the TCP port where Graphite listens for plain-text requests.
Note 2: As with many time-series databases, only integers and floats are supported in the Graphite data model.
Note 3: Under the hood, Glances uses the GraphiteSender Python library (https://github.com/NicoAdrian/graphitesender).
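
For reference, each statistic is sent over Graphite's plain-text protocol as a
``<metric path> <value> <timestamp>`` line. With the ``prefix`` configured above,
an exported line could look like the following (illustrative metric name and
values; actual names depend on the enabled plugins):

.. code-block:: text

    glances.cpu.user 12.3 1561548000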
| PypiClean |
# /Cohen-0.7.4.tar.gz/Cohen-0.7.4/coherence/backends/lolcats_storage.py
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Benjamin Kampmann <[email protected]>
"""
This is a Media Backend that allows you to access the cool and cute pictures
from lolcats.com. This is mainly meant as a Sample Media Backend to learn how to
write a Media Backend.
So. You are still reading which allows me to assume that you want to learn how
to write a Media Backend for Coherence. NICE :) .
Once again: This is a SIMPLE Media Backend. It does not contain any big
requests, searches or even transcoding. The only thing we want to do in this
simple example is to fetch an RSS feed on startup, parse it, save it and restart
the process one hour later again. Well, on top of this, we also want to provide
this information as a Media Server in the UPnP/DLNA network, of course ;) .
Wow. You are still reading. You must be really interested. Then let's go.
"""
########## NOTE:
# Please don't complain about the coding style of this document - I know. It is
# just this way to make it easier to document and to understand.
########## The imports
# The entry point for each kind of Backend is a 'BackendStore'. The BackendStore
# is the instance that does everything, usually. In this example it can be
# understood as the 'Server', the object retrieving and serving the data.
from coherence.backend import BackendStore
# The data itself is stored in BackendItems. They are also the first things we
# are going to create.
from coherence.backend import BackendItem
# To make the data 'renderable' we need to define the DIDLLite class of the Media
# we are providing. For that we have a bunch of helpers that we also want to
# import
from coherence.upnp.core import DIDLLite
# Coherence relies on the Twisted backend. I hope you are familiar with the
# concept of deferreds. If not please read:
# http://twistedmatrix.com/projects/core/documentation/howto/async.html
#
# It is a basic concept that you need to understand to understand the following
# code. But why am I talking about it? Oh, right, because we use an HTTP client
# based on the twisted.web.client module to do our requests.
from coherence.upnp.core.utils import getPage
# And we also import the reactor, that allows us to specify an action to happen
# later
from twisted.internet import reactor
# And to parse the RSS-Data (which is XML), we use lxml.etree.fromstring
from lxml.etree import fromstring
########## The models
# After the download and parsing of the data is done, we want to save it. In
# this case, we want to fetch the images and store their URL and the title of
# the image. That is the LolcatsImage class:
class LolcatsImage(BackendItem):
# We inherit from BackendItem as it already contains a lot of helper methods
# and implementations. For this simple example, we only have to fill the
# item with data.
def __init__(self, parent_id, id, title, url):
BackendItem.__init__(self)
self.parentid = parent_id # used to be able to 'go back'
self.update_id = 0
self.id = id # each item has its own and unique id
self.location = url # the url of the picture
self.name = title # the title of the picture. Inside
# coherence this is called 'name'
# Item.item is a special thing. This is used to explain the client what
# kind of data this is. For e.g. A VideoItem or a MusicTrack. In our
# case, we have an image.
self.item = DIDLLite.ImageItem(id, parent_id, self.name)
# each Item.item has to have one or more Resource objects
# these hold detailed information about the media data
# and can represent variants of it (different sizes, transcoded formats)
res = DIDLLite.Resource(self.location, 'http-get:*:image/jpeg:*')
res.size = None # FIXME: we should have a size here
# and a resolution entry would be nice too
self.item.res.append(res)
class LolcatsContainer(BackendItem):
# The LolcatsContainer will hold the reference to all our LolcatsImages. This
# kind of BackenedItem is a bit different from the normal BackendItem,
# because it has 'children' (the lolcatsimages). Because of that we have
# some more stuff to do in here.
def __init__(self, parent_id, id):
BackendItem.__init__(self)
# the ids as above
self.parent_id = parent_id
self.id = id
# we never have a different name anyway
self.name = 'LOLCats'
# but we need to set a certain mimetype to signal that we
# contain 'children'.
self.mimetype = 'directory'
# As we are updating our data periodically, we increase this value so
# that our clients can more easily check whether something has changed since their
# last request.
self.update_id = 0
# that is where we hold the children
self.children = []
# and we need to give a DIDLLite again. This time we want to be
# understood as 'Container'.
self.item = DIDLLite.Container(id, parent_id, self.name)
self.item.childCount = None # will be set as soon as we have images
def get_children(self, start=0, end=0):
# This is the only important implementation thing: we have to return our
# list of children
if end != 0:
return self.children[start:end]
return self.children[start:]
# there is nothing special in here
# FIXME: move it to a base BackendContainer class
def get_child_count(self):
return len(self.children)
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
########## The server
# As already said before the implementation of the server is done in an
# inheritance of a BackendStore. This is where the real code happens (usually).
# In our case this would be: downloading the page, parsing the content, saving
# it in the models and returning them on request.
class LolcatsStore(BackendStore):
# this *must* be set. Because the (most used) MediaServer Coherence also
# allows other kind of Backends (like remote lights).
implements = ['MediaServer']
# this is only for this implementation: the http link to the lolcats rss
# feed that we want to read and parse:
rss_url = "http://feeds.feedburner.com/ICanHasCheezburger?format=xml"
# as we are going to build a (very small) tree with the items, we need to
# define the first (the root) item:
ROOT_ID = 0
def __init__(self, server, *args, **kwargs):
# first we initialize our superclass
BackendStore.__init__(self, server, **kwargs)
# When a Backend is initialized, the configuration is given as keyword
# arguments to the initialization. We receive it here as a dictionary
# and allow some values to be set:
# the name of the MediaServer as it appears in the network
self.name = kwargs.get('name', 'Lolcats')
# timeout between updates in hours:
self.refresh = int(kwargs.get('refresh', 1)) * (60 * 60)
# the UPnP device that's hosting that backend, that's already done
# in the BackendStore.__init__, just left here the sake of completeness
self.server = server
# internally used to have a new id for each item
self.next_id = 1000
# we store the last update from the rss feed so that we know if we have
# to parse again, or not:
self.last_updated = None
# initialize our lolcats container (no parent, this is the root)
self.container = LolcatsContainer(None, self.ROOT_ID)
# but as we also have to return them on 'get_by_id', we have our local
# store of images per id:
self.images = {}
# we tell that if an XBox sends a request for images we'll
# map the WMC id of that request to our local one
self.wmc_mapping = {'16': 0}
# and trigger an update of the data
dfr = self.update_data()
# So, even though the initialize is kind of done, Coherence does not yet
# announce our Media Server.
# Coherence waits for a signal sent by us that we are ready now.
# And we don't want that to happen until we have succeeded
# in fetching some initial data, so we delay this signaling until after the update is done:
dfr.addCallback(self.init_completed)
dfr.addCallback(self.queue_update)
def get_by_id(self, id):
print "asked for", id, type(id)
# what ever we are asked for, we want to return the container only
if isinstance(id, basestring):
id = id.split('@', 1)
id = id[0]
if int(id) == self.ROOT_ID:
return self.container
return self.images.get(int(id), None)
def upnp_init(self):
# after the signal was triggered, this method is called by coherence and
# from now on self.server is existing and we can do
# the necessary setup here
# that allows us to specify our server options in more detail.
# here we define what kind of media content we do provide
# mostly needed to make some naughty DLNA devices behave
# will probably move into Coherence internals one day
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo', [
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:*'])
# and as it was done after we fetched the data the first time
# we want to take care about the server wide updates as well
self._update_container()
def _update_container(self, result=None):
# we need to inform Coherence about these changes
# again this is something that will probably move
# into Coherence internals one day
if self.server:
self.server.content_directory_server.set_variable(0,
'SystemUpdateID', self.update_id)
value = (self.ROOT_ID, self.container.update_id)
self.server.content_directory_server.set_variable(0,
'ContainerUpdateIDs', value)
return result
def update_loop(self):
# in the loop we want to call update_data
dfr = self.update_data()
# after it is done we want to take care of updating
# the container
dfr.addCallback(self._update_container)
# in ANY case queue an update of the data
dfr.addBoth(self.queue_update)
def update_data(self):
# trigger an update of the data
# fetch the rss
dfr = getPage(self.rss_url)
# push it through our xml parser
dfr.addCallback(fromstring)
# then parse the data into our models
dfr.addCallback(self.parse_data)
return dfr
def parse_data(self, root):
# from there, we look for the newest update and compare it with the one
# we have saved. If they are the same, we don't need to go on:
pub_date = root.find('./channel/lastBuildDate').text
if pub_date == self.last_updated:
return
# not the case, set this as the last update and continue
self.last_updated = pub_date
# and reset the childrens list of the container and the local storage
self.container.children = []
self.images = {}
# Attention, as this is an example, this code is meant to be as simple
# as possible and not as efficient as possible. IMHO the following code
# pretty much sucks, because it is totally blocking (even though we have
# 'only' 20 elements)
# we go through our entries and do something specific to the
# lolcats-rss-feed to fetch the data out of it
url_item = './{http://search.yahoo.com/mrss/}content'
for item in root.findall('./channel/item'):
title = item.find('./title').text
try:
url = item.findall(url_item)[1].get('url', None)
except IndexError:
continue
if url is None:
continue
image = LolcatsImage(self.ROOT_ID, self.next_id, title, url)
self.container.children.append(image)
self.images[self.next_id] = image
# increase the next_id entry every time
self.next_id += 1
# and increase the container update id and the system update id
# so that the clients can refresh with the new data
self.container.update_id += 1
self.update_id += 1
def queue_update(self, error_or_failure):
# We use the reactor to queue another updating of our data
print error_or_failure
reactor.callLater(self.refresh, self.update_loop)
# /BioFlow-0.2.3.tar.gz/BioFlow-0.2.3/bioflow/bio_db_parsers/uniprotParser.py
import re
import copy
from bioflow.utils.log_behavior import get_logger
log = get_logger(__name__)
interesting_lines = ['ID', 'AC', 'DE', 'GN', 'OX', 'DR']
interesting_xrefs = ['EMBL', 'GO', 'Pfam', 'Ensembl', 'KEGG', 'PDB', 'GeneID', 'SUPFAM']
names_to_ignore = [
'Contains',
'Allergen',
'EC=',
'Flags: ',
'CD_antigen',
'INN=']
uniprot_load_dict = {
'Acnum': [],
'Names': {
'Full': '',
'AltNames': []},
'GeneRefs': {
'Names': [],
'AltNames': [],
'OrderedLocusNames': [],
'ORFNames': []},
'Ensembl': [],
'KEGG': [],
'EMBL': [],
'GO': [],
'Pfam': [],
'SUPFAM': [],
'PDB': [],
'GeneID': [],
'RefSeq': [],
'MGI': []}
class UniProtParser(object):
"""Wraps the Uniprot parser """
def __init__(self, tax_ids_to_parse):
"""
:param tax_ids_to_parse: list of NCBI taxonomy identifiers we are interested in
:return:
"""
self._ignore = [False, 2]
self.interesting_lines = interesting_lines
self.interesting_xrefs = interesting_xrefs
self.names_to_ignore = names_to_ignore
self._single_up_dict = {}
self.uniprot = {}
self.parsed = False
self.tax_id_list = tax_ids_to_parse
def parse_xref(self, line):
"""
Parses an xref line from the UniProt text file and updates the internal per-entry
dictionary with the results of parsing
:param line:
"""
if 'EMBL; ' in line and 'ChEMBL' not in line:
contents_list = line.split(';')
if len(contents_list) > 4:
package = {'Accession': contents_list[1].strip(),
'ID': contents_list[2].strip(),
'status': contents_list[3].strip(),
'type': contents_list[4].strip().strip('.')}
else:
package = {'Accession': contents_list[1].strip(),
'ID': contents_list[2].strip(),
'status': contents_list[3].strip(),
'type': ''}
self._single_up_dict['EMBL'].append(package)
if 'GO; GO:' in line:
self._single_up_dict['GO'].append(line.split(';')[1].split(':')[1].strip())
if 'Pfam; ' in line:
self._single_up_dict['Pfam'].append(line.split(';')[1].strip())
if 'SUPFAM; ' in line:
self._single_up_dict['SUPFAM'].append(line.split(';')[1].strip())
if 'Ensembl; ' in line:
self._single_up_dict['Ensembl'].append(line.split(';')[1].strip())
self._single_up_dict['Ensembl'].append(line.split(';')[2].strip())
self._single_up_dict['Ensembl'].append(line.split(';')[3].strip().strip('.'))
if 'KEGG; ' in line:
self._single_up_dict['KEGG'].append(line.split(';')[1].strip())
if 'PDB; ' in line:
self._single_up_dict['PDB'].append(line.split(';')[1].strip())
if 'GeneID; ' in line:
self._single_up_dict['GeneID'].append(line.split(';')[1].strip())
if 'RefSeq; ' in line:
self._single_up_dict['RefSeq'].append(line.split(';')[1].strip())
self._single_up_dict['RefSeq'].append(line.split(';')[2].split(' ')[0].strip())
if 'MGI;' in line:
self._single_up_dict['MGI'].append(line.split(';')[2].split(' ')[0].strip())
def parse_gene_references(self, line):
"""
Parses gene names and references from the UNIPROT text file
:param line:
"""
words = [x for x in str(line[2:].strip() + ' ').split('; ') if x != '']
for word in words:
if 'ORFNames' in word:
for subword in word.split('=')[1].strip().split(','):
self._single_up_dict['GeneRefs']['ORFNames'].append(subword.strip())
if 'OrderedLocusNames' in word:
for subword in word.split('=')[1].strip().split(','):
self._single_up_dict['GeneRefs']['OrderedLocusNames'].append(subword.strip())
if 'Name=' in word:
for subword in word.split('=')[1].strip().replace(',', ' ').replace(';', ' ').split():
if re.match("^[a-zA-Z0-9_.-]*$", subword):
self._single_up_dict['GeneRefs']['Names'].append(subword.strip())
else:
if '{' not in subword:
print "rejected %s: doesn't look like a valid name" % subword
if 'Synonyms=' in word:
for subword in word.split('=')[1].strip().replace(',', ' ').replace(';', ' ').split():
if re.match("^[a-zA-Z0-9_.-]*$", subword):
self._single_up_dict['GeneRefs']['AltNames'].append(subword.strip())
else:
if '{' not in subword:
print "rejected %s: doesn't look like a valid name" % subword
def parse_name(self, line):
"""
Parses a line that contains a name associated to the entry we are trying to load
:param line:
:return:
"""
if 'RecName: Full=' in line:
self._single_up_dict['Names']['Full'] = line.split('RecName: Full=')[1].split(';')[0].split('{')[0]
return ''
if 'AltName: Full=' in line:
self._single_up_dict['Names']['AltNames'].append(
line.split('AltName: Full=')[1].split(';')[0].split('{')[0])
return ''
if 'Short=' in line:
self._single_up_dict['Names']['AltNames'].append(line.split('Short=')[1].split(';')[0].split('{')[0])
return ''
if self._ignore[0]:
if self._ignore[1] == 0:
self._ignore[0] = False
self._ignore[1] = 2
return ''
else:
return ''
if ' Includes:' in line:
self._ignore[0] = True
return ''
if any(x in line for x in self.names_to_ignore):
return ''
def process_line(self, line, keyword):
"""
A function that processes a line parsed from the UNIPROT database file
:param line:
:param keyword:
"""
if keyword == 'ID':
words = [a for a in line.split(' ') if a != '']
self._single_up_dict['ID'] = words[1]
if keyword == 'AC':
words = [a for a in line[5:].split(' ') if a != '']
for word in words:
self._single_up_dict['Acnum'].append(word.split(';')[0])
if keyword == 'OX':
tentative_tax_id = line.split('NCBI_TaxID=')[1].split(';')[0]
if ' ' in tentative_tax_id:
tentative_tax_id = tentative_tax_id.split(' ')[0]
self._single_up_dict['TaxID'] = tentative_tax_id
if keyword == 'DE':
self.parse_name(line)
if keyword == 'GN':
self.parse_gene_references(line)
if keyword == 'DR' and any(x in line for x in self.interesting_xrefs):
self.parse_xref(line)
def end_block(self):
"""
Manages the behavior of the end of a parse block
:return:
"""
if self._single_up_dict['TaxID'] in self.tax_id_list:
self._ignore[0] = False
self.uniprot[self._single_up_dict['ID']] = self._single_up_dict
return copy.deepcopy(uniprot_load_dict)
def parse_uniprot(self, source_path):
"""
Performs the entire uniprot file parsing and importing
:param source_path: path towards the UniProt text file
:return: uniprot parse dictionary
"""
self._single_up_dict = copy.deepcopy(uniprot_load_dict)
source_file = open(source_path, "r")
line_counter = 0
while True:
line = source_file.readline()
line_counter += 1
if not line:
break
keyword = line[0:2]
if keyword == '//':
self._single_up_dict = self.end_block()
if keyword in self.interesting_lines:
self.process_line(line, keyword)
log.info("%s lines scanned during UNIPROT import", line_counter)
self.parsed = True
return self.uniprot
def get_access_dicts(self):
"""
Returns an access dictionary that maps gene names, AcNums or EMBL identifiers to the
SwissProt IDs
:return: dictionary mapping all the external database identifiers to UniProt IDs
"""
if not self.parsed:
log.warning('Attempting to get access points to a non-parsed uniprot object')
access_dict = {}
for key in self.uniprot.keys():
for sub_element in self.uniprot[key]['KEGG']:
access_dict[sub_element] = key
for sub_element in self.uniprot[key]['Ensembl']:
access_dict[sub_element] = key
for sub_element in self.uniprot[key]['EMBL']:
access_dict[sub_element['Accession']] = key
access_dict[sub_element['ID']] = key
for sub_element in self.uniprot[key]['Acnum']:
access_dict[sub_element] = key
for sub_element in self.uniprot[key]['GeneRefs']['Names']:
access_dict[sub_element] = key
for sub_element in self.uniprot[key]['GeneRefs']['AltNames']:
access_dict[sub_element] = key
for sub_element in self.uniprot[key]['GeneRefs']['OrderedLocusNames']:
access_dict[sub_element] = key
for sub_element in self.uniprot[key]['GeneRefs']['ORFNames']:
access_dict[sub_element] = key
return access_dict
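# Illustrative usage (file path and taxonomy ID are placeholders):
#
#     parser = UniProtParser(tax_ids_to_parse=['9606'])
#     uniprot_defs = parser.parse_uniprot('/path/to/uniprot_sprot.dat')
#     id_mapping = parser.get_access_dicts()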
/Eden-2.1.14.zip/Eden-2.1.14/eden/tutorialWinForms/calculatorRpn/calculatorRpn.py |
# calculatorRpn.py
from org.qquick.eden import *
# --- Constants
Digits = list ('0123456789')
Operators = list ('+-*/')
Open, Terminated, Entered = range (3)
# --- Local nodes
doDigitNodes = [Node (None) .tagged (digit) for digit in Digits]
doOperatorNodes = [Node (None) .tagged (operator) for operator in Operators]
doDotNode = Node (None) .tagged ('.')
doChangeSignNode = Node (None) .tagged ('+/-')
doEnterNode = Node (None) .tagged ('enter')
doClearNode = Node (None) .tagged ('C')
doKeyNodes = doDigitNodes + doOperatorNodes + [doDotNode, doChangeSignNode, doEnterNode, doClearNode]
inputNode = Node ('')
stackNode = Node (['', '0', '0', '0'])
stateNode = Node (Open)
displayNode = Node ()
# --- Dependencies
inputNode.dependsOn (doKeyNodes, lambda: triggerNode () .tag)
def getStack ():
o = stackNode.old
if inputNode.new == '+/-': return [str (-1 * eval (o [0])), o [1], o [2], o [3]]
elif inputNode.new in Operators: return [str (eval ('1.*' + o [1] + inputNode.new + o [0])), o [2], o [3], o [3]]
elif inputNode.new in Digits + ['.']:
if stateNode.old == Terminated: return [inputNode.new, o [0], o [1], o [2]]
elif stateNode.old == Entered: return [inputNode.new, o [1], o [2], o [3]]
else: return [o [0] + inputNode.new, o [1], o [2], o [3]]
elif inputNode.new == 'enter': return [o [0], o [0], o [1], o [2]]
else: return ['', o [1], o [2], o [3]]
stackNode.dependsOn ([inputNode], getStack)
def getState ():
if inputNode.new in Operators: return Terminated
elif inputNode.new == 'enter': return Entered
elif inputNode.new == '+/-': return stateNode.old
else: return Open
stateNode.dependsOn ([inputNode], getState)
displayNode.dependsOn ([stackNode], lambda: stackNode.new [0])
# --- Views
def key (tag):
for doKeyNode in doKeyNodes:
if doKeyNode.tag == tag:
return ButtonView (doKeyNode, tag)
mainView = MainView (
GridView ([
[TextView (displayNode), HExtensionView (), HExtensionView (), HExtensionView ()],
[key ('enter'), HExtensionView (), key ('+/-'), HExtensionView ()],
[key (tag) for tag in '789/'],
[key (tag) for tag in '456*'],
[key (tag) for tag in '123-'],
[key (tag) for tag in '0.C+'],
]), 'RPN Calculator'
)
mainView.execute () | PypiClean |
/APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/views/FilterDesing.py | from PySide import QtCore
from PySide import QtGui
import matplotlib
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from apasvo.gui.views import navigationtoolbar
from apasvo.gui.views import processingdialog
from apasvo.utils import clt
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, lfilter, freqz
import numpy as np
import traceback
from apasvo.picking import apasvotrace as rc
from apasvo.picking import takanami
from apasvo._version import _application_name
from apasvo._version import _organization
MINIMUM_MARGIN_IN_SECS = 0.5
class FilterDesignTask(QtCore.QObject):
"""A class to handle a Takanami exec. task.
Attributes:
record: An opened seismic record.
start: Start point of the signal segment where
the algorithm is going to be applied.
end: End point of the signal segment where
the algorithm is going to be applied.
Signals:
finished: Task finishes.
position_estimated: Return values of Takanami method are ready.
"""
finished = QtCore.Signal()
error = QtCore.Signal(str, str)
position_estimated = QtCore.Signal(int, np.ndarray, int)
def __init__(self, record):
super(FilterDesignTask, self).__init__()
self.record = record
class FilterDesignDialog(QtGui.QDialog):
"""A dialog to apply Takanami's AR picking method to a selected piece of a
seismic signal.
Attributes:
document: Current opened document containing a seismic record.
seismic_event: A seismic event to be refined by using Takanami method.
If no event is provided, then a new seismic event will be created
by using the estimated arrival time after clicking on 'Accept'
"""
def __init__(self, stream, trace_list=None, parent=None):
super(FilterDesignDialog, self).__init__(parent)
# Calc max. frequency
traces = stream.traces if not trace_list else trace_list
self.max_freq = max([trace.fs for trace in traces])
self._init_ui()
self.load_settings()
# Initial draw
w, h_db, angles = self._retrieve_filter_plot_data()
self._module_data = self.module_axes.plot(w, h_db, 'b')[0]
self._phase_data = self.phase_axes.plot(w, angles, 'g')[0]
self.module_axes.set_ylim([-60,10])
self.phase_axes.set_ylim([min(angles), max(angles)])
self.canvas.draw_idle()
self.start_point_spinbox.valueChanged.connect(self.on_freq_min_changed)
self.end_point_spinbox.valueChanged.connect(self.on_freq_max_changed)
self.start_point_spinbox.valueChanged.connect(self._draw_filter_response)
self.end_point_spinbox.valueChanged.connect(self._draw_filter_response)
self.number_coefficient_spinbox.valueChanged.connect(self._draw_filter_response)
self.zeroPhaseCheckBox.toggled.connect(self._draw_filter_response)
self.button_box.accepted.connect(self.accept)
self.button_box.rejected.connect(self.reject)
self.button_box.clicked.connect(self.on_click)
def _init_ui(self):
self.setWindowTitle("Filter Design (Butterworth-Bandpass Filter)")
self.fig, _ = plt.subplots(1, 1, sharex=True)
# Set up filter axes
self.module_axes = self.fig.axes[0]
self.phase_axes = self.module_axes.twinx()
self.module_axes.set_title('Digital filter frequency response (Butterworth-Bandpass filter)')
self.module_axes.set_xlabel('Frequency [Hz]')
self.module_axes.set_ylabel('Amplitude [dB]', color='b')
self.module_axes.axis('tight')
self.module_axes.grid(which='both', axis='both')
self.phase_axes.set_ylabel('Angle (radians)', color='g')
self.canvas = FigureCanvas(self.fig)
self.canvas.setMinimumSize(self.canvas.size())
self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
QtGui.QSizePolicy.Policy.Expanding))
self.toolBarNavigation = navigationtoolbar.NavigationToolBar(self.canvas, self)
self.group_box = QtGui.QGroupBox(self)
self.group_box2 = QtGui.QGroupBox(self)
self.group_box3 = QtGui.QGroupBox(self)
self.group_box4 = QtGui.QGroupBox(self)
self.group_box.setTitle("")
self.group_box2.setTitle("")
self.group_box3.setTitle("Parameters")
self.start_point_label = QtGui.QLabel("Lower cutoff frequency (Hz): ")
self.start_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.start_point_spinbox = QtGui.QDoubleSpinBox(self.group_box)
self.start_point_spinbox.setMinimum(1.0)
self.start_point_spinbox.setSingleStep(1.00)
self.start_point_spinbox.setAccelerated(True)
self.start_point_spinbox.setMaximum(self.max_freq * 0.5)
self.end_point_label = QtGui.QLabel("Higher cutoff frequency (Hz):")
self.end_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.end_point_spinbox = QtGui.QDoubleSpinBox(self.group_box4)
self.end_point_spinbox.setMinimum(1.0)
self.end_point_spinbox.setSingleStep(1.00)
self.end_point_spinbox.setAccelerated(True)
self.end_point_spinbox.setMaximum(self.max_freq * 0.5)
self.end_point_spinbox.setValue(5.0)
#######################################################################
self.number_coefficient_label = QtGui.QLabel("Order: ")
self.number_coefficient_label2 = QtGui.QLabel("")
self.number_coefficient_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.number_coefficient_label2.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.number_coefficient_spinbox = QtGui.QSpinBox(self.group_box3)
self.number_coefficient_spinbox.adjustSize()
self.number_coefficient_spinbox.setMinimum(1)
self.number_coefficient_spinbox.setSingleStep(1)
self.number_coefficient_spinbox.setAccelerated(True)
self.zeroPhaseCheckBox = QtGui.QCheckBox("Zero phase filtering", self.group_box2)
self.zeroPhaseCheckBox.setChecked(True)
#######################################################################
self.group_box_layout = QtGui.QHBoxLayout(self.group_box)
self.group_box_layout.setContentsMargins(9, 9, 9, 9)
self.group_box_layout.setSpacing(12)
self.group_box_layout.addWidget(self.start_point_label)
self.group_box_layout.addWidget(self.start_point_spinbox)
self.group_box4_layout = QtGui.QHBoxLayout(self.group_box4)
self.group_box4_layout.setContentsMargins(9, 9, 9, 9)
self.group_box4_layout.setSpacing(12)
self.group_box4_layout.addWidget(self.end_point_label)
self.group_box4_layout.addWidget(self.end_point_spinbox)
#####################################################################
self.group_box2_layout = QtGui.QHBoxLayout(self.group_box2)
self.group_box2_layout.setContentsMargins(9, 9, 9, 9)
self.group_box2_layout.setSpacing(12)
self.group_box2_layout.addWidget(self.zeroPhaseCheckBox)
###################################################################
self.group_box3_layout = QtGui.QHBoxLayout(self.group_box3)
self.group_box3_layout.setContentsMargins(9, 9, 9, 9)
self.group_box3_layout.setSpacing(12)
self.group_box3_layout.addWidget(self.number_coefficient_label)
self.group_box3_layout.addWidget(self.number_coefficient_spinbox)
self.group_box3_layout.addWidget(self.number_coefficient_label2)
#####################################################################
self.button_box = QtGui.QDialogButtonBox(self)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Apply |
QtGui.QDialogButtonBox.Cancel |
QtGui.QDialogButtonBox.Ok)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(9, 9, 9, 9)
self.layout.setSpacing(6)
self.layout.addWidget(self.toolBarNavigation)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.group_box3)
self.layout.addWidget(self.group_box)
self.layout.addWidget(self.group_box4)
#self.layout.addWidget(self.group_box2)
self.layout.addWidget(self.zeroPhaseCheckBox)
self.layout.addWidget(self.button_box)
def on_freq_min_changed(self, value):
self.end_point_spinbox.setMinimum(value + 1.0)
def on_freq_max_changed(self, value):
self.start_point_spinbox.setMaximum(value - 1.0)
def on_click(self, button):
if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Ok:
self.save_settings()
if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Apply:
self._draw_filter_response()
def save_settings(self):
"""Save settings to persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("filterdesign_settings")
#self.default_margin = int(float(settings.value('filterdesign_margin', 5.0)) *
#self.record.fs)
settings.setValue('freq_min', self.start_point_spinbox.value())
settings.setValue('freq_max', self.end_point_spinbox.value())
settings.setValue('coef_number', self.number_coefficient_spinbox.value())
settings.setValue('zero_phase', self.zeroPhaseCheckBox.isChecked())
settings.endGroup()
def load_settings(self):
"""Loads settings from persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("filterdesign_settings")
self.start_point_spinbox.setValue(float(settings.value('freq_min', 0.0)))
self.end_point_spinbox.setValue(float(settings.value('freq_max', self.max_freq * 0.5)))
self.number_coefficient_spinbox.setValue(int(settings.value('coef_number', 1)))
self.zeroPhaseCheckBox.setChecked(bool(settings.value('zero_phase', True)))
settings.endGroup()
def _butter_bandpass(self, lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
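    # Hedged, standalone sketch of applying the coefficients returned above
    # outside this dialog (fs, cutoffs, order and raw_signal are illustrative
    # placeholders, not defaults of this class):
    #
    #     from scipy.signal import butter, filtfilt
    #     fs, low, high, order = 100.0, 2.0, 10.0, 4
    #     b, a = butter(order, [low / (0.5 * fs), high / (0.5 * fs)], btype='band')
    #     filtered = filtfilt(b, a, raw_signal)  # zero-phase filtering
    #
    # filtfilt mirrors the "Zero phase filtering" checkbox; lfilter would be the
    # single-pass, phase-distorting alternative.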
def _retrieve_filter_plot_data(self):
b, a = self._butter_bandpass(self.start_point_spinbox.value(), self.end_point_spinbox.value(), self.max_freq, order=self.number_coefficient_spinbox.value())
#w, h = freqz(b, a)
        w, h = freqz(b, a, 1024)
angles = np.unwrap(np.angle(h))
#return (self.max_freq * 0.5 / np.pi) * w, 20 * np.log10(abs(h)), angles
        f = (self.max_freq / 2) * (w / np.pi)
return f, 20 * np.log10(abs(h)), angles
def _draw_filter_response(self, *args, **kwargs):
w, h_db, angles = self._retrieve_filter_plot_data()
self._module_data.set_xdata(w)
self._module_data.set_ydata(h_db)
self._phase_data.set_xdata(w)
self._phase_data.set_ydata(angles)
self.phase_axes.set_ylim([min(angles), max(angles)])
self.canvas.draw_idle() | PypiClean |
/BullETS-0.1.1.tar.gz/BullETS-0.1.1/README.md | # BullETS

BullETS is a Python library designed to help with the development of algorithmic trading strategies.
## Upcoming features
- Retrieve stock data
- Trading portfolio management
- Backtesting framework
## Installation
This section will assume you have **Python** installed, if not, you can download & install it from [here](https://www.python.org/downloads/).
We strongly recommend using a [virtual environment](https://docs.python.org/3/library/venv.html) to keep BullETS and its dependencies from interfering with your system installs.
### Initializing and running a virtual environment
Windows:
```shell
# Initializing a virtual environment in the ./venv directory
py -3 -m venv venv
# Activating the virtual environment
venv\Scripts\activate.bat
```
Mac OS & Linux:
```shell
# Initializing a virtual environment in the ./venv directory
python3 -m venv venv
# Activating the virtual environment
source venv/bin/activate
```
### Using BullETS to develop a strategy
1. Register an account on the [FinancialModelingPrep website](https://financialmodelingprep.com/developer) and retrieve your API key
2. Create a new folder, initialize and activate a virtual environment inside (see above)
3. Install [BullETS](https://pypi.org/project/BullETS/) from PyPI
```shell
pip install BullETS
```
4. Code your own strategy
```python
from bullets.strategy import Strategy, Resolution
from bullets.runner import Runner
from bullets.data_source.data_source_fmp import FmpDataSource
from datetime import datetime
# Extend the default strategy from BullETS
class MyStrategy(Strategy):
# You can access the `portfolio` and the `data_source` variables to retrieve information for your strategy
# You are also free to add your own data sources here and use them
# Redefine this function to perform a task when the strategy starts
def on_start(self):
pass
# Redefine this function to perform a task on each resolution
def on_resolution(self):
self.portfolio.market_order("AAPL", 5)
# Redefine this function to perform a task at the end of the strategy
def on_finish(self):
pass
# Initialize your new strategy
if __name__ == '__main__':
resolution = Resolution.DAILY # Define your resolution (DAILY, HOURLY or MINUTE)
start_time = datetime(2019, 3, 5) # Define your strategy start time
end_time = datetime(2019, 4, 22) # Define your strategy end time
data_source = FmpDataSource("Insert your key here", resolution) # Initialize the FMP data source with your API key and resolution
strategy = MyStrategy(resolution=resolution,
start_time=start_time,
end_time=end_time,
starting_balance=5000,
data_source=data_source)
runner = Runner(strategy) # Initialize the runner, which handles the execution of your strategy
runner.start() # Start the runner and your strategy
```
This section only covers the basic features needed to develop a strategy. BullETS has other features, such as slippage and transaction fees, among others. Stay tuned for our upcoming detailed documentation demonstrating how to use these features.
### Development mode
This section covers the installation process if you wish to **contribute** to the library.
1. Clone the repo and go to the library's root directory
``` shell
# Clone this repository
git clone https://github.com/AlgoETS/BullETS
# Move to the BullETS directory
cd BullETS
```
2. Initialize and run a virtual environment (see above)
3. Install BullETS in editable mode (while the virtual environment is activated)
```shell
pip install -e .
```
4. Setup environment variables
1. Make a copy of the `.env.sample` file and name it `.env`
2. Replace the required values inside the `.env` file
| PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/packaging/msi.py |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import Document
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
        * filter out forbidden characters. Check for a collision with the help
        of the id_set. Add the number of the collision at the end of the
        created string. Furthermore, care for a correct start of the string.
"""
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
if s[0] in '0123456789.':
s = '_' + s
id = ''.join([c for c in s if c in charset])
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not, so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
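# Illustrative behaviour (made-up input): convert_to_id('8.3-name.txt', {})
# returns '_8.3name.txt' -- the leading digit forces the '_' prefix and '-' is
# dropped because it is not in the allowed charset; a later string whose
# filtered form collides with an existing id gets a numeric suffix appended.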
def is_dos_short_file_name(file):
""" Examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" See http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
        These are not complete 8.3 DOS short names. The ~ char is missing and
        is replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
fname = ''.join([c for c in fname if c not in forbidden])
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = not None, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(), str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
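# Illustrative behaviour (made-up input): gen_dos_short_file_name(
# 'longfilename.txt', []) yields 'LONGFIL1.TXT' -- the stem is truncated to
# leave room for the collision counter and both parts are upper-cased.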
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
    To handle this requirement, the uuid is generated by hashing the
    whole subtree of an xml node (via uuid5).
"""
import uuid
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = uuid.uuid5(uuid.NAMESPACE_URL, node.toxml())
node.attributes[attribute] = str(hash)
def string_wxsfile(target, source, env):
return "building WiX file %s" % target[0].path
def build_wxsfile(target, source, env):
""" Compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
f = open(target[0].get_abspath(), 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
f.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
finally:
f.close()
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
r""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" Builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
    Features are specified with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" Returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
    It should be called after the tree has been built completely. We assume
    that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
    Furthermore, a top-level Feature with the NAME and VERSION of the software will be created.
    A PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" This function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" Creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
with open(os.path.join(directory.get_path(), 'License.rtf'), 'w') as f:
f.write('{\\rtf')
if text!='':
f.write(text.replace('\n', '\\par '))
else:
f.write(name+'\\par\\par')
f.write('}')
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
    # now the optional tags, for which we avoid the KeyError exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for the .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
    # gather the keywords for the specfile compiler. These are the arguments
    # given to the package function and all optional ones stored in kw, minus
    # the source, target and env ones.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Klampt-0.9.0-cp36-cp36m-win_amd64.whl/klampt/control/blocks/cartesian_drive.py | from .robotcontroller import RobotControllerBlock
from ..cartesian_drive import CartesianDriveSolver
class CartesianDriveController(RobotControllerBlock):
"""Adapts a CartesianDriveSolver to a RobotControllerBlock. The robot's
commanded position is updated by the solver.
It is assumed that the solver is initialized with all the settings but
start is not necessarily called yet. If start=True, then this will start
working by default. Otherwise, it will wait for the 'enter' or 'start'
signal to begin using the drive commands.
"""
def __init__(self,solver,links,baseLinks=None,endEffectorPositions=None,start=True):
assert isinstance(solver,CartesianDriveSolver)
self.solver = solver
self.links = links
self.baseLinks = baseLinks
self.endEffectorPositions = endEffectorPositions
self.start = start
self._qcmd = None
RobotControllerBlock.__init__(self)
self._inputs.addChannel('wdes')
self._inputs.addChannel('vdes')
self._inputs.addChannel('dt')
self._outputs.addChannel('progress')
def signal(self,type,**inputs):
if type == 'enter' or type == 'start':
self.start = True
def advance(self,**inputs):
if 'qcmd' in inputs:
self._qcmd = inputs['qcmd']
if self._qcmd is None:
self._qcmd = inputs['q']
assert self._qcmd is not None,"Need either q or qcmd"
if self.start:
self.solver.start(self._qcmd,self.links,self.baseLinks,self.endEffectorPositions)
self.start=False
progress,qcmd = self.solver.drive(self._qcmd,inputs['wdes'],inputs['vdes'],inputs['dt'])
self._qcmd = qcmd
return {'progress':progress,'qcmd':qcmd}
def __getstate__(self):
import copy
return copy.deepcopy({'driveTransforms':self.solver.driveTransforms,'driveSpeedAdjustment':self.solver.driveSpeedAdjustment})
def __setstate__(self,state):
self.solver.driveTransforms = state['driveTransforms']
self.solver.driveSpeedAdjustment = state['driveSpeedAdjustment'] | PypiClean |
/BehaviorPattern-0.0.8.tar.gz/BehaviorPattern-0.0.8/README.md | BehaviorTool是一个Python包,提供了CombinePattern、ContinuePattern和SequencePattern三个类的实现行为模式挖掘。
## 安装
你可以使用pip安装BahaviorPattern:
```
pip install BehaviorPattern
```
## Usage
Import the classes in your Python code, for example:
```python
from BehaviorPattern import CombinePattern, ContinuePattern, SequencePattern
#--------------------------------- Combined behavior pattern mining ---------------------------------#
use_behavior = []
del_behavior = []
# Create an instance
behavior = CombinePattern.Generate(data=data,
use_behavior=use_behavior,
del_behavior=del_behavior,
min_support=0.1,
min_confidence=0.5,
min_length=3,
max_length=7,
sep='@')
# Run the model; it returns the pattern results and the list of behaviors used
combine, combine_use_behavior = behavior.run()
# Keep the patterns whose lift meets the threshold
combine_result = combine[combine['lift'] > 6]
#--------------------------------- 连续行为模式挖掘 ---------------------------------#
use_behavior = []
del_behavior = []
# Create an instance
behavior = ContinuePattern.Generate(data=data,
use_behavior=use_behavior,
del_behavior=del_behavior,
min_support=0.1,
min_length=3,
max_length=6,
sep='@')
# Run the model; it returns the pattern results and the list of behaviors used
continues, continue_use_behavior = behavior.run()
# Keep the patterns whose lift meets the threshold
continues_result = continues[continues['lift'] > 6]
#--------------------------------- Sequential behavior pattern mining ---------------------------------#
use_behavior = []
del_behavior = []
# Create an instance
behavior = SequencePattern.Generate(data=data,
use_behavior=use_behavior,
del_behavior=del_behavior,
min_support=0.1,
min_length=3,
max_length=7,
sep='@')
# Run the model; it returns the pattern results and the list of behaviors used
sequence, seq_use_behavior = behavior.run()
# Keep the patterns whose lift meets the threshold
sequence_result = sequence[sequence['lift'] > 6]
```
## Dependencies
BehaviorPattern depends on the following Python libraries:
- numpy
- pandas
- efficient_apriori
- tqdm
- prefixspan
The complete list of dependencies can be found in setup.py.
## Contributing
If you find any bugs, please submit an Issue or Pull Request with a fix.
## Authors
BehaviorPattern is written and maintained by Chen Chen.
/FP-SMC-ALS-test1-0.0.1.tar.gz/FP-SMC-ALS-test1-0.0.1/smc/examples/ip_lists.py | import smc.examples
from smc import session
from smc.elements.network import IPList
from smc_info import *
def upload_as_zip(name, filename):
"""
Upload an IPList as a zip file. Useful when IPList is very large.
This is the default upload format for IPLists.
:param str name: name of IPList
:param str filename: name of zip file to upload, full path
:return: None
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.upload(filename=filename)
def upload_as_text(name, filename):
"""
Upload the IPList as text from a file.
:param str name: name of IPList
:param str filename: name of text file to upload
:return: None
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.upload(filename=filename, as_type="txt")
def upload_as_json(name, mylist):
"""
Upload the IPList as json payload.
:param str name: name of IPList
    :param list mylist: list of IPList entries
:return: None
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.upload(json=mylist, as_type="json")
def download_as_zip(name, filename):
"""
Download IPList with zip compression. Recommended for IPLists
of larger sizes. This is the default format for downloading
IPLists.
:param str name: name of IPList
:param str filename: name of filename for IPList
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.download(filename=filename)
def download_as_text(name, filename):
"""
Download IPList as text to specified filename.
:param str name: name of IPList
:param str filename: name of file for IPList download
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.download(filename=filename, as_type="txt")
def download_as_json(name):
"""
Download IPList as json. This would allow for easily
manipulation of the IPList, but generally recommended only for
smaller lists
:param str name: name of IPList
:return: None
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.download(as_type="json")
def create_iplist(name):
"""
Create an empty IPList as name
:param str name: name of IPList
:return: href of list location
"""
iplist = IPList.create(name=name)
return iplist
def create_iplist_with_data(name, iplist):
"""
Create an IPList with initial list contents.
:param str name: name of IPList
:param list iplist: list of IPList IP's, networks, etc
:return: href of list location
"""
iplist = IPList.create(name=name, iplist=iplist)
return iplist
if __name__ == '__main__':
session.login(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120, api_version=API_VERSION)
print("session OK")
try:
# Create initial list
result = create_iplist_with_data(name="mylist", iplist=["123.123.123.123", "23.23.23.23"])
print("This is the href location for the newly created list: %s" % result.href)
print(download_as_text('mylist', filename='/tmp/iplist.txt'))
print(download_as_zip('mylist', filename='/tmp/iplist.zip'))
upload_as_text('mylist', '/tmp/iplist.txt')
upload_as_json('mylist', {'ip': ['1.1.1.1', '2.2.2.2', '3.3.3.3']})
print(download_as_json('mylist'))
upload_as_zip('mylist', '/tmp/iplist.zip')
print(download_as_json('mylist'))
print(create_iplist(name='newlist'))
except Exception as e:
print(e)
exit(1)
finally:
print("delete elements..")
IPList("mylist").delete()
IPList("newlist").delete()
session.logout() | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/managed_lists/lists/plex_watchlist.py | import typing
from collections.abc import MutableSet
from typing import List, Optional, Type, Union
from loguru import logger
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
PLUGIN_NAME = 'plex_watchlist'
SUPPORTED_IDS = ['imdb_id', 'tmdb_id', 'tvdb_id', 'plex_guid']
logger = logger.bind(name=PLUGIN_NAME)
if typing.TYPE_CHECKING:
from plexapi.myplex import MyPlexAccount
from plexapi.video import Movie, Show
def import_plexaccount() -> "Type[MyPlexAccount]":
try:
from plexapi.myplex import MyPlexAccount
return MyPlexAccount
except ImportError:
raise plugin.DependencyError('plex_watchlist', 'plexapi', 'plexapi package required')
def to_entry(plex_item: "Union[Movie, Show]") -> Entry:
entry = Entry(
title=f"{plex_item.title} ({plex_item.year})" if plex_item.year else plex_item.title,
url=plex_item.guid,
)
if plex_item.TYPE == 'movie':
entry['movie_name'] = plex_item.title
entry['movie_year'] = plex_item.year
elif plex_item.TYPE == 'show':
entry['series_name'] = plex_item.title
entry['series_year'] = plex_item.year
entry.update(get_supported_ids_from_plex_object(plex_item))
return entry
def get_supported_ids_from_plex_object(plex_item):
ids = {'plex_guid': plex_item.guid}
for guid in plex_item.guids:
x = guid.id.split("://")
try:
value = int(x[1])
except ValueError:
value = x[1]
media_id = f'{x[0]}_id'
if media_id in SUPPORTED_IDS:
ids[media_id] = value
return ids
class VideoStub:
guid: str
title: str
# plexapi objects are built from XML, so we create a simple stub that works for watchlist calls
def to_plex_item(entry):
item = VideoStub()
item.guid = entry['plex_guid']
item.title = entry['title']
return item
class PlexManagedWatchlist(MutableSet):
def __init__(
self,
username: Optional[str] = None,
password: Optional[str] = None,
token: Optional[str] = None,
filter: Optional[str] = None,
type: Optional[str] = None,
):
self.username = username
self.password = password
self.token = token
self.type = type
self.filter = filter
self._items: Optional[List[Entry]] = None
self._account: Optional[MyPlexAccount] = None
@property
def account(self) -> "MyPlexAccount":
MyPlexAccount = import_plexaccount()
if self._account is None:
self._account = MyPlexAccount(self.username, self.password, self.token)
return self._account
@property
def items(self) -> List[Entry]:
if self._items is None:
watchlist = self.account.watchlist(filter=self.filter, libtype=self.type)
self._items = []
for item in watchlist:
self._items.append(to_entry(item))
return self._items
def __iter__(self):
return iter(self.items)
def __len__(self) -> int:
return len(self.items)
def __contains__(self, entry) -> bool:
return self._find_entry(entry) is not None
def get(self, entry) -> Optional[Entry]:
return self._find_entry(entry)
def add(self, entry: Entry) -> None:
item = None
if 'plex_guid' in entry:
item = to_plex_item(entry)
else:
logger.debug('Searching for {} with discover', entry['title'])
results = self.account.searchDiscover(entry['title'], libtype=self.type)
matched_entry = self._match_entry(entry, [to_entry(result) for result in results])
if matched_entry:
item = to_plex_item(matched_entry)
if item:
if self.account.onWatchlist(item):
logger.debug(f'"{item.title}" is already on the watchlist')
return
logger.debug(f'Adding "{item.title}" to the watchlist')
self.account.addToWatchlist(item)
def discard(self, entry) -> None:
entry = self._find_entry(entry)
if entry:
item = to_plex_item(entry)
logger.debug('Removing {} from watchlist', entry['title'])
self.account.removeFromWatchlist(item)
@property
def online(self) -> bool:
return True
@property
def immutable(self):
return False
def _find_entry(self, entry):
return self._match_entry(entry, self.items)
def _match_entry(self, entry: Entry, entries: List[Entry]):
for item in entries:
# match on supported ids
if any(entry.get(id) is not None and entry[id] == item[id] for id in SUPPORTED_IDS):
return item
name = entry.get('movie_name', None) or entry.get('series_name', None)
year = entry.get('movie_year', None) or entry.get('series_year', None)
_name = item.get('movie_name', None) or item.get('series_name', None)
_year = item.get('movie_year', None) or item.get('series_year', None)
if (name and year) and (_name == name and _year == year):
return item
            # title matching sucks but let's try it as a last resort
if entry.get('title').lower() == item['title'].lower():
return item
class PlexWatchlist:
schema = {
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'token': {'type': 'string'},
'type': {'type': 'string', 'enum': ['movie', 'show']},
'filter': {'type': 'string', 'enum': ['available', 'released']},
},
'anyOf': [{'required': ['token']}, {'required': ['username', 'password']}],
}
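    # Hedged example of a FlexGet task configuration using the schema above
    # (YAML; the task name and token value are placeholders):
    #
    #   tasks:
    #     pull-plex-watchlist:
    #       plex_watchlist:
    #         token: '<plex-token>'
    #         type: movie
    #         filter: released
    #       accept_all: yes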
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_start(self, task, config):
import_plexaccount()
def get_list(self, config):
return PlexManagedWatchlist(**config)
@plugin.internet(logger)
def on_task_input(self, task, config):
yaml_list = PlexManagedWatchlist(**config)
yield from yaml_list
@event('plugin.register')
def register_plugin():
plugin.register(PlexWatchlist, PLUGIN_NAME, api_ver=2, interfaces=['task', 'list']) | PypiClean |
/NodeGraphQt_QuiltiX_fork-0.6.0.tar.gz/NodeGraphQt_QuiltiX_fork-0.6.0/NodeGraphQt/qgraphics/node_text_item.py | from Qt import QtWidgets, QtCore, QtGui
class NodeTextItem(QtWidgets.QGraphicsTextItem):
"""
NodeTextItem class used to display and edit the name of a NodeItem.
"""
def __init__(self, text, parent=None):
super(NodeTextItem, self).__init__(text, parent)
self._locked = False
self.set_locked(False)
self.set_editable(False)
def mouseDoubleClickEvent(self, event):
"""
Re-implemented to jump into edit mode when user clicks on node text.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent): mouse event.
"""
if not self._locked:
if event.button() == QtCore.Qt.LeftButton:
self.set_editable(True)
event.ignore()
return
super(NodeTextItem, self).mouseDoubleClickEvent(event)
def keyPressEvent(self, event):
"""
Re-implemented to catch the Return & Escape keys when in edit mode.
Args:
event (QtGui.QKeyEvent): key event.
"""
if event.key() == QtCore.Qt.Key_Return:
current_text = self.toPlainText()
self.set_node_name(current_text)
self.set_editable(False)
elif event.key() == QtCore.Qt.Key_Escape:
self.setPlainText(self.node.name)
self.set_editable(False)
super(NodeTextItem, self).keyPressEvent(event)
def focusOutEvent(self, event):
"""
Re-implemented to jump out of edit mode.
Args:
event (QtGui.QFocusEvent):
"""
current_text = self.toPlainText()
self.set_node_name(current_text)
self.set_editable(False)
super(NodeTextItem, self).focusOutEvent(event)
def set_editable(self, value=False):
"""
Set the edit mode for the text item.
Args:
value (bool): true in edit mode.
"""
if self._locked:
return
if value:
self.setTextInteractionFlags(
QtCore.Qt.TextEditable |
QtCore.Qt.TextSelectableByMouse |
QtCore.Qt.TextSelectableByKeyboard
)
else:
self.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
cursor = self.textCursor()
cursor.clearSelection()
self.setTextCursor(cursor)
def set_node_name(self, name):
"""
        Updates the node name through the "NodeViewer().node_name_changed"
        signal, which then updates the node name through the BaseNode object;
        this registers the change as an undo command.
Args:
name (str): new node name.
"""
name = name.strip()
if name != self.node.name:
viewer = self.node.viewer()
viewer.node_name_changed.emit(self.node.id, name)
def set_locked(self, state=False):
"""
Locks the text item so it can not be editable.
Args:
state (bool): lock state.
"""
self._locked = state
if self._locked:
self.setFlag(QtWidgets.QGraphicsItem.ItemIsFocusable, False)
self.setCursor(QtCore.Qt.ArrowCursor)
self.setToolTip('')
else:
self.setFlag(QtWidgets.QGraphicsItem.ItemIsFocusable, True)
self.setToolTip('double-click to edit node name.')
self.setCursor(QtCore.Qt.IBeamCursor)
@property
def node(self):
"""
Get the parent node item.
Returns:
NodeItem: parent node qgraphics item.
"""
return self.parentItem() | PypiClean |
/Joule-0.9.41.tar.gz/Joule-0.9.41/joule/controllers/data_controller.py | from sqlalchemy.orm import Session
from aiohttp import web
import numpy as np
import asyncio
import logging
import aiohttp
from joule.models import (folder, DataStore, DataStream,
InsufficientDecimationError, DataError,
pipes)
from joule.models.supervisor import Supervisor
from joule.errors import SubscriptionError
log = logging.getLogger('joule')
async def read_json(request: web.Request):
return await read(request, json=True)
async def read(request: web.Request, json=False):
if 'subscribe' in request.query and request.query['subscribe'] == '1':
return await _subscribe(request, json)
else:
return await _read(request, json)
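# Illustrative query strings accepted by the handlers below (the URL paths the
# handlers are mounted on are defined in the application's routing setup and
# are not shown here):
#   ?id=42&start=<start_us>&end=<end_us>&max-rows=1000
#   ?path=/folder/stream&decimation-level=16
#   ?path=/folder/stream&subscribe=1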
async def _read(request: web.Request, json):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
# parse optional parameters
params = {'start': None, 'end': None, 'max-rows': None, 'decimation-level': None}
param = "" # to appease type checker
try:
for param in params:
if param in request.query:
params[param] = int(request.query[param])
except ValueError:
return web.Response(text="parameter [%s] must be an int" % param, status=400)
# make sure parameters make sense
if ((params['start'] is not None and params['end'] is not None) and
(params['start'] >= params['end'])):
return web.Response(text="[start] must be < [end]", status=400)
if params['max-rows'] is not None and params['max-rows'] <= 0:
return web.Response(text="[max-rows] must be > 0", status=400)
if params['decimation-level'] is not None and params['decimation-level'] <= 0:
return web.Response(text="[decimation-level] must be > 0", status=400)
# --- Binary Streaming Handler ---
resp = None
async def stream_data(data: np.ndarray, layout, factor):
nonlocal resp
if resp is None:
resp = web.StreamResponse(status=200,
headers={'joule-layout': layout,
'joule-decimation': str(factor)})
resp.enable_chunked_encoding()
await resp.prepare(request)
await resp.write(data.tobytes())
# --- JSON Handler ---
data_blocks = [] # array of data segments
data_segment = None
decimation_factor = 1
async def retrieve_data(data: np.ndarray, layout, factor):
nonlocal data_blocks, data_segment, decimation_factor
decimation_factor = factor
if np.array_equal(data, pipes.interval_token(layout)):
if data_segment is not None:
data_blocks.append(data_segment.tolist())
data_segment = None
else:
data = np.c_[data['timestamp'][:, None], data['data']]
if data_segment is None:
data_segment = data
else:
data_segment = np.vstack((data_segment, data))
if json:
callback = retrieve_data
else:
callback = stream_data
# create an extraction task
try:
await data_store.extract(stream, params['start'], params['end'],
callback=callback,
max_rows=params['max-rows'],
decimation_level=params['decimation-level'])
except InsufficientDecimationError as e:
return web.Response(text="decimated data is not available: %s" % e, status=400)
except DataError as e:
msg = str(e)
if 'no such stream' in msg.lower() and (params['decimation-level'] is not None): # pragma: no cover
# clean up error message when user requested a particular decimation level
msg = "requested decimation level [%d] does not exist" % params['decimation-level']
return web.Response(text="read error: %s" % msg, status=400)
if json:
# put the last data_segment on
if data_segment is not None:
data_blocks.append(data_segment.tolist())
return web.json_response({"data": data_blocks, "decimation_factor": decimation_factor})
else:
if resp is None:
return web.json_response(text="this stream has no data", status=400)
return resp
async def _subscribe(request: web.Request, json: bool):
db: Session = request.app["db"]
supervisor: Supervisor = request.app['supervisor']
if json:
return web.Response(text="JSON subscription not implemented", status=400)
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
pipe = pipes.LocalPipe(stream.layout)
try:
unsubscribe = supervisor.subscribe(stream, pipe)
except SubscriptionError:
return web.Response(text="stream is not being produced", status=400)
resp = web.StreamResponse(status=200,
headers={'joule-layout': stream.layout,
'joule-decimation': '1'})
resp.enable_chunked_encoding()
try:
await resp.prepare(request)
except ConnectionResetError:
unsubscribe()
return resp
try:
while True:
try:
data = await pipe.read()
except pipes.EmptyPipe:
unsubscribe()
return resp
pipe.consume(len(data))
if len(data) > 0:
await resp.write(data.tobytes())
if pipe.end_of_interval:
await resp.write(pipes.interval_token(stream.layout).tobytes())
except asyncio.CancelledError as e:
unsubscribe()
        # propagate the CancelledError up
raise e
except ConnectionResetError:
unsubscribe()
return resp
async def intervals(request: web.Request):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
# parse time bounds if specified
try:
if 'start' in request.query:
start = int(request.query['start'])
else:
start = None
if 'end' in request.query:
end = int(request.query['end'])
else:
end = None
except ValueError:
return web.Response(text="[start] and [end] must be an integers", status=400)
# make sure parameters make sense
if (start is not None and end is not None) and start >= end:
return web.Response(text="[start] must be < [end]", status=400)
return web.json_response(await data_store.intervals(stream, start, end))
async def write(request: web.Request):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
# spawn in inserter task
stream.is_destination = True
db.commit()
pipe = pipes.InputPipe(name="inbound", stream=stream, reader=request.content)
try:
task = await data_store.spawn_inserter(stream, pipe, insert_period=0)
await task
except DataError as e:
stream.is_destination = False
db.commit()
print("closing stream due to error")
return web.Response(text=str(e), status=400)
except asyncio.CancelledError as e:
raise e
finally:
stream.is_destination = False
db.commit()
return web.Response(text="ok")
async def remove(request: web.Request):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
# parse time bounds
start = None
end = None
try:
if 'start' in request.query:
start = int(request.query['start'])
if 'end' in request.query:
end = int(request.query['end'])
except ValueError:
return web.Response(text="[start] and [end] must be integers", status=400)
# make sure bounds make sense
if ((start is not None and end is not None) and
(start >= end)):
return web.Response(text="[start] must be < [end]", status=400)
await data_store.remove(stream, start, end)
return web.Response(text="ok")
async def consolidate(request):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
# parse time bounds
start = None
end = None
try:
if 'start' in request.query:
start = int(request.query['start'])
if 'end' in request.query:
end = int(request.query['end'])
except ValueError:
return web.Response(text="[start] and [end] must be integers", status=400)
# make sure bounds make sense
if ((start is not None and end is not None) and
(start >= end)):
return web.Response(text="[start] must be < [end]", status=400)
# parse the max_gap parameter
if 'max_gap' not in request.query:
return web.Response(text="specify max_gap as us integer", status=400)
try:
max_gap = int(request.query['max_gap'])
if max_gap <= 0:
raise ValueError()
except ValueError:
return web.Response(text="max_gap must be postive integer", status=400)
num_removed = await data_store.consolidate(stream, start, end, max_gap)
return web.json_response(data={"num_consolidated": num_removed})
async def decimate(request):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream,request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
await data_store.decimate(stream)
return web.Response(text="ok")
async def drop_decimations(request):
db: Session = request.app["db"]
data_store: DataStore = request.app["data-store"]
# find the requested stream
if 'path' in request.query:
stream = folder.find_stream_by_path(request.query['path'], db, stream_type=DataStream)
elif 'id' in request.query:
stream = db.get(DataStream, request.query["id"])
else:
return web.Response(text="specify an id or a path", status=400)
if stream is None:
return web.Response(text="stream does not exist", status=404)
await data_store.drop_decimations(stream)
    return web.Response(text="ok")
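# Illustrative wiring of the handlers above in an aiohttp application (the
# route paths and the construction of the Session / DataStore objects are
# assumptions made for this sketch, not part of the module itself):
#
#     app = web.Application()
#     app["db"] = session               # SQLAlchemy Session
#     app["data-store"] = data_store    # DataStore backend
#     app.add_routes([web.post("/remove", remove),
#                     web.post("/consolidate", consolidate),
#                     web.post("/decimate", decimate)])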
/NSoL-0.1.14.tar.gz/NSoL-0.1.14/nsol/similarity_measures.py
import skimage.measure
import numpy as np
class SimilarityMeasures(object):
##
# Compute sum of absolute differences (symmetric)
# \date 2017-08-04 10:09:05+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return sum of absolute differences as scalar value >= 0
#
@staticmethod
def sum_of_absolute_differences(x, x_ref):
if x.shape != x_ref.shape:
raise ValueError("Input data shapes do not match")
return np.sum(np.abs(x - x_ref))
##
# Compute mean of absolute error (symmetric)
# \date 2019-02-10 11:29:07+0000
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return mean of absolute error as scalar value >= 0
#
@staticmethod
def mean_absolute_error(x, x_ref):
mae = SimilarityMeasures.sum_of_absolute_differences(x, x_ref)
mae /= float(x.size)
return mae
##
# Compute sum of squared differences (symmetric)
# \date 2017-08-04 10:09:05+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return sum of squared differences as scalar value >= 0
#
@staticmethod
def sum_of_squared_differences(x, x_ref):
if x.shape != x_ref.shape:
raise ValueError("Input data shapes do not match")
return np.sum(np.square(x - x_ref))
##
# Compute mean of squared error (symmetric)
# \date 2017-08-04 10:09:46+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return mean of squared error as scalar value >= 0
#
@staticmethod
def mean_squared_error(x, x_ref):
mse = SimilarityMeasures.sum_of_squared_differences(x, x_ref)
mse /= float(x.size)
return mse
##
# Compute root mean square error (symmetric)
# \date 2017-08-04 10:09:46+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
    # \return       root mean square error as scalar value >= 0
#
@staticmethod
def root_mean_square_error(x, x_ref):
return np.sqrt(SimilarityMeasures.mean_squared_error(x, x_ref))
##
# Compute peak signal to noise ratio (non-symmetric)
# \date 2017-08-04 10:10:13+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return peak signal to noise ratio as scalar value
#
@staticmethod
def peak_signal_to_noise_ratio(x, x_ref):
mse = SimilarityMeasures.mean_squared_error(x, x_ref)
return 10 * np.log10(np.max(x_ref) ** 2 / mse)
##
# Compute normalized cross correlation (symmetric)
# \date 2017-08-04 10:12:14+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return Normalized cross correlation as scalar value between -1 and 1
#
@staticmethod
def normalized_cross_correlation(x, x_ref):
if x.shape != x_ref.shape:
raise ValueError("Input data shapes do not match")
ncc = np.sum((x - x.mean()) * (x_ref - x_ref.mean()))
ncc /= float(x.size * x.std(ddof=1) * x_ref.std(ddof=1))
return ncc
##
# Compute structural similarity (symmetric)
# \see Wang, Z. et al., 2004. Image Quality Assessment: From Error
# Visibility to Structural Similarity. IEEE Transactions on Image
# Processing, 13(4), pp.600-612.
# \date 2017-08-04 10:16:32+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
#
# \return Structural similarity as scalar value between 0 and 1
#
@staticmethod
def structural_similarity(x, x_ref):
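        # Note: scikit-image >= 0.18 removed skimage.measure.compare_ssim;
        # the equivalent call there is skimage.metrics.structural_similarity.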
return skimage.measure.compare_ssim(x, x_ref)
##
# Compute Shannon entropy
#
# Shannon entropy H(X) = - sum p(x) * ln(p(x))
# \see Pluim, J.P.W., Maintz, J.B.A. & Viergever, M.A., 2003.
# Mutual-information-based registration of medical images: a
# survey. IEEE Transactions on Medical Imaging, 22(8), pp.986-1004.
# \date 2017-08-04 10:21:02+0100
#
# \param x numpy data array
# \param bins number of bins for histogram, int
#
# \return Shannon entropy as scalar value \in [0, log_b(n)] (e.g.
# Wikipedia)
#
@staticmethod
def shannon_entropy(x, bins=100):
# histogram is computed over flattened array
hist, bin_edges = np.histogram(x, bins=bins)
# Compute probabilities
prob = hist / float(np.sum(hist))
entropy = - sum([p * np.log(p) for p in prob.flatten() if p != 0])
return entropy
##
# Compute joint entropy (symmetric)
#
# Joint entropy H(X,Y) = - sum p(x,y) * ln(p(x,y))
# \see Pluim, J.P.W., Maintz, J.B.A. & Viergever, M.A., 2003.
# Mutual-information-based registration of medical images: a
# survey. IEEE Transactions on Medical Imaging, 22(8), pp.986-1004.
# \date 2017-08-04 10:35:18+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
# \param bins number of bins for histogram, sequence or int
#
# \return Joint entropy as scalar value >=0
#
@staticmethod
def joint_entropy(x, x_ref, bins=100):
hist, x_edges, y_edges = np.histogram2d(
x.flatten(), x_ref.flatten(), bins=bins)
# Compute probabilities
prob = hist / float(np.sum(hist))
jentropy = - sum([p * np.log(p) for p in prob.flatten() if p != 0])
return jentropy
##
# Compute mutual information (symmetric)
#
    # MI(X,Y) = sum p(x,y) * ln( p(x,y) / (p(x) * p(y)) ) = H(X) + H(Y) - H(X,Y)
# \see Pluim, J.P.W., Maintz, J.B.A. & Viergever, M.A., 2003.
# Mutual-information-based registration of medical images: a
# survey. IEEE Transactions on Medical Imaging, 22(8), pp.986-1004.
# \see Skouson, M.B., Quji Guo & Zhi-Pei Liang, 2001. A bound on mutual
# information for image registration. IEEE Transactions on Medical
# Imaging, 20(8), pp.843-846.
# \date 2017-08-04 10:40:35+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
# \param bins number of bins for histogram, sequence or int
#
# \return Mutual information as scalar value >= 0 with upper bound as in
# Skouson2001
#
@staticmethod
def mutual_information(x, x_ref, bins=100):
mi = SimilarityMeasures.shannon_entropy(x, bins=bins)
mi += SimilarityMeasures.shannon_entropy(x_ref, bins=bins)
mi -= SimilarityMeasures.joint_entropy(x, x_ref, bins=bins)
return mi
##
    # Compute normalized mutual information (symmetric)
    #
    # NMI(X,Y) = (H(X) + H(Y)) / H(X,Y)
# \see Pluim, J.P.W., Maintz, J.B.A. & Viergever, M.A., 2003.
# Mutual-information-based registration of medical images: a
# survey. IEEE Transactions on Medical Imaging, 22(8), pp.986-1004.
# \date 2017-08-04 10:40:35+0100
#
# \param x numpy data array
# \param x_ref reference numpy data array
# \param bins number of bins for histogram, sequence or int
#
# \return Normalized mutual information as scalar value >= 0
#
@staticmethod
def normalized_mutual_information(x, x_ref, bins=100):
nmi = SimilarityMeasures.shannon_entropy(x, bins=bins)
nmi += SimilarityMeasures.shannon_entropy(x_ref, bins=bins)
nmi /= SimilarityMeasures.joint_entropy(x, x_ref, bins=bins)
return nmi
##
# Compute Dice's score (Dice's coefficient).
#
# dice(A, B) = 2 * |A \cap B | / (|A| + |B|)
# \see Dice, L.R., 1945. Measures of the Amount of Ecologic Association
# Between Species. Ecology, 26(3), pp.297-302.
# \date 2017-08-04 11:11:21+0100
#
# \param x numpy data array, bool
# \param x_ref reference numpy data array, bool
#
# \return Dice score between 0 and 1
#
@staticmethod
def dice_score(x, x_ref):
        if x.dtype != np.dtype(bool) or \
                x_ref.dtype != np.dtype(bool):
raise ValueError("x and x_ref need to be of type boolean")
dice = 2 * np.sum(x * x_ref)
dice /= np.sum(x) + np.sum(x_ref)
return dice
# Dictionary for all similarity measures
similarity_measures = {
"SSD": sum_of_squared_differences.__func__,
"MAE": mean_absolute_error.__func__,
"MSE": mean_squared_error.__func__,
"RMSE": root_mean_square_error.__func__,
"PSNR": peak_signal_to_noise_ratio.__func__,
"SSIM": structural_similarity.__func__,
"NCC": normalized_cross_correlation.__func__,
"MI": mutual_information.__func__,
"NMI": normalized_mutual_information.__func__,
}
# Values for each similarity measure to 'define' undefined states
UNDEF = {
"SSD": np.NaN,
"MAE": np.NaN,
"MSE": np.NaN,
"RMSE": np.NaN,
"PSNR": np.NaN,
"SSIM": np.NaN,
"NCC": np.NaN,
"MI": np.NaN,
"NMI": np.NaN,
    }
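# Illustrative usage (not part of the original module): compare a noisy array
# against its reference with a few of the measures defined above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_ref = rng.random((32, 32))
    x = x_ref + 0.01 * rng.standard_normal((32, 32))
    print("MSE :", SimilarityMeasures.mean_squared_error(x, x_ref))
    print("PSNR:", SimilarityMeasures.peak_signal_to_noise_ratio(x, x_ref))
    print("NCC :", SimilarityMeasures.normalized_cross_correlation(x, x_ref))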
/Jupytils-0.41100000000000003.tar.gz/Jupytils-0.41100000000000003/ExcelFormulas.ipynb
```
#========================================================================
# Description: Tokenise an Excel formula using an implementation of
# E. W. Bachtal's algorithm, found here:
#
# http://ewbi.blogs.com/develops/2004/12/excel_formula_p.html
#
# Tested with Python v2.5 (win32)
# Author: Robin Macharg
# Copyright: Algorithm (c) E. W. Bachtal, this implementation (c) R. Macharg
#
# CVS Info:
# $Header: T:\\cvsarchive/Excel\040export\040&\040import\040XML/ExcelXMLTransform/EWBI_Javascript_port/jsport.py,v 1.5 2006/12/07 13:41:08 rmacharg Exp $
#
# Modification History
#
# Date Author Comment
# =======================================================================
# 2006/11/29 - RMM - Made strictly class-based.
# Added parse, render and pretty print methods
# 2006/11 - RMM - RMM = Robin Macharg
# Created
#========================================================================
#========================================================================
# Class: ExcelParserTokens
# Description: Inheritable container for token definitions
#
# Attributes: Self explanatory
#
# Methods: None
#========================================================================
class f_token:
def __init__(self, value, type, subtype):
self.tvalue = value
self.ttype = type
self.tsubtype = subtype
def tostr(self):
        t = self
        return t.tvalue + " <" + t.ttype + "> <" + t.tsubtype + ">"
class ExcelParserTokens:
TOK_TYPE_NOOP = "noop";
TOK_TYPE_OPERAND = "operand";
TOK_TYPE_FUNCTION = "function";
TOK_TYPE_SUBEXPR = "subexpression";
TOK_TYPE_ARGUMENT = "argument";
TOK_TYPE_OP_PRE = "operator-prefix";
TOK_TYPE_OP_IN = "operator-infix";
TOK_TYPE_OP_POST = "operator-postfix";
TOK_TYPE_WSPACE = "white-space";
TOK_TYPE_UNKNOWN = "unknown"
TOK_SUBTYPE_START = "start";
TOK_SUBTYPE_STOP = "stop";
TOK_SUBTYPE_TEXT = "text";
TOK_SUBTYPE_NUMBER = "number";
TOK_SUBTYPE_LOGICAL = "logical";
TOK_SUBTYPE_ERROR = "error";
TOK_SUBTYPE_RANGE = "range";
TOK_SUBTYPE_MATH = "math";
TOK_SUBTYPE_CONCAT = "concatenate";
TOK_SUBTYPE_INTERSECT = "intersect";
TOK_SUBTYPE_UNION = "union";
#========================================================================
# Class: f_token
# Description: Encapsulate a formula token
#
# Attributes: tvalue -
# ttype - See token definitions, above, for values
# tsubtype - See token definitions, above, for values
#
# Methods: f_token - __init__()
#========================================================================
#========================================================================
# Class: f_tokens
# Description: An ordered list of tokens
# Attributes: items - Ordered list
# index - Current position in the list
#
# Methods: f_tokens - __init__()
# f_token - add() - Add a token to the end of the list
# None - addRef() - Add a token to the end of the list
# None - reset() - reset the index to -1
# Boolean - BOF() - End of list?
# Boolean - EOF() - Beginning of list?
# Boolean - moveNext() - Move the index along one
# f_token/None - current() - Return the current token
# f_token/None - next() - Return the next token (leave the index unchanged)
# f_token/None - previous() - Return the previous token (leave the index unchanged)
#========================================================================
class f_tokens:
def __init__(self):
self.items = []
self.index = -1
def add(self, value, type, subtype=""):
if (not subtype):
subtype = ""
token = f_token(value, type, subtype)
self.addRef(token)
return token
def addRef(self, token):
self.items.append(token)
def reset(self):
self.index = -1
def BOF(self):
return self.index <= 0
def EOF(self):
return self.index >= (len(self.items) - 1)
def moveNext(self):
if self.EOF():
return False
self.index += 1
return True
def current(self):
if self.index == -1:
return None
return self.items[self.index]
def next(self):
if self.EOF():
return None
return self.items[self.index + 1]
def previous(self):
if self.index < 1:
return None
return self.items[self.index -1]
#========================================================================
# Class: f_tokenStack
# Inherits: ExcelParserTokens - a list of token values
# Description: A LIFO stack of tokens
#
# Attributes: items - Ordered list
#
# Methods: f_tokenStack - __init__()
# None - push(token) - Push a token onto the stack
# f_token/None - pop() - Pop a token off the stack
# f_token/None - token() - Non-destructively return the top item on the stack
# String - type() - Return the top token's type
# String - subtype() - Return the top token's subtype
# String - value() - Return the top token's value
#========================================================================
class f_tokenStack(ExcelParserTokens):
def __init__(self):
self.items = []
def push(self, token):
self.items.append(token)
def pop(self):
token = self.items.pop()
return f_token("", token.ttype, self.TOK_SUBTYPE_STOP)
def token(self):
# Note: this uses Pythons and/or "hack" to emulate C's ternary operator (i.e. cond ? exp1 : exp2)
return ((len(self.items) > 0) and [self.items[len(self.items) - 1]] or [None])[0]
def value(self):
return ((self.token()) and [(self.token()).tvalue] or [""])[0]
def type(self):
t = self.token()
return ((self.token()) and [(self.token()).ttype] or [""])[0]
def subtype(self):
return ((self.token()) and [(self.token()).tsubtype] or [""])[0]
#========================================================================
# Class: ExcelParser
# Description: Parse an Excel formula into a stream of tokens
# Attributes:
#
# Methods: f_tokens - getTokens(formula) - return a token stream (list)
#========================================================================
class ExcelParser(ExcelParserTokens):
def getTokens(self, formula):
def currentChar():
return formula[offset]
def doubleChar():
return formula[offset:offset+2]
def nextChar():
# JavaScript returns an empty string if the index is out of bounds,
# Python throws an IndexError. We mimic this behaviour here.
try:
formula[offset+1]
except IndexError:
return ""
else:
return formula[offset+1]
def EOF():
return offset >= len(formula)
tokens = f_tokens()
tokenStack = f_tokenStack()
offset = 0
token = ""
inString = False
inPath = False
inRange = False
inError = False
while (len(formula) > 0):
if (formula[0] == " "):
formula = formula[1:]
else:
if (formula[0] == "="):
formula = formula[1:]
break;
# state-dependent character evaluation (order is important)
while not EOF():
# double-quoted strings
# embeds are doubled
# end marks token
if inString:
if currentChar() == "\"":
if nextChar() == "\"":
token += "\""
offset += 1
else:
inString = False
tokens.add(token, self.TOK_TYPE_OPERAND, self.TOK_SUBTYPE_TEXT)
token = ""
else:
token += currentChar()
offset += 1
continue
# single-quoted strings (links)
# embeds are double
# end does not mark a token
if inPath:
if currentChar() == "'":
if nextChar() == "'":
token += "'"
offset += 1
else:
inPath = False
else:
token += currentChar()
offset += 1;
continue;
# bracketed strings (range offset or linked workbook name)
# no embeds (changed to "()" by Excel)
# end does not mark a token
if inRange:
if currentChar() == "]":
inRange = False
token += currentChar()
offset += 1
continue
# error values
# end marks a token, determined from absolute list of values
if inError:
token += currentChar()
offset += 1
if ",#NULL!,#DIV/0!,#VALUE!,#REF!,#NAME?,#NUM!,#N/A,".find("," + token + ",") != -1:
inError = False
tokens.add(token, self.TOK_TYPE_OPERAND, self.TOK_SUBTYPE_ERROR)
token = ""
continue;
            # independent character evaluation (order not important)
#
# establish state-dependent character evaluations
if currentChar() == "\"":
if len(token) > 0:
# not expected
tokens.add(token, self.TOK_TYPE_UNKNOWN)
token = ""
inString = True
offset += 1
continue
if currentChar() == "'":
if len(token) > 0:
# not expected
tokens.add(token, self.TOK_TYPE_UNKNOWN)
token = ""
inPath = True
offset += 1
continue
if (currentChar() == "["):
inRange = True
token += currentChar()
offset += 1
continue
if (currentChar() == "#"):
if (len(token) > 0):
# not expected
tokens.add(token, self.TOK_TYPE_UNKNOWN)
token = ""
inError = True
token += currentChar()
offset += 1
continue
# mark start and end of arrays and array rows
if (currentChar() == "{"):
if (len(token) > 0):
# not expected
tokens.add(token, self.TOK_TYPE_UNKNOWN)
token = ""
tokenStack.push(tokens.add("ARRAY", self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
tokenStack.push(tokens.add("ARRAYROW", self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
offset += 1
continue
if (currentChar() == ";"):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.addRef(tokenStack.pop())
tokens.add(",", self.TOK_TYPE_ARGUMENT)
tokenStack.push(tokens.add("ARRAYROW", self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
offset += 1
continue
if (currentChar() == "}"):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.addRef(tokenStack.pop())
tokens.addRef(tokenStack.pop())
offset += 1
continue
# trim white-space
if (currentChar() == " "):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.add("", self.TOK_TYPE_WSPACE)
offset += 1
                while ((not EOF()) and (currentChar() == " ")):
offset += 1
continue
# multi-character comparators
if (",>=,<=,<>,".find("," + doubleChar() + ",") != -1):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.add(doubleChar(), self.TOK_TYPE_OP_IN, self.TOK_SUBTYPE_LOGICAL)
offset += 2
continue
# standard infix operators
if ("+-*/^&=><".find(currentChar()) != -1):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.add(currentChar(), self.TOK_TYPE_OP_IN)
offset += 1
continue
# standard postfix operators
if ("%".find(currentChar()) != -1):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.add(currentChar(), self.TOK_TYPE_OP_POST)
offset += 1
continue
# start subexpression or function
if (currentChar() == "("):
if (len(token) > 0):
tokenStack.push(tokens.add(token, self.TOK_TYPE_FUNCTION, self.TOK_SUBTYPE_START))
token = ""
else:
tokenStack.push(tokens.add("", self.TOK_TYPE_SUBEXPR, self.TOK_SUBTYPE_START))
offset += 1
continue
# function, subexpression, array parameters
if (currentChar() == ","):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
if (not (tokenStack.type() == self.TOK_TYPE_FUNCTION)):
tokens.add(currentChar(), self.TOK_TYPE_OP_IN, self.TOK_SUBTYPE_UNION)
else:
tokens.add(currentChar(), self.TOK_TYPE_ARGUMENT)
offset += 1
continue
# stop subexpression
if (currentChar() == ")"):
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
token = ""
tokens.addRef(tokenStack.pop())
offset += 1
continue
# token accumulation
token += currentChar()
offset += 1
# dump remaining accumulation
if (len(token) > 0):
tokens.add(token, self.TOK_TYPE_OPERAND)
# move all tokens to a new collection, excluding all unnecessary white-space tokens
tokens2 = f_tokens()
while (tokens.moveNext()):
token = tokens.current();
if (token.ttype == self.TOK_TYPE_WSPACE):
if ((tokens.BOF()) or (tokens.EOF())):
pass
elif (not(
((tokens.previous().ttype == self.TOK_TYPE_FUNCTION) and (tokens.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
((tokens.previous().ttype == self.TOK_TYPE_SUBEXPR) and (tokens.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
(tokens.previous().ttype == self.TOK_TYPE_OPERAND)
)
):
pass
elif (not(
((tokens.next().ttype == self.TOK_TYPE_FUNCTION) and (tokens.next().tsubtype == self.TOK_SUBTYPE_START)) or
((tokens.next().ttype == self.TOK_TYPE_SUBEXPR) and (tokens.next().tsubtype == self.TOK_SUBTYPE_START)) or
(tokens.next().ttype == self.TOK_TYPE_OPERAND)
)
):
pass
else:
tokens2.add(token.tvalue, self.TOK_TYPE_OP_IN, self.TOK_SUBTYPE_INTERSECT)
continue
tokens2.addRef(token);
# switch infix "-" operator to prefix when appropriate, switch infix "+" operator to noop when appropriate, identify operand
# and infix-operator subtypes, pull "@" from in front of function names
while (tokens2.moveNext()):
token = tokens2.current()
if ((token.ttype == self.TOK_TYPE_OP_IN) and (token.tvalue == "-")):
if (tokens2.BOF()):
token.ttype = self.TOK_TYPE_OP_PRE
elif (
((tokens2.previous().ttype == self.TOK_TYPE_FUNCTION) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
((tokens2.previous().ttype == self.TOK_TYPE_SUBEXPR) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
(tokens2.previous().ttype == self.TOK_TYPE_OP_POST) or
(tokens2.previous().ttype == self.TOK_TYPE_OPERAND)
):
token.tsubtype = self.TOK_SUBTYPE_MATH;
else:
token.ttype = self.TOK_TYPE_OP_PRE
continue
if ((token.ttype == self.TOK_TYPE_OP_IN) and (token.tvalue == "+")):
if (tokens2.BOF()):
token.ttype = self.TOK_TYPE_NOOP
elif (
((tokens2.previous().ttype == self.TOK_TYPE_FUNCTION) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
((tokens2.previous().ttype == self.TOK_TYPE_SUBEXPR) and (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
(tokens2.previous().ttype == self.TOK_TYPE_OP_POST) or
(tokens2.previous().ttype == self.TOK_TYPE_OPERAND)
):
token.tsubtype = self.TOK_SUBTYPE_MATH
else:
token.ttype = self.TOK_TYPE_NOOP
continue
if ((token.ttype == self.TOK_TYPE_OP_IN) and (len(token.tsubtype) == 0)):
if (("<>=").find(token.tvalue[0:1]) != -1):
token.tsubtype = self.TOK_SUBTYPE_LOGICAL
elif (token.tvalue == "&"):
token.tsubtype = self.TOK_SUBTYPE_CONCAT
else:
token.tsubtype = self.TOK_SUBTYPE_MATH
continue
if ((token.ttype == self.TOK_TYPE_OPERAND) and (len(token.tsubtype) == 0)):
try:
float(token.tvalue)
#except (ValueError, e:
except ValueError:
if ((token.tvalue == 'TRUE') or (token.tvalue == 'FALSE')):
token.tsubtype = self.TOK_SUBTYPE_LOGICAL
else:
token.tsubtype = self.TOK_SUBTYPE_RANGE
else:
token.tsubtype = self.TOK_SUBTYPE_NUMBER
continue
if (token.ttype == self.TOK_TYPE_FUNCTION):
if (token.tvalue[0:1] == "@"):
token.tvalue = token.tvalue[1:]
continue
tokens2.reset();
# move all tokens to a new collection, excluding all noops
tokens = f_tokens()
while (tokens2.moveNext()):
if (tokens2.current().ttype != self.TOK_TYPE_NOOP):
tokens.addRef(tokens2.current())
tokens.reset()
return tokens
def parse(self, formula):
self.tokens = self.getTokens(formula)
def render(self):
output = ""
if self.tokens:
for t in self.tokens.items:
if t.ttype == self.TOK_TYPE_FUNCTION and t.tsubtype == self.TOK_SUBTYPE_START: output += t.tvalue + "("
elif t.ttype == self.TOK_TYPE_FUNCTION and t.tsubtype == self.TOK_SUBTYPE_STOP: output += ")"
elif t.ttype == self.TOK_TYPE_SUBEXPR and t.tsubtype == self.TOK_SUBTYPE_START: output += "("
elif t.ttype == self.TOK_TYPE_SUBEXPR and t.tsubtype == self.TOK_SUBTYPE_STOP: output += ")"
# TODO: add in RE substitution of " with "" for strings
elif t.ttype == self.TOK_TYPE_OPERAND and t.tsubtype == self.TOK_SUBTYPE_TEXT: output += "\"" + t.tvalue + "\""
elif t.ttype == self.TOK_TYPE_OP_IN and t.tsubtype == self.TOK_SUBTYPE_INTERSECT: output += " "
else: output += t.tvalue
return output
def prettyprint(self):
indent = 0
output = ""
if self.tokens:
for t in self.tokens.items:
if (t.tsubtype == self.TOK_SUBTYPE_STOP):
indent -= 1
output += " "*indent + t.tvalue + " <" + t.ttype +"> <" + t.tsubtype + ">" + "\n"
if (t.tsubtype == self.TOK_SUBTYPE_START):
indent += 1;
return output
#========================================================================
# Main code:
#
# A simple test-rig. Iterate through a list of test input strings,
# outputing a nested display of the token stream parsed from each one.
#========================================================================
if __name__ == "__main1__":
# Test inputs
inputs = [
# Simple test formulae
'=1+3+5',
'=3 * 4 + 5',
'=50',
'=1+1',
'=$A1',
'=$B$2',
'=SUM(B5:B15)',
'=SUM(B5:B15,D5:D15)',
'=SUM(B5:B15 A7:D7)',
'=SUM(sheet1!$A$1:$B$2)',
'=[data.xls]sheet1!$A$1',
'=SUM((A:A 1:1))',
'=SUM((A:A,1:1))',
'=SUM((A:A A1:B1))',
'=SUM(D9:D11,E9:E11,F9:F11)',
'=SUM((D9:D11,(E9:E11,F9:F11)))',
'=IF(P5=1.0,"NA",IF(P5=2.0,"A",IF(P5=3.0,"B",IF(P5=4.0,"C",IF(P5=5.0,"D",IF(P5=6.0,"E",IF(P5=7.0,"F",IF(P5=8.0,"G"))))))))',
'={SUM(B2:D2*B3:D3)}',
'=SUM(123 + SUM(456) + (45<6))+456+789',
'=AVG(((((123 + 4 + AVG(A1:A2))))))',
# E. W. Bachtal's test formulae
'=IF("a"={"a","b";"c",#N/A;-1,TRUE}, "yes", "no") & " more ""test"" text"',
'=+ AName- (-+-+-2^6) = {"A","B"} + @SUM(R1C1) + (@ERROR.TYPE(#VALUE!) = 2)',
'=IF(R13C3>DATE(2002,1,6),0,IF(ISERROR(R[41]C[2]),0,IF(R13C3>=R[41]C[2],0, IF(AND(R[23]C[11]>=55,R[24]C[11]>=20),R53C3,0))))',
'=IF(R[39]C[11]>65,R[25]C[42],ROUND((R[11]C[11]*IF(OR(AND(R[39]C[11]>=55, ' +
'R[40]C[11]>=20),AND(R[40]C[11]>=20,R11C3="YES")),R[44]C[11],R[43]C[11]))+(R[14]C[11] ' +
'*IF(OR(AND(R[39]C[11]>=55,R[40]C[11]>=20),AND(R[40]C[11]>=20,R11C3="YES")), ' +
'R[45]C[11],R[43]C[11])),0))',
]
p = ExcelParser()
for i in inputs:
print ("========================================")
print ("Formula: " + i)
p.parse(i)
print ("Pretty printed:\n", p.prettyprint())
from collections.abc import Iterable
class Stack:
def __init__(self):
self.s = []
def Empty(self):
return self.size() == 0
def Notempty(self):
return not self.Empty()
def push(self, item, debug=True):
        if (debug):
            # print("Push :", item)
            pass
if (isinstance(item, Iterable) ):
for k in item:
self.s.append(k)
else:
self.s.append(item)
def pop(self):
return self.s.pop()
def size(self):
return len(self.s)
    def dump(self):
        l = str(self.s) if self.size() < 7 else str(self.s[0:9]) + "..." + str(self.s[-1])
        return "Stack has {} items : {} ".format(self.size(), l)
def EVAL(self, p, v=None):
t = self.pop()
print('Evaluating:', t.tostr())
o = "";
gotit = False
if (t.ttype == p.TOK_TYPE_FUNCTION and t.tsubtype == p.TOK_SUBTYPE_STOP):
gotit = True
print ("Eval function:");
o = ")"
tt = self.pop();
            while not (tt.ttype == p.TOK_TYPE_FUNCTION and tt.tsubtype == p.TOK_SUBTYPE_START):
o = "," + str(tt.tvalue) + o;
tt = self.pop();
o = str(tt.tvalue) + "( " + o[1:];
print (o)
ttt = f_token(o, p.TOK_TYPE_OPERAND, p.TOK_SUBTYPE_TEXT)
self.push(ttt)
print ("Eval function: ", o , ttt);
if (t.ttype == p.TOK_TYPE_OPERAND and t.tsubtype == p.TOK_SUBTYPE_RANGE):
gotit = True
o = "RANGE(" + t.tvalue + ")";
ttt = f_token(o, p.TOK_TYPE_OPERAND, p.TOK_SUBTYPE_TEXT)
print ("Eval RANGE: ", o , ttt);
if ( not gotit ):
print ("Hmmmm: ", t.tostr());
self.push(t)
```
| PypiClean |
/NovalIDE-1.1.8-py3-none-any.whl/noval/python/analyzer.py
from noval import GetApp
import noval.python.parser.codeparser as codeparser
import threading
import noval.python.parser.scope as scope
import noval.util.utils as utils
class PythonModuleAnalyzer(object):
"""description of class"""
STATUS_START_ANALYZING = 0
STATUS_PARSING_SYNTAX = 1
STATUS_LOADING_SYNTAX_TREE = 2
STATUS_FINISH_ANALYZING = 3
def __init__(self,mod_view):
self._mod_view = mod_view
self._status = self.STATUS_START_ANALYZING
self._lock = threading.Lock()
#when close window,the flag is set to true
self._is_analyzing_stoped = False
self._module_scope = None
self._code_parser = codeparser.CodeParser()
def LoadModule(self,filename):
self._status = self.STATUS_PARSING_SYNTAX
try:
module = self._code_parser.ParsefileContent(self._mod_view.GetDocument().GetFilename(),self._mod_view.GetValue(),self._mod_view.GetDocument().file_encoding)
except Exception as e:
self._syntax_error_msg = str(e)
self.FinishAnalyzing()
return
module_scope = scope.ModuleScope(module,self._mod_view.GetCtrl().GetLineCount())
if not self.IsAnalyzingStopped():
module_scope.MakeModuleScopes()
else:
utils.GetLogger().debug("analyze module file %s is canceled by user,will not make module scopes step",filename)
if not self.IsAnalyzingStopped():
module_scope.RouteChildScopes()
else:
utils.GetLogger().debug("analyze module file %s is canceled by user,will not route child scopes step",filename)
self.ModuleScope = module_scope
def AnalyzeModuleSynchronizeTree(self,view,outlineView,force,lineNum):
# t = threading.Thread(target=self.LoadMouduleSynchronizeTree,args=(view,outlineView,force,lineNum))
#t.start()
self.LoadMouduleSynchronizeTree(view,outlineView,force,lineNum)
def LoadMouduleSynchronizeTree(self,callback_view,outlineView,force,lineNum):
with self._lock:
if self.IsAnalyzing():
utils.get_logger().debug('document %s is analyzing,will not analyze again',self._mod_view.GetDocument().GetFilename())
return True
document = self._mod_view.GetDocument()
filename = document.GetFilename()
if force:
self.LoadModule(filename)
if not force and callback_view == self._mod_view:
return False
self._status = self.STATUS_LOADING_SYNTAX_TREE
if self.ModuleScope is not None:
                #should freeze control to prevent update and treectrl flicker
if not self.IsAnalyzingStopped():
outlineView.LoadModuleAst(self.ModuleScope,self,lineNum)
else:
utils.GetLogger().debug("analyze module file %s is canceled by user,will not load and synchronize tree",filename)
            # if syntax parsing failed, clear the outline view contents
else:
outlineView._clear_tree()
self.FinishAnalyzing()
return True
@property
def ModuleScope(self):
return self._module_scope
@property
def SyntaxError(self):
return self._syntax_error_msg
@ModuleScope.setter
def ModuleScope(self,module_scope):
self._module_scope = module_scope
@property
def View(self):
return self._mod_view
def StopAnalyzing(self):
utils.get_logger().info("analyze module file %s is canceled by user,will stop analyzing",self._mod_view.GetDocument().GetFilename())
self.FinishAnalyzing()
self._is_analyzing_stoped = True
def IsAnalyzingStopped(self):
return self._is_analyzing_stoped
def IsAnalyzing(self):
return self._status == self.STATUS_PARSING_SYNTAX or self._status == self.STATUS_LOADING_SYNTAX_TREE
def FinishAnalyzing(self):
        self._status = self.STATUS_FINISH_ANALYZING
/HydPy-5.0.1-cp38-cp38-win_amd64.whl/hydpy/docs/rst/core.rst
.. _core:
Core Tools
==========
The core subpackage of *HydPy* essentially defines how models can
and should be programmed, documented and applied. As can be seen in
the side-bar, the list of modules contained in the core subpackage
is quite large. The following paragraphs give novices some hints on
which basic aspects of using *HydPy* relate to which module.
Module |hydpytools| provides the |HydPy| class. The main purpose
of this class is to help users accomplish possibly complex things via
a simple interface. Very often, you only need to initialize a
|HydPy| object and call its methods to, for example, load all input
data, perform a simulation run, and store the relevant results.
Getting an overview of the methods of class |HydPy| is therefore
generally a good idea.
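A minimal sketch of this typical workflow looks as follows (the project name
is a placeholder, and the exact method calls may differ slightly between
*HydPy* versions):

.. code-block:: python

    from hydpy import HydPy, pub

    pub.timegrids = "1996-01-01", "1997-01-01", "1d"
    hp = HydPy("my_project")
    hp.prepare_everything()
    hp.simulate()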
The documentation on module |filetools| describes the standard
directory structure of *HydPy* projects. Module |filetools|
offers some flexibility in adjusting this project structure to your
needs. Also, it is responsible for many aspects of loading data from
files and storing data to files. It is supplemented by module
|netcdftools| for reading data from and storing data to NetCDF files.
*HydPy* represents the network of a river basin via connected
objects of the classes |Node| and |Element|. These are defined in module
|devicetools|. It is often helpful to define subsets of networks, which
is provided by module |selectiontools|. In this context, reading the
documentation on module |networktools| could also be of interest, as it
implements strategies to define *HydPy* networks in large basins.
The actual data to run a certain model is handled in `control files`
(containing parameter values), `condition files` (containing state
conditions) and `sequence files` (containing input or output time
series). Modules |parametertools| and |sequencetools| provide
features to handle these different kinds of data.
Module |timetools| provides the |Timegrids| class, an instance of which
must be stored in the "global information" module |pub|. Use this
|Timegrids| object to define the time period for which data shall be
initialized and the time period for which one simulation (or multiple
simulations) shall be performed.
The other modules serve more specialized purposes. If you are thinking
about adding new code to *HydPy* or changing existing code, you
should read the documentation of some of the other modules as well.
|autodoctools| provides features for automatically generating this
online documentation. Module |testtools| provides features for
testing new code (or old code that has not been covered by the
existing tests so far). Module |objecttools| (which needs to be refactored)
provides very different kinds of features to simplify and standardize
writing *HydPy* code.
.. toctree::
:hidden:
aliastools
autodoctools
auxfiletools
devicetools
exceptiontools
filetools
hydpytools
importtools
indextools
itemtools
masktools
modeltools
netcdftools
objecttools
optiontools
parametertools
printtools
propertytools
pubtools
selectiontools
sequencetools
seriestools
testtools
timetools
typingtools
variabletools
| PypiClean |
/GQCMS-0.0.4-py3-none-any.whl/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/gqcms/Scans.py
import numpy as np
import pandas as pd
from contextlib import closing
from functools import partial
import gqcms
def LagrangeScan(
hubbard, operator, start: float, stop: float, step: float=0.05, method_str: str ="FCI", method_kwargs: dict = {}
) -> pd.DataFrame:
"""
Computes the energy and expectation value of the constrained Hubbard
Hamiltonian over a predefined range of Lagrange multipliers using the given
method. The given method should return a pandas DataFrame with at least
the columns ['E', 'C', '1PDM'].
:param hubbard: hubbard class object
:param operator: matrix representation of the feature operator
:param start: first Lagrange value, inclusive
:param stop: last Lagrange value, inclusive
:param step: difference between two subsequent Lagrange values (default is 0.05)
:param method_str: method used to solve the constrained Hubbard Hamiltonian, supported methods are
        FCI, SCI and HF (default is FCI)
:param method_kwargs: key word arguments passed to the used method
:return: pandas DataFrame with the at least the columns
['E', 'C', '1PDM', 'mu', 'expectation_value']
"""
multipliers = np.arange(start, stop + step, step)
if method_str == 'FCI':
scan_result = [
gqcms.ConstrainedFCI(hubbard, operator, m) for m in multipliers
]
return pd.concat(scan_result, ignore_index=True)
elif method_str == 'SCI':
scan_result = [
gqcms.ConstrainedSCI(hubbard, operator, m, **method_kwargs)
for m in multipliers
]
elif method_str == 'HF':
scan_result = []
for m in multipliers:
HF_solver = gqcms.ConstrainedHartreeFock(hubbard, operator, m, **method_kwargs)
scan_result.append(HF_solver.solve())
else:
# Raise error if asked method is not supported
raise ValueError("Supplied method is not supported yet.")
# Return one DataFrame
return pd.concat(scan_result, ignore_index=True, axis=1).T
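# Illustrative call of LagrangeScan (not part of the original module).  How the
# Hubbard object and the constrained operator are constructed is left
# hypothetical here; only the scan call itself follows the signature above:
#
#     hubbard = ...                            # a gqcms.Hubbard instance
#     N_0 = ...                                # matrix of the constrained operator
#     df = LagrangeScan(hubbard, N_0, start=-2.0, stop=2.0, step=0.1)
#     df[["mu", "expectation_value", "E"]]     # columns promised by the docstring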
def ExpectationValueScan(
hubbard: gqcms.Hubbard,
operator: np.ndarray,
start: float,
stop: float,
step: float = 0.05,
method_str: str = "FCI",
method_kwargs: dict = {},
iterations = 100,
processes: int = 1,
threshold: float = 1e-6,
check_threshold: float = 1e-3,
lower_boundary: int = -10,
upper_boundary: int = 10,
bPrintCheck: bool = True
) -> pd.DataFrame:
"""
Computes the energy and expectation value of the constrained Hubbard
Hamiltonian over a predefined range of expectation values
:param hubbard: hubbard class object
:param operator: matrix representation of the feature operator
:param start: first Lagrange value, inclusive
:param stop: last Lagrange value, inclusive
:param step: difference between two subsequent Lagrange values (default is 0.05)
:param method_str: method used to solve the constrained Hubbard Hamiltonian (default is FCI)
:param method_kwargs: arguments passed to the given method
:param processes: number of cores multiprocessing can use (default is 1)
:param lower_boundary: lower boundary of the interval used for the line search (default is -10)
:param upper_boundary: upper boundary of the interval used for the line search (default is 10)
:param bPrintCheck: indicate if the number of failed optimizations must be print or not (default is True)
:return: pandas DataFrame with the at least the columns
['E', 'C', '1PDM', 'mu', 'expectation_value']
"""
expectation_values = np.arange(start, stop+step, step)
# List to store the result DataFrames
scan_results = []
# Define function to optimize
if method_str == 'FCI':
def f(m, args) -> float:
"""
Compute the expectation value of the operator.
:param m: Lagrange multiplier
"""
hubbard, operator, method_kwargs = args
result = gqcms.ConstrainedFCI(hubbard, operator, m).squeeze()
return result
elif method_str == 'HF':
def f(m, args) -> float:
"""
Compute the expectation value of the operator.
:param m: Lagrange multiplier
"""
hubbard, operator, method_kwargs = args
HF_solver = gqcms.ConstrainedHartreeFock(hubbard, operator, m, **method_kwargs)
result = HF_solver.solve()
return result
elif method_str == 'SCI':
def f(m, args) -> float:
"""
Compute the expectation value of the operator.
:param m: Lagrange multiplier
"""
hubbard, operator, method_kwargs = args
result = gqcms.ConstrainedSCI(hubbard, operator, m, **method_kwargs)
return result
else:
# Raise error if asked method is not supported
raise ValueError("Supplied method is not supported yet.")
for expectation_value in expectation_values:
_, result, success = gqcms.LineSearch(
expectation_value,
f,
args=(hubbard, operator, method_kwargs),
threshold=threshold,
check_threshold=check_threshold,
lower_boundary=lower_boundary,
upper_boundary=upper_boundary,
maxiter=iterations
)
# Add success status to dataframe
result['success'] = success
# Add requested expectation value
result['requested_expectation_value'] = expectation_value
scan_results.append(result)
    # Convert list of pandas Series to a dataframe
# if method_str == "FCI":
# df = pd.concat(scan_results, ignore_index=True)
# print(df.info())
# else:
df = pd.concat(scan_results, ignore_index=True, axis=1).T
# Print how many optimizations failed
if bPrintCheck:
num_failed = df['success'].value_counts().get(False, 0)
print(f"There are {num_failed} failed optimizations")
# Return one DataFrame
    return df
/LatticeJSON-0.1.6-py3-none-any.whl/latticejson/utils.py
from itertools import chain
from warnings import warn
def tree(latticejson, name=None):
lattices = latticejson["lattices"]
def _tree(name, prefix=""):
string = f"{name}\n"
if name in lattices:
*other, last = lattices[name]
for child in other:
string += f"{prefix}├─── {_tree(child, prefix + '│ ')}"
string += f"{prefix}└─── {_tree(last, prefix + ' ')}"
return string
return _tree(latticejson["root"] if name is None else name)
def sort_lattices(latticejson, root=None, keep_unused=False):
"""Returns a sorted dict of lattice objects."""
lattices = latticejson["lattices"]
lattices_set = set(lattices)
lattices_sorted = {}
def _sort_lattices(name):
lattices_set.remove(name)
for child in lattices[name]:
if child in lattices_set:
_sort_lattices(child)
lattices_sorted[name] = lattices[name]
_sort_lattices(root if root is not None else latticejson["root"])
if keep_unused:
while len(lattices_set) > 0:
_sort_lattices(lattices_set.pop())
else:
for lattice in lattices_set:
warn(f"Discard unused lattice '{lattice}'.")
return lattices_sorted
def remove_unused(latticejson, root=None, warn_unused=False):
"""Remove unused objects starting from the `root` lattice. Also sorts lattices."""
if root is None:
root = latticejson["root"]
elements = latticejson["elements"]
lattices = latticejson["lattices"]
elements_set = set(elements)
lattices_set = set(lattices)
elements_new = {}
lattices_new = {}
def _remove_unused(name):
try:
elements_set.remove(name)
except KeyError:
pass
else:
elements_new[name] = elements[name]
return
try:
lattices_set.remove(name)
except KeyError:
pass
else:
lattice = lattices[name]
for child in lattice:
_remove_unused(child)
lattices_new[name] = lattices[name]
_remove_unused(root)
latticejson_new = latticejson.copy()
latticejson_new["root"] = root
latticejson_new["elements"] = elements_new
latticejson_new["lattices"] = lattices_new
if warn_unused:
for obj in chain(elements_set, lattices_set):
warn(f"Discard unused object '{obj}'.")
return latticejson_new
def flattened_element_sequence(latticejson, start_lattice=None):
"Returns a flattened generator of the element names in the physical order."
def _helper(lattice_name, lattices=latticejson["lattices"]):
for child in lattices[lattice_name]:
if child in lattices:
yield from _helper(child)
else:
yield child
    return _helper(start_lattice if start_lattice is not None else latticejson["root"])
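# Minimal illustrative usage (not part of the original module).  The dictionary
# below only mimics the "root"/"elements"/"lattices" keys these helpers expect;
# the element bodies are left empty for brevity.
if __name__ == "__main__":
    demo = {
        "root": "ring",
        "elements": {"d1": {}, "q1": {}},
        "lattices": {"cell": ["d1", "q1", "d1"], "ring": ["cell", "cell"]},
    }
    print(tree(demo))
    print(list(flattened_element_sequence(demo)))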
/Netfoll_TL-2.0.1-py3-none-any.whl/netfoll_tl/events/inlinequery.py
import inspect
import re
import asyncio
from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils, helpers
from ..tl import types, functions, custom
from ..tl.custom.sendergetter import SenderGetter
@name_inner_event
class InlineQuery(EventBuilder):
"""
Occurs whenever you sign in as a bot and a user
sends an inline query such as ``@bot query``.
Args:
users (`entity`, optional):
May be one or more entities (username/peer/etc.), preferably IDs.
By default, only inline queries from these users will be handled.
blacklist_users (`bool`, optional):
Whether to treat the users as a blacklist instead of
as a whitelist (default). This means that every chat
will be handled *except* those specified in ``users``
which will be ignored if ``blacklist_users=True``.
pattern (`str`, `callable`, `Pattern`, optional):
If set, only queries matching this pattern will be handled.
You can specify a regex-like string which will be matched
against the message, a callable function that returns `True`
if a message is acceptable, or a compiled regex pattern.
Example
.. code-block:: python
from telethon import events
@client.on(events.InlineQuery)
async def handler(event):
builder = event.builder
# Two options (convert user text to UPPERCASE or lowercase)
await event.answer([
builder.article('UPPERCASE', text=event.text.upper()),
builder.article('lowercase', text=event.text.lower()),
])
"""
def __init__(
self, users=None, *, blacklist_users=False, func=None, pattern=None):
super().__init__(users, blacklist_chats=blacklist_users, func=func)
if isinstance(pattern, str):
self.pattern = re.compile(pattern).match
elif not pattern or callable(pattern):
self.pattern = pattern
elif hasattr(pattern, 'match') and callable(pattern.match):
self.pattern = pattern.match
else:
raise TypeError('Invalid pattern type given')
@classmethod
def build(cls, update, others=None, self_id=None):
if isinstance(update, types.UpdateBotInlineQuery):
return cls.Event(update)
def filter(self, event):
if self.pattern:
match = self.pattern(event.text)
if not match:
return
event.pattern_match = match
return super().filter(event)
class Event(EventCommon, SenderGetter):
"""
        Represents the event of a new inline query.
Members:
query (:tl:`UpdateBotInlineQuery`):
The original :tl:`UpdateBotInlineQuery`.
Make sure to access the `text` property of the query if
you want the text rather than the actual query object.
pattern_match (`obj`, optional):
The resulting object from calling the passed ``pattern``
function, which is ``re.compile(...).match`` by default.
"""
def __init__(self, query):
super().__init__(chat_peer=types.PeerUser(query.user_id))
SenderGetter.__init__(self, query.user_id)
self.query = query
self.pattern_match = None
self._answered = False
def _set_client(self, client):
super()._set_client(client)
self._sender, self._input_sender = utils._get_entity_pair(
self.sender_id, self._entities, client._mb_entity_cache)
@property
def id(self):
"""
Returns the unique identifier for the query ID.
"""
return self.query.query_id
@property
def text(self):
"""
Returns the text the user used to make the inline query.
"""
return self.query.query
@property
def offset(self):
"""
The string the user's client used as an offset for the query.
This will either be empty or equal to offsets passed to `answer`.
"""
return self.query.offset
@property
def geo(self):
"""
If the user location is requested when using inline mode
and the user's device is able to send it, this will return
the :tl:`GeoPoint` with the position of the user.
"""
return self.query.geo
@property
def builder(self):
"""
Returns a new `InlineBuilder
<telethon.tl.custom.inlinebuilder.InlineBuilder>` instance.
"""
return custom.InlineBuilder(self._client)
async def answer(
self, results=None, cache_time=0, *,
gallery=False, next_offset=None, private=False,
switch_pm=None, switch_pm_param=''):
"""
Answers the inline query with the given results.
See the documentation for `builder` to know what kind of answers
can be given.
Args:
results (`list`, optional):
A list of :tl:`InputBotInlineResult` to use.
You should use `builder` to create these:
.. code-block:: python
builder = inline.builder
r1 = builder.article('Be nice', text='Have a nice day')
r2 = builder.article('Be bad', text="I don't like you")
await inline.answer([r1, r2])
You can send up to 50 results as documented in
https://core.telegram.org/bots/api#answerinlinequery.
Sending more will raise ``ResultsTooMuchError``,
and you should consider using `next_offset` to
paginate them.
cache_time (`int`, optional):
For how long this result should be cached on
the user's client. Defaults to 0 for no cache.
gallery (`bool`, optional):
Whether the results should show as a gallery (grid) or not.
next_offset (`str`, optional):
The offset the client will send when the user scrolls the
results and it repeats the request.
private (`bool`, optional):
Whether the results should be cached by Telegram
(not private) or by the user's client (private).
switch_pm (`str`, optional):
If set, this text will be shown in the results
to allow the user to switch to private messages.
switch_pm_param (`str`, optional):
Optional parameter to start the bot with if
`switch_pm` was used.
Example:
.. code-block:: python
@bot.on(events.InlineQuery)
async def handler(event):
builder = event.builder
rev_text = event.text[::-1]
await event.answer([
builder.article('Reverse text', text=rev_text),
builder.photo('/path/to/photo.jpg')
])
"""
if self._answered:
return
if results:
futures = [self._as_future(x) for x in results]
await asyncio.wait(futures)
# All futures will be in the `done` *set* that `wait` returns.
#
# Precisely because it's a `set` and not a `list`, it
# will not preserve the order, but since all futures
# completed we can use our original, ordered `list`.
results = [x.result() for x in futures]
else:
results = []
if switch_pm:
switch_pm = types.InlineBotSwitchPM(switch_pm, switch_pm_param)
return await self._client(
functions.messages.SetInlineBotResultsRequest(
query_id=self.query.query_id,
results=results,
cache_time=cache_time,
gallery=gallery,
next_offset=next_offset,
private=private,
switch_pm=switch_pm
)
)
@staticmethod
def _as_future(obj):
if inspect.isawaitable(obj):
return asyncio.ensure_future(obj)
f = helpers.get_running_loop().create_future()
f.set_result(obj)
            return f
/MCRAMP-0.0.3-py3-none-any.whl/mcramp/scat/guide.py
from .sprim import SPrim
import numpy as np
import pyopencl as cl
import pyopencl.array as clarr
import os
import re
class SGuide(SPrim):
"""
Scattering kernel for tapered rectangular Guide. Recreates the functionality
of the Guide component in McStas. The path of the neutron through the guide
is numerically simulated and its weight adjusted according to the reflectivity
function of the guide walls.
Intersection is taken as the point at which the neutron enters the guide and
the guide geometry is taken to lie centered along the z axis.
Parameters
----------
w1 : float
Width of the guide entrance in meters
h1 : float
Height of the guide entrance in meters
w2 : float
Width of the guide exit in meters
h2 : float
Height of the guide exit in meters
l : float
Length of the guide in meters
R0 : float
Low-angle reflectivity of the guide
Qc : float
Critical scattering vector of the guide
alpha : float
Slope of the reflectivity
m : float
m-value of the guide coating
W : float
Width of the guide supermirror cutoff
max_bounces : float
Cutoff to prevent infinite scattering due to numerical error in the kernel
Methods
-------
Data
None
Plot
None
Save
None
"""
def __init__(self, w1=0, h1=0, w2=0, h2=0, l=0,
R0=0, Qc=0, alpha=0, m=1, W=0, idx=0, ctx=0, max_bounces=50,
**kwargs):
if (("PYOPENCL_BUILD_OPTIONS" in os.environ)
and ("CONFIG_USE_DOUBLE=1" in os.environ["PYOPENCL_BUILD_OPTIONS"])):
self.w1 = np.float64(w1)
self.h1 = np.float64(h1)
self.w2 = np.float64(w2)
self.h2 = np.float64(h2)
self.l = np.float64(l)
self.R0 = np.float64(R0)
self.Qc = np.float64(Qc)
self.alpha = np.float64(alpha)
self.m = np.float64(m)
self.W = np.float64(W)
else:
self.w1 = np.float32(w1)
self.h1 = np.float32(h1)
self.w2 = np.float32(w2)
self.h2 = np.float32(h2)
self.l = np.float32(l)
self.R0 = np.float32(R0)
self.Qc = np.float32(Qc)
self.alpha = np.float32(alpha)
self.m = np.float32(m)
self.W = np.float32(W)
self.idx = np.uint32(idx)
self.max_bounces = np.uint32(max_bounces)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'guide.cl'), mode='r') as f:
self.prg = cl.Program(ctx, f.read()).build(options=r'-I "{}/include"'.format(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
def scatter_prg(self, queue, N, neutron_buf, intersection_buf, iidx_buf):
self.prg.guide_scatter(queue, (N, ),
None,
neutron_buf,
intersection_buf,
iidx_buf,
self.idx,
self.w1,
self.h1,
self.w2,
self.h2,
self.l,
self.R0,
self.Qc,
self.alpha,
self.m,
self.W,
self.max_bounces)
def lines(self):
w1_2 = self.w1 / 2
h1_2 = self.h1 / 2
w2_2 = self.w2 / 2
h2_2 = self.h2 / 2
l = self.l
x_arr = [-w1_2, -w2_2, w2_2, w1_2, -w1_2, -w1_2, w1_2, w1_2, w2_2, w2_2, w1_2, -w1_2, -w2_2, w2_2, -w2_2, -w2_2]
y_arr = [h1_2, h2_2, h2_2, h1_2, h1_2, -h1_2, -h1_2, h1_2, h2_2, -h2_2, -h1_2, -h1_2, -h2_2, -h2_2, -h2_2, h2_2]
z_arr = [0, l, l, 0, 0, 0, 0, 0, l, l, 0, 0, l, l, l, l]
        return [x_arr, y_arr, z_arr]
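# Illustrative construction (descriptive sketch only): the numeric values below
# follow common McStas guide defaults, while the OpenCL context and component
# index would normally be supplied by the MCRAMP instrument machinery.
#
#     guide = SGuide(w1=0.05, h1=0.05, w2=0.03, h2=0.03, l=2.0,
#                    R0=0.99, Qc=0.0219, alpha=6.07, m=2.0, W=0.003,
#                    idx=1, ctx=ctx)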
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/cloudferry/lib/os/migrate/cinder.py
import cStringIO
import hashlib
import inspect
import logging
import os
import random
import threading
import time
import uuid
import cloudferry
from cloudferry import config
from cloudferry import discover
from cloudferry import model
from cloudferry.model import compute
from cloudferry.model import identity
from cloudferry.model import image
from cloudferry.model import network
from cloudferry.model import storage
from cloudferry.lib.os import clients
from cloudferry.lib.os.migrate import base
from cloudferry.lib.utils import remote
from cinderclient import exceptions as cinder_exceptions
from novaclient import exceptions as nova_exceptions
from keystoneclient import exceptions as keystone_exceptions
from glanceclient import exc as glance_exceptions
from neutronclient.common import exceptions as neutron_exceptions
import netaddr
import paramiko
LOG = logging.getLogger(__name__)
LOCK = threading.Lock()
IMAGE_FILENAME = 'alpine_vol_tx.qcow2'
RSA1024_KEY = paramiko.RSAKey.generate(1024)
_image_md5 = None
_ip_counter = 2 # 0 is for network and 1 is for compute node
_used_ports = {}
class BaseSymmetricTask(base.MigrationTask):
def __init__(self, cfg, migration, obj, location, **kwargs):
self.location = location
super(BaseSymmetricTask, self).__init__(cfg, migration, obj,
name_suffix=location, **kwargs)
@property
def cloud(self):
return self.config.clouds[getattr(self.migration, self.location)]
class BaseSymmetricSingletonTask(BaseSymmetricTask,
base.SingletonMigrationTask):
def __init__(self, cfg, migration, obj, location, **kwargs):
super(BaseSymmetricSingletonTask, self).__init__(
cfg, migration, obj, location, **kwargs)
self.destructor = None
def get_singleton_key(self, *args, **kwargs):
return self.location,
def rollback(self, *args, **kwargs):
if self.destructor is not None:
self.destructor.run(self.config, self.migration)
def save_internal_state(self):
serialized_destructor = None
if self.destructor is not None:
serialized_destructor = self.destructor.dump()
return {'serialized_destructor': serialized_destructor}
def restore_internal_state(self, internal_state):
serialized_destructor = internal_state['serialized_destructor']
if serialized_destructor is None:
self.destructor = None
else:
self.destructor = base.Destructor.load(serialized_destructor)
class CreateVolume(base.MigrationTask):
default_provides = ['dst_object']
def migrate(self, source_obj, *args, **kwargs):
dst_tenant_id = _get_object_tenant_id(self.dst_cloud, source_obj)
volume_client = clients.volume_client(self.dst_cloud,
_scope(dst_tenant_id))
ovr_source_obj = self.override(source_obj)
zone = ovr_source_obj.availability_zone
vol = clients.retry(volume_client.volumes.create,
size=ovr_source_obj.size,
display_name=ovr_source_obj.name,
display_description=ovr_source_obj.description,
volume_type=ovr_source_obj.volume_type,
availability_zone=zone,
metadata=ovr_source_obj.metadata)
try:
self.created_object = clients.wait_for(
_object_status_is, volume_client, 'volumes', vol.id,
'available')
except clients.Timeout:
self._delete_volume(vol)
raise base.AbortMigration('Volume didn\'t become active')
result = self.load_from_cloud(
storage.Volume, self.dst_cloud, self.created_object)
return dict(dst_object=result)
def rollback(self, *args, **kwargs):
super(CreateVolume, self).rollback(*args, **kwargs)
if self.created_object is not None:
self._delete_volume(self.created_object)
self.created_object = None
def _delete_volume(self, vol):
tenant_id = getattr(vol, 'os-vol-tenant-attr:tenant_id')
volume_client = clients.volume_client(self.dst_cloud,
_scope(tenant_id))
try:
volume = clients.retry(
volume_client.volumes.get, vol.id,
expected_exceptions=[cinder_exceptions.NotFound])
if volume.status not in ('available', 'in-use', 'error',
'error_restoring'):
clients.retry(
volume_client.volumes.reset_state, volume, 'error',
expected_exceptions=[cinder_exceptions.NotFound])
clients.retry(volume_client.volumes.delete, volume,
expected_exceptions=[cinder_exceptions.NotFound])
except cinder_exceptions.NotFound:
LOG.warning('Can not delete cinder volume: already deleted')
def save_internal_state(self):
tenant_id = volume_id = None
if self.created_object is not None:
volume = self.created_object
volume_id = volume.id
tenant_id = getattr(volume, 'os-vol-tenant-attr:tenant_id')
return {
'tenant_id': tenant_id,
'volume_id': volume_id,
}
def restore_internal_state(self, internal_state):
tenant_id = internal_state['tenant_id']
volume_id = internal_state['volume_id']
self.created_object = None
if tenant_id is not None and volume_id is not None:
volume_client = clients.volume_client(self.dst_cloud,
_scope(tenant_id))
try:
self.created_object = clients.retry(
volume_client.volumes.get, volume_id,
expected_exceptions=[cinder_exceptions.NotFound])
except cinder_exceptions.NotFound:
LOG.warning('Failed to find volume with id %s when restoring '
'task state', volume_id)
class BootTransferVm(BaseSymmetricTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_vm'
self.image_var_name = location + '_image_id'
self.flavor_var_name = location + '_flavor'
self.net_var_name = location + '_net'
super(BootTransferVm, self).__init__(cfg, migration, obj, location,
requires=[self.image_var_name,
self.flavor_var_name,
self.net_var_name],
provides=[self.var_name])
def migrate(self, source_obj, *args, **kwargs):
int_ip_address = _allocate_ip_address(self.cloud)
nova_client = clients.compute_client(self.cloud)
self.created_object = nova_client.servers.create(
image=kwargs[self.image_var_name],
flavor=kwargs[self.flavor_var_name].flavor_id,
name='trans_vol_{}'.format(source_obj.object_id.id),
config_drive=True,
nics=[{'net-id': kwargs[self.net_var_name].object_id.id}],
meta=dict(cidr=str(int_ip_address),
internal_address=str(int_ip_address.ip),
access_key=RSA1024_KEY.get_base64()))
try:
self.created_object = clients.wait_for(
_object_status_is, nova_client, 'servers',
self.created_object.id, 'active')
except clients.Timeout:
self._delete_vm()
raise base.AbortMigration(
'Timeout waiting for VM %s to start on %s',
self.created_object.id, self.location)
result = self.load_from_cloud(
compute.Server, self.cloud, self.created_object)
return {self.var_name: result}
def rollback(self, *args, **kwargs):
super(BootTransferVm, self).rollback(*args, **kwargs)
if self.created_object is not None:
self._delete_vm()
def _delete_vm(self):
_delete_vm(self.cloud, self.created_object.id)
self.created_object = None
def save_internal_state(self):
vm_id = None
if self.created_object is not None:
vm_id = self.created_object.id
return {
'vm_id': vm_id,
}
def restore_internal_state(self, internal_state):
vm_id = internal_state['vm_id']
self.created_object = None
if vm_id is not None:
compute_client = clients.compute_client(self.cloud)
try:
self.created_object = clients.retry(
compute_client.servers.get, vm_id,
expected_exceptions=[nova_exceptions.NotFound])
except nova_exceptions.NotFound:
LOG.warning('Failed to find VM with id %s when restoring '
'task state', vm_id)
class AttachNodeLocalInterface(BaseSymmetricTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_vm'
super(AttachNodeLocalInterface, self).__init__(
cfg, migration, obj, location, requires=[self.var_name])
def migrate(self, **kwargs):
target_vm = kwargs.get(self.var_name)
with remote.RemoteExecutor(self.cloud,
target_vm.compute_node) as rexec:
br_name = 'cn_local'
rexec.sudo('brctl addbr {bridge} || true', bridge=br_name)
rexec.sudo('ip addr add {cidr} dev {bridge} || true',
cidr=_first_unused_address(self.cloud), bridge=br_name)
rexec.sudo('ip link set dev {bridge} up', bridge=br_name)
rexec.sudo('virsh attach-interface {instance} --type bridge '
'--source {bridge} --mac {mac_address} '
'--model virtio',
instance=target_vm.instance_name, bridge=br_name,
mac_address=_random_mac())
class TransferVolumeData(base.MigrationTask):
def __init__(self, *args, **kwargs):
super(TransferVolumeData, self).__init__(*args, **kwargs)
self.session_name = None
self.started_on_src_host = False
self.started_on_dst_host = False
def migrate(self, source_obj, source_vm, destination_vm, *args, **kwargs):
self.session_name = 'vol_{}_{}'.format(
source_obj.object_id.cloud, source_obj.object_id.id)
port = _allocate_port(source_vm.hypervisor_hostname, self.src_cloud)
src_ip = source_vm.metadata['internal_address']
dst_ip = destination_vm.metadata['internal_address']
listen_ip = _first_unused_address(self.src_cloud).ip
dst_private_key = self.dst_cloud.ssh_settings.private_key
agent = remote.SSHAgent()
try:
if dst_private_key is not None:
agent.start()
agent.add_key(dst_private_key)
with remote.RemoteExecutor(
self.dst_cloud, destination_vm.compute_node) as dst_re:
_wait_ip_accessible(self.dst_cloud, dst_re, dst_ip)
key_path = _deploy_pkey(dst_re)
dst_re.run('screen -S {session} -d -m '
'ssh -o UserKnownHostsFile=/dev/null '
'-o StrictHostKeyChecking=no -i {key_path} '
'root@{dst_ip} /bin/sh -c '
'"\'nc -l {dst_ip} 11111 | '
'/usr/local/bin/zstd -d | '
'dd of=/dev/vdb bs=512k\'"; sleep 1',
session=self.session_name, key_path=key_path,
dst_ip=dst_ip)
self.started_on_dst_host = True
with remote.RemoteExecutor(self.src_cloud,
source_vm.compute_node) as src_re:
_wait_ip_accessible(self.src_cloud, src_re, src_ip)
key_path = _deploy_pkey(src_re)
# Port forwarding to remote machine
src_re.run('screen -S {session} -d -m ssh -N '
'-o UserKnownHostsFile=/dev/null '
'-o StrictHostKeyChecking=no '
'-L {listen_ip}:{listen_port}:{forward_ip}:11111 '
'{dst_user}@{dst_address}; sleep 1',
agent=agent, session=self.session_name,
listen_ip=listen_ip, listen_port=port,
forward_ip=dst_ip, dst_address=dst_re.hostname,
dst_user=self.dst_cloud.ssh_settings.username)
self.started_on_src_host = True
                    LOG.info('Starting to transfer %dGB volume %s',
source_obj.size, source_obj.object_id)
data_transfer_start = time.time()
src_re.run('ssh -t -o UserKnownHostsFile=/dev/null '
'-o StrictHostKeyChecking=no -i {key_path} '
'root@{src_ip} /bin/sh -c '
'"\'dd if=/dev/vdb bs=512k | pv -r -i 30 | '
'/usr/local/bin/zstd | '
'nc -w 5 {listen_ip} {listen_port}\'"',
session=self.session_name, key_path=key_path,
listen_port=port, listen_ip=listen_ip,
src_ip=src_ip)
data_transfer_dur = time.time() - data_transfer_start
                    LOG.info('Transferred %dGB volume in %.1f seconds '
                             '(avg. speed: %.2fMB/s)', source_obj.size,
data_transfer_dur,
source_obj.size * 1024 / data_transfer_dur)
finally:
self._cleanup(source_vm, destination_vm)
agent.terminate()
def rollback(self, source_vm, destination_vm, *args, **kwargs):
super(TransferVolumeData, self).rollback(*args, **kwargs)
self._cleanup(source_vm, destination_vm)
def _close_screen_session(self, rexec):
rexec.run('screen -S {session} -x -X quit || true',
session=self.session_name)
def _cleanup(self, source_vm, destination_vm):
if self.started_on_src_host:
with remote.RemoteExecutor(self.src_cloud,
source_vm.compute_node) as rexec:
self._close_screen_session(rexec)
if self.started_on_dst_host:
with remote.RemoteExecutor(self.dst_cloud,
destination_vm.compute_node) as rexec:
self._close_screen_session(rexec)
class CleanupVms(base.MigrationTask):
def migrate(self, source_vm, destination_vm, *args, **kwargs):
self._delete_vm_obj(source_vm)
self._delete_vm_obj(destination_vm)
def _delete_vm_obj(self, vm):
cloud = self.config.clouds[vm.object_id.cloud]
_delete_vm(cloud, vm.object_id.id)
class BaseAttachmentTask(base.MigrationTask):
def _attach_volume(self, cloud, volume, vm_id):
volume_id = volume.object_id.id
nova_client = clients.compute_client(cloud)
cinder_client = clients.volume_client(
cloud, _scope(volume.tenant.object_id.id))
if _object_status_is(cinder_client, 'volumes', volume_id, 'available'):
nova_client.volumes.create_server_volume(vm_id, volume_id,
'/dev/vdb')
try:
clients.wait_for(
_object_status_is, cinder_client, 'volumes',
volume_id, 'in-use')
except clients.Timeout:
raise base.AbortMigration(
'Volume %s in cloud %s couldn\'t attach',
volume_id, cloud.name)
else:
raise base.AbortMigration(
'Volume %s in cloud %s is not available for attachment',
volume_id, cloud.name)
def _detach_volume(self, cloud, volume, vm_id, abort_migration=False):
volume_id = volume.object_id.id
nova_client = clients.compute_client(cloud)
cinder_client = clients.volume_client(
cloud, _scope(volume.tenant.object_id.id))
if _object_is_deleted(cinder_client, 'volumes', volume_id,
cinder_exceptions.NotFound):
return
if _object_status_is(cinder_client, 'volumes', volume_id, 'in-use'):
nova_client.volumes.delete_server_volume(vm_id, volume_id)
try:
clients.wait_for(_object_status_is, cinder_client, 'volumes',
volume_id, 'available')
except clients.Timeout:
if abort_migration:
raise base.AbortMigration(
                        'Volume %s in cloud %s couldn\'t detach',
volume_id, cloud.name)
class AttachSourceVolume(BaseAttachmentTask):
def migrate(self, source_obj, source_vm):
self._attach_volume(self.src_cloud, source_obj, source_vm.object_id.id)
def rollback(self, source_obj, source_vm, **kwargs):
self._detach_volume(self.src_cloud, source_obj, source_vm.object_id.id)
class AttachDestinationVolume(BaseAttachmentTask):
def migrate(self, dst_object, destination_vm):
self._attach_volume(self.dst_cloud, dst_object,
destination_vm.object_id.id)
def rollback(self, dst_object, destination_vm, **kwargs):
self._detach_volume(self.dst_cloud, dst_object,
destination_vm.object_id.id)
class DetachSourceVolume(BaseAttachmentTask):
def migrate(self, source_obj, source_vm):
self._detach_volume(self.src_cloud, source_obj, source_vm.object_id.id,
abort_migration=True)
class DetachDestinationVolume(BaseAttachmentTask):
def migrate(self, dst_object, destination_vm):
self._detach_volume(self.dst_cloud, dst_object,
destination_vm.object_id.id, abort_migration=True)
class DetachMigratedVolume(BaseAttachmentTask):
default_provides = ['attached_vm_id']
def __init__(self, cfg, migration, obj):
super(DetachMigratedVolume, self).__init__(cfg, migration, obj)
self.detached_vm_id = None
def migrate(self, source_obj, *args, **kwargs):
cinder_client = clients.volume_client(
self.src_cloud, _scope(source_obj.tenant.object_id.id))
raw_volume = clients.retry(cinder_client.volumes.get,
source_obj.object_id.id)
if raw_volume.attachments:
nova_client = clients.compute_client(self.src_cloud)
assert len(raw_volume.attachments) == 1
detached_vm_id = raw_volume.attachments[0]['server_id']
shutoff_vm(nova_client, detached_vm_id)
self._detach_volume(self.src_cloud, source_obj, detached_vm_id,
abort_migration=True)
self.detached_vm_id = detached_vm_id
return dict(attached_vm_id=self.detached_vm_id)
def rollback(self, source_obj, *args, **kwargs):
if self.detached_vm_id is not None:
self._attach_volume(self.src_cloud, source_obj,
self.detached_vm_id)
class ReattachMigratedVolume(BaseAttachmentTask):
def migrate(self, source_obj, attached_vm_id, *args, **kwargs):
if attached_vm_id is None:
return None
self._attach_volume(self.src_cloud, source_obj, attached_vm_id)
class ImageDestructor(base.Destructor):
def __init__(self, location, image_id):
self.location = location
self.image_id = image_id
def get_signature(self):
return self.location, self.image_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
image_client = clients.image_client(cloud)
try:
with model.Session() as session:
object_id = model.ObjectId(self.image_id, cloud.name)
session.delete(image.Image, object_id=object_id)
clients.retry(image_client.images.delete, self.image_id,
expected_exceptions=[glance_exceptions.NotFound])
except glance_exceptions.NotFound:
pass
class FindOrUploadImage(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_image_id'
super(FindOrUploadImage, self).__init__(
cfg, migration, obj, location, provides=[self.var_name])
def migrate(self, *args, **kwargs):
with model.Session() as session:
image_id = self._find_supported_cirros_image(session)
if image_id is None:
try:
img = self._upload_cirros_image(session)
except clients.Timeout:
raise base.AbortMigration(
'Failed to upload transfer VM image')
image_obj = self.load_from_cloud(image.Image, self.cloud, img)
session.store(image_obj)
image_id = img.id
self.destructor = ImageDestructor(self.location, image_id)
return {self.var_name: image_id,
self.destructor_var: self.destructor}
def _find_supported_cirros_image(self, session):
image_client = clients.image_client(self.cloud)
for img in session.list(image.Image, self.cloud):
if img.checksum.lower() == _get_image_md5():
# Test if image is good
image_id = img.object_id.id
try:
next(image_client.images.data(image_id))
except Exception:
LOG.debug('Failed to download part of image %s from %s',
image_id, self.location)
continue
return image_id
return None
def _upload_cirros_image(self, session):
image_client = clients.image_client(self.cloud)
        with open(_get_image_location(), 'rb') as f:
img = image_client.images.create(
data=f, name=IMAGE_FILENAME,
container_format='bare',
disk_format='qcow2',
is_public=False, protected=False,
owner=_get_admin_tenant_id(self.cloud, session))
return clients.wait_for(_object_status_is, image_client, 'images',
img.id, 'active')
class FlavorDestructor(base.Destructor):
def __init__(self, location, flavor_id, object_id):
self.location = location
self.flavor_id = flavor_id
self.object_id = object_id
def get_signature(self):
return self.object_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
nova_client = clients.compute_client(cloud)
try:
with model.Session() as session:
session.delete(compute.Flavor, object_id=self.object_id)
clients.retry(nova_client.flavors.delete, self.flavor_id,
expected_exceptions=[nova_exceptions.NotFound])
except nova_exceptions.NotFound:
pass
class FindOrCreateFlavor(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_flavor'
super(FindOrCreateFlavor, self).__init__(
cfg, migration, obj, location,
provides=[self.var_name])
def migrate(self, *args, **kwargs):
with model.Session() as session:
flavor = self._find_existing_flavor(session)
if flavor is None:
flavor = self._create_flavor()
self.destructor = FlavorDestructor(
self.location, flavor.flavor_id, flavor.object_id)
return {self.var_name: flavor,
self.destructor_var: self.destructor}
def _find_existing_flavor(self, session):
for flavor in session.list(compute.Flavor, self.cloud):
if not flavor.is_disabled \
and not flavor.is_deleted \
and flavor.vcpus == 1 \
and 48 <= flavor.memory_mb <= 64 \
and flavor.root_gb == 0 \
and flavor.ephemeral_gb == 0 \
and flavor.swap_mb == 0:
return flavor
def _create_flavor(self):
nova_client = clients.compute_client(self.cloud)
flavor_id = str(uuid.uuid4())
clients.retry(nova_client.flavors.create, 'tmp.vol_tx', 64, 1, 0,
flavorid=flavor_id, is_public=False)
flavor_discoverer = discover.get_discoverer(self.config, self.cloud,
compute.Flavor)
flavor = flavor_discoverer.discover_by_flavor_id(flavor_id)
return flavor
class NetworkDestructor(base.Destructor):
def __init__(self, location, network_id, subnet_id):
self.location = location
self.network_id = network_id
self.subnet_id = subnet_id
def get_signature(self):
return self.location, self.network_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
network_client = clients.network_client(cloud)
try:
with model.Session() as session:
net_obj_id = model.ObjectId(self.network_id, cloud.name)
subnet_obj_id = model.ObjectId(self.subnet_id, cloud.name)
session.delete(network.Network, object_id=net_obj_id)
session.delete(network.Subnet, object_id=subnet_obj_id)
clients.retry(network_client.delete_network, self.network_id,
expected_exceptions=[neutron_exceptions.NotFound])
except neutron_exceptions.NotFound:
pass
class FindOrCreateNetwork(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
self.var_name = location + '_net'
super(FindOrCreateNetwork, self).__init__(
cfg, migration, obj, location,
provides=[self.var_name])
def migrate(self, *args, **kwargs):
with model.Session() as session:
net = self._find_existing_network(session)
if net is None:
net, net_id, subnet_id = self._create_net(session)
self.destructor = NetworkDestructor(
self.location, net_id, subnet_id)
return {self.var_name: net, self.destructor_var: self.destructor}
def _find_existing_network(self, session):
for net in session.list(network.Network, self.cloud):
if net.name == 'tmp_vol_tx' and len(net.subnets) == 1:
return net
return None
def _create_net(self, session):
network_client = clients.network_client(self.cloud)
raw_net = network_client.create_network({
'network': {
'name': 'tmp_vol_tx',
'shared': False,
},
})
raw_subnet = network_client.create_subnet({
'subnet': {
'cidr': '128.0.0.0/1',
'ip_version': 4,
'gateway_ip': None,
'network_id': raw_net['network']['id']
},
})
net = self.load_from_cloud(network.Network, self.cloud,
raw_net['network'])
session.store(net)
subnet = self.load_from_cloud(network.Subnet, self.cloud,
raw_subnet['subnet'])
session.store(subnet)
return net, raw_net['network']['id'], raw_subnet['subnet']['id']
class EnsureAdminRoleDestructor(base.Destructor):
def __init__(self, location, user_id, role_id, tenant_id):
self.location = location
self.user_id = user_id
self.role_id = role_id
self.tenant_id = tenant_id
def get_signature(self):
return self.location, self.user_id, self.role_id, self.tenant_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
identity_client = clients.identity_client(cloud)
try:
clients.retry(identity_client.roles.remove_user_role,
user=self.user_id, role=self.role_id,
tenant=self.tenant_id,
expected_exceptions=[
keystone_exceptions.NotFound])
except keystone_exceptions.NotFound:
pass
class EnsureAdminRole(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
super(EnsureAdminRole, self).__init__(cfg, migration, obj, location)
self.already_member = False
self.user_id = None
self.role_id = None
self.tenant_id = None
def get_singleton_key(self, source_obj, *args, **kwargs):
return self.location, _get_object_tenant_id(self.cloud, source_obj)
def _user_id(self, username):
with model.Session() as session:
for user in session.list(identity.User, self.cloud):
if user.name.lower() == username.lower():
return user.object_id.id
        raise base.AbortMigration('User %s not found in cloud %s', username,
self.cloud.name)
def _role_id(self, rolename):
with model.Session() as session:
for role in session.list(identity.Role, self.cloud):
if role.name.lower() == rolename.lower():
return role.object_id.id
        raise base.AbortMigration('Role %s not found in cloud %s', rolename,
self.cloud.name)
def migrate(self, source_obj, *args, **kwargs):
cloud = self.cloud
identity_client = clients.identity_client(cloud)
destructor_var = self.destructor_var
try:
self.user_id = self._user_id(cloud.credential.username)
self.role_id = self._role_id(cloud.admin_role)
self.tenant_id = _get_object_tenant_id(self.cloud, source_obj)
clients.retry(
identity_client.roles.add_user_role,
user=self.user_id, role=self.role_id, tenant=self.tenant_id,
expected_exceptions=[keystone_exceptions.Conflict])
self.destructor = EnsureAdminRoleDestructor(
self.location, self.user_id, self.role_id, self.tenant_id)
except keystone_exceptions.Conflict:
pass
return {
destructor_var: self.destructor
}
class RestoreQuotas(base.Destructor):
def __init__(self, location, admin_tenant_id, obj_tenant_id,
net_quota, compute_quota, storage_quota):
self.location = location
self.admin_tenant_id = admin_tenant_id
self.obj_tenant_id = obj_tenant_id
self.net_quota = net_quota
self.compute_quota = compute_quota
self.storage_quota = storage_quota
def get_signature(self):
return self.location, self.admin_tenant_id, self.obj_tenant_id
def run(self, cfg, migration):
cloud = cfg.clouds[getattr(migration, self.location)]
network_client = clients.network_client(cloud)
compute_client = clients.compute_client(cloud)
storage_client = clients.volume_client(cloud)
try:
if self.net_quota is None:
clients.retry(network_client.delete_quota,
self.admin_tenant_id)
else:
clients.retry(
network_client.update_quota, self.admin_tenant_id, {
'quota': {
'network': self.net_quota['network'],
'subnet': self.net_quota['subnet'],
'port': self.net_quota['port'],
}
})
except neutron_exceptions.NotFound:
pass
if self.compute_quota:
clients.retry(compute_client.quotas.update, self.admin_tenant_id,
**self.compute_quota)
if self.storage_quota:
clients.retry(storage_client.quotas.update, self.obj_tenant_id,
**self.storage_quota)
class SetUnlimitedQuotas(BaseSymmetricSingletonTask):
def __init__(self, cfg, migration, obj, location):
super(SetUnlimitedQuotas, self).__init__(cfg, migration, obj, location)
self.obj_tenant_id = None
with model.Session() as session:
self.admin_tenant_id = _get_admin_tenant_id(self.cloud, session)
def get_singleton_key(self, source_obj, *args, **kwargs):
return self.location, _get_object_tenant_id(self.cloud, source_obj)
def migrate(self, source_obj, *args, **kwargs):
self.obj_tenant_id = _get_object_tenant_id(self.cloud, source_obj)
net_quotas = self._set_network_quotas(self.admin_tenant_id)
compute_quotas = self._set_compute_quotas(self.admin_tenant_id)
storage_quotas = self._set_cinder_quotas(self.obj_tenant_id)
self.destructor = RestoreQuotas(
self.location, self.admin_tenant_id, self.obj_tenant_id,
net_quotas, compute_quotas, storage_quotas)
return {
self.destructor_var: self.destructor
}
def _set_network_quotas(self, tenant_id):
network_client = clients.network_client(self.cloud)
for quota in network_client.list_quotas(tenant_id=tenant_id)['quotas']:
if quota['tenant_id'] == tenant_id:
break
else:
quota = None
network_client.update_quota(tenant_id, {
'quota': {
'network': -1,
'subnet': -1,
'port': -1,
}
})
return quota
def _set_compute_quotas(self, tenant_id):
compute_client = clients.compute_client(self.cloud)
return self._set_quotas(compute_client, tenant_id, cores=-1, ram=-1,
injected_file_content_bytes=-1, instances=-1,
fixed_ips=-1)
def _set_cinder_quotas(self, tenant_id):
storage_client = clients.volume_client(self.cloud)
return self._set_quotas(storage_client, tenant_id, gigabytes=-1,
snapshots=-1, volumes=-1)
@staticmethod
def _set_quotas(client, tenant_id, **kwargs):
quotas = getattr(clients.retry(client.quotas.get, tenant_id), '_info')
original = {}
for item, value in kwargs.items():
if quotas[item] != value:
original[item] = quotas[item]
clients.retry(client.quotas.update, tenant_id, **kwargs)
return original
class VolumeMigrationFlowFactory(base.MigrationFlowFactory):
migrated_class = storage.Volume
def create_flow(self, cfg, migration, obj):
return [
SetUnlimitedQuotas(cfg, migration, obj, 'source'),
SetUnlimitedQuotas(cfg, migration, obj, 'destination'),
EnsureAdminRole(cfg, migration, obj, 'source'),
EnsureAdminRole(cfg, migration, obj, 'destination'),
FindOrCreateNetwork(cfg, migration, obj, 'source'),
FindOrCreateNetwork(cfg, migration, obj, 'destination'),
FindOrCreateFlavor(cfg, migration, obj, 'source'),
FindOrCreateFlavor(cfg, migration, obj, 'destination'),
FindOrUploadImage(cfg, migration, obj, 'source'),
FindOrUploadImage(cfg, migration, obj, 'destination'),
DetachMigratedVolume(cfg, migration, obj),
CreateVolume(cfg, migration, obj),
BootTransferVm(cfg, migration, obj, 'source'),
BootTransferVm(cfg, migration, obj, 'destination'),
AttachNodeLocalInterface(cfg, migration, obj, 'source'),
AttachNodeLocalInterface(cfg, migration, obj, 'destination'),
AttachSourceVolume(cfg, migration, obj),
AttachDestinationVolume(cfg, migration, obj),
TransferVolumeData(cfg, migration, obj),
DetachSourceVolume(cfg, migration, obj),
DetachDestinationVolume(cfg, migration, obj),
CleanupVms(cfg, migration, obj),
ReattachMigratedVolume(cfg, migration, obj),
base.RememberMigration(cfg, migration, obj),
]
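# Reading of the flow above: quotas are lifted and the admin role granted in
# both clouds, a throwaway network, flavor and cirros image are found or
# created on each side, the volume is detached from its original VM, the
# destination volume is created, a transfer VM with a node-local interface is
# booted per cloud, both volumes are attached, the data is streamed between
# the VMs (TransferVolumeData), and finally the volumes are detached, the
# transfer VMs deleted, the source volume reattached and the migration
# recorded.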
def _random_mac():
mac = [0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join("%02x" % x for x in mac)
def _first_unused_address(cloud):
result = netaddr.IPNetwork(cloud.unused_network)
result.value += 1
return result
def _allocate_ip_address(cloud):
global _ip_counter
with LOCK:
result = netaddr.IPNetwork(cloud.unused_network)
result.value += _ip_counter
_ip_counter += 1
assert result in cloud.unused_network
return result
def _allocate_port(host, cloud):
with LOCK:
min_port, max_port = cloud.unused_port_range
used_host_ports = _used_ports.setdefault(host, set())
while True:
port = random.randint(min_port, max_port)
if port not in used_host_ports:
used_host_ports.add(port)
return port
else:
LOG.warning('Port %d already used on host %s in cloud %s, '
'generating new one', port, host, cloud.name)
def _get_private_key(rsa_key):
pkey = cStringIO.StringIO()
rsa_key.write_private_key(pkey)
return pkey.getvalue()
def _deploy_pkey(rexec):
key_path = rexec.run('mktemp').strip()
rexec.run('echo "{private_key}" > {key_path}; chmod 600 {key_path}',
private_key=_get_private_key(RSA1024_KEY),
key_path=key_path)
return key_path
def _wait_ip_accessible(cloud, rexec, ip_address):
waited = 0.0
while waited <= cloud.operation_timeout:
before = time.time()
try:
rexec.run('ping -c 1 -W 1 {ip_address}', ip_address=ip_address)
return
except remote.RemoteFailure:
after = time.time()
delta = after - before
if delta < 1.0:
delta = 1.0
time.sleep(1.0)
waited += delta
raise base.AbortMigration('VM couldn\'t be reached through %s', ip_address)
def _object_status_is(client, manager_name, obj_id, status):
    """Return the object if its status matches `status` (case-insensitively),
    raise AbortMigration if it is in ERROR state, and return None otherwise."""
manager = getattr(client, manager_name)
obj = clients.retry(manager.get, obj_id)
LOG.debug('Checking object %s is in status \'%s\': actual status \'%s\'',
obj_id, status.lower(), obj.status.lower())
if obj.status.lower() == status.lower():
return obj
elif obj.status.lower() == 'error':
raise base.AbortMigration('Object %s ended up in ERROR state', obj_id)
else:
return None
def _object_is_deleted(client, manager, obj_id, expected_exception):
try:
manager_obj = getattr(client, manager)
clients.retry(manager_obj.get, obj_id,
expected_exceptions=[expected_exception])
return False
except expected_exception:
return True
def _scope(tenant_id):
return config.Scope(project_id=tenant_id,
project_name=None,
domain_id=None)
def _get_admin_tenant_id(cloud, session):
scope = cloud.scope
project_name = scope.project_name
if scope.project_id is not None:
return scope.project_id
elif project_name is not None:
for tenant in session.list(identity.Tenant, cloud):
if tenant.name.lower() == project_name.lower():
return tenant.object_id.id
raise base.AbortMigration(
'Unable to upload image: no admin tenant.')
def _get_object_tenant_id(cloud, obj):
tenant = obj.tenant
if tenant.object_id.cloud != cloud.name:
return tenant.find_link(cloud).primary_key.id
else:
return tenant.primary_key.id
def _delete_vm(cloud, vm_id):
    """Delete VM `vm_id` from `cloud`, retrying once with a state reset.
    Return True once the VM is confirmed gone, False if deletion timed out."""
nova_client = clients.compute_client(cloud)
for do_reset in (False, True):
try:
if do_reset:
clients.retry(
nova_client.servers.reset_state, vm_id,
expected_exceptions=[nova_exceptions.NotFound])
try:
clients.retry(
nova_client.servers.delete, vm_id,
expected_exceptions=[nova_exceptions.NotFound])
except nova_exceptions.NotFound:
raise
except nova_exceptions.ClientException:
LOG.error('Failed to delete VM %s from cloud %s',
vm_id, cloud.name, exc_info=True)
continue
if clients.wait_for(_object_is_deleted, nova_client, 'servers',
vm_id, nova_exceptions.NotFound):
return True
except nova_exceptions.NotFound:
return True
except clients.Timeout:
continue
LOG.error('Timeout waiting for VM %s from cloud %s to be deleted',
vm_id, cloud.name, exc_info=True)
return False
def shutoff_vm(nova_client, instance_id):
    """Bring a nova instance to the SHUTOFF state from active, paused,
    suspended or verify_resize, waiting on each intermediate status."""
# TODO: make general-purpose utility function
    instance = clients.retry(nova_client.servers.get, instance_id)
current = instance.status.lower()
def wait_status(status):
return clients.wait_for(
            _object_status_is, nova_client, 'servers', instance_id, status)
try:
if current == 'paused':
nova_client.servers.unpause(instance)
wait_status('active')
nova_client.servers.stop(instance)
wait_status('shutoff')
elif current == 'suspended':
nova_client.servers.resume(instance)
wait_status('active')
nova_client.servers.stop(instance)
wait_status('shutoff')
elif current == 'active':
nova_client.servers.stop(instance)
wait_status('shutoff')
elif current == 'verify_resize':
nova_client.servers.confirm_resize(instance)
wait_status('active')
nova_client.servers.stop(instance)
wait_status('shutoff')
elif current != 'shutoff':
raise base.AbortMigration('Invalid state change: %s -> shutoff',
current)
except clients.Timeout:
        LOG.debug("Failed to change state from '%s' to 'shutoff' for VM "
                  "'%s'", current, instance_id)
def _get_image_location():
cf_init_path = inspect.getfile(cloudferry)
return os.path.join(os.path.dirname(cf_init_path),
'static', IMAGE_FILENAME)
def _get_image_md5():
    # We don't care about a race condition here since MD5 will always return
    # the same result
global _image_md5
if _image_md5 is None:
location = _get_image_location()
hash_md5 = hashlib.md5()
with open(location, "rb") as f:
for chunk in iter(lambda: f.read(65536), b""):
hash_md5.update(chunk)
_image_md5 = hash_md5.hexdigest().lower()
    return _image_md5
# /Oasys-Canvas-Core-1.0.7.tar.gz/Oasys-Canvas-Core-1.0.7/orangecanvas/scheme/signalmanager.py
import logging
import itertools
from collections import namedtuple, defaultdict, deque
from operator import attrgetter
from functools import partial
from PyQt5.QtCore import QObject, QCoreApplication, QEvent
from PyQt5.QtCore import pyqtSignal as Signal
from .scheme import SchemeNode
from functools import reduce
log = logging.getLogger(__name__)
_Signal = namedtuple(
"_Signal",
["link", # link on which the signal is sent
"value", # signal value
"id"]) # signal id
is_enabled = attrgetter("enabled")
class SignalManager(QObject):
"""
    Handle all runtime signal propagation for a :class:`Scheme` instance.
    The scheme must be passed to the constructor and will become the parent
    of this object. Furthermore, this should happen before any items
    (nodes, links) are added to the scheme.
"""
    Running, Stoped, Paused, Error = range(4)
    """SignalManager state flags."""
Waiting, Processing = range(2)
"""SignalManager runtime state flags."""
stateChanged = Signal(int)
"""Emitted when the state of the signal manager changes."""
updatesPending = Signal()
"""Emitted when signals are added to the queue."""
processingStarted = Signal([], [SchemeNode])
"""Emitted right before a `SchemeNode` instance has its inputs
updated.
"""
processingFinished = Signal([], [SchemeNode])
"""Emitted right after a `SchemeNode` instance has had its inputs
updated.
"""
runtimeStateChanged = Signal(int)
"""Emitted when `SignalManager`'s runtime state changes."""
def __init__(self, scheme):
assert(scheme)
QObject.__init__(self, scheme)
self._input_queue = []
        # mapping a node to its current outputs
# {node: {channel: {id: signal_value}}}
self._node_outputs = {}
self.__state = SignalManager.Running
self.__runtime_state = SignalManager.Waiting
# A flag indicating if UpdateRequest event should be rescheduled
self.__reschedule = False
def _can_process(self):
"""
        Return a bool indicating if the manager can enter the main
processing loop.
"""
return self.__state not in [SignalManager.Error, SignalManager.Stoped]
def scheme(self):
"""
Return the parent class:`Scheme` instance.
"""
return self.parent()
def start(self):
"""
Start the update loop.
.. note:: The updates will not happen until the control reaches
the Qt event loop.
"""
if self.__state != SignalManager.Running:
self.__state = SignalManager.Running
self.stateChanged.emit(SignalManager.Running)
self._update()
def stop(self):
"""
Stop the update loop.
        .. note:: If the `SignalManager` is currently in `process_queued` it
            will still update all current pending signals, but will not
            re-enter until `start()` is called again.
"""
if self.__state != SignalManager.Stoped:
self.__state = SignalManager.Stoped
self.stateChanged.emit(SignalManager.Stoped)
def pause(self):
"""
Pause the updates.
"""
if self.__state != SignalManager.Paused:
self.__state = SignalManager.Paused
self.stateChanged.emit(SignalManager.Paused)
def resume(self):
if self.__state == SignalManager.Paused:
self.__state = SignalManager.Running
self.stateChanged.emit(self.__state)
self._update()
def step(self):
if self.__state == SignalManager.Paused:
self.process_queued(1)
def state(self):
"""
Return the current state.
"""
return self.__state
def _set_runtime_state(self, state):
"""
Set the runtime state.
Should only be called by `SignalManager` implementations.
"""
if self.__runtime_state != state:
self.__runtime_state = state
self.runtimeStateChanged.emit(self.__runtime_state)
def runtime_state(self):
"""
Return the runtime state. This can be `SignalManager.Waiting`
or `SignalManager.Processing`.
"""
return self.__runtime_state
def on_node_removed(self, node):
# remove all pending input signals for node so we don't get
# stale references in process_node.
# NOTE: This does not remove output signals for this node. In
# particular the final 'None' will be delivered to the sink
# nodes even after the source node is no longer in the scheme.
log.info("Node %r removed. Removing pending signals.",
node.title)
self.remove_pending_signals(node)
del self._node_outputs[node]
def on_node_added(self, node):
self._node_outputs[node] = defaultdict(dict)
def link_added(self, link):
# push all current source values to the sink
if link.enabled:
log.info("Link added (%s). Scheduling signal data update.", link)
self._schedule(self.signals_on_link(link))
self._update()
link.enabled_changed.connect(self.link_enabled_changed)
def link_removed(self, link):
# purge all values in sink's queue
log.info("Link removed (%s). Scheduling signal data purge.", link)
self.purge_link(link)
def link_enabled_changed(self, enabled):
if enabled:
link = self.sender()
log.info("Link %s enabled. Scheduling signal data update.", link)
self._schedule(self.signals_on_link(link))
def signals_on_link(self, link):
"""
Return _Signal instances representing the current values
present on the link.
"""
items = self.link_contents(link)
signals = []
for key, value in items.items():
signals.append(_Signal(link, value, key))
return signals
def link_contents(self, link):
"""
Return the contents on link.
"""
node, channel = link.source_node, link.source_channel
return self._node_outputs[node][channel]
    def send(self, node, channel, value, id):
        """
        Send `value` with `id` from `node` on the output `channel` and
        schedule delivery on every enabled link attached to that channel.
        """
log.debug("%r sending %r (id: %r) on channel %r",
node.title, type(value), id, channel.name)
scheme = self.scheme()
self._node_outputs[node][channel][id] = value
links = scheme.find_links(source_node=node, source_channel=channel)
links = filter(is_enabled, links)
signals = []
for link in links:
signals.append(_Signal(link, value, id))
self._schedule(signals)
def purge_link(self, link):
"""
Purge the link (send None for all ids currently present)
"""
contents = self.link_contents(link)
ids = contents.keys()
signals = [_Signal(link, None, id) for id in ids]
self._schedule(signals)
def _schedule(self, signals):
"""
Schedule a list of :class:`_Signal` for delivery.
"""
self._input_queue.extend(signals)
if signals:
self.updatesPending.emit()
self._update()
def _update_link(self, link):
"""
Schedule update of a single link.
"""
signals = self.signals_on_link(link)
self._schedule(signals)
def process_queued(self, max_nodes=None):
"""
Process queued signals.
"""
if self.__runtime_state == SignalManager.Processing:
raise RuntimeError("Cannot re-enter 'process_queued'")
if not self._can_process():
raise RuntimeError("Can't process in state %i" % self.__state)
log.info("Processing queued signals")
node_update_front = self.node_update_front()
if max_nodes is not None:
node_update_front = node_update_front[:max_nodes]
log.debug("Nodes for update %s",
[node.title for node in node_update_front])
self._set_runtime_state(SignalManager.Processing)
try:
# TODO: What if the update front changes in the loop?
for node in node_update_front:
self.process_node(node)
finally:
self._set_runtime_state(SignalManager.Waiting)
def process_node(self, node):
"""
Process pending input signals for `node`.
"""
signals_in = self.pending_input_signals(node)
self.remove_pending_signals(node)
signals_in = self.compress_signals(signals_in)
log.debug("Processing %r, sending %i signals.",
node.title, len(signals_in))
self.processingStarted.emit()
self.processingStarted[SchemeNode].emit(node)
try:
self.send_to_node(node, signals_in)
finally:
self.processingFinished.emit()
self.processingFinished[SchemeNode].emit(node)
def compress_signals(self, signals):
"""
Compress a list of :class:`_Signal` instances to be delivered.
The base implementation returns the list unmodified.
"""
return signals
def send_to_node(self, node, signals):
"""
Abstract. Reimplement in subclass.
Send/notify the :class:`SchemeNode` instance (or whatever
object/instance it is a representation of) that it has new inputs
as represented by the signals list (list of :class:`_Signal`).
"""
raise NotImplementedError
def is_pending(self, node):
"""
        Is `node` (:class:`SchemeNode`) scheduled for processing (i.e.
it has incoming pending signals).
"""
return node in [signal.link.sink_node for signal in self._input_queue]
def pending_nodes(self):
"""
Return a list of pending nodes (in no particular order).
"""
return list(set(sig.link.sink_node for sig in self._input_queue))
def pending_input_signals(self, node):
"""
Return a list of pending input signals for node.
"""
return [signal for signal in self._input_queue
if node is signal.link.sink_node]
def remove_pending_signals(self, node):
"""
Remove pending signals for `node`.
"""
for signal in self.pending_input_signals(node):
try:
self._input_queue.remove(signal)
except ValueError:
pass
def blocking_nodes(self):
"""
Return a list of nodes in a blocking state.
"""
scheme = self.scheme()
return [node for node in scheme.nodes if self.is_blocking(node)]
def is_blocking(self, node):
return False
def node_update_front(self):
"""
Return a list of nodes on the update front, i.e. nodes scheduled for
an update that have no ancestor which is either itself scheduled
        for update or is in a blocking state.
.. note::
The node's ancestors are only computed over enabled links.
"""
scheme = self.scheme()
def expand(node):
return [link.sink_node for
link in scheme.find_links(source_node=node) if
link.enabled]
components = strongly_connected_components(scheme.nodes, expand)
node_scc = {node: scc for scc in components for node in scc}
def isincycle(node):
return len(node_scc[node]) > 1
# a list of all nodes currently active/executing a task.
blocking_nodes = set(self.blocking_nodes())
dependents = partial(dependent_nodes, scheme)
blocked_nodes = reduce(set.union,
map(dependents, blocking_nodes),
set(blocking_nodes))
pending = set(self.pending_nodes())
pending_downstream = set()
for n in pending:
depend = set(dependents(n))
if isincycle(n):
                # a pending node in a cycle would have a circular
# dependency on itself, preventing any progress being made
# by the workflow execution.
cc = node_scc[n]
depend -= set(cc)
pending_downstream.update(depend)
log.debug("Pending nodes: %s", pending)
log.debug("Blocking nodes: %s", blocking_nodes)
return list(pending - pending_downstream - blocked_nodes)
def event(self, event):
if event.type() == QEvent.UpdateRequest:
if not self.__state == SignalManager.Running:
log.debug("Received 'UpdateRequest' event while not "
"in 'Running' state")
event.setAccepted(False)
return False
if self.__runtime_state == SignalManager.Processing:
log.debug("Received 'UpdateRequest' event while in "
"'process_queued'")
# This happens if someone calls QCoreApplication.processEvents
# from the signal handlers.
self.__reschedule = True
event.accept()
return True
log.info("'UpdateRequest' event, queued signals: %i",
len(self._input_queue))
if self._input_queue:
self.process_queued(max_nodes=1)
event.accept()
if self.__reschedule:
log.debug("Rescheduling 'UpdateRequest' event")
self._update()
self.__reschedule = False
elif self.node_update_front():
log.debug("More nodes are eligible for an update. "
"Scheduling another update.")
self._update()
return True
return QObject.event(self, event)
def _update(self):
"""
Schedule processing at a later time.
"""
QCoreApplication.postEvent(self, QEvent(QEvent.UpdateRequest))
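# Illustrative sketch (not part of the original module): send_to_node() is
# abstract, so a concrete manager must subclass SignalManager. The
# hypothetical subclass below only logs what each node would receive, and the
# helper shows the pause()/step()/resume() driving loop.
class _ExampleLoggingSignalManager(SignalManager):
    def send_to_node(self, node, signals):
        for signal in signals:
            log.info("%r <- %r (id %r)", node.title, signal.value, signal.id)
def _example_step_through(manager):
    manager.pause()
    while manager.pending_nodes():
        manager.step()  # while paused, step() processes at most one node
    manager.resume()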
def can_enable_dynamic(link, value):
"""
    Can a dynamic `link` (:class:`SchemeLink`) be enabled for `value`.
"""
return isinstance(value, link.sink_type())
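# Illustrative sketch (not part of the original module): can_enable_dynamic()
# only checks the runtime type of `value` against the link's declared sink
# type, so the hypothetical FakeLink below is enough to exercise it.
def _example_can_enable_dynamic():
    class FakeLink(object):
        def sink_type(self):
            return str
    link = FakeLink()
    return can_enable_dynamic(link, "text"), can_enable_dynamic(link, 42)
    # -> (True, False)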
def compress_signals(signals):
"""
Compress a list of signals.
"""
groups = group_by_all(reversed(signals),
key=lambda sig: (sig.link, sig.id))
signals = []
def has_none(signals):
return any(sig.value is None for sig in signals)
for (link, id), signals_grouped in groups:
if len(signals_grouped) > 1 and has_none(signals_grouped[1:]):
signals.append(signals_grouped[0])
signals.append(_Signal(link, None, id))
else:
signals.append(signals_grouped[0])
return list(reversed(signals))
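# Illustrative sketch (not part of the original module): the module-level
# compress_signals() applied to a hand-built queue for one (link, id) pair.
# A plain object() stands in for a SchemeLink since only identity matters.
def _example_compress_signals():
    link = object()
    queue = [_Signal(link, "old", 0), _Signal(link, None, 0),
             _Signal(link, "new", 0)]
    return compress_signals(queue)
    # -> [_Signal(link, None, 0), _Signal(link, "new", 0)]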
def dependent_nodes(scheme, node):
"""
Return a list of all nodes (in breadth first order) in `scheme` that
    are dependent on `node`.
.. note::
        This does not include nodes only reachable by disabled links.
"""
def expand(node):
return [link.sink_node
for link in scheme.find_links(source_node=node)
if link.enabled]
nodes = list(traverse_bf(node, expand))
assert nodes[0] is node
# Remove the first item (`node`).
return nodes[1:]
def traverse_bf(start, expand):
queue = deque([start])
visited = set()
while queue:
item = queue.popleft()
if item not in visited:
yield item
visited.add(item)
queue.extend(expand(item))
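# Illustrative sketch (not part of the original module): traverse_bf() yields
# each reachable node once, in breadth-first order, for any expand callable.
def _example_traverse_bf():
    graph = {"a": ["b", "c"], "b": ["c"], "c": []}
    return list(traverse_bf("a", graph.__getitem__))
    # -> ["a", "b", "c"]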
def group_by_all(sequence, key=None):
order_seen = []
groups = {}
for item in sequence:
if key is not None:
item_key = key(item)
else:
item_key = item
if item_key in groups:
groups[item_key].append(item)
else:
groups[item_key] = [item]
order_seen.append(item_key)
return [(key, groups[key]) for key in order_seen]
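# Illustrative sketch (not part of the original module): unlike
# itertools.groupby, group_by_all() groups non-adjacent items and keeps one
# entry per key in first-seen order.
def _example_group_by_all():
    return group_by_all([1, 2, 1, 3], key=lambda x: x % 2)
    # -> [(1, [1, 1, 3]), (0, [2])]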
def strongly_connected_components(nodes, expand):
"""
Return a list of strongly connected components.
Implementation of Tarjan's SCC algorithm.
"""
# SCC found
components = []
    # node stack in DFS
stack = []
# == set(stack) : a set of all nodes in stack (for faster lookup)
stackset = set()
# node -> int increasing node numbering as encountered in DFS traversal
index = {}
# node -> int the lowest node index reachable from a node
lowlink = {}
indexgen = itertools.count()
def push_node(v):
"""Push node onto the stack."""
stack.append(v)
stackset.add(v)
index[v] = lowlink[v] = next(indexgen)
def pop_scc(v):
"""Pop from the stack a SCC rooted at node v."""
i = stack.index(v)
scc = stack[i:]
del stack[i:]
stackset.difference_update(scc)
return scc
isvisited = lambda node: node in index
def strong_connect(v):
push_node(v)
for w in expand(v):
if not isvisited(w):
strong_connect(w)
lowlink[v] = min(lowlink[v], lowlink[w])
elif w in stackset:
lowlink[v] = min(lowlink[v], index[w])
if index[v] == lowlink[v]:
scc = pop_scc(v)
components.append(scc)
for node in nodes:
if not isvisited(node):
strong_connect(node)
    return components
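# Illustrative sketch (not part of the original module): Tarjan's algorithm on
# a small graph where 1 -> 2 -> 3 -> 1 form a cycle and 4 only points into it,
# so the cycle is one component and 4 is a singleton component.
def _example_strongly_connected_components():
    graph = {1: [2], 2: [3], 3: [1], 4: [3]}
    return strongly_connected_components([1, 2, 3, 4], graph.__getitem__)
    # -> [[1, 2, 3], [4]]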
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/SVG/fonts/Neo-Euler/Main/Regular/Main.js | MathJax.OutputJax.SVG.FONTDATA.FONTS.NeoEulerMathJax_Main={directory:"Main/Regular",family:"NeoEulerMathJax_Main",id:"NEOEULERMAIN",1:[0,0,333,0,0,""],32:[0,0,333,0,0,""],33:[690,11,295,90,204,"90 46c0 31 26 57 58 57c31 0 56 -26 56 -57s-25 -57 -56 -57c-32 0 -58 26 -58 57zM132 184l-27 383c-2 19 -3 41 -3 58c0 12 1 24 4 36c2 7 5 15 12 20c10 7 22 9 34 9c9 0 18 -2 24 -8c11 -10 13 -24 13 -38c0 -29 -2 -58 -5 -87l-36 -371"],34:[695,-432,214,9,196,"34 432l-20 152c-3 24 -5 49 -5 73c0 10 0 22 9 29c7 6 16 9 25 9c8 0 16 0 22 -5c5 -4 11 -8 11 -15v-52l-21 -191h-21zM155 432l-22 193c-2 14 -5 38 -5 43s2 11 7 15c8 8 20 9 32 9c8 0 17 -1 23 -8c3 -3 6 -7 6 -11c0 -16 0 -32 -1 -47l-19 -194h-21"],35:[690,0,500,4,493,"469 268v-40h-107l-35 -228h-41l35 228h-164l-35 -228h-41l35 228h-112v40h118l26 166h-117v40h123l33 216h41l-33 -216h164l33 216h41l-33 -216h93v-40h-99l-26 -166h101zM327 268l26 166h-164l-26 -166h164"],36:[708,107,557,84,480,"477 555l-58 -51l-12 3c-6 26 -15 53 -34 72c-15 14 -32 24 -51 28v-246l12 -5c34 -15 81 -42 98 -59c28 -27 48 -69 48 -107c0 -48 -22 -92 -56 -126c-29 -29 -64 -51 -102 -64v-99l-40 -8l-1 96c-17 -3 -34 -5 -52 -5c-43 0 -81 11 -111 42c-15 15 -27 33 -34 52l52 61 l16 -1c8 -28 19 -57 39 -77c22 -22 46 -35 76 -35c5 0 9 0 14 1l1 263c-41 18 -93 42 -116 65c-30 30 -47 72 -47 114c0 43 14 87 44 118c33 32 75 49 119 55v58l40 8v-64c45 -1 90 -9 122 -41c14 -14 24 -31 33 -48zM322 268v-229c16 7 30 17 42 29c24 24 35 60 35 94 c0 22 -15 52 -32 71c-11 12 -28 24 -45 35zM281 381v228c-24 -4 -45 -16 -61 -33c-19 -19 -27 -47 -27 -75c0 -33 17 -62 40 -85c13 -13 30 -25 48 -35"],37:[708,20,840,58,789,"656 344c31 -9 60 -26 82 -49c30 -29 51 -67 51 -109c0 -51 -25 -98 -60 -133c-29 -29 -64 -53 -105 -53c-29 0 -56 12 -76 32c-29 29 -41 71 -41 112c0 50 23 96 58 131c27 27 57 51 91 69zM612 293c-16 -10 -24 -20 -30 -30c-16 -23 -22 -55 -22 -84 c0 -39 11 -78 39 -105c16 -16 40 -29 62 -29c20 0 30 1 44 11c26 18 33 67 33 97c0 37 -13 74 -40 101c-19 19 -58 37 -86 39zM208 688c31 -9 60 -26 82 -49c30 -29 51 -67 51 -109c0 -51 -25 -98 -60 -133c-29 -29 -65 -53 -106 -53c-29 0 -56 11 -76 31 c-29 29 -41 71 -41 112c0 50 23 97 58 132c27 27 58 51 92 69zM163 637c-16 -10 -24 -21 -30 -31c-16 -23 -22 -55 -22 -84c0 -39 11 -78 39 -105c16 -16 41 -28 63 -28c20 0 29 1 43 11c26 18 34 67 34 97c0 37 -14 74 -41 101c-19 19 -58 37 -86 39zM603 702l4 -11 l-357 -711l-32 10l-2 14l354 703l11 1"],38:[698,11,737,49,734,"346 356l2 43c56 -3 133 -3 200 -3c52 0 126 0 156 3l-3 -48c-41 7 -98 7 -147 7c5 -21 9 -42 9 -63c0 -67 -33 -127 -74 -180l3 -3c16 -17 34 -32 55 -44c15 -8 32 -15 50 -15c24 0 47 9 64 26c29 29 42 67 52 110l21 -5c-10 -58 -29 -116 -71 -158 c-26 -26 -61 -37 -97 -37c-50 0 -95 25 -130 59l-5 4c-40 -32 -95 -60 -153 -60c-61 0 -123 12 -167 55c-39 40 -62 93 -62 148c0 47 13 89 47 123c24 24 70 54 111 79c-16 39 -26 81 -26 123c0 50 17 99 53 135c29 29 69 43 110 43c29 0 57 -11 77 -32 c16 -15 25 -37 25 -60c0 -44 -14 -81 -47 -111c-30 -28 -104 -71 -121 -79c24 -53 58 -114 92 -163c29 -42 68 -85 104 -122c26 38 38 87 38 135c0 30 -15 58 -36 79c-16 16 -39 16 -61 16c-23 0 -46 -2 -69 -5zM416 68c-37 41 -75 92 -105 138c-35 52 -71 113 -95 170 c-23 -15 -43 -37 -51 -45c-26 -30 -38 -72 -38 -113c0 -54 21 -105 59 -143c28 -28 67 -44 107 -44c53 0 91 11 123 37zM270 435c27 17 56 37 70 51c25 25 34 61 34 96c0 21 -10 42 -25 57c-10 10 -29 24 -42 24c-23 0 -31 -3 -45 -17c-18 -18 -20 -62 -20 -87 c0 -43 11 -84 28 -124"],39:[695,-436,212,68,134,"88 436l-18 193c-1 12 -2 24 -2 37c0 8 3 15 9 20c7 
6 17 9 27 9c8 0 16 -1 22 -7c6 -7 8 -17 8 -26c0 -11 -2 -27 -4 -40l-26 -186h-16"],40:[738,187,388,113,301,"301 -163v-24c-48 4 -90 42 -118 79c-31 43 -43 95 -52 146c-14 80 -18 161 -18 242c0 77 5 155 20 230c11 54 27 108 58 154c25 35 69 67 110 74v-27c-32 -8 -65 -38 -81 -72c-22 -47 -33 -98 -41 -150c-11 -71 -14 -143 -14 -215c0 -86 3 -172 17 -256 c6 -29 12 -58 22 -86c7 -20 16 -39 30 -55c17 -20 47 -36 67 -40"],41:[736,189,388,86,276,"86 709v27c34 -7 66 -28 90 -54c40 -44 60 -101 75 -158c20 -83 25 -167 25 -252c0 -84 -4 -169 -20 -252c-11 -53 -26 -106 -61 -148c-27 -33 -78 -57 -109 -61v24c27 7 59 22 79 47c12 15 19 33 25 51c9 27 16 52 20 79c13 87 14 176 14 264c0 79 -5 158 -19 235 c-11 53 -24 106 -55 151c-16 22 -39 38 -64 47"],42:[692,-448,277,28,239,"157 448h-40c6 38 8 68 9 111c-27 -21 -53 -44 -77 -72l-21 35c34 17 61 35 87 53c-29 21 -56 37 -85 52l27 28c22 -21 43 -40 68 -59c0 32 0 63 -8 96h40c-7 -33 -9 -62 -11 -95c25 20 48 41 68 62l25 -31c-32 -15 -60 -32 -88 -52c31 -23 60 -43 88 -56l-26 -29 c-17 19 -41 41 -67 61c1 -35 3 -68 11 -104"],43:[586,74,755,46,709,"396 276h313l-5 -40h-308v-303l-40 -7v310h-310l7 40h303v304l40 6v-310"],44:[138,210,277,67,214,"110 -210l-19 14c20 26 48 68 58 93c5 12 8 29 8 42c0 15 -4 30 -13 42l-70 83c-4 5 -7 16 -7 26c0 13 13 28 23 36c8 6 26 12 36 12c8 0 26 -8 34 -18c15 -20 37 -73 46 -96c6 -16 8 -34 8 -51c0 -25 -10 -47 -22 -69c-18 -32 -39 -64 -82 -114"],45:[276,-236,544,40,504,"498 236h-458l7 40h457"],46:[119,15,277,77,211,"77 52c0 37 30 67 67 67s67 -30 67 -67s-30 -67 -67 -67s-67 30 -67 67"],47:[720,192,501,39,463,"458 715l5 -11l-389 -896l-32 8l-3 14l386 888l11 2"],48:[704,14,499,34,460,"290 704c50 -32 86 -72 115 -122c40 -69 55 -148 55 -226c0 -61 -8 -122 -28 -179c-16 -48 -42 -94 -78 -131c-39 -38 -92 -60 -147 -60c-33 0 -65 13 -89 36c-31 31 -52 72 -63 114c-16 56 -21 109 -21 167c0 120 45 241 130 326c33 33 82 57 126 75zM234 640 c-19 -12 -37 -26 -53 -45c-52 -63 -80 -143 -80 -224c0 -58 5 -117 21 -172c13 -43 31 -84 62 -115c19 -19 48 -32 68 -32c31 0 64 9 85 31c23 23 36 52 46 83c12 38 15 79 15 119c0 90 -24 176 -68 254c-6 12 -51 82 -96 101"],49:[712,5,499,92,314,"308 712l6 -12c-5 -89 -7 -179 -7 -268c0 -136 0 -334 3 -410l-84 -27l-12 5c5 139 8 279 8 419c0 49 0 122 -1 147s-21 46 -46 46h-83v27"],50:[708,-2,499,12,472,"472 80l-23 -78h-437v16c57 58 113 117 166 179c35 41 70 83 98 129c35 56 58 117 58 182c0 29 -6 70 -34 99c-20 22 -50 33 -79 33c-22 0 -44 -4 -63 -14c-28 -15 -50 -30 -74 -50l-20 19c30 33 64 63 104 84c33 18 69 29 107 29c41 0 80 -15 109 -44 c30 -29 42 -70 42 -111c0 -53 -22 -102 -48 -148c-25 -46 -56 -88 -88 -130c-45 -60 -93 -118 -143 -176c-1 -1 -2 -4 -1 -6c2 -4 7 -5 12 -5h309"],51:[702,17,499,18,446,"241 382v-8c34 -3 67 -8 100 -19c23 -8 47 -19 65 -37c27 -27 40 -64 40 -103c0 -59 -27 -114 -69 -156c-52 -51 -123 -76 -196 -76c-57 0 -111 20 -156 55l-7 11l42 72h10c12 -20 25 -38 41 -54c25 -26 59 -44 95 -44c37 0 73 13 100 40c32 32 46 78 46 123 c0 43 -20 83 -49 114c-18 19 -43 28 -68 34c-36 9 -73 11 -110 11l1 26c30 7 61 15 89 28c22 10 43 22 59 38c28 28 43 65 43 103c0 27 -10 52 -29 71c-21 21 -50 28 -80 28c-17 0 -34 -6 -51 -13c-25 -11 -48 -25 -71 -40l-14 22c31 26 65 51 101 70c25 13 52 24 80 24 c39 0 80 -8 108 -37c26 -25 38 -61 38 -97c0 -45 -15 -90 -47 -122c-31 -30 -70 -52 -111 -64"],52:[704,5,499,-1,473,"361 704l7 -6c-1 -27 -1 -299 -1 -454l7 -9h94l5 -7l-16 -37h-82l-8 -8c0 -54 0 -133 3 -155l-80 -33l-5 6c5 35 5 118 5 181l-7 9h-284v44l286 434zM290 243v342c0 2 -1 5 -1 6c-2 3 -7 2 -9 0c-34 -39 -64 -85 -93 -130c-38 -59 -122 -197 -123 -219l5 -7h215"],53:[687,11,499,12,448,"125 594v-181v-7c41 20 86 37 132 37c51 0 
102 -14 138 -50c38 -37 53 -90 53 -142c0 -67 -23 -133 -70 -180c-53 -53 -125 -82 -200 -82c-63 0 -124 26 -165 75l-1 12l44 56h11c12 -26 25 -52 46 -73c19 -19 45 -33 73 -33c41 0 82 13 112 43c39 39 54 98 54 154 c0 50 -11 102 -47 138c-24 24 -58 34 -92 34c-44 0 -85 -22 -117 -53l-13 11v327l7 7h335l-27 -78c-87 -2 -213 -5 -261 -5c-6 0 -12 -4 -12 -10"],54:[700,13,499,45,471,"471 688l-20 -54l-9 -4c-20 8 -38 11 -58 11c-61 0 -115 -27 -156 -65c-79 -73 -98 -174 -99 -270l7 -1c53 43 116 72 185 72c38 0 76 -16 103 -43c32 -33 47 -78 47 -123c0 -60 -30 -113 -72 -155c-43 -43 -101 -69 -163 -69c-45 0 -89 15 -121 47 c-53 53 -70 130 -70 204c0 125 45 249 134 338c35 37 75 69 120 92c36 19 76 32 117 32c15 0 40 -6 55 -12zM132 265c0 -41 4 -86 16 -126c9 -29 26 -58 47 -79c18 -17 43 -28 68 -28c32 0 65 8 88 31c29 28 41 68 41 109c0 40 -17 79 -46 107c-23 24 -57 35 -90 35 c-41 0 -102 -28 -124 -49"],55:[694,8,499,49,494,"494 674l-154 -252c-78 -128 -143 -264 -175 -410l-84 -20l-6 8c24 82 109 232 163 324c0 0 98 171 160 270l-6 12h-336l-7 9l21 79c18 -6 171 -8 216 -8h208v-12"],56:[706,10,499,40,461,"238 334c-27 -14 -57 -33 -79 -55c-26 -27 -36 -65 -36 -102c0 -41 13 -82 42 -112c21 -20 51 -33 80 -33c33 0 68 8 92 32c26 26 37 67 37 104c0 41 -21 77 -50 106c-15 13 -64 46 -86 60zM312 385c37 -25 86 -61 103 -78c29 -29 46 -68 46 -110c0 -48 -17 -95 -51 -130 c-50 -50 -119 -77 -191 -77c-48 0 -102 10 -137 45c-32 32 -42 77 -42 122c0 42 12 85 43 115c25 25 75 55 118 80v11c-32 22 -59 46 -82 78c-17 27 -30 57 -30 89c0 40 14 81 45 108c35 31 77 51 120 68c30 1 57 -3 85 -12c21 -7 44 -19 60 -35c24 -24 36 -57 36 -91 s-15 -66 -35 -93c-24 -32 -54 -58 -88 -79v-11zM279 412c42 29 72 78 72 131c0 21 -9 51 -29 75c-23 26 -56 41 -99 41c-15 -10 -33 -25 -41 -36c-12 -17 -16 -41 -16 -61c0 -26 12 -50 28 -71c21 -30 55 -58 85 -79"],57:[702,9,499,40,462,"214 11l-85 -20l-10 8c20 46 68 110 113 168c0 0 85 114 117 169l-5 6c-30 -16 -61 -28 -91 -36c-18 -5 -40 -7 -59 -7c-28 0 -75 18 -102 45c-29 29 -52 75 -52 121c0 52 10 97 39 133s117 82 167 104c50 0 80 -5 99 -11c25 -8 49 -21 68 -40c32 -32 49 -77 49 -123 c0 -38 -13 -75 -28 -111c-23 -52 -52 -100 -84 -147c-34 -50 -67 -101 -95 -155c-18 -33 -35 -67 -41 -104zM351 384c25 25 31 63 31 98c0 47 -19 92 -52 125c-25 25 -69 37 -111 37c-7 0 -10 0 -17 -1c-46 -27 -73 -70 -73 -128c0 -48 15 -96 49 -129c21 -21 52 -34 81 -34 c33 0 68 8 92 32"],58:[455,12,216,50,164,"50 45c0 31 26 57 58 57c31 0 56 -26 56 -57s-25 -57 -56 -57c-32 0 -58 26 -58 57zM50 398c0 31 26 57 58 57c31 0 56 -26 56 -57s-25 -57 -56 -57c-32 0 -58 26 -58 57"],59:[457,190,216,48,179,"48 400c0 31 26 57 58 57c31 0 56 -26 56 -57s-25 -57 -56 -57c-32 0 -58 26 -58 57zM89 -190l-15 13c17 23 29 48 40 74c6 13 7 26 7 39c0 14 -5 27 -13 38l-34 51c-5 8 -9 17 -9 26c0 12 1 27 9 35l2 2c9 7 20 11 32 11c8 0 16 -4 21 -11c17 -18 31 -38 40 -60 c6 -16 10 -33 10 -50c0 -27 -10 -52 -23 -75c-19 -33 -43 -63 -67 -93"],60:[531,36,756,59,680,"680 505l-2 -9l-522 -241v-12l519 -241l5 -10l-11 -23l-10 -5l-600 280v12l601 275l11 -3"],61:[369,-132,756,54,722,"716 329h-662l7 40h661zM716 132h-662l7 40h661"],62:[531,36,756,76,698,"78 494l-2 10l10 24l10 3l602 -275v-12l-600 -280l-10 5l-8 24l4 9l518 243v9"],63:[693,11,362,45,358,"106 46c0 31 25 57 56 57c32 0 58 -26 58 -57s-26 -57 -58 -57c-31 0 -56 26 -56 57zM186 211l-46 -32c-23 14 -42 41 -42 71c0 21 14 38 26 55c22 28 48 52 75 76c49 42 88 98 88 164c0 30 -13 61 -34 82c-17 17 -40 34 -64 34s-45 -8 -62 -25c-10 -11 -19 -31 -19 -47 c0 -24 13 -46 32 -61l-67 -40c-18 16 -28 43 -28 69c0 29 14 55 34 75c40 40 94 61 150 61c33 0 68 -8 92 -32c25 -25 37 -60 37 -96c0 -33 -13 -60 -30 -89c-18 -30 -73 -79 -103 
-106c-36 -32 -66 -71 -66 -96c0 -20 11 -40 27 -52v-11"],64:[688,31,744,26,718,"389 20c121 0 208 61 263 133l11 -2l28 -18v-12c-63 -84 -167 -152 -304 -152c-200 0 -361 152 -361 352c0 91 36 182 99 248c70 74 170 118 272 119h3c168 0 318 -118 318 -287c0 -51 -15 -103 -44 -145c-36 -53 -106 -112 -175 -116c-11 0 -24 3 -32 12 c-9 10 -11 22 -13 35l-12 3c-22 -14 -43 -27 -67 -36c-20 -8 -40 -14 -62 -14c-29 0 -59 10 -79 33c-30 36 -37 83 -37 129c0 62 16 126 61 170c34 32 80 46 127 46c25 0 59 -10 84 -28l42 25l4 -4c-5 -89 -5 -180 -3 -269c0 -20 6 -58 31 -53c34 8 84 55 107 99 c16 31 24 66 24 101c0 147 -128 247 -274 247h-3c-85 -1 -169 -37 -229 -98c-56 -57 -89 -136 -89 -216c0 -172 136 -302 310 -302zM368 192c35 0 62 20 84 36v182c0 21 0 41 -13 54c-14 15 -33 26 -53 26c-28 0 -54 -10 -75 -30c-19 -17 -41 -61 -43 -141 c0 -33 4 -66 22 -94c17 -26 49 -33 78 -33"],65:[680,10,770,25,743,"536 76l-54 188c-2 4 -8 9 -11 9h-225c-4 0 -8 -4 -10 -8l-117 -248l-91 -19l-3 10c119 200 234 404 339 612c1 3 1 4 1 7s-3 9 -12 11l-90 15v27h194c15 -83 60 -243 93 -363c22 -81 49 -178 85 -238c9 -15 24 -24 41 -28c7 -1 15 -2 23 -2c14 0 29 2 43 3l1 -26 l-104 -30c-15 -4 -30 -6 -45 -6c-11 0 -20 5 -27 13c-14 13 -24 47 -31 73zM382 572l-116 -244c-1 -2 -1 -8 -1 -9c0 -3 4 -7 7 -7h188c5 0 7 4 7 7c0 0 -2 8 -2 9l-70 244c-1 2 -4 4 -7 4s-6 -3 -6 -4"],66:[686,1,655,50,574,"215 324v-258c0 -13 5 -17 11 -21s32 -6 64 -6c54 0 110 12 149 50c31 32 44 75 44 119c0 33 -8 68 -33 93c-18 18 -42 27 -67 34c-34 9 -70 11 -106 11c-18 0 -47 0 -52 -1c-4 0 -8 -3 -10 -7v-14zM215 411v-20c1 -4 5 -7 8 -8c8 -3 31 -2 46 -2c51 0 101 15 137 50 c30 30 43 72 43 114c0 29 -14 55 -34 75c-12 12 -30 17 -47 21c-26 6 -53 7 -80 7c-20 0 -40 -2 -60 -5c-7 -1 -11 -7 -13 -14v-218zM381 378l-1 -8c31 -4 67 -12 97 -21c20 -6 42 -19 57 -34c31 -31 40 -71 40 -114c0 -50 -18 -98 -53 -133c-55 -56 -136 -69 -212 -69 c-66 1 -197 2 -198 2l-1 10c6 9 18 25 18 26c2 3 3 6 4 10c0 1 1 23 1 34v530c0 17 -15 27 -17 28c-17 8 -41 16 -66 23l1 17l7 7h247c41 0 81 -2 121 -12c27 -7 54 -16 74 -36c22 -23 34 -53 34 -85c0 -41 -14 -81 -43 -110c-30 -31 -70 -48 -110 -65"],67:[699,15,714,87,672,"615 528l-14 1c-12 32 -29 61 -52 84c-33 33 -81 45 -127 45c-61 0 -123 -21 -167 -65c-28 -29 -46 -65 -58 -103c-15 -47 -21 -97 -21 -146c0 -92 48 -190 81 -223c55 -55 97 -83 179 -83c68 0 131 29 187 67l17 -29c-72 -49 -152 -91 -239 -91c-79 0 -161 17 -218 74 c-70 70 -96 169 -96 267c0 96 21 195 91 264c73 73 174 109 278 109c65 0 131 -16 177 -63c15 -15 28 -32 39 -50"],68:[686,-2,828,51,738,"212 575v-405c1 -40 3 -102 18 -117c13 -13 52 -13 79 -13c49 0 98 5 145 20c37 12 73 29 101 57c68 68 89 166 89 260c0 61 -17 158 -73 214c-22 22 -48 35 -90 44c-33 7 -142 10 -189 10c-43 0 -60 -1 -66 -4s-8 -6 -11 -12c-2 -3 -3 -23 -3 -54zM106 2v10 c20 15 21 29 22 67v494c0 16 -1 32 -2 47c-1 10 -8 17 -17 21c-19 7 -39 12 -58 18v19l7 8h313c62 0 124 -5 184 -20c43 -11 85 -27 116 -58c25 -25 40 -57 50 -91c13 -42 17 -87 17 -132c0 -127 -22 -211 -99 -294c-31 -34 -81 -54 -125 -67c-57 -17 -117 -22 -177 -22 h-231"],69:[688,0,604,46,559,"456 336c-39 3 -79 6 -118 6c-38 0 -101 -2 -119 -3c-7 -1 -10 -5 -10 -12v-139c0 -38 1 -87 15 -112c6 -12 18 -20 30 -26c12 -5 38 -6 66 -6c47 0 70 0 234 14l5 -5l-13 -53h-388c-9 5 -15 15 -20 25c-5 12 -6 27 -6 41v492c0 14 0 44 -9 64c-2 6 -10 11 -17 16 c-8 6 -38 13 -59 16l-1 20l7 7l148 1c70 0 171 6 242 6c18 0 33 0 46 -1l-2 -50l-4 -7c-69 4 -202 10 -209 10h-40c-5 -1 -10 -4 -13 -9c-8 -12 -11 -44 -11 -67v-166c0 -7 3 -11 10 -12c39 -3 159 -1 236 5l6 -7"],70:[690,2,499,39,467,"125 360l2 161c0 30 -1 60 -4 90c-1 17 -12 32 -29 36l-55 13v21l7 7c142 0 348 0 421 2v-53l-8 -8c-72 5 -144 20 -216 
-116 -46c-58 0 -111 9 -146 51c-34 42 -47 112 -47 159c0 76 24 156 83 206c41 34 92 48 144 48 c9 0 17 0 25 -1c11 -1 37 -7 55 -12l4 4c-13 52 -28 98 -59 134c-29 35 -65 60 -111 60c-37 0 -58 -12 -77 -32c-17 -18 -31 -37 -42 -62zM383 125c18 51 25 107 25 161c0 40 -4 80 -12 119c-25 11 -52 20 -79 20c-32 0 -63 -14 -88 -35c-53 -45 -66 -120 -66 -185 c0 -26 7 -92 39 -130c20 -23 51 -42 83 -42c23 0 38 7 56 21c21 18 34 46 42 71"],8707:[694,0,555,75,498,"89 327v40h369v287h-383v40h423v-694h-423v40h383v287h-369"],8708:[800,112,555,65,498,"484 795l5 -11l-39 -90h48v-694h-349l-49 -112l-32 8l-3 14l39 90h-29v40h47l124 287h-157v40h175l125 287h-314v40h331l45 104l11 2zM291 327l-125 -287h292v287h-167zM433 654l-125 -287h150v287h-25"],8709:[720,192,742,55,687,"215 702v-450v450zM55 254c0 174 142 316 316 316c39 0 77 -7 112 -20l73 168l11 2l22 -5l5 -11l-74 -171c99 -54 167 -159 167 -279c0 -174 -142 -316 -316 -316c-36 0 -70 6 -102 17l-64 -147l-32 8l-3 14l61 141c-104 51 -176 159 -176 283zM467 513 c-30 11 -62 17 -96 17c-152 0 -276 -124 -276 -276c0 -108 62 -201 152 -247zM504 496l-219 -504c27 -9 56 -14 86 -14c152 0 276 124 276 276c0 104 -58 195 -143 242"],8711:[696,4,713,30,689,"356 21l-58 -25c-44 132 -91 257 -143 385c-37 90 -81 182 -125 267l1 16l61 32c198 -13 397 -21 595 -7l2 -42zM157 642l-6 -9c41 -94 201 -494 201 -495h11c95 164 175 321 255 490l-3 12c-152 -3 -306 2 -458 2"],8712:[541,41,666,83,561,"561 270v-40h-437c11 -131 126 -231 264 -231h173v-40h-173c-165 0 -305 127 -305 291s140 291 305 291h173v-40h-173c-138 0 -253 -100 -264 -231h437"],8713:[720,192,666,83,561,"561 270v-40h-222l-85 -197c39 -22 85 -34 134 -34h173v-40h-173c-54 0 -106 14 -150 38l-82 -189l-32 8l-3 14l82 189c-72 52 -120 135 -120 231c0 164 140 291 305 291h42l77 177l11 2l22 -5l5 -11l-71 -163h87v-40h-104l-100 -231h204zM312 270l101 231h-25 c-138 0 -253 -100 -264 -231h188zM219 56l76 174h-171c6 -70 42 -132 95 -174"],8715:[541,41,666,103,581,"103 230v40h438c-11 131 -126 231 -264 231h-174v40h174c165 0 304 -127 304 -291s-139 -291 -304 -291h-174v40h174c138 0 253 100 264 231h-438"],8722:[276,-236,756,46,710,"704 236h-658l7 40h657"],8723:[586,74,755,46,710,"396 276h313l-5 -40h-308v-303l-40 -7v310h-310l7 40h303v270h-310l7 40h657l-6 -40h-308v-270"],8725:[720,192,0,39,463,"458 715l5 -11l-389 -896l-32 8l-3 14l386 888l11 2"],8726:[722,192,501,39,463,"428 -192l-389 896l5 11l31 7l388 -893l-3 -13"],8727:[514,-26,482,30,452,"288 26h-80c12 76 16 136 18 222c-54 -42 -106 -88 -154 -144l-42 70c68 34 122 70 174 106c-58 42 -112 74 -170 104l54 56c44 -42 86 -80 136 -118c0 64 0 126 -16 192h80c-14 -66 -18 -124 -22 -190c50 40 96 82 136 124l50 -62c-64 -30 -120 -64 -176 -104 c62 -46 120 -86 176 -112l-52 -58c-34 38 -82 82 -134 122c2 -70 6 -136 22 -208"],8728:[444,-56,500,55,444,"444 250c0 -107 -89 -194 -194 -194c-108 0 -195 88 -195 194c0 105 87 194 195 194c105 0 194 -87 194 -194zM250 96c83 0 154 68 154 154s-71 154 -154 154c-86 0 -155 -70 -155 -154s68 -154 155 -154"],8729:[444,-56,500,55,444,"444 250c0 -107 -89 -194 -194 -194c-108 0 -195 88 -195 194c0 105 87 194 195 194c105 0 194 -87 194 -194"],8730:[988,1,833,70,849,"381 -1h-23l-205 450l-67 -50l-16 16l135 102l185 -406l423 877l36 -17"],8733:[442,11,815,56,760,"462 197l-50 65c-36 46 -95 122 -175 122c-77 0 -139 -72 -139 -169c0 -88 48 -183 143 -183c88 0 163 64 221 165zM760 -10c-5 -1 -11 -1 -16 -1c-82 0 -151 49 -202 106c-20 22 -27 34 -48 60c-36 -60 -117 -166 -240 -166c-116 0 -198 108 -198 226 c0 119 83 227 201 227c82 0 151 -49 202 -106c20 -22 27 -34 48 -60c36 60 117 166 240 166h13v-43c-88 0 -163 -64 -221 -165l50 -65c35 -45 93 -119 171 
-122v-57"],8734:[442,11,1000,56,945,"462 197l-50 65c-36 46 -95 122 -175 122c-77 0 -139 -72 -139 -169c0 -88 48 -183 143 -183c88 0 163 64 221 165zM507 276c36 60 117 166 240 166c116 0 198 -108 198 -226c0 -119 -83 -227 -201 -227c-82 0 -151 49 -202 106c-20 22 -27 34 -48 60 c-36 -60 -117 -166 -240 -166c-116 0 -198 108 -198 226c0 119 83 227 201 227c82 0 151 -49 202 -106c20 -22 27 -34 48 -60zM539 234l50 -65c36 -46 95 -122 175 -122c77 0 139 72 139 169c0 88 -48 183 -143 183c-88 0 -163 -64 -221 -165"],8739:[698,97,213,86,126,"126 -89l-40 -8v787l40 8v-787"],8741:[738,167,392,86,306,"126 -159l-40 -8v897l40 8v-897zM306 -159l-40 -8v897l40 8v-897"],8743:[714,4,775,11,768,"404 713l364 -711l-42 -6l-328 641l-347 -636l-29 -2l-11 11l384 704"],8744:[688,12,775,6,756,"756 684l-374 -696h-15l-361 689l42 7l327 -625l338 629h39"],8745:[598,2,666,55,609,"55 384c0 128 129 214 277 214c149 0 277 -86 277 -214v-386h-40v386c0 107 -113 174 -237 174c-123 0 -237 -67 -237 -174v-386h-40v386"],8746:[578,22,666,55,609,"95 191c0 -106 114 -173 237 -173c124 0 237 67 237 173v387h40v-387c0 -128 -128 -213 -277 -213c-148 0 -277 85 -277 213v387h40v-387"],8747:[950,161,556,49,507,"319 401l-1 -201c0 -43 -1 -87 -3 -131c-2 -51 -11 -230 -149 -230c-58 0 -117 25 -117 87c0 53 45 54 49 54c24 0 56 -18 56 -54c0 -18 -6 -34 -22 -44c2 -2 8 -6 12 -7c6 -3 8 -3 15 -4c2 -1 3 -1 9 -1c76 0 78 103 78 214c0 38 -3 205 -3 328c0 81 1 162 2 243 c1 106 3 295 146 295c54 0 116 -23 116 -87c0 -52 -44 -54 -49 -54c-25 0 -56 18 -56 54c0 18 6 34 22 44c-1 1 -6 5 -12 7c-6 3 -8 3 -15 4c-1 1 -3 1 -8 1c-73 0 -73 -101 -73 -223c0 -48 3 -265 3 -295"],8756:[455,12,569,50,517,"228 398c0 31 26 57 58 57c31 0 56 -26 56 -57s-25 -57 -56 -57c-32 0 -58 26 -58 57zM460 -12c-31 0 -57 26 -57 58c0 30 26 56 57 56s57 -26 57 -56c0 -32 -26 -58 -57 -58zM107 -12c-31 0 -57 26 -57 58c0 30 26 56 57 56s57 -26 57 -56c0 -32 -26 -58 -57 -58"],8757:[455,12,569,50,517,"228 44c0 -30 26 -56 58 -56c31 0 56 26 56 56c0 32 -25 58 -56 58c-32 0 -58 -26 -58 -58zM460 455c-31 0 -57 -26 -57 -58c0 -31 26 -56 57 -56s57 25 57 56c0 32 -26 58 -57 58zM107 455c-31 0 -57 -26 -57 -58c0 -31 26 -56 57 -56s57 25 57 56c0 32 -26 58 -57 58"],8764:[347,-178,551,22,530,"281 291l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66 c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8765:[347,-178,551,22,530,"271 291l-74 -46c-22 -14 -47 -26 -74 -26c-19 0 -32 7 -44 17c-14 10 -26 32 -36 50l-21 -11c11 -26 28 -52 47 -70c15 -14 41 -27 65 -27c38 0 73 16 105 36l99 63c24 15 51 28 80 28c19 0 35 -5 50 -18c12 -11 28 -37 40 -59l22 12c-12 24 -27 49 -42 66 c-13 15 -28 28 -47 34c-11 4 -23 7 -35 7c-19 0 -38 -6 -56 -14c-28 -11 -54 -26 -79 -42"],8768:[422,77,243,54,189,"150 172l-36 -79c-11 -24 -19 -50 -16 -77c3 -19 11 -31 23 -42c12 -12 35 -21 54 -29l-8 -22c-27 8 -55 21 -76 37c-15 13 -32 38 -35 61c-5 38 6 75 22 109l50 107c11 25 21 54 17 83c-3 18 -10 34 -24 47c-13 10 -41 23 -64 32l9 23c25 -9 52 -20 71 -33 c16 -11 31 -24 40 -42c5 -10 10 -22 11 -34c3 -19 -1 -38 -6 -57c-8 -29 -19 -57 -32 -84"],8769:[454,-32,551,22,530,"372 449l5 -11l-70 -163l48 -30c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-26 16l-85 -198l-32 8l-3 14l86 198l-39 25c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18 c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 
-14c25 -10 48 -22 70 -36l67 155l11 2"],8770:[397,-103,597,54,563,"557 357h-503l7 40h502zM314 216l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12 c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8771:[396,-101,597,54,563,"557 101h-503l7 40h502zM314 340l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12 c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8773:[597,-102,597,54,563,"557 299h-503l7 40h502zM557 102h-503l7 40h502zM314 541l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18 c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8774:[597,8,597,54,563,"402 409l5 -11l-25 -59h181l-6 -40h-193l-67 -157h266l-6 -40h-278l-47 -110l-32 8l-3 14l38 88h-181l7 40h191l68 157h-266l7 40h276l32 73l11 2zM314 541l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70 c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8776:[427,-108,551,22,530,"281 371l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66 c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42zM281 221l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28 c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8778:[546,-101,597,54,563,"557 101h-503l7 40h502zM313 490l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12 c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42zM314 340l74 -46c22 -14 47 -26 74 -26c19 0 32 7 44 17c14 10 26 32 36 50l21 -11c-11 -26 -28 -52 -47 -70c-15 -14 -41 -27 -65 -27c-38 0 -73 16 -105 36l-99 63 c-24 15 -51 28 -80 28c-19 0 -35 -5 -50 -18c-12 -11 -28 -37 -40 -59l-22 12c12 24 27 49 42 66c13 15 28 28 47 34c11 4 23 7 35 7c19 0 38 -6 56 -14c28 -11 54 -26 79 -42"],8800:[720,192,756,54,722,"598 715l5 -11l-145 -335h264l-6 -40h-276l-68 -157h350l-6 -40h-361l-141 -324l-32 8l-3 14l131 302h-256l7 40h267l68 157h-342l7 40h352l152 349l11 2"],8801:[465,-33,830,81,749,"90 465h659l-9 -39h-659zM90 268h659l-9 -38h-659zM90 72h659l-9 -39h-659"],8804:[648,150,807,79,700,"680 -150l-572 260l-2 13l11 22l13 2l568 -261l2 -12l-10 -20zM700 622l-2 -9l-522 -241v-12l519 -241l5 -10l-11 -23l-10 -5l-600 280v12l601 275l11 -3"],8805:[647,149,807,102,724,"680 148l10 -4l10 -20l-2 -12l-568 -261l-13 2l-11 22l2 13zM104 610l-2 10l10 24l10 3l602 
-275v-12l-600 -280l-10 5l-8 24l4 9l518 243v9"],8806:[800,0,756,54,722,"696 772l-2 -8l-538 -205v-12l535 -206l5 -9l-9 -24l-10 -6l-617 239v26l618 233l11 -4zM716 197h-662l7 40h661zM716 0h-662l7 40h661"],8807:[800,0,756,54,722,"60 772l7 24l11 4l618 -233v-26l-617 -239l-10 6l-9 24l5 9l535 206v12l-538 205zM716 197h-662l7 40h661zM716 0h-662l7 40h661"],8808:[800,93,756,54,722,"696 772l-2 -8l-538 -205v-12l535 -206l5 -9l-9 -24l-10 -6l-617 239v26l618 233l11 -4zM478 324l5 -11l-33 -76h272l-6 -40h-283l-68 -157h357l-6 -40h-368l-40 -93l-32 8l-3 14l31 71h-250l7 40h260l68 157h-335l7 40h345l39 90l11 2"],8809:[800,93,756,54,722,"60 772l7 24l11 4l618 -233v-26l-617 -239l-10 6l-9 24l5 9l535 206v12l-538 205zM478 324l5 -11l-33 -76h272l-6 -40h-283l-68 -157h357l-6 -40h-368l-40 -93l-32 8l-3 14l31 71h-250l7 40h260l68 157h-335l7 40h345l39 90l11 2"],8814:[720,192,756,59,680,"585 715l5 -11l-111 -256l181 83l11 -3l9 -23l-2 -9l-223 -103l-104 -240l324 -151l5 -10l-11 -23l-10 -5l-325 151l-133 -307l-32 8l-3 14l132 303l-239 111v12l365 167l128 295l11 2zM314 170l86 197l-244 -112v-12"],8815:[720,192,756,76,698,"585 715l5 -11l-144 -333l252 -115v-12l-386 -180l-111 -256l-32 8l-3 14l90 208l-158 -74l-10 5l-8 24l4 9l197 92l111 256l-314 144l-2 10l10 24l10 3l313 -143l143 330l11 2zM429 333l-92 -212l265 124v9"],8816:[720,192,807,79,700,"585 715l5 -11l-53 -121l143 65l11 -3l9 -23l-2 -9l-184 -85l-118 -270l299 -139l5 -10l-11 -23l-10 -5l-299 139l-68 -157l386 -177l2 -12l-10 -20l-10 -4l-385 175l-94 -217l-32 8l-3 14l92 212l-150 68l-2 13l11 22l13 2l145 -66l68 157l-264 123v12l403 185l70 160 l11 2zM359 275l99 227l-282 -130v-12"],8817:[720,192,807,102,724,"585 715l5 -11l-98 -226l232 -106v-12l-355 -166l-102 -234l413 188l10 -4l10 -20l-2 -12l-456 -210l-41 -94l-32 8l-3 14l20 47l-56 -26l-13 2l-11 22l2 13l104 47l101 233l-189 -88l-10 5l-8 24l4 9l228 107l100 232l-334 153l-2 10l10 24l10 3l333 -152l97 223l11 2z M475 440l-82 -189l235 110v9"],8818:[663,52,807,79,716,"716 635l-3 -8l-537 -205v-12l534 -206l6 -9l-10 -24l-9 -6l-618 239v26l619 233l11 -4zM390 61l89 -46c26 -14 56 -26 89 -26c23 0 38 7 53 17c16 10 31 32 43 50l25 -11c-13 -26 -34 -52 -56 -70c-18 -14 -50 -27 -78 -27c-46 0 -88 16 -126 36l-119 63 c-29 15 -61 28 -96 28c-23 0 -42 -5 -60 -18c-15 -11 -34 -37 -48 -59l-27 12c15 24 33 49 51 66c15 15 33 28 56 34c13 4 28 7 42 7c23 0 46 -6 67 -14c34 -11 65 -26 95 -42"],8819:[663,52,807,88,725,"88 635l7 24l11 4l619 -233v-26l-618 -239l-9 6l-10 24l6 9l534 206v12l-537 205zM424 61l89 -46c26 -14 56 -26 89 -26c23 0 38 7 53 17c16 10 31 32 43 50l25 -11c-13 -26 -34 -52 -56 -70c-18 -14 -50 -27 -78 -27c-46 0 -88 16 -126 36l-119 63c-29 15 -61 28 -96 28 c-23 0 -42 -5 -60 -18c-15 -11 -34 -37 -48 -59l-27 12c15 24 33 49 51 66c15 15 33 28 56 34c13 4 28 7 42 7c23 0 46 -6 67 -14c34 -11 65 -26 95 -42"],8822:[766,119,807,71,716,"716 738l-3 -8l-537 -205v-12l534 -206l6 -9l-10 -24l-9 -6l-618 239v26l619 233l11 -4zM71 351l7 24l11 4l619 -233v-26l-618 -239l-9 6l-10 24l6 9l534 206v12l-537 205"],8823:[764,120,807,72,716,"72 736l7 24l11 4l619 -233v-26l-618 -239l-9 6l-10 24l6 9l534 206v12l-537 205zM716 350l-3 -8l-537 -205v-12l534 -206l6 -9l-10 -24l-9 -6l-618 239v26l619 233l11 -4"],8834:[541,41,777,83,673,"388 501c-145 0 -265 -111 -265 -251s120 -251 265 -251h285v-40h-285c-165 0 -305 127 -305 291s140 291 305 291h285v-40h-285"],8835:[541,41,777,103,693,"388 541c165 0 305 -127 305 -291s-140 -291 -305 -291h-285v40h285c145 0 265 111 265 251s-120 251 -265 251h-285v40h285"],8838:[636,143,777,83,673,"388 596c-145 0 -265 -111 -265 -251c0 -141 120 -251 265 -251h285v-40h-285c-165 0 -305 127 -305 291s140 291 305 
291h285v-40h-285zM123 -143v40h550v-40h-550"],8839:[636,143,777,103,693,"388 636c165 0 305 -127 305 -291s-140 -291 -305 -291h-285v40h285c145 0 265 110 265 251c0 140 -120 251 -265 251h-285v40h285zM652 -103v-40h-549v40h549"],8840:[720,192,777,83,673,"596 715l4 -11l-29 -68h102v-40h-119l-216 -498c16 -3 33 -4 50 -4h285v-40h-285c-23 0 -45 2 -67 7l-71 -164h423v-40h-440l-21 -49l-32 8l-4 14l12 27h-65v40h83l76 175c-115 41 -199 146 -199 273c0 164 140 291 305 291h139l35 82l12 2zM388 596 c-145 0 -265 -111 -265 -251c0 -110 74 -201 175 -236l211 487h-121"],8841:[720,192,777,103,693,"596 715l4 -11l-49 -113c84 -52 142 -141 142 -246c0 -164 -140 -291 -305 -291h-70l-68 -157h402v-40h-419l-21 -49l-32 8l-4 14l12 27h-85v40h103l68 157h-171v40h188l208 479c-34 15 -71 23 -111 23h-285v40h285c45 0 88 -10 127 -27l47 109l12 2zM535 553l-199 -459 h52c145 0 265 110 265 251c0 88 -47 164 -118 208"],8842:[636,222,777,83,673,"123 -143v40h251l33 76l11 2l22 -5l5 -11l-27 -62h255v-40h-272l-34 -79l-32 8l-3 14l25 57h-234zM388 596c-145 0 -265 -111 -265 -251c0 -141 120 -251 265 -251h285v-40h-285c-165 0 -305 127 -305 291s140 291 305 291h285v-40h-285"],8843:[636,222,777,103,693,"652 -103v-40h-251l-34 -79l-32 8l-3 14l25 57h-254v40h271l33 76l11 2l22 -5l5 -11l-27 -62h234zM388 636c165 0 305 -127 305 -291s-140 -291 -305 -291h-285v40h285c145 0 265 110 265 251c0 140 -120 251 -265 251h-285v40h285"],8846:[578,22,665,55,609,"312 482h40v-157h157v-40h-157v-156h-40v156h-156v40h156v157zM95 191c0 -106 114 -173 237 -173c124 0 237 67 237 173v387h40v-387c0 -128 -128 -213 -277 -213c-148 0 -277 85 -277 213v387h40v-387"],8849:[636,143,1000,94,693,"134 596v-502h559v-40h-599v582h599v-40h-559zM103 -143v40h590v-40h-590"],8850:[636,143,1000,83,681,"83 636h598v-582h-598v40h558v502h-558v40zM673 -103v-40h-590v40h590"],8853:[583,83,777,55,722,"722 250c0 -182 -148 -333 -334 -333c-183 0 -333 149 -333 333c0 182 148 333 334 333c183 0 333 -149 333 -333zM80 263h296v295c-156 -8 -286 -127 -296 -295zM401 558v-295h296c-10 167 -139 287 -296 295zM376 -58v296h-296c10 -171 141 -288 296 -296zM697 238 h-296v-296c154 8 286 125 296 296"],8854:[583,83,777,55,722,"722 250c0 -182 -148 -333 -334 -333c-183 0 -333 149 -333 333c0 182 148 333 334 333c183 0 333 -149 333 -333zM80 263h617c-10 176 -152 295 -309 295c-153 0 -298 -117 -308 -295zM697 238h-617c10 -179 154 -296 309 -296c151 0 298 114 308 296"],8855:[583,83,777,55,722,"722 250c0 -182 -148 -333 -334 -333c-183 0 -333 149 -333 333c0 182 148 333 334 333c183 0 333 -149 333 -333zM190 465l199 -198l208 209c-82 77 -174 82 -209 82c-126 0 -207 -78 -207 -82c0 -2 7 -9 9 -11zM161 41l209 209l-209 209c-52 -59 -81 -132 -81 -209 c0 -64 20 -141 81 -209zM615 459l-208 -209l209 -209c52 59 81 132 81 209c0 82 -34 158 -82 209zM587 35l-199 198l-208 -209c82 -77 174 -82 209 -82c126 0 207 78 207 82c0 2 -7 9 -9 11"],8856:[583,83,777,55,722,"722 250c0 -182 -148 -333 -334 -333c-183 0 -333 149 -333 333c0 182 148 333 334 333c183 0 333 -149 333 -333zM171 51l426 425c-82 77 -174 82 -209 82c-168 0 -308 -136 -308 -308c0 -127 79 -207 82 -207c1 0 1 1 9 8zM615 459l-435 -435c82 -77 174 -82 209 -82 c168 0 308 136 308 308c0 82 -34 158 -82 209"],8857:[583,83,777,55,722,"722 250c0 -182 -148 -333 -334 -333c-183 0 -333 149 -333 333c0 182 148 333 334 333c183 0 333 -149 333 -333zM389 -58c168 0 308 136 308 308c0 169 -137 308 -309 308c-168 0 -308 -136 -308 -308c0 -169 137 -308 309 -308zM457 250c0 -36 -29 -69 -69 -69 c-37 0 -68 31 -68 69c0 36 29 69 69 69c37 0 68 -31 68 -69"],8866:[694,0,673,55,618,"55 694h40v-327h523v-40h-523v-327h-40v694"],8867:[694,0,673,55,618,"618 
694h-40v-327h-523v-40h523v-327h40v694"],8868:[684,0,875,55,820,"55 644v40l765 -1v-40h-362v-643h-40v644h-363"],8869:[684,0,875,55,820,"55 40v-40l765 1v40h-362v643h-40v-644h-363"],8900:[496,-4,500,3,495,"495 250l-246 -246l-246 246l246 246zM438 250l-189 189l-189 -189l189 -189"],8901:[379,-245,277,72,206,"72 312c0 37 30 67 66 67c38 0 68 -30 68 -67s-30 -67 -68 -67c-36 0 -66 30 -66 67"],8943:[319,-185,768,77,691,"77 252c0 37 30 67 67 67s67 -30 67 -67s-30 -67 -67 -67s-67 30 -67 67zM317 252c0 37 30 67 67 67s67 -30 67 -67s-30 -67 -67 -67s-67 30 -67 67zM557 252c0 37 30 67 67 67s67 -30 67 -67s-30 -67 -67 -67s-67 30 -67 67"],8945:[533,-60,627,76,550,"191 419c-26 -26 -68 -26 -94 0c-27 26 -27 69 0 95c26 26 68 26 94 0c27 -26 27 -69 0 -95zM361 250c-26 -27 -69 -27 -95 0c-26 26 -26 68 0 94c26 27 69 27 95 0c26 -26 26 -68 0 -94zM531 80c-26 -26 -69 -26 -95 0s-26 69 0 95s69 26 95 0s26 -69 0 -95"],8968:[980,0,511,174,401,"214 940v-940h-40v980h227v-40h-187"],8969:[980,0,511,41,269,"41 980h228v-980h-40v940h-188v40"],8970:[980,0,511,174,401,"214 40h187v-40h-227v980h40v-940"],8971:[980,0,511,41,269,"269 980v-980h-228v40h188v940h40"],9180:[770,-582,1037,56,981,"80 582h-24c4 48 42 90 79 118c43 30 95 42 146 52c80 14 161 18 242 18c77 0 155 -6 230 -20c54 -12 108 -28 154 -58c35 -26 67 -70 74 -110h-27c-8 32 -38 64 -72 80c-47 22 -98 34 -150 42c-71 10 -143 14 -215 14c-86 0 -172 -4 -256 -18c-29 -6 -58 -12 -86 -22 c-20 -6 -39 -16 -55 -30c-20 -16 -36 -46 -40 -66"],9181:[-32,222,1037,56,981,"954 -32h27c-7 -34 -28 -66 -54 -90c-44 -40 -101 -60 -158 -74c-83 -20 -167 -26 -252 -26c-84 0 -169 4 -252 20c-53 12 -106 26 -148 62c-33 26 -57 78 -61 108h24c7 -26 22 -58 47 -78c15 -12 33 -20 51 -26c27 -8 52 -16 79 -20c87 -12 176 -14 264 -14 c79 0 158 6 235 20c53 10 106 24 151 54c22 16 38 40 47 64"],9182:[824,-528,1020,56,964,"80 528h-24c0 40 0 92 6 116c4 16 12 28 23 38c21 20 50 24 77 26c24 4 70 4 106 4c69 0 174 0 202 24c9 8 15 20 18 34c5 16 5 36 5 54h26c0 -40 1 -64 20 -86c14 -16 37 -18 58 -22c35 -4 100 -4 151 -4h106c28 -4 56 -8 78 -26c11 -8 20 -20 25 -34 c7 -22 7 -82 7 -124h-24c0 30 0 60 -8 88c-3 12 -9 18 -20 24c-12 8 -27 12 -42 16c-39 6 -78 6 -118 6c-41 0 -82 0 -123 6c-23 4 -50 8 -70 20c-26 16 -45 42 -51 58h-1c-7 -20 -18 -38 -38 -52c-23 -16 -49 -24 -76 -26c-43 -6 -86 -6 -129 -6c-40 0 -80 -2 -119 -8 c-14 -4 -27 -8 -39 -16c-11 -6 -17 -18 -20 -30c-6 -24 -6 -52 -6 -80"],9183:[26,268,1020,56,964,"56 26h24c0 -26 1 -56 6 -80c3 -12 9 -22 20 -30c12 -8 25 -12 39 -14c39 -6 79 -8 119 -8c43 0 86 -2 129 -6c27 -4 53 -10 76 -28c20 -14 31 -32 38 -52h1c6 18 25 42 51 60c20 12 47 16 70 18c41 6 82 8 123 8c40 0 79 0 118 6c15 2 30 6 42 14c11 6 17 14 20 26 c7 28 8 58 8 86h24c0 -40 0 -102 -7 -122c-5 -14 -14 -26 -25 -36c-22 -16 -50 -22 -78 -24c-10 -2 -71 -2 -106 -2c-51 0 -116 0 -151 -4c-21 -2 -44 -6 -58 -22c-19 -22 -20 -46 -20 -84h-26c0 18 0 36 -5 54c-3 12 -9 24 -18 32c-28 26 -133 26 -202 26 c-36 0 -82 0 -106 2c-27 4 -56 8 -77 26c-11 10 -19 24 -23 38c-6 24 -6 78 -6 116"],10216:[737,237,388,107,330,"150 250l180 -473l-37 -14l-186 487l186 487l37 -14"],10217:[737,237,388,57,280,"94 737l186 -487l-186 -487l-37 14l180 473l-180 473"],10229:[500,0,1610,55,1553,"1553 270h-1428c38 47 90 126 115 230h-40c-31 -130 -113 -220 -145 -250c30 -28 114 -119 145 -250h40c-25 105 -78 184 -115 230h1428v40"],10230:[500,0,1610,55,1553,"55 270h1428c-38 47 -90 126 -115 230h40c31 -130 113 -220 145 -250c-30 -28 -114 -119 -145 -250h-40c25 105 78 184 115 230h-1428v40"],10231:[500,0,1700,57,1644,"1574 230h-1447c38 -47 90 -126 115 -230h-40c-31 130 -113 220 -145 250c30 28 114 119 145 250h40c-25 -105 -78 
-184 -115 -230h1447c-38 47 -90 126 -115 230h40c31 -130 113 -220 145 -250c-30 -28 -114 -119 -145 -250h-40c25 105 78 184 115 230"],10232:[598,98,1700,55,1622,"55 250c145 72 257 196 312 348h40c-31 -86 -78 -163 -138 -230h1353v-40h-1392c-31 -29 -64 -55 -99 -78c35 -23 68 -49 99 -78h1392v-40h-1353c60 -67 107 -144 138 -230h-40c-55 152 -167 276 -312 348"],10233:[598,98,1700,55,1622,"1622 250c-145 72 -257 196 -312 348h-40c31 -86 78 -163 138 -230h-1353v-40h1392c31 -29 64 -55 99 -78c-35 -23 -68 -49 -99 -78h-1392v-40h1353c-60 -67 -107 -144 -138 -230h40c55 152 167 276 312 348"],10234:[598,98,1700,33,1665,"1665 250c-123 -83 -214 -206 -257 -348h-41c26 83 66 161 118 230h-1272c52 -69 92 -147 118 -230h-41c-43 142 -134 265 -257 348c123 83 214 206 257 348h41c-26 -83 -66 -161 -118 -230h1272c-52 69 -92 147 -118 230h41c43 -142 134 -265 257 -348zM1518 172 c25 28 52 54 81 78c-29 24 -56 50 -81 78h-1338c-25 -28 -52 -54 -81 -78c29 -24 56 -50 81 -78h1338"],10236:[500,0,1690,56,1634,"56 50v400h40v-180h1468c-38 47 -90 126 -115 230h40c31 -130 113 -220 145 -250c-30 -28 -114 -119 -145 -250h-40c25 105 78 184 115 230h-1468v-180h-40"]};MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Main/Regular/Main.js"); | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/image/LightboxNano.js.uncompressed.js | define("dojox/image/LightboxNano", ["dojo", "dojo/fx"], function(dojo, fx) {
var abs = "absolute",
vis = "visibility",
getViewport = function(){
// summary: Returns the dimensions and scroll position of the viewable area of a browser window
var scrollRoot = (dojo.doc.compatMode == "BackCompat") ? dojo.body() : dojo.doc.documentElement,
scroll = dojo._docScroll();
return { w: scrollRoot.clientWidth, h: scrollRoot.clientHeight, l: scroll.x, t: scroll.y };
}
;
return dojo.declare("dojox.image.LightboxNano", null, {
// summary:
// A simple "nano" version of the lightbox.
//
// description:
// Very lightweight lightbox which only displays a larger image. There is
// no support for a caption or description. The lightbox can be closed by
	//		clicking anywhere or pressing any key. This widget is intended to be
	//		used on <a> and <img> tags. Upon creation, if the domNode is an <img> tag,
	//		then it is wrapped in an <a> tag, then a <div class="enlarge"> is placed
	//		inside the <a> and can be styled to display an icon indicating that the
	//		original image can be enlarged.
//
// example:
// | <a dojoType="dojox.image.LightboxNano" href="/path/to/largeimage.jpg"><img src="/path/to/thumbnail.jpg"></a>
//
// example:
// | <img dojoType="dojox.image.LightboxNano" src="/path/to/thumbnail.jpg" href="/path/to/largeimage.jpg">
// href: string
// URL to the large image to show in the lightbox.
href: "",
// duration: int
// The delay in milliseconds of the LightboxNano open and close animation.
duration: 500,
// preloadDelay: int
// The delay in milliseconds after the LightboxNano is created before preloading the larger image.
preloadDelay: 5000,
constructor: function(/*Object?*/p, /*DomNode?*/n){
// summary: Initializes the DOM node and connect onload event
var _this = this;
dojo.mixin(_this, p);
n = _this._node = dojo.byId(n);
// if we have a origin node, then prepare it to show the LightboxNano
if(n){
if(!/a/i.test(n.tagName)){
var a = dojo.create("a", { href: _this.href, "class": n.className }, n, "after");
n.className = "";
a.appendChild(n);
n = a;
}
dojo.style(n, "position", "relative");
_this._createDiv("dojoxEnlarge", n);
dojo.setSelectable(n, false);
_this._onClickEvt = dojo.connect(n, "onclick", _this, "_load");
}
if(_this.href){
setTimeout(function(){
(new Image()).src = _this.href;
_this._hideLoading();
}, _this.preloadDelay);
}
},
destroy: function(){
		// summary: Destroys the LightboxNano and its DOM node
var a = this._connects || [];
a.push(this._onClickEvt);
dojo.forEach(a, dojo.disconnect);
dojo.destroy(this._node);
},
_createDiv: function(/*String*/cssClass, /*DomNode*/refNode, /*boolean*/display){
// summary: Creates a div for the enlarge icon and loading indicator layers
return dojo.create("div", { // DomNode
"class": cssClass,
style: {
position: abs,
display: display ? "" : "none"
}
}, refNode);
},
_load: function(/*Event*/e){
// summary: Creates the large image and begins to show it
var _this = this;
e && dojo.stopEvent(e);
if(!_this._loading){
_this._loading = true;
_this._reset();
var i = _this._img = dojo.create("img", {
style: {
visibility: "hidden",
cursor: "pointer",
position: abs,
top: 0,
left: 0,
zIndex: 9999999
}
}, dojo.body()),
ln = _this._loadingNode,
n = dojo.query("img", _this._node)[0] || _this._node,
a = dojo.position(n, true),
c = dojo.contentBox(n),
b = dojo._getBorderExtents(n)
;
if(ln == null){
_this._loadingNode = ln = _this._createDiv("dojoxLoading", _this._node, true);
var l = dojo.marginBox(ln);
dojo.style(ln, {
left: parseInt((c.w - l.w) / 2) + "px",
top: parseInt((c.h - l.h) / 2) + "px"
});
}
c.x = a.x - 10 + b.l;
c.y = a.y - 10 + b.t;
_this._start = c;
_this._connects = [dojo.connect(i, "onload", _this, "_show")];
i.src = _this.href;
}
},
_hideLoading: function(){
// summary: Hides the animated loading indicator
if(this._loadingNode){
dojo.style(this._loadingNode, "display", "none");
}
this._loadingNode = false;
},
_show: function(){
// summary: The image is now loaded, calculate size and display
var _this = this,
vp = getViewport(),
w = _this._img.width,
h = _this._img.height,
vpw = parseInt((vp.w - 20) * 0.9),
vph = parseInt((vp.h - 20) * 0.9),
dd = dojo.doc,
bg = _this._bg = dojo.create("div", {
style: {
backgroundColor: "#000",
opacity: 0.0,
position: abs,
zIndex: 9999998
}
}, dojo.body()),
ln = _this._loadingNode
;
if(_this._loadingNode){
_this._hideLoading();
}
dojo.style(_this._img, {
border: "10px solid #fff",
visibility: "visible"
});
dojo.style(_this._node, vis, "hidden");
_this._loading = false;
_this._connects = _this._connects.concat([
dojo.connect(dd, "onmousedown", _this, "_hide"),
dojo.connect(dd, "onkeypress", _this, "_key"),
dojo.connect(window, "onresize", _this, "_sizeBg")
]);
if(w > vpw){
h = h * vpw / w;
w = vpw;
}
if(h > vph){
w = w * vph / h;
h = vph;
}
_this._end = {
x: (vp.w - 20 - w) / 2 + vp.l,
y: (vp.h - 20 - h) / 2 + vp.t,
w: w,
h: h
};
_this._sizeBg();
dojo.fx.combine([
_this._anim(_this._img, _this._coords(_this._start, _this._end)),
_this._anim(bg, { opacity: 0.5 })
]).play();
},
_sizeBg: function(){
// summary: Resize the background to fill the page
var dd = dojo.doc.documentElement;
dojo.style(this._bg, {
top: 0,
left: 0,
width: dd.scrollWidth + "px",
height: dd.scrollHeight + "px"
});
},
_key: function(/*Event*/e){
// summary: A key was pressed, so hide the lightbox
dojo.stopEvent(e);
this._hide();
},
_coords: function(/*Object*/s, /*Object*/e){
// summary: Returns animation parameters with the start and end coords
return { // Object
left: { start: s.x, end: e.x },
top: { start: s.y, end: e.y },
width: { start: s.w, end: e.w },
height: { start: s.h, end: e.h }
};
},
_hide: function(){
// summary: Closes the lightbox
var _this = this;
dojo.forEach(_this._connects, dojo.disconnect);
_this._connects = [];
dojo.fx.combine([
_this._anim(_this._img, _this._coords(_this._end, _this._start), "_reset"),
_this._anim(_this._bg, {opacity:0})
]).play();
},
_reset: function(){
// summary: Destroys the lightbox
dojo.style(this._node, vis, "visible");
dojo.destroy(this._img);
dojo.destroy(this._bg);
this._img = this._bg = null;
this._node.focus();
},
_anim: function(/*DomNode*/node, /*Object*/args, /*Function*/onEnd){
// summary: Creates the lightbox open/close and background fadein/out animations
return dojo.animateProperty({ // dojo.Animation
node: node,
duration: this.duration,
properties: args,
onEnd: onEnd ? dojo.hitch(this, onEnd) : null
});
},
show: function(/*Object?*/args){
// summary:
			//		Shows this LightboxNano programmatically. Allows passing a new href and
			//		a programmatic origin.
//
// args: Object?
// An object with optional members of `href` and `origin`.
			//		`origin` can be a String|Id of a DomNode to use when
			//		animating the opening of the image (the 'box' effect starts
// from this origin point. eg: { origin: e.target })
// If there's no origin, it will use the center of the viewport.
// The `href` member is a string URL for the image to be
			//		displayed. Omitting either of these members will revert to
// the default href (which could be absent in some cases) and
// the original srcNodeRef for the widget.
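			//
			// example:
			//		A minimal programmatic-usage sketch (the element id and image path
			//		are illustrative only, not part of the widget API):
			// |	var lb = new dojox.image.LightboxNano({ href: "/path/to/largeimage.jpg" });
			// |	dojo.connect(dojo.byId("thumb"), "onclick", function(e){
			// |		lb.show({ href: "/path/to/largeimage.jpg", origin: e.target });
			// |	});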
args = args || {};
this.href = args.href || this.href;
var n = dojo.byId(args.origin),
vp = getViewport();
// if we don't have a valid origin node, then create one as a reference
// that is centered in the viewport
this._node = n || dojo.create("div", {
style: {
position: abs,
width: 0,
					height: 0,
left: (vp.l + (vp.w / 2)) + "px",
top: (vp.t + (vp.h / 2)) + "px"
}
}, dojo.body())
;
this._load();
// if we don't have a valid origin node, then destroy the centered reference
// node since load() has already been called and it's not needed anymore.
if(!n){
dojo.destroy(this._node);
}
}
});
}); | PypiClean |
/ARC_Alkali_Rydberg_Calculator-3.3.0-cp311-cp311-win_amd64.whl/arc/calculations_atom_pairstate.py | from __future__ import division, print_function, absolute_import
from arc._database import sqlite3
from arc.wigner import Wigner6j, CG, WignerDmatrix
from arc.alkali_atom_functions import (
_atomLightAtomCoupling,
singleAtomState,
compositeState,
)
from scipy.constants import physical_constants, pi
import gzip
import sys
import os
import datetime
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
from arc.calculations_atom_single import StarkMap
from arc.alkali_atom_functions import (
printStateStringLatex,
printStateString,
printStateLetter,
)
from arc.divalent_atom_functions import DivalentAtom
from scipy.special import factorial
from scipy.sparse.linalg import eigsh
from scipy.sparse import csr_matrix
from scipy.optimize import curve_fit
from scipy.constants import e as C_e
from scipy.constants import h as C_h
from scipy.constants import c as C_c
import numpy as np
from math import exp, sqrt
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["xtick.minor.visible"] = True
mpl.rcParams["ytick.minor.visible"] = True
mpl.rcParams["xtick.major.size"] = 8
mpl.rcParams["ytick.major.size"] = 8
mpl.rcParams["xtick.minor.size"] = 4
mpl.rcParams["ytick.minor.size"] = 4
mpl.rcParams["xtick.direction"] = "in"
mpl.rcParams["ytick.direction"] = "in"
mpl.rcParams["xtick.top"] = True
mpl.rcParams["ytick.right"] = True
mpl.rcParams["font.family"] = "serif"
# Python 3 compatibility: provide xrange as an alias of range
if sys.version_info > (2,):
xrange = range
DPATH = os.path.join(os.path.expanduser("~"), ".arc-data")
__all__ = ["PairStateInteractions", "StarkMapResonances"]
class PairStateInteractions:
"""
Calculates Rydberg level diagram (spaghetti) for the given pair state
Initializes Rydberg level spaghetti calculation for the given atom
species (or for two atoms of different species) in the vicinity
of the given pair state. For details of calculation see
Ref. [1]_. For a quick start point example see
`interactions example snippet`_.
For inter-species calculations see
`inter-species interaction calculation snippet`_.
.. _`interactions example snippet`:
./Rydberg_atoms_a_primer.html#Short-range-interactions
.. _`inter-species interaction calculation snippet`:
./ARC_3_0_introduction.html#Inter-species-pair-state-calculations
Parameters:
atom (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
= {
:obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
Select the alkali metal for energy level
diagram calculation
n (int):
principal quantum number for the *first* atom
l (int):
orbital angular momentum for the *first* atom
j (float):
total angular momentum for the *first* atom
nn (int):
principal quantum number for the *second* atom
ll (int):
orbital angular momentum for the *second* atom
jj (float):
total angular momentum for the *second* atom
m1 (float):
projection of the total angular momentum on z-axis
for the *first* atom
m2 (float):
projection of the total angular momentum on z-axis
for the *second* atom
interactionsUpTo (int):
Optional. If set to 1, includes only
dipole-dipole interactions. If set to 2 includes interactions
up to quadrupole-quadrupole. Default value is 1.
s (float):
optional, spin state of the first atom. Default value
of 0.5 is correct for :obj:`arc.alkali_atom_functions.AlkaliAtom`
but for :obj:`arc.divalent_atom_functions.DivalentAtom`
it has to be explicitly set to 0 or 1 for
singlet and triplet states respectively.
**If `s2` is not specified, it is assumed that the second
atom is in the same spin state.**
s2 (float):
            optional, spin state of the second atom. If not
specified (left to default value None) it will assume spin
state of the first atom.
atom2 (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
optional,
            specifies atomic species for the second atom, enabling
calculation of **inter-species pair-state interactions**.
If not specified (left to default value None) it will assume
            that the second atom is of the same species as the first atom.
References:
.. [1] T. G Walker, M. Saffman, PRA **77**, 032723 (2008)
https://doi.org/10.1103/PhysRevA.77.032723
Examples:
        **Advanced interfacing of pair-state interactions calculations
(PairStateInteractions class).** This
is an advanced example intended for building up extensions to the
existing code. If you want to directly access the pair-state
interaction matrix, constructed by :obj:`defineBasis`,
you can assemble it easily from diagonal part
(stored in :obj:`matDiagonal` ) and off-diagonal matrices whose
spatial dependence is :math:`R^{-3},R^{-4},R^{-5}` stored in that
order in :obj:`matR`. Basis states are stored in :obj:`basisStates`
array.
>>> from arc import *
>>> calc = PairStateInteractions(Rubidium(), 60,0,0.5, \
60,0,0.5, 0.5,0.5,interactionsUpTo = 1)
>>> # theta=0, phi = 0, range of pqn, range of l, deltaE = 25e9
>>> calc.defineBasis(0 ,0 , 5, 5, 25e9, progressOutput=True)
>>> # now calc stores interaction matrix and relevant basis
>>> # we can access this directly and generate interaction matrix
>>> # at distance rval :
>>> rval = 4 # in mum
>>> matrix = calc.matDiagonal
>>> rX = (rval*1.e-6)**3
        >>> for matRX in calc.matR:
        >>>     matrix = matrix + matRX/rX
        >>>     rX *= (rval*1.e-6)
>>> # matrix variable now holds full interaction matrix for
>>> # interacting atoms at distance rval calculated in
>>> # pair-state basis states can be accessed as
>>> basisStates = calc.basisStates
"""
dataFolder = DPATH
# =============================== Methods ===============================
def __init__(
self,
atom,
n,
l,
j,
nn,
ll,
jj,
m1,
m2,
interactionsUpTo=1,
s=0.5,
s2=None,
atom2=None,
):
# alkali atom type, principal quantum number, orbital angular momentum,
# total angular momentum projections of the angular momentum on z axis
self.atom1 = atom #: the first atom type (isotope)
if atom2 is None:
self.atom2 = atom #: the second atom type (isotope)
else:
            self.atom2 = atom2  #: the second atom type (isotope)
self.n = n # : pair-state definition: principal quantum number of the first atom
self.l = l # : pair-state definition: orbital angular momentum of the first atom
self.j = j # : pair-state definition: total angular momentum of the first atom
self.nn = nn # : pair-state definition: principal quantum number of the second atom
self.ll = ll # : pair-state definition: orbital angular momentum of the second atom
self.jj = jj # : pair-state definition: total angular momentum of the second atom
self.m1 = m1 # : pair-state definition: projection of the total ang. momentum for the *first* atom
self.m2 = m2 # : pair-state definition: projection of the total angular momentum for the *second* atom
self.interactionsUpTo = interactionsUpTo
""" Specifies up to which approximation we include in pair-state interactions.
By default value is 1, corresponding to pair-state interactions up to
dipole-dipole coupling. Value of 2 is also supported, corresponding
to pair-state interactions up to quadrupole-quadrupole coupling.
"""
if issubclass(type(atom), DivalentAtom) and not (s == 0 or s == 1):
raise ValueError(
"total angular spin s has to be defined explicitly "
"for calculations, and value has to be 0 or 1 "
"for singlet and tripplet states respectively."
)
self.s1 = s #: total spin angular momentum, optional (default 0.5)
if s2 is None:
self.s2 = s
else:
self.s2 = s2
# check that values of spin states are valid for entered atomic species
if issubclass(type(self.atom1), DivalentAtom):
if abs(self.s1) > 0.1 and abs(self.s1 - 1) > 0.1:
raise ValueError(
"atom1 is DivalentAtom and its spin has to be "
"s=0 or s=1 (for singlet and triplet states "
"respectively)"
)
elif abs(self.s1 - 0.5) > 0.1:
raise ValueError(
"atom1 is AlkaliAtom and its spin has to be " "s=0.5"
)
if issubclass(type(self.atom2), DivalentAtom):
if abs(self.s2) > 0.1 and abs(self.s2 - 1) > 0.1:
raise ValueError(
"atom2 is DivalentAtom and its spin has to be "
"s=0 or s=1 (for singlet and triplet states "
"respectively)"
)
elif abs(self.s2 - 0.5) > 0.1:
            # atom2 is an alkali atom, so its spin must be 1/2
raise ValueError(
"atom2 is AlkaliAtom and its spin has to be " "s=0.5"
)
if abs((self.s1 - self.m1) % 1) > 0.1:
raise ValueError(
"atom1 with spin s = %.1d cannot have m1 = %.1d"
% (self.s1, self.m1)
)
if abs((self.s2 - self.m2) % 1) > 0.1:
raise ValueError(
"atom2 with spin s = %.1d cannot have m2 = %.1d"
% (self.s2, self.m2)
)
# ====================== J basis (not resolving mj) ===================
self.coupling = []
"""
        List of matrices defining coupling strengths between the states in
J basis (not resolving :math:`m_j` ). Basis is given by
:obj:`PairStateInteractions.channel`. Used as intermediary for full
interaction matrix calculation by
:obj:`PairStateInteractions.defineBasis`.
"""
self.channel = []
"""
states relevant for calculation, defined in J basis (not resolving
        :math:`m_j`). Used as intermediary for full interaction matrix
calculation by :obj:`PairStateInteractions.defineBasis`.
"""
# ======================= Full basis (resolving mj) ===================
self.basisStates = []
"""
List of pair-states for calculation. In the form
[[n1,l1,j1,mj1,n2,l2,j2,mj2], ...].
Each state is an array [n1,l1,j1,mj1,n2,l2,j2,mj2] corresponding to
:math:`|n_1,l_1,j_1,m_{j1},n_2,l_2,j_2,m_{j2}\\rangle` state.
Calculated by :obj:`PairStateInteractions.defineBasis`.
"""
self.matrixElement = []
"""
`matrixElement[i]` gives index of state in
:obj:`PairStateInteractions.channel` basis
(that doesn't resolve :math:`m_j` states), for the given index `i`
of the state in :obj:`PairStateInteractions.basisStates`
( :math:`m_j` resolving) basis.
"""
        # various parts of interaction matrix in pair-state basis
self.matDiagonal = []
"""
Part of interaction matrix in pair-state basis that doesn't depend
on inter-atomic distance. E.g. diagonal elements of the interaction
matrix, that describe energies of the pair states in unperturbed
basis, will be stored here. Basis states are stored in
:obj:`PairStateInteractions.basisStates`. Calculated by
:obj:`PairStateInteractions.defineBasis`.
"""
self.matR = []
"""
Stores interaction matrices in pair-state basis
that scale as :math:`1/R^3`, :math:`1/R^4` and :math:`1/R^5`
with distance in :obj:`matR[0]`, :obj:`matR[1]` and :obj:`matR[2]`
respectively. These matrices correspond to dipole-dipole
( :math:`C_3`), dipole-quadrupole ( :math:`C_4`) and
quadrupole-quadrupole ( :math:`C_5`) interactions
coefficients. Basis states are stored in
:obj:`PairStateInteractions.basisStates`.
Calculated by :obj:`PairStateInteractions.defineBasis`.
"""
self.originalPairStateIndex = 0
"""
index of the original n,l,j,m1,nn,ll,jj,m2 pair-state in the
:obj:`PairStateInteractions.basisStates` basis.
"""
self.matE = []
self.matB_1 = []
self.matB_2 = []
# ===================== Eigen states and plotting =====================
# finding perturbed energy levels
self.r = [] # detuning scale
self.y = [] # energy levels
self.highlight = []
# pointers towards figure
self.fig = 0
self.ax = 0
# for normalization of the maximum coupling later
self.maxCoupling = 0.0
# n,l,j,mj, drive polarization q
self.drivingFromState = [0, 0, 0, 0, 0]
# sam = saved angular matrix metadata
self.angularMatrixFile = "angularMatrix.npy"
self.angularMatrixFile_meta = "angularMatrix_meta.npy"
# self.sam = []
self.savedAngularMatrix_matrix = []
        # initialize precalculated values for factorial term
# in __getAngularMatrix_M
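        # fcoef(l1, l2, m) below evaluates the factorial prefactor
        #   (l1 + l2)! / sqrt((l1 + m)! * (l1 - m)! * (l2 + m)! * (l2 - m)!)
        # whose values are cached in self.fcp for the multipole orders used by
        # __getAngularMatrix_M.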
def fcoef(l1, l2, m):
return (
factorial(l1 + l2)
/ (
factorial(l1 + m)
* factorial(l1 - m)
* factorial(l2 + m)
* factorial(l2 - m)
)
** 0.5
)
x = self.interactionsUpTo
self.fcp = np.zeros((x + 1, x + 1, 2 * x + 1))
for c1 in range(1, x + 1):
for c2 in range(1, x + 1):
for p in range(-min(c1, c2), min(c1, c2) + 1):
self.fcp[c1, c2, p + x] = fcoef(c1, c2, p)
self.conn = False
def __getAngularMatrix_M(self, l, j, ll, jj, l1, j1, l2, j2):
        # did we already calculate this matrix?
c = self.conn.cursor()
c.execute(
"""SELECT ind FROM pair_angularMatrix WHERE
l1 = ? AND j1_x2 = ? AND
l2 = ? AND j2_x2 = ? AND
l3 = ? AND j3_x2 = ? AND
l4 = ? AND j4_x2 = ?
""",
(l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2),
)
index = c.fetchone()
if index:
return self.savedAngularMatrix_matrix[index[0]]
# determine coupling
dl = abs(l - l1)
dj = abs(j - j1)
c1 = 0
if dl == 1 and (dj < 1.1):
c1 = 1 # dipole coupling
elif dl == 0 or dl == 2 or dl == 1:
c1 = 2 # quadrupole coupling
else:
raise ValueError("error in __getAngularMatrix_M")
dl = abs(ll - l2)
dj = abs(jj - j2)
c2 = 0
if dl == 1 and (dj < 1.1):
c2 = 1 # dipole coupling
elif dl == 0 or dl == 2 or dl == 1:
c2 = 2 # quadrupole coupling
else:
raise ValueError("error in __getAngularMatrix_M")
am = np.zeros(
(
round((2 * j1 + 1) * (2 * j2 + 1)),
round((2 * j + 1) * (2 * jj + 1)),
),
dtype=np.float64,
)
if (c1 > self.interactionsUpTo) or (c2 > self.interactionsUpTo):
return am
j1range = np.linspace(-j1, j1, round(2 * j1) + 1)
j2range = np.linspace(-j2, j2, round(2 * j2) + 1)
jrange = np.linspace(-j, j, round(2 * j) + 1)
jjrange = np.linspace(-jj, jj, round(2 * jj) + 1)
for m1 in j1range:
for m2 in j2range:
# we have chosen the first index
index1 = round(
m1 * (2.0 * j2 + 1.0) + m2 + (j1 * (2.0 * j2 + 1.0) + j2)
)
for m in jrange:
for mm in jjrange:
# we have chosen the second index
index2 = round(
m * (2.0 * jj + 1.0)
+ mm
+ (j * (2.0 * jj + 1.0) + jj)
)
                        # angular matrix element from Saßmannshausen, Heiner,
                        # Merkt, Frédéric, Deiglmayr, Johannes
                        # PRA 92: 032505 (2015)
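                        # The element assembled below is
                        #   (-1)^(j+jj+s1+s2+l1+l2)
                        #   * CG(l,0; c1,0 | l1,0) * CG(ll,0; c2,0 | l2,0)
                        #   * sqrt((2l+1)(2ll+1)) * sqrt((2j+1)(2jj+1))
                        #   * 6j{l s1 j; j1 c1 l1} * 6j{ll s2 jj; j2 c2 l2}
                        #   * sum_p fcp[c1,c2,p] * CG(j,m; c1,p | j1,m1) * CG(jj,mm; c2,-p | j2,m2)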
elem = (
(-1.0) ** (j + jj + self.s1 + self.s2 + l1 + l2)
* CG(l, 0, c1, 0, l1, 0)
* CG(ll, 0, c2, 0, l2, 0)
)
elem = (
elem
* sqrt((2.0 * l + 1.0) * (2.0 * ll + 1.0))
* sqrt((2.0 * j + 1.0) * (2.0 * jj + 1.0))
)
elem = (
elem
* Wigner6j(l, self.s1, j, j1, c1, l1)
* Wigner6j(ll, self.s2, jj, j2, c2, l2)
)
sumPol = 0.0 # sum over polarisations
limit = min(c1, c2)
for p in xrange(-limit, limit + 1):
sumPol = sumPol + self.fcp[
c1, c2, p + self.interactionsUpTo
] * CG(j, m, c1, p, j1, m1) * CG(
jj, mm, c2, -p, j2, m2
)
am[index1, index2] = elem * sumPol
index = len(self.savedAngularMatrix_matrix)
c.execute(
""" INSERT INTO pair_angularMatrix
VALUES (?,?, ?,?, ?,?, ?,?, ?)""",
(l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2, index),
)
self.conn.commit()
self.savedAngularMatrix_matrix.append(am)
self.savedAngularMatrixChanged = True
return am
def __updateAngularMatrixElementsFile(self):
if not (self.savedAngularMatrixChanged):
return
try:
c = self.conn.cursor()
c.execute("""SELECT * FROM pair_angularMatrix """)
data = []
for v in c.fetchall():
data.append(v)
data = np.array(data, dtype=np.float32)
data[:, 1] /= 2.0 # 2 r j1 -> j1
data[:, 3] /= 2.0 # 2 r j2 -> j2
data[:, 5] /= 2.0 # 2 r j3 -> j3
data[:, 7] /= 2.0 # 2 r j4 -> j4
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta), "wb"
)
np.save(fileHandle, data)
fileHandle.close()
except IOError:
print(
"Error while updating angularMatrix \
data meta (description) File "
+ self.angularMatrixFile_meta
)
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile), "wb"
)
np.save(fileHandle, self.savedAngularMatrix_matrix)
fileHandle.close()
except IOError as e:
print(
"Error while updating angularMatrix \
data File "
+ self.angularMatrixFile
)
print(e)
def __loadAngularMatrixElementsFile(self):
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta), "rb"
)
data = np.load(fileHandle, encoding="latin1", allow_pickle=True)
fileHandle.close()
except Exception as ex:
print(ex)
print("Note: No saved angular matrix metadata files to be loaded.")
print(sys.exc_info())
return
data[:, 1] *= 2 # j1 -> 2 r j1
data[:, 3] *= 2 # j2 -> 2 r j2
data[:, 5] *= 2 # j3 -> 2 r j3
data[:, 7] *= 2 # j4 -> 2 r j4
        data = np.array(np.rint(data), dtype=int)
try:
c = self.conn.cursor()
c.executemany(
"""INSERT INTO pair_angularMatrix
(l1, j1_x2 ,
l2 , j2_x2 ,
l3, j3_x2,
l4 , j4_x2 ,
ind)
VALUES (?,?,?,?,?,?,?,?,?)""",
data,
)
self.conn.commit()
except sqlite3.Error as e:
print("Error while loading precalculated values into the database!")
print(e)
exit()
if len(data) == 0:
print("error")
return
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile), "rb"
)
self.savedAngularMatrix_matrix = np.load(
fileHandle, encoding="latin1", allow_pickle=True
).tolist()
fileHandle.close()
except Exception as ex:
print(ex)
print("Note: No saved angular matrix files to be loaded.")
print(sys.exc_info())
def __isCoupled(self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2, limit):
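        # Returns False if the two pair states are not coupled (energy defect
        # above `limit`, or selection rules forbid the transition); otherwise
        # returns c1 + c2, where c1, c2 = 1 (dipole) or 2 (quadrupole) label
        # the coupling of the first and second atom respectively, so that
        # 2 = dipole-dipole, 3 = dipole-quadrupole, 4 = quadrupole-quadrupole.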
if (
(
abs(
self.__getEnergyDefect(
n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2
)
)
/ C_h
< limit
)
and not (
n == n1
and nn == n2
and l == l1
and ll == l2
and j == j1
and jj == j2
)
and not (
(
abs(l1 - l) != 1
and (
(
abs(j - 0.5) < 0.1 and abs(j1 - 0.5) < 0.1
) # j = 1/2 and j'=1/2 forbidden
or (
abs(j) < 0.1 and abs(j1 - 1) < 0.1
) # j = 0 and j'=1 forbidden
or (
abs(j - 1) < 0.1 and abs(j1) < 0.1
) # j = 1 and j'=0 forbidden
)
)
or (
abs(l2 - ll) != 1
and (
(
abs(jj - 0.5) < 0.1 and abs(j2 - 0.5) < 0.1
) # j = 1/2 and j'=1/2 forbidden
or (
abs(jj) < 0.1 and abs(j2 - 1) < 0.1
) # j = 0 and j'=1 forbidden
or (
abs(jj - 1) < 0.1 and abs(j2) < 0.1
) # j = 1 and j'=0 forbidden
)
)
)
and not (abs(j) < 0.1 and abs(j1) < 0.1) # j = 0 and j'=0 forbiden
and not (abs(jj) < 0.1 and abs(j2) < 0.1)
and not (
abs(l) < 0.1 and abs(l1) < 0.1
) # l = 0 and l' = 0 is forbiden
and not (abs(ll) < 0.1 and abs(l2) < 0.1)
):
# determine coupling
dl = abs(l - l1)
dj = abs(j - j1)
c1 = 0
if dl == 1 and (dj < 1.1):
c1 = 1 # dipole coupling
elif (
(dl == 0 or dl == 2 or dl == 1)
and (dj < 2.1)
and (2 <= self.interactionsUpTo)
):
c1 = 2 # quadrupole coupling
else:
return False
dl = abs(ll - l2)
dj = abs(jj - j2)
c2 = 0
if dl == 1 and (dj < 1.1):
c2 = 1 # dipole coupling
elif (
(dl == 0 or dl == 2 or dl == 1)
and (dj < 2.1)
and (2 <= self.interactionsUpTo)
):
c2 = 2 # quadrupole coupling
else:
return False
return c1 + c2
else:
return False
def __getEnergyDefect(self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2):
"""
        Energy defect between |n,l,j>x|nn,ll,jj> state and |n1,l1,j1>x|n2,l2,j2>
        state of atom1 and atom2 in respective spin states s1 and s2.
        Takes spin values s1 and s2 as the ones defined when defining the calculation.
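
        The defect is evaluated as
        :math:`\\Delta E = E_1(n_1,l_1,j_1) + E_2(n_2,l_2,j_2) - E_1(n,l,j) - E_2(nn,ll,jj)`,
        where :math:`E_1` and :math:`E_2` are the single-atom energies of
        atom1 and atom2 respectively.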
Parameters:
n (int): principal quantum number
l (int): orbital angular momenutum
j (float): total angular momentum
nn (int): principal quantum number
ll (int): orbital angular momenutum
jj (float): total angular momentum
n1 (int): principal quantum number
l1 (int): orbital angular momentum
j1 (float): total angular momentum
n2 (int): principal quantum number
l2 (int): orbital angular momentum
j2 (float): total angular momentum
Returns:
float: energy defect (SI units: J)
"""
return C_e * (
self.atom1.getEnergy(n1, l1, j1, s=self.s1)
+ self.atom2.getEnergy(n2, l2, j2, s=self.s2)
- self.atom1.getEnergy(n, l, j, s=self.s1)
- self.atom2.getEnergy(nn, ll, jj, s=self.s2)
)
def __makeRawMatrix2(
self,
n,
l,
j,
nn,
ll,
jj,
k,
lrange,
limit,
limitBasisToMj,
progressOutput=False,
debugOutput=False,
):
# limit = limit in Hz on energy defect
# k defines range of n' = [n-k, n+k]
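        # Returns (states, couplingMatArray): `states` lists the retained pair
        # states as [n1, l1, j1, n2, l2, j2, energyDefect (GHz)], while
        # couplingMatArray[i] is a sparse matrix of the radial coupling
        # coefficients that scale as 1/R^(i+3) (dipole-dipole,
        # dipole-quadrupole, quadrupole-quadrupole).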
dimension = 0
# which states/channels contribute significantly in the second order perturbation?
states = []
# original pairstate index
opi = 0
        # these numbers are conserved if we use only dipole-dipole interactions
Lmod2 = (l + ll) % 2
l1start = max(l - self.interactionsUpTo, 0)
l2start = max(ll - self.interactionsUpTo, 0)
if debugOutput:
print("\n ======= Relevant states =======\n")
for n1 in xrange(max(n - k, 1), n + k + 1):
for n2 in xrange(max(nn - k, 1), nn + k + 1):
l1max = max(l + self.interactionsUpTo, lrange) + 1
l1max = min(l1max, n1 - 1)
for l1 in xrange(l1start, l1max):
l2max = max(ll + self.interactionsUpTo, lrange) + 1
l2max = min(l2max, n2 - 1)
for l2 in xrange(l2start, l2max):
j1 = l1 - self.s1
while j1 < -0.1:
j1 += 2 * self.s1
while j1 <= l1 + self.s1 + 0.1:
j2 = l2 - self.s2
while j2 < -0.1:
j2 += 2 * self.s2
while j2 <= l2 + self.s2 + 0.1:
ed = (
self.__getEnergyDefect(
n,
l,
j,
nn,
ll,
jj,
n1,
l1,
j1,
n2,
l2,
j2,
)
/ C_h
)
if (
abs(ed) < limit
and (
not (self.interactionsUpTo == 1)
or (Lmod2 == ((l1 + l2) % 2))
)
and (
(not limitBasisToMj)
or (j1 + j2 + 0.1 > self.m1 + self.m2)
)
and (
n1 >= self.atom1.groundStateN
or [n1, l1, j1]
in self.atom1.extraLevels
)
and (
n2 >= self.atom2.groundStateN
or [n2, l2, j2]
in self.atom2.extraLevels
)
):
if debugOutput:
pairState = (
"|"
+ printStateString(
n1, l1, j1, s=self.s1
)
+ ","
+ printStateString(
n2, l2, j2, s=self.s2
)
+ ">"
)
print(
pairState
+ (
"\t EnergyDefect = %.3f GHz"
% (ed * 1.0e-9)
)
)
states.append([n1, l1, j1, n2, l2, j2])
if (
n == n1
and nn == n2
and l == l1
and ll == l2
and j == j1
and jj == j2
):
opi = dimension
dimension = dimension + 1
j2 = j2 + 1.0
j1 = j1 + 1.0
if debugOutput:
print("\tMatrix dimension\t=\t", dimension)
        # mat_value, mat_row, mat_column for each sparse matrix describing
# dipole-dipole, dipole-quadrupole (and quad-dipole) and quadrupole-quadrupole
couplingMatConstructor = [
[[], [], []] for i in xrange(2 * self.interactionsUpTo - 1)
]
# original pair-state (i.e. target pair state) Zeeman Shift
opZeemanShift = (
(
self.atom1.getZeemanEnergyShift(
self.l, self.j, self.m1, self.Bz, s=self.s1
)
+ self.atom2.getZeemanEnergyShift(
self.ll, self.jj, self.m2, self.Bz, s=self.s2
)
)
/ C_h
* 1.0e-9
) # in GHz
if debugOutput:
print("\n ======= Coupling strengths (radial part only) =======\n")
maxCoupling = "quadrupole-quadrupole"
if self.interactionsUpTo == 1:
maxCoupling = "dipole-dipole"
if debugOutput:
print(
"Calculating coupling (up to ",
maxCoupling,
") between the pair states",
)
for i in xrange(dimension):
ed = (
self.__getEnergyDefect(
states[opi][0],
states[opi][1],
states[opi][2],
states[opi][3],
states[opi][4],
states[opi][5],
states[i][0],
states[i][1],
states[i][2],
states[i][3],
states[i][4],
states[i][5],
)
/ C_h
* 1.0e-9
- opZeemanShift
)
pairState1 = (
"|"
+ printStateString(
states[i][0], states[i][1], states[i][2], s=self.s1
)
+ ","
+ printStateString(
states[i][3], states[i][4], states[i][5], s=self.s2
)
+ ">"
)
states[i].append(ed) # energy defect of given state
for j in xrange(i + 1, dimension):
coupled = self.__isCoupled(
states[i][0],
states[i][1],
states[i][2],
states[i][3],
states[i][4],
states[i][5],
states[j][0],
states[j][1],
states[j][2],
states[j][3],
states[j][4],
states[j][5],
limit,
)
                # leftover debugging output for one specific pair of states;
                # only print it when debugOutput is requested
                if debugOutput and states[i][0] == 24 and states[j][0] == 18:
print("\n")
print(states[i])
print(states[j])
print(coupled)
if coupled and (
abs(states[i][0] - states[j][0]) <= k
and abs(states[i][3] - states[j][3]) <= k
):
if debugOutput:
pairState2 = (
"|"
+ printStateString(
states[j][0],
states[j][1],
states[j][2],
s=self.s1,
)
+ ","
+ printStateString(
states[j][3],
states[j][4],
states[j][5],
s=self.s2,
)
+ ">"
)
print(pairState1 + " <---> " + pairState2)
couplingStregth = (
_atomLightAtomCoupling(
states[i][0],
states[i][1],
states[i][2],
states[i][3],
states[i][4],
states[i][5],
states[j][0],
states[j][1],
states[j][2],
states[j][3],
states[j][4],
states[j][5],
self.atom1,
atom2=self.atom2,
s=self.s1,
s2=self.s2,
)
/ C_h
* 1.0e-9
)
couplingMatConstructor[coupled - 2][0].append(
couplingStregth
)
couplingMatConstructor[coupled - 2][1].append(i)
couplingMatConstructor[coupled - 2][2].append(j)
exponent = coupled + 1
if debugOutput:
print(
(
"\tcoupling (C_%d/R^%d) = %.5f"
% (
exponent,
exponent,
couplingStregth * (1e6) ** (exponent),
)
),
"/R^",
exponent,
" GHz (mu m)^",
exponent,
"\n",
)
# coupling = [1,1] dipole-dipole, [2,1] quadrupole dipole, [2,2] quadrupole quadrupole
couplingMatArray = [
csr_matrix(
(
couplingMatConstructor[i][0],
(
couplingMatConstructor[i][1],
couplingMatConstructor[i][2],
),
),
shape=(dimension, dimension),
)
for i in xrange(len(couplingMatConstructor))
]
return states, couplingMatArray
def __initializeDatabaseForMemoization(self):
# memoization of angular parts
self.conn = sqlite3.connect(
os.path.join(self.dataFolder, "precalculated_pair.db")
)
c = self.conn.cursor()
# ANGULAR PARTS
c.execute("""DROP TABLE IF EXISTS pair_angularMatrix""")
c.execute(
"""SELECT COUNT(*) FROM sqlite_master
WHERE type='table' AND name='pair_angularMatrix';"""
)
if c.fetchone()[0] == 0:
# create table
try:
c.execute(
"""CREATE TABLE IF NOT EXISTS pair_angularMatrix
(l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
l3 TINYINT UNSIGNED, j3_x2 TINYINT UNSIGNED,
l4 TINYINT UNSIGNED, j4_x2 TINYINT UNSIGNED,
ind INTEGER,
PRIMARY KEY (l1,j1_x2, l2,j2_x2, l3,j3_x2, l4,j4_x2)
) """
)
except sqlite3.Error as e:
print(e)
self.conn.commit()
self.__loadAngularMatrixElementsFile()
self.savedAngularMatrixChanged = False
def __closeDatabaseForMemoization(self):
self.conn.commit()
self.conn.close()
self.conn = False
def getLeRoyRadius(self):
"""
Returns Le Roy radius for initial pair-state.
Le Roy radius [#leroy]_ is defined as
:math:`2(\\langle r_1^2 \\rangle^{1/2} + \\langle r_2^2 \\rangle^{1/2})`,
where :math:`r_1` and :math:`r_2` are electron coordinates for the
first and the second atom in the initial pair-state.
Below this radius, calculations are not valid since electron
wavefunctions start to overlap.
Returns:
float: LeRoy radius measured in :math:`\\mu m`
References:
.. [#leroy] R.J. Le Roy, Can. J. Phys. **52**, 246 (1974)
http://www.nrcresearchpress.com/doi/abs/10.1139/p74-035
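Example:
A minimal usage sketch (the chosen pair-state is illustrative only)::
from arc import *
calc = PairStateInteractions(Rubidium(), 60, 0, 0.5, 60, 0, 0.5, 0.5, 0.5)
print("Le Roy radius = %.3f mu m" % calc.getLeRoyRadius())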
"""
step = 0.001
r1, psi1_r1 = self.atom1.radialWavefunction(
self.l,
0.5,
self.j,
self.atom1.getEnergy(self.n, self.l, self.j, s=self.s1) / 27.211,
self.atom1.alphaC ** (1 / 3.0),
2.0 * self.n * (self.n + 15.0),
step,
)
sqrt_r1_on2 = np.trapz(
np.multiply(np.multiply(psi1_r1, psi1_r1), np.multiply(r1, r1)),
x=r1,
)
r2, psi2_r2 = self.atom2.radialWavefunction(
self.ll,
0.5,
self.jj,
self.atom2.getEnergy(self.nn, self.ll, self.jj, s=self.s2) / 27.211,
self.atom2.alphaC ** (1 / 3.0),
2.0 * self.nn * (self.nn + 15.0),
step,
)
sqrt_r2_on2 = np.trapz(
np.multiply(np.multiply(psi2_r2, psi2_r2), np.multiply(r2, r2)),
x=r2,
)
return (
2.0
* (sqrt(sqrt_r1_on2) + sqrt(sqrt_r2_on2))
* (physical_constants["Bohr radius"][0] * 1.0e6)
)
def getC6perturbatively(
self, theta, phi, nRange, energyDelta, degeneratePerturbation=False
):
r"""
Calculates :math:`C_6` from second order perturbation theory.
Calculates
:math:`C_6=\sum_{\rm r',r''}|\langle {\rm r',r''}|V|\
{\rm r1,r2}\rangle|^2/\Delta_{\rm r',r''}`, where
:math:`\Delta_{\rm r',r''}\equiv E({\rm r',r''})-E({\rm r1, r2})`
When second order perturbation couples to multiple energy-degenerate
states, users should use **degenerate perturbation calculations** by
setting `degeneratePerturbation=True`.
This calculation is faster than full diagonalization, but it is valid
only far from the so-called spaghetti region that occurs when atoms
are close to each other. In that region multiple levels are strongly
coupled, and one needs to use full diagonalization. In the region where
the perturbative calculation is correct, the energy level shift can be
obtained as :math:`V(R)=-C_6/R^6`
See `perturbative C6 calculations example snippet`_ and, for a
degenerate perturbation calculation, see
`degenerate perturbation C6 calculation example snippet`_
.. _`perturbative C6 calculations example snippet`:
./Rydberg_atoms_a_primer.html#Dispersion-Coefficients
.. _`degenerate perturbation C6 calculation example snippet`:
./ARC_3_0_introduction.html#Pertubative-C6-calculation-in-the-manifold-of-degenerate-states
Parameters:
theta (float):
orientation of inter-atomic axis with respect
to quantization axis (:math:`z`) in Euler coordinates
(measured in units of radian)
phi (float):
orientation of inter-atomic axis with respect
to quantization axis (:math:`z`) in Euler coordinates
(measured in units of radian)
nRange (int):
how much below and above the given principal quantum number
of the pair state we should be looking
energyDelta (float):
maximum energy difference (:math:`\Delta E/h` in Hz)
between the original pair-state and the other pair-states that
are included in the calculation
degeneratePerturbation (bool):
optional, default False. If True,
degenerate perturbation theory is used. This should be used whenever
the angle between the quantisation and interatomic axes is non-zero,
as well as when one considers non-stretched states.
Returns:
float: if **degeneratePerturbation=False**, returns
:math:`C_6` measured in :math:`\text{GHz }\mu\text{m}^6`;
if **degeneratePerturbation=True**, returns array of
:math:`C_6` measured in :math:`\text{GHz }\mu\text{m}^6`
AND array of corresponding eigenvectors in
:math:`\{m_{j_1}=-j_1, \ldots, m_{j_1} = +j1\}\bigotimes \
\{ m_{j_2}=-j_2, \ldots, m_{j_2} = +j2\}`
basis
Example:
If we want to quickly calculate :math:`C_6` for two Rubidium
atoms in state :math:`62 D_{3/2} m_j=3/2`, positioned in space
along the shared quantization axis::
from arc import *
calculation = PairStateInteractions(Rubidium(), 62, 2, 1.5, 62, 2, 1.5, 1.5, 1.5)
c6 = calculation.getC6perturbatively(0,0, 5, 25e9)
print "C_6 = %.0f GHz (mu m)^6" % c6
Which returns::
C_6 = 767 GHz (mu m)^6
Quick calculation of angular anisotropy for Rubidium
:math:`D_{5/2},m_j=5/2` states::
# Rb 60 D_{5/2}, mj=2.5 , 60 D_{5/2}, mj=2.5 pair state
calculation1 = PairStateInteractions(Rubidium(), 60, 2, 2.5, 60, 2, 2.5, 2.5, 2.5)
# list of atom orientations
thetaList = np.linspace(0,pi,30)
# do calculation of C6 pertubatively for all atom orientations
c6 = []
for theta in thetaList:
value = calculation1.getC6perturbatively(theta,0,5,25e9)
c6.append(value)
print ("theta = %.2f * pi \tC6 = %.2f GHz mum^6" % (theta/pi,value))
# plot results
plot(thetaList/pi,c6,"b-")
title("Rb, pairstate 60 $D_{5/2},m_j = 5/2$, 60 $D_{5/2},m_j = 5/2$")
xlabel(r"$\Theta /\pi$")
ylabel(r"$C_6$ (GHz $\mu$m${}^6$")
show()
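For non-stretched states or a tilted interatomic axis, a sketch of the
degenerate-perturbation call (angle and basis parameters below are
illustrative only) would be::
c6, eigenvectors = calculation1.getC6perturbatively(
0.3, 0, 5, 25e9, degeneratePerturbation=True
)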
"""
self.__initializeDatabaseForMemoization()
# ========= START OF THE MAIN CODE ===========
# wigner D matrix allows calculations with arbitrary orientation of
# the two atoms
wgd = WignerDmatrix(theta, phi)
# any conservation?
# these numbers are conserved if we use only dipole-dipole interactions
Lmod2 = (self.l + self.ll) % 2
# find nearby states
lmin1 = self.l - 1
if lmin1 < -0.1:
lmin1 = 1
lmin2 = self.ll - 1
if lmin2 < -0.1:
lmin2 = 1
interactionMatrix = np.zeros(
(
round((2 * self.j + 1) * (2 * self.jj + 1)),
round((2 * self.j + 1) * (2 * self.jj + 1)),
),
dtype=complex,
)
for n1 in xrange(max(self.n - nRange, 1), self.n + nRange + 1):
for n2 in xrange(max(self.nn - nRange, 1), self.nn + nRange + 1):
lmax1 = min(self.l + 2, n1)
for l1 in xrange(lmin1, lmax1, 2):
lmax2 = min(self.ll + 2, n2)
for l2 in xrange(lmin2, lmax2, 2):
if (l1 + l2) % 2 == Lmod2:
j1 = l1 - self.s1
while j1 < -0.1:
j1 += 2 * self.s1
while j1 <= l1 + self.s1 + 0.1:
j2 = l2 - self.s2
while j2 < -0.1:
j2 += 2 * self.s2
while j2 <= l2 + self.s2 + 0.1:
coupled = self.__isCoupled(
self.n,
self.l,
self.j,
self.nn,
self.ll,
self.jj,
n1,
l1,
j1,
n2,
l2,
j2,
energyDelta,
)
if (
coupled
and (
not (self.interactionsUpTo == 1)
or (Lmod2 == ((l1 + l2) % 2))
)
and (
n1 >= self.atom1.groundStateN
or [n1, l1, j1]
in self.atom1.extraLevels
)
and (
n2 >= self.atom2.groundStateN
or [n2, l2, j2]
in self.atom2.extraLevels
)
):
energyDefect = (
self.__getEnergyDefect(
self.n,
self.l,
self.j,
self.nn,
self.ll,
self.jj,
n1,
l1,
j1,
n2,
l2,
j2,
)
/ C_h
)
energyDefect = (
energyDefect * 1.0e-9
) # GHz
if abs(energyDefect) < 1e-10:
raise ValueError(
"The requested pair-state "
"is dipole coupled resonantly "
"(energy defect = 0) "
"to other pair-states. "
"Aborting perturbative "
"calculation. "
"(This usually happens for "
"high-L states for which "
"identical quantum defects give "
"rise to degeneracies, making "
"total L ultimately not a "
"conserved quantum number.)"
)
# calculate radial part
couplingStregth = (
_atomLightAtomCoupling(
self.n,
self.l,
self.j,
self.nn,
self.ll,
self.jj,
n1,
l1,
j1,
n2,
l2,
j2,
self.atom1,
atom2=self.atom2,
s=self.s1,
s2=self.s2,
)
* (1.0e-9 * (1.0e6) ** 3 / C_h)
) # GHz / mum^3
d = self.__getAngularMatrix_M(
self.l,
self.j,
self.ll,
self.jj,
l1,
j1,
l2,
j2,
)
interactionMatrix += (
d.conj().T.dot(d)
* abs(couplingStregth) ** 2
/ energyDefect
)
j2 = j2 + 1.0
j1 = j1 + 1.0
rotationMatrix = np.kron(
wgd.get(self.j).toarray(), wgd.get(self.jj).toarray()
)
interactionMatrix = rotationMatrix.dot(
interactionMatrix.dot(rotationMatrix.conj().T)
)
# ========= END OF THE MAIN CODE ===========
self.__closeDatabaseForMemoization()
value, vectors = np.linalg.eigh(interactionMatrix)
vectors = vectors.T
stateCom = compositeState(
singleAtomState(self.j, self.m1), singleAtomState(self.jj, self.m2)
).T
if not degeneratePerturbation:
for i, v in enumerate(vectors):
if abs(np.vdot(v, stateCom)) > 1 - 1e-9:
return value[i]
# else:
# print(np.vdot(v, stateCom))
# if initial state is not eigen state print warning and return
# results for eigenstates, and eigenstate composition
"""
print("WARNING: Requested state is not eigenstate when dipole-dipole "
"interactions and/or relative position of atoms are "
"taken into account.\n"
"We will use degenerate pertubative theory to correctly "
"calculate C6.\n"
"Method will return values AND eigenvectors in basis \n"
"{mj1 = -j1, ... , mj1 = +j1} x {mj2 = -j2, ... , m2 = +j2}, "
"where x denotes Kronecker product\n"
"To not see this warning request explicitly "
"degeneratePerturbation=True in call of this method.\n")
"""
# print(stateCom.conj().dot(interactionMatrix.dot(stateCom.T)))
# print(stateCom.conj().dot(interactionMatrix.dot(stateCom.T)).shape)
return np.real(
stateCom.conj().dot(interactionMatrix.dot(stateCom.T))[0][0]
)
return np.real(value), vectors
def defineBasis(
self,
theta,
phi,
nRange,
lrange,
energyDelta,
Bz=0,
progressOutput=False,
debugOutput=False,
):
r"""
Finds relevant states in the vicinity of the given pair-state
Finds relevant pair-state basis and calculates interaction matrix.
Pair-state basis is saved in :obj:`basisStates`.
Interaction matrix is saved in parts depending on the scaling with
distance. Diagonal elements :obj:`matDiagonal`, corresponding to
relative energy defects of the pair-states, don't change with
interatomic separation. Off diagonal elements can depend
on distance as :math:`R^{-3}, R^{-4}` or :math:`R^{-5}`,
corresponding to dipole-dipole (:math:`C_3` ), dipole-quadrupole
(:math:`C_4` ) and quadrupole-quadrupole coupling (:math:`C_5` )
respectively. These parts of the matrix are stored in
:obj:`PairStateInteractions.matR`
in that order. I.e. :obj:`matR[0]`
stores dipole-dipole coupling
(:math:`\propto R^{-3}`),
:obj:`matR[1]` stores dipole-quadrupole
couplings etc.
Parameters:
theta (float): relative orientation of the two atoms
(see figure on top of the page), range 0 to :math:`\pi`
phi (float): relative orientation of the two atoms (see figure
on top of the page), range 0 to :math:`2\pi`
nRange (int): how much below and above the given principal
quantum number of the pair state we should be looking?
lrange (int): what is the maximum angular orbital momentum
state that we are including in calculation
energyDelta (float): what is maximum energy difference (
:math:`\Delta E/h` in Hz)
between the original pair state and the other pair states
that we are including in calculation
Bz (float): optional, magnetic field directed along z-axis in
units of Tesla. Calculation will be correct only for weak
magnetic fields, where paramagnetic term is much stronger
than the diamagnetic term. The diamagnetic term is neglected.
progressOutput (bool): optional, False by default. If true,
prints information about the progress of the calculation.
debugOutput (bool): optional, False by default. If true,
similarly to progressOutput=True, this will print
information about the progress of calculations, but with
more verbose output.
See also:
:obj:`arc.alkali_atom_functions.saveCalculation` and
:obj:`arc.alkali_atom_functions.loadSavedCalculation` for
information on saving intermediate results of calculation for
later use.
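Example:
A minimal sketch of a typical workflow (all numerical values are
illustrative only)::
from arc import *
calc = PairStateInteractions(Rubidium(), 60, 0, 0.5, 60, 0, 0.5, 0.5, 0.5)
calc.defineBasis(0., 0., 5, 5, 25.e9, progressOutput=True)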
"""
self.__initializeDatabaseForMemoization()
# save call parameters
self.theta = theta
self.phi = phi
self.nRange = nRange
self.lrange = lrange
self.energyDelta = energyDelta
self.Bz = Bz
self.basisStates = []
# wignerDmatrix
wgd = WignerDmatrix(theta, phi)
limitBasisToMj = False
if theta < 0.001:
limitBasisToMj = True # Mj will be conserved in calculations
originalMj = self.m1 + self.m2
self.channel, self.coupling = self.__makeRawMatrix2(
self.n,
self.l,
self.j,
self.nn,
self.ll,
self.jj,
nRange,
lrange,
energyDelta,
limitBasisToMj,
progressOutput=progressOutput,
debugOutput=debugOutput,
)
self.atom1.updateDipoleMatrixElementsFile()
self.atom2.updateDipoleMatrixElementsFile()
# generate all the states (with mj principal quantum number)
# opi = original pairstate index
opi = 0
# NEW FOR SPACE MATRIX
self.index = np.zeros(len(self.channel) + 1, dtype=np.int16)
for i in xrange(len(self.channel)):
self.index[i] = len(self.basisStates)
stateCoupled = self.channel[i]
for m1c in np.linspace(
stateCoupled[2],
-stateCoupled[2],
round(1 + 2 * stateCoupled[2]),
):
for m2c in np.linspace(
stateCoupled[5],
-stateCoupled[5],
round(1 + 2 * stateCoupled[5]),
):
if (not limitBasisToMj) or (
abs(originalMj - m1c - m2c) < 0.1
):
self.basisStates.append(
[
stateCoupled[0],
stateCoupled[1],
stateCoupled[2],
m1c,
stateCoupled[3],
stateCoupled[4],
stateCoupled[5],
m2c,
]
)
self.matrixElement.append(i)
if (
abs(stateCoupled[0] - self.n) < 0.1
and abs(stateCoupled[1] - self.l) < 0.1
and abs(stateCoupled[2] - self.j) < 0.1
and abs(m1c - self.m1) < 0.1
and abs(stateCoupled[3] - self.nn) < 0.1
and abs(stateCoupled[4] - self.ll) < 0.1
and abs(stateCoupled[5] - self.jj) < 0.1
and abs(m2c - self.m2) < 0.1
):
opi = len(self.basisStates) - 1
if self.index[i] == len(self.basisStates):
print(stateCoupled)
self.index[-1] = len(self.basisStates)
if progressOutput or debugOutput:
print("\nCalculating Hamiltonian matrix...\n")
dimension = len(self.basisStates)
if progressOutput or debugOutput:
print("\n\tmatrix (dimension ", dimension, ")\n")
# INITIALIZING MATRICES
# all (sparse) matrices will be saved in csr format
# value, row, column
matDiagonalConstructor = [[], [], []]
matRConstructor = [
[[], [], []] for i in xrange(self.interactionsUpTo * 2 - 1)
]
matRIndex = 0
for c in self.coupling:
progress = 0.0
for ii in xrange(len(self.channel)):
if progressOutput:
dim = len(self.channel)
progress += (dim - ii) * 2 - 1
sys.stdout.write(
"\rMatrix R%d %.1f %% (state %d of %d)"
% (
matRIndex + 3,
float(progress) / float(dim**2) * 100.0,
ii + 1,
len(self.channel),
)
)
sys.stdout.flush()
ed = self.channel[ii][6]
# solves problems with exactly degenerate basisStates
degeneracyOffset = 0.00000001
i = self.index[ii]
dMatrix1 = wgd.get(self.basisStates[i][2])
dMatrix2 = wgd.get(self.basisStates[i][6])
for i in xrange(self.index[ii], self.index[ii + 1]):
statePart1 = singleAtomState(
self.basisStates[i][2], self.basisStates[i][3]
)
statePart2 = singleAtomState(
self.basisStates[i][6], self.basisStates[i][7]
)
# rotate individual states
statePart1 = dMatrix1.T.conjugate().dot(statePart1)
statePart2 = dMatrix2.T.conjugate().dot(statePart2)
stateCom = compositeState(statePart1, statePart2)
if matRIndex == 0:
zeemanShift = (
(
self.atom1.getZeemanEnergyShift(
self.basisStates[i][1],
self.basisStates[i][2],
self.basisStates[i][3],
self.Bz,
s=self.s1,
)
+ self.atom2.getZeemanEnergyShift(
self.basisStates[i][5],
self.basisStates[i][6],
self.basisStates[i][7],
self.Bz,
s=self.s2,
)
)
/ C_h
* 1.0e-9
) # in GHz
matDiagonalConstructor[0].append(
ed + zeemanShift + degeneracyOffset
)
degeneracyOffset += 0.00000001
matDiagonalConstructor[1].append(i)
matDiagonalConstructor[2].append(i)
for dataIndex in xrange(c.indptr[ii], c.indptr[ii + 1]):
jj = c.indices[dataIndex]
radialPart = c.data[dataIndex]
j = self.index[jj]
dMatrix3 = wgd.get(self.basisStates[j][2])
dMatrix4 = wgd.get(self.basisStates[j][6])
if self.index[jj] != self.index[jj + 1]:
d = self.__getAngularMatrix_M(
self.basisStates[i][1],
self.basisStates[i][2],
self.basisStates[i][5],
self.basisStates[i][6],
self.basisStates[j][1],
self.basisStates[j][2],
self.basisStates[j][5],
self.basisStates[j][6],
)
secondPart = d.dot(stateCom)
else:
print(" - - - ", self.channel[jj])
for j in xrange(self.index[jj], self.index[jj + 1]):
statePart1 = singleAtomState(
self.basisStates[j][2], self.basisStates[j][3]
)
statePart2 = singleAtomState(
self.basisStates[j][6], self.basisStates[j][7]
)
# rotate individual states
statePart1 = dMatrix3.T.conjugate().dot(statePart1)
statePart2 = dMatrix4.T.conjugate().dot(statePart2)
# composite state of two atoms
stateCom2 = compositeState(statePart1, statePart2)
angularFactor = stateCom2.T.conjugate().dot(
secondPart
)
if abs(self.phi) < 1e-9:
angularFactor = angularFactor[0, 0].real
else:
angularFactor = angularFactor[0, 0]
if abs(angularFactor) > 1.0e-5:
matRConstructor[matRIndex][0].append(
(radialPart * angularFactor).conj()
)
matRConstructor[matRIndex][1].append(i)
matRConstructor[matRIndex][2].append(j)
matRConstructor[matRIndex][0].append(
radialPart * angularFactor
)
matRConstructor[matRIndex][1].append(j)
matRConstructor[matRIndex][2].append(i)
matRIndex += 1
if progressOutput or debugOutput:
print("\n")
self.matDiagonal = csr_matrix(
(
matDiagonalConstructor[0],
(matDiagonalConstructor[1], matDiagonalConstructor[2]),
),
shape=(dimension, dimension),
)
self.matR = [
csr_matrix(
(
matRConstructor[i][0],
(matRConstructor[i][1], matRConstructor[i][2]),
),
shape=(dimension, dimension),
)
for i in xrange(self.interactionsUpTo * 2 - 1)
]
self.originalPairStateIndex = opi
self.__updateAngularMatrixElementsFile()
self.__closeDatabaseForMemoization()
def diagonalise(
self,
rangeR,
noOfEigenvectors,
drivingFromState=[0, 0, 0, 0, 0],
eigenstateDetuning=0.0,
sortEigenvectors=False,
progressOutput=False,
debugOutput=False,
):
r"""
Finds eigenstates in atom pair basis.
ARPACK (:obj:`scipy.sparse.linalg.eigsh`) calculation of the
`noOfEigenvectors` eigenvectors closest to the original state. If
`drivingFromState` is specified as `[n,l,j,mj,q]`, the coupling between
the pair-states and the situation where one of the atoms in the
pair-state basis is in the :math:`|n,l,j,m_j\rangle` state due to
driving with a laser field that drives the :math:`q` transition
(+1,0,-1 for :math:`\sigma^-`, :math:`\pi` and :math:`\sigma^+`
transitions respectively) is calculated and indicated by
colour-mapping these values onto the obtained eigenvectors.
Parameters:
rangeR ( :obj:`array`): Array of values for distance between
the atoms (in :math:`\mu` m) for which we want to calculate
eigenstates.
noOfEigenvectors (int): number of eigenvectors closest to the
energy of the original (unperturbed) pair state. Has to be
smaller than the total number of states.
eigenstateDetuning (float, optional): Default is 0. This
specifies detuning from the initial pair-state (in Hz)
around which we want to find `noOfEigenvectors`
eigenvectors. This is useful when looking only for a couple
of off-resonant features.
drivingFromState ([int,int,float,float,int]): Optional. State
of one of the atoms from the original pair-state basis
from which we try to drive to the excited pair-basis
manifold, **assuming that the first of the two atoms is
already excited to the specified Rydberg state**.
By default, the program will calculate just the
contribution of the original pair-state in the eigenstates
obtained by diagonalization, and will highlight its
admixture by colour-mapping the obtained eigenstates plot.
State is specified as :math:`[n,\ell,j,mj, d]`
where :math:`d` is +1, 0 or
-1 for driving :math:`\sigma^-` , :math:`\pi`
and :math:`\sigma^+` transitions respectively.
sortEigenvectors(bool): optional, False by default. Tries to
sort eigenvectors so that a given eigenvector index
corresponds to an adiabatically changing eigenstate, as
determined by maximising the overlap between old and new
eigenvectors.
progressOutput (bool): optional, False by default. If true,
prints information about the progress of the calculation.
debugOutput (bool): optional, False by default. If true,
similarly to progressOutput=True, this will print
information about the progress of calculations, but with
more verbose output.
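Example:
A minimal sketch, assuming a :obj:`PairStateInteractions` instance
``calc`` whose basis was already set with :obj:`defineBasis`
(distance range and number of eigenvectors are illustrative only)::
rList = np.linspace(0.5, 10.0, 200)  # interatomic distances in mu m
calc.diagonalise(rList, 150, progressOutput=True)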
"""
self.r = np.sort(rangeR)
dimension = len(self.basisStates)
self.noOfEigenvectors = noOfEigenvectors
# energy of the state - to be calculated
self.y = []
# how much original state is contained in this eigenvector
self.highlight = []
# what are the dominant contributing states?
self.composition = []
if noOfEigenvectors >= dimension - 1:
noOfEigenvectors = dimension - 1
print(
"Warning: Requested number of eigenvectors >= dimension-1\n"
"ARPACK can only find up to dimension-1 eigenvectors, where "
"dimension is the matrix dimension.\n"
)
if noOfEigenvectors < 1:
return
coupling = []
self.maxCoupling = 0.0
self.maxCoupledStateIndex = 0
if drivingFromState[0] != 0:
self.drivingFromState = drivingFromState
if progressOutput:
print("Finding coupling strengths")
# get first what was the state we are calculating coupling with
state1 = drivingFromState
n1 = round(state1[0])
l1 = round(state1[1])
j1 = state1[2]
m1 = state1[3]
q = state1[4]
for i in xrange(dimension):
thisCoupling = 0.0
if (
round(abs(self.basisStates[i][5] - l1)) == 1
and abs(
self.basisStates[i][0]
- self.basisStates[self.originalPairStateIndex][0]
)
< 0.1
and abs(
self.basisStates[i][1]
- self.basisStates[self.originalPairStateIndex][1]
)
< 0.1
and abs(
self.basisStates[i][2]
- self.basisStates[self.originalPairStateIndex][2]
)
< 0.1
and abs(
self.basisStates[i][3]
- self.basisStates[self.originalPairStateIndex][3]
)
< 0.1
):
state2 = self.basisStates[i]
n2 = round(state2[0 + 4])
l2 = round(state2[1 + 4])
j2 = state2[2 + 4]
m2 = state2[3 + 4]
if debugOutput:
print(
n1,
" ",
l1,
" ",
j1,
" ",
m1,
" ",
n2,
" ",
l2,
" ",
j2,
" ",
m2,
" q=",
q,
)
print(self.basisStates[i])
dme = self.atom2.getDipoleMatrixElement(
n1, l1, j1, m1, n2, l2, j2, m2, q, s=self.s2
)
thisCoupling += dme
thisCoupling = abs(thisCoupling) ** 2
if thisCoupling > self.maxCoupling:
self.maxCoupling = thisCoupling
self.maxCoupledStateIndex = i
if (thisCoupling > 0.000001) and debugOutput:
print(
"original pairstate index = ",
self.originalPairStateIndex,
)
print("this pairstate index = ", i)
print("state itself ", self.basisStates[i])
print("coupling = ", thisCoupling)
coupling.append(thisCoupling)
print("Maximal coupling from a state")
print("is to a state ", self.basisStates[self.maxCoupledStateIndex])
print("is equal to %.3e a_0 e" % self.maxCoupling)
if progressOutput:
print("\n\nDiagonalizing interaction matrix...\n")
rvalIndex = 0.0
previousEigenvectors = []
for rval in self.r:
if progressOutput:
sys.stdout.write(
"\r%d%%" % (rvalIndex / len(self.r) * 100.0)
)
sys.stdout.flush()
rvalIndex += 1.0
# calculate interaction matrix
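# the matrix at separation R is assembled as
# matDiagonal + matR[0]/R^3 + matR[1]/R^4 + matR[2]/R^5 (terms present
# depend on self.interactionsUpTo), i.e. relative energy defects plus
# dipole-dipole, dipole-quadrupole and quadrupole-quadrupole couplings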
m = self.matDiagonal
rX = (rval * 1.0e-6) ** 3
for matRX in self.matR:
m = m + matRX / rX
rX *= rval * 1.0e-6
# uses ARPACK algorithm to find only noOfEigenvectors eigenvectors
# sigma specifies center frequency (in GHz)
ev, egvector = eigsh(
m,
noOfEigenvectors,
sigma=eigenstateDetuning * 1.0e-9,
which="LM",
tol=1e-6,
)
if sortEigenvectors:
# Find which eigenvectors overlap most with eigenvectors from
# previous diagonalisation, in order to find "adiabatic"
# continuation for the respective states
if len(previousEigenvectors) == 0:
previousEigenvectors = np.copy(egvector)
rowPicked = [False for i in range(len(ev))]
columnPicked = [False for i in range(len(ev))]
stateOverlap = np.zeros((len(ev), len(ev)))
for i in range(len(ev)):
for j in range(len(ev)):
stateOverlap[i, j] = (
np.vdot(egvector[:, i], previousEigenvectors[:, j])
** 2
)
sortedOverlap = np.dstack(
np.unravel_index(
np.argsort(stateOverlap.ravel()), (len(ev), len(ev))
)
)[0]
sortedEigenvaluesOrder = np.zeros(len(ev), dtype=np.int32)
j = len(ev) ** 2 - 1
for i in range(len(ev)):
while (
rowPicked[sortedOverlap[j, 0]]
or columnPicked[sortedOverlap[j, 1]]
):
j -= 1
rowPicked[sortedOverlap[j, 0]] = True
columnPicked[sortedOverlap[j, 1]] = True
sortedEigenvaluesOrder[sortedOverlap[j, 1]] = sortedOverlap[
j, 0
]
egvector = egvector[:, sortedEigenvaluesOrder]
ev = ev[sortedEigenvaluesOrder]
previousEigenvectors = np.copy(egvector)
self.y.append(ev)
if drivingFromState[0] < 0.1:
# if we haven't defined from which state we are driving
sh = []
comp = []
for i in xrange(len(ev)):
sh.append(
abs(egvector[self.originalPairStateIndex, i]) ** 2
)
comp.append(self._stateComposition(egvector[:, i]))
self.highlight.append(sh)
self.composition.append(comp)
else:
sh = []
comp = []
for i in xrange(len(ev)):
sumCoupledStates = 0.0
for j in xrange(dimension):
sumCoupledStates += (
abs(coupling[j] / self.maxCoupling)
* abs(egvector[j, i]) ** 2
)
comp.append(self._stateComposition(egvector[:, i]))
sh.append(sumCoupledStates)
self.highlight.append(sh)
self.composition.append(comp)
# end of FOR loop over inter-atomic distances
def exportData(self, fileBase, exportFormat="csv"):
"""
Exports PairStateInteractions calculation data.
Only supported format (selected by default) is .csv in a
human-readable form with a header that saves details of calculation.
Function saves three files: 1) `filebase` _r.csv;
2) `filebase` _energyLevels
3) `filebase` _highlight
For more details on the format, see header of the saved files.
Parameters:
filebase (string): filebase for the names of the saved files
without format extension. Add as a prefix a directory path
if necessary (e.g. saving outside the current working directory)
exportFormat (string): optional. Format of the exported file. Currently
only .csv is supported but this can be extended in the future.
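Example:
A minimal usage sketch (the file base name is illustrative only)::
calc.exportData("pair_state_calculation", exportFormat="csv")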
"""
fmt = "on %Y-%m-%d @ %H:%M:%S"
ts = datetime.datetime.now().strftime(fmt)
commonHeader = "Export from Alkali Rydberg Calculator (ARC) %s.\n" % ts
commonHeader += (
"\n *** Pair State interactions for %s %s m_j = %d/2 , %s %s m_j = %d/2 pair-state. ***\n\n"
% (
self.atom1.elementName,
printStateString(self.n, self.l, self.j),
round(2.0 * self.m1),
self.atom2.elementName,
printStateString(self.nn, self.ll, self.jj),
round(2.0 * self.m2),
)
)
if self.interactionsUpTo == 1:
commonHeader += " - Pair-state interactions included up to dipole-dipole coupling.\n"
elif self.interactionsUpTo == 2:
commonHeader += " - Pair-state interactions included up to quadrupole-quadrupole coupling.\n"
commonHeader += (
" - Pair-state interactions calculated for manifold with spin angular momentum s1 = %.1f s2 = %.1f .\n"
% (self.s1, self.s2)
)
if hasattr(self, "theta"):
commonHeader += " - Atom orientation:\n"
commonHeader += " theta (polar angle) = %.5f x pi\n" % (
self.theta / pi
)
commonHeader += " phi (azimuthal angle) = %.5f x pi\n" % (
self.phi / pi
)
commonHeader += " - Calculation basis includes:\n"
commonHeader += (
" States with principal quantum number in range [%d-%d]x[%d-%d],\n"
% (
self.n - self.nRange,
self.n + self.nRange,
self.nn - self.nRange,
self.nn + self.nRange,
)
)
commonHeader += (
" AND whose orbital angular momentum (l) is in range [%d-%d] (i.e. %s-%s),\n"
% (
0,
self.lrange,
printStateLetter(0),
printStateLetter(self.lrange),
)
)
commonHeader += (
" AND whose pair-state energy difference is at most %.3f GHz\n"
% (self.energyDelta / 1.0e9)
)
commonHeader += " (energy difference is measured relative to original pair-state).\n"
else:
commonHeader += " ! Atom orientation and basis not yet set (this is set in defineBasis method).\n"
if hasattr(self, "noOfEigenvectors"):
commonHeader += (
" - Finding %d eigenvectors closest to the given pair-state\n"
% self.noOfEigenvectors
)
if self.drivingFromState[0] < 0.1:
commonHeader += (
" - State highlighting based on the relative contribution \n"
+ " of the original pair-state in the eigenstates obtained by diagonalization.\n"
)
else:
commonHeader += (
" - State highlighting based on the relative driving strength \n"
+ " to a given energy eigenstate (energy level) from state\n"
+ " %s m_j =%d/2 with polarization q=%d.\n"
% (
printStateString(*self.drivingFromState[0:3]),
round(2.0 * self.drivingFromState[3]),
self.drivingFromState[4],
)
)
else:
commonHeader += " ! Energy levels not yet found (this is done by calling diagonalise method).\n"
if exportFormat == "csv":
print("Exporting StarkMap calculation results as .csv ...")
commonHeader += " - Export consists of three (3) files:\n"
commonHeader += " 1) %s,\n" % (
fileBase + "_r." + exportFormat
)
commonHeader += " 2) %s,\n" % (
fileBase + "_energyLevels." + exportFormat
)
commonHeader += " 3) %s.\n\n" % (
fileBase + "_highlight." + exportFormat
)
filename = fileBase + "_r." + exportFormat
np.savetxt(
filename,
self.r,
fmt="%.18e",
delimiter=", ",
newline="\n",
header=(
commonHeader
+ " - - - Interatomic distance, r (\\mu m) - - -"
),
comments="# ",
)
print(" Interatomic distances (\\mu m) saved in %s" % filename)
filename = fileBase + "_energyLevels." + exportFormat
headerDetails = " NOTE : Each row corresponds to eigenstates for a single specified interatomic distance"
np.savetxt(
filename,
self.y,
fmt="%.18e",
delimiter=", ",
newline="\n",
header=(
commonHeader + " - - - Energy (GHz) - - -\n" + headerDetails
),
comments="# ",
)
print(
" Lists of energies (in GHz relative to the original pair-state energy)"
+ (" saved in %s" % filename)
)
filename = fileBase + "_highlight." + exportFormat
np.savetxt(
filename,
self.highlight,
fmt="%.18e",
delimiter=", ",
newline="\n",
header=(
commonHeader
+ " - - - Highlight value (rel.units) - - -\n"
+ headerDetails
),
comments="# ",
)
print(" Highlight values saved in %s" % filename)
print("... data export finished!")
else:
raise ValueError("Unsupported export format (.%s)." % format)
def _stateComposition(self, stateVector):
contribution = np.absolute(stateVector)
order = np.argsort(contribution, kind="heapsort")
index = -1
totalContribution = 0
value = "$"
while (index > -5) and (totalContribution < 0.95):
i = order[index]
if index != -1 and (
stateVector[i].real > 0 or abs(stateVector[i].imag) > 1e-9
):
value += "+"
if abs(self.phi) < 1e-9:
value = (
value
+ ("%.2f" % stateVector[i])
+ self._addState(*self.basisStates[i])
)
else:
value = (
value
+ (
"(%.2f+i%.2f)"
% (stateVector[i].real, stateVector[i].imag)
)
+ self._addState(*self.basisStates[i])
)
totalContribution += contribution[i] ** 2
index -= 1
if totalContribution < 0.999:
value += "+\\ldots"
return value + "$"
def _addState(self, n1, l1, j1, mj1, n2, l2, j2, mj2):
stateString = ""
if abs(self.s1 - 0.5) < 0.1:
# Alkali atom
stateString += "|%s %d/2" % (
printStateStringLatex(n1, l1, j1, s=self.s1),
round(2 * mj1),
)
else:
# divalent atoms
stateString += "|%s %d" % (
printStateStringLatex(n1, l1, j1, s=self.s1),
round(mj1),
)
if abs(self.s2 - 0.5) < 0.1:
# Alkali atom
stateString += ",%s %d/2\\rangle" % (
printStateStringLatex(n2, l2, j2, s=self.s2),
round(2 * mj2),
)
else:
# divalent atom
stateString += ",%s %d\\rangle" % (
printStateStringLatex(n2, l2, j2, s=self.s2),
round(mj2),
)
return stateString
def plotLevelDiagram(
self, highlightColor="red", highlightScale="linear", units="GHz"
):
"""
Plots pair state level diagram
Call :obj:`showPlot` if you want to display a plot afterwards.
Parameters:
highlightColor (string): optional, specifies the colour used
for state highlighting
highlightScale (string): optional, specifies scaling of
state highlighting. Default is 'linear'. Use 'log-2' or
'log-3' for logarithmic scale going down to 1e-2 and 1e-3
respectively. Logarithmic scale is useful for spotting
weakly admixed states.
units (:obj:`str`, optional): possible values {'*GHz*','cm','eV'};
[case insensitive] if value is 'GHz' (default), diagram will
be plotted as energy :math:`/h` in units of GHz; if the
string contains 'cm' diagram will be plotted in energy units
cm :math:`{}^{-1}`; if the value is 'eV', diagram
will be plotted as energy in units eV.
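Example:
A typical sketch once :obj:`diagonalise` has been called (colour and
scale choices are illustrative only)::
calc.plotLevelDiagram(highlightColor="blue", highlightScale="log-2")
calc.showPlot()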
"""
rvb = LinearSegmentedColormap.from_list(
"mymap", ["0.9", highlightColor]
)
if units.lower() == "ev":
self.units = "eV"
self.scaleFactor = 1e9 * C_h / C_e
eLabel = ""
elif units.lower() == "ghz":
self.units = "GHz"
self.scaleFactor = 1
eLabel = "/h"
elif "cm" in units.lower():
self.units = "cm$^{-1}$"
self.scaleFactor = 1e9 / (C_c * 100)
eLabel = "/(h c)"
if highlightScale == "linear":
cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
elif highlightScale == "log-2":
cNorm = matplotlib.colors.LogNorm(vmin=1e-2, vmax=1)
elif highlightScale == "log-3":
cNorm = matplotlib.colors.LogNorm(vmin=1e-3, vmax=1)
else:
raise ValueError(
"Only 'linear', 'log-2' and 'log-3' are valid "
"inputs for highlightScale"
)
print(" Now we are plotting...")
self.fig, self.ax = plt.subplots(1, 1, figsize=(11.5, 5.0))
self.y = np.array(self.y)
self.highlight = np.array(self.highlight)
colorfulX = []
colorfulY = []
colorfulState = []
for i in xrange(len(self.r)):
for j in xrange(len(self.y[i])):
colorfulX.append(self.r[i])
colorfulY.append(self.y[i][j])
colorfulState.append(self.highlight[i][j])
colorfulState = np.array(colorfulState)
sortOrder = colorfulState.argsort(kind="heapsort")
colorfulX = np.array(colorfulX)
colorfulY = np.array(colorfulY)
colorfulX = colorfulX[sortOrder]
colorfulY = colorfulY[sortOrder]
colorfulState = colorfulState[sortOrder]
self.ax.scatter(
colorfulX,
colorfulY * self.scaleFactor,
s=10,
c=colorfulState,
linewidth=0,
norm=cNorm,
cmap=rvb,
zorder=2,
picker=5,
)
cax = self.fig.add_axes([0.91, 0.1, 0.02, 0.8])
cb = matplotlib.colorbar.ColorbarBase(cax, cmap=rvb, norm=cNorm)
if self.drivingFromState[0] == 0:
# colouring is based on the contribution of the original pair state here
label = ""
if abs(self.s1 - 0.5) < 0.1:
# Alkali atom
label += r"$|\langle %s m_j=%d/2 " % (
printStateStringLatex(self.n, self.l, self.j),
round(2.0 * self.m1),
)
else:
# divalent atom
label += r"$|\langle %s m_j=%d " % (
printStateStringLatex(self.n, self.l, self.j, s=self.s1),
round(self.m1),
)
if abs(self.s2 - 0.5) < 0.1:
# Alkali atom
label += r", %s m_j=%d/2 | \mu \rangle |^2$" % (
printStateStringLatex(self.nn, self.ll, self.jj),
round(2.0 * self.m2),
)
else:
# divalent atom
label += r", %s m_j=%d | \mu \rangle |^2$" % (
printStateStringLatex(self.nn, self.ll, self.jj, s=self.s2),
round(self.m2, 0),
)
cb.set_label(label)
else:
# colouring is based on the coupling to different states
cb.set_label(r"$(\Omega_\mu/\Omega)^2$")
self.ax.set_xlabel(r"Interatomic distance, $R$ ($\mu$m)")
self.ax.set_ylabel(
r"Pair-state relative energy, $\Delta E %s$ (%s)"
% (eLabel, self.units)
)
def savePlot(self, filename="PairStateInteractions.pdf"):
"""
Saves plot made by :obj:`PairStateInteractions.plotLevelDiagram`
Args:
filename (:obj:`str`, optional): file location where the plot
should be saved
"""
if self.fig != 0:
self.fig.savefig(filename, bbox_inches="tight")
else:
print("Error while saving a plot: nothing is plotted yet")
return 0
def showPlot(self, interactive=True):
"""
Shows level diagram printed by
:obj:`PairStateInteractions.plotLevelDiagram`
By default, it will output interactive plot, which means that
clicking on the state will show the composition of the clicked
state in original basis (dominant elements)
Args:
interactive (bool): optional, by default it is True. If true,
plotted graph will be interactive, i.e. users can click
on the state to identify the state composition
Note:
interactive=True has effect if the graphs are explored in usual
matplotlib pop-up windows. It doesn't have effect on inline
plots in Jupyter (IPython) notebooks.
"""
if interactive:
self.ax.set_title("Click on state to see state composition")
self.clickedPoint = 0
self.fig.canvas.draw()
self.fig.canvas.mpl_connect("pick_event", self._onPick)
plt.show()
return 0
def _onPick(self, event):
if isinstance(event.artist, matplotlib.collections.PathCollection):
x = event.mouseevent.xdata
y = event.mouseevent.ydata / self.scaleFactor
i = np.searchsorted(self.r, x)
if i == len(self.r):
i -= 1
if (i > 0) and (abs(self.r[i - 1] - x) < abs(self.r[i] - x)):
i -= 1
j = 0
for jj in xrange(len(self.y[i])):
if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
j = jj
# now choose the most highlighted state in this area
distance = abs(self.y[i][j] - y) * 1.5
for jj in xrange(len(self.y[i])):
if abs(self.y[i][jj] - y) < distance and (
abs(self.highlight[i][jj]) > abs(self.highlight[i][j])
):
j = jj
if self.clickedPoint != 0:
self.clickedPoint.remove()
(self.clickedPoint,) = self.ax.plot(
[self.r[i]],
[self.y[i][j] * self.scaleFactor],
"bs",
linewidth=0,
zorder=3,
)
self.ax.set_title(
"State = "
+ self.composition[i][j]
+ (" Colourbar = %.2f" % self.highlight[i][j]),
fontsize=11,
)
event.canvas.draw()
def getC6fromLevelDiagram(
self, rStart, rStop, showPlot=False, minStateContribution=0.0
):
"""
Finds :math:`C_6` coefficient for original pair state.
Function first finds, for each distance in the range
[ `rStart` , `rStop` ], the eigenstate with the highest contribution of
the original state. One can set the optional parameter
`minStateContribution` to a value in the range [0,1), so that the
function considers only states whose contribution from the original
state is larger than `minStateContribution`.
Once original pair-state is found in the range of interatomic
distances, from smallest `rStart` to the biggest `rStop`, function
will try to perform fitting of the corresponding state energy
:math:`E(R)` at distance :math:`R` to the function
:math:`A+C_6/R^6` where :math:`A` is some offset.
Args:
rStart (float): smallest inter-atomic distance to be used for fitting
rStop (float): maximum inter-atomic distance to be used for fitting
showPlot (bool): If set to true, it will print the plot showing
fitted energy level and the obtained best fit. Default is
False
minStateContribution (float): valid values are in the range [0,1).
It specifies minimum amount of the original state in the given
energy state necessary for the state to be considered for
the adiabatic continuation of the original unperturbed
pair state.
Returns:
float:
:math:`C_6` measured in :math:`\\text{GHz }\\mu\\text{m}^6`
on success; If unsuccessful returns False.
Note:
In order to use this function, highlighting in
:obj:`diagonalise` should be based on the original pair
state contribution of the eigenvectors (that is, the
`drivingFromState` parameter should not be set, which
corresponds to `drivingFromState` = [0,0,0,0,0]).
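Example:
A minimal sketch, assuming :obj:`diagonalise` was already run over a
suitable distance range (the fit window below is illustrative only)::
c6 = calc.getC6fromLevelDiagram(5.0, 10.0, showPlot=True)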
"""
initialStateDetuning = []
initialStateDetuningX = []
fromRindex = -1
toRindex = -1
for br in xrange(len(self.r)):
if (fromRindex == -1) and (self.r[br] >= rStart):
fromRindex = br
if self.r[br] > rStop:
toRindex = br - 1
break
if (fromRindex != -1) and (toRindex == -1):
toRindex = len(self.r) - 1
if fromRindex == -1:
print(
"\nERROR: could not find data for energy levels for interatomic"
)
print("distances between %2.f and %.2f mu m.\n\n" % (rStart, rStop))
return 0
for br in xrange(fromRindex, toRindex + 1):
index = -1
maxPortion = minStateContribution
for br2 in xrange(len(self.y[br])):
if abs(self.highlight[br][br2]) > maxPortion:
index = br2
maxPortion = abs(self.highlight[br][br2])
if index != -1:
initialStateDetuning.append(abs(self.y[br][index]))
initialStateDetuningX.append(self.r[br])
initialStateDetuning = np.log(np.array(initialStateDetuning))
initialStateDetuningX = np.array(initialStateDetuningX)
def c6fit(r, c6, offset):
return np.log(c6 / r**6 + offset)
try:
popt, pcov = curve_fit(
c6fit, initialStateDetuningX, initialStateDetuning, [1, 0]
)
except Exception as ex:
print(ex)
print("ERROR: unable to find a fit for C6.")
return False
print("c6 = ", popt[0], " GHz /R^6 (mu m)^6")
print("offset = ", popt[1])
y_fit = []
for val in initialStateDetuningX:
y_fit.append(c6fit(val, popt[0], popt[1]))
y_fit = np.array(y_fit)
if showPlot:
fig, ax = plt.subplots(1, 1, figsize=(8.0, 5.0))
ax.loglog(
initialStateDetuningX,
np.exp(initialStateDetuning),
"b-",
lw=2,
zorder=1,
)
ax.loglog(
initialStateDetuningX, np.exp(y_fit), "r--", lw=2, zorder=2
)
ax.legend(
("calculated energy level", "fitted model function"),
loc=1,
fontsize=10,
)
ax.set_xlim(np.min(self.r), np.max(self.r))
ymin = np.min(initialStateDetuning)
ymax = np.max(initialStateDetuning)
ax.set_ylim(exp(ymin), exp(ymax))
minorLocator = mpl.ticker.MultipleLocator(1)
minorFormatter = mpl.ticker.FormatStrFormatter("%d")
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(minorFormatter)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel(r"Interatomic distance, $r$ ($\mu$m)")
ax.set_ylabel(r"Pair-state energy, $|E|$ (GHz)")
ax.set_title(r"$C_6$ fit")
plt.show()
self.fitX = initialStateDetuningX
self.fitY = initialStateDetuning
self.fittedCurveY = y_fit
return popt[0]
def getC3fromLevelDiagram(
self,
rStart,
rStop,
showPlot=False,
minStateContribution=0.0,
resonantBranch=+1,
):
"""
Finds :math:`C_3` coefficient for original pair state.
Function first finds, for each distance in the range
[`rStart` , `rStop`], the eigenstate with the highest contribution of
the original state. One can set the optional parameter
`minStateContribution` to a value in the range [0,1), so that the
function considers only states whose contribution from the original
state is larger than `minStateContribution`.
Once original pair-state is found in the range of interatomic
distances, from smallest `rStart` to the biggest `rStop`, function
will try to perform fitting of the corresponding state energy
:math:`E(R)` at distance :math:`R` to the function
:math:`A+C_3/R^3` where :math:`A` is some offset.
Args:
rStart (float): smallest inter-atomic distance to be used for fitting
rStop (float): maximum inter-atomic distance to be used for fitting
showPlot (bool): If set to true, it will print the plot showing
fitted energy level and the obtained best fit. Default is
False
minStateContribution (float): valid values are in the range [0,1).
It specifies minimum amount of the original state in the given
energy state necessary for the state to be considered for
the adiabatic continuation of the original unperturbed
pair state.
resonantBranch (int): optional, default +1. For resonant
interactions we have two branches with identical
state contributions. In this case, we will select only
positively detuned branch (for resonantBranch = +1)
or negatively detuned branch (for resonantBranch = -1)
depending on the value of resonantBranch optional parameter
Returns:
float:
:math:`C_3` measured in :math:`\\text{GHz }\\mu\\text{m}^3`
on success; If unsuccessful returns False.
Note:
In order to use this function, highlighting in
:obj:`diagonalise` should be based on the original pair
state contribution of the eigenvectors (that is, the
`drivingFromState` parameter should not be set, which
corresponds to `drivingFromState` = [0,0,0,0,0]).
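Example:
A minimal sketch (the fit window below is illustrative only)::
c3 = calc.getC3fromLevelDiagram(2.0, 6.0, showPlot=True)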
"""
selectBranch = False
if abs(self.l - self.ll) == 1:
selectBranch = True
resonantBranch = float(resonantBranch)
initialStateDetuning = []
initialStateDetuningX = []
fromRindex = -1
toRindex = -1
for br in xrange(len(self.r)):
if (fromRindex == -1) and (self.r[br] >= rStart):
fromRindex = br
if self.r[br] > rStop:
toRindex = br - 1
break
if (fromRindex != -1) and (toRindex == -1):
toRindex = len(self.r) - 1
if fromRindex == -1:
print(
"\nERROR: could not find data for energy levels for interatomic"
)
print("distances between %2.f and %.2f mu m.\n\n" % (rStart, rStop))
return False
discontinuityDetected = False
for br in xrange(toRindex, fromRindex - 1, -1):
index = -1
maxPortion = minStateContribution
for br2 in xrange(len(self.y[br])):
if (abs(self.highlight[br][br2]) > maxPortion) and (
not selectBranch or (self.y[br][br2] * resonantBranch > 0.0)
):
index = br2
maxPortion = abs(self.highlight[br][br2])
if len(initialStateDetuningX) > 2:
slope1 = (
initialStateDetuning[-1] - initialStateDetuning[-2]
) / (initialStateDetuningX[-1] - initialStateDetuningX[-2])
slope2 = (abs(self.y[br][index]) - initialStateDetuning[-1]) / (
self.r[br] - initialStateDetuningX[-1]
)
if abs(slope2) > 3.0 * abs(slope1):
discontinuityDetected = True
if (index != -1) and (not discontinuityDetected):
initialStateDetuning.append(abs(self.y[br][index]))
initialStateDetuningX.append(self.r[br])
initialStateDetuning = np.log(np.array(initialStateDetuning)) # *1e9
initialStateDetuningX = np.array(initialStateDetuningX)
def c3fit(r, c3, offset):
return np.log(c3 / r**3 + offset)
try:
popt, pcov = curve_fit(
c3fit, initialStateDetuningX, initialStateDetuning, [1, 0]
)
except Exception as ex:
print(ex)
print("ERROR: unable to find a fit for C3.")
return False
print("c3 = ", popt[0], " GHz /R^3 (mu m)^3")
print("offset = ", popt[1])
y_fit = []
for val in initialStateDetuningX:
y_fit.append(c3fit(val, popt[0], popt[1]))
y_fit = np.array(y_fit)
if showPlot:
fig, ax = plt.subplots(1, 1, figsize=(8.0, 5.0))
ax.loglog(
initialStateDetuningX,
np.exp(initialStateDetuning),
"b-",
lw=2,
zorder=1,
)
ax.loglog(
initialStateDetuningX, np.exp(y_fit), "r--", lw=2, zorder=2
)
ax.legend(
("calculated energy level", "fitted model function"),
loc=1,
fontsize=10,
)
ax.set_xlim(np.min(self.r), np.max(self.r))
ymin = np.min(initialStateDetuning)
ymax = np.max(initialStateDetuning)
ax.set_ylim(exp(ymin), exp(ymax))
minorLocator = mpl.ticker.MultipleLocator(1)
minorFormatter = mpl.ticker.FormatStrFormatter("%d")
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(minorFormatter)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel(r"Interatomic distance, $r$ ($\mu$m)")
ax.set_ylabel(r"Pair-state energy, $|E|$ (GHz)")
locatorStep = 1.0
while (locatorStep > (ymax - ymin)) and locatorStep > 1.0e-4:
locatorStep /= 10.0
ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(locatorStep))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%.3f"))
ax.yaxis.set_minor_locator(
mpl.ticker.MultipleLocator(locatorStep / 10.0)
)
ax.yaxis.set_minor_formatter(plt.NullFormatter())
# ax.yaxis.set_minor_formatter(mpl.ticker.FormatStrFormatter('%.3f'))
ax.set_title(r"$C_3$ fit")
plt.show()
self.fitX = initialStateDetuningX
self.fitY = initialStateDetuning
self.fittedCurveY = y_fit
return popt[0]
def getVdwFromLevelDiagram(
self, rStart, rStop, showPlot=False, minStateContribution=0.0
):
"""
Finds :math:`r_{\\rm vdW}` coefficient for original pair state.
Function first finds, for each distance in the range [`rStart`,`rStop`],
the eigenstate with the highest contribution of the original state.
One can set the optional parameter `minStateContribution` to a value in
the range [0,1), so that the function considers only states whose
contribution from the original state is larger than
`minStateContribution`.
Once original pair-state is found in the range of interatomic
distances, from smallest `rStart` to the biggest `rStop`, function
will try to perform fitting of the corresponding state energy
:math:`E(R)` at distance :math:`R` to the function
:math:`A+B\\frac{1-\\sqrt{1+(r_{\\rm vdW}/r)^6}}{1-\\sqrt{1+r_{\\rm vdW}^6}}`
where :math:`A` and :math:`B` are fitted offset and scale parameters.
Args:
rStart (float): smallest inter-atomic distance to be used for fitting
rStop (float): maximum inter-atomic distance to be used for fitting
showPlot (bool): If set to true, it will print the plot showing
fitted energy level and the obtained best fit. Default is
False
minStateContribution (float): valid values are in the range [0,1).
It specifies minimum amount of the original state in the given
energy state necessary for the state to be considered for
the adiabatic continuation of the original unperturbed
pair state.
Returns:
float: :math:`r_{\\rm vdW}` measured in :math:`\\mu\\text{m}`
on success; If unsuccessful returns False.
Note:
In order to use this function, highlighting in
:obj:`diagonalise` should be based on the original pair
state contribution of the eigenvectors (that is, the
`drivingFromState` parameter should not be set, which
corresponds to `drivingFromState` = [0,0,0,0,0]).
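Example:
A minimal sketch (the fit window below is illustrative only)::
rvdw = calc.getVdwFromLevelDiagram(1.0, 10.0, showPlot=True)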
"""
initialStateDetuning = []
initialStateDetuningX = []
fromRindex = -1
toRindex = -1
for br in xrange(len(self.r)):
if (fromRindex == -1) and (self.r[br] >= rStart):
fromRindex = br
if self.r[br] > rStop:
toRindex = br - 1
break
if (fromRindex != -1) and (toRindex == -1):
toRindex = len(self.r) - 1
if fromRindex == -1:
print(
"\nERROR: could not find data for energy levels for interatomic"
)
print("distances between %2.f and %.2f mu m.\n\n" % (rStart, rStop))
return False
discontinuityDetected = False
for br in xrange(toRindex, fromRindex - 1, -1):
index = -1
maxPortion = minStateContribution
for br2 in xrange(len(self.y[br])):
if abs(self.highlight[br][br2]) > maxPortion:
index = br2
maxPortion = abs(self.highlight[br][br2])
if len(initialStateDetuningX) > 2:
slope1 = (
initialStateDetuning[-1] - initialStateDetuning[-2]
) / (initialStateDetuningX[-1] - initialStateDetuningX[-2])
slope2 = (abs(self.y[br][index]) - initialStateDetuning[-1]) / (
self.r[br] - initialStateDetuningX[-1]
)
if abs(slope2) > 3.0 * abs(slope1):
discontinuityDetected = True
if (index != -1) and (not discontinuityDetected):
initialStateDetuning.append(abs(self.y[br][index]))
initialStateDetuningX.append(self.r[br])
initialStateDetuning = np.log(abs(np.array(initialStateDetuning)))
initialStateDetuningX = np.array(initialStateDetuningX)
def vdwFit(r, offset, scale, vdw):
return np.log(
abs(
offset
+ scale
* (1.0 - np.sqrt(1.0 + (vdw / r) ** 6))
/ (1.0 - np.sqrt(1 + vdw**6))
)
)
noOfPoints = len(initialStateDetuningX)
print("Data points to fit = ", noOfPoints)
try:
popt, pcov = curve_fit(
vdwFit,
initialStateDetuningX,
initialStateDetuning,
[
0,
initialStateDetuning[noOfPoints // 2],
initialStateDetuningX[noOfPoints // 2],
],
)
except Exception as ex:
print(ex)
print("ERROR: unable to find a fit for van der Waals distance.")
return False
if (initialStateDetuningX[0] < popt[2]) or (
popt[2] < initialStateDetuningX[-1]
):
print("WARNING: vdw radius seems to be outside the fitting range!")
print(
"It's estimated to be around %.2f mu m from the current fit."
% popt[2]
)
print("Rvdw = ", popt[2], " mu m")
print("offset = ", popt[0], "\n scale = ", popt[1])
y_fit = []
for val in initialStateDetuningX:
y_fit.append(vdwFit(val, popt[0], popt[1], popt[2]))
y_fit = np.array(y_fit)
if showPlot:
fig, ax = plt.subplots(1, 1, figsize=(8.0, 5.0))
ax.loglog(
initialStateDetuningX,
np.exp(initialStateDetuning),
"b-",
lw=2,
zorder=1,
)
ax.loglog(
initialStateDetuningX, np.exp(y_fit), "r--", lw=2, zorder=2
)
ax.set_xlim(np.min(self.r), np.max(self.r))
ymin = np.min(initialStateDetuning)
ymax = np.max(initialStateDetuning)
ax.set_ylim(exp(ymin), exp(ymax))
ax.axvline(x=popt[2], color="k")
ax.text(
popt[2],
exp((ymin + ymax) / 2.0),
r"$R_{vdw} = %.1f$ $\mu$m" % popt[2],
)
minorLocator = mpl.ticker.MultipleLocator(1)
minorFormatter = mpl.ticker.FormatStrFormatter("%d")
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(minorFormatter)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel(r"Interatomic distance, $r$ ($\mu$m)")
ax.set_ylabel(r"Pair-state energy, $|E|$ (GHz)")
ax.legend(
("calculated energy level", "fitted model function"),
loc=1,
fontsize=10,
)
plt.show()
self.fitX = initialStateDetuningX
self.fitY = initialStateDetuning
self.fittedCurveY = y_fit
return popt[2]
class StarkMapResonances:
"""
Calculates pair state Stark maps for finding resonances
Tool for finding conditions for Förster resonances. For a given pair
state, in a given range of electric fields, it looks for pair-states
that are close in energy and coupled via dipole-dipole interactions
to the original pair-state.
See `Stark resonances example snippet`_.
.. _`Stark resonances example snippet`:
././Rydberg_atoms_a_primer.html#Tuning-the-interaction-strength-with-electric-fields
Parameters:
atom1 (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
={ :obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
the first atom in the pair-state
state1 ([int,int,float,float,(float)]): specification of the state
of the first atom as an array of values :math:`[n,l,j,m_j]`.
For :obj:`arc.divalent_atom_functions.DivalentAtom` and other divalent atoms, 5th value
should be added specifying total spin angular momentum `s`.
Full definition of state then has format
:math:`[n,l,j,m_j,s]`.
atom2 (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
={ :obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
the second atom in the pair-state
state2 ([int,int,float,float,(float)]): specification of the state
of the second atom as an array of values :math:`[n,l,j,m_j]`,
For :obj:`arc.divalent_atom_functions.DivalentAtom` and other divalent atoms, 5th value
should be added specifying total spin angular momentum `s`.
Full definition of state then has format
:math:`[n,l,j,m_j,s]`.
Note:
In checking whether a certain state is dipole coupled to the original
state, only the highest contributing state is checked for dipole
coupling. This should be fine if one is interested in resonances
in weak fields. For stronger fields, one might want to include
the effect of coupling to other contributing basis states.
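Example:
A minimal usage sketch (atom, states and electric field range are
illustrative only)::
from arc import *
calculation = StarkMapResonances(
Caesium(), [44, 2, 2.5, 2.5], Caesium(), [44, 2, 2.5, 2.5]
)
calculation.findResonances(
42, 46, 4, np.linspace(0.0, 80.0, 200), energyRange=[-2.0e9, 2.0e9]
)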
"""
def __init__(self, atom1, state1, atom2, state2):
self.atom1 = atom1
if issubclass(type(self.atom1), DivalentAtom) and (
len(state1) != 5 or (state1[4] != 0 and state1[4] != 1)
):
raise ValueError(
"For divalent atoms state specification has to "
"include total spin angular momentum s as the last "
"number in the state specification [n,l,j,m_j,s]."
)
self.state1 = state1
# explicitly add the total spin of the state for alkali atoms
if len(self.state1) == 4:
self.state1.append(0.5)
self.atom2 = atom2
if issubclass(type(self.atom2), DivalentAtom) and (
len(state2) != 5 or (state2[4] != 0 and state2[4] != 1)
):
raise ValueError(
"For divalent atoms state specification has to "
"include total spin angular momentum s as the last "
"number in the state specification [n,l,j,m_j,s]."
)
self.state2 = state2
# explicitly add the total spin of the state for alkali atoms
if len(self.state2) == 4:
self.state2.append(0.5)
self.pairStateEnergy = (
(
atom1.getEnergy(
self.state1[0],
self.state1[1],
self.state1[2],
s=self.state1[4],
)
+ atom2.getEnergy(
self.state2[0],
self.state2[1],
self.state2[2],
s=self.state2[4],
)
)
* C_e
/ C_h
* 1e-9
)
def findResonances(
self,
nMin,
nMax,
maxL,
eFieldList,
energyRange=[-5.0e9, +5.0e9],
Bz=0,
progressOutput=False,
):
r"""
Finds near-resonant dipole-coupled pair-states
For states in range of principal quantum numbers [`nMin`,`nMax`]
and orbital angular momentum [0,`maxL`], for a range of electric fields
given by `eFieldList`, the function will find near-resonant pair-states.
Only states that are in the range given by `energyRange` will be
extracted from the pair-state Stark maps.
Args:
nMin (int): minimal principal quantum number of the state to be
included in the StarkMap calculation
nMax (int): maximal principal quantum number of the state to be
included in the StarkMap calculation
maxL (int): maximum value of orbital angular momentum for the states
to be included in the calculation
eFieldList ([float]): list of the electric fields (in V/m) for
which to calculate level diagram (StarkMap)
Bz (float): optional, magnetic field directed along z-axis in
units of Tesla. Calculation will be correct only for weak
                magnetic fields, where the paramagnetic term is much stronger
                than the diamagnetic term. The diamagnetic term is neglected.
            energyRange ([float,float]): optional argument. Minimal and maximal
                energy that a dipole-coupled state should have in order
                to be kept in the plot (in units of Hz). By default it finds
                states within :math:`\pm 5` GHz.
progressOutput (:obj:`bool`, optional): if True prints the
progress of calculation; Set to false by default.
"""
self.eFieldList = eFieldList
self.Bz = Bz
eMin = energyRange[0] * 1.0e-9 # in GHz
eMax = energyRange[1] * 1.0e-9
# find where is the original pair state
sm1 = StarkMap(self.atom1)
sm1.defineBasis(
self.state1[0],
self.state1[1],
self.state1[2],
self.state1[3],
nMin,
nMax,
maxL,
Bz=self.Bz,
progressOutput=progressOutput,
s=self.state1[4],
)
sm1.diagonalise(eFieldList, progressOutput=progressOutput)
if (
(self.atom2 is self.atom1)
and (self.state1[0] == self.state2[0])
and (self.state1[1] == self.state2[1])
and (abs(self.state1[2] - self.state2[2]) < 0.1)
and (abs(self.state1[3] - self.state2[3]) < 0.1)
and (abs(self.state1[4] - self.state2[4]) < 0.1)
):
sm2 = sm1
else:
sm2 = StarkMap(self.atom2)
sm2.defineBasis(
self.state2[0],
self.state2[1],
self.state2[2],
self.state2[3],
nMin,
nMax,
maxL,
Bz=self.Bz,
progressOutput=progressOutput,
s=self.state2[4],
)
sm2.diagonalise(eFieldList, progressOutput=progressOutput)
self.originalStateY = []
self.originalStateContribution = []
for i in xrange(len(sm1.eFieldList)):
jmax1 = 0
jmax2 = 0
for j in xrange(len(sm1.highlight[i])):
if sm1.highlight[i][j] > sm1.highlight[i][jmax1]:
jmax1 = j
for j in xrange(len(sm2.highlight[i])):
if sm2.highlight[i][j] > sm2.highlight[i][jmax2]:
jmax2 = j
self.originalStateY.append(
sm1.y[i][jmax1] + sm2.y[i][jmax2] - self.pairStateEnergy
)
self.originalStateContribution.append(
(sm1.highlight[i][jmax1] + sm2.highlight[i][jmax2]) / 2.0
)
# M= mj1+mj2 is conserved with dipole-dipole interaction
dmlist1 = [1, 0]
if self.state1[3] != 0.5:
dmlist1.append(-1)
dmlist2 = [1, 0]
if self.state2[3] != 0.5:
dmlist2.append(-1)
n1 = self.state1[0]
l1 = self.state1[1] + 1
j1 = self.state1[2] + 1
mj1 = self.state1[3]
n2 = self.state2[0]
l2 = self.state2[1] + 1
j2 = self.state2[2] + 1
mj2 = self.state2[3]
self.fig, self.ax = plt.subplots(1, 1, figsize=(9.0, 6))
cm = LinearSegmentedColormap.from_list("mymap", ["0.9", "red", "black"])
cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
self.r = []
self.y = []
self.composition = []
for dm1 in dmlist1:
sm1.defineBasis(
n1,
l1,
j1,
mj1 + dm1,
nMin,
nMax,
maxL,
Bz=self.Bz,
progressOutput=progressOutput,
s=self.state1[4],
)
sm1.diagonalise(eFieldList, progressOutput=progressOutput)
for dm2 in dmlist2:
sm2.defineBasis(
n2,
l2,
j2,
mj2 + dm2,
nMin,
nMax,
maxL,
Bz=self.Bz,
progressOutput=progressOutput,
s=self.state2[4],
)
sm2.diagonalise(eFieldList, progressOutput=progressOutput)
for i in xrange(len(sm1.eFieldList)):
yList = []
compositionList = []
if progressOutput:
sys.stdout.write("\rE=%.2f V/m " % sm1.eFieldList[i])
sys.stdout.flush()
for j in xrange(len(sm1.y[i])):
for jj in xrange(len(sm2.y[i])):
energy = (
sm1.y[i][j]
+ sm2.y[i][jj]
- self.pairStateEnergy
)
statec1 = sm1.basisStates[
sm1.composition[i][j][0][1]
]
statec2 = sm2.basisStates[
sm2.composition[i][jj][0][1]
]
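                        # keep only pair-states that fall inside the requested
                        # energy window and that are dipole-allowed (orbital
                        # angular momentum changes by one) from the original states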
if (
(energy > eMin)
and (energy < eMax)
and (abs(statec1[1] - self.state1[1]) == 1)
and (abs(statec2[1] - self.state2[1]) == 1)
):
# add this to PairStateMap
yList.append(energy)
compositionList.append(
[
sm1._stateComposition(
sm1.composition[i][j]
),
sm2._stateComposition(
sm2.composition[i][jj]
),
]
)
if len(self.y) <= i:
self.y.append(yList)
self.composition.append(compositionList)
else:
self.y[i].extend(yList)
self.composition[i].extend(compositionList)
if progressOutput:
print("\n")
for i in xrange(len(sm1.eFieldList)):
self.y[i] = np.array(self.y[i])
self.composition[i] = np.array(self.composition[i])
self.ax.scatter(
[sm1.eFieldList[i] / 100.0] * len(self.y[i]),
self.y[i],
c="k",
s=5,
norm=cNorm,
cmap=cm,
lw=0,
picker=5,
)
self.ax.plot(sm1.eFieldList / 100.0, self.originalStateY, "r-", lw=1)
self.ax.set_ylim(eMin, eMax)
self.ax.set_xlim(
min(self.eFieldList) / 100.0, max(self.eFieldList) / 100.0
)
self.ax.set_xlabel("Electric field (V/cm)")
self.ax.set_ylabel(r"Pair-state relative energy, $\Delta E/h$ (GHz)")
def showPlot(self, interactive=True):
"""
Plots initial state Stark map and its dipole-coupled resonances
Args:
interactive (optional, bool): if True (by default) points on plot
will be clickable so that one can find the state labels
and their composition (if they are heavily admixed).
Note:
Zero is given by the initial states of the atom given in
initialisation of calculations, calculated **in absence of
magnetic field B_z**. In other words, for non-zero magnetic
field the inital states will have offset from zero even
for zero electric field due to Zeeman shift.
"""
if self.fig != 0:
if interactive:
self.ax.set_title("Click on state to see state composition")
self.clickedPoint = 0
self.fig.canvas.draw()
self.fig.canvas.mpl_connect("pick_event", self._onPick)
plt.show()
else:
print("Error while showing a plot: nothing is plotted yet")
def _onPick(self, event):
if isinstance(event.artist, matplotlib.collections.PathCollection):
x = event.mouseevent.xdata * 100.0
y = event.mouseevent.ydata
i = np.searchsorted(self.eFieldList, x)
if i == len(self.eFieldList):
i -= 1
if (i > 0) and (
abs(self.eFieldList[i - 1] - x) < abs(self.eFieldList[i] - x)
):
i -= 1
j = 0
for jj in xrange(len(self.y[i])):
if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
j = jj
if self.clickedPoint != 0:
self.clickedPoint.remove()
(self.clickedPoint,) = self.ax.plot(
[self.eFieldList[i] / 100.0],
[self.y[i][j]],
"bs",
linewidth=0,
zorder=3,
)
atom1 = self.atom1.elementName
atom2 = self.atom2.elementName
composition1 = str(self.composition[i][j][0])
composition2 = str(self.composition[i][j][1])
self.ax.set_title(
("[%s,%s]=[" % (atom1, atom2))
+ composition1
+ ","
+ composition2
+ "]",
fontsize=10,
)
event.canvas.draw()
def _onPick2(self, xdata, ydata):
x = xdata * 100.0
y = ydata
i = np.searchsorted(self.eFieldList, x)
if i == len(self.eFieldList):
i -= 1
if (i > 0) and (
abs(self.eFieldList[i - 1] - x) < abs(self.eFieldList[i] - x)
):
i -= 1
j = 0
for jj in xrange(len(self.y[i])):
if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
j = jj
if self.clickedPoint != 0:
self.clickedPoint.remove()
(self.clickedPoint,) = self.ax.plot(
[self.eFieldList[i] / 100.0],
[self.y[i][j]],
"bs",
linewidth=0,
zorder=3,
)
atom1 = self.atom1.elementName
atom2 = self.atom2.elementName
composition1 = str(self.composition[i][j][0])
composition2 = str(self.composition[i][j][1])
self.ax.set_title(
("[%s,%s]=[" % (atom1, atom2))
+ composition1
+ ","
+ composition2
+ "]",
fontsize=10,
        )
/CloudCRMSat-0.0.2.tar.gz/CloudCRMSat-0.0.2/cloudsat/web_tools.py | import os
import re
import requests
from lxml import html
from pandas import DataFrame
from dateutil.rrule import rrule, DAILY
from datetime import datetime, timedelta
class OverpassNotFoundError(Exception):
pass
class InvalidDateForSatelliteError(Exception):
pass
class WebSiteOfflineError(Exception):
pass
def verify_landsat_scene_exists(scene_string):
if scene_string.startswith('LT5'):
url_spec = '12266'
elif scene_string.startswith('LE7'):
url_spec = '13350'
elif scene_string.startswith('LC8'):
url_spec = '13400'
else:
raise NotImplementedError('Must choose a valid satellite to find url_spec')
base = 'https://earthexplorer.usgs.gov/fgdc/'
url = '{}{}/{}/'.format(base, url_spec, scene_string)
r = requests.get(url)
tree = html.fromstring(r.text)
if r.status_code != 200:
raise WebSiteOfflineError('USGS application unavailable.')
string = tree.xpath('//pre/text()')
split_str = string[0].split('\n')[5].split(':')
title = [x.strip() for x in split_str]
if len(title[1]) < 1:
return False
else:
return True
def get_l5_overpass_data(path, row, date):
if date > datetime(2013, 06, 01):
raise ValueError('The date requested is after L5 deactivation')
lat, lon = convert_lat_lon_wrs2pr(path, row, conversion_type='convert_pr_to_ll')
url = 'https://cloudsgate2.larc.nasa.gov/cgi-bin/predict/predict.cgi'
# submit form > copy POST data
    # use sat='SATELLITE X' with a space
num_passes = 30
payload = dict(c='compute', sat='LANDSAT 5', instrument='0-0', res='9', month=str(date.month),
day=str(date.day),
numday=str(num_passes), viewangle='', solarangle='day', gif='track', ascii='element',
lat=str(lat),
lon=str(lon), sitename='Optional',
choice='track', year=str(date.year))
r = requests.post(url, data=payload)
tree = html.fromstring(r.text)
head_string = tree.xpath('//table/tr[4]/td[1]/pre/b/font/text()')
col = head_string[0].split()[8]
ind = []
zeniths = []
for row in range(5, num_passes + 5):
string = tree.xpath('//table/tr[{}]/td[1]/pre/font/text()'.format(row))
l = string[0].split()
dt_str = '{}-{}-{} {}:{}'.format(l[0], l[1], l[2], l[3], l[4])
dt = datetime.strptime(dt_str, '%Y-%m-%d %H:%M')
ind.append(dt)
zenith = float(l[8])
zeniths.append(zenith)
df = DataFrame(zeniths, index=ind, columns=[col])
print 'reference dtime overpass: {}'.format(df['zenith'].argmin())
return df['zenith'].argmin()
def landsat_overpass_time(lndst_path_row, start_date, satellite):
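    # For L7/L8 the published pending-acquisition schedules are scanned day by day,
    # for up to 20 days after start_date, until the requested path/row is found;
    # L5 is handled through the overpass predictor in get_l5_overpass_data().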
delta = timedelta(days=20)
end = start_date + delta
if satellite == 'LT5':
if start_date > datetime(2013, 06, 01):
raise InvalidDateForSatelliteError('The date requested is after L5 deactivation')
reference_time = get_l5_overpass_data(lndst_path_row[0], lndst_path_row[1], start_date)
return reference_time
else:
if satellite == 'LE7':
sat_abv = 'L7'
elif satellite == 'LC8':
sat_abv = 'L8'
base = 'https://landsat.usgs.gov/landsat/all_in_one_pending_acquisition/'
for day in rrule(DAILY, dtstart=start_date, until=end):
tail = '{}/Pend_Acq/y{}/{}/{}.txt'.format(sat_abv, day.year,
day.strftime('%b'),
day.strftime('%b-%d-%Y'))
url = '{}{}'.format(base, tail)
r = requests.get(url)
for line in r.iter_lines():
l = line.split()
try:
if l[0] == str(lndst_path_row[0]):
if l[1] == str(lndst_path_row[1]):
# dtime is in GMT
time_str = '{}-{}'.format(day.year, l[2])
ref_time = datetime.strptime(time_str, '%Y-%j-%H:%M:%S')
return ref_time
except IndexError:
pass
except TypeError:
pass
raise OverpassNotFoundError('Did not find overpass data, check your dates...')
def convert_lat_lon_wrs2pr(lat, lon, conversion_type='convert_ll_to_pr'):
base = 'https://landsat.usgs.gov/landsat/lat_long_converter/tools_latlong.php'
unk_number = 1490995492704
if conversion_type == 'convert_ll_to_pr':
full_url = '{}?rs={}&rsargs[]={}&rsargs[]={}&rsargs[]=1&rsrnd={}'.format(base, conversion_type,
lat, lon,
unk_number)
r = requests.get(full_url)
tree = html.fromstring(r.text)
# remember to view source html to build xpath
# i.e. inspect element > network > find GET with relevant PARAMS
# > go to GET URL > view source HTML
p_string = tree.xpath('//table/tr[1]/td[2]/text()')
path = int(re.search(r'\d+', p_string[0]).group())
r_string = tree.xpath('//table/tr[1]/td[4]/text()')
row = int(re.search(r'\d+', r_string[0]).group())
print 'path: {}, row: {}'.format(path, row)
return path, row
elif conversion_type == 'convert_pr_to_ll':
full_url = '{}?rs={}&rsargs[]=\n' \
'{}&rsargs[]={}&rsargs[]=1&rsrnd={}'.format(base, conversion_type,
lat, lon, unk_number)
r = requests.get(full_url)
tree = html.fromstring(r.text)
lat_string = tree.xpath('//table/tr[2]/td[2]/text()')
lat = float(re.search(r'[+-]?\d+(?:\.\d+)?', lat_string[0]).group())
lon_string = tree.xpath('//table/tr[2]/td[4]/text()')
lon = float(re.search(r'[+-]?\d+(?:\.\d+)?', lon_string[0]).group())
return lat, lon
else:
raise NotImplementedError('Must chose either convert_pr_to_ll or convert_ll_to_pr')
if __name__ == '__main__':
pass
# ==================================================================================
/JustCause-0.4.tar.gz/JustCause-0.4/src/justcause/learners/meta/tlearner.py | import copy
from typing import Optional, Tuple, Union
import numpy as np
from sklearn.linear_model import LassoLars
from ..utils import replace_factual_outcomes
#: Type alias for predict_ite return type
SingleComp = Union[Tuple[np.array, np.array, np.array], np.array]
class TLearner:
"""Generic TLearner implementation for the binary treatment case"""
def __init__(self, learner=None, learner_c=None, learner_t=None):
"""Takes either one base learner for both or two specific base learners
Defaults to a `sklearn.linear_model.LassoLars` for both treated and control
Args:
learner: base learner for treatment and control outcomes
learner_c: base learner for control outcome
learner_t: base learner for treatment outcome
"""
if learner is None:
if learner_c is None and learner_t is None:
self.learner_c = LassoLars()
self.learner_t = LassoLars()
else:
self.learner_c = learner_c
self.learner_t = learner_t
else:
self.learner_c = copy.deepcopy(learner)
self.learner_t = copy.deepcopy(learner)
def __repr__(self):
return self.__str__()
def __str__(self):
"""Simple string representation for logs and outputs"""
return "{}(control={}, treated={})".format(
self.__class__.__name__,
self.learner_c.__class__.__name__,
self.learner_t.__class__.__name__,
)
def fit(
self, x: np.array, t: np.array, y: np.array, weights: Optional[np.array] = None,
) -> None:
r"""Fit the two learners on the given samples
``learner_c`` is trained on the untreated (control) samples while ``learner_t``
is trained on the treated samples.
.. math::
\mu_0(x) &= E[Y \mid X=x, T=0],\\
\mu_1(x) &= E[Y \mid X=x, T=1], \\
&\text{which are plugged into the prediction:} \\
\hat{\tau}(x) &= \hat{\mu_1}(x) - \hat{\mu_0}(x).
Args:
x: covariates, shape (num_instances, num_features)
t: treatment indicator, shape (num_instances)
y: factual outcomes, shape (num_instances)
weights: sample weights for weighted fitting.
If used, the learners must allow a ``sample_weights`` argument to their
``fit()`` method
"""
assert (
t is not None and y is not None
), "treatment and factual outcomes are required to fit Causal Forest"
if weights is not None:
assert len(weights) == len(t), "weights must match the number of instances"
self.learner_c.fit(x[t == 0], y[t == 0], sample_weights=weights)
self.learner_t.fit(x[t == 1], y[t == 1], sample_weights=weights)
else:
# Fit without weights to avoid unknown argument error
self.learner_c.fit(x[t == 0], y[t == 0])
self.learner_t.fit(x[t == 1], y[t == 1])
def predict_ite(
self,
x: np.array,
t: np.array = None,
y: np.array = None,
return_components: bool = False,
replace_factuals: bool = False,
) -> SingleComp:
"""Predicts ITE for the given samples
Args:
x: covariates in shape (num_instances, num_features)
t: treatment indicator, binary in shape (num_instances)
y: factual outcomes in shape (num_instances)
return_components: whether to return Y(0) and Y(1) predictions separately
            replace_factuals: whether to replace the predicted outcomes with the
                observed factual outcomes where available
Returns:
a vector of ITEs for the inputs; also returns Y(0) and Y(1) for all
inputs if return_components is True
"""
y_0 = self.learner_c.predict(x)
y_1 = self.learner_t.predict(x)
if return_components:
if t is not None and y is not None and replace_factuals:
y_0, y_1 = replace_factual_outcomes(y_0, y_1, y, t)
return y_1 - y_0, y_0, y_1
else:
return y_1 - y_0
def estimate_ate(
self,
x: np.array,
t: np.array = None,
y: np.array = None,
weights: Optional[np.array] = None,
) -> float:
"""Estimates the average treatment effect of the given population
First, it fits the model on the given population, then predicts ITEs and uses
the mean as an estimate for the ATE
Args:
x: covariates in shape (num_instances, num_features)
t: treatment indicator, binary in shape (num_instances)
y: factual outcomes in shape (num_instances)
weights: sample weights for weighted fitting.
If used, the learners must allow a ``sample_weights`` argument to their
``fit()`` method
Returns:
ATE estimate as the mean of ITEs
"""
self.fit(x, t, y, weights)
ite = self.predict_ite(x, t, y)
        return float(np.mean(ite))
/ABBA-QuPath-RegistrationExporter-0.4.2.tar.gz/ABBA-QuPath-RegistrationExporter-0.4.2/regexport/actions/save_cells.py | from pathlib import Path
import pandas as pd
from PySide2.QtWidgets import QFileDialog, QCheckBox, QDialog, QAction
from traitlets import HasTraits, Bool, directional_link
from regexport.model import AppState
class SaveCellsActionModel(HasTraits):
text = "3. Save Cells"
enabled = Bool(default_value=False)
def register(self, model: AppState):
self.model = model
directional_link((model, 'cells'), (self, 'enabled'), lambda cells: cells is not None)
def submit(self, filename: Path, export_visible_cells_only: bool = False):
print('File saving...')
df = self.model.selected_cells if export_visible_cells_only else self.model.cells
types = {
'Image': 'category',
'BrainRegion': 'category',
'Acronym': 'category',
'X': 'float32',
'Y': 'float32',
'Z': 'float32',
}
types.update({col: 'uint16' for col in self.model.cells.columns if "Num Spots" in col})
df = df.astype(types)
df: pd.DataFrame = df.drop(columns=['BGIdx'])
print(df.info())
print(filename)
if filename.suffix.lower() == ".csv":
df.to_csv(filename, index=False)
elif filename.suffix.lower() == ".feather":
df.reset_index(drop=True).to_feather(filename)
else:
raise TypeError(f"Error saving file {str(filename)}: {filename.suffix} extension not supported.")
print("File saved")
class ChkBxFileDialog(QFileDialog):
def __init__(self, checkbox_title="Selected Cells Only", filename_filter="*.txt"):
super().__init__(filter=filename_filter)
self.setSupportedSchemes(["file"])
self.setOption(QFileDialog.DontUseNativeDialog)
self.setAcceptMode(QFileDialog.AcceptSave)
self.setNameFilter("Feather file (*.feather);;CSV file (*.csv)")
self.selectNameFilter("Feather file (*.feather);;CSV file (*.csv)")
self.checkbox = QCheckBox(checkbox_title)
self.layout().addWidget(self.checkbox)
@property
def full_filename(self) -> Path:
filename = self.selectedUrls()[0].toLocalFile()
extension_filter = self.selectedNameFilter()
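        # selectedNameFilter() returns e.g. "Feather file (*.feather)"; slice out
        # the ".feather"/".csv" suffix between the '*' and the closing parenthesis.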
extension = extension_filter[extension_filter.index('*.') + 1:-1]
full_filename = Path(filename).with_suffix(extension)
return full_filename
@property
def selected_cells_only(self) -> bool:
return self.checkbox.isChecked()
class SaveCellsAction(QAction):
def __init__(self, model: SaveCellsActionModel, *args, **kwargs):
self.model = model
super().__init__(*args, **kwargs)
self.setText(model.text)
self.triggered.connect(self.click)
self.model.observe(self.set_enabled, 'enabled')
self.set_enabled(None)
def set_enabled(self, changed):
self.setEnabled(self.model.enabled)
def click(self):
dialog = ChkBxFileDialog()
if dialog.exec_() == QDialog.Accepted:
self.model.submit(
filename=dialog.full_filename,
export_visible_cells_only=dialog.selected_cells_only
            )
/Finance-Hermes-0.3.6.tar.gz/Finance-Hermes-0.3.6/hermes/factors/technical/factor_liquidity_stock.py | import numpy as np
import pandas as pd
from alphakit.const import *
from alphakit.factor import *
from alphakit.portfolio import *
from alphakit.data import *
from hermes.factors.base import FactorBase, LongCallMixin, ShortCallMixin
class FactorLiquidityStock(FactorBase, LongCallMixin, ShortCallMixin):
def __init__(self, data_format, **kwargs):
__str__ = 'factor_liquidity_stock'
self.category = 'Liquidity'
        self.name = '股票流动性'  # "stock liquidity"
self._data_format = data_format
self._data = self.init_data(**kwargs) if 'end_date' in kwargs else None
def _init_self(self, **kwargs):
pass
def factor_liquid1(
self,
data=None,
dependencies=['dummy120_fst', 'ffancy_bcvp05M20D', 'sw1'],
window=1):
data = self._data if data is None else data
dummy = data['dummy120_fst']
sw1 = data['sw1']
factor = data['ffancy_bcvp05M20D']
factor = indfill_median(factor * dummy, sw1)
return self._format(factor, "factor_liquid1")
def factor_liquid2(
self,
data=None,
dependencies=['dummy120_fst', 'ffancy_taEntropy', 'sw1'],
window=1):
data = self._data if data is None else data
dummy = data['dummy120_fst']
sw1 = data['sw1']
factor = data['ffancy_taEntropy']
factor = indfill_median(factor * dummy, sw1)
return self._format(factor, "factor_liquid2")
def factor_liquid3(self,
data=None,
dependencies=['dummy120_fst', 'turnoverVol', 'sw1'],
window=3):
data = self._data if data is None else data
dummy = data['dummy120_fst']
sw1 = data['sw1']
vol = data['turnoverVol']
vol[vol <= 0] = np.nan
log_val = np.log(vol)
factor = -log_val.rolling(3, min_periods=1).sum() / 3
factor = indfill_median(factor * dummy, sw1)
return self._format(factor, "factor_liquid3")
def factor_liquid4(self,
data=None,
dependencies=['dummy120_fst', 'turnoverValue', 'sw1'],
window=60):
data = self._data if data is None else data
dummy = data['dummy120_fst']
sw1 = data['sw1']
vol = data['turnoverVol']
vol[vol <= 0] = np.nan
log_val = np.log(vol)
factor = -log_val.rolling(60, min_periods=20).std()
factor = indfill_median(factor * dummy, sw1)
return self._format(factor, "factor_liquid4")
def factor_liquid5(self,
data=None,
dependencies=['dummy120_fst', 'turnoverValue', 'sw1'],
window=120):
data = self._data if data is None else data
dummy = data['dummy120_fst']
sw1 = data['sw1']
vol = data['turnoverVol']
vol[vol <= 0] = np.nan
log_val = np.log(vol)
factor = -log_val.rolling(120, min_periods=30).std()
factor = indfill_median(factor * dummy, sw1)
return self._format(factor, "factor_liquid5") | PypiClean |
/LTB-Symm-1.0.0.tar.gz/LTB-Symm-1.0.0/src/ltbsymm/configuration.py | import time
import numpy as np
import scipy.sparse as sp
from scipy.spatial import KDTree
from tqdm import tqdm
'''
This code is not using MPI
'''
'''
I am sure this code is well commented! AS
'''
class Pwl:
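    # Typical workflow (illustrative sketch; the folder, file name and cutoff
    # values are examples only):
    #
    #   conf = Pwl('out/', sparse_flag=True, dtype='float64')
    #   conf.read_coords('structure.data')            # LAMMPS data file
    #   conf.neigh_list(cutoff=5.7, load_=False)
    #   conf.vector_connection_matrix(fnn_cutoff=1.55)
    #   conf.normal_vec(local=True)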
def __init__(self, folder_name, sparse_flag=True, dtype=None):
"""
Define the namespace, useful in case of loading
"""
self.dtypeR = dtype
self.folder_name = folder_name
self.sparse_flag = sparse_flag
self.xy = None
self.xlen = None
self.ylen = None
self.zlen = None
#self.xhi = None
#self.yhi = None
#self.zhi = None
self.tot_number= None
self.xlen_half = None
self.ylen_half = None
self.coords = None
self.atomsAllinfo = None
self.fnn_id = None
self.B_flag = None
self.dist_matrix = None
self.fnn_vec = None
self.ez = None
self.cutoff = None
self.local_flag = None
self.file_name = ''
def read_coords(self, file_name):
"""
Read coordinates from files.
Args:
file_name: str
                Currently only the LAMMPS data format is accepted.
"""
self.file_name = file_name
file_ = open(file_name)
l_ = file_.readlines()
self.xy = 0
header_ = ''
ii = 0
end_flag = False
while ii<20:
if 'atoms' in l_[ii]:
self.tot_number = int(l_[ii].split()[0])
elif 'atom types' in l_[ii]:
self.tot_type = int(l_[ii].split()[0])
elif 'xlo' in l_[ii]:
self.xlo = float(l_[ii].split()[0])
self.xhi = float(l_[ii].split()[1])
                self.xlen = self.xhi - self.xlo # positive by definition
elif 'ylo' in l_[ii]:
self.ylo = float(l_[ii].split()[0])
self.yhi = float(l_[ii].split()[1])
                self.ylen = self.yhi - self.ylo # positive by definition
elif 'zlo' in l_[ii]:
self.zlo = float(l_[ii].split()[0])
self.zhi = float(l_[ii].split()[1])
                self.zlen = self.zhi - self.zlo # positive by definition
elif 'xy xz yz' in l_[ii]:
self.xy = float(l_[ii].split()[0])
elif 'Atoms' in l_[ii]:
skiplines = ii + 1 +1
end_flag = True
header_ += l_[ii]
header_ +=l_[ii] if not end_flag else ''
ii += 1
self.ylen_half = self.ylen/2
self.xlen_half = self.xlen/2
file_.close()
### get coordinates
self.atomsAllinfo = np.loadtxt(file_name, skiprows =skiplines, max_rows= self.tot_number, dtype=self.dtypeR)
self.coords = self.atomsAllinfo[:,4:7]
def vector_connection_matrix(self, fnn_cutoff=1.55):
"""
        Create geometrical vectors connecting each pair of neighbours.
Args:
fnn_cutoff: float
                Maximum cutoff to detect first nearest neighbours (default = 1.55)
Returns: None
"""
print('creating vector_connection_matrix...')
if self.sparse_flag:
dist_matrix_X = sp.lil_matrix((self.tot_number, self.tot_number), dtype=self.dtypeR)
dist_matrix_Y = sp.lil_matrix((self.tot_number, self.tot_number), dtype=self.dtypeR)
dist_matrix_Z = sp.lil_matrix((self.tot_number, self.tot_number), dtype=self.dtypeR)
boundary_flag_X = sp.lil_matrix((self.tot_number, self.tot_number), dtype='int')
boundary_flag_Y = sp.lil_matrix((self.tot_number, self.tot_number), dtype='int')
#self.dist_matrix_norm = sp.lil_matrix((self.tot_number, self.tot_number), dtype='float')
self.dist_matrix = np.array([dist_matrix_X, dist_matrix_Y, dist_matrix_Z]) # being 3,N,N # no otherway
self.B_flag = np.array([boundary_flag_X, boundary_flag_Y]) # being 3,N,N # no otherway
else:
self.dist_matrix = np.zeros((self.tot_number, self.tot_number, 3), self.dtypeR)
self.fnn_vec = np.full((self.tot_number,3,3), np.nan)
self.fnn_id = np.zeros((self.tot_number,3), dtype='int' )
for ii in range(self.tot_number):
neighs = self.nl[ii][~(np.isnan(self.nl[ii]))].astype('int')
# number of neighbors are not known, so we use this approach
zz = 0
for jj in neighs:
dist_ = self.coords[jj] - self.coords[ii]
yout =0
xout =0
old_dist_size = np.linalg.norm(dist_)
if dist_[1] > self.ylen_half:
dist_[1] -= self.ylen
dist_[0] -= self.xy
yout = -1
elif -1*dist_[1] > self.ylen_half:
dist_[1] += self.ylen
dist_[0] += self.xy
yout = +1
if dist_[0] > self.xlen_half:
dist_[0] -= self.xlen
xout = -1
elif -1*dist_[0] > self.xlen_half:
dist_[0] += self.xlen
xout = +1
dist_size = np.linalg.norm(dist_)
if dist_size < fnn_cutoff:
self.fnn_vec[ii, zz] = dist_
self.fnn_id[ii, zz] = jj
zz += 1
## for debugging
if dist_size > 1.01*self.cutoff:
print('POS_ii, POS_jj\n {0} \n {1}'.format(self.coords[ii], self.coords[jj]))
print('New dist = {0}'.format(dist_size))
print('Old dist = {0}'.format(old_dist_size))
raise RuntimeError('something is wrong with PBC')
if self.sparse_flag:
self.dist_matrix[0][ii, jj] = dist_[0]
self.dist_matrix[1][ii, jj] = dist_[1]
self.dist_matrix[2][ii, jj] = dist_[2]
self.B_flag[0][ii, jj] = xout
self.B_flag[1][ii, jj] = yout
else:
self.dist_matrix[ii, jj] = dist_
if zz!=3:
raise RuntimeError("there is an error in finding first nearest neighbors: please tune *fnn_cutoff*")
def normal_vec(self, local = False):
"""
To create local normal vector.
        Note: On the choice of axis, it is assumed that the two main dimensions of the structure are on average perpendicular to the Z axis.
Warning!! chirality is not computed. Always the positive normal_vec (pointing upward) is returned.
Args:
local: boolean
                Whether to calculate local (per-site) normal vectors; if False, ez=[0,0,1] is used for all sites.
Returns: None
"""
self.local_flag = local
self.ez = np.full((self.tot_number,3), np.nan)
if local:
print("calculating local normal vectors ...")
for ii in range(self.tot_number):
neighs = self.fnn_vec[ii]
aa = np.cross(neighs[0], neighs[1])
bb = np.cross(neighs[1], neighs[2])
cc = np.cross(neighs[2], neighs[0])
norm_ = aa + bb + cc
norm_ = norm_/np.linalg.norm(norm_)
if norm_[2] < 0:
norm_ *= -1
self.ez[ii] = norm_
else:
print("ez=[0,0,1] for all orbitals ...")
self.ez = np.full((self.tot_number,3), [0,0,1])
def neigh_list(self, cutoff, nl_method='RS', l_width = 500, load_=True, version_=''):
"""
        Detect all neighbours of all sites within a cutoff range.
Args:
cutoff: float
                Used to detect neighbours within a circular range around each individual site.
            nl_method: str, optional
which method to use for creating neighborlist. 'RS' -reduce space implementation- (faster but might have a bug in rhombic cells, to be investigated) or 'RC' -replicating coordinates- (slower, bug free) (default = 'RS')
l_width: int, optional
                Maximum number of allowed neighbours. For memory efficiency reasons it is better to keep this number small (default = 500).
                In rare scenarios, if you use a very large cutoff, you might need to increase this quantity; you get a MemoryError in that case.
load_: boolean, optional
Load a previously created neighborlist. (default = True)
version_: str, optional
A postfix for save name
Returns: None
"""
# check inputs
try:
assert nl_method == 'RC' or nl_method == 'RS'
except AssertionError: raise TypeError("Wrong nl_method. Only 'RC' or 'RS'")
self.cutoff = cutoff
if load_:
try:
data_ = np.load(self.folder_name + 'neigh_list_{0}.npz'.format(version_))
self.nl = data_['np_nl']
tot_neigh = data_['tot_neigh']
ave_neigh = data_['ave_neigh']
#print('neigh_list is loaded from the existing file: neigh_list_{0}.npz'.format(version_))
print('ave_neigh={0} \ntot_neigh={1}'.format(ave_neigh, tot_neigh))
#print("if want to rebuild the neigh_list, use: build_up(..,load_neigh=False)")
except FileNotFoundError:
load_ = False
print('A neighlist file was not found, building one... ')
if not load_:
# Init array with maximum neighbours l_width
np_nl = np.full((self.tot_number, l_width), np.nan)
if nl_method == 'RC':
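                # 'RC': replicate the coordinates into the 8 neighbouring periodic
                # images (translations along the cell vectors, including the xy tilt),
                # so that plain Euclidean distances capture neighbours across the PBC.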
coords_9x = np.concatenate((self.coords,
self.coords+np.array([+self.xlen,0,0]),
self.coords+np.array([-self.xlen,0,0]),
self.coords+np.array([+self.xy,+self.ylen,0]),
self.coords+np.array([-self.xy,-self.ylen,0]),
self.coords+np.array([+self.xlen+self.xy,+self.ylen,0]),
self.coords+np.array([-self.xlen+self.xy,+self.ylen,0]),
self.coords+np.array([-self.xlen-self.xy,-self.ylen,0]),
self.coords+np.array([+self.xlen-self.xy,-self.ylen,0])) , axis=0)
print('Start loop on %i atoms' % self.tot_number)
t0 = time.time()
pbar = tqdm(total=self.tot_number, unit='neigh list', desc='creating neigh') # Initialise
tot_neigh = 0
for i in range(self.tot_number):
cond_R = np.linalg.norm(coords_9x-coords_9x[i],axis=1) < cutoff
possible = np.where(cond_R == True)[0]
possible = possible % self.tot_number
possible = np.unique(possible)
possible = np.delete(possible, np.argwhere(possible==i))
k = possible.shape[0]
try:
np_nl[i][:k] = possible
except ValueError:
if l_width <= k :
raise MemoryError('please increase *l_width* in neigh_list_me_smart')
else:
raise ValueError('np_nl[i][:k] = possible')
tot_neigh += k
pbar.update()
pbar.close()
del coords_9x
elif nl_method == 'RS':
''' Andrea reduce space implementation '''
# !!! ASSUMES CELL CAN BE OBATINED LIKE THIS !!!
z_off = 30 # assume z is not well defined...
A1 = np.array([self.xlen, 0, 0 ])
A2 = np.array([self.xy, self.ylen, 0])
A3 = np.array([0,0,z_off])
u, u_inv = calc_matrices_bvect(A1, A2, A3)
print('Ask KDTree for neighbours d0=%.3f (it may take a while)' % self.cutoff)
# List containing neighbour pairs
neighs = np.array(list(pbc_neigh(self.coords, u, self.cutoff))) # convert from set to numpy array
            ## Ali: I don't understand. pbc_neigh returns a numpy array, why do you convert to list then numpy array? converting from numpy modules to numpy is strongly discouraged.
# Now we just need to re-order: get all the entries relative to each atom
print('Start loop on %i atoms' % self.tot_number)
pbar = tqdm(total=self.tot_number, unit='neigh list', desc='creating neigh') # Initialise
tot_neigh = 0
for i in range(self.tot_number):
# Where in neigh list (j->k) the current atom i appears?
mask_left = np.where(neighs[:,0] == i)[0] # is it index on the left?
mask_right = np.where(neighs[:,1] == i)[0] # is it index on the right?
                    mask = np.concatenate([mask_left, mask_right])  # more or less a logical OR
# All index pairs where this atom is present
c_neighs = neighs[mask].flatten()
c_neighs = c_neighs[c_neighs != i] # get only the indices differnt from considered atom i
k = len(c_neighs) # number of neighbours
# Ali why len? pls use shape[0]
np_nl[i][:k] = c_neighs # save the indices of the k neighbours of atom i
tot_neigh += k
pbar.update()
pbar.close()
ave_neigh= tot_neigh/self.tot_number
## decrease the array size accordignly
max_n = np.max(np.count_nonzero(~np.isnan(np_nl),axis=1))
np_nl = np_nl[:,:max_n]
np.savez(self.folder_name + 'neigh_list_{0}'.format(version_), np_nl=np_nl, tot_neigh=tot_neigh, ave_neigh=ave_neigh )
self.nl = np_nl
print('ave_neigh={0} \ntot_neigh={1}'.format(ave_neigh, tot_neigh))
def pbc_neigh(pos, u, d0, sskin=10):
u_inv = np.linalg.inv(u) # Get matrix back to real space
S = u_inv.T # Lattice matrix
print('Search in unit cell')
pos_tree = KDTree(pos)
nn_uc = np.array(list(pos_tree.query_pairs(d0))) # Nn in unit cell
# Go to reduced space: lattice vectors are (1,0), (0,1)
posp = np.dot(u, (pos).T).T # Fast numpy dot, but weird convention on row/cols
# Define a skin: no need to sarch in whole unit cell, only close to PBC
skinp = (1/2-d0/np.sqrt(np.linalg.det(S))*sskin)
skinmask = np.logical_or(np.abs(posp-0.5)[:,0]>skinp,
np.abs(posp-0.5)[:,1]>skinp)
posp_ind = np.array(range(posp.shape[0]))
skin_ind = posp_ind[skinmask]
pospm = posp[skinmask]
print('N=%i Nskin=%i (skin=%.4g)' % (posp.shape[0], pospm.shape[0], skinp))
# Wrap cell to center
tol = 0
## Ali: using python objects mixed with numpy heavily slows your code. Slower than not using numpy!
nn_pbc = []
for shift in [ # look for repatitions:
[-0.5, 0, 0], # along a1
[0, -0.5, 0], # along a2
[-0.5, -0.5,0] # along both
]:
print('Search in pbc shift %s' % str(shift))
posp2 = pospm - np.floor(pospm + shift)
#posp2 = posp - np.floor(posp + shift)
# Map back to real space
posp2p = np.dot(u_inv, posp2.T).T
# Compute distances in real space
pos2_tree = KDTree(posp2p)
# Record the indices of full position, not just skin
nn_pbc += list(skin_ind[list(pos2_tree.query_pairs(d0))])
#nn_pbc += list(pos2_tree.query_pairs(d0))
nn_pbc = np.array(nn_pbc)
# Merge the nearest neighbours, deleting duplicates
return np.unique(np.concatenate([nn_uc, nn_pbc]), axis=0)
def calc_matrices_bvect(b1, b2, b3):
"""Metric matrices from primitive lattice vectors b1, b2.
Return matrix to map to unit cell and inverse, back to real space."""
St = np.array([b1, b2, b3])
u = np.linalg.inv(St).T
u_inv = St.T
    return u, u_inv
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/ADAM_modified_update.py | import math
import numpy as np
import pandas as pd
import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer
class ADAMM():
def __init__(alpha,b_1,b_2,epsilon,noise_g):
return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g)
def init(m_t,v_t,t,theta):
return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta)
def Adam_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale):
alpha,b_1,b_2,epsilon,noise_g=hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g)
m_t,v_t,t,theta_0=ADAMM.init(0,0,0,0)
final_weight_vector=[]
for i in range(len_data):
theta_0=data[i]
for i in range(max_itr):
t+=1
if(act_func=="softPlus"):
g_t=Activation.Activation.softplus(theta_0)
elif (act_func=="relu"):
g_t=Activation.Activation.relu(theta_0)
elif (act_func=="elu"):
g_t=Activation.Activation.elu(theta_0,alpha)
elif (act_func=="selu"):
g_t=Activation.Activation.selu(scale,theta_0,theta)
elif (act_func=="tanh"):
g_t=Activation.Activation.tanh(theta_0)
elif (act_func=="hardSigmoid"):
g_t=Activation.Activation.hard_sigmoid(theta_0)
elif (act_func=="softSign"):
g_t=Activation.Activation.softsign(theta_0)
elif (act_func=="linear"):
g_t=Activation.Activation.linear(theta_0)
elif (act_func=="exponential"):
g_t=Activation.Activation.exponential(theta_0)
m_t=b_1*m_t + (1-b_1)*g_t
v_t=b_2*v_t +(1-b_2)*g_t*g_t
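                # bias-corrected first and second moment estimates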
m_hat=m_t/(1-(b_1**t))
v_hat=v_t/(1-(b_2**t))
theta_prev=theta_0
alpha_t=(alpha*(math.sqrt(1-b_2**t)/(1-b_1**t)))
theta_0=theta_prev-((alpha_t*(m_t)/(math.sqrt(v_hat) + epsilon)))
print("Intrermediate gradients")
print("==========================================")
print("Previous gradient",theta_prev)
print("Present gradient",theta_0)
print("==========================================")
if theta_0==theta_prev:
break;
final_weight_vector.append(theta_0)
return final_weight_vector
def initialize(data,max_itr):
len_data=len(data)
optimized_weights=ADAMM.Adam_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale)
print("Optimized Weight Vector")
print("=====================================")
for i in range(len(optimized_weights)):
print("=====",optimized_weights[i])
if __name__=='__main__':
print("Verbose")
#t_0=Adagrad_optimizer()
#print("gradient coefficient",t_0)
#solve_grad=poly_func(t_0)
#print("Gradient Value",solve_grad)
sample_data=[1,0.5,0.7,0.1]
    ADAMM.initialize(sample_data,10)
/CGRtools-4.1.34.tar.gz/CGRtools-4.1.34/README.rst | CGRTools
========
.. image:: https://zenodo.org/badge/14690494.svg
:target: https://zenodo.org/badge/latestdoi/14690494
Tools for processing of reactions based on the Condensed Graph of Reaction (CGR) approach.
Basic operations:
- Read/write/convert formats: MDL .RDF (RXN) and .SDF (MOL), .MRV, SMILES, INCHI (Linux and Windows), .XYZ, .PDB
- Standardize molecules and reactions; check structure validity.
- Duplicate searching.
- Tetrahedron, Allene and CIS-TRANS stereo checking.
- Produce CGRs.
- Perform subgraph search.
- Build/edit molecules and reactions.
- Produce template based reactions and molecules.
- Atom-to-atom mapping checker and rule-based fixer.
- Perform MCS search.
- 2d coordinates generation (based on `SmilesDrawer <https://github.com/reymond-group/smilesDrawer>`_)
- 2d/3d depiction.
Full documentation can be found `here <https://cgrtools.readthedocs.io>`_
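A minimal usage sketch (illustrative only; assumes an MDL RDF file ``reactions.rdf`` in the working directory)::
    from CGRtools.files import RDFRead
    with RDFRead('reactions.rdf') as reactions:
        for reaction in reactions:
            reaction.standardize()
            cgr = ~reaction  # compose the Condensed Graph of Reaction
            print(cgr)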
INSTALL
=======
Python 3.8+ is highly recommended; Python 3.6 and 3.7 are deprecated.
Linux Debian based
------------------
* Install python3.8, virtualenv and git::
sudo apt install python3.8 python3.8-dev git python3-virtualenv
* Create new environment and activate it::
virtualenv -p python3.8 venv
source venv/bin/activate
Mac
---
* Install python3.8 and git using `brew <https://brew.sh>`_::
brew install git
brew install python3
* Install virtualenv::
pip install virtualenv
* Create new environment and activate it::
virtualenv -p python3.8 venv
source venv/bin/activate
Windows
-------
* Install python3.8 and git using `Chocolatey <https://chocolatey.org/>`_::
choco install git
choco install python3
* Install virtualenv::
pip install virtualenv
* Create new environment and activate it::
virtualenv venv
venv\Scripts\activate
General part
------------
* **stable version available through PyPI**::
pip install CGRTools
* Install CGRtools with MRV files parsing support::
pip install CGRTools[mrv]
* Install CGRtools with structures `clean2d` support (Note: install NodeJS into system, see `details <https://github.com/sqreen/PyMiniRacer>`_)::
pip install CGRtools[clean2d]
* Install CGRtools with optimized XYZ parser::
pip install CGRtools[jit]
**If you still have questions, please open an issue on GitHub.**
PACKAGING
=========
For wheel generation, run the following command in the source root::
python setup.py bdist_wheel
COPYRIGHT
=========
* 2014-2022 Timur Madzhidov [email protected] product owner, idea and development supervision
* 2014-2021 Ramil Nugmanov [email protected] main developer
* 2021-2022 Valentina Afonina [email protected] development and support
CONTRIBUTORS
============
* Dinar Batyrshin [email protected]
* Timur Gimadiev [email protected]
* Adelia Fatykhova [email protected]
* Tagir Akhmetshin [email protected]
* Ravil Mukhametgaleev [email protected]
* Valentina Afonina [email protected]
CITE THIS
=========
CGRtools: Python Library for Molecule, Reaction, and Condensed Graph of Reaction Processing.
Journal of Chemical Information and Modeling 2019 59 (6), 2516-2521.
DOI: 10.1021/acs.jcim.9b00102
| PypiClean |
/ICS_IPA-1.1.58.tar.gz/ICS_IPA-1.1.58/README.md | # ICS_IPA
This repo manages the library functions used for data mining through MDF data files with the DataSpy product made by Intrepid Control Systems. The library contains a set of file I/O functions in a DLL that allow users to parse through MDF data files from their own applications, such as Python, Excel, Matlab, C#, etc. This library of functions is duplicated on Intrepid's wireless data server (Wireless NeoVI), allowing users to develop scripts on their PC and then run those scripts on the Wireless NeoVI data server without requiring the data to be downloaded.
### Install Process
```
pip install ICS_IPA
```
/Dabo-0.9.16.tar.gz/Dabo-0.9.16/dabo/ui/uiwx/dGlWindow.py | from wx import glcanvas
import wx
import dabo
import dabo.ui
if __name__ == "__main__":
dabo.ui.loadUI("wx")
import dControlMixin as cm
from dabo.dLocalize import _
from dabo.ui import makeDynamicProperty
try:
from OpenGL.GL import *
from OpenGL.GLUT import *
openGL = True
except ImportError:
openGL = False
except StandardError, e:
# Report the error, and abandon the import
dabo.log.error(_("Error importing OpenGL: %s") % e)
openGL = False
class dGlWindow(cm.dControlMixin, glcanvas.GLCanvas):
def __init__(self, parent, properties=None, attProperties=None, *args, **kwargs):
if not openGL:
raise ImportError, "PyOpenGL is not present, so dGlWindow cannot instantiate."
self.init = False
self._rotate = self._pan = False
#set initial mouse position for rotate
self.lastx = self.x = 30
self.lasty = self.y = 30
self._leftDown = self._rightDown = False
self._baseClass = dGlWindow
preClass = glcanvas.GLCanvas
cm.dControlMixin.__init__(self, preClass, parent, properties=properties,
attProperties=attProperties, *args, **kwargs)
def initGL(self):
"""Hook function. Put your initial GL code in here."""
pass
def onDraw(self):
"""
Hook function. Put the code here for what happens when you draw.
.. note::
You don't need to swap buffers here....We do this for you automatically.
"""
pass
def onResize(self, event):
if self.GetContext():
self.SetCurrent()
glViewport(0, 0, self.Width, self.Height)
def onPaint(self, event):
dc = wx.PaintDC(self)
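		# Bind the GL context and lazily build the scene: initGL() runs only on
		# the first paint, once a valid context is guaranteed to exist.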
self.SetCurrent()
if not self.init:
self.initGL()
self.init = True
self._onDraw()
def _onDraw(self):
#Call user hook method
self.onDraw()
if self.Rotate:
glRotatef((self.y - self.lasty), 0.0, 0.0, 1.0);
glRotatef((self.x - self.lastx), 1.0, 0.0, 0.0);
#if self.Pan:
# pass
self.SwapBuffers()
def onMouseRightDown(self, evt):
self.x, self.y = self.lastx, self.lasty = evt.EventData["mousePosition"]
self._rightDown = True
def onMouseRightUp(self, evt):
self._rightDown = False
#def onMouseLeftDown(self, evt):
#pass
#def onMouseLeftUp(self, evt):
#pass
def onMouseMove(self, evt):
if self._rightDown: #want to rotate object
self.lastx, self.lasty = self.x, self.y #store the previous x and y
self.x, self.y = evt.EventData["mousePosition"] #store the new x,y so we know how much to rotate
self.Refresh(False) #Mark window as "dirty" so it will be repainted
# Getters and Setters
def _getRotate(self):
return self._rotate
def _setRotate(self, val):
self._rotate = val
# Property Definitions
Rotate = property(_getRotate, _setRotate, None,
_("Rotate on Right Mouse Click and Drag"))
class _dGlWindow_test(dGlWindow):
def initProperties(self):
self.Rotate = True
def initGL(self):
# set viewing projection
glMatrixMode(GL_PROJECTION)
glFrustum(-0.5, 0.5, -0.5, 0.5, 1.0, 3.0)
# position viewer
glMatrixMode(GL_MODELVIEW)
glTranslatef(0.0, 0.0, -2.0)
# position object
glRotatef(self.y, 1.0, 0.0, 0.0)
glRotatef(self.x, 0.0, 1.0, 0.0)
glEnable(GL_DEPTH_TEST)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
def onDraw(self):
# clear color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# draw six faces of a cube
glBegin(GL_QUADS)
glNormal3f( 0.0, 0.0, 1.0)
glVertex3f( 0.5, 0.5, 0.5)
glVertex3f(-0.5, 0.5, 0.5)
glVertex3f(-0.5,-0.5, 0.5)
glVertex3f( 0.5,-0.5, 0.5)
glNormal3f( 0.0, 0.0,-1.0)
glVertex3f(-0.5,-0.5,-0.5)
glVertex3f(-0.5, 0.5,-0.5)
glVertex3f( 0.5, 0.5,-0.5)
glVertex3f( 0.5,-0.5,-0.5)
glNormal3f( 0.0, 1.0, 0.0)
glVertex3f( 0.5, 0.5, 0.5)
glVertex3f( 0.5, 0.5,-0.5)
glVertex3f(-0.5, 0.5,-0.5)
glVertex3f(-0.5, 0.5, 0.5)
glNormal3f( 0.0,-1.0, 0.0)
glVertex3f(-0.5,-0.5,-0.5)
glVertex3f( 0.5,-0.5,-0.5)
glVertex3f( 0.5,-0.5, 0.5)
glVertex3f(-0.5,-0.5, 0.5)
glNormal3f( 1.0, 0.0, 0.0)
glVertex3f( 0.5, 0.5, 0.5)
glVertex3f( 0.5,-0.5, 0.5)
glVertex3f( 0.5,-0.5,-0.5)
glVertex3f( 0.5, 0.5,-0.5)
glNormal3f(-1.0, 0.0, 0.0)
glVertex3f(-0.5,-0.5,-0.5)
glVertex3f(-0.5,-0.5, 0.5)
glVertex3f(-0.5, 0.5, 0.5)
glVertex3f(-0.5, 0.5,-0.5)
glEnd()
class _dGlWindow_test2(dGlWindow):
def initProperties(self):
self.Rotate = True
def initGL(self):
glMatrixMode(GL_PROJECTION)
# camera frustrum setup
glFrustum(-0.5, 0.5, -0.5, 0.5, 1.0, 3.0)
glMaterial(GL_FRONT, GL_AMBIENT, [0.2, 0.2, 0.2, 1.0])
glMaterial(GL_FRONT, GL_DIFFUSE, [0.8, 0.8, 0.8, 1.0])
glMaterial(GL_FRONT, GL_SPECULAR, [1.0, 0.0, 1.0, 1.0])
glMaterial(GL_FRONT, GL_SHININESS, 50.0)
glLight(GL_LIGHT0, GL_AMBIENT, [0.0, 1.0, 0.0, 1.0])
glLight(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])
glLight(GL_LIGHT0, GL_SPECULAR, [1.0, 1.0, 1.0, 1.0])
glLight(GL_LIGHT0, GL_POSITION, [1.0, 1.0, 1.0, 0.0])
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [0.2, 0.2, 0.2, 1.0])
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# position viewer
glMatrixMode(GL_MODELVIEW)
# position viewer
glTranslatef(0.0, 0.0, -2.0);
#
glutInit([])
def onDraw(self):
# clear color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# use a fresh transformation matrix
glPushMatrix()
# position object
#glTranslate(0.0, 0.0, -2.0)
glRotate(30.0, 1.0, 0.0, 0.0)
glRotate(30.0, 0.0, 1.0, 0.0)
glTranslate(0, -1, 0)
glRotate(250, 1, 0, 0)
glutSolidCone(0.5, 1, 30, 5)
glPopMatrix()
if __name__ == "__main__":
import test
test.Test().runTest(_dGlWindow_test)
	test.Test().runTest(_dGlWindow_test2)
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap-table/src/extensions/editable/bootstrap-table-editable.js | * @author zhixin wen <[email protected]>
* extensions: https://github.com/vitalets/x-editable
*/
const Utils = $.fn.bootstrapTable.utils
$.extend($.fn.bootstrapTable.defaults, {
editable: true,
onEditableInit () {
return false
},
onEditableSave (field, row, rowIndex, oldValue, $el) {
return false
},
onEditableShown (field, row, $el, editable) {
return false
},
onEditableHidden (field, row, $el, reason) {
return false
}
})
$.extend($.fn.bootstrapTable.columnDefaults, {
alwaysUseFormatter: false
})
$.extend($.fn.bootstrapTable.Constructor.EVENTS, {
'editable-init.bs.table': 'onEditableInit',
'editable-save.bs.table': 'onEditableSave',
'editable-shown.bs.table': 'onEditableShown',
'editable-hidden.bs.table': 'onEditableHidden'
})
$.BootstrapTable = class extends $.BootstrapTable {
initTable () {
super.initTable()
if (!this.options.editable) {
return
}
this.editedCells = []
$.each(this.columns, (i, column) => {
if (!column.editable) {
return
}
const editableOptions = {}
const editableDataMarkup = []
const editableDataPrefix = 'editable-'
const processDataOptions = (key, value) => {
// Replace camel case with dashes.
const dashKey = key.replace(/([A-Z])/g, $1 => `-${$1.toLowerCase()}`)
if (dashKey.indexOf(editableDataPrefix) === 0) {
editableOptions[dashKey.replace(editableDataPrefix, 'data-')] = value
}
}
$.each(this.options, processDataOptions)
column.formatter = column.formatter || (value => value)
column._formatter = column._formatter ? column._formatter : column.formatter
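      // Wrap the original formatter so that cells the user has edited keep showing
      // the raw edited value instead of the formatted one (unless alwaysUseFormatter).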
column.formatter = (value, row, index) => {
let result = Utils.calculateObjectValue(column, column._formatter, [value, row, index], value)
result = typeof result === 'undefined' || result === null ? this.options.undefinedText : result
if (this.options.uniqueId !== undefined && !column.alwaysUseFormatter) {
const uniqueId = Utils.getItemField(row, this.options.uniqueId, false)
if ($.inArray(column.field + uniqueId, this.editedCells) !== -1) {
result = value
}
}
$.each(column, processDataOptions)
$.each(editableOptions, (key, value) => {
editableDataMarkup.push(` ${key}="${value}"`)
})
let noEditFormatter = false
const editableOpts = Utils.calculateObjectValue(column,
column.editable, [index, row], {})
if (editableOpts.hasOwnProperty('noEditFormatter')) {
noEditFormatter = editableOpts.noEditFormatter(value, row, index)
}
if (noEditFormatter === false) {
return `<a href="javascript:void(0)"
data-name="${column.field}"
data-pk="${row[this.options.idField]}"
data-value="${result}"
${editableDataMarkup.join('')}></a>`
}
return noEditFormatter
}
})
}
initBody (fixedScroll) {
super.initBody(fixedScroll)
if (!this.options.editable) {
return
}
$.each(this.columns, (i, column) => {
if (!column.editable) {
return
}
const data = this.getData({ escape: true })
const $field = this.$body.find(`a[data-name="${column.field}"]`)
$field.each((i, element) => {
const $element = $(element)
const $tr = $element.closest('tr')
const index = $tr.data('index')
const row = data[index]
const editableOpts = Utils.calculateObjectValue(column,
column.editable, [index, row, $element], {})
$element.editable(editableOpts)
})
$field.off('save').on('save', ({ currentTarget }, { submitValue }) => {
const $this = $(currentTarget)
const data = this.getData()
const rowIndex = $this.parents('tr[data-index]').data('index')
const row = data[rowIndex]
const oldValue = row[column.field]
if (this.options.uniqueId !== undefined && !column.alwaysUseFormatter) {
const uniqueId = Utils.getItemField(row, this.options.uniqueId, false)
if ($.inArray(column.field + uniqueId, this.editedCells) === -1) {
this.editedCells.push(column.field + uniqueId)
}
}
submitValue = Utils.escapeHTML(submitValue)
$this.data('value', submitValue)
row[column.field] = submitValue
this.trigger('editable-save', column.field, row, rowIndex, oldValue, $this)
this.initBody()
})
$field.off('shown').on('shown', ({ currentTarget }, editable) => {
const $this = $(currentTarget)
const data = this.getData()
const rowIndex = $this.parents('tr[data-index]').data('index')
const row = data[rowIndex]
this.trigger('editable-shown', column.field, row, $this, editable)
})
$field.off('hidden').on('hidden', ({ currentTarget }, reason) => {
const $this = $(currentTarget)
const data = this.getData()
const rowIndex = $this.parents('tr[data-index]').data('index')
const row = data[rowIndex]
this.trigger('editable-hidden', column.field, row, $this, reason)
})
})
this.trigger('editable-init')
}
getData (params) {
const data = super.getData(params)
if (params && params.escape) {
for (const row of data) {
for (const [key, value] of Object.entries(row)) {
row[key] = Utils.unescapeHTML(value)
}
}
}
return data
}
}
/HDDM-0.9.9.tar.gz/HDDM-0.9.9/hddm/plotting.py | from hddm.simulators import *
from hddm.generate import *
from hddm.utils import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
import arviz as az
import os
import warnings
# import pymc as pm
# import hddm
import pandas as pd
from tqdm import tqdm
import pymc
from kabuki.analyze import _post_pred_generate, _parents_to_random_posterior_sample
from statsmodels.distributions.empirical_distribution import ECDF
from hddm.model_config import model_config
from hddm.model_config_rl import model_config_rl
# Basic utility
def prettier_tag(tag):
len_tag = len(tag)
if len_tag == 1:
return tag[0]
else:
return "(" + ", ".join([str(t) for t in tag]) + ")"
# Plot Composer Functions
def plot_posterior_pair(
model,
plot_func=None,
save=False,
path=None,
figsize=(8, 6),
format="png",
samples=100,
parameter_recovery_mode=False,
**kwargs,
):
"""Generate posterior pair plots for each observed node.
Arguments:
model: kabuki.Hierarchical
The (constructed and sampled) kabuki hierarchical model to
            create the posterior predictive from.
Optional:
        samples: int <default=100>
How many posterior samples to use.
columns: int <default=3>
How many columns to use for plotting the subjects.
bins: int <default=100>
How many bins to compute the data histogram over.
figsize: (int, int) <default=(8, 6)>
save: bool <default=False>
Whether to save the figure to a file.
path: str <default=None>
Save figure into directory prefix
format: str or list of strings <default='png'>
            Save figure to an image file of type 'format'. If more than one format is
            given, multiple files are created.
parameter_recovery_mode: bool <default=False>
If the data attached to the model supplied under the model argument
has the format expected of the simulator_h_c() function from the simulators.hddm_dataset_generators
            module, then parameter_recovery_mode = True can be used to supply ground truth parameterizations to the
            plot_func argument described below.
plot_func: function <default=_plot_posterior_pdf_node>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
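    Example:
        A minimal sketch (illustrative only; assumes a fitted HDDM model ``m``)::
            hddm.plotting.plot_posterior_pair(m, samples=50, figsize=(6, 6))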
"""
if hasattr(model, "reg_outcomes"):
return "Note: The posterior pair plot does not support regression models at this point! Aborting..."
if hasattr(model, "model"):
kwargs["model_"] = model.model
else:
kwargs["model_"] = "ddm_hddm_base"
if plot_func is None:
plot_func = _plot_func_pair
observeds = model.get_observeds()
kwargs["figsize"] = figsize
kwargs["n_samples"] = samples
# Plot different conditions (new figure for each)
for tag, nodes in observeds.groupby("tag"):
# Plot individual subjects (if present)
for subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):
if "subj_idx" in bottom_node:
if str(node_name) == "wfpt":
kwargs["title"] = str(subj_i)
else:
kwargs["title"] = str(node_name)
if parameter_recovery_mode:
kwargs["node_data"] = model.data.loc[bottom_node["node"].value.index]
g = plot_func(bottom_node["node"], **kwargs)
plt.show()
# Save figure if necessary
if save:
if len(tag) == 0:
fname = "ppq_subject_" + str(subj_i)
else:
fname = (
"ppq_"
+ ".".join([str(t) for t in tag])
+ "_subject_"
+ str(subj_i)
)
if path is None:
path = "."
if isinstance(format, str):
format = [format]
print(["%s.%s" % (os.path.join(path, fname), x) for x in format])
[
g.fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
def plot_from_data(
df,
generative_model="ddm_hddm_base",
plot_func=None,
columns=None,
save=False,
save_name=None,
make_transparent=False,
show=True,
path=None,
groupby="subj_idx",
figsize=(8, 6),
format="png",
keep_frame=True,
keep_title=True,
**kwargs,
):
"""Plot data from a hddm ready DataFrame.
Arguments:
df : pd.DataFrame
HDDM ready dataframe.
value_range : numpy.ndarray
Array to evaluate the likelihood over.
Optional:
columns : int <default=3>
How many columns to use for plotting the subjects.
bins : int <default=100>
How many bins to compute the data histogram over.
figsize : (int, int) <default=(8, 6)>
save : bool <default=False>
Whether to save the figure to a file.
path : str <default=None>
Save figure into directory prefix
format : str or list of strings
            Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
plot_func : function <default=_plot_func_posterior_pdf_node_nn>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
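    Example:
        A minimal sketch (illustrative only; assumes ``df`` is an HDDM-ready
        DataFrame with ``rt``, ``response`` and ``subj_idx`` columns)::
            hddm.plotting.plot_from_data(df, generative_model="ddm_hddm_base",
                                         groupby="subj_idx", columns=3)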
"""
    # Flip argument names to make them compatible with downstream expectations
# of the plot_func() function
if "add_data_model" in kwargs.keys():
kwargs["add_posterior_mean_model"] = kwargs["add_data_model"]
if "add_data_rts" in kwargs.keys():
kwargs["add_posterior_mean_rts"] = kwargs["add_data_rts"]
if "data_color" in kwargs.keys():
kwargs["posterior_mean_color"] = kwargs["data_color"]
else:
kwargs["posterior_mean_color"] = "blue"
kwargs["model_"] = generative_model
title_ = kwargs.pop("title", "")
ax_title_size = kwargs.pop("ax_title_fontsize", 10)
if type(groupby) == str:
groupby = [groupby]
if plot_func is None:
plot_func = _plot_func_posterior_pdf_node_nn
if columns is None:
# If there are less than 3 items to plot per figure,
# only use as many columns as there are items.
max_items = max([len(i[1]) for i in df.groupby(groupby).groups.items()])
columns = min(3, max_items)
n_plots = len(df.groupby(groupby))
# Plot different conditions (new figure for each)
fig = plt.figure(figsize=figsize)
if make_transparent:
fig.patch.set_facecolor("None")
fig.patch.set_alpha(0.0)
fig.suptitle(title_, fontsize=12)
fig.subplots_adjust(top=0.9, hspace=0.4, wspace=0.3)
i = 1
for group_id, df_tmp in df.groupby(groupby):
nrows = int(np.ceil(n_plots / columns))
# Plot individual subjects (if present)
ax = fig.add_subplot(nrows, columns, i)
# Allow kwargs to pass to the plot_func, whether this is the first plot
# (useful to generate legends only for the first subplot)
if i == 1:
kwargs["add_legend"] = True
else:
kwargs["add_legend"] = False
# Make axis title
tag = ""
for j in range(len(groupby)):
tag += groupby[j] + "(" + str(group_id[j]) + ")"
if j < (len(groupby) - 1):
tag += "_"
# print(tag)
if keep_title:
ax.set_title(tag, fontsize=ax_title_size)
# ax.set(frame_on=False)
if not keep_frame:
ax.set_axis_off()
# Call plot function on ax
# This function should manipulate the ax object, and is expected to not return anything.
plot_func(df_tmp, ax, **kwargs)
i += 1
# Save figure if desired
if save:
if save_name is None:
fname = "ppq_" + prettier_tag(tag)
else:
fname = save_name
if path is None:
path = "."
if isinstance(format, str):
format = [format]
[
fig.savefig(
"%s.%s" % (os.path.join(path, fname), x),
facecolor=fig.get_facecolor(),
format=x,
)
for x in format
]
# Todo take care of plot closing etc.
if show:
pass
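
# Usage sketch (illustrative only, not part of the library API): plot raw data,
# one panel per group, via the simulation-based axis manipulator defined further
# below. `df` is a placeholder for a HDDM-ready DataFrame with 'rt', 'response'
# and 'subj_idx' columns; value_range and bin_size are arbitrary example values.
def _usage_sketch_plot_from_data(df):
    plot_from_data(
        df,
        generative_model="ddm_hddm_base",
        plot_func=_plot_func_posterior_node_from_sim,
        value_range=np.arange(0, 5, 0.1),
        bin_size=0.1,
        save=False,
        show=False,
    )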
def plot_posterior_predictive(
model,
plot_func=None,
required_method="pdf",
columns=None,
save=False,
path=None,
figsize=(8, 6),
format="png",
num_subjs=None,
parameter_recovery_mode=False,
subplots_adjust={"top": 0.85, "hspace": 0.4, "wspace": 0.3},
**kwargs,
):
"""Plot the posterior predictive distribution of a kabuki hierarchical model.
Arguments:
model : kabuki.Hierarchical
The (constructed and sampled) kabuki hierarchical model to
            create the posterior predictive from.
value_range : numpy.ndarray
Array to evaluate the likelihood over.
Optional:
samples : int <default=10>
How many posterior samples to generate the posterior predictive over.
columns : int <default=3>
How many columns to use for plotting the subjects.
bins : int <default=100>
How many bins to compute the data histogram over.
        figsize : (int, int) <default=(8, 6)>
            Size of the figure in inches.
save : bool <default=False>
Whether to save the figure to a file.
path : str <default=None>
Save figure into directory prefix
        format : str or list of strings
            Save figure to an image file of type 'format'. If more than one format is
            given, multiple files are created.
subplots_adjust : dict <default={'top': 0.85, 'hspace': 0.4, 'wspace': 0.3}>
Spacing rules for subplot organization. See Matplotlib documentation for details.
        parameter_recovery_mode: bool <default=False>
            If the data attached to the model supplied under the model argument
            has the format expected of the simulator_h_c() function from the simulators.hddm_dataset_generators
            module, then parameter_recovery_mode = True can be used to supply ground truth parameterizations to the
            plot_func argument described below.
plot_func : function <default=_plot_func_posterior_pdf_node_nn>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
if hasattr(model, "model"):
kwargs["model_"] = model.model
else:
kwargs["model_"] = "ddm_hddm_base"
if plot_func is None:
plot_func = _plot_func_posterior_pdf_node_nn
observeds = model.get_observeds()
if columns is None:
# If there are less than 3 items to plot per figure,
# only use as many columns as there are items.
max_items = max([len(i[1]) for i in observeds.groupby("tag").groups.items()])
columns = min(3, max_items)
# Plot different conditions (new figure for each)
for tag, nodes in observeds.groupby("tag"):
fig = plt.figure(figsize=figsize) # prev utils.pretty_tag
fig.suptitle(prettier_tag(tag), fontsize=12)
fig.subplots_adjust(
top=subplots_adjust["top"],
hspace=subplots_adjust["hspace"],
wspace=subplots_adjust["wspace"],
)
nrows = num_subjs or int(np.ceil(len(nodes) / columns))
if len(nodes) - (nrows * columns) > 0:
nrows += 1
# Plot individual subjects (if present)
i = 0
for subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):
i += 1
if not hasattr(bottom_node["node"], required_method):
continue # skip nodes that do not define the required_method
ax = fig.add_subplot(nrows, columns, subj_i + 1)
if "subj_idx" in bottom_node:
ax.set_title(str(bottom_node["subj_idx"]))
# Allow kwargs to pass to the plot_func, whether this is the first plot
# (useful to generate legends only for the first subplot)
if i == 1:
kwargs["add_legend"] = True
else:
kwargs["add_legend"] = False
if parameter_recovery_mode:
kwargs["parameter_recovery_mode"] = True
kwargs["node_data"] = model.data.loc[bottom_node["node"].value.index]
# Call plot function on ax
# This function should manipulate the ax object, and is expected to not return anything.
plot_func(bottom_node["node"], ax, **kwargs)
if i > (nrows * columns):
warnings.warn("Too many nodes. Consider increasing number of columns.")
break
if num_subjs is not None and i >= num_subjs:
break
# Save figure if necessary
if save:
fname = "ppq_" + prettier_tag(tag) # ".".join(tag)
if path is None:
path = "."
if isinstance(format, str):
format = [format]
            for x in format:
                fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
plt.show()
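
# Usage sketch (illustrative only): posterior predictive RT histograms, one panel
# per observed node. `m` is a placeholder for an already-sampled HDDM model;
# value_range, samples and columns are arbitrary example values.
def _usage_sketch_plot_posterior_predictive(m):
    plot_posterior_predictive(
        m,
        plot_func=_plot_func_posterior_node_from_sim,
        value_range=np.arange(0, 5, 0.1),
        samples=10,
        columns=3,
        save=False,
    )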
# AXIS MANIPULATORS ---------------
def _plot_func_posterior_pdf_node_nn(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.05,
plot_likelihood_raw=False,
linewidth=0.5,
data_color="blue",
posterior_color="red",
add_legend=True,
alpha=0.05,
**kwargs,
):
"""Calculate posterior predictives from raw likelihood values and plot it on top of a histogram of the real data.
The function does not define a figure, but manipulates an axis object.
Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
Optional:
        model_ : str <default='ddm_hddm_base'>
            str that defines the generative model underlying the kabuki model from which the bottom_node
            argument derives.
samples : int <default=10>
Number of posterior samples to use.
bin_size: float <default=0.05>
Size of bins for the data histogram.
plot_likelihood_raw : bool <default=False>
            Whether or not to plot likelihoods sample-wise.
add_legend : bool <default=True>
Whether or not to add a legend to the plot
linewidth : float <default=0.5>
Linewidth of histogram outlines.
data_color : str <default="blue">
Color of the data part of the plot.
posterior_color : str <default="red">
Color of the posterior part of the plot.
"""
# Setup -----
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
    model_ = kwargs.pop("model_", "ddm_hddm_base")
    choices = model_config[model_]["choices"]
    n_choices = len(model_config[model_]["choices"])
    if value_range is None:
        # Infer from data by finding the min and max from the nodes
        raise NotImplementedError("value_range keyword argument must be supplied.")
    bins = np.arange(value_range[0], value_range[-1], bin_size)
if n_choices == 2:
like = np.empty((samples, len(value_range)), dtype=np.float32)
pdf_in = value_range
else:
like = np.empty((samples, len(value_range), n_choices), dtype=np.float32)
pdf_in = np.zeros((len(value_range), 2))
pdf_in[:, 0] = value_range
# -----
# Get posterior parameters and plot corresponding likelihoods (if desired) ---
for sample in range(samples):
# Get random posterior sample
_parents_to_random_posterior_sample(bottom_node)
# Generate likelihood for parents parameters
if n_choices == 2:
like[sample, :] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
axis.plot(
value_range,
like[sample, :],
color=posterior_color,
lw=1.0,
alpha=alpha,
)
else:
c_cnt = 0
for choice in choices:
pdf_in[:, 1] = choice
                like[sample, :, c_cnt] = bottom_node.pdf(pdf_in)
                if plot_likelihood_raw:
axis.plot(
pdf_in[:, 0],
like[sample, :, c_cnt],
color=color_dict[choice],
lw=1.0,
alpha=alpha,
)
c_cnt += 1
# -------
# If we don't plot raw likelihoods, we generate a mean likelihood from the samples above
# and plot it as a line with uncertainty bars
if not plot_likelihood_raw:
y = like.mean(axis=0)
try:
y_std = like.std(axis=0)
except FloatingPointError:
print(
"WARNING! %s threw FloatingPointError over std computation. Setting to 0 and continuing."
% bottom_node.__name__
)
y_std = np.zeros_like(y)
if n_choices == 2:
axis.plot(value_range, y, label="post pred", color=posterior_color)
axis.fill_between(
value_range, y - y_std, y + y_std, color=posterior_color, alpha=0.5
)
else:
c_cnt = 0
for choice in choices:
axis.plot(
value_range,
y[:, c_cnt],
label="post pred",
color=color_dict[choice],
)
axis.fill_between(
value_range,
y[:, c_cnt] - y_std[:, c_cnt],
y[:, c_cnt] + y_std[:, c_cnt],
color=color_dict[choice],
alpha=0.5,
)
c_cnt += 1
# Plot data
if len(bottom_node.value) != 0:
if n_choices == 2:
rt_dat = bottom_node.value.copy()
if np.sum(rt_dat.rt < 0) == 0:
rt_dat.loc[rt_dat.response != 1, "rt"] = (-1) * rt_dat.rt[
rt_dat.response != 1
].values
axis.hist(
rt_dat.rt.values,
density=True,
color=data_color,
label="data",
bins=bins,
linestyle="-",
histtype="step",
lw=linewidth,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / bottom_node.value.shape[0],
reps=bottom_node.value[bottom_node.value.response == choice].shape[
0
],
)
if np.sum(bottom_node.value.response == choice) > 0:
axis.hist(
bottom_node.value.rt[bottom_node.value.response == choice],
bins=np.arange(value_range[0], value_range[-1], bin_size),
weights=weights,
color=color_dict[choice],
label="data",
linestyle="dashed",
histtype="step",
lw=linewidth,
)
axis.set_ylim(bottom=0) # Likelihood and histogram can only be positive
# Add a custom legend
if add_legend:
# If two choices only --> show data in blue, posterior samples in black
if n_choices == 2:
custom_elems = []
custom_titles = []
custom_elems.append(
Line2D([0], [0], color=data_color, lw=1.0, linestyle="-")
)
custom_elems.append(
Line2D([0], [0], color=posterior_color, lw=1.0, linestyle="-")
)
custom_titles.append("Data")
custom_titles.append("Posterior")
# If more than two choices --> more styling
else:
custom_elems = [
Line2D([0], [0], color=color_dict[choice], lw=1) for choice in choices
]
custom_titles = ["response: " + str(choice) for choice in choices]
custom_elems.append(
Line2D([0], [0], color=posterior_color, lw=1.0, linestyle="dashed")
)
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
axis.legend(custom_elems, custom_titles, loc="upper right")
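
# Note: the _plot_func_* axis manipulators in this section are not meant to be
# called directly; they are handed to plot_posterior_predictive() or
# plot_from_data() via the plot_func argument, which calls them once per axis.
# Minimal sketch with the likelihood-based manipulator above (`m` is a
# placeholder for an already-sampled model; value_range is an example choice):
def _usage_sketch_pdf_node_plot(m):
    plot_posterior_predictive(
        m,
        plot_func=_plot_func_posterior_pdf_node_nn,
        value_range=np.linspace(-5, 5, 200),
        samples=10,
    )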
def _plot_func_posterior_node_from_sim(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.05,
add_posterior_uncertainty_rts=True,
add_posterior_mean_rts=True,
legend_location="upper right",
legend_fontsize=12,
legend_shadow=True,
alpha=0.05,
linewidth=0.5,
add_legend=True,
data_color="blue",
posterior_mean_color="red",
posterior_uncertainty_color="black",
**kwargs,
):
"""Calculate posterior predictive for a certain bottom node and plot a histogram using the supplied axis element.
:Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
:Optional:
samples : int (default=10)
Number of posterior samples to use.
        bin_size : float (default=0.05)
            Width of the bins used for the histogram.
add_posterior_uncertainty_rts: bool (default=True)
Plot individual posterior samples or not.
add_posterior_mean_rts: bool (default=True)
Whether to add a mean posterior (histogram from a dataset collapsed across posterior samples)
alpha: float (default=0.05)
alpha (transparency) level for plot elements from single posterior samples.
linewidth: float (default=0.5)
linewidth used for histograms
add_legend: bool (default=True)
whether or not to add a legend to the current axis.
        legend_location: str <default='upper right'>
            string defining legend position. Find the rest of the options in the matplotlib documentation.
legend_shadow: bool <default=True>
Add shadow to legend box?
legend_fontsize: float <default=12>
Fontsize of legend.
data_color : str <default="blue">
Color for the data part of the plot.
posterior_mean_color : str <default="red">
Color for the posterior mean part of the plot.
posterior_uncertainty_color : str <default="black">
Color for the posterior uncertainty part of the plot.
        model_: str (default='lca_no_bias_4')
            string that defines the generative model used (e.g. 'ddm', 'ornstein' etc.).
"""
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
if len(value_range) == 1:
value_range = (-value_range[0], value_range[0])
else:
value_range = (value_range[0], value_range[-1])
# Extract some parameters from kwargs
bins = np.arange(value_range[0], value_range[1], bin_size)
model_ = kwargs.pop("model_", "lca_no_bias_4")
choices = model_config[model_]["choices"]
n_choices = len(model_config[model_]["choices"])
if type(bottom_node) == pd.DataFrame:
samples = None
data_tmp = bottom_node
data_only = 1
else:
samples = _post_pred_generate(
bottom_node,
samples=samples,
data=None,
append_data=False,
add_model_parameters=False,
)
data_tmp = bottom_node.value
data_only = 0
# Go sample by sample (to show uncertainty)
if add_posterior_uncertainty_rts and not data_only:
for sample in samples:
if n_choices == 2:
if np.sum(sample.rt < 0) == 0:
sample.loc[sample.response != 1, "rt"] = (-1) * sample.rt[
sample.response != 1
].values
axis.hist(
sample.rt,
bins=bins,
density=True,
color=posterior_uncertainty_color,
label="posterior",
histtype="step",
lw=linewidth,
alpha=alpha,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / sample.shape[0],
reps=sample.loc[sample.response == choice, :].shape[0],
)
axis.hist(
sample.rt[sample.response == choice],
bins=bins,
weights=weights,
color=color_dict[choice],
label="posterior",
histtype="step",
lw=linewidth,
alpha=alpha,
)
# Add a 'mean' line
if add_posterior_mean_rts and not data_only:
concat_data = pd.concat(samples)
if n_choices == 2:
if np.sum(concat_data.rt < 0) == 0:
concat_data.loc[concat_data.response != 1, "rt"] = (
-1
) * concat_data.rt[concat_data.response != 1].values
axis.hist(
concat_data.rt,
bins=bins,
density=True,
color=posterior_mean_color,
label="posterior",
histtype="step",
lw=linewidth,
alpha=1.0,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / concat_data.shape[0],
reps=concat_data.loc[concat_data.response == choice, :].shape[0],
)
axis.hist(
concat_data.rt[concat_data.response == choice],
bins=bins,
weights=weights,
color=color_dict[choice],
label="posterior",
histtype="step",
lw=linewidth,
alpha=1.0,
)
# Plot data
if len(data_tmp) != 0:
if n_choices == 2:
rt_dat = data_tmp.copy()
if np.sum(rt_dat.rt < 0) == 0:
if "response" in rt_dat.columns:
rt_dat.loc[rt_dat.response != 1, "rt"] = (-1) * rt_dat.rt[
rt_dat.response != 1
].values
else:
pass
axis.hist(
rt_dat.rt,
bins=bins,
density=True,
color=data_color,
label="data",
linestyle="-",
histtype="step",
lw=linewidth,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / data_tmp.shape[0],
reps=data_tmp[data_tmp.response == choice].shape[0],
)
axis.hist(
data_tmp.rt[data_tmp.response == choice],
bins=bins,
weights=weights,
color=color_dict[choice],
label="data",
linestyle="dashed",
histtype="step",
lw=linewidth,
)
axis.set_ylim(bottom=0) # Likelihood and histogram can only be positive
# Adding legend:
if add_legend:
if n_choices == 2:
custom_elems = []
custom_titles = []
custom_elems.append(
Line2D([0], [0], color=data_color, lw=1.0, linestyle="-")
)
custom_elems.append(
Line2D([0], [0], color=posterior_mean_color, lw=1.0, linestyle="-")
)
custom_titles.append("Data")
custom_titles.append("Posterior")
else:
custom_elems = [
Line2D([0], [0], color=color_dict[choice], lw=1) for choice in choices
]
custom_titles = ["response: " + str(choice) for choice in choices]
custom_elems.append(
Line2D([0], [0], color="black", lw=1.0, linestyle="dashed")
)
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
if not data_only:
axis.legend(
custom_elems,
custom_titles,
loc=legend_location,
fontsize=legend_fontsize,
shadow=legend_shadow,
)
def _plot_func_model(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.05,
add_data_rts=True,
add_data_model=True,
add_data_model_keep_slope=True,
add_data_model_keep_boundary=True,
add_data_model_keep_ndt=True,
add_data_model_keep_starting_point=True,
add_data_model_markersize_starting_point=50,
add_data_model_markertype_starting_point=0,
add_data_model_markershift_starting_point=0,
add_posterior_uncertainty_model=False,
add_posterior_uncertainty_rts=False,
add_posterior_mean_model=True,
add_posterior_mean_rts=True,
add_trajectories=False,
data_label="Data",
secondary_data=None,
secondary_data_label=None,
secondary_data_color="blue",
linewidth_histogram=0.5,
linewidth_model=0.5,
legend_fontsize=12,
legend_shadow=True,
legend_location="upper right",
data_color="blue",
posterior_mean_color="red",
posterior_uncertainty_color="black",
alpha=0.05,
delta_t_model=0.01,
add_legend=True, # keep_frame=False,
**kwargs,
):
"""Calculate posterior predictive for a certain bottom node.
Arguments:
bottom_node: pymc.stochastic
Bottom node to compute posterior over.
axis: matplotlib.axis
Axis to plot into.
value_range: numpy.ndarray
Range over which to evaluate the likelihood.
Optional:
samples: int <default=10>
Number of posterior samples to use.
bin_size: float <default=0.05>
Size of bins used for histograms
alpha: float <default=0.05>
alpha (transparency) level for the sample-wise elements of the plot
add_data_rts: bool <default=True>
Add data histogram of rts ?
add_data_model: bool <default=True>
Add model cartoon for data
        add_posterior_uncertainty_rts: bool <default=False>
            Add sample by sample histograms?
        add_posterior_mean_rts: bool <default=True>
            Add a mean posterior?
        add_posterior_uncertainty_model: bool <default=False>
            Add model cartoons for individual posterior samples?
        add_posterior_mean_model: bool <default=True>
            Add a model cartoon for the posterior mean parameterization?
        linewidth_histogram: float <default=0.5>
            linewidth of histogram plot elements.
linewidth_model: float <default=0.5>
linewidth of plot elements concerning the model cartoons.
legend_location: str <default='upper right'>
string defining legend position. Find the rest of the options in the matplotlib documentation.
legend_shadow: bool <default=True>
Add shadow to legend box?
legend_fontsize: float <default=12>
Fontsize of legend.
data_color : str <default="blue">
Color for the data part of the plot.
posterior_mean_color : str <default="red">
Color for the posterior mean part of the plot.
posterior_uncertainty_color : str <default="black">
Color for the posterior uncertainty part of the plot.
delta_t_model:
specifies plotting intervals for model cartoon elements of the graphs.
"""
# AF-TODO: Add a mean version of this!
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
if len(value_range) > 2:
value_range = (value_range[0], value_range[-1])
# Extract some parameters from kwargs
bins = np.arange(value_range[0], value_range[-1], bin_size)
# If bottom_node is a DataFrame we know that we are just plotting real data
if type(bottom_node) == pd.DataFrame:
samples_tmp = [bottom_node]
data_tmp = None
else:
samples_tmp = _post_pred_generate(
bottom_node,
samples=samples,
data=None,
append_data=False,
add_model_parameters=True,
)
data_tmp = bottom_node.value.copy()
# Relevant for recovery mode
node_data_full = kwargs.pop("node_data", None)
tmp_model = kwargs.pop("model_", "angle")
if len(model_config[tmp_model]["choices"]) > 2:
raise ValueError("The model plot works only for 2 choice models at the moment")
# ---------------------------
ylim = kwargs.pop("ylim", 3)
hist_bottom = kwargs.pop("hist_bottom", 2)
hist_histtype = kwargs.pop("hist_histtype", "step")
if ("ylim_high" in kwargs) and ("ylim_low" in kwargs):
ylim_high = kwargs["ylim_high"]
ylim_low = kwargs["ylim_low"]
else:
ylim_high = ylim
ylim_low = -ylim
if ("hist_bottom_high" in kwargs) and ("hist_bottom_low" in kwargs):
hist_bottom_high = kwargs["hist_bottom_high"]
hist_bottom_low = kwargs["hist_bottom_low"]
else:
hist_bottom_high = hist_bottom
hist_bottom_low = hist_bottom
axis.set_xlim(value_range[0], value_range[-1])
axis.set_ylim(ylim_low, ylim_high)
axis_twin_up = axis.twinx()
axis_twin_down = axis.twinx()
axis_twin_up.set_ylim(ylim_low, ylim_high)
axis_twin_up.set_yticks([])
axis_twin_down.set_ylim(ylim_high, ylim_low)
axis_twin_down.set_yticks([])
axis_twin_down.set_axis_off()
axis_twin_up.set_axis_off()
# ADD HISTOGRAMS
# -------------------------------
# POSTERIOR BASED HISTOGRAM
if add_posterior_uncertainty_rts: # add_uc_rts:
j = 0
for sample in samples_tmp:
tmp_label = None
if add_legend and j == 0:
tmp_label = "PostPred"
weights_up = np.tile(
(1 / bin_size) / sample.shape[0],
reps=sample.loc[sample.response == 1, :].shape[0],
)
weights_down = np.tile(
(1 / bin_size) / sample.shape[0],
reps=sample.loc[(sample.response != 1), :].shape[0],
)
axis_twin_up.hist(
np.abs(sample.rt[sample.response == 1]),
bins=bins,
weights=weights_up,
histtype=hist_histtype,
bottom=hist_bottom_high,
alpha=alpha,
color=posterior_uncertainty_color,
edgecolor=posterior_uncertainty_color,
zorder=-1,
label=tmp_label,
linewidth=linewidth_histogram,
)
axis_twin_down.hist(
np.abs(sample.loc[(sample.response != 1), :].rt),
bins=bins,
weights=weights_down,
histtype=hist_histtype,
bottom=hist_bottom_low,
alpha=alpha,
color=posterior_uncertainty_color,
edgecolor=posterior_uncertainty_color,
linewidth=linewidth_histogram,
zorder=-1,
)
j += 1
if add_posterior_mean_rts: # add_mean_rts:
concat_data = pd.concat(samples_tmp)
tmp_label = None
if add_legend:
tmp_label = "PostPred Mean"
weights_up = np.tile(
(1 / bin_size) / concat_data.shape[0],
reps=concat_data.loc[concat_data.response == 1, :].shape[0],
)
weights_down = np.tile(
(1 / bin_size) / concat_data.shape[0],
reps=concat_data.loc[(concat_data.response != 1), :].shape[0],
)
axis_twin_up.hist(
np.abs(concat_data.rt[concat_data.response == 1]),
bins=bins,
weights=weights_up,
histtype=hist_histtype,
bottom=hist_bottom_high,
alpha=1.0,
color=posterior_mean_color,
edgecolor=posterior_mean_color,
zorder=-1,
label=tmp_label,
linewidth=linewidth_histogram,
)
axis_twin_down.hist(
np.abs(concat_data.loc[(concat_data.response != 1), :].rt),
bins=bins,
weights=weights_down,
histtype=hist_histtype,
bottom=hist_bottom_low,
alpha=1.0,
color=posterior_mean_color,
edgecolor=posterior_mean_color,
linewidth=linewidth_histogram,
zorder=-1,
)
# DATA HISTOGRAM
if (data_tmp is not None) and add_data_rts:
tmp_label = None
if add_legend:
tmp_label = data_label
weights_up = np.tile(
(1 / bin_size) / data_tmp.shape[0],
reps=data_tmp[data_tmp.response == 1].shape[0],
)
weights_down = np.tile(
(1 / bin_size) / data_tmp.shape[0],
reps=data_tmp[(data_tmp.response != 1)].shape[0],
)
axis_twin_up.hist(
np.abs(data_tmp[data_tmp.response == 1].rt),
bins=bins,
weights=weights_up,
histtype=hist_histtype,
bottom=hist_bottom_high,
alpha=1,
color=data_color,
edgecolor=data_color,
label=tmp_label,
zorder=-1,
linewidth=linewidth_histogram,
)
axis_twin_down.hist(
np.abs(data_tmp[(data_tmp.response != 1)].rt),
bins=bins,
weights=weights_down,
histtype=hist_histtype,
bottom=hist_bottom_low,
alpha=1,
color=data_color,
edgecolor=data_color,
linewidth=linewidth_histogram,
zorder=-1,
)
# SECONDARY DATA HISTOGRAM
if secondary_data is not None:
tmp_label = None
if add_legend:
if secondary_data_label is not None:
tmp_label = secondary_data_label
weights_up = np.tile(
(1 / bin_size) / secondary_data.shape[0],
reps=secondary_data[secondary_data.response == 1].shape[0],
)
weights_down = np.tile(
(1 / bin_size) / secondary_data.shape[0],
reps=secondary_data[(secondary_data.response != 1)].shape[0],
)
axis_twin_up.hist(
np.abs(secondary_data[secondary_data.response == 1].rt),
bins=bins,
weights=weights_up,
histtype=hist_histtype,
bottom=hist_bottom_high,
alpha=1,
color=secondary_data_color,
edgecolor=secondary_data_color,
label=tmp_label,
zorder=-100,
linewidth=linewidth_histogram,
)
axis_twin_down.hist(
np.abs(secondary_data[(secondary_data.response != 1)].rt),
bins=bins,
weights=weights_down,
histtype=hist_histtype,
bottom=hist_bottom_low,
alpha=1,
color=secondary_data_color,
edgecolor=secondary_data_color,
linewidth=linewidth_histogram,
zorder=-100,
)
# -------------------------------
if add_legend:
if data_tmp is not None:
axis_twin_up.legend(
fontsize=legend_fontsize, shadow=legend_shadow, loc=legend_location
)
# ADD MODEL:
j = 0
t_s = np.arange(0, value_range[-1], delta_t_model)
# MAKE BOUNDS (FROM MODEL CONFIG) !
    if add_posterior_uncertainty_model:  # add_uc_model:
        for sample in samples_tmp:
            tmp_label = None
            if add_legend and j == 0:
                tmp_label = "PostPred"
            _add_model_cartoon_to_ax(
                sample=sample,
                axis=axis,
                tmp_model=tmp_model,
                keep_slope=add_data_model_keep_slope,
                keep_boundary=add_data_model_keep_boundary,
                keep_ndt=add_data_model_keep_ndt,
                keep_starting_point=add_data_model_keep_starting_point,
                markersize_starting_point=add_data_model_markersize_starting_point,
                markertype_starting_point=add_data_model_markertype_starting_point,
                markershift_starting_point=add_data_model_markershift_starting_point,
                delta_t_graph=delta_t_model,
                sample_hist_alpha=alpha,
                lw_m=linewidth_model,
                tmp_label=tmp_label,
                ylim_low=ylim_low,
                ylim_high=ylim_high,
                t_s=t_s,
                color=posterior_uncertainty_color,
                zorder_cnt=j,
            )
            j += 1
if (node_data_full is not None) and add_data_model:
_add_model_cartoon_to_ax(
sample=node_data_full,
axis=axis,
tmp_model=tmp_model,
keep_slope=add_data_model_keep_slope,
keep_boundary=add_data_model_keep_boundary,
keep_ndt=add_data_model_keep_ndt,
keep_starting_point=add_data_model_keep_starting_point,
markersize_starting_point=add_data_model_markersize_starting_point,
markertype_starting_point=add_data_model_markertype_starting_point,
markershift_starting_point=add_data_model_markershift_starting_point,
delta_t_graph=delta_t_model,
sample_hist_alpha=1.0,
lw_m=linewidth_model + 0.5,
tmp_label=None,
ylim_low=ylim_low,
ylim_high=ylim_high,
t_s=t_s,
color=data_color,
zorder_cnt=j + 1,
)
if add_posterior_mean_model: # add_mean_model:
tmp_label = None
if add_legend:
tmp_label = "PostPred Mean"
_add_model_cartoon_to_ax(
sample=pd.DataFrame(pd.concat(samples_tmp).mean().astype(np.float32)).T,
axis=axis,
tmp_model=tmp_model,
keep_slope=add_data_model_keep_slope,
keep_boundary=add_data_model_keep_boundary,
keep_ndt=add_data_model_keep_ndt,
keep_starting_point=add_data_model_keep_starting_point,
markersize_starting_point=add_data_model_markersize_starting_point,
markertype_starting_point=add_data_model_markertype_starting_point,
markershift_starting_point=add_data_model_markershift_starting_point,
delta_t_graph=delta_t_model,
sample_hist_alpha=1.0,
lw_m=linewidth_model + 0.5,
tmp_label=None,
ylim_low=ylim_low,
ylim_high=ylim_high,
t_s=t_s,
color=posterior_mean_color,
zorder_cnt=j + 2,
)
if add_trajectories:
_add_trajectories(
axis=axis,
sample=samples_tmp[0],
tmp_model=tmp_model,
t_s=t_s,
delta_t_graph=delta_t_model,
**kwargs,
)
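
# Usage sketch (illustrative only): the 'model cartoon' plot with posterior
# uncertainty and a few simulated trajectories. `m` is a placeholder for an
# already-sampled 2-choice model; all settings below are example values.
def _usage_sketch_model_plot(m):
    plot_posterior_predictive(
        m,
        plot_func=_plot_func_model,
        value_range=np.arange(0, 4, 0.1),
        samples=10,
        add_posterior_uncertainty_model=True,
        add_posterior_uncertainty_rts=True,
        add_trajectories=True,
        n_trajectories=5,
    )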
# AF-TODO: Add documentation for this function
def _add_trajectories(
axis=None,
sample=None,
t_s=None,
delta_t_graph=0.01,
tmp_model=None,
n_trajectories=10,
supplied_trajectory=None,
maxid_supplied_trajectory=1, # useful for gifs
highlight_trajectory_rt_choice=True,
markersize_trajectory_rt_choice=50,
markertype_trajectory_rt_choice="*",
markercolor_trajectory_rt_choice="red",
linewidth_trajectories=1,
alpha_trajectories=0.5,
color_trajectories="black",
**kwargs,
):
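    """Draw simulated (or user-supplied) evidence-accumulation trajectories onto
    `axis`, on top of the model cartoon. Trajectories are generated with the
    (noise-enabled) simulator from the parameters in `sample`; the boundary value
    at the crossing time is highlighted if highlight_trajectory_rt_choice is True.
    """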
# Check markercolor type
if type(markercolor_trajectory_rt_choice) == str:
markercolor_trajectory_rt_choice_dict = {}
for value_ in model_config[tmp_model]["choices"]:
markercolor_trajectory_rt_choice_dict[
value_
] = markercolor_trajectory_rt_choice
    elif type(markercolor_trajectory_rt_choice) == list:
        markercolor_trajectory_rt_choice_dict = {}
        cnt = 0
        for value_ in model_config[tmp_model]["choices"]:
            markercolor_trajectory_rt_choice_dict[
                value_
            ] = markercolor_trajectory_rt_choice[cnt]
            cnt += 1
elif type(markercolor_trajectory_rt_choice) == dict:
markercolor_trajectory_rt_choice_dict = markercolor_trajectory_rt_choice
else:
pass
# Check trajectory color type
if type(color_trajectories) == str:
color_trajectories_dict = {}
for value_ in model_config[tmp_model]["choices"]:
color_trajectories_dict[value_] = color_trajectories
    elif type(color_trajectories) == list:
        color_trajectories_dict = {}
        cnt = 0
        for value_ in model_config[tmp_model]["choices"]:
            color_trajectories_dict[value_] = color_trajectories[cnt]
            cnt += 1
elif type(color_trajectories) == dict:
color_trajectories_dict = color_trajectories
else:
pass
# Make bounds
(b_low, b_high) = _make_bounds(
tmp_model=tmp_model,
sample=sample,
delta_t_graph=delta_t_graph,
t_s=t_s,
return_shifted_by_ndt=False,
)
# Trajectories
if supplied_trajectory is None:
for i in range(n_trajectories):
rand_int = np.random.choice(400000000)
out_traj = simulator(
theta=sample[model_config[tmp_model]["params"]].values[0],
model=tmp_model,
n_samples=1,
no_noise=False,
delta_t=delta_t_graph,
bin_dim=None,
random_state=rand_int,
)
tmp_traj = out_traj[2]["trajectory"]
tmp_traj_choice = float(out_traj[1].flatten())
maxid = np.minimum(np.argmax(np.where(tmp_traj > -999)), t_s.shape[0])
# Identify boundary value at timepoint of crossing
b_tmp = b_high[maxid] if tmp_traj_choice > 0 else b_low[maxid]
axis.plot(
t_s[:maxid] + sample.t.values[0],
tmp_traj[:maxid],
color=color_trajectories_dict[tmp_traj_choice],
alpha=alpha_trajectories,
linewidth=linewidth_trajectories,
zorder=2000 + i,
)
if highlight_trajectory_rt_choice:
axis.scatter(
t_s[maxid] + sample.t.values[0],
b_tmp,
# tmp_traj[maxid],
markersize_trajectory_rt_choice,
color=markercolor_trajectory_rt_choice_dict[tmp_traj_choice],
alpha=1,
marker=markertype_trajectory_rt_choice,
zorder=2000 + i,
)
else:
if len(supplied_trajectory["trajectories"].shape) == 1:
supplied_trajectory["trajectories"] = np.expand_dims(
supplied_trajectory["trajectories"], axis=0
)
for j in range(supplied_trajectory["trajectories"].shape[0]):
maxid = np.minimum(
np.argmax(np.where(supplied_trajectory["trajectories"][j, :] > -999)),
t_s.shape[0],
)
if j == (supplied_trajectory["trajectories"].shape[0] - 1):
maxid_traj = min(maxid, maxid_supplied_trajectory)
else:
maxid_traj = maxid
axis.plot(
t_s[:maxid_traj] + sample.t.values[0],
supplied_trajectory["trajectories"][j, :maxid_traj],
color=color_trajectories_dict[
supplied_trajectory["trajectory_choices"][j]
], # color_trajectories,
alpha=alpha_trajectories,
linewidth=linewidth_trajectories,
zorder=2000 + j,
)
# Identify boundary value at timepoint of crossing
b_tmp = (
b_high[maxid_traj]
if supplied_trajectory["trajectory_choices"][j] > 0
else b_low[maxid_traj]
)
if maxid_traj == maxid:
if highlight_trajectory_rt_choice:
axis.scatter(
t_s[maxid_traj] + sample.t.values[0],
b_tmp,
# supplied_trajectory['trajectories'][j, maxid_traj],
markersize_trajectory_rt_choice,
color=markercolor_trajectory_rt_choice_dict[
supplied_trajectory["trajectory_choices"][j]
], # markercolor_trajectory_rt_choice,
alpha=1,
marker=markertype_trajectory_rt_choice,
zorder=2000 + j,
)
# AF-TODO: Add documentation to this function
def _add_model_cartoon_to_ax(
sample=None,
axis=None,
tmp_model=None,
keep_slope=True,
keep_boundary=True,
keep_ndt=True,
keep_starting_point=True,
markersize_starting_point=80,
markertype_starting_point=1,
markershift_starting_point=-0.05,
delta_t_graph=None,
sample_hist_alpha=None,
lw_m=None,
tmp_label=None,
ylim_low=None,
ylim_high=None,
t_s=None,
zorder_cnt=1,
color="black",
):
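    """Draw a 2-choice model cartoon onto `axis` for the parameters in `sample`:
    upper and lower decision boundaries, a noise-free drift trajectory (slope),
    the non-decision time as a dashed vertical line and the starting-point marker.
    Individual elements can be toggled via the keep_* arguments.
    """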
# Make bounds
b_low, b_high = _make_bounds(
tmp_model=tmp_model,
sample=sample,
delta_t_graph=delta_t_graph,
t_s=t_s,
return_shifted_by_ndt=True,
)
# MAKE SLOPES (VIA TRAJECTORIES HERE --> RUN NOISE FREE SIMULATIONS)!
out = simulator(
theta=sample[model_config[tmp_model]["params"]].values[0],
model=tmp_model,
n_samples=1,
no_noise=True,
delta_t=delta_t_graph,
bin_dim=None,
)
tmp_traj = out[2]["trajectory"]
maxid = np.minimum(np.argmax(np.where(tmp_traj > -999)), t_s.shape[0])
if "hddm_base" in tmp_model:
a_tmp = sample.a.values[0] / 2
tmp_traj = tmp_traj - a_tmp
if keep_boundary:
# Upper bound
axis.plot(
t_s, # + sample.t.values[0],
b_high,
color=color,
alpha=sample_hist_alpha,
zorder=1000 + zorder_cnt,
linewidth=lw_m,
label=tmp_label,
)
# Lower bound
axis.plot(
t_s, # + sample.t.values[0],
b_low,
color=color,
alpha=sample_hist_alpha,
zorder=1000 + zorder_cnt,
linewidth=lw_m,
)
# Slope
if keep_slope:
axis.plot(
t_s[:maxid] + sample.t.values[0],
tmp_traj[:maxid],
color=color,
alpha=sample_hist_alpha,
zorder=1000 + zorder_cnt,
linewidth=lw_m,
) # TOOK AWAY LABEL
# Non-decision time
if keep_ndt:
axis.axvline(
x=sample.t.values[0],
ymin=ylim_low,
ymax=ylim_high,
color=color,
linestyle="--",
linewidth=lw_m,
zorder=1000 + zorder_cnt,
alpha=sample_hist_alpha,
)
# Starting point
if keep_starting_point:
axis.scatter(
sample.t.values[0] + markershift_starting_point,
b_low[0] + (sample.z.values[0] * (b_high[0] - b_low[0])),
markersize_starting_point,
marker=markertype_starting_point,
color=color,
alpha=sample_hist_alpha,
zorder=1000 + zorder_cnt,
)
def _make_bounds(
tmp_model=None,
sample=None,
delta_t_graph=None,
t_s=None,
return_shifted_by_ndt=True,
):
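    """Compute the lower and upper decision boundaries of `tmp_model` at the time
    points in `t_s`, using the parameters in `sample`. If return_shifted_by_ndt is
    True the boundaries are shifted forward in time by the non-decision time,
    otherwise the raw (unshifted) boundaries are returned. Returns (b_low, b_high).
    """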
# MULTIPLICATIVE BOUND
if tmp_model == "weibull":
b = np.maximum(
sample.a.values[0]
* model_config[tmp_model]["boundary"](
t=t_s, alpha=sample.alpha.values[0], beta=sample.beta.values[0]
),
0,
)
# Move boundary forward by the non-decision time
b_raw_high = deepcopy(b)
b_raw_low = deepcopy(-b)
b_init_val = b[0]
t_shift = np.arange(0, sample.t.values[0], delta_t_graph).shape[0]
b = np.roll(b, t_shift)
b[:t_shift] = b_init_val
# ADDITIVE BOUND
elif tmp_model == "angle":
b = np.maximum(
sample.a.values[0]
+ model_config[tmp_model]["boundary"](t=t_s, theta=sample.theta.values[0]),
0,
)
b_raw_high = deepcopy(b)
b_raw_low = deepcopy(-b)
# Move boundary forward by the non-decision time
b_init_val = b[0]
t_shift = np.arange(0, sample.t.values[0], delta_t_graph).shape[0]
b = np.roll(b, t_shift)
b[:t_shift] = b_init_val
# CONSTANT BOUND
elif (
tmp_model == "ddm"
or tmp_model == "ornstein"
or tmp_model == "levy"
or tmp_model == "full_ddm"
or tmp_model == "ddm_hddm_base"
or tmp_model == "full_ddm_hddm_base"
):
b = sample.a.values[0] * np.ones(t_s.shape[0])
if "hddm_base" in tmp_model:
b = (sample.a.values[0] / 2) * np.ones(t_s.shape[0])
b_raw_high = b
b_raw_low = -b
# Separate out upper and lower bound:
b_high = b
b_low = -b
if return_shifted_by_ndt:
return (b_low, b_high)
else:
return (b_raw_low, b_raw_high)
def _plot_func_model_n(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.05,
add_posterior_uncertainty_model=False,
add_posterior_uncertainty_rts=False,
add_posterior_mean_model=True,
add_posterior_mean_rts=True,
linewidth_histogram=0.5,
linewidth_model=0.5,
legend_fontsize=7,
legend_shadow=True,
legend_location="upper right",
delta_t_model=0.01,
add_legend=True,
alpha=0.01,
keep_frame=False,
**kwargs,
):
"""Calculate posterior predictive for a certain bottom node.
Arguments:
bottom_node: pymc.stochastic
Bottom node to compute posterior over.
axis: matplotlib.axis
Axis to plot into.
value_range: numpy.ndarray
Range over which to evaluate the likelihood.
Optional:
samples: int <default=10>
Number of posterior samples to use.
bin_size: float <default=0.05>
Size of bins used for histograms
        alpha: float <default=0.01>
            alpha (transparency) level for the sample-wise elements of the plot
        add_posterior_uncertainty_rts: bool <default=False>
            Add sample by sample histograms?
        add_posterior_mean_rts: bool <default=True>
            Add a mean posterior?
        add_posterior_uncertainty_model: bool <default=False>
            Add model cartoons for individual posterior samples?
        add_posterior_mean_model: bool <default=True>
            Add a model cartoon for the posterior mean parameterization?
        linewidth_histogram: float <default=0.5>
            linewidth of histogram plot elements.
linewidth_model: float <default=0.5>
linewidth of plot elements concerning the model cartoons.
        legend_location: str <default='upper right'>
            string defining legend position. Find the rest of the options in the matplotlib documentation.
legend_shadow: bool <default=True>
Add shadow to legend box?
legend_fontsize: float <default=12>
Fontsize of legend.
data_color : str <default="blue">
Color for the data part of the plot.
posterior_mean_color : str <default="red">
Color for the posterior mean part of the plot.
posterior_uncertainty_color : str <default="black">
Color for the posterior uncertainty part of the plot.
delta_t_model:
specifies plotting intervals for model cartoon elements of the graphs.
"""
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
# AF-TODO: Add a mean version of this !
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
if len(value_range) > 2:
value_range = (value_range[0], value_range[-1])
# Extract some parameters from kwargs
bins = np.arange(value_range[0], value_range[-1], bin_size)
# Relevant for recovery mode
node_data_full = kwargs.pop("node_data", None)
tmp_model = kwargs.pop("model_", "angle")
bottom = 0
# ------------
ylim = kwargs.pop("ylim", 3)
choices = model_config[tmp_model]["choices"]
# If bottom_node is a DataFrame we know that we are just plotting real data
if type(bottom_node) == pd.DataFrame:
samples_tmp = [bottom_node]
data_tmp = None
else:
samples_tmp = _post_pred_generate(
bottom_node,
samples=samples,
data=None,
append_data=False,
add_model_parameters=True,
)
data_tmp = bottom_node.value.copy()
axis.set_xlim(value_range[0], value_range[-1])
axis.set_ylim(0, ylim)
# ADD MODEL:
j = 0
t_s = np.arange(0, value_range[-1], delta_t_model)
# # MAKE BOUNDS (FROM MODEL CONFIG) !
if add_posterior_uncertainty_model: # add_uc_model:
for sample in samples_tmp:
tmp_label = None
if add_legend and (j == 0):
tmp_label = "PostPred"
_add_model_n_cartoon_to_ax(
sample=sample,
axis=axis,
tmp_model=tmp_model,
delta_t_graph=delta_t_model,
sample_hist_alpha=alpha,
lw_m=linewidth_model,
tmp_label=tmp_label,
linestyle="-",
ylim=ylim,
t_s=t_s,
color_dict=color_dict,
zorder_cnt=j,
)
j += 1
if add_posterior_mean_model: # add_mean_model:
tmp_label = None
if add_legend:
tmp_label = "PostPred Mean"
bottom = _add_model_n_cartoon_to_ax(
sample=pd.DataFrame(pd.concat(samples_tmp).mean().astype(np.float32)).T,
axis=axis,
tmp_model=tmp_model,
delta_t_graph=delta_t_model,
sample_hist_alpha=1.0,
lw_m=linewidth_model + 0.5,
linestyle="-",
tmp_label=None,
ylim=ylim,
t_s=t_s,
color_dict=color_dict,
zorder_cnt=j + 2,
)
if node_data_full is not None:
_add_model_n_cartoon_to_ax(
sample=node_data_full,
axis=axis,
tmp_model=tmp_model,
delta_t_graph=delta_t_model,
sample_hist_alpha=1.0,
lw_m=linewidth_model + 0.5,
linestyle="dashed",
tmp_label=None,
ylim=ylim,
t_s=t_s,
color_dict=color_dict,
zorder_cnt=j + 1,
)
# ADD HISTOGRAMS
# -------------------------------
# POSTERIOR BASED HISTOGRAM
if add_posterior_uncertainty_rts: # add_uc_rts:
j = 0
for sample in samples_tmp:
for choice in choices:
tmp_label = None
if add_legend and j == 0:
tmp_label = "PostPred"
weights = np.tile(
(1 / bin_size) / sample.shape[0],
reps=sample.loc[sample.response == choice, :].shape[0],
)
axis.hist(
np.abs(sample.rt[sample.response == choice]),
bins=bins,
bottom=bottom,
weights=weights,
histtype="step",
alpha=alpha,
color=color_dict[choice],
zorder=-1,
label=tmp_label,
linewidth=linewidth_histogram,
)
j += 1
if add_posterior_mean_rts:
concat_data = pd.concat(samples_tmp)
for choice in choices:
tmp_label = None
if add_legend and (choice == choices[0]):
tmp_label = "PostPred Mean"
weights = np.tile(
(1 / bin_size) / concat_data.shape[0],
reps=concat_data.loc[concat_data.response == choice, :].shape[0],
)
axis.hist(
np.abs(concat_data.rt[concat_data.response == choice]),
bins=bins,
bottom=bottom,
weights=weights,
histtype="step",
alpha=1.0,
color=color_dict[choice],
zorder=-1,
label=tmp_label,
linewidth=linewidth_histogram,
)
# DATA HISTOGRAM
if data_tmp is not None:
for choice in choices:
tmp_label = None
if add_legend and (choice == choices[0]):
tmp_label = "Data"
weights = np.tile(
(1 / bin_size) / data_tmp.shape[0],
reps=data_tmp.loc[data_tmp.response == choice, :].shape[0],
)
axis.hist(
np.abs(data_tmp.rt[data_tmp.response == choice]),
bins=bins,
bottom=bottom,
weights=weights,
histtype="step",
linestyle="dashed",
alpha=1.0,
color=color_dict[choice],
edgecolor=color_dict[choice],
zorder=-1,
label=tmp_label,
linewidth=linewidth_histogram,
)
# -------------------------------
if add_legend:
if data_tmp is not None:
custom_elems = [
Line2D([0], [0], color=color_dict[choice], lw=1) for choice in choices
]
custom_titles = ["response: " + str(choice) for choice in choices]
custom_elems.append(
Line2D([0], [0], color="black", lw=1.0, linestyle="dashed")
)
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
axis.legend(
custom_elems,
custom_titles,
fontsize=legend_fontsize,
shadow=legend_shadow,
loc=legend_location,
)
# FRAME
if not keep_frame:
axis.set_frame_on(False)
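
# Usage sketch (illustrative only): n-choice counterpart of the model plot above.
# `m` is a placeholder for an already-sampled model with more than two response
# options; all settings below are example values.
def _usage_sketch_model_n_plot(m):
    plot_posterior_predictive(
        m,
        plot_func=_plot_func_model_n,
        value_range=np.arange(0, 4, 0.1),
        samples=10,
        add_posterior_uncertainty_model=True,
        add_posterior_uncertainty_rts=True,
    )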
def _add_model_n_cartoon_to_ax(
sample=None,
axis=None,
tmp_model=None,
delta_t_graph=None,
sample_hist_alpha=None,
lw_m=None,
linestyle="-",
tmp_label=None,
ylim=None,
t_s=None,
zorder_cnt=1,
color_dict=None,
):
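    """Draw an n-choice model cartoon onto `axis` for the parameters in `sample`:
    the boundary, a vertical line at the non-decision time and one noise-free
    drift trajectory per choice option (colored via color_dict). Returns the
    boundary value at t=0, which is used as the histogram baseline.
    """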
if "weibull" in tmp_model:
b = np.maximum(
sample.a.values[0]
* model_config[tmp_model]["boundary"](
t=t_s, alpha=sample.alpha.values[0], beta=sample.beta.values[0]
),
0,
)
elif "angle" in tmp_model:
b = np.maximum(
sample.a.values[0]
+ model_config[tmp_model]["boundary"](t=t_s, theta=sample.theta.values[0]),
0,
)
else:
b = sample.a.values[0] * np.ones(t_s.shape[0])
# Upper bound
axis.plot(
t_s + sample.t.values[0],
b,
color="black",
alpha=sample_hist_alpha,
zorder=1000 + zorder_cnt,
linewidth=lw_m,
linestyle=linestyle,
label=tmp_label,
)
# Starting point
axis.axvline(
x=sample.t.values[0],
ymin=-ylim,
ymax=ylim,
color="black",
linestyle=linestyle,
linewidth=lw_m,
alpha=sample_hist_alpha,
)
# # MAKE SLOPES (VIA TRAJECTORIES HERE --> RUN NOISE FREE SIMULATIONS)!
out = simulator(
theta=sample[model_config[tmp_model]["params"]].values[0],
model=tmp_model,
n_samples=1,
no_noise=True,
delta_t=delta_t_graph,
bin_dim=None,
)
# # AF-TODO: Add trajectories
tmp_traj = out[2]["trajectory"]
for i in range(len(model_config[tmp_model]["choices"])):
tmp_maxid = np.minimum(np.argmax(np.where(tmp_traj[:, i] > -999)), t_s.shape[0])
# Slope
axis.plot(
t_s[:tmp_maxid] + sample.t.values[0],
tmp_traj[:tmp_maxid, i],
color=color_dict[i],
linestyle=linestyle,
alpha=sample_hist_alpha,
zorder=1000 + zorder_cnt,
linewidth=lw_m,
) # TOOK AWAY LABEL
return b[0]
def _plot_func_pair(
bottom_node,
model_="ddm_hddm_base",
n_samples=200,
figsize=(8, 6),
title="",
**kwargs,
):
"""Generates a posterior pair plot for a given kabuki node.
Arguments:
bottom_node: kabuki_node
Observed node of a kabuki.Hierarchical model.
n_samples: int <default=200>
Number of posterior samples to consider for the plot.
figsize: (int, int) <default=(8,6)>
Size of the figure in inches.
title: str <default=''>
Plot title.
        model_: str <default='ddm_hddm_base'>
            Name of the generative model; used to look up the model's parameters in model_config.
"""
params = model_config[model_]["params"]
parent_keys = bottom_node.parents.value.keys()
param_intersection = set(params).intersection(set(parent_keys))
df = pd.DataFrame(
np.empty((n_samples, len(param_intersection))), columns=param_intersection
)
node_data_full = kwargs.pop("node_data", None)
for i in range(n_samples):
_parents_to_random_posterior_sample(bottom_node)
for key_tmp in param_intersection:
df.loc[i, key_tmp] = bottom_node.parents.value[key_tmp]
sns.set_theme(
style="ticks", color_codes=True
) # , rc = {'figure.figsize': figsize})
g = sns.PairGrid(data=df, corner=True)
g.fig.set_size_inches(figsize[0], figsize[1])
g = g.map_diag(plt.hist, histtype="step", color="black", alpha=0.8)
g = g.map_lower(sns.kdeplot, cmap="Reds")
# Adding ground truth if calling function was in parameter recovery mode
if node_data_full is not None:
for i in range(1, g.axes.shape[0], 1):
for j in range(0, i, 1):
tmp_y_label = g.axes[g.axes.shape[0] - 1, i].get_xlabel()
tmp_x_label = g.axes[g.axes.shape[0] - 1, j].get_xlabel()
g.axes[i, j].scatter(
node_data_full[tmp_x_label].values[0],
node_data_full[tmp_y_label].values[0],
color="blue",
marker="+",
s=100,
zorder=1000,
)
# Adding ground truth to axes !
for i in range(g.axes.shape[0]):
if i == 0:
y_lims_tmp = g.axes[i, i].get_ylim()
g.axes[i, i].set_ylim(0, y_lims_tmp[1])
tmp_x_label = g.axes[g.axes.shape[0] - 1, i].get_xlabel()
g.axes[i, i].scatter(
node_data_full[tmp_x_label].values[0],
g.axes[i, i].get_ylim()[0],
color="blue",
marker="|",
s=100,
)
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.95, hspace=0.3, wspace=0.2)
return g
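
# Usage sketch (illustrative only): posterior pair plot for the first observed
# node of a placeholder, already-sampled model `m`.
def _usage_sketch_pair_plot(m):
    observeds = m.get_observeds()
    first_node = observeds.iloc[0]["node"]
    return _plot_func_pair(
        first_node, model_=m.model, n_samples=200, title="posterior pairs"
    )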
# ONE OFF PLOTS
def _group_node_names_by_param(model):
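    """Group the names of the model's non-observed stochastic nodes by the model
    parameter they belong to. 'offset' nodes, transformed nodes ('trans', 'tau'),
    regression-related nodes ('_reg', '_rate', '_shape') and observed nodes are
    skipped, and parameters that end up without any nodes are dropped. Returns a
    dict mapping parameter name -> list of node names.
    """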
tmp_params_allowed = model_config[model.model]["params"].copy()
if hasattr(model, "rlssm_model"):
if (
model.rlssm_model
): # TODO: Turns out basic hddm classes have rlssm_model attribute but set to false ....
tmp_params_allowed.extend(model_config_rl[model.rl_rule]["params"])
tmp_params_allowed.append("dc") # to accomodate HDDMStimcoding class
keys_by_param = {}
# Cycle through all nodes
for key_ in model.nodes_db.index:
if "offset" in key_:
continue
# Cycle through model relevant parameters
for param_tmp in tmp_params_allowed: # model_config[model.model]["params"]:
# Initialize param_tmp key if not yet done
if param_tmp not in keys_by_param.keys():
keys_by_param[param_tmp] = []
# Get ids of identifiers
param_id = key_.find(param_tmp)
underscore_id = key_.find(param_tmp + "_")
bracket_id = key_.find(param_tmp + "(")
# Take out 'trans' and 'tau' and observed nodes
if (
not ("trans" in key_)
and not ("tau" in key_)
and not ((param_tmp + "_reg") in key_)
and not ("_rate" in key_)
and not ("_shape" in key_)
and not (model.nodes_db.loc[key_].observed)
):
if param_id == 0:
if (bracket_id == 0) or (underscore_id == 0):
keys_by_param[param_tmp].append(key_)
elif key_ == param_tmp:
keys_by_param[param_tmp].append(key_)
    # Drop keys that didn't receive any stochastics
drop_list = []
for key_ in keys_by_param.keys():
if len(keys_by_param[key_]) == 0:
drop_list.append(key_)
for drop_key in drop_list:
del keys_by_param[drop_key]
return keys_by_param
def _group_traces_via_grouped_nodes(model, group_dict):
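    """Collect the traces of the nodes grouped by _group_node_names_by_param into
    one pandas DataFrame per model parameter. Returns a dict mapping parameter
    name -> DataFrame with one column per node trace.
    """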
grouped_traces = {}
for key_ in group_dict.keys():
tmp_traces = {}
tmp_nodes_db = model.nodes_db.loc[group_dict[key_], :]
for i in range(tmp_nodes_db.shape[0]):
tmp_traces[tmp_nodes_db.iloc[i].node.__str__()] = tmp_nodes_db.iloc[
i
].node.trace()
grouped_traces[key_] = pd.DataFrame.from_dict(tmp_traces, orient="columns")
return grouped_traces
def plot_caterpillar(
hddm_model=None,
ground_truth_parameter_dict=None,
drop_sd=True,
keep_key=None,
figsize=(10, 10),
columns=3,
save=False,
path=None,
format="png",
y_tick_size=10,
x_tick_size=10,
):
"""An alternative posterior predictive plot. Works for all models listed in hddm (e.g. 'ddm', 'angle', 'weibull', 'levy', 'ornstein')
Arguments:
hddm_model: hddm model object <default=None>
If you supply a ground truth model, the data you supplied to the hddm model should include trial by trial parameters.
ground_truth_parameter_dict: dict <default=None>
Parameter dictionary (for example coming out of the function simulator_h_c()) that provides ground truth values
for the parameters fit in the hddm_model.
drop_sd: bool <default=True>
Whether or not to drop group level standard deviations from the caterpillar plot.
This is sometimes useful because scales can be off if included.
        figsize: tuple <default=(10, 10)>
            Size of initial figure.
keep_key: list <default=None>
If you want to keep only a specific list of parameters in the caterpillar plot, supply those here as
a list. All other parameters for which you supply traces in the posterior samples are going to be ignored.
save: bool <default=False>
Whether to save the plot
format: str <default='png'>
File format in which to save the figure.
path: str <default=None>
Path in which to save the figure.
Return: plot object
"""
    if hddm_model is None:
        raise ValueError("No HDDM model object supplied.")
out = _group_node_names_by_param(model=hddm_model)
traces_by_param = _group_traces_via_grouped_nodes(model=hddm_model, group_dict=out)
ncolumns = columns
nrows = int(np.ceil(len(out.keys()) / ncolumns))
fig = plt.figure(figsize=figsize)
fig.suptitle("")
fig.subplots_adjust(top=1.0, hspace=0.2, wspace=0.4)
i = 1
for key_ in traces_by_param.keys():
ax = fig.add_subplot(nrows, ncolumns, i)
sns.despine(right=True, ax=ax)
traces_tmp = traces_by_param[key_]
ecdfs = {}
plot_vals = {} # [0.01, 0.9], [0.01, 0.99], [mean]
for k in traces_tmp.keys():
# If we want to keep only a specific parameter we skip all traces which don't include it in
# their names !
if keep_key is not None and k not in keep_key:
continue
            # Skip group-level standard deviations if drop_sd is set
            if "std" in k and drop_sd:
                pass
            else:
                ok_ = 1
                if drop_sd:
                    if "_sd" in k:
                        ok_ = 0
                if ok_:
                    # Make empirical CDFs and extract the 1st / 99th and 10th / 90th percentiles
                    print("tracename: ")
                    print(k)
if hasattr(hddm_model, "rlssm_model"):
if "rl_alpha" in k and not "std" in k:
vals = traces_tmp[k].values
transformed_trace = np.exp(vals) / (1 + np.exp(vals))
ecdfs[k] = ECDF(transformed_trace)
tmp_sorted = sorted(transformed_trace)
else:
ecdfs[k] = ECDF(traces_tmp[k].values)
tmp_sorted = sorted(traces_tmp[k].values)
else:
ecdfs[k] = ECDF(traces_tmp[k].values)
tmp_sorted = sorted(traces_tmp[k].values)
_p01 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.01) - 1]
_p99 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.99) - 1]
_p1 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.1) - 1]
_p9 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.9) - 1]
_pmean = traces_tmp[k].mean()
plot_vals[k] = [[_p01, _p99], [_p1, _p9], _pmean]
x = [plot_vals[k][2] for k in plot_vals.keys()]
# Create y-axis labels first
ax.scatter(x, plot_vals.keys(), c="black", marker="s", alpha=0)
i += 1
# Plot the actual cdf-based data
for k in plot_vals.keys():
ax.plot(plot_vals[k][1], [k, k], c="grey", zorder=-1, linewidth=5)
ax.plot(plot_vals[k][0], [k, k], c="black", zorder=-1)
# Add in ground truth if supplied
if ground_truth_parameter_dict is not None:
ax.scatter(ground_truth_parameter_dict[k], k, c="blue", marker="|")
ax.tick_params(axis="y", rotation=45)
ax.tick_params(axis="y", labelsize=y_tick_size)
ax.tick_params(axis="x", labelsize=x_tick_size)
if save:
fname = "caterpillar_" + hddm_model.model
if path is None:
path = "."
if isinstance(format, str):
format = [format]
print(["%s.%s" % (os.path.join(path, fname), x) for x in format])
[
fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
plt.show()
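
# Usage sketch (illustrative only): caterpillar plot of all fitted parameters of
# a placeholder, already-sampled model `m`; a ground-truth dictionary (e.g. from
# simulator_h_c()) is optional.
def _usage_sketch_caterpillar(m, ground_truth_dict=None):
    plot_caterpillar(
        hddm_model=m,
        ground_truth_parameter_dict=ground_truth_dict,
        drop_sd=True,
        columns=3,
        save=False,
    )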
"""
=== RLSSM functions ===
"""
def get_mean_correct_responses_rlssm(trials, nbins, data):
"""Gets the mean proportion of correct responses condition-wise.
Arguments:
trials: int
Number of initial trials to consider for computing proportion of correct responses.
nbins: int
Number of bins to put the trials into (Num. of trials per bin = trials/nbin).
data: pandas.DataFrame
Pandas DataFrame for the observed or simulated data.
Return:
mean_correct_responses: dict
Dictionary of conditions containing proportion of mean correct responses (for each bin).
up_err: dict
Dictionary of conditions containing upper intervals of HDI of mean correct responses (for each bin).
low_err: dict
Dictionary of conditions containing lower intervals of HDI of mean correct responses (for each bin).
"""
data_ppc = data[data.trial <= trials].copy()
data_ppc.loc[data_ppc["response"] < 1, "response"] = 0
data_ppc["bin_trial"] = np.array(
pd.cut(data_ppc.trial, nbins, labels=np.linspace(0, nbins, nbins))
)
sums = data_ppc.groupby(["bin_trial", "split_by", "trial"]).mean().reset_index()
ppc_sim = sums.groupby(["bin_trial", "split_by"]).mean().reset_index()
# initiate columns that will have the upper and lower bound of the hpd
ppc_sim["upper_hpd"] = 0
ppc_sim["lower_hpd"] = 0
for i in range(0, ppc_sim.shape[0]):
# calculate the hpd/hdi of the predicted mean responses across bin_trials
hdi = pymc.utils.hpd(
sums.response[
(sums["bin_trial"] == ppc_sim.bin_trial[i])
& (sums["split_by"] == ppc_sim.split_by[i])
],
alpha=0.05,
)
ppc_sim.loc[i, "upper_hpd"] = hdi[1]
ppc_sim.loc[i, "lower_hpd"] = hdi[0]
# calculate error term as the distance from upper bound to mean
ppc_sim["up_err"] = ppc_sim["upper_hpd"] - ppc_sim["response"]
ppc_sim["low_err"] = ppc_sim["response"] - ppc_sim["lower_hpd"]
mean_correct_responses = {}
up_err = {}
low_err = {}
for cond in np.unique(ppc_sim.split_by):
mean_correct_responses[cond] = ppc_sim[ppc_sim.split_by == cond]["response"]
up_err[cond] = ppc_sim[ppc_sim.split_by == cond]["up_err"]
low_err[cond] = ppc_sim[ppc_sim.split_by == cond]["low_err"]
return mean_correct_responses, up_err, low_err
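
# Usage sketch (illustrative only): binned accuracy curves for observed and
# simulated data. `obs_data` and `sim_data` are placeholder DataFrames with
# 'trial', 'response' and 'split_by' columns; 40 trials split into 10 bins is an
# arbitrary example choice.
def _usage_sketch_binned_accuracy(obs_data, sim_data):
    res_obs, up_obs, low_obs = get_mean_correct_responses_rlssm(40, 10, obs_data)
    res_sim, up_sim, low_sim = get_mean_correct_responses_rlssm(40, 10, sim_data)
    return (res_obs, up_obs, low_obs), (res_sim, up_sim, low_sim)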
def gen_ppc_rlssm(
model_ssm,
config_ssm,
model_rl,
config_rl,
data,
traces,
nsamples,
p_lower,
p_upper,
save_data=False,
save_name=None,
save_path=None,
):
"""Generates data (for posterior predictives) using samples from the given trace as parameters.
Arguments:
model_ssm: str
Name of the sequential sampling model used.
config_ssm: dict
Config dictionary for the specified sequential sampling model.
model_rl: str
Name of the reinforcement learning model used.
config_rl: dict
Config dictionary for the specified reinforcement learning model.
data: pandas.DataFrame
Pandas DataFrame for the observed data.
traces: pandas.DataFrame
Pandas DataFrame containing the traces.
nsamples: int
Number of posterior samples to draw for each subject.
p_lower: dict
Dictionary of conditions containing the probability of reward for the lower choice/action in the 2-armed bandit task.
p_upper: dict
Dictionary of conditions containing the probability of reward for the upper choice/action in the 2-armed bandit task.
save_data: bool <default=False>
Boolean denoting whether to save the data as csv.
save_name: str <default=None>
Specifies filename to save the data.
save_path: str <default=None>
Specifies path to save the data.
Return:
ppc_sdata: pandas.DataFrame
Pandas DataFrame containing the simulated data (for posterior predictives).
"""
def transform_param(param, param_val):
if param == "rl_alpha":
transformed_param_val = np.exp(param_val) / (1 + np.exp(param_val))
else:
transformed_param_val = param_val
return transformed_param_val
sim_data = pd.DataFrame()
    for i in tqdm(range(1, nsamples + 1)):
sample = np.random.randint(0, traces.shape[0] - 1)
for subj in data.subj_idx.unique():
sub_data = pd.DataFrame()
for cond in np.unique(data.split_by):
sampled_param_ssm = list()
for p in config_ssm["params"]:
p_val = traces.loc[sample, p + "_subj." + str(subj)]
p_val = transform_param(p, p_val)
sampled_param_ssm.append(p_val)
sampled_param_rl = list()
for p in config_rl["params"]:
p_val = traces.loc[sample, p + "_subj." + str(subj)]
p_val = transform_param(p, p_val)
sampled_param_rl.append(p_val)
cond_size = len(
data[
(data["subj_idx"] == subj) & (data["split_by"] == cond)
].trial.unique()
)
ind_cond_data = gen_rand_rlssm_data_MAB_RWupdate(
model_ssm,
sampled_param_ssm,
sampled_param_rl,
size=cond_size,
p_lower=p_lower[cond],
p_upper=p_upper[cond],
subjs=1,
split_by=cond,
)
# append the conditions
# sub_data = sub_data.append([ind_cond_data], ignore_index=False)
sub_data = pd.concat([sub_data, ind_cond_data], ignore_index=False)
# assign subj_idx
sub_data["subj_idx"] = subj
# identify the simulated data
sub_data["samp"] = i
# append data from each subject
# sim_data = sim_data.append(sub_data, ignore_index=True)
sim_data = pd.concat([sim_data, sub_data], ignore_index=True)
ppc_sdata = sim_data[
["subj_idx", "response", "split_by", "rt", "trial", "feedback", "samp"]
].copy()
if save_data:
if save_name is None:
save_name = "ppc_data"
if save_path is None:
save_path = "."
ppc_sdata.to_csv("%s.%s" % (os.path.join(save_path, save_name), "csv"))
print("ppc data saved at %s.%s" % (os.path.join(save_path, save_name), "csv"))
return ppc_sdata
def plot_ppc_choice_rlssm(
obs_data, sim_data, trials, nbins, save_fig=False, save_name=None, save_path=None
):
"""Plot posterior preditive plot for choice data.
Arguments:
obs_data: pandas.DataFrame
Pandas DataFrame for the observed data.
sim_data: pandas.DataFrame
Pandas DataFrame for the simulated data.
trials: int
Number of initial trials to consider for computing proportion of correct responses.
nbins: int
Number of bins to put the trials into (Num. of trials per bin = trials/nbin).
save_fig: bool <default=False>
Boolean denoting whether to save the plot.
save_name: str <default=None>
Specifies filename to save the figure.
save_path: str <default=None>
Specifies path to save the figure.
Return:
fig: matplotlib.Figure
plot object
"""
res_obs, up_err_obs, low_err_obs = get_mean_correct_responses_rlssm(
trials, nbins, obs_data
)
res_sim, up_err_sim, low_err_sim = get_mean_correct_responses_rlssm(
trials, nbins, sim_data
)
cond_list = np.unique(obs_data.split_by)
rows = 1
cols = len(cond_list)
fig, ax = plt.subplots(rows, cols, constrained_layout=False, tight_layout=True)
cond_index = 0
for ay in range(cols):
cond = cond_list[cond_index]
ax[ay].errorbar(
1 + np.arange(len(res_obs[cond])),
res_obs[cond],
yerr=[low_err_obs[cond], up_err_obs[cond]],
label="observed",
color="royalblue",
)
ax[ay].errorbar(
1 + np.arange(len(res_sim[cond])),
res_sim[cond],
yerr=[low_err_sim[cond], up_err_sim[cond]],
label="simulated",
color="tomato",
)
ax[ay].set_ylim((0, 1))
# ax[ay].legend()
ax[ay].set_title("split_by=" + str(cond), fontsize=12)
ax[ay].grid()
cond_index += 1
fig = plt.gcf()
lines, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(lines, labels, loc="lower right")
fig.supxlabel("Trial bins", fontsize=12)
fig.supylabel("Proportion of Correct Responses", fontsize=12)
fig.set_size_inches(4 * len(cond_list), 4)
if save_fig:
if save_name is None:
save_name = "ppc_choice"
if save_path is None:
save_path = "."
fig.savefig("%s.%s" % (os.path.join(save_path, save_name), "png"))
print("fig saved at %s.%s" % (os.path.join(save_path, save_name), "png"))
return fig
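# Illustrative usage sketch (not part of the original module). Here `data` is the
# observed DataFrame and `ppc_sdata` is the simulated data returned by the PPC
# generation function above; the trial count and bin size are arbitrary examples.
#
#     fig = plot_ppc_choice_rlssm(obs_data=data, sim_data=ppc_sdata, trials=40, nbins=10)
#     fig.savefig("ppc_choice_example.png")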
def plot_ppc_rt_rlssm(
obs_data, sim_data, trials, bw=0.1, save_fig=False, save_name=None, save_path=None
):
"""Plot posterior preditive plot for reaction time data.
Arguments:
obs_data: pandas.DataFrame
Pandas DataFrame for the observed data.
sim_data: pandas.DataFrame
Pandas DataFrame for the simulated data.
        trials: int
            Number of initial trials to include when plotting the reaction time distributions.
bw: float <default=0.1>
Bandwidth parameter for kernel-density estimates.
save_fig: bool <default=False>
Boolean denoting whether to save the plot.
save_name: str <default=None>
Specifies filename to save the figure.
save_path: str <default=None>
Specifies path to save the figure.
Return:
fig: matplotlib.Figure
plot object
"""
obs_data_ppc = obs_data[obs_data.trial <= trials].copy()
sim_data_ppc = sim_data[sim_data.trial <= trials].copy()
cond_list = np.unique(obs_data.split_by)
rows = 1
cols = len(cond_list)
fig, ax = plt.subplots(rows, cols, constrained_layout=False, tight_layout=True)
cond_index = 0
for ay in range(cols):
cond = cond_list[cond_index]
rt_ppc_sim = np.where(
sim_data_ppc[sim_data_ppc.split_by == cond].response == 1,
sim_data_ppc[sim_data_ppc.split_by == cond].rt,
0 - sim_data_ppc[sim_data_ppc.split_by == cond].rt,
)
rt_ppc_obs = np.where(
obs_data_ppc[obs_data_ppc.split_by == cond].response == 1,
obs_data_ppc[obs_data_ppc.split_by == cond].rt,
0 - obs_data_ppc[obs_data_ppc.split_by == cond].rt,
)
sns.kdeplot(
rt_ppc_sim, label="simulated", color="tomato", ax=ax[ay], bw_method=bw
).set(ylabel=None)
sns.kdeplot(
rt_ppc_obs, label="observed", color="royalblue", ax=ax[ay], bw_method=bw
).set(ylabel=None)
# ax[ay].legend()
ax[ay].set_title("split_by=" + str(cond), fontsize=12)
ax[ay].grid()
cond_index += 1
fig = plt.gcf()
lines, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(lines, labels, loc="lower right")
fig.supxlabel("Reaction Time", fontsize=12)
fig.supylabel("Density", fontsize=12)
fig.set_size_inches(4 * len(cond_list), 4)
if save_fig:
if save_name is None:
save_name = "ppc_rt"
if save_path is None:
save_path = "."
fig.savefig("%s.%s" % (os.path.join(save_path, save_name), "png"))
print("fig saved at %s.%s" % (os.path.join(save_path, save_name), "png"))
return fig
def plot_posterior_pairs_rlssm(
tracefile, param_list, save_fig=False, save_name=None, save_path=None, **kwargs
):
"""Plot posterior pairs.
Arguments:
tracefile: dict
Dictionary containing the traces.
param_list: list
List of model parameters to be included in the posterior pair plots.
save_fig: bool <default=False>
Boolean denoting whether to save the plot.
save_name: str <default=None>
Specifies filename to save the figure.
save_path: str <default=None>
Specifies path to save the figure.
Return:
fig: matplotlib.Figure
plot object
"""
traces = hddm.utils.get_traces_rlssm(tracefile)
tr = traces.copy()
tr_trunc = tr[param_list]
tr_dataset = az.dict_to_dataset(tr_trunc)
tr_inf_data = az.convert_to_inference_data(tr_dataset)
axes = az.plot_pair(
tr_inf_data,
kind="kde",
marginals=True,
point_estimate="mean",
textsize=20,
**kwargs,
)
fig = axes.ravel()[0].figure
if save_fig:
if save_name is None:
save_name = "posterior_pair"
if save_path is None:
save_path = "."
fig.savefig("%s.%s" % (os.path.join(save_path, save_name), "png"))
print("fig saved at %s.%s" % (os.path.join(save_path, save_name), "png"))
    return fig
/Bonerator-0.1.1.tar.gz/Bonerator-0.1.1/bonerator/content/content.py
from typing import Union
class ContentBase(object):
# def to_dict(self):
# raise NotImplementedError
def latex(self, indent=""):
"""
        :param indent: indentation prefix
        :return: LaTeX2e code
"""
raise NotImplementedError
def latex3(self, indent=""):
return self.latex(indent)
class Content(ContentBase):
def __init__(self, text: str, mode: str = "paragraph"):
escape = r"\{}%"
for e in escape:
text = text.replace(e, "\\" + e)
self.text = text
self.mode = mode
def to_dict(self):
return self.text
def latex(self, indent=""):
if self.mode == "paragraph":
out = ""
for t in self.text.split("\n"):
out += f"{indent}{t}\n"
return out
else:
return self.text
class ContentList(ContentBase):
def __init__(self, *contents: Union[str, ContentBase], start="", separator="\n", end="\n"):
self.start = start
self.separator = separator
self.end = end
if contents:
self.contents = list(contents)
else:
self.contents = []
def to_dict(self):
out_dict = []
for c in self.contents:
if isinstance(c, str):
out_dict.append(c)
else:
c = c.to_dict()
if isinstance(c, list):
out_dict.extend(c)
else:
out_dict.append(c)
return out_dict
def __len__(self):
return len(self.contents)
def __iter__(self):
for out in self.contents:
yield out
def __getitem__(self, item):
return self.contents[item]
def append(self, *obj):
self.contents.extend(obj)
def latex(self, indent=""):
contents = []
if "\n" in self.separator:
new_indent = indent
else:
new_indent = ""
        start = self.start + indent  # use a local copy so repeated latex() calls do not keep growing self.start
for c in self.contents:
if hasattr(c, "latex"):
contents.append(c.latex(new_indent))
else:
contents.append(f"{new_indent}{str(c)}")
        contents = start + self.separator.join(contents) + self.end
return contents
class Attribute(Content):
def __init__(self):
super().__init__()
class Element(Content):
def __init__(self):
        super().__init__()
/Bugs%20Everywhere%20(BEurtle%20fork)-1.5.0.1.-2012-07-16-.zip/Bugs Everywhere (BEurtle fork)-1.5.0.1.-2012-07-16-/libbe/command/base.py
import codecs
import optparse
import os.path
import StringIO
import sys
import libbe
import libbe.storage
import libbe.ui.util.user
import libbe.util.encoding
import libbe.util.plugin
class UserError (Exception):
"An error due to improper BE usage."
pass
class UsageError (UserError):
"""A serious parsing error due to invalid BE command construction.
The distinction between `UserError`\s and the more specific
`UsageError`\s is that when displaying a `UsageError` to the user,
the user is pointed towards the command usage information. Use
the more general `UserError` if you feel that usage information
would not be particularly enlightening.
"""
def __init__(self, command=None, command_name=None, message=None):
super(UsageError, self).__init__(message)
self.command = command
if command_name is None and command is not None:
command_name = command.name
self.command_name = command_name
self.message = message
class UnknownCommand (UsageError):
def __init__(self, command_name, message=None):
uc_message = "Unknown command '%s'" % command_name
if message is None:
message = uc_message
else:
message = '%s\n(%s)' % (uc_message, message)
super(UnknownCommand, self).__init__(
command_name=command_name,
message=message)
def get_command(command_name):
"""Retrieves the module for a user command
>>> try:
... get_command('asdf')
... except UnknownCommand, e:
... print e
Unknown command 'asdf'
(No module named asdf)
>>> repr(get_command('list')).startswith("<module 'libbe.command.list' from ")
True
"""
try:
cmd = libbe.util.plugin.import_by_name(
'libbe.command.%s' % command_name.replace("-", "_"))
except ImportError, e:
raise UnknownCommand(command_name, message=unicode(e))
return cmd
def get_command_class(module=None, command_name=None):
"""Retrieves a command class from a module.
>>> import_xml_mod = get_command('import-xml')
>>> import_xml = get_command_class(import_xml_mod, 'import-xml')
>>> repr(import_xml)
"<class 'libbe.command.import_xml.Import_XML'>"
>>> import_xml = get_command_class(command_name='import-xml')
>>> repr(import_xml)
"<class 'libbe.command.import_xml.Import_XML'>"
"""
if module == None:
module = get_command(command_name)
try:
cname = command_name.capitalize().replace('-', '_')
cmd = getattr(module, cname)
except ImportError, e:
raise UnknownCommand(command_name)
return cmd
def modname_to_command_name(modname):
"""Little hack to replicate
>>> import sys
>>> def real_modname_to_command_name(modname):
... mod = libbe.util.plugin.import_by_name(
... 'libbe.command.%s' % modname)
... attrs = [getattr(mod, name) for name in dir(mod)]
... commands = []
... for attr_name in dir(mod):
... attr = getattr(mod, attr_name)
... try:
... if issubclass(attr, Command):
... commands.append(attr)
... except TypeError, e:
... pass
... if len(commands) == 0:
... raise Exception('No Command classes in %s' % dir(mod))
... return commands[0].name
>>> real_modname_to_command_name('new')
'new'
>>> real_modname_to_command_name('import_xml')
'import-xml'
"""
return modname.replace('_', '-')
def commands(command_names=False):
for modname in libbe.util.plugin.modnames('libbe.command'):
if modname not in ['base', 'util']:
if command_names == False:
yield modname
else:
yield modname_to_command_name(modname)
class CommandInput (object):
def __init__(self, name, help=''):
self.name = name
self.help = help
def __str__(self):
return '<%s %s>' % (self.__class__.__name__, self.name)
def __repr__(self):
return self.__str__()
class Argument (CommandInput):
def __init__(self, metavar=None, default=None, type='string',
optional=False, repeatable=False,
completion_callback=None, *args, **kwargs):
CommandInput.__init__(self, *args, **kwargs)
self.metavar = metavar
self.default = default
self.type = type
self.optional = optional
self.repeatable = repeatable
self.completion_callback = completion_callback
if self.metavar == None:
self.metavar = self.name.upper()
class Option (CommandInput):
def __init__(self, callback=None, short_name=None, arg=None,
*args, **kwargs):
CommandInput.__init__(self, *args, **kwargs)
self.callback = callback
self.short_name = short_name
self.arg = arg
if self.arg == None and self.callback == None:
# use an implicit boolean argument
self.arg = Argument(name=self.name, help=self.help,
default=False, type='bool')
self.validate()
def validate(self):
if self.arg == None:
assert self.callback != None, self.name
return
        assert self.callback == None, '%s: %s' % (self.name, self.callback)
        assert self.arg.name == self.name, \
            'Name mismatch: %s != %s' % (self.arg.name, self.name)
assert self.arg.optional == False, self.name
assert self.arg.repeatable == False, self.name
def __str__(self):
return '--%s' % self.name
def __repr__(self):
return '<Option %s>' % self.__str__()
class _DummyParser (optparse.OptionParser):
def __init__(self, command):
optparse.OptionParser.__init__(self)
self.remove_option('-h')
self.command = command
self._command_opts = []
for option in self.command.options:
self._add_option(option)
def _add_option(self, option):
# from libbe.ui.command_line.CmdOptionParser._add_option
option.validate()
long_opt = '--%s' % option.name
if option.short_name != None:
short_opt = '-%s' % option.short_name
assert '_' not in option.name, \
'Non-reconstructable option name %s' % option.name
kwargs = {'dest':option.name.replace('-', '_'),
'help':option.help}
if option.arg == None or option.arg.type == 'bool':
kwargs['action'] = 'store_true'
kwargs['metavar'] = None
kwargs['default'] = False
else:
kwargs['type'] = option.arg.type
kwargs['action'] = 'store'
kwargs['metavar'] = option.arg.metavar
kwargs['default'] = option.arg.default
if option.short_name != None:
opt = optparse.Option(short_opt, long_opt, **kwargs)
else:
opt = optparse.Option(long_opt, **kwargs)
#option.takes_value = lambda : option.arg != None
opt._option = option
self._command_opts.append(opt)
self.add_option(opt)
class OptionFormatter (optparse.IndentedHelpFormatter):
def __init__(self, command):
optparse.IndentedHelpFormatter.__init__(self)
self.command = command
def option_help(self):
# based on optparse.OptionParser.format_option_help()
parser = _DummyParser(self.command)
self.store_option_strings(parser)
ret = []
ret.append(self.format_heading('Options'))
self.indent()
for option in parser._command_opts:
ret.append(self.format_option(option))
ret.append('\n')
self.dedent()
# Drop the last '\n', or the header if no options or option groups:
return ''.join(ret[:-1])
class Command (object):
"""One-line command description here.
>>> c = Command()
>>> print c.help()
usage: be command [options]
<BLANKLINE>
Options:
-h, --help Print a help message.
<BLANKLINE>
--complete Print a list of possible completions.
<BLANKLINE>
A detailed help message.
"""
name = 'command'
def __init__(self, ui=None):
self.ui = ui # calling user-interface
self.status = None
self.result = None
self.restrict_file_access = True
self.options = [
Option(name='help', short_name='h',
help='Print a help message.',
callback=self.help),
Option(name='complete',
help='Print a list of possible completions.',
callback=self.complete),
]
self.args = []
def run(self, options=None, args=None):
self.status = 1 # in case we raise an exception
params = self._parse_options_args(options, args)
if params['help'] == True:
pass
else:
params.pop('help')
if params['complete'] != None:
pass
else:
params.pop('complete')
self.status = self._run(**params)
return self.status
def _parse_options_args(self, options=None, args=None):
if options == None:
options = {}
if args == None:
args = []
params = {}
for option in self.options:
assert option.name not in params, params[option.name]
if option.name in options:
params[option.name] = options.pop(option.name)
elif option.arg != None:
params[option.name] = option.arg.default
else: # non-arg options are flags, set to default flag value
params[option.name] = False
assert 'user-id' not in params, params['user-id']
if 'user-id' in options:
self._user_id = options.pop('user-id')
if len(options) > 0:
raise UserError, 'Invalid option passed to command %s:\n %s' \
% (self.name, '\n '.join(['%s: %s' % (k,v)
for k,v in options.items()]))
in_optional_args = False
for i,arg in enumerate(self.args):
if arg.repeatable == True:
assert i == len(self.args)-1, arg.name
if in_optional_args == True:
assert arg.optional == True, arg.name
else:
in_optional_args = arg.optional
if i < len(args):
if arg.repeatable == True:
params[arg.name] = [args[i]]
else:
params[arg.name] = args[i]
else: # no value given
assert in_optional_args == True, arg.name
params[arg.name] = arg.default
if len(args) > len(self.args): # add some additional repeats
assert self.args[-1].repeatable == True, self.args[-1].name
params[self.args[-1].name].extend(args[len(self.args):])
return params
def _run(self, **kwargs):
raise NotImplementedError
def help(self, *args):
return '\n\n'.join([self.usage(),
self._option_help(),
self._long_help().rstrip('\n')])
def usage(self):
usage = 'usage: be %s [options]' % self.name
num_optional = 0
for arg in self.args:
usage += ' '
if arg.optional == True:
usage += '['
num_optional += 1
usage += arg.metavar
if arg.repeatable == True:
usage += ' ...'
usage += ']'*num_optional
return usage
def _option_help(self):
o = OptionFormatter(self)
return o.option_help().strip('\n')
def _long_help(self):
return "A detailed help message."
def complete(self, argument=None, fragment=None):
if argument == None:
ret = ['--%s' % o.name for o in self.options
if o.name != 'complete']
if len(self.args) > 0 and self.args[0].completion_callback != None:
ret.extend(self.args[0].completion_callback(self, argument, fragment))
return ret
elif argument.completion_callback != None:
# finish a particular argument
return argument.completion_callback(self, argument, fragment)
return [] # the particular argument doesn't supply completion info
def _check_restricted_access(self, storage, path):
"""
Check that the file at path is inside bugdir.root. This is
important if you allow other users to execute becommands with
your username (e.g. if you're running be-handle-mail through
your ~/.procmailrc). If this check wasn't made, a user could
e.g. run
be commit -b ~/.ssh/id_rsa "Hack to expose ssh key"
which would expose your ssh key to anyone who could read the
VCS log.
>>> class DummyStorage (object): pass
>>> s = DummyStorage()
>>> s.repo = os.path.expanduser('~/x/')
>>> c = Command()
>>> try:
... c._check_restricted_access(s, os.path.expanduser('~/.ssh/id_rsa'))
... except UserError, e:
... assert str(e).startswith('file access restricted!'), str(e)
... print 'we got the expected error'
we got the expected error
>>> c._check_restricted_access(s, os.path.expanduser('~/x'))
>>> c._check_restricted_access(s, os.path.expanduser('~/x/y'))
>>> c.restrict_file_access = False
>>> c._check_restricted_access(s, os.path.expanduser('~/.ssh/id_rsa'))
"""
if self.restrict_file_access == True:
path = os.path.abspath(path)
repo = os.path.abspath(storage.repo).rstrip(os.path.sep)
if path == repo or path.startswith(repo+os.path.sep):
return
raise UserError('file access restricted!\n %s not in %s'
% (path, repo))
def cleanup(self):
pass
class InputOutput (object):
def __init__(self, stdin=None, stdout=None):
self.stdin = stdin
self.stdout = stdout
def setup_command(self, command):
if not hasattr(self.stdin, 'encoding'):
self.stdin.encoding = libbe.util.encoding.get_input_encoding()
if not hasattr(self.stdout, 'encoding'):
self.stdout.encoding = libbe.util.encoding.get_output_encoding()
command.stdin = self.stdin
command.stdin.encoding = self.stdin.encoding
command.stdout = self.stdout
command.stdout.encoding = self.stdout.encoding
def cleanup(self):
pass
class StdInputOutput (InputOutput):
def __init__(self, input_encoding=None, output_encoding=None):
stdin,stdout = self._get_io(input_encoding, output_encoding)
InputOutput.__init__(self, stdin, stdout)
def _get_io(self, input_encoding=None, output_encoding=None):
if input_encoding == None:
input_encoding = libbe.util.encoding.get_input_encoding()
if output_encoding == None:
output_encoding = libbe.util.encoding.get_output_encoding()
stdin = codecs.getreader(input_encoding)(sys.stdin)
stdin.encoding = input_encoding
stdout = codecs.getwriter(output_encoding)(sys.stdout)
stdout.encoding = output_encoding
return (stdin, stdout)
class StringInputOutput (InputOutput):
"""
>>> s = StringInputOutput()
>>> s.set_stdin('hello')
>>> s.stdin.read()
'hello'
>>> s.stdin.read()
''
>>> print >> s.stdout, 'goodbye'
>>> s.get_stdout()
'goodbye\\n'
>>> s.get_stdout()
''
Also works with unicode strings
>>> s.set_stdin(u'hello')
>>> s.stdin.read()
u'hello'
>>> print >> s.stdout, u'goodbye'
>>> s.get_stdout()
u'goodbye\\n'
"""
def __init__(self):
stdin = StringIO.StringIO()
stdin.encoding = 'utf-8'
stdout = StringIO.StringIO()
stdout.encoding = 'utf-8'
InputOutput.__init__(self, stdin, stdout)
def set_stdin(self, stdin_string):
self.stdin = StringIO.StringIO(stdin_string)
def get_stdout(self):
ret = self.stdout.getvalue()
self.stdout = StringIO.StringIO() # clear stdout for next read
        self.stdout.encoding = 'utf-8'  # restore the encoding attribute on the fresh StringIO
return ret
class UnconnectedStorageGetter (object):
def __init__(self, location):
self.location = location
def __call__(self):
return libbe.storage.get_storage(self.location)
class StorageCallbacks (object):
def __init__(self, location=None):
if location == None:
location = '.'
self.location = location
self._get_unconnected_storage = UnconnectedStorageGetter(location)
def setup_command(self, command):
command._get_unconnected_storage = self.get_unconnected_storage
command._get_storage = self.get_storage
command._get_bugdir = self.get_bugdir
def get_unconnected_storage(self):
"""
Callback for use by commands that need it.
        The returned Storage instance may actually be connected,
but commands that make use of the returned value should only
make use of non-connected Storage methods. This is mainly
intended for the init command, which calls Storage.init().
"""
if not hasattr(self, '_unconnected_storage'):
if self._get_unconnected_storage == None:
raise NotImplementedError
self._unconnected_storage = self._get_unconnected_storage()
return self._unconnected_storage
def set_unconnected_storage(self, unconnected_storage):
self._unconnected_storage = unconnected_storage
def get_storage(self):
"""Callback for use by commands that need it."""
if not hasattr(self, '_storage'):
self._storage = self.get_unconnected_storage()
self._storage.connect()
version = self._storage.storage_version()
if version != libbe.storage.STORAGE_VERSION:
raise libbe.storage.InvalidStorageVersion(version)
return self._storage
def set_storage(self, storage):
self._storage = storage
def get_bugdir(self):
"""Callback for use by commands that need it."""
if not hasattr(self, '_bugdir'):
self._bugdir = libbe.bugdir.BugDir(self.get_storage(),
from_storage=True)
return self._bugdir
def set_bugdir(self, bugdir):
self._bugdir = bugdir
def cleanup(self):
if hasattr(self, '_storage'):
self._storage.disconnect()
class UserInterface (object):
def __init__(self, io=None, location=None):
if io == None:
io = StringInputOutput()
self.io = io
self.storage_callbacks = StorageCallbacks(location)
self.restrict_file_access = True
def help(self):
raise NotImplementedError
def run(self, command, options=None, args=None):
self.setup_command(command)
return command.run(options, args)
def setup_command(self, command):
if command.ui == None:
command.ui = self
if self.io != None:
self.io.setup_command(command)
if self.storage_callbacks != None:
self.storage_callbacks.setup_command(command)
command.restrict_file_access = self.restrict_file_access
command._get_user_id = self._get_user_id
def _get_user_id(self):
"""Callback for use by commands that need it."""
if not hasattr(self, '_user_id'):
self._user_id = libbe.ui.util.user.get_user_id(
self.storage_callbacks.get_storage())
return self._user_id
def cleanup(self):
self.storage_callbacks.cleanup()
        self.io.cleanup()
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/_source/lang/sr.js
/*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
/**
* @fileOverview Defines the {@link CKEDITOR.lang} object, for the
* Serbian (Cyrillic) language.
*/
/**#@+
@type String
@example
*/
/**
 * Contains the dictionary of language entries.
* @namespace
*/
CKEDITOR.lang['sr'] =
{
/**
* The language reading direction. Possible values are "rtl" for
* Right-To-Left languages (like Arabic) and "ltr" for Left-To-Right
* languages (like English).
* @default 'ltr'
*/
dir : 'ltr',
/*
* Screenreader titles. Please note that screenreaders are not always capable
* of reading non-English words. So be careful while translating it.
*/
editorTitle : 'Rich text editor, %1, press ALT 0 for help.', // MISSING
// ARIA descriptions.
toolbar : 'Toolbar', // MISSING
editor : 'Rich Text Editor', // MISSING
// Toolbar buttons without dialogs.
source : 'Kôд',
newPage : 'Нова страница',
save : 'Сачувај',
preview : 'Изглед странице',
cut : 'Исеци',
copy : 'Копирај',
paste : 'Залепи',
print : 'Штампа',
underline : 'Подвучено',
bold : 'Подебљано',
italic : 'Курзив',
selectAll : 'Означи све',
removeFormat : 'Уклони форматирање',
strike : 'Прецртано',
subscript : 'Индекс',
superscript : 'Степен',
horizontalrule : 'Унеси хоризонталну линију',
pagebreak : 'Insert Page Break for Printing', // MISSING
unlink : 'Уклони линк',
undo : 'Поништи акцију',
redo : 'Понови акцију',
// Common messages and labels.
common :
{
browseServer : 'Претражи сервер',
url : 'УРЛ',
protocol : 'Протокол',
upload : 'Пошаљи',
uploadSubmit : 'Пошаљи на сервер',
image : 'Слика',
flash : 'Флеш елемент',
form : 'Форма',
checkbox : 'Поље за потврду',
radio : 'Радио-дугме',
textField : 'Текстуално поље',
textarea : 'Зона текста',
hiddenField : 'Скривено поље',
button : 'Дугме',
select : 'Изборно поље',
imageButton : 'Дугме са сликом',
notSet : '<није постављено>',
id : 'Ид',
name : 'Назив',
langDir : 'Смер језика',
langDirLtr : 'С лева на десно (LTR)',
langDirRtl : 'С десна на лево (RTL)',
langCode : 'Kôд језика',
longDescr : 'Пун опис УРЛ',
cssClass : 'Stylesheet класе',
advisoryTitle : 'Advisory наслов',
cssStyle : 'Стил',
ok : 'OK',
cancel : 'Oткажи',
close : 'Close', // MISSING
preview : 'Preview', // MISSING
generalTab : 'General', // MISSING
advancedTab : 'Напредни тагови',
validateNumberFailed : 'This value is not a number.', // MISSING
confirmNewPage : 'Any unsaved changes to this content will be lost. Are you sure you want to load new page?', // MISSING
confirmCancel : 'Some of the options have been changed. Are you sure to close the dialog?', // MISSING
options : 'Options', // MISSING
target : 'Target', // MISSING
targetNew : 'New Window (_blank)', // MISSING
targetTop : 'Topmost Window (_top)', // MISSING
targetSelf : 'Same Window (_self)', // MISSING
targetParent : 'Parent Window (_parent)', // MISSING
langDirLTR : 'Left to Right (LTR)', // MISSING
langDirRTL : 'Right to Left (RTL)', // MISSING
styles : 'Style', // MISSING
cssClasses : 'Stylesheet Classes', // MISSING
// Put the voice-only part of the label in the span.
unavailable : '%1<span class="cke_accessibility">, unavailable</span>' // MISSING
},
contextmenu :
{
options : 'Context Menu Options' // MISSING
},
// Special char dialog.
specialChar :
{
toolbar : 'Унеси специјални карактер',
title : 'Одаберите специјални карактер',
options : 'Special Character Options' // MISSING
},
// Link dialog.
link :
{
toolbar : 'Унеси/измени линк',
other : '<other>', // MISSING
menu : 'Промени линк',
title : 'Линк',
info : 'Линк инфо',
target : 'Meтa',
upload : 'Пошаљи',
advanced : 'Напредни тагови',
type : 'Врста линка',
toUrl : 'URL', // MISSING
toAnchor : 'Сидро на овој страници',
toEmail : 'Eлектронска пошта',
targetFrame : '<оквир>',
targetPopup : '<искачући прозор>',
targetFrameName : 'Назив одредишног фрејма',
targetPopupName : 'Назив искачућег прозора',
popupFeatures : 'Могућности искачућег прозора',
popupResizable : 'Resizable', // MISSING
popupStatusBar : 'Статусна линија',
popupLocationBar: 'Локација',
popupToolbar : 'Toolbar',
popupMenuBar : 'Контекстни мени',
popupFullScreen : 'Приказ преко целог екрана (ИE)',
popupScrollBars : 'Скрол бар',
popupDependent : 'Зависно (Netscape)',
popupWidth : 'Ширина',
popupLeft : 'Од леве ивице екрана (пиксела)',
popupHeight : 'Висина',
popupTop : 'Од врха екрана (пиксела)',
id : 'Id', // MISSING
langDir : 'Смер језика',
langDirLTR : 'С лева на десно (LTR)',
langDirRTL : 'С десна на лево (RTL)',
acccessKey : 'Приступни тастер',
name : 'Назив',
langCode : 'Смер језика',
tabIndex : 'Таб индекс',
advisoryTitle : 'Advisory наслов',
advisoryContentType : 'Advisory врста садржаја',
cssClasses : 'Stylesheet класе',
charset : 'Linked Resource Charset',
styles : 'Стил',
selectAnchor : 'Одабери сидро',
anchorName : 'По називу сидра',
anchorId : 'Пo Ид-jу елемента',
emailAddress : 'Адреса електронске поште',
emailSubject : 'Наслов',
emailBody : 'Садржај поруке',
noAnchors : '(Нема доступних сидра)',
noUrl : 'Унесите УРЛ линка',
noEmail : 'Откуцајте адресу електронске поште'
},
// Anchor dialog
anchor :
{
toolbar : 'Унеси/измени сидро',
menu : 'Особине сидра',
title : 'Особине сидра',
name : 'Име сидра',
errorName : 'Молимо Вас да унесете име сидра'
},
// List style dialog
list:
{
numberedTitle : 'Numbered List Properties', // MISSING
bulletedTitle : 'Bulleted List Properties', // MISSING
type : 'Type', // MISSING
start : 'Start', // MISSING
validateStartNumber :'List start number must be a whole number.', // MISSING
circle : 'Circle', // MISSING
disc : 'Disc', // MISSING
square : 'Square', // MISSING
none : 'None', // MISSING
notset : '<not set>', // MISSING
armenian : 'Armenian numbering', // MISSING
georgian : 'Georgian numbering (an, ban, gan, etc.)', // MISSING
lowerRoman : 'Lower Roman (i, ii, iii, iv, v, etc.)', // MISSING
upperRoman : 'Upper Roman (I, II, III, IV, V, etc.)', // MISSING
lowerAlpha : 'Lower Alpha (a, b, c, d, e, etc.)', // MISSING
upperAlpha : 'Upper Alpha (A, B, C, D, E, etc.)', // MISSING
lowerGreek : 'Lower Greek (alpha, beta, gamma, etc.)', // MISSING
decimal : 'Decimal (1, 2, 3, etc.)', // MISSING
decimalLeadingZero : 'Decimal leading zero (01, 02, 03, etc.)' // MISSING
},
// Find And Replace Dialog
findAndReplace :
{
title : 'Find and Replace', // MISSING
find : 'Претрага',
replace : 'Замена',
findWhat : 'Пронађи:',
replaceWith : 'Замени са:',
notFoundMsg : 'Тражени текст није пронађен.',
matchCase : 'Разликуј велика и мала слова',
matchWord : 'Упореди целе речи',
matchCyclic : 'Match cyclic', // MISSING
replaceAll : 'Замени све',
replaceSuccessMsg : '%1 occurrence(s) replaced.' // MISSING
},
// Table Dialog
table :
{
toolbar : 'Табела',
title : 'Особине табеле',
menu : 'Особине табеле',
deleteTable : 'Delete Table', // MISSING
rows : 'Редова',
columns : 'Kолона',
border : 'Величина оквира',
align : 'Равнање',
alignLeft : 'Лево',
alignCenter : 'Средина',
alignRight : 'Десно',
width : 'Ширина',
widthPx : 'пиксела',
widthPc : 'процената',
widthUnit : 'width unit', // MISSING
height : 'Висина',
cellSpace : 'Ћелијски простор',
cellPad : 'Размак ћелија',
caption : 'Наслов табеле',
summary : 'Summary', // MISSING
headers : 'Headers', // MISSING
headersNone : 'None', // MISSING
headersColumn : 'First column', // MISSING
headersRow : 'First Row', // MISSING
headersBoth : 'Both', // MISSING
invalidRows : 'Number of rows must be a number greater than 0.', // MISSING
invalidCols : 'Number of columns must be a number greater than 0.', // MISSING
invalidBorder : 'Border size must be a number.', // MISSING
invalidWidth : 'Table width must be a number.', // MISSING
invalidHeight : 'Table height must be a number.', // MISSING
invalidCellSpacing : 'Cell spacing must be a number.', // MISSING
invalidCellPadding : 'Cell padding must be a number.', // MISSING
cell :
{
menu : 'Cell', // MISSING
insertBefore : 'Insert Cell Before', // MISSING
insertAfter : 'Insert Cell After', // MISSING
deleteCell : 'Обриши ћелије',
merge : 'Спој ћелије',
mergeRight : 'Merge Right', // MISSING
mergeDown : 'Merge Down', // MISSING
splitHorizontal : 'Split Cell Horizontally', // MISSING
splitVertical : 'Split Cell Vertically', // MISSING
title : 'Cell Properties', // MISSING
cellType : 'Cell Type', // MISSING
rowSpan : 'Rows Span', // MISSING
colSpan : 'Columns Span', // MISSING
wordWrap : 'Word Wrap', // MISSING
hAlign : 'Horizontal Alignment', // MISSING
vAlign : 'Vertical Alignment', // MISSING
alignTop : 'Top', // MISSING
alignMiddle : 'Middle', // MISSING
alignBottom : 'Bottom', // MISSING
alignBaseline : 'Baseline', // MISSING
bgColor : 'Background Color', // MISSING
borderColor : 'Border Color', // MISSING
data : 'Data', // MISSING
header : 'Header', // MISSING
yes : 'Yes', // MISSING
no : 'No', // MISSING
invalidWidth : 'Cell width must be a number.', // MISSING
invalidHeight : 'Cell height must be a number.', // MISSING
invalidRowSpan : 'Rows span must be a whole number.', // MISSING
invalidColSpan : 'Columns span must be a whole number.', // MISSING
chooseColor : 'Choose' // MISSING
},
row :
{
menu : 'Row', // MISSING
insertBefore : 'Insert Row Before', // MISSING
insertAfter : 'Insert Row After', // MISSING
deleteRow : 'Обриши редове'
},
column :
{
menu : 'Column', // MISSING
insertBefore : 'Insert Column Before', // MISSING
insertAfter : 'Insert Column After', // MISSING
deleteColumn : 'Обриши колоне'
}
},
// Button Dialog.
button :
{
title : 'Особине дугмета',
text : 'Текст (вредност)',
type : 'Tип',
typeBtn : 'Button', // MISSING
typeSbm : 'Submit', // MISSING
typeRst : 'Reset' // MISSING
},
// Checkbox and Radio Button Dialogs.
checkboxAndRadio :
{
checkboxTitle : 'Особине поља за потврду',
radioTitle : 'Особине радио-дугмета',
value : 'Вредност',
selected : 'Означено'
},
// Form Dialog.
form :
{
title : 'Особине форме',
menu : 'Особине форме',
action : 'Aкција',
method : 'Mетода',
encoding : 'Encoding' // MISSING
},
// Select Field Dialog.
select :
{
title : 'Особине изборног поља',
selectInfo : 'Инфо',
opAvail : 'Доступне опције',
value : 'Вредност',
size : 'Величина',
lines : 'линија',
chkMulti : 'Дозволи вишеструку селекцију',
opText : 'Текст',
opValue : 'Вредност',
btnAdd : 'Додај',
btnModify : 'Измени',
btnUp : 'Горе',
btnDown : 'Доле',
btnSetValue : 'Подеси као означену вредност',
btnDelete : 'Обриши'
},
// Textarea Dialog.
textarea :
{
title : 'Особине зоне текста',
cols : 'Број колона',
rows : 'Број редова'
},
// Text Field Dialog.
textfield :
{
title : 'Особине текстуалног поља',
name : 'Назив',
value : 'Вредност',
charWidth : 'Ширина (карактера)',
maxChars : 'Максимално карактера',
type : 'Тип',
typeText : 'Текст',
typePass : 'Лозинка'
},
// Hidden Field Dialog.
hidden :
{
title : 'Особине скривеног поља',
name : 'Назив',
value : 'Вредност'
},
// Image Dialog.
image :
{
title : 'Особине слика',
titleButton : 'Особине дугмета са сликом',
menu : 'Особине слика',
infoTab : 'Инфо слике',
btnUpload : 'Пошаљи на сервер',
upload : 'Пошаљи',
alt : 'Алтернативни текст',
width : 'Ширина',
height : 'Висина',
lockRatio : 'Закључај однос',
unlockRatio : 'Unlock Ratio', // MISSING
resetSize : 'Ресетуј величину',
border : 'Оквир',
hSpace : 'HSpace',
vSpace : 'VSpace',
align : 'Равнање',
alignLeft : 'Лево',
alignRight : 'Десно',
alertUrl : 'Унесите УРЛ слике',
linkTab : 'Линк',
button2Img : 'Do you want to transform the selected image button on a simple image?', // MISSING
img2Button : 'Do you want to transform the selected image on a image button?', // MISSING
urlMissing : 'Image source URL is missing.', // MISSING
validateWidth : 'Width must be a whole number.', // MISSING
validateHeight : 'Height must be a whole number.', // MISSING
validateBorder : 'Border must be a whole number.', // MISSING
validateHSpace : 'HSpace must be a whole number.', // MISSING
validateVSpace : 'VSpace must be a whole number.' // MISSING
},
// Flash Dialog
flash :
{
properties : 'Особине Флеша',
propertiesTab : 'Properties', // MISSING
title : 'Особине флеша',
chkPlay : 'Аутоматски старт',
chkLoop : 'Понављај',
chkMenu : 'Укључи флеш мени',
chkFull : 'Allow Fullscreen', // MISSING
scale : 'Скалирај',
scaleAll : 'Прикажи све',
scaleNoBorder : 'Без ивице',
scaleFit : 'Попуни површину',
access : 'Script Access', // MISSING
accessAlways : 'Always', // MISSING
accessSameDomain: 'Same domain', // MISSING
accessNever : 'Never', // MISSING
align : 'Равнање',
alignLeft : 'Лево',
alignAbsBottom : 'Abs доле',
alignAbsMiddle : 'Abs средина',
alignBaseline : 'Базно',
alignBottom : 'Доле',
alignMiddle : 'Средина',
alignRight : 'Десно',
alignTextTop : 'Врх текста',
alignTop : 'Врх',
quality : 'Quality', // MISSING
qualityBest : 'Best', // MISSING
qualityHigh : 'High', // MISSING
qualityAutoHigh : 'Auto High', // MISSING
qualityMedium : 'Medium', // MISSING
qualityAutoLow : 'Auto Low', // MISSING
qualityLow : 'Low', // MISSING
windowModeWindow: 'Window', // MISSING
windowModeOpaque: 'Opaque', // MISSING
windowModeTransparent : 'Transparent', // MISSING
windowMode : 'Window mode', // MISSING
flashvars : 'Variables for Flash', // MISSING
bgcolor : 'Боја позадине',
width : 'Ширина',
height : 'Висина',
hSpace : 'HSpace',
vSpace : 'VSpace',
validateSrc : 'Унесите УРЛ линка',
validateWidth : 'Width must be a number.', // MISSING
validateHeight : 'Height must be a number.', // MISSING
validateHSpace : 'HSpace must be a number.', // MISSING
validateVSpace : 'VSpace must be a number.' // MISSING
},
// Speller Pages Dialog
spellCheck :
{
toolbar : 'Провери спеловање',
title : 'Spell Check', // MISSING
notAvailable : 'Sorry, but service is unavailable now.', // MISSING
errorLoading : 'Error loading application service host: %s.', // MISSING
notInDic : 'Није у речнику',
changeTo : 'Измени',
btnIgnore : 'Игнориши',
btnIgnoreAll : 'Игнориши све',
btnReplace : 'Замени',
btnReplaceAll : 'Замени све',
btnUndo : 'Врати акцију',
noSuggestions : '- Без сугестија -',
progress : 'Провера спеловања у току...',
noMispell : 'Провера спеловања завршена: грешке нису пронађене',
noChanges : 'Провера спеловања завршена: Није измењена ниједна реч',
oneChange : 'Провера спеловања завршена: Измењена је једна реч',
manyChanges : 'Провера спеловања завршена: %1 реч(и) је измењено',
ieSpellDownload : 'Провера спеловања није инсталирана. Да ли желите да је скинете са Интернета?'
},
smiley :
{
toolbar : 'Смајли',
title : 'Унеси смајлија',
options : 'Smiley Options' // MISSING
},
elementsPath :
{
eleLabel : 'Elements path', // MISSING
eleTitle : '%1 element' // MISSING
},
numberedlist : 'Набројиву листу',
bulletedlist : 'Ненабројива листа',
indent : 'Увећај леву маргину',
outdent : 'Смањи леву маргину',
justify :
{
left : 'Лево равнање',
center : 'Центриран текст',
right : 'Десно равнање',
block : 'Обострано равнање'
},
blockquote : 'Block Quote', // MISSING
clipboard :
{
title : 'Залепи',
cutError : 'Сигурносна подешавања Вашег претраживача не дозвољавају операције аутоматског исецања текста. Молимо Вас да користите пречицу са тастатуре (Ctrl/Cmd+X).',
copyError : 'Сигурносна подешавања Вашег претраживача не дозвољавају операције аутоматског копирања текста. Молимо Вас да користите пречицу са тастатуре (Ctrl/Cmd+C).',
pasteMsg : 'Молимо Вас да залепите унутар доње површине користећи тастатурну пречицу (<STRONG>Ctrl/Cmd+V</STRONG>) и да притиснете <STRONG>OK</STRONG>.',
securityMsg : 'Because of your browser security settings, the editor is not able to access your clipboard data directly. You are required to paste it again in this window.', // MISSING
pasteArea : 'Paste Area' // MISSING
},
pastefromword :
{
confirmCleanup : 'The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?', // MISSING
toolbar : 'Залепи из Worda',
title : 'Залепи из Worda',
error : 'It was not possible to clean up the pasted data due to an internal error' // MISSING
},
pasteText :
{
button : 'Залепи као чист текст',
title : 'Залепи као чист текст'
},
templates :
{
button : 'Обрасци',
title : 'Обрасци за садржај',
options : 'Template Options', // MISSING
insertOption : 'Replace actual contents', // MISSING
selectPromptMsg : 'Молимо Вас да одаберете образац који ће бити примењен на страницу (тренутни садржај ће бити обрисан):',
emptyListMsg : '(Нема дефинисаних образаца)'
},
showBlocks : 'Show Blocks', // MISSING
stylesCombo :
{
label : 'Стил',
panelTitle : 'Formatting Styles', // MISSING
panelTitle1 : 'Block Styles', // MISSING
panelTitle2 : 'Inline Styles', // MISSING
panelTitle3 : 'Object Styles' // MISSING
},
format :
{
label : 'Формат',
panelTitle : 'Формат',
tag_p : 'Normal',
tag_pre : 'Formatirano',
tag_address : 'Adresa',
tag_h1 : 'Heading 1',
tag_h2 : 'Heading 2',
tag_h3 : 'Heading 3',
tag_h4 : 'Heading 4',
tag_h5 : 'Heading 5',
tag_h6 : 'Heading 6',
tag_div : 'Normal (DIV)' // MISSING
},
div :
{
title : 'Create Div Container', // MISSING
toolbar : 'Create Div Container', // MISSING
cssClassInputLabel : 'Stylesheet Classes', // MISSING
styleSelectLabel : 'Style', // MISSING
IdInputLabel : 'Id', // MISSING
languageCodeInputLabel : ' Language Code', // MISSING
inlineStyleInputLabel : 'Inline Style', // MISSING
advisoryTitleInputLabel : 'Advisory Title', // MISSING
langDirLabel : 'Language Direction', // MISSING
langDirLTRLabel : 'Left to Right (LTR)', // MISSING
langDirRTLLabel : 'Right to Left (RTL)', // MISSING
edit : 'Edit Div', // MISSING
remove : 'Remove Div' // MISSING
},
font :
{
label : 'Фонт',
voiceLabel : 'Font', // MISSING
panelTitle : 'Фонт'
},
fontSize :
{
label : 'Величина фонта',
voiceLabel : 'Font Size', // MISSING
panelTitle : 'Величина фонта'
},
colorButton :
{
textColorTitle : 'Боја текста',
bgColorTitle : 'Боја позадине',
panelTitle : 'Colors', // MISSING
auto : 'Аутоматски',
more : 'Више боја...'
},
colors :
{
'000' : 'Black', // MISSING
'800000' : 'Maroon', // MISSING
'8B4513' : 'Saddle Brown', // MISSING
'2F4F4F' : 'Dark Slate Gray', // MISSING
'008080' : 'Teal', // MISSING
'000080' : 'Navy', // MISSING
'4B0082' : 'Indigo', // MISSING
'696969' : 'Dark Gray', // MISSING
'B22222' : 'Fire Brick', // MISSING
'A52A2A' : 'Brown', // MISSING
'DAA520' : 'Golden Rod', // MISSING
'006400' : 'Dark Green', // MISSING
'40E0D0' : 'Turquoise', // MISSING
'0000CD' : 'Medium Blue', // MISSING
'800080' : 'Purple', // MISSING
'808080' : 'Gray', // MISSING
'F00' : 'Red', // MISSING
'FF8C00' : 'Dark Orange', // MISSING
'FFD700' : 'Gold', // MISSING
'008000' : 'Green', // MISSING
'0FF' : 'Cyan', // MISSING
'00F' : 'Blue', // MISSING
'EE82EE' : 'Violet', // MISSING
'A9A9A9' : 'Dim Gray', // MISSING
'FFA07A' : 'Light Salmon', // MISSING
'FFA500' : 'Orange', // MISSING
'FFFF00' : 'Yellow', // MISSING
'00FF00' : 'Lime', // MISSING
'AFEEEE' : 'Pale Turquoise', // MISSING
'ADD8E6' : 'Light Blue', // MISSING
'DDA0DD' : 'Plum', // MISSING
'D3D3D3' : 'Light Grey', // MISSING
'FFF0F5' : 'Lavender Blush', // MISSING
'FAEBD7' : 'Antique White', // MISSING
'FFFFE0' : 'Light Yellow', // MISSING
'F0FFF0' : 'Honeydew', // MISSING
'F0FFFF' : 'Azure', // MISSING
'F0F8FF' : 'Alice Blue', // MISSING
'E6E6FA' : 'Lavender', // MISSING
'FFF' : 'White' // MISSING
},
scayt :
{
title : 'Spell Check As You Type', // MISSING
opera_title : 'Not supported by Opera', // MISSING
enable : 'Enable SCAYT', // MISSING
disable : 'Disable SCAYT', // MISSING
about : 'About SCAYT', // MISSING
toggle : 'Toggle SCAYT', // MISSING
options : 'Options', // MISSING
langs : 'Languages', // MISSING
moreSuggestions : 'More suggestions', // MISSING
ignore : 'Ignore', // MISSING
ignoreAll : 'Ignore All', // MISSING
addWord : 'Add Word', // MISSING
emptyDic : 'Dictionary name should not be empty.', // MISSING
optionsTab : 'Options', // MISSING
allCaps : 'Ignore All-Caps Words', // MISSING
ignoreDomainNames : 'Ignore Domain Names', // MISSING
mixedCase : 'Ignore Words with Mixed Case', // MISSING
mixedWithDigits : 'Ignore Words with Numbers', // MISSING
languagesTab : 'Languages', // MISSING
dictionariesTab : 'Dictionaries', // MISSING
dic_field_name : 'Dictionary name', // MISSING
dic_create : 'Create', // MISSING
dic_restore : 'Restore', // MISSING
dic_delete : 'Delete', // MISSING
dic_rename : 'Rename', // MISSING
dic_info : 'Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type it\'s name and click the Restore button.', // MISSING
aboutTab : 'About' // MISSING
},
about :
{
title : 'About CKEditor', // MISSING
dlgTitle : 'About CKEditor', // MISSING
moreInfo : 'For licensing information please visit our web site:', // MISSING
copy : 'Copyright © $1. All rights reserved.' // MISSING
},
maximize : 'Maximize', // MISSING
minimize : 'Minimize', // MISSING
fakeobjects :
{
anchor : 'Anchor', // MISSING
flash : 'Flash Animation', // MISSING
div : 'Page Break', // MISSING
unknown : 'Unknown Object' // MISSING
},
resize : 'Drag to resize', // MISSING
colordialog :
{
title : 'Select color', // MISSING
options : 'Color Options', // MISSING
highlight : 'Highlight', // MISSING
selected : 'Selected Color', // MISSING
clear : 'Clear' // MISSING
},
toolbarCollapse : 'Collapse Toolbar', // MISSING
toolbarExpand : 'Expand Toolbar', // MISSING
bidi :
{
ltr : 'Text direction from left to right', // MISSING
rtl : 'Text direction from right to left' // MISSING
}
};
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap/site/content/docs/5.0/customize/optimize.md
---
layout: docs
title: Optimize
description: Keep your projects lean, responsive, and maintainable so you can deliver the best experience and focus on more important jobs.
group: customize
toc: true
---
## Lean Sass imports
When using Sass in your asset pipeline, make sure you optimize Bootstrap by only `@import`ing the components you need. Your largest optimizations will likely come from the `Layout & Components` section of our `bootstrap.scss`.
{{< scss-docs name="import-stack" file="scss/bootstrap.scss" >}}
If you're not using a component, comment it out or delete it entirely. For example, if you're not using the carousel, remove that import to save some file size in your compiled CSS. Keep in mind there are some dependencies across Sass imports that may make it more difficult to omit a file.
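For example (a rough sketch — the exact list depends on your Bootstrap version and on which components your project actually uses), a slimmed-down entry file could look like this:

```scss
// custom.scss — illustrative subset of bootstrap.scss
// Required core
@import "bootstrap/scss/functions";
@import "bootstrap/scss/variables";
@import "bootstrap/scss/mixins";
@import "bootstrap/scss/utilities";

// Only the layout & components this project needs
@import "bootstrap/scss/root";
@import "bootstrap/scss/reboot";
@import "bootstrap/scss/containers";
@import "bootstrap/scss/grid";
@import "bootstrap/scss/buttons";

// Helpers and the utilities API
@import "bootstrap/scss/helpers";
@import "bootstrap/scss/utilities/api";
```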
## Lean JavaScript
Bootstrap's JavaScript includes every component in our primary dist files (`bootstrap.js` and `bootstrap.min.js`), and even our primary dependency (Popper) with our bundle files (`bootstrap.bundle.js` and `bootstrap.bundle.min.js`). While you're customizing via Sass, be sure to remove related JavaScript.
For instance, assuming you're using your own JavaScript bundler like Webpack or Rollup, you'd only import the JavaScript you plan on using. In the example below, we show how to just include our modal JavaScript:
```js
// Import just what we need
// import 'bootstrap/js/dist/alert';
// import 'bootstrap/js/dist/button';
// import 'bootstrap/js/dist/carousel';
// import 'bootstrap/js/dist/collapse';
// import 'bootstrap/js/dist/dropdown';
import 'bootstrap/js/dist/modal';
// import 'bootstrap/js/dist/popover';
// import 'bootstrap/js/dist/scrollspy';
// import 'bootstrap/js/dist/tab';
// import 'bootstrap/js/dist/toast';
// import 'bootstrap/js/dist/tooltip';
```
This way, you're not including any JavaScript you don't intend to use for components like buttons, carousels, and tooltips. If you're importing dropdowns, tooltips or popovers, be sure to list the Popper dependency in your `package.json` file.
{{< callout info >}}
### Default Exports
Files in `bootstrap/js/dist` use the **default export**, so if you want to use one of them you have to do the following:
```js
import Modal from 'bootstrap/js/dist/modal'
const modal = new Modal(document.getElementById('myModal'))
```
{{< /callout >}}
## Autoprefixer .browserslistrc
Bootstrap depends on Autoprefixer to automatically add browser prefixes to certain CSS properties. Prefixes are dictated by our `.browserslistrc` file, found in the root of the Bootstrap repo. Customizing this list of browsers and recompiling the Sass will automatically remove some CSS from your compiled CSS, if there are vendor prefixes unique to that browser or version.
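For reference, a browserslist config is a plain text file of queries, one per line. A trimmed-down example (illustrative only — use the `.browserslistrc` shipped in the Bootstrap repo as your starting point) might look like:

```text
# .browserslistrc (illustrative)
>= 0.5%
last 2 major versions
not dead
Chrome >= 60
Firefox >= 60
Safari >= 12
iOS >= 12
not Explorer <= 11
```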
## Unused CSS
_Help wanted with this section, please consider opening a PR. Thanks!_
While we don't have a prebuilt example for using [PurgeCSS](https://github.com/FullHuman/purgecss) with Bootstrap, there are some helpful articles and walkthroughs that the community has written. Here are some options:
- <https://medium.com/dwarves-foundation/remove-unused-css-styles-from-bootstrap-using-purgecss-88395a2c5772>
- <https://lukelowrey.com/automatically-removeunused-css-from-bootstrap-or-other-frameworks/>
Lastly, this [CSS Tricks article on unused CSS](https://css-tricks.com/how-do-you-remove-unused-css-from-a-site/) shows how to use PurgeCSS and other similar tools.
## Minify and gzip
Whenever possible, be sure to compress all the code you serve to your visitors. If you're using Bootstrap dist files, try to stick to the minified versions (indicated by the `.min.css` and `.min.js` extensions). If you're building Bootstrap from the source with your own build system, be sure to implement your own minifiers for HTML, CSS, and JS.
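How you enable compression depends on your server. As a sketch, with nginx you could turn on on-the-fly gzip for text-based assets (the directives shown are standard nginx options; tune them for your setup):

```nginx
# Illustrative nginx snippet
gzip on;
gzip_types text/css application/javascript image/svg+xml;
gzip_min_length 1024;
```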
## Nonblocking files
While minifying and using compression might seem like enough, making your files nonblocking is also a big step in making your site well-optimized and fast.
If you are using a [Lighthouse](https://developers.google.com/web/tools/lighthouse/) plugin in Google Chrome, you may have stumbled over FCP. [The First Contentful Paint](https://web.dev/fcp/) metric measures the time from when the page starts loading to when any part of the page's content is rendered on the screen.
You can improve FCP by deferring non-critical JavaScript or CSS. What does that mean? Simply, JavaScript or stylesheets that don't need to be present on the first paint of your page should be marked with `async` or `defer` attributes.
This ensures that the less important resources are loaded later and not blocking the first paint. On the other hand, critical resources can be included as inline scripts or styles.
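As a minimal, illustrative example (file names are placeholders), you can defer the bundle and load non-critical CSS without blocking the first paint:

```html
<!-- Deferred JavaScript: parsed without blocking, executed after the document is ready -->
<script src="/js/bootstrap.bundle.min.js" defer></script>

<!-- Non-critical CSS loaded asynchronously, with a <noscript> fallback -->
<link rel="preload" href="/css/non-critical.css" as="style" onload="this.onload=null;this.rel='stylesheet'">
<noscript><link rel="stylesheet" href="/css/non-critical.css"></noscript>
```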
If you want to learn more about this, there are already a lot of great articles about it:
- <https://web.dev/render-blocking-resources/>
- <https://web.dev/defer-non-critical-css/>
## Always use HTTPS
Your website should only be available over HTTPS connections in production. HTTPS improves the security, privacy, and availability of all sites, and [there is no such thing as non-sensitive web traffic](https://https.cio.gov/everything/). The steps to configure your website to be served exclusively over HTTPS vary widely depending on your architecture and web hosting provider, and thus are beyond the scope of these docs.
Sites served over HTTPS should also access all stylesheets, scripts, and other assets over HTTPS connections. Otherwise, you'll be sending users [mixed active content](https://developer.mozilla.org/en-US/docs/Web/Security/Mixed_content), leading to potential vulnerabilities where a site can be compromised by altering a dependency. This can lead to security issues and in-browser warnings displayed to users. Whether you're getting Bootstrap from a CDN or serving it yourself, ensure that you only access it over HTTPS connections.
| PypiClean |
/AnyStrEnum-0.2.0-py3-none-any.whl/anystrenum/anystrenum.py
import abc
from enum import Enum, EnumMeta, _EnumDict, auto
from typing import List, Callable, AnyStr, Set, TypeVar, Type, Any
SEP_ATTR = "__sep__"
CONVERTER_ATTR = "__converter__"
ITEM_TYPE_ATTR = '__item_type__'
class BaseStrEnumItem(metaclass=abc.ABCMeta):
sep: AnyStr
converter: Callable[[AnyStr], AnyStr]
@abc.abstractmethod
def __init__(self, sep: AnyStr = None, converter: Callable[[AnyStr], AnyStr] = None):
self.sep = sep
self.converter = converter
@abc.abstractmethod
def generate_value(self, name: str) -> AnyStr:
pass
class BaseAnyStrEnum(Enum):
__sep__: AnyStr = None
__converter__: Callable[[str], AnyStr] = None
__item_type__: Type[BaseStrEnumItem] = None
@classmethod
def filter(cls,
contains: AnyStr = None, *,
contained_in: AnyStr = None,
startswith: AnyStr = None,
endswith: AnyStr = None,
case_sensitive: bool = False,
intersection: bool = True,
inverse: bool = False) -> Set['StrEnum']:
"""
        :param contains: filter all enum members which contain some substring
        :param startswith: filter all enum members which start with some substring
        :param endswith: filter all enum members which end with some substring
        :param contained_in: filter all enum members which are substrings of some string
        :param case_sensitive: defines whether found values must match the case of the given string
        :param inverse: if True, all enum members except found will be returned
        :param intersection: indicates whether the function should return all found objects or their intersection
:return: all found enums
"""
def prepare(value):
if case_sensitive:
return value
return value.lower()
found_sets: List[set] = []
if contains:
contains = prepare(contains)
found_sets.append({e for e in cls if contains in prepare(e)})
if startswith:
startswith = prepare(startswith)
found_sets.append({e for e in cls if prepare(e).startswith(startswith)})
if endswith:
endswith = prepare(endswith)
found_sets.append({e for e in cls if prepare(e).endswith(endswith)})
if contained_in:
contained_in = prepare(contained_in)
found_sets.append({e for e in cls if prepare(e) in contained_in})
if not found_sets:
return set()
if intersection:
found = found_sets[0].intersection(*found_sets[1:])
else:
found = found_sets[0].union(*found_sets[1:])
if inverse:
return {e for e in cls} - found
return found
def _generate_next_value_(*_):
return auto()
class AnyStrEnumMeta(EnumMeta):
# It's here to avoid 'got an unexpected keyword argument' TypeError
@classmethod
def __prepare__(mcs, *args, sep: AnyStr = None, converter: Callable[[AnyStr], AnyStr] = None, **kwargs):
return super().__prepare__(*args, **kwargs)
def __new__(mcs, cls, bases, class_dict, sep: AnyStr = None, converter: Callable[[AnyStr], AnyStr] = None):
mixin_type, base_enum = mcs._get_mixins_(bases)
if not issubclass(base_enum, BaseAnyStrEnum):
raise TypeError(f'Unexpected Enum type \'{base_enum.__name__}\'. '
f'Only {BaseAnyStrEnum.__name__} and its subclasses are allowed')
elif not issubclass(mixin_type, (str, bytes)):
raise TypeError(f'Unexpected mixin type \'{mixin_type.__name__}\'. '
f'Only str, bytes and their subclasses are allowed')
# Resolving Item class for mixin_type
item_type: Type[BaseStrEnumItem] = class_dict.get(ITEM_TYPE_ATTR, base_enum.__item_type__)
if item_type is None:
raise NotImplementedError(f'{cls} must implement {ITEM_TYPE_ATTR}')
elif not issubclass(item_type, BaseStrEnumItem):
raise TypeError(f'{item_type.__name__} must be type of {BaseStrEnumItem.__name__}')
# Trying to get sep and converter from class dict and base enum class
if sep is None:
sep = class_dict.get(SEP_ATTR) or base_enum.__sep__
if converter is None:
converter = class_dict.get(CONVERTER_ATTR) or base_enum.__converter__
item: BaseStrEnumItem = item_type(sep=sep, converter=converter)
new_class_dict = _EnumDict()
for name, type_hint in class_dict.get('__annotations__', {}).items():
if name.startswith('_') or name in class_dict:
continue
mcs.check_type_equals(type_hint, mixin_type)
value = item.generate_value(name)
new_class_dict[name] = value
mcs.check_type_equals(type(value), mixin_type)
for name, value in class_dict.items():
if isinstance(value, BaseStrEnumItem):
value = value.generate_value(name)
elif isinstance(value, auto):
value = item.generate_value(name)
if not name.startswith('_'):
mcs.check_type_equals(type(value), mixin_type)
new_class_dict[name] = value
new_class_dict[SEP_ATTR] = sep
new_class_dict[CONVERTER_ATTR] = converter
new_class_dict[ITEM_TYPE_ATTR] = item_type
return super().__new__(mcs, cls, bases, new_class_dict)
@staticmethod
def check_type_equals(type_to_check: Any, allowed_type: Type[Any]):
if isinstance(type_to_check, TypeVar):
if len(type_to_check.__constraints__) > 1:
raise TypeError(f'Only {allowed_type.__name__} is allowed, '
f'not {type_to_check} {type_to_check.__constraints__}')
elif type_to_check.__constraints__[0] is not allowed_type:
raise TypeError(f'Unexpected type {type_to_check.__constraints__[0].__name__}, '
f'allowed type: {allowed_type.__name__}')
elif type_to_check is not allowed_type:
raise TypeError(f'Unexpected type {getattr(type_to_check, "__name__", type_to_check)}'
f', allowed type: {allowed_type.__name__}')
class StrItem(BaseStrEnumItem):
# https://youtrack.jetbrains.com/issue/PY-24426
# noinspection PyMissingConstructor
def __init__(self, sep: AnyStr = None, converter: Callable[[str], str] = None):
self.sep = sep
self.converter = converter
def generate_value(self, name: str) -> str:
if self.converter:
name = self.converter(name)
if self.sep:
name = name.replace('_', self.sep)
return name
class BytesItem(BaseStrEnumItem):
# https://youtrack.jetbrains.com/issue/PY-24426
# noinspection PyMissingConstructor
def __init__(self, sep: AnyStr = None, converter: Callable[[bytes], bytes] = None):
self.sep = sep
self.converter = converter
def generate_value(self, name: str) -> bytes:
name = bytes(name, 'utf8')
if self.converter:
name = self.converter(name)
if self.sep:
name = name.replace(b'_', self.sep)
return name
auto_str = StrItem
auto_bytes = BytesItem
class StrEnum(str, BaseAnyStrEnum, metaclass=AnyStrEnumMeta):
__sep__: str = None
__converter__: Callable[[str], str] = None
__item_type__ = StrItem
def __str__(self):
return self.value
class BytesEnum(bytes, BaseAnyStrEnum, metaclass=AnyStrEnumMeta):
__sep__: bytes = None
__converter__: Callable[[bytes], bytes] = None
__item_type__: Type[BaseStrEnumItem] = BytesItem
def __str__(self):
        return str(self.value)
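# Illustrative usage sketch (not part of the original module): members can be declared
# via annotations, and their values are generated from the member names using the
# configured separator and converter.
#
#     class ContentType(StrEnum, sep='/', converter=str.lower):
#         APPLICATION_JSON: str
#         TEXT_HTML: str
#
#     assert ContentType.APPLICATION_JSON == 'application/json'
#     assert ContentType.filter(contains='json') == {ContentType.APPLICATION_JSON}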
/AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/config.py
import os
import sys
fldrs = {}
logs = {}
params = {}
"""
# path for personal data location (TODO - you need to modify this line below!)
if sys.platform == 'linux':
if os.path.exists('/home/duncan'):
hme = '/home/duncan/'
core_folder = '/home/duncan/dev/src/python/AIKIF'
print('config.py : running locally on duncans PC!')
else:
hme = os.getcwd()
core_folder = os.getcwd()
print('config.py : running on CI build!')
else:
hme = 'T:\\user\\'
core_folder = 'T:\\user\\dev\\src\\python\\AIKIF'
"""
def get_root_folder():
"""
returns the home folder and program root depending on OS
"""
locations = {
'linux':{'hme':'/home/duncan/', 'core_folder':'/home/duncan/dev/src/python/AIKIF'},
'win32':{'hme':'T:\\user\\', 'core_folder':'T:\\user\\dev\\src\\python\\AIKIF'},
'cygwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()},
'darwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()}
}
hme = locations[sys.platform]['hme']
core_folder = locations[sys.platform]['core_folder']
if not os.path.exists(core_folder):
hme = os.getcwd()
core_folder = os.getcwd()
print('config.py : running on CI build (or you need to modify the paths in config.py)')
return hme, core_folder
hme, core_folder = get_root_folder()
fldrs['localPath'] = hme + 'AIKIF' + os.sep
fldrs['log_folder'] = hme + 'AIKIF' + os.sep + 'log'
fldrs['pers_data'] = hme + 'AIKIF' + os.sep + 'pers_data'
fldrs['pers_credentials'] = hme + 'AIKIF' + os.sep + 'pers_data' + os.sep + 'credentials'
# FOR DEVELOPMENT
core_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." )
fldrs['root_path'] = core_folder
fldrs['public_data_path'] = core_folder + os.sep + 'aikif' + os.sep + 'data'
fldrs['program_path'] = os.path.abspath(core_folder + os.sep + 'aikif')
# user defined parameters
params['AIKIF_version'] = '0.1.9'
params['AIKIF_deploy'] = 'DEV'
# names of logfiles for AIKIF
logs['logFileProcess'] = fldrs['localPath'] + 'log' + os.sep + 'process.log'
logs['logFileSource'] = fldrs['localPath'] + 'log' + os.sep + 'source.log'
logs['logFileCommand'] = fldrs['localPath'] + 'log' + os.sep + 'command.log'
logs['logFileResult'] = fldrs['localPath'] + 'log' + os.sep + 'result.log'
# index files
# fldrs['public_data_path'] + os.sep + 'index' + os.sep + 'ndxWordsToFilesLecture.txt',
# fldrs['localPath'] + 'diary' + os.sep + 'filelister2014.csv',
params['index_files'] = [fldrs['public_data_path'] + os.sep + 'index' + os.sep + 'ndxAll.txt',
fldrs['localPath'] + 'pers_data' + os.sep + 'pers_index_final.txt',
fldrs['localPath'] + 'pers_data' + os.sep + 'ndx_PCusage.txt'
]
def read_credentials(fname):
"""
read a simple text file from a private location to get
username and password
"""
with open(fname, 'r') as f:
username = f.readline().strip('\n')
password = f.readline().strip('\n')
return username, password
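# Minimal usage sketch for read_credentials(), assuming a two-line credentials
# file (username on line 1, password on line 2); the filename below is a
# hypothetical example only.
def _example_read_credentials():
    fname = os.path.join(fldrs['pers_credentials'], 'example_service.txt')
    username, password = read_credentials(fname)
    return username, password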
def show_config():
"""
    builds and returns a readable summary of the configured folder
    locations, logfiles and parameters.
    Modify the parameters at the top of this file to suit
"""
res = ''
res += '\n---------- Folder Locations ---------\n'
for k,v in fldrs.items():
res += str(k) + ' = ' + str(v) + '\n'
res += '\n---------- Logfiles ---------\n'
for k,v in logs.items():
res += str(k) + ' = ' + str(v) + '\n'
res += '\n---------- Parameters ---------\n'
for k,v in params.items():
res += str(k) + ' = ' + str(v) + '\n'
print("\nusage from other programs - returns " + fldr_root())
return res
def fldr_root():
return fldrs['root_path']
if __name__ == '__main__':
    show_config()
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/widget/rotator/PanFade.js.uncompressed.js
define("dojox/widget/rotator/PanFade", ["dijit","dojo","dojox","dojo/require!dojo/fx"], function(dijit,dojo,dojox){
dojo.provide("dojox.widget.rotator.PanFade");
dojo.require("dojo.fx");
(function(d){
// Constants used to identify which edge the pane pans in from.
var DOWN = 0,
RIGHT = 1,
UP = 2,
LEFT = 3;
function _pan(/*int*/type, /*Object*/args){
// summary:
// Handles the preparation of the dom node and creates the dojo.Animation object.
var j = {
node: args.current.node,
duration: args.duration,
easing: args.easing
},
k = {
node: args.next.node,
duration: args.duration,
easing: args.easing
},
r = args.rotatorBox,
m = type % 2,
a = m ? "left" : "top",
s = (m ? r.w : r.h) * (type < 2 ? -1 : 1),
p = {},
q = {};
d.style(k.node, {
display: "",
opacity: 0
});
p[a] = {
start: 0,
end: -s
};
q[a] = {
start: s,
end: 0
};
return d.fx.combine([ /*dojo.Animation*/
d.animateProperty(d.mixin({ properties: p }, j)),
d.fadeOut(j),
d.animateProperty(d.mixin({ properties: q }, k)),
d.fadeIn(k)
]);
}
function _setZindex(/*DomNode*/n, /*int*/z){
// summary:
// Helper function for continuously panning.
d.style(n, "zIndex", z);
}
d.mixin(dojox.widget.rotator, {
panFade: function(/*Object*/args){
// summary:
// Returns a dojo.Animation that either pans left or right to the next pane.
// The actual direction depends on the order of the panes.
//
// If panning forward from index 1 to 3, it will perform a pan left. If panning
// backwards from 5 to 1, then it will perform a pan right.
//
// If the parameter "continuous" is set to true, it will return an animation
// chain of several pan animations of each intermediate pane panning. For
// example, if you pan forward from 1 to 3, it will return an animation panning
// left from 1 to 2 and then 2 to 3.
//
// If an easing is specified, it will be applied to each pan transition. For
// example, if you are panning from pane 1 to pane 5 and you set the easing to
// "dojo.fx.easing.elasticInOut", then it will "wobble" 5 times, once for each
// pan transition.
//
// If the parameter "wrap" is set to true, it will pan to the next pane using
// the shortest distance in the array of panes. For example, if there are 6
// panes, then panning from 5 to 1 will pan forward (left) from pane 5 to 6 and
// 6 to 1. If the distance is the same either going forward or backwards, then
// it will always pan forward (left).
//
// A continuous pan will use the target pane's duration to pan all intermediate
// panes. To use the target's pane duration for each intermediate pane, then
// set the "quick" parameter to "false".
var w = args.wrap,
p = args.rotator.panes,
len = p.length,
z = len,
j = args.current.idx,
k = args.next.idx,
nw = Math.abs(k - j),
ww = Math.abs((len - Math.max(j, k)) + Math.min(j, k)) % len,
_forward = j < k,
_dir = LEFT,
_pans = [],
_nodes = [],
_duration = args.duration;
// default to pan left, but check if we should pan right.
// need to take into account wrapping.
if((!w && !_forward) || (w && (_forward && nw > ww || !_forward && nw < ww))){
_dir = RIGHT;
}
if(args.continuous){
// if continuous pans are quick, then divide the duration by the number of panes
if(args.quick){
_duration = Math.round(_duration / (w ? Math.min(ww, nw) : nw));
}
// set the current pane's z-index
_setZindex(p[j].node, z--);
var f = (_dir == LEFT);
// loop and set z-indexes and get all pan animations
while(1){
// set the current pane
var i = j;
// increment/decrement the next pane's index
if(f){
if(++j >= len){
j = 0;
}
}else{
if(--j < 0){
j = len - 1;
}
}
var x = p[i],
y = p[j];
// set next pane's z-index
_setZindex(y.node, z--);
// build the pan animation
_pans.push(_pan(_dir, d.mixin({
easing: function(m){ return m; } // continuous gets a linear easing by default
}, args, {
current: x,
next: y,
duration: _duration
})));
// if we're done, then break out of the loop
if((f && j == k) || (!f && j == k)){
break;
}
					// this must come after the break... we don't want the last pane to get its
// styles reset.
_nodes.push(y.node);
}
// build the chained animation of all pan animations
var _anim = d.fx.chain(_pans),
// clean up styles when the chained animation finishes
h = d.connect(_anim, "onEnd", function(){
d.disconnect(h);
d.forEach(_nodes, function(q){
d.style(q, {
display: "none",
left: 0,
opacity: 1,
top: 0,
zIndex: 0
});
});
});
return _anim;
}
// we're not continuous, so just return a normal pan animation
return _pan(_dir, args); /*dojo.Animation*/
},
panFadeDown: function(/*Object*/args){
// summary:
// Returns a dojo.Animation that pans in the next rotator pane from the top.
return _pan(DOWN, args); /*dojo.Animation*/
},
panFadeRight: function(/*Object*/args){
// summary:
// Returns a dojo.Animation that pans in the next rotator pane from the right.
return _pan(RIGHT, args); /*dojo.Animation*/
},
panFadeUp: function(/*Object*/args){
// summary:
// Returns a dojo.Animation that pans in the next rotator pane from the bottom.
return _pan(UP, args); /*dojo.Animation*/
},
panFadeLeft: function(/*Object*/args){
// summary:
// Returns a dojo.Animation that pans in the next rotator pane from the left.
return _pan(LEFT, args); /*dojo.Animation*/
}
});
})(dojo);
});
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/0.8/cluster.js
var cluster = {};
/**
* The setupMaster is used to change the default 'fork' behavior. It takes
* one option object argument.
* @param settings {Object}
*/
cluster.setupMaster = function(settings) {}
/**
* Spawn a new worker process. This can only be called from the master
* process.
* @param env {Object}
*/
cluster.fork = function(env) {}
/**
* When calling this method, all workers will commit a graceful suicide.
* When they are disconnected all internal handlers will be closed,
* allowing the master process to die graceful if no other event is
* waiting.
* @param callback {Function}
*/
cluster.disconnect = function(callback) {}
/**
* A Worker object contains all public information and method about a
* worker.
* @constructor
*/
cluster.Worker = function() {}
cluster.Worker.prototype = new events.EventEmitter();
/**
* This function is equal to the send methods provided by
* child_process.fork(). In the master you should use this function to send
* a message to a specific worker. However in a worker you can also use
* process.send(message), since this is the same function.
* @param message {Object}
* @param sendHandle {Handle}
*/
cluster.Worker.prototype.send = function(message, sendHandle) {}
/**
* This function will kill the worker, and inform the master to not spawn a
* new worker. The boolean suicide lets you distinguish between voluntary
* and accidental exit.
*/
cluster.Worker.prototype.destroy = function() {}
/**
* When calling this function the worker will no longer accept new
* connections, but they will be handled by any other listening worker.
 * Existing connections will be allowed to exit as usual. When no more
 * connections exist, the IPC channel to the worker will close, allowing it
 * to die gracefully. When the IPC channel is closed the disconnect event
 * is emitted; this is then followed by the exit event, which is emitted
 * when the worker finally dies.
*/
cluster.Worker.prototype.disconnect = function() {}
/**
* Each new worker is given its own unique id, this id is stored in the id.
*/
cluster.Worker.prototype.id = 0;
/**
* All workers are created using child_process.fork(), the returned object
* from this function is stored in process.
*/
cluster.Worker.prototype.process = 0;
/**
* This property is a boolean. It is set when a worker dies after calling
* .destroy() or immediately after calling the .disconnect() method. Until
* then it is undefined.
*/
cluster.Worker.prototype.suicide = 0;
/** @__local__ */ cluster.Worker.__events__ = {};
/**
* This event is the same as the one provided by child_process.fork(). In
* the master you should use this event, however in a worker you can also
* use process.on('message') As an example, here is a cluster that keeps
* count of the number of requests in the master process using the message
* system:
*/
cluster.Worker.__events__.message = function() {};
/**
* Same as the cluster.on('online') event, but emits only when the state
* change on the specified worker.
*/
cluster.Worker.__events__.online = function() {};
/**
* Same as the cluster.on('listening') event, but emits only when the state
* change on the specified worker.
*/
cluster.Worker.__events__.listening = function() {};
/**
* Same as the cluster.on('disconnect') event, but emits only when the
* state change on the specified worker.
*/
cluster.Worker.__events__.disconnect = function() {};
/**
* Emitted by the individual worker instance, when the underlying child
* process is terminated. See child_process event: 'exit'.
*/
cluster.Worker.__events__.exit = function() {};
/**
 * All settings set by the .setupMaster are stored in this settings object.
*/
cluster.settings = 0;
/**
* True if the process is a master. This is determined by the
* process.env.NODE_UNIQUE_ID. If process.env.NODE_UNIQUE_ID is undefined,
* then isMaster is true.
*/
cluster.isMaster = 0;
/**
* This boolean flag is true if the process is a worker forked from a
* master.
*/
cluster.isWorker = 0;
/**
* In the cluster all living worker objects are stored in this object by
* there id as the key. This makes it easy to loop through all living
* workers.
*/
cluster.workers = 0;
var events = require('events');
exports = cluster;
/Metamorf-0.4.4.2.tar.gz/Metamorf-0.4.4.2/metamorf/engines/engine_upload.py
from metamorf.engines.engine import Engine
from metamorf.tools.filecontroller import FileControllerFactory
from metamorf.tools.connection import ConnectionFactory
from metamorf.constants import *
from metamorf.tools.metadata import Metadata
from metamorf.tools.query import Query
import os
class EngineUpload(Engine):
def _initialize_engine(self):
self.engine_name = "Engine Upload"
self.engine_command = "upload"
self.entry_files_to_load = [FILE_ENTRY_AGGREGATORS, FILE_ENTRY_DATASET_MAPPINGS, FILE_ENTRY_DATASET_RELATIONSHIPS,
FILE_ENTRY_ENTITY, FILE_ENTRY_FILTERS, FILE_ENTRY_ORDER, FILE_ENTRY_PATH,
FILE_ENTRY_HAVING, FILE_ENTRY_DV_ENTITY, FILE_ENTRY_DV_PROPERTIES,
FILE_ENTRY_DV_MAPPINGS, FILE_ENTRY_FILES]
if 'select' in self.arguments:
if self.arguments['select'] == "all" or self.arguments['select'] == "*":
self.entry_files_to_load = [FILE_ENTRY_AGGREGATORS, FILE_ENTRY_DATASET_MAPPINGS, FILE_ENTRY_DATASET_RELATIONSHIPS,
FILE_ENTRY_ENTITY, FILE_ENTRY_FILTERS, FILE_ENTRY_ORDER, FILE_ENTRY_PATH,
FILE_ENTRY_HAVING, FILE_ENTRY_DV_ENTITY, FILE_ENTRY_DV_PROPERTIES,
FILE_ENTRY_DV_MAPPINGS, FILE_ENTRY_FILES]
if self.arguments['select'].lower() == FILE_ENTRY_DATASET_MAPPINGS.lower():
self.entry_files_to_load = [FILE_ENTRY_DATASET_MAPPINGS]
if self.arguments['select'].lower() == FILE_ENTRY_DATASET_RELATIONSHIPS.lower():
self.entry_files_to_load = [FILE_ENTRY_DATASET_RELATIONSHIPS]
if self.arguments['select'].lower() == FILE_ENTRY_ENTITY.lower():
self.entry_files_to_load = [FILE_ENTRY_ENTITY]
if self.arguments['select'].lower() == FILE_ENTRY_FILTERS.lower():
self.entry_files_to_load = [FILE_ENTRY_FILTERS]
if self.arguments['select'].lower() == FILE_ENTRY_ORDER.lower():
self.entry_files_to_load = [FILE_ENTRY_ORDER]
if self.arguments['select'].lower() == FILE_ENTRY_PATH.lower():
self.entry_files_to_load = [FILE_ENTRY_PATH]
if self.arguments['select'].lower() == FILE_ENTRY_HAVING.lower():
self.entry_files_to_load = [FILE_ENTRY_HAVING]
            if self.arguments['select'].lower() == FILE_ENTRY_DV_ENTITY.lower():
                self.entry_files_to_load = [FILE_ENTRY_DV_ENTITY]
            if self.arguments['select'].lower() == FILE_ENTRY_DV_PROPERTIES.lower():
                self.entry_files_to_load = [FILE_ENTRY_DV_PROPERTIES]
            if self.arguments['select'].lower() == FILE_ENTRY_DV_MAPPINGS.lower():
                self.entry_files_to_load = [FILE_ENTRY_DV_MAPPINGS]
            if self.arguments['select'].lower() == FILE_ENTRY_FILES.lower():
                self.entry_files_to_load = [FILE_ENTRY_FILES]
def run(self):
# Starts the execution loading the Configuration File. If there is an error it finishes the execution.
super().start_execution()
connection_type = self.configuration_file['metadata']['connection_type']
self.connection = ConnectionFactory().get_connection(connection_type)
self.connection.setup_connection(self.configuration_file['metadata'], self.log)
result_delete = self.delete_all_entry_from_owner()
if not result_delete:
self.connection.rollback()
super().finish_execution(result_delete)
else:
result = self.load_files_to_metadata_database()
if not result: self.connection.rollback()
else: self.connection.commit()
super().finish_execution(result)
def delete_all_entry_from_owner(self):
result = True
self.log.log(self.engine_name, "Starting deleting Metadata Entry on database" , LOG_LEVEL_INFO)
connection_type = self.configuration_file['metadata']['connection_type']
for file in self.entry_files_to_load:
self.log.log(self.engine_name, "Deleting: " + file, LOG_LEVEL_INFO)
query = Query()
query.set_database(connection_type)
query.set_type(QUERY_TYPE_DELETE)
query.set_target_table(file)
query.set_where_filters([COLUMN_ENTRY_OWNER + "='" + self.owner + "'"])
res = self.connection.execute(str(query))
result = result and res
self.log.log(self.engine_name, "Finished deleting Metadata Entry on database", LOG_LEVEL_INFO)
return result
def add_owner_at_end_each_row(self, file: list[list]):
result = []
for f in file:
s = f
s.append(self.configuration_file['owner'])
result.append(s)
return result
def load_files_to_metadata_database(self):
self.log.log(self.engine_name, "Starting uploading Metadata Entry on database", LOG_LEVEL_INFO)
result = True
metadata = Metadata(self.log)
connection_type = self.configuration_file['metadata']['connection_type']
# ENTRY_AGGREGATORS
if FILE_ENTRY_AGGREGATORS in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_AGGREGATORS + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_aggregators(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_AGGREGATORS)
query_values.set_insert_columns(COLUMNS_ENTRY_AGGREGATORS)
query_values.set_values(metadata.entry_aggregators)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_AGGREGATORS, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_DATASET_MAPPINGS
if FILE_ENTRY_DATASET_MAPPINGS in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_DATASET_MAPPINGS + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_dataset_mappings(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_DATASET_MAPPINGS)
query_values.set_insert_columns(COLUMNS_ENTRY_DATASET_MAPPINGS)
query_values.set_values(metadata.entry_dataset_mappings)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_DATASET_MAPPINGS, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_DATASET_RELATIONSHIPS
if FILE_ENTRY_DATASET_RELATIONSHIPS in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_DATASET_RELATIONSHIPS + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_dataset_relationship(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_DATASET_RELATIONSHIPS)
query_values.set_insert_columns(COLUMNS_ENTRY_DATASET_RELATIONSHIPS)
query_values.set_values(metadata.entry_dataset_relationship)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_DATASET_RELATIONSHIPS, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_ENTITY
if FILE_ENTRY_ENTITY in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH), FILE_ENTRY_ENTITY + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_entity(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_ENTITY)
query_values.set_insert_columns(COLUMNS_ENTRY_ENTITY)
query_values.set_values(metadata.entry_entity)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_ENTITY, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_FILTERS
if FILE_ENTRY_FILTERS in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH), FILE_ENTRY_FILTERS + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_filters(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_FILTERS)
query_values.set_insert_columns(COLUMNS_ENTRY_FILTERS)
query_values.set_values(metadata.entry_filters)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_FILTERS, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_HAVING
if FILE_ENTRY_HAVING in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_HAVING + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_having(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_HAVING)
query_values.set_insert_columns(COLUMNS_ENTRY_HAVING)
query_values.set_values(metadata.entry_having)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_HAVING, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_ORDER
if FILE_ENTRY_ORDER in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH), FILE_ENTRY_ORDER + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_order(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_ORDER)
query_values.set_insert_columns(COLUMNS_ENTRY_ORDER)
query_values.set_values(metadata.entry_order)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_ORDER, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_PATH
if FILE_ENTRY_PATH in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH), FILE_ENTRY_PATH + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_path(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_PATH)
query_values.set_insert_columns(COLUMNS_ENTRY_PATH)
query_values.set_values(metadata.entry_path)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_PATH, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_DV_ENTITY
if FILE_ENTRY_DV_ENTITY in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_DV_ENTITY + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_dv_entity(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_DV_ENTITY)
query_values.set_insert_columns(COLUMNS_ENTRY_DV_ENTITY)
query_values.set_values(metadata.entry_dv_entity)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_DV_ENTITY, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_DV_MAPPINGS
if FILE_ENTRY_DV_MAPPINGS in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH), FILE_ENTRY_DV_MAPPINGS + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_dv_mappings(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_DV_MAPPINGS)
query_values.set_insert_columns(COLUMNS_ENTRY_DV_MAPPINGS)
query_values.set_values(metadata.entry_dv_mappings)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_DV_MAPPINGS, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_DV_PROPERTIES
if FILE_ENTRY_DV_PROPERTIES in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_DV_PROPERTIES + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_dv_properties(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_DV_PROPERTIES)
query_values.set_insert_columns(COLUMNS_ENTRY_DV_PROPERTIES)
query_values.set_values(metadata.entry_dv_properties)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_DV_PROPERTIES, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
# ENTRY_FILES
if FILE_ENTRY_FILES in self.entry_files_to_load:
file_reader = FileControllerFactory().get_file_reader(FILE_TYPE_CSV)
file_reader.set_file_location(os.path.join(ACTUAL_PATH, ENTRY_FILES_PATH),
FILE_ENTRY_FILES + "." + FILE_TYPE_CSV)
file = file_reader.read_file()
all_rows = self.add_owner_at_end_each_row(file)
res = metadata.add_entry_files(all_rows)
result = result and res
query_values = Query()
query_values.set_database(connection_type)
query_values.set_has_header(True)
query_values.set_type(QUERY_TYPE_VALUES)
query_values.set_target_table(TABLE_ENTRY_FILES)
query_values.set_insert_columns(COLUMNS_ENTRY_FILES)
query_values.set_values(metadata.entry_files)
self.log.log(self.engine_name, "Loading: " + TABLE_ENTRY_FILES, LOG_LEVEL_INFO)
res = self.connection.execute(str(query_values))
result = result and res
self.log.log(self.engine_name, "Finished uploading Metadata Entry on database", LOG_LEVEL_INFO)
        return result
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/place_response_py3.py
from msrest.serialization import Model
class PlaceResponse(Model):
"""PlaceResponse.
:param parent:
:type parent: ~energycap.sdk.models.PlaceChild
:param place_type:
:type place_type: ~energycap.sdk.models.PlaceTypeResponse
:param created_date: The date and time the place was created
:type created_date: datetime
:param created_by:
:type created_by: ~energycap.sdk.models.UserChild
:param modified_date: The date and time of the most recent modification to
the place
:type modified_date: datetime
:param modified_by:
:type modified_by: ~energycap.sdk.models.UserChild
:param address:
:type address: ~energycap.sdk.models.AddressChild
:param build_date: The date and time the place was built
:type build_date: datetime
:param primary_use:
:type primary_use: ~energycap.sdk.models.PrimaryUseChild
:param weather_station:
:type weather_station: ~energycap.sdk.models.WeatherStationChild
:param size:
:type size: ~energycap.sdk.models.PlaceSizeChild
:param benchmark1:
:type benchmark1: ~energycap.sdk.models.LatestBenchmarkValue
:param benchmark2:
:type benchmark2: ~energycap.sdk.models.LatestBenchmarkValue
:param benchmark3:
:type benchmark3: ~energycap.sdk.models.LatestBenchmarkValue
:param energy_star_enabled: Tells whether energy star is enabled for the
given place
:type energy_star_enabled: bool
:param energy_star_rating:
:type energy_star_rating: ~energycap.sdk.models.EnergyStarRatingChild
:param places: An array of child places. A child place is one directly
beneath the current place on the buildings and meters tree
:type places: list[~energycap.sdk.models.PlaceChild]
:param meters: An array of child meters. A child meter is one directly
beneath the current place on the buildings and meters tree
:type meters: list[~energycap.sdk.models.MeterChild]
:param contact:
:type contact: ~energycap.sdk.models.ContactChild
:param place_description: A description of the place
:type place_description: str
:param wattics_site:
:type wattics_site: ~energycap.sdk.models.WatticsSite
:param place_id: The place identifier
:type place_id: int
:param place_code: The place code
:type place_code: str
:param place_info: The place info
:type place_info: str
"""
_attribute_map = {
'parent': {'key': 'parent', 'type': 'PlaceChild'},
'place_type': {'key': 'placeType', 'type': 'PlaceTypeResponse'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'created_by': {'key': 'createdBy', 'type': 'UserChild'},
'modified_date': {'key': 'modifiedDate', 'type': 'iso-8601'},
'modified_by': {'key': 'modifiedBy', 'type': 'UserChild'},
'address': {'key': 'address', 'type': 'AddressChild'},
'build_date': {'key': 'buildDate', 'type': 'iso-8601'},
'primary_use': {'key': 'primaryUse', 'type': 'PrimaryUseChild'},
'weather_station': {'key': 'weatherStation', 'type': 'WeatherStationChild'},
'size': {'key': 'size', 'type': 'PlaceSizeChild'},
'benchmark1': {'key': 'benchmark1', 'type': 'LatestBenchmarkValue'},
'benchmark2': {'key': 'benchmark2', 'type': 'LatestBenchmarkValue'},
'benchmark3': {'key': 'benchmark3', 'type': 'LatestBenchmarkValue'},
'energy_star_enabled': {'key': 'energyStarEnabled', 'type': 'bool'},
'energy_star_rating': {'key': 'energyStarRating', 'type': 'EnergyStarRatingChild'},
'places': {'key': 'places', 'type': '[PlaceChild]'},
'meters': {'key': 'meters', 'type': '[MeterChild]'},
'contact': {'key': 'contact', 'type': 'ContactChild'},
'place_description': {'key': 'placeDescription', 'type': 'str'},
'wattics_site': {'key': 'watticsSite', 'type': 'WatticsSite'},
'place_id': {'key': 'placeId', 'type': 'int'},
'place_code': {'key': 'placeCode', 'type': 'str'},
'place_info': {'key': 'placeInfo', 'type': 'str'},
}
def __init__(self, *, parent=None, place_type=None, created_date=None, created_by=None, modified_date=None, modified_by=None, address=None, build_date=None, primary_use=None, weather_station=None, size=None, benchmark1=None, benchmark2=None, benchmark3=None, energy_star_enabled: bool=None, energy_star_rating=None, places=None, meters=None, contact=None, place_description: str=None, wattics_site=None, place_id: int=None, place_code: str=None, place_info: str=None, **kwargs) -> None:
super(PlaceResponse, self).__init__(**kwargs)
self.parent = parent
self.place_type = place_type
self.created_date = created_date
self.created_by = created_by
self.modified_date = modified_date
self.modified_by = modified_by
self.address = address
self.build_date = build_date
self.primary_use = primary_use
self.weather_station = weather_station
self.size = size
self.benchmark1 = benchmark1
self.benchmark2 = benchmark2
self.benchmark3 = benchmark3
self.energy_star_enabled = energy_star_enabled
self.energy_star_rating = energy_star_rating
self.places = places
self.meters = meters
self.contact = contact
self.place_description = place_description
self.wattics_site = wattics_site
self.place_id = place_id
self.place_code = place_code
        self.place_info = place_info
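# Minimal construction sketch for PlaceResponse (illustrative values only): the
# generated model takes keyword-only arguments, and any omitted field stays None.
def _example_place_response():
    place = PlaceResponse(
        place_id=42,
        place_code="BLDG-042",
        place_description="Example office building",
        energy_star_enabled=True,
    )
    return place.place_code, place.energy_star_enabled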
/7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/SevenWondersEnv/SevenWonEnv/envs/mainGameEnv/Personality.py
import random
from sys import stdin
from copy import deepcopy
from SevenWonEnv.envs.mainGameEnv.stageClass import Stage
from SevenWonEnv.envs.mainGameEnv.cardClass import Card
class Personality:
def __init__(self):
pass
def make_choice(self, player, age, options):
pass
class DQNAI(Personality): # placeholder
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
pass
class RuleBasedAI(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
# return random.choice(range(len(options)))
for choicesIndex in range(len(options)):
if isinstance(options[choicesIndex][0], Stage): # If stage is free, buy it. 50% to buy if it's not free.
if options[choicesIndex][1] + options[choicesIndex][2] == 0 or random.randint(0, 1) % 2 == 0:
return choicesIndex
else:
continue
        # options[choicesIndex][3] is the play code. If it's -1, it means discarded. 0 is play with paying, 1 is play with effect.
if age < 3:
posChoice = []
nonDiscard = []
for choicesIndex in range(len(options)):
if options[choicesIndex][3] != -1:
nonDiscard.append(choicesIndex)
nonDiscarded = [option for option in options if option[3] != -1]
            if not nonDiscarded:  # only discarding choices remain
return random.choice(range(len(options)))
for choicesIndex in range(
len(options)
): # Select Card that gives more than 1 resource. If there are multiple cards, select one randomly
if type(options[choicesIndex][0]).__name__ == "Card":
if options[choicesIndex][0].getResource["type"] == "mixed" and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
for choicesIndex in range(
len(options)
): # Select Card that can be selected between resource. If there are multiple cards, select one randomly
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].getResource["type"] == "choose" and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
zeroRes = {key: value for (key, value) in player.resource.items() if value == 0 and key != "shield"}
for choicesIndex in range(
len(options)
): # Select resource that does not have yet (0 resource) except military. If there are multiple cards, select one randomly
if isinstance(options[choicesIndex][0], Card):
for res in zeroRes.keys():
if options[choicesIndex][0].getResource["type"] == res and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
if not (
player.resource["shield"] > player.left.resource["shield"]
or player.resource["shield"] > player.right.resource["shield"]
):
for choicesIndex in range(
len(options)
): # Select military IF it makes player surpass neighbors in shield. If there are multiple cards, select one randomly
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].getResource["type"] == "shield" and options[choicesIndex][3] != -1:
shieldPts = options[choicesIndex][0].getResource["amount"]
if (
player.resource["shield"] + shieldPts > player.left.resource["shield"]
or player.resource["shield"] + shieldPts > player.right.resource["shield"]
):
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
for choicesIndex in range(len(options)): # Select science card. If there are multiple cards, select one.
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].color == "green" and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
for choicesIndex in range(len(options)): # Select VP (civil) card. If there are multiple cards, select one.
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].getResource["type"] == "VP" and options[choicesIndex][3] != -1:
if not posChoice:
posChoice.append(choicesIndex)
elif (
options[posChoice[0]][0].getResource["amount"]
< options[choicesIndex][0].getResource["amount"]
):
posChoice = [choicesIndex]
if posChoice:
return random.choice(posChoice)
# play random non-discarded choice
return random.choice(nonDiscard)
else: # age 3. Simulate all plays, greedy by most points.
basePoints = player.endGameCal()
gainPoints = -1
selected = []
for choicesIndex in range(len(options)):
afterPlayer = deepcopy(player)
afterPlayer.hand = deepcopy(player.hand)
# print("HAND")
# print(len(afterPlayer.hand))
# print(choicesIndex)
# print(options[choicesIndex])
afterPlayer.playChosenCardFake(options[choicesIndex])
addPoints = afterPlayer.endGameCal() - basePoints
if addPoints < 0:
print("WRONG")
if addPoints > gainPoints:
selected = [choicesIndex]
gainPoints = addPoints
elif addPoints == gainPoints:
selected.append(choicesIndex)
if selected:
return random.choice(selected)
else:
return random.choice(range(len(options)))
class Human(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
return int(stdin.readline())
class RandomAI(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
        return random.choice(range(len(options)))
/MFD%20Floods-0.1.14.tar.gz/MFD Floods-0.1.14/bin/gdal2tiles.py
from __future__ import print_function, division
import math
from multiprocessing import Pool
from functools import partial
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id$"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
     EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tile_size=256):
"Initialize the TMS Global Mercator pyramid"
self.tile_size = tile_size
self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
# 156543.03392804062 for tile_size 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tile_size << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
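# Minimal usage sketch for the GlobalMercator conversions described above:
# WGS84 lat/lon -> EPSG:3857 meters -> covering TMS tile at a zoom level, plus
# the same tile in Google/XYZ numbering and as a Bing-style quadkey. The sample
# coordinates are illustrative only.
def _example_global_mercator(lat=50.0, lon=14.0, zoom=10):
    mercator = GlobalMercator()
    mx, my = mercator.LatLonToMeters(lat, lon)
    tx, ty = mercator.MetersToTile(mx, my, zoom)
    gx, gy = mercator.GoogleTile(tx, ty, zoom)
    quadkey = mercator.QuadTree(tx, ty, zoom)
    return (mx, my), (tx, ty), (gx, gy), quadkey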
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tile_size=256):
self.tile_size = tile_size
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
            # Adheres to the OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tile_size
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
            # Adheres to the OpenLayers, MapProxy, etc. default resolution for WMTS
self.resFact = 360.0 / self.tile_size
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tile_size * res - 180,
ty * self.tile_size * res - 90,
(tx + 1) * self.tile_size * res - 180,
(ty + 1) * self.tile_size * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
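# Minimal usage sketch for GlobalGeodetic: the tile covering a lon/lat point
# and its bounds, using the TMS-compatible two-tiles-at-zoom-0 layout. The
# sample coordinates are illustrative only.
def _example_global_geodetic(lon=14.0, lat=50.0, zoom=5):
    geodetic = GlobalGeodetic(tmscompatible=True)
    tx, ty = geodetic.LonLatToTile(lon, lat, zoom)
    return (tx, ty), geodetic.TileBounds(tx, ty, zoom)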
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tile_size=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tile_size = tile_size
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
        # Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tile_size or imagesize[1] > tile_size):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
        self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
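# Minimal usage sketch for the Zoomify tile tree above: build the pyramid for a
# hypothetical 4000x3000 pixel image and compute the relative path of the single
# tile at the smallest tier.
def _example_zoomify():
    zoomify = Zoomify(4000, 3000, tile_size=256, tileformat='jpg')
    return zoomify.numberOfTiers, zoomify.tilefilename(0, 0, 0)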
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def set_cache_max(cache_in_bytes):
# We set the maximum using `SetCacheMax` and `GDAL_CACHEMAX` to support both fork and spawn as multiprocessing start methods.
# https://github.com/OSGeo/gdal/pull/2112
os.environ['GDAL_CACHEMAX'] = '%d' % int(cache_in_bytes / 1024 / 1024)
gdal.SetCacheMax(cache_in_bytes)
def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tile_size' not in args:
args['tile_size'] = tile_size
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tile_size'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tile_size'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, cy)
s += """ </Document>
</kml>
"""
return s
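# Minimal sketch of calling generate_kml() for one mercator tile: tileswne is
# the SWNE-bounds callback (GlobalMercator.TileLatLonBounds here) and, for a
# concrete tile, only the url attribute of options is consulted; the bare-bones
# options object below is an illustrative stand-in for the parsed CLI options.
def _example_generate_kml(tx=553, ty=345, tz=10):
    mercator = GlobalMercator()
    opts = type("Opts", (), {"title": "example", "url": ""})()
    return generate_kml(tx, ty, tz, "png", 256, mercator.TileLatLonBounds, opts)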
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tile_size = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias' and numpy_available:
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0,
tile_size / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
WKT representation
Uses in priority the one passed in the command line arguments. If none was given, tries to
extract it from the input dataset
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
input_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
output_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
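# AutoCreateWarpedVRT builds a virtual, lazily warped view of the source; no full-size warped copy is written to disk.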
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
assert nodata_values != []
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
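# Warp options: INIT_DEST initialises destination pixels with the NODATA value, and
# UNIFIED_SRC_NODATA applies the source NODATA mask to all bands together.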
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
corrected_dataset = gdal.Open(vrt_string)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(corrected_dataset.GetMetadata("xml:VRT")[0])
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
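# Walk the VRT children to count the existing raster bands and find the position just after the
# last band, which is where the new alpha band will be inserted.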
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles datasets with 1 or 3 bands, i.e. without an alpha channel, in case the NODATA value
has not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_alpha_band_to_string_vrt(vrt_string)
warped_vrt_dataset = gdal.Open(vrt_string)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
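# Treat the last band as alpha (and exclude it from the count) when the dataset reports an alpha
# mask or has 2 (grey + alpha) or 4 (RGBA) bands.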
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
return dataset.RasterCount
def create_base_tile(tile_job_info, tile_detail):
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tile_size = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
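# One extra band in the output tile for alpha (transparency).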
cached_ds = getattr(threadLocal, 'cached_ds', None)
if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
ds = cached_ds
else:
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
threadLocal.cached_ds = ds
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tile_size, tile_size, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# Query is in 'nearest neighbour' but can be bigger than the tile_size
# We scale down the query to the tile_size by the supplied algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# Detect totally transparent tile and skip its creation
if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
return
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tile_size == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tile_size - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), tile_job_info.options
).encode('utf-8'))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ty, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
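# The query canvas is twice the tile size: up to four child tiles are pasted into it and then
# scaled down to a single parent tile.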
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (y, tile_job_info.tile_extension))
if not os.path.isfile(base_tile_path):
continue
dsquerytile = gdal.Open(
base_tile_path,
gdal.GA_ReadOnly)
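# Work out which quadrant of the 2x canvas this child tile occupies: the larger TMS y
# (further north) goes into the top rows, and the x offset follows analogously.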
if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
if tx:
tileposx = x % (2 * tx) * tile_job_info.tile_size
elif tx == 0 and x == 1:
tileposx = tile_job_info.tile_size
else:
tileposx = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
if children:
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ty)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="NODATA transparency value to assign to the input data")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-x", "--exclude",
action="store_true", dest="exclude_transparent",
help="Exclude transparent tiles from result tileset")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
nb_processes=1)
return p
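# Example invocation parsed by the options above (hypothetical file names):
#   gdal2tiles.py -p mercator -z 2-5 -w leaflet input.tif output_tiles/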
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if not args:
exit_with_error("You need to specify at least an input file as argument to the script")
if len(args) > 2:
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
# Directory named after the input file (without extension), created in the current directory
output_folder = os.path.splitext(os.path.basename(input_file))[0]
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'antialias' and not numpy_available:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
exclude_transparent = False
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
self.isepsg4326 = None
self.in_srs_wkt = None
# Tile format
self.tile_size = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read a bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
# How big should the query window be for scaling down
# Later on reset according to the chosen resampling algorithm
self.querysize = 4 * self.tile_size
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
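# 'near' needs no oversampled query window, 'bilinear' benefits from a 2x window;
# other resampling methods keep the 4x default set above.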
if self.options.resampling == 'near':
self.querysize = self.tile_size
elif self.options.resampling == 'bilinear':
self.querysize = self.tile_size * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
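# Accepts a single level ('10') or a range ('2-5'); padding with '' keeps the unpacking
# below safe when no maximum is given.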
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?" %
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile in ('mercator', 'geodetic'):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs ESPG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
gdal.GetDriverByName('VRT').CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator()
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
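# Native zoom: the smallest zoom level at which the raster fits into 2**zoom tiles of
# tile_size pixels per axis.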
self.nativezoom = int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size)))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tile_size
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tile_size * pixelsizex
east = west + self.tile_size * pixelsizex
south = self.ominy + y * self.tile_size * pixelsizex
north = south + self.tile_size * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tile_size, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't scale up by nearest neighbour; better to change the querysize
# to the native resolution (and return a smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tile_size in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tile_size
rx = (tx) * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tile_size)
wysize = int(rysize / float(tsize) * self.tile_size)
if wysize != self.tile_size:
wy = self.tile_size - wysize
# Read the source raster if anything is going inside the tile as per the computed
# geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tile_size,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
exclude_transparent=self.options.exclude_transparent,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For a given dataset and query in cartographic coordinates, returns parameters for ReadRaster()
in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds.
raises Gdal2TilesError if the dataset does not contain anything inside this geo_query
"""
geotran = ds.GetGeoTransform()
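# Small offsets (0.001 for the origin, 0.5 for the size) guard against floating point rounding
# when mapping geographic coordinates to integer pixel indices.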
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
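# If the query starts left of the raster, shift the write window right and shrink both
# windows proportionally.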
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tile_size, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity for whole overlay is then not changeable, either or...
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlat'] = (args['north'] + args['south']) / 2.
args['centerlon'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tile_size'] = self.tile_size # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" />
<script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==", {minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s", minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Map
var map = L.map('map', {
center: [%(centerlat)s, %(centerlon)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = "-1"
else:
args['tmsoffset'] = ""
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
if self.options.profile == 'mercator':
s += """
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>
""" % args
s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
if self.options.profile == 'mercator':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
elif self.options.profile == 'raster':
s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
if self.options.profile == 'mercator':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'raster':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
return s
def worker_tile_details(input_file, output_folder, options):
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return tile_job_info, tile_details
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
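
# With STEP = 2.5 the bar above prints three dots between decades, so a complete
# run writes: 0...10...20...30...40...50...60...70...80...90...100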
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single threaded version that stays clear of multiprocessing, for platforms that would not
support it
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
if getattr(threadLocal, 'cached_ds', None):
del threadLocal.cached_ds
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
# Make sure that all processes do not consume more than `gdal.GetCacheMax()`
gdal_cache_max = gdal.GetCacheMax()
gdal_cache_max_per_process = max(1024 * 1024, math.floor(gdal_cache_max / nb_processes))
set_cache_max(gdal_cache_max_per_process)
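    # e.g. a 512 MB GDAL block cache shared by 4 worker processes gives each
    # process 128 MB, and the per-process value never drops below 1 MB.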
pool = Pool(processes=nb_processes)
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = pool.apply(worker_tile_details, [input_file, output_folder, options])
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
for _ in pool.imap_unordered(partial(create_base_tile, conf), tile_details, chunksize=128):
if not options.verbose and not options.quiet:
progress_bar.log_progress()
pool.close()
pool.join() # Jobs finished
# Set the maximum cache back to the original value
set_cache_max(gdal_cache_max)
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main():
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
argv = gdal.GeneralCmdLineProcessor(sys.argv)
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
if __name__ == '__main__':
main()
# vim: set tabstop=4 shiftwidth=4 expandtab:
/IPRA-3.17.51-py3-none-any.whl/ipra/Model/Robot/baseRobot.py |
from abc import abstractmethod
from tkinter import *
#from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import time
import pandas as pd
from ipra.Utility.StringUtility import GetStringSingletion
from ipra.Logger.logger import Logger
import json
from selenium.webdriver.common.by import By
class BaseRobot:
STATUS_EXCEPTION = 0
STATUS_SCRAP_COMPLETE = 1
STATUS_REPORT_COMPLETE = 2
def __init__(self, policyList, frame, reportPath,inputPath,downloadReport=False):
self.policyList = policyList
self.frame = frame
self.frame.resetProgress()
self.isLogin = False
self.isStopped = False
self.buildReportQueue = []
self.buildHeaderQueue = []
self.buildReportThread = None
self.reportPath = reportPath
self.inputPath = inputPath
self.logger = Logger()
self.downloadReport = downloadReport
self.stringValue = GetStringSingletion()
def SearchByIdValue(self, soup, key):
result = soup.find_all(id=key)
result_soup = BeautifulSoup(str(result), 'lxml')
return result_soup
def SearchByHtmlTagClassValue(self, soup, tag, class_key):
result = soup.find_all(tag, attrs={'class': class_key})
result_soup = BeautifulSoup(str(result), 'lxml')
return result_soup
def SearchByHtmlTagValueKey(self, soup, tag, key, value):
result = soup.find_all(tag, attrs={key: value})
result_soup = BeautifulSoup(str(result), 'lxml')
return result_soup
def DoClickUntilNoException(self,xPath,timeout = 5):
isWaiting = True
counter = 0
while isWaiting:
try:
self.browser.find_element(By.XPATH,xPath).click()
isWaiting = False
except:
time.sleep(1)
if counter > timeout:
isWaiting = False
else:
counter = counter + 1
def setIsStopped(self, status):
self.isStopped = status
def getIsStopped(self):
return self.isStopped
def startBrowser(self):
appState = {
"recentDestinations": [
{
"id": "Save as PDF",
"origin": "local",
"account": ""
}
],
"selectedDestinationId": "Save as PDF",
"version": 2
}
profile = {
'printing.print_preview_sticky_settings.appState': json.dumps(appState),
"download.default_directory": self.reportPath.replace("/","\\"),
'savefile.default_directory': self.reportPath,
"directory_upgrade": True,
"download.prompt_for_download": False,
"plugins.always_open_pdf_externally": True,
'intl.accept_languages': 'zh,zh_TW'
}
chrome_options = webdriver.ChromeOptions()
#chrome_options = webdriver.EdgeOptions()
chrome_options.add_experimental_option('prefs', profile)
chrome_options.add_argument('--kiosk-printing')
chrome_options.add_argument("start-maximized")
chrome_options.add_argument('--disable-blink-features=AutomationControlled')
chrome_options.add_argument("--disable-site-isolation-trials")
#self.browser = webdriver.Edge(service=Service(EdgeChromiumDriverManager().install()),options=chrome_options)
self.browser = webdriver.Chrome(options=chrome_options)
@abstractmethod
def waitingLoginComplete(self):
pass
@abstractmethod
def scrapPolicy(self):
pass
@abstractmethod
def downloadPolicyReport(self,policy):
pass
@abstractmethod
def buildReport(self):
pass
@abstractmethod
def buildReportOnly(self):
pass
@abstractmethod
def buildReportHeaderFullFlow(self):
pass
@abstractmethod
def buildReportHeaderHalfFlow(self):
pass
def execReport(self):
self.buildReportOnly()
def execRobot(self):
self.startBrowser()
self.waitingLoginComplete()
self.buildReport()
self.scrapPolicy()
self.buildReportThread.join()
        self.browser.close()
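
# A minimal sketch of how a concrete robot is expected to fill in the abstract
# hooks above (portal URL, XPath and report logic are purely illustrative):
#
#   class ExampleInsurerRobot(BaseRobot):
#       def waitingLoginComplete(self):
#           self.browser.get("https://portal.example-insurer.com/login")
#           # wait until an element that only exists after login becomes clickable
#           self.DoClickUntilNoException("//a[@id='policy-menu']", timeout=300)
#           self.isLogin = True
#
#       def scrapPolicy(self):
#           for policy in self.policyList:
#               soup = BeautifulSoup(self.browser.page_source, 'lxml')
#               self.buildReportQueue.append(self.SearchByIdValue(soup, policy))
#
#       # ...implement the remaining abstract report-building hooks...
#
#   ExampleInsurerRobot(policyList, frame, reportPath, inputPath).execRobot()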
/CleanerVersion-2.1.1.tar.gz/CleanerVersion-2.1.1/versions/deletion.py |
from django.db.models.deletion import (
attrgetter, signals, sql, transaction,
CASCADE,
Collector,
)
import versions.models
class VersionedCollector(Collector):
"""
A Collector that can be used to collect and delete Versionable objects.
The delete operation for Versionable objects is Versionable._delete_at,
    which does not delete the record; it updates its version_end_date to be
the timestamp passed to the delete() method.
Since non-versionable and versionable objects can be related, the delete()
method handles both of them. The standard Django behaviour is kept for
non-versionable objects. For versionable objects, no pre/post-delete
signals are sent. No signal is sent because the object is not being
removed from the database. If you want the standard signals to be sent,
or custom signals, create a subclass of this class and override
versionable_pre_delete() and/or versionable_post_delete(), and in your
settings file specify the dotted path to your custom class as a
string, e.g.:
VERSIONED_DELETE_COLLECTOR_CLASS =
'myapp.deletion.CustomVersionedCollector'
"""
def can_fast_delete(self, objs, from_field=None):
"""Do not fast delete anything"""
return False
def is_versionable(self, model):
return hasattr(model, 'VERSION_IDENTIFIER_FIELD') and \
hasattr(model, 'OBJECT_IDENTIFIER_FIELD')
def delete(self, timestamp):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until
# the end of a transaction.
self.sort()
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals, but not for versionables
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
if self.is_versionable(model):
# By default, no signal is sent when deleting a
# Versionable.
self.versionable_pre_delete(obj, timestamp)
else:
signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
# do not do fast deletes
if self.fast_deletes:
raise RuntimeError("No fast_deletes should be present; "
"they are not safe for Versionables")
# update fields
for model, instances_for_fieldvalues in self.field_updates.items():
id_map = {}
for (field, value), instances in \
instances_for_fieldvalues.items():
if self.is_versionable(model):
# Do not set the foreign key to null, which can be the
# behaviour (depending on DB backend) for the default
# CASCADE on_delete method.
# In the case of a SET.. method, clone before
# changing the value (if it hasn't already been cloned)
updated_instances = set()
if not (isinstance(
field,
versions.fields.VersionedForeignKey) and
field.remote_field.on_delete == CASCADE):
for instance in instances:
# Clone before updating
cloned = id_map.get(instance.pk, None)
if not cloned:
cloned = instance.clone()
id_map[instance.pk] = cloned
updated_instances.add(cloned)
# TODO: instance should get updated with new
# values from clone ?
instances_for_fieldvalues[
(field, value)] = updated_instances
# Replace the instances with their clones in self.data, too
model_instances = self.data.get(model, {})
for index, instance in enumerate(model_instances):
cloned = id_map.get(instance.pk)
if cloned:
self.data[model][index] = cloned
query = sql.UpdateQuery(model)
for (field, value), instances in \
instances_for_fieldvalues.items():
if instances:
query.update_batch([obj.pk for obj in instances],
{field.name: value}, self.using)
# reverse instance collections
for instances in self.data.values():
instances.reverse()
# delete instances
for model, instances in self.data.items():
if self.is_versionable(model):
for instance in instances:
self.versionable_delete(instance, timestamp)
if not model._meta.auto_created:
# By default, no signal is sent when deleting a
# Versionable.
self.versionable_post_delete(instance, timestamp)
else:
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
query.delete_batch(pk_list, self.using)
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model, instance=obj, using=self.using
)
# update collected instances
for model, instances_for_fieldvalues in self.field_updates.items():
for (field, value), instances in instances_for_fieldvalues.items():
for obj in instances:
setattr(obj, field.attname, value)
# Do not set Versionable object ids to None, since they still do have
# an id.
# Instead, set their version_end_date.
for model, instances in self.data.items():
is_versionable = self.is_versionable(model)
for instance in instances:
if is_versionable:
setattr(instance, 'version_end_date', timestamp)
else:
setattr(instance, model._meta.pk.attname, None)
def related_objects(self, related, objs):
"""
Gets a QuerySet of current objects related to ``objs`` via the
relation ``related``.
"""
from versions.models import Versionable
related_model = related.related_model
if issubclass(related_model, Versionable):
qs = related_model.objects.current
else:
qs = related_model._base_manager.all()
return qs.using(self.using).filter(
**{"%s__in" % related.field.name: objs}
)
def versionable_pre_delete(self, instance, timestamp):
"""
Override this method to implement custom behaviour. By default,
does nothing.
:param Versionable instance:
:param datetime timestamp:
"""
pass
def versionable_post_delete(self, instance, timestamp):
"""
Override this method to implement custom behaviour. By default,
does nothing.
:param Versionable instance:
:param datetime timestamp:
"""
pass
def versionable_delete(self, instance, timestamp):
"""
        Soft-deletes the instance, setting its version_end_date to timestamp.
Override this method to implement custom behaviour.
:param Versionable instance:
:param datetime timestamp:
"""
        instance._delete_at(timestamp, using=self.using)
/NC_distributions-0.1.tar.gz/NC_distributions-0.1/NC_distributions/Binomialdistribution.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) the total number of trials
"""
# A binomial distribution is defined by two variables:
# the probability of getting a positive outcome
# the number of trials
# If you know these two values, you can calculate the mean and the standard deviation
#
# For example, if you flip a fair coin 25 times, p = 0.5 and n = 25
# You can then calculate the mean and standard deviation with the following formula:
# mean = p * n
# standard deviation = sqrt(n * p * (1 - p))
#
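    # For that fair-coin example the numbers work out to:
    #     mean = 0.5 * 25 = 12.5
    #     standard deviation = sqrt(25 * 0.5 * (1 - 0.5)) = sqrt(6.25) = 2.5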
def __init__(self, prob=.5, size=20):
# TODO: store the probability of the distribution in an instance variable p
# TODO: store the size of the distribution in an instance variable n
# TODO: Now that you know p and n, you can calculate the mean and standard deviation
# Use the calculate_mean() and calculate_stdev() methods to calculate the
# distribution mean and standard deviation
#
# Then use the init function from the Distribution class to initialize the
# mean and the standard deviation of the distribution
#
# Hint: You need to define the calculate_mean() and calculate_stdev() methods
# farther down in the code starting in line 55.
# The init function can get access to these methods via the self
# variable.
self.p = prob
self.n = size
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = sum(self.data)/len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
return self.p, self.n
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# Use the matplotlib package to plot a bar chart of the data
# The x-axis should have the value zero or one
# The y-axis should have the count of results for each case
#
# For example, say you have a coin where heads = 1 and tails = 0.
# If you flipped a coin 35 times, and the coin landed on
# heads 20 times and tails 15 times, the bar chart would have two bars:
# 0 on the x-axis and 15 on the y-axis
# 1 on the x-axis and 20 on the y-axis
# Make sure to label the chart with a title, x-axis label and y-axis label
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the binomial distribution.
Args:
k (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# Calculate the probability density function for a binomial distribution
# For a binomial distribution with n trials and probability p,
# the probability density function calculates the likelihood of getting
# k positive outcomes.
#
# For example, if you flip a coin n = 60 times, with p = .5,
# what's the likelihood that the coin lands on heads 40 out of 60 times?
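        # (Working that example through this formula: a = 60! / (40! * 20!) ≈ 4.19e15,
        # b = 0.5**40 * 0.5**20 = 0.5**60 ≈ 8.67e-19, so pdf(40) ≈ 0.0036,
        # i.e. roughly a 0.4% chance.)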
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
# Use a bar chart to plot the probability density function from
# k = 0 to k = n
# Hint: You'll need to use the pdf() method defined above to calculate the
# density function for every value of k.
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
# Define addition for two binomial distributions. Assume that the
# p values of the two distributions are the same. The formula for
# summing two binomial distributions with different p values is more complicated,
# so you are only expected to implement the case for two distributions with equal p.
# the try, except statement above will raise an exception if the p values are not equal
result = Binomial()
# The new n value is the sum of the n values of the two distributions.
result.n = self.n + other.n
# When adding two binomial distributions, the p value remains the same
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}, p {}, n {}".format(self.mean, self.stdev, self.p, self.n) | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ats/model/gender_enum.py |
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
from MergePythonSDK.shared.model_utils import MergeEnumType
class GenderEnum(ModelNormal, MergeEnumType):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'MALE': "MALE",
'FEMALE': "FEMALE",
'NON-BINARY': "NON-BINARY",
'OTHER': "OTHER",
'DECLINE_TO_SELF_IDENTIFY': "DECLINE_TO_SELF_IDENTIFY",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
defined_types = {
'value': (str,),
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, value, *args, **kwargs): # noqa: E501
"""GenderEnum - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, value, *args, **kwargs): # noqa: E501
"""GenderEnum - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
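
# Illustrative usage of the generated enum wrapper above (not itself part of the
# generated code): construct it with one of the values listed in allowed_values.
#
#   gender = GenderEnum("FEMALE")
#   gender.value   # "FEMALE"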
/Homevee_Dev-0.0.0.0-py3-none-any.whl/Homevee/Updater/__init__.py |
import json
import os
import urllib
from _thread import start_new_thread
import pip
from packaging import version
from Homevee.Item.Status import *
from Homevee.Utils import Constants, NotificationManager
from Homevee.Utils.Database import Database
def get_homevee_update_version():
installed_version = Constants.HOMEVEE_VERSION_NUMBER
newest_version = get_newest_version()
    if(newest_version is None):
        # Version lookup failed (e.g. PyPI unreachable): report "no update" so
        # callers that test `is not None` do not treat the failure as an update.
        return None
if(version.parse(newest_version) > version.parse(installed_version)):
return newest_version
else:
return None
def get_newest_version():
url = "https://pypi.org/pypi/Homevee/json"
try:
response = urllib.request.urlopen(url).read()
response = response.decode('utf-8')
response_json = json.loads(response)
version = response_json['info']['version']
return version
except:
return None
def check_for_updates():
new_version = get_homevee_update_version()
return {
'updates':{
'current_version': Constants.HOMEVEE_VERSION_NUMBER,
'new_version': new_version,
'update_available': (new_version is not None),
'changelog': "Changelog blabla..." #TODO add changelog or link to actual changelog
}
}
'''
Updates the Homevee PIP-Package
Returns true if update was successful,
returns false if there was an error
'''
def do_homevee_update(user, db):
if(not user.has_permission("admin")):
return {'error': "nopermission"}
start_new_thread(update_thread, ())
return Status(type=STATUS_OK).get_dict()
def update_thread():
new_version = get_homevee_update_version()
try:
pip.main(["install", "--upgrade", "Homevee"])
except:
return False
    # Upgrade the database
Database().upgrade()
    # TODO: localize these texts
title = "Update"
body = "Update auf Version " + new_version
# Send notification to admin
NotificationManager().send_notification_to_admin(title, body, Database.get_database_con())
# Reboot the system after the update
    os.system('reboot')
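
# For reference, get_newest_version() above reads the public PyPI JSON API; the
# relevant fragment of the response looks roughly like this (abridged, the
# version number is only an example):
#
#   {"info": {"name": "Homevee", "version": "1.2.3", ...}, ...}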
/MicroStark-0.0.0.0.1b1.tar.gz/MicroStark-0.0.0.0.1b1/stark/types/message.py |
import os
import asyncio
from .user import User
from .chat import Chat
from ..client import Stark
from stark.utils.patch import patch
from pyrogram.enums import ParseMode
from pyrogram.errors import MessageTooLong
from pyrogram.types import Message, MessageEntity
@patch(Message)
class Message(Message):
_client: Stark
from_user: User
chat: Chat
async def tell(
self,
text: str,
format: str | tuple = None,
del_in: int = 0,
quote: bool = True,
parse_mode: ParseMode = ParseMode.DEFAULT,
entities: list["MessageEntity"] = None,
disable_web_page_preview: bool = True,
disable_notification: bool = None,
reply_to_message_id: int = None,
schedule_date: int = None,
reply_markup=None
) -> "Message":
try:
if self.from_user.is_self:
reply = await self.edit(
str(text),
parse_mode=parse_mode,
entities=entities,
disable_web_page_preview=disable_web_page_preview,
reply_markup=reply_markup,
)
else:
reply = await self.reply(
str(text),
quote=quote,
parse_mode=parse_mode,
entities=entities,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
schedule_date=schedule_date,
disable_web_page_preview=disable_web_page_preview,
reply_markup=reply_markup,
)
except MessageTooLong:
reply = await self.reply(
"Sending as document...",
quote=quote,
parse_mode=parse_mode,
entities=entities,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
schedule_date=schedule_date,
disable_web_page_preview=disable_web_page_preview,
reply_markup=reply_markup,
)
file = f'{reply.message_id}.txt'
with open(file, 'w+', encoding="utf-8") as f:
f.write(text)
await reply.delete()
reply = await self.reply_document(
document=file,
caption="Output",
quote=quote,
parse_mode=parse_mode,
caption_entities=entities,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
schedule_date=schedule_date,
reply_markup=reply_markup,
)
os.remove(file)
if del_in:
await asyncio.sleep(del_in)
await reply.delete()
return reply
@property
def args(self, split: str = " ") -> list[str]:
"""List arguments passed in a message. Removes first word (the command itself)"""
args: list[str] = self.text.markdown.split(split)
args.pop(0)
if args:
args[0] = args[0].strip()
if "\n" in args[0]:
wtf = args[0]
f, s = wtf.split("\n", 1)
args[0] = f
args.insert(1, s)
return args
@property
def input(self) -> str | None:
"""Input passed in a message. Removes first word (the command itself)"""
i = self.text.markdown.split(" ", 1)
if len(i) > 1 and i[1]:
return i[1]
return
@property
def ref(self) -> int | str | None:
"""Returns the referred user's id or username. To get the full user, use method `get_ref_user`"""
if self.reply_to_message:
return self.reply_to_message.from_user.id
args = self.args
if not args:
return
return args[0] if not args[0].isdigit() else int(args[0])
async def get_ref_user(self) -> User | None:
"""Returns the full referred user. To get only user id or username, use property `ref` as it's faster."""
if self.reply_to_message:
return self.reply_to_message.from_user
args = self.args
if not args:
return
user = args[0] if not args[0].isdigit() else int(args[0])
user = await self._client.get_users(user)
return user
async def get_ref_chat(self) -> Chat | None:
args = self.args
if not args:
return
chat = args[0] if not args[0].isdigit() else int(args[0])
chat = await self._client.get_chat(chat)
return chat
async def get_aor(self) -> str:
"""Get arg or reply text"""
if self.reply_to_message:
return self.reply_to_message.text.markdown
else:
return self.input
@property
def client(self):
return self._client
@property
def c(self):
        return self._client
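
# Inside any handler that receives this patched Message, the helpers above can be
# used directly (illustrative; `message` stands for the incoming update):
#
#   text = message.input                     # everything after the command word
#   referred = await message.get_ref_user()  # user from the reply or first argument
#   await message.tell("Done!", del_in=10)   # edits own messages, replies otherwise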
/CloudRepoAnalytics-2.0.0-py3-none-any.whl/flaskr/static/piechart.min.js | (function(global,factory){if(typeof module==='object'&&typeof module.exports==='object'){var wrapper=function(w){if(!w.document){throw Error('AnyChart requires a window with a document');}factory.call(w,w,w.document);w.acgraph.isNodeJS=true;return w.anychart;};module.exports=global.document?wrapper(global):wrapper;}else{factory.call(global,window,document)}})(typeof window!=='undefined'?window:this,function(window,document,opt_noGlobal){var $,_,$_=this.anychart;if($_&&(_=$_._)){$=$_.$}else{throw Error('anychart-base.min.js module should be included first');$={};_={}}if(!_.pie){_.pie=1;(function($){var BH=function(a,b,c,d){var e;a>b&&(e=a,a=b,b=e);c>d&&(e=c,c=d,d=e);return Math.max(a,c)<=Math.min(b,d)},CH=function(){return $.Gc("iPad")||$.Gc("Android")&&!$.Gc("Mobile")||$.Gc("Silk")},DH=function(){return!CH()&&($.Gc("iPod")||$.Gc("iPhone")||$.Gc("Android")||$.Gc("IEMobile"))},EH=function(){return new $.lw},FH=function(a){$.uu.call(this,a)},GH=function(a,b,c,d){var e=!1,f,h;if(!d)return!1;f=0;for(h=d.length;f<h-1;f+=2)if(!e)var e=d[f],k=d[f+1],e=(a-e)*(a-e)+(b-k)*(b-k)>c*c;return e},HH=function(a,
b,c,d,e){var f=!1,h,k,l,m;if(!e)return!1;l=0;for(m=e.length;l<m-1;l+=2)if(h=l==m-2?0:l+2,k=l==m-2?1:l+3,!f){var f=e[l],p=e[l+1];h=e[h];k=e[k];f=BH(f,h,a,c)&&BH(p,k,b,d)&&0>=((h-f)*(b-p)-(k-p)*(a-f))*((h-f)*(d-p)-(k-p)*(c-f))&&0>=((c-a)*(p-b)-(d-b)*(f-a))*((c-a)*(k-b)-(d-b)*(h-a))}return f},IH=function(a,b,c){$.hw.call(this,null,[],[],b,c);this.ya=a},KH=function(a,b,c,d,e){$.rq.call(this,a);this.B=b;this.j=c||null;this.K=d||function(a,b){a.value+=Math.max(0,+b)||0;return a};this.ma=e||function(){return{value:0}};
this.g=new JH;this.g.G(0,"groupedPoint",!0)},JH=function(){$.hq.call(this);$.xq(this);this.j=void 0},MH=function(a,b,c){$.hw.call(this,null,[],[],b,c);this.ya=a;this.F=LH(this.ya);this.J=this.ya.N("connectorStroke")},NH=function(a,b){$.Ax.call(this,a,b)},PH=function(a,b){$.Pu.call(this);$.T(this);this.Y=this.Sa=this.P=this.o=this.Oc=null;this.Ob={};this.Ie=function(){var a=this.sourceColor,b=this.aquaStyleObj;return{keys:[{offset:0,color:$.Pl(a,.5)},{offset:.95,color:$.Ql(a,.4)},{offset:1,color:$.Ql(a,
.4)}],cx:.5,cy:.5,fx:b.fx,fy:b.fy,mode:b.mode}};this.F=[];this.state=new FH(this);this.data(a||null,b);this.D(4294967295);$.vp(this.J,[["overlapMode",4096,1],["radius",4,1],["innerRadius",4,1],["startAngle",4112,1],["explode",4116,1],["sort",16,1,0,function(){OH(this)}],["outsideLabelsSpace",4100,9],["insideLabelsOffset",4100,9],["connectorLength",4100,9],["outsideLabelsCriticalAngle",4096,1],["forceHoverLabels",4096,1],["connectorStroke",16,1],["mode3d",4112,1]]);var c={};$.vp(c,[["fill",528,1],
["stroke",528,1],["hatchFill",528,1],["labels",0,0]]);this.ca=new $.pw(this,c,$.cm,[[1,"fill",function(a){var b=!1;$.B(a[0])&&(a[0]=a[0].toLowerCase(),b="aquastyle"==a[0]);return this.N("mode3d")&&b?this.N("fill"):$.D(a[0])||b?a[0]:$.wc.apply(null,a)}]]);this.ca.ua.labelsFactoryConstructor=EH;$.iq(this.ca,"labelsAfterInitCallback",function(a){$.S(a,this.sd,this);a.mb(this);this.D(4096,1)});c={};$.vp(c,[["fill",16,1],["stroke",16,1],["hatchFill",0,0],["labels",0,0]]);this.Xb=new $.pw(this,c,1);this.Xb.ua.labelsFactoryConstructor=
EH;this.ka(!1)},OH=function(a){$.K(a.Na);delete a.Ha;a.Na=Cca(a,a.Sa);$.S(a.Na,a.c3,a);$.L(a,a.Na);a.D(13104,17)},Cca=function(a,b){if(a.Oc){var c=b;b=new KH(b,"value",a.Oc,void 0,function(){return{value:0}});$.L(c,b);b.bN=!0}c=a.N("sort");"none"!=c&&(b="asc"==c?b.sort("value",function(a,b){return a-b}):b.sort("value",function(a,b){return b-a}),b.bN=!0);return b},QH=function(a,b,c){if($.J(a.o,b))c&&a.o.X(c);else{var d=!!a.o;$.K(a.o);a.o=new b;c&&a.o.X(c);$.S(a.o,a.WP,a);$.L(a,a.o);d&&a.D(528,1)}},
SH=function(a,b,c){if(!a)return $.Vl;var d=b+"|"+a+"|"+c,e=RH[d];if(!e){switch(b){case 2:e=$.Xl;break;case 3:e=$.Yl;break;default:case 1:e=$.Zl}RH[d]=e=$.va(Dca,a,e,3==b,c)}return e},Dca=function(a,b,c,d,e,f,h){var k;if(f!=$.cm&&d&&(k=e.Si(a,f,e.la(),b,!1,void 0,h),c&&!0===k&&(k=b(e.Ci())),$.n(k))){if(!$.D(k))return k;if(c)return c=e.Tk(h),b(k.call(c,c))}a=e.Si(a,0,e.la(),b,!1,void 0,h);d=$.B(a)&&"aquastyle"==a;c&&!0===a&&(a=b(e.Ci()));$.D(a)&&(c=c?e.Tk(h):e.Yi(void 0,h),a=b(a.call(c,c)));d&&(c=e.la(),
c={aquaStyleObj:e.Ob,sourceColor:e.Mq().pd(c.sa())},a=e.Ie.call(c));k&&(c=e.Yi(a,h),a=b(k.call(c,c)));return a},Eca=function(a,b){var c=Math.min(b.width,b.height);a.fh=LH(a)&&a.labels().enabled()?$.M(a.N("outsideLabelsSpace"),c):0;a.b=$.M(a.N("radius"),c-a.fh);a.K=$.M(a.N("connectorLength"),a.b);var d=a.N("innerRadius");a.Ua=$.D(d)?d(a.b):$.M(d,a.b);a.B=$.M(a.N("explode"),c);a.Fb=b.left+b.width/2;a.Ab=b.top+b.height/2;a.de=new $.I(a.Fb-a.b,a.Ab-a.b,2*a.b,2*a.b);var d=$.H(-145),e=Math.min(b.width,
b.height)/2,c=.5*e*Math.cos(d)/b.width+.5,d=.5*e*Math.sin(d)/b.height+.5;0>b.width&&(b.width=0);0>b.height&&(b.height=0);a.Ob.fx=!(0,window.isNaN)(c)&&(0,window.isFinite)(c)?c:0;a.Ob.fy=!(0,window.isNaN)(d)&&(0,window.isFinite)(d)?d:0;a.Ob.mode=b;c=a.labels();$.T(c);c.Dd(a.Fb);c.Ed(a.Ab);$.mw(c,a.b);c.Wd(a.N("startAngle"));c.eh(360);c.oa(a.de);c.ka(!1);a.Kb().labels().oa(a.de)},TH=function(a){return $.A(a)&&a.hasOwnProperty("mode")&&a.hasOwnProperty("cx")},UH=function(a){a=null===a?window.NaN:+a;
return!($.C(a)&&!(0,window.isNaN)(a)&&0<a)},VH=function(a,b,c,d){var e=a.labels();$.T(e);e.fontOpacity(b);e.ea();e.ka(!1);if(d&&a.g)for(var f in a.g)a.g.hasOwnProperty(f)&&a.g[f].stroke($.Tl(a.N("connectorStroke"),c))},YH=function(a,b){var c=a.la(),d=c.sa(),e=c.G("start"),f=c.G("sweep"),h=!!c.G("exploded")&&1!=c.Ib(),k,l;b?(k=c.G("slice"),l=c.G("hatchSlice"),k.clear(),l&&l.clear()):(k=$.Ty(a.ha),c.G("slice",k),l=$.Ty(a.Ba),c.G("hatchSlice",l));h?(h=e+f/2,k=$.Zi(k,a.Fb+a.B*Math.cos($.H(h)),a.Ab+a.B*
Math.sin($.H(h)),a.b,a.Ua,e,f)):k=$.Zi(k,a.Fb,a.Ab,a.b,a.Ua,e,f);k.tag={ia:a,index:d};c=$.zu(a.state,c.sa());WH(a,c);l&&(l.jd(k.O()),l.tag={ia:a,index:d},XH(a,c))},Fca=function(a){var b=a.la(),c=b.sa(),d=b.G("start"),b=b.G("sweep"),e=d+b;if($.n(d)&&$.n(b)&&b){var f=d+b/2,h=a.B*Math.cos($.H(f)),f=.45*a.B*Math.sin($.H(f));a.F.push({index:c,type:"top",start:d,KA:b,Sy:h,Ty:f});if(360!=Math.abs(b)){var k;k=$.H(d);var l=$.Om(Math.cos(k),7);k=$.Om(Math.sin(k),7);var m=ZH(l,k);((l||1!=Math.abs(k))&&3==m||
2==m)&&a.F.push({index:c,type:"start",angle:d,Sy:h,Ty:f});k=$.H(e);l=$.Om(Math.cos(k),7);k=$.Om(Math.sin(k),7);m=ZH(l,k);((l||1!=Math.abs(k))&&1==m||4==m)&&a.F.push({index:c,type:"end",angle:e,Sy:h,Ty:f})}var p;p=d;m=e;p==m?l=!1:(p=$.H(p),m=$.H(m),l=$.Om(Math.cos(p),7),k=$.Om(Math.cos(m),7),p=ZH(l,Math.sin(p)),m=ZH(k,Math.sin(m)),l=1==p||2==p?!0:3==p?1==m||2==m?!0:3==m?l>=k:!1:4==p?4==m?l>=k:!0:!1);if(l){k=$.Om(Math.cos($.H(d)),7);m=$.Om(Math.cos($.H(e)),7);p=ZH(k,Math.sin($.H(d)));var q=ZH(m,Math.sin($.H(e))),
l=[];if(1==p)switch(q){case 1:k>=m?l.push({start:d,end:e}):(l.push({start:d,end:180,Jj:!0}),l.push({start:360,end:e}));break;case 2:l.push({start:d,end:e,Jj:!0});break;case 3:case 4:l.push({start:d,end:180,Jj:!0})}else if(2==p)switch(q){case 1:l.push({start:d,end:180});l.push({start:360,end:e});break;case 2:k>=m?l.push({start:d,end:e}):(l.push({start:d,end:180}),l.push({start:360,end:e,Jj:!0}));break;case 3:case 4:l.push({start:d,end:180})}else if(3==p)switch(q){case 1:l.push({start:360,end:e});break;
case 2:l.push({start:360,end:e,Jj:!0});break;case 3:k>=m&&l.push({start:0,end:180,Jj:!0})}else if(4==p)switch(q){case 1:l.push({start:360,end:e});break;case 2:l.push({start:360,end:e,Jj:!0});break;case 3:l.push({start:360,end:180,Jj:!0});break;case 4:k>=m&&l.push({start:0,end:180,Jj:!0})}k=a.F.length;m=l.length;a.F.length=k+m;for(p=0;p<m;p++)l[p].index=c,l[p].type="front",l[p].KA=b,l[p].Sy=h,l[p].Ty=f,a.F[k+p]=l[p]}p=d;m=e;p!=m&&a.Ua?(p=$.H(p),m=$.H(m),l=$.Om(Math.cos(p),7),k=$.Om(Math.cos(m),7),
p=ZH(l,Math.sin(p)),m=ZH(k,Math.sin(m)),l=3==p||4==p?!0:1==p?3==m||4==m?!0:1==m?l<=k:!1:2==p?2==m?l<=k:!0:!1):l=!1;if(l){k=$.Om(Math.cos($.H(d)),7);m=$.Om(Math.cos($.H(e)),7);p=ZH(k,Math.sin($.H(d)));q=ZH(m,Math.sin($.H(e)));l=[];if(1==p)switch(q){case 1:k<=m&&l.push({start:180,end:360});break;case 3:l.push({start:180,end:e});break;case 4:l.push({start:180,end:e,Jj:!0})}else if(2==p)switch(q){case 1:l.push({start:180,end:360,Jj:!0});break;case 2:k<=m&&l.push({start:180,end:360,Jj:!0});break;case 3:l.push({start:180,
end:e});break;case 4:l.push({start:180,end:e,Jj:!0})}else if(3==p)switch(q){case 1:case 2:l.push({start:d,end:360,Jj:!0});break;case 3:k>=m?(l.push({start:d,end:360}),l.push({start:180,end:e})):l.push({start:d,end:e});break;case 4:l.push({start:d,end:e,Jj:!0})}else if(4==p)switch(q){case 1:case 2:l.push({start:d,end:360});break;case 3:l.push({start:d,end:360});l.push({start:180,end:e});break;case 4:k>=m?(l.push({start:d,end:360}),l.push({start:180,end:e})):l.push({start:d,end:e})}k=a.F.length;m=l.length;
a.F.length=k+m;for(p=0;p<m;p++)l[p].index=c,l[p].type="back",l[p].KA=b,l[p].Sy=h,l[p].Ty=f,a.F[k+p]=l[p]}}},bI=function(a,b,c){var d=0,e=a.F.length,f;if($.n(b))for(d=0;d<e;d++)f=a.F[d],f.index==b&&$H(a,f,c);else{for(d=0;d<e;d++)switch(f=a.F[d],f.type){case "top":f.AA=1;break;case "front":f.AA=f.Jj?1:$.Om(Math.sin($.H(aI(f.start,f.end))),7);break;case "back":f.AA=f.Jj?-1:$.Om(Math.sin($.H(aI(f.start,f.end))),7);break;default:f.AA=$.Om(Math.sin($.H(f.angle)),7)}a.F.sort(function(a,b){return a.AA-b.AA});
for(d=0;d<e;d++)$H(a,a.F[d])}},$H=function(a,b,c){var d=a.la();d.select(b.index);var e=!!d.G("exploded")&&1!=d.Ib(),f=a.Fb,h=a.Ab;e&&(f+=b.Sy,h+=b.Ty);var k=a.b,e=a.Ua,d=$.zu(a.state,d.sa());switch(b.type){case "top":var l=b.start;b=b.KA;0>k&&(k=0);0>e&&(e=0);if(k<e)var m=k,k=e,e=m;b=$.Hb(b,-360,360);c=cI(a,"topPath",c);0>=e?360==Math.abs(b)?c.Wc(f,h,k,.45*k,l,b,!1):c.moveTo(f,h).Wc(f,h,k,.45*k,l,b,!0).close():(m=360>Math.abs(b),c.Wc(f,h,k,.45*k,l,b).Wc(f,h,e,.45*e,l+b,-b,m),m&&c.close());dI(a,"topPath",
d);break;case "front":var e=f,f=h,p=b.start,m=b.end,q=b.KA;b="frontPath"+p;c=cI(a,b,c);h=.45*k;l=.2*a.b;m<p&&(m+=360);360==Math.abs(q)&&(p=0,m=180);q=$.H(p);p=$.H(m);m=e+k*+Math.cos(q).toFixed(5);q=f+h*+Math.sin(q).toFixed(5);e+=k*+Math.cos(p).toFixed(5);f+=h*+Math.sin(p).toFixed(5);c.moveTo(m,q);c.ie(e,f,k,h,!1,!0);c.lineTo(e,f+l);c.ie(m,q+l,k,h,!1,!1);c.lineTo(m,q);c.close();dI(a,b,d);break;case "back":k=f;f=h;p=b.start;m=b.end;q=b.KA;b="backPath"+p;c=cI(a,b,c);h=.45*e;l=.2*a.b;m<p&&(m+=360);360==
Math.abs(q)&&(p=180,m=0);q=$.H(p);p=$.H(m);m=k+e*Math.cos(q);q=f+h*Math.sin(q);k+=e*Math.cos(p);f+=h*Math.sin(p);c.moveTo(m,q);c.ie(k,f,e,h,!1,!0);c.lineTo(k,f+l);c.ie(m,q+l,e,h,!1,!1);c.lineTo(m,q);c.close();dI(a,b,d);break;case "start":eI(a,"startPath",f,h,k,e,b.angle,d,c);break;case "end":eI(a,"endPath",f,h,k,e,b.angle,d,c)}},cI=function(a,b,c){var d=a.la(),e="hatch"+String(b.charAt(0)).toUpperCase()+b.substr(1);c?(c=d.G(b),a=d.G(e),c.clear(),a&&a.clear()):(c=$.Ty(a.ha),d.G(b,c),a=$.Ty(a.ha),d.G(e,
a));return c},fI=function(a,b){var c=a.la(),d=c.sa(),e=c.get("normal"),f=$.so($.n(e)?e.fill:void 0,c.get("fill"),a.fill());b&1?(e=c.get("hovered"),c=$.so($.n(e)?e.fill:void 0,c.get("hoverFill"),f),f=a.DI(c,f)):f=a.DI(f);var h;$.B(f)&&(h=$.Ll(f));var d=a.Mq().pd(d),k;$.A(d)&&(d.color?k=$.Ll(d.color):d.keys&&d.keys.length&&(k=$.Ll(d.keys[0].color)));k=k?k.Qf:d;return h?h.Qf:k},dI=function(a,b,c){var d=a.la(),e=d.sa(),f=fI(a,c),h=$.El(f),k=d.G(b);k.tag={ia:a,index:e};var l,m=$.Il(h,.3),p=$.Hl([255,255,
255],h,.1),q=$.uc($.Hl(h,m,.7)),p=$.uc($.Hl(m,p,.1)),r=$.uc($.Hl(h,m,.8)),t=$.uc($.Hl(h,m,.2)),h=$.uc($.Hl(h,m,.1)),m=!!(c&1);"topPath"==b?l={angle:-50,keys:[{position:0,opacity:1,color:m?$.Pl(f,.3):f},{position:1,opacity:1,color:m?$.Pl(q,.2):q}]}:$.xa(b,"frontPath")?l={angle:45,keys:[{position:0,opacity:1,color:m?$.Pl(f,.2):$.Pl(f,.1)},{position:.19,opacity:1,color:m?$.Pl(p,.2):p},{position:1,opacity:1,color:m?$.Pl(r,.2):r}]}:l=$.xa(b,"backPath")?m?$.Pl(t,.2):t:m?$.Pl(h,.2):h;k.fill(l);k.stroke(l);
b="hatch"+String(b.charAt(0)).toUpperCase()+b.substr(1);if(d=d.G(b))d.jd(k.O()),d.tag={ia:a,index:e},XH(a,c,b)},ZH=function(a,b){return 0<=a&&0<=b?1:0>=a&&0<=b?2:0>=a&&0>b?3:4},eI=function(a,b,c,d,e,f,h,k,l){var m=$.H(h);h=.2*a.b;var p=c+f*Math.cos(m);c+=e*Math.cos(m);f=d+.45*f*Math.sin(m);d+=.45*e*Math.sin(m);l=cI(a,b,l);l.moveTo(p,f);l.lineTo(c,d);l.lineTo(c,d+h);l.lineTo(p,f+h);l.lineTo(p,f);l.close();dI(a,b,k)},aI=function(a,b){b<a&&(b+=360);return(a+b)/2},jI=function(a,b,c){if(LH(a)){var d=a.la(),
e=!!(b&1),f=d.get("normal"),f=$.n(f)?f.label:void 0,h=d.get("hovered"),h=$.n(h)?h.label:void 0,f=$.so(f,d.get("label")),h=e?$.so(h,d.get("hoverLabel")):null,k=d.sa(),l=a.Kb().labels(),m=e?l:null;b=a.labels().Yd(k);var p=f&&$.n(f.enabled)?f.enabled:null,q=h&&$.n(h.enabled)?h.enabled:null,e=e?null===q?null===l.enabled()?null===p?b&&$.n(b.enabled())?b.enabled():a.labels().enabled():p:l.enabled():q:null===p?b&&$.n(b.enabled())?b.enabled():a.labels().enabled():p,p=gI(a,!0),q=hI(a);if(e){if(l=!b)b=a.labels().add(p,
q,k);k=b.enabled();b.ig();$.dv(b,m);b.wc(f,h);b.enabled(k);d=d.G("anchor");$.n(d)&&b.anchor(d);l||b.ea()}else b?(k=b.enabled(),b.clear(),b.enabled(k)):(b=a.labels().add(p,q,k),d=d.G("anchor"),$.n(d)&&b.anchor(d),b.enabled(!1));if(c&&(c=b)&&a.g){var d=c.sa(),r;(r=a.g[d])?c&&0!=c.enabled()&&e?iI(a,c,r):r.clear():e?iI(a,c,a.Rc):a.Rc.clear()}return b}r=!!(b&1);var t=a.la();b=t.get("normal");b=$.n(b)?b.label:void 0;c=t.get("hovered");c=$.n(c)?c.label:void 0;b=$.so(b,t.get("label"));c=r?$.so(c,t.get("hoverLabel")):
null;var d=t.sa(),f=a.Kb().labels(),h=r?f:null,m=a.labels().Yd(d),k=b&&$.n(b.enabled)?b.enabled:null,e=c&&$.n(c.enabled)?c.enabled:null,l=hI(a),p=gI(a,!0),u=a.N("mode3d"),q=!0;if((!r||r&&!a.N("forceHoverLabels"))&&"allow-overlap"!=a.N("overlapMode")){var v=t.G("start"),w=t.G("sweep"),q=a.Fb,x=a.Ab,z;if(t.G("exploded")&&1!=t.Ib()){z=(v+w/2)*Math.PI/180;var E=(u?.45*a.B:a.B)*Math.sin(z),q=q+a.B*Math.cos(z),x=x+E}z=v*Math.PI/180;var E=q+a.b*Math.cos(z),N=x+(u?.45*a.b:a.b)*Math.sin(z);z=(v+w)*Math.PI/
180;v=q+a.b*Math.cos(z);z=x+(u?.45*a.b:a.b)*Math.sin(z);a.ub?a.ub.clear():a.ub=new $.nw;a.ub.Fe(p);a.ub.rc(l);a.ub.ig();$.cv(a.ub,a.labels());$.dv(a.ub,h);a.ub.wc(b,c);u=a.labels().ki(a.ub,null,null,d);w=(t=(1==t.Ib()||360==w)&&!a.Ua)||!HH(q,x,v,z,u);v=!GH(q,x,a.b,u);z=t||GH(q,x,a.Ua,u);q=(t||!HH(E,N,q,x,u))&&w&&v&&z}(r?null===e?null===f.enabled()?null===k?a.labels().enabled():k:f.enabled():e:null===k?a.labels().enabled():k)&&q?(m?(m.Fe(p),m.rc(l)):m=a.labels().add(p,l,d),m.ig(),$.dv(m,h),m.wc(b,
c),r&&!m.$()&&a.labels().$e()&&(m.$(a.labels().$e()),m.$().parent()||m.$().parent(a.labels().$()))):m&&a.labels().clear(m.sa());return m},WH=function(a,b){if(a.N("mode3d")){var c,d=a.F.length,e,f=a.la().sa();for(c=0;c<d;c++)e=a.F[c],e.index==f&&dI(a,e.type+"Path"+("front"==e.type||"back"==e.type?e.start:""),b)}else c=a.la().G("slice"),$.n(c)&&(d=SH("fill",1,!0)(a,b,!1,!0),TH(d)&&null===d.mode&&(d.mode=a.de?a.de:null),c.fill(d),d=SH("stroke",2,!0)(a,b,!1,!0),TH(d)&&null===d.mode&&(d.mode=a.de?a.de:
null),c.stroke(d),XH(a,b))},XH=function(a,b,c){if(c=a.la().G(c||"hatchSlice"))a=SH("hatchFill",3,!0)(a,b,!1),c.stroke(null).fill(a)},lI=function(a,b){var c=a.la();if(1!=c.Ib()&&360!=c.G("sweep")){if($.n(b))c.G("exploded",b);else{var d=c.G("exploded");c.G("exploded",!d)}var d=c.G("start"),e=c.G("sweep");$.n(d)&&$.n(e)&&e&&(d=c.sa(),a.N("mode3d")?bI(a,d,!0):YH(a,!0),LH(a)&&($.T(a.labels()),a.labels().clear(),kI(a),a.labels().ea(),a.labels().ka(!0),c.select(d)),c=a.state.j|$.zu(a.state,c.sa()),jI(a,
c,!!(c&1)),a.labels().ea())}},LH=function(a){return"outside"==$.Wk(a.labels().N("position"))},gI=function(a,b){var c=a.la();if(!a.ba||b)a.ba=new $.Nt;a.ba.Eh(c).vh([a.$d(c.sa()),a]);var d={x:{value:c.get("x"),type:"string"},value:{value:c.get("value"),type:"number"},name:{value:c.get("name"),type:"string"},index:{value:c.sa(),type:"number"},chart:{value:a,type:""}};c.G("groupedPoint")&&(d.name={value:"Other points",type:"string"},d.groupedPoint={value:!0,type:"string"},d.names={value:c.G("names"),
type:""},d.values={value:c.G("values"),type:""});return $.Ct(a.ba,d)},nI=function(a,b){a.Jd||(a.Jd=[]);var c=b.sa();a.Jd[c]||(a.Jd[c]=$.$m(a.labels().ki(b)));return a.Jd[c]},oI=function(a,b){var c=b.sa();a.Jd&&(a.Jd[c]=null)},kI=function(a){var b=a.la(),c,d,e,f;a.dc=[];var h=a.N("mode3d");a.Ja?(a.Ja.clear(),h&&a.vc.clear()):(a.Ja=new $.Sy(function(){return $.kk()},function(a){a.clear()}),a.Ja.parent(a.Ta),a.Ja.zIndex(32),h&&(a.vc=new $.Sy(function(){return $.kk()},function(a){a.clear()}),a.vc.parent(a.Ta),
a.vc.zIndex(29)));a.g=[];var k=a.N("connectorStroke");a.Rc||(a.Rc=a.Ta.path(),a.Rc.stroke(k));var l=[],m=[],p,q;b.reset();for(var r=!1,t=!1;b.advance();)if(!UH(b.get("value"))){var u=b.sa();f=b.G("start");d=b.G("sweep");c=b.G("exploded")&&1!=b.Ib();d=(f+d/2)*Math.PI/180;var v=$.Jb($.Kb(d));270<v&&!r&&(m.length||q&&q.length)&&(r=!0,p=[]);90<v&&!t&&(l.length||p&&p.length)&&(t=!0,q=[]);f=90>v||270<v;e=a.b+(c?a.B:0);var w=h?.45*a.b+(c?.45*a.B:0):e;c=a.Fb+e*Math.cos(d);d=a.Ab+w*Math.sin(d);h&&(d+=.2*a.b/
2);e=f?5:-5;b.G("connector",e);a.dc[2*u]=c;a.dc[2*u+1]=d;b.G("anchor",f?"left-center":"right-center");c=jI(a,$.cm,!1);oI(a,c);c.ha=v;f?r?p.push(c):l.push(c):t?q.push(c):m.push(c);"allow-overlap"==a.N("overlapMode")&&c&&0!=c.enabled()&&(u=c.sa(),a.g[u]||(d=a.dc[2*u+1]-.2*a.b/2,d=h&&d<a.Ab?$.Ty(a.vc):$.Ty(a.Ja),a.g[u]=d,d.stroke(k),iI(a,c,d)))}l=p?p.concat(l):l;m=q?q.concat(m):m;if("allow-overlap"!=a.N("overlapMode")){var x,r=[];f=null;p=0;for(q=m.length;p<q;p++)if(c=m[p]){b.select(c.sa());c.Fe(gI(a));
d=nI(a,c);if(!f||pI(f,d))f&&r.push(f),f="left-center"==c.anchor(),f=new qI(f,a,r);rI(f,c)}f&&r.push(f);p=0;for(q=r.length;p<q;p++)(f=r[p])&&f.Cj&&(x||(x=[]),x=$.lb(x,f.Cj));f=null;if(x){$.wb(x,function(a,b){return a.sa()>b.sa()?1:a.sa()<b.sa()?-1:0});p=0;for(q=x.length;p<q;p++)if(c=x[p]){b.select(c.sa());c.Fe(gI(a));d=nI(a,c);t=!0;v=0;for(u=r.length;v<u;v++)t=t&&pI(r[v],d);if(t){f||(f="left-center"==c.anchor(),f=new qI(f,a,[]));sI(f,c);d=f.sb();t=!0;for(v=0;v<u;v++)t=t&&pI(r[v],d);f.md||!t?(f.labels.pop().enabled(!1),
tI(f),r.push(f),f=null):c.enabled(!0)}else f&&(r.push(f),f=null)}f&&(r.push(f),f=null)}m=[];f=null;for(p=l.length;p--;)if(c=l[p]){b.select(c.sa());c.Fe(gI(a));d=nI(a,c);if(!f||pI(f,d))f&&m.push(f),f="left-center"==c.anchor(),f=new qI(f,a,m);rI(f,c)}f&&m.push(f);x&&(x.length=0);p=0;for(q=m.length;p<q;p++)(f=m[p])&&f.Cj&&(x||(x=[]),x=$.lb(x,f.Cj));f=null;if(x)for($.wb(x,function(a,b){return a.sa()>b.sa()?1:a.sa()<b.sa()?-1:0}),p=x.length;p--;)if(c=x[p]){b.select(c.sa());c.Fe(gI(a));d=nI(a,c);t=!0;v=
0;for(u=m.length;v<u;v++)t=t&&pI(m[v],d);if(t){f||(f="left-center"==c.anchor(),f=new qI(f,a,[]));sI(f,c);d=f.sb();t=!0;for(v=0;v<u;v++)t=t&&pI(m[v],d);f.md||!t?(f.labels.pop().enabled(!1),tI(f),m.push(f),f=null):c.enabled(!0)}else f&&(m.push(f),f=null)}f&&(r.push(f),f=null);p=0;for(q=r.length;p<q;p++)if(f=r[p])for(uI(f),b=0,l=f.labels.length;b<l;b++)(c=f.labels[b])&&0!=c.enabled()&&(u=c.sa(),a.g[u]||(d=a.dc[2*u+1]-.2*a.b/2,d=h&&d<a.Ab?$.Ty(a.vc):$.Ty(a.Ja),a.g[u]=d,d.stroke(k),iI(a,c,d)));p=0;for(q=
m.length;p<q;p++)if(f=m[p])for(uI(f),b=0,l=f.labels.length;b<l;b++)(c=f.labels[b])&&0!=c.enabled()&&(u=c.sa(),a.g[u]||(d=a.dc[2*u+1]-.2*a.b/2,d=h&&d<a.Ab?$.Ty(a.vc):$.Ty(a.Ja),a.g[u]=d,d.stroke(k),iI(a,c,d)))}},iI=function(a,b,c){var d=a.data().la(),e=b.sa();if(d.select(e)){var f=a.dc[2*e],e=a.dc[2*e+1],d=d.G("connector"),h=b.rc().value,k=$.n(b.N("offsetY"))?b.N("offsetY"):a.labels().N("offsetY");k||(k=0);k=$.M(k,a.b);(b=$.n(b.N("offsetX"))?b.N("offsetX"):a.labels().N("offsetX"))||(b=0);b=$.H(h.angle+
$.M(b,360));k=h.radius+k;h=a.Fb+k*Math.cos(b)-d;a=a.Ab+k*Math.sin(b);c.clear().moveTo(f,e).lineTo(h,a).lineTo(h+d,a)}},hI=function(a){var b=LH(a),c=a.la(),d=c.G("start"),e=c.G("sweep"),f=1==c.Ib()||360==e,c=c.G("exploded")&&!f,d=d+e/2,h,k,l;l=a.N("mode3d");e=a.N("insideLabelsOffset");return l?(b?(f=a.b+a.K,e=.45*a.b+a.K,c&&(f+=a.B,e+=.45*a.B)):(h=a.b,b=.45*a.b,k=a.Ua,l=.45*a.Ua,f&&!k?e=f=0:(f=$.M(e,k+h),e=$.Vn(e)?$.M(e,l+b):.45*$.M(e,l+b),c&&(f+=a.B,e+=.45*a.B))),{value:{angle:d,radius:f,radiusY:e}}):
{value:{angle:d,radius:b?a.b+a.K+(c?a.B:0):$.M(e,f&&!a.Ua?0:a.b-a.Ua)+a.Ua+(c?a.B:0)}}},qI=function(a,b,c){this.J=c;this.b=b;this.labels=[];this.y=this.height=0;this.g=[];this.j=a;this.md=!1;this.Bh=null},rI=function(a,b){b&&(a.labels.push(b),vI(a))},sI=function(a,b){b&&(a.labels.push(b),tI(a))},pI=function(a,b){var c=$.Zm(a.sb());return!$.Wm(c,$.Zm(b))},uI=function(a){for(var b=0,c=a.labels.length;b<c;b++){var d=a.labels[b],e=a.g[3*b],f=a.g[3*b+1],h=a.g[3*b+2],k=d.rc().value;k.angle=e;k.radius=f;
k.radiusY=h;oI(a.b,d)}},tI=function(a){var b,c,d=a.height=0;a.od();var e=a.b.kT(),f=e.x,e=e.y,h,k,l=a.b.N("mode3d");l?(h=e+.45*a.b.b+a.b.K-.1+.2*a.b.b/2,k=e-(.45*a.b.b+a.b.K)+.1-.2*a.b.b/2):(h=e+a.b.b+a.b.K-.1,k=e-(a.b.b+a.b.K)+.1);for(var m=0,p=a.labels.length;m<p;m++)b=a.labels[m],c=nI(a.b,b),d+=c.top-a.height-c.height/2,a.height+=c.height;a.y=d/p;d=a.y+a.height;d>h&&(d=h,a.y=h-a.height);a.labels.length&&(c=nI(a.b,a.labels[0]).height,a.y+c<k&&(d=k-c+a.height,a.y=k-c));k=a.b.N("outsideLabelsCriticalAngle");
a.g.length=0;h=a.b.data().la();var q,r,t,u,v,w,x,z;a.x=window.NaN;a.width=window.NaN;a.F=null;a.B=window.NaN;a.o=window.NaN;a.md=!1;m=0;for(p=a.labels.length;m<p;m++){b=a.labels[m];c=nI(a.b,b);q=m==p-1?0:nI(a.b,a.labels[m+1]).height;h.select(b.sa());r=h.G("start");t=h.G("sweep");u=h.G("exploded");var E=$.n(b.N("offsetX"))?b.N("offsetX"):a.b.labels().N("offsetX");E||(E=0);var E=$.M(E,360),N=$.n(b.N("offsetY"))?b.N("offsetY"):a.b.labels().N("offsetY");N||(N=0);N=$.M(N,a.b.b);t=(r+t/2+E)*Math.PI/180;
r=a.j?5:-5;w=a.b.b+(u?a.b.B:0);v=a.b.b+a.b.K+(u?a.b.B:0)+N;var R;l?(x=.45*a.b.b+(u?.45*a.b.B:0),R=.45*a.b.b+a.b.K+(u?.45*a.b.B:0)+.45*N):(x=w,R=v);u=d;z=w+a.b.K;var P=x+a.b.K;Math.abs(u-e)>P&&(P=Math.abs(u-e));P=z*Math.sqrt(Math.pow(P,2)-Math.pow(u-e,2))/P;z=f+(a.j?1:-1)*Math.abs(P);w=f+w*Math.cos(t);x=e+x*Math.sin(t);v=f+v*Math.cos(t);t=e+R*Math.sin(t);t=$.Kb(Math.acos($.Ym(w,x,v,t).toFixed(3)/$.Ym(w,x,z,u).toFixed(3)));if(t>a.o||(0,window.isNaN)(a.o)||0>P)a.o=0>P?Number.POSITIVE_INFINITY:t,a.F=
b,a.B=m;if(t>k||0>P)a.md=!0;b=z+r;r=a.j?b:b-c.width;a.x=(0,window.isNaN)(a.x)?r:a.x>r?r:a.x;r=a.j?b+c.width:b;a.width=(0,window.isNaN)(a.width)?r:a.width<r?r:a.width;b-=f;u-=e;l&&(u+=.2*a.b.b/2);r=Math.sqrt(Math.pow(b,2)+Math.pow(u,2))-N;N=Math.sqrt(Math.pow(b,2)+Math.pow(u,2))-N;t=window.NaN;0<b&&0<=u?t=$.Kb(Math.atan(u/b)):0<b&&0>u?t=$.Kb(Math.atan(u/b))+360:0>b?t=$.Kb(Math.atan(u/b))+180:!b&&0<u?t=90:!b&&0>u&&(t=270);t-=E;a.g.push(t,r,N);d-=c.height/2+q/2}a.width-=a.x},vI=function(a){tI(a);if(a.md){var b=
a.F,c=a.B;(0,window.isNaN)(c)||(b.enabled(!1),a.Cj||(a.Cj=[]),a.Cj.push(b),$.gb(a.labels,c,1));var d=a.b,b=a.labels,e=a.J,f,h,k,l,m=e[e.length-1];if(m!=a){var p=null,c=b.slice(),q=e.length,r=!1;f=0;for(h=b.length;f<h;f++)if(k=b[f])l=nI(d,k),!m||pI(m,l)?!p||pI(p,l)?(p&&(e.push(p),m=p),l="left-center"==k.anchor(),p=new qI(l,d,e),sI(p,k)):(sI(p,k),d.md?(k.enabled(!1),p.Cj||(p.Cj=[]),p.Cj.push(k),p.labels.pop(),tI(p)):m&&p&&!pI(m,p.sb())&&(e.pop(),p.labels=$.lb(m.labels,p.labels),m=null,tI(p),r=!0)):
(k.enabled(!1),p&&(p.Cj||(p.Cj=[]),p.Cj.push(k)));if(p)if(0<e.length-q||r)a.labels=p.labels;else{d=p;if(d.Cj){e=0;for(f=d.Cj.length;e<f;e++)d.Cj[e].enabled(!0);d.Cj.length=0}c.length!=b.length&&(a.labels=c)}}vI(a)}else if((b=a.J[a.J.length-1])&&(c=b.sb()),c&&!pI(a,c)){a.J.pop();a.labels=$.lb(b.labels,a.labels);b=0;for(c=a.labels.length;b<c;b++)a.labels[b].enabled(!0);vI(a)}},wI=function(a,b){var c=new PH(a,b);c.va(!0,$.jm("pie"));return c},xI=function(a,b){var c=new PH(a,b);c.va(!0,$.jm("pie3d"));
return c};$.G(FH,$.uu);FH.prototype.B=function(a,b){if(this.target.la().select(b)){var c=$.ub(this.J,b);a!=$.cm&&(0>c?($.hb(this.J,b,~c),$.hb(this.b,a,~c),this.j==$.cm&&this.target.jh(a),$.wu(this,window.NaN)&&!this.target.vf()&&"single"==this.target.Mc()&&this.target.kh(a)):(this.b[c]|=a,this.target.jh(this.b[c])))}};$.G(IH,$.hw);$.g=IH.prototype;
$.g.update=function(){this.g.length=this.j.length=0;for(var a=this.ya.qf();a.advance();)if(!a.G("missing")){var b=a.G("start"),c=a.G("sweep"),d=this.ya.b,e=this.ya.CT();this.g.push(this.ya.oh(),0,0,0);this.j.push(b,c,d,e)}};$.g.sl=function(){VH(this.ya,1E-5,1E-5,LH(this.ya))};
$.g.li=function(){for(var a=this.ya.qf(),b=0;a.advance();)if(!a.G("missing")){a.G("start",this.b[b++]);a.G("sweep",this.b[b++]);a.G("radius",this.b[b++]);a.G("innerRadius",this.b[b++]);var c=this.ya,d=a,e=d.G("slice");e.clear();var f=d.G("start"),h=d.G("sweep"),k=d.G("radius"),l=d.G("innerRadius");if(d.G("exploded")&&1!=d.Ib())var m=f+h/2,e=$.Zi(e,c.Fb+c.B*Math.cos($.H(m)),c.Ab+c.B*Math.sin($.H(m)),k,l,f,h);else e=$.Zi(e,c.Fb,c.Ab,k,l,f,h);if(f=d.G("hatchSlice"))c.la().select(d.sa()),f.clear(),f.jd(e.O()),
c=SH("hatchFill",3,!0)(c,$.zu(c.state,d.sa()),!1),f.stroke(null).fill(c)}};$.g.Ng=function(){this.li()};$.g.da=function(){IH.I.da.call(this);this.ya=null};$.G(KH,$.rq);$.g=KH.prototype;$.g.ty=function(){var a=[],b=this.b.la(),c=void 0,d=void 0;if(this.j)for(;b.advance();){var e=b.get(this.B),f=b.get("name");$.n(f)||(f="Point "+b.sa());this.j(e)?a.push(b.sa()):c?(c.push(e),d.push(f)):(c=[e],d=[f])}else for(e=0,b=b.Ib();e<b;e++)a.push(e);c?(this.g.Ff(0,(0,$.hh)(c,this.K,this.ma())),this.g.G(0,"names",d),this.g.G(0,"values",c)):(this.g.Ff(0,c),this.g.G(0,"names",[]),this.g.G(0,"values",[]));return a};
$.g.Ib=function(){$.sq(this);return(this.ck?this.ck.length:this.b.Ib())+this.g.Ib()};$.g.ci=function(a){$.sq(this);var b=this.b.Ib();return a<b?this.b.ci(a):this.g.ci(a-b)};$.g.Zn=function(){return $.lb(this.b.Zn(),this.g)};$.g.Ff=function(a,b){$.sq(this);var c=this.ck?this.ck.length:this.b.Ib();if(a<c)return $.rq.prototype.Ff.apply(this,arguments);a-=c;return this.g.Ff.apply(this.g,arguments)};
$.g.kF=function(a,b,c){var d=this.ck?this.ck.length:this.b.Ib();if(a>d)throw Error("Index can not be masked by this View");return a>=d?(a-=d,2<arguments.length?(this.g.G.apply(this.g,arguments),this):this.g.G.apply(this.g,arguments)):$.rq.prototype.kF.apply(this,arguments)};$.G(JH,$.yq);JH.prototype.Ff=function(a,b){if(!a){var c=this.j;1<arguments.length&&(this.j=b);return c}};JH.prototype.G=function(a,b,c){if(!a)return $.rq.prototype.G.apply(this,arguments)};
JH.prototype.Ib=function(){return $.n(this.j)?1:0};var yI=JH.prototype;yI.row=yI.Ff;yI.getRowsCount=yI.Ib;$.G(MH,$.hw);MH.prototype.update=function(){this.g.length=this.j.length=0;this.g.push(1E-5,1E-5);this.j.push(1,this.J.opacity||1)};MH.prototype.li=function(){VH(this.ya,this.b[0],this.b[1],this.F)};MH.prototype.Ng=function(){this.li()};MH.prototype.da=function(){MH.I.da.call(this);this.J=this.ya=null;delete this.F};$.G(NH,$.Ax);NH.prototype.oh=function(){return this.ya.data().G(this.index,"start")};NH.prototype.j=function(){var a=this.ya.data(),b=a.G(this.index,"start"),a=a.G(this.index,"sweep");return b+a};NH.prototype.selected=function(a){return $.n(a)?(this.Yb().iK(this.index,!!a),this):this.ya.data().G(this.index,"exploded")};NH.prototype.g=NH.prototype.selected;var zI=NH.prototype;zI.getStartAngle=zI.oh;zI.getEndAngle=zI.j;zI.hovered=zI.Kb;zI.selected=zI.selected;zI.exploded=zI.g;$.G(PH,$.Pu);$.Hp(PH,["fill","stroke","hatchFill"]);$.g=PH.prototype;$.g.Za=function(a){return $.n(a)?(this.ca.X(a),this):this.ca};$.g.Kb=function(a){return $.n(a)?(this.Xb.X(a),this):this.Xb};$.g.Va=function(){return this.N("mode3d")?"pie-3d":"pie"};$.g.Aa=$.Pu.prototype.Aa|16;$.g.xa=$.Pu.prototype.xa|12304;
$.g.pD=function(){if(!this.N("mode3d")&&this.Jl().enabled()&&0<this.Jl().duration())if(this.fa&&1==this.fa.Ud)this.fa.update();else if(this.W(2048)){$.K(this.fa);this.fa=new $.Ry;var a=this.Jl().duration(),b=a*(1-.85),a=new IH(this,.85*a),b=new MH(this,b);this.fa.add(a);this.fa.add(b);this.fa.pa("begin",function(){$.Iu(this,!0);$.lq(this,{type:"animationstart",chart:this})},!1,this);this.fa.pa("end",function(){$.Iu(this,!1);$.lq(this,{type:"animationend",chart:this})},!1,this);this.fa.Jh(!1)}};
$.g.we=function(){return[this]};$.g.vf=function(){return!0};$.g.Yg=function(){return!1};$.g.xg=function(){return!0};$.g.data=function(a,b){if($.n(a)){if(a){var c=a.title||a.caption;c&&this.title(c);a.rows&&(a=a.rows)}if(this.Ia!==a){this.Ia=a;if(this.Sa!=a||null===a){$.K(this.Y);var d;$.J(a,$.rq)?(d=a,this.Y=null):($.J(a,$.Cq)?d=(this.Y=a).qe():d=(this.Y=new $.Cq($.y(a)||$.B(a)?a:null,b)).qe(),$.L(this,this.Y));this.Sa=d.xk()}OH(this)}return this}return this.Na};
$.g.la=function(){return this.Ha||(this.Ha=this.Na.la())};$.g.hc=function(){return this.Ha=this.Na.la()};$.g.qf=function(){return this.Na.la()};$.g.Mq=function(a){if($.J(a,$.xr))return QH(this,$.xr,a),this;if($.J(a,$.tr))return QH(this,$.tr,a),this;$.A(a)&&"range"==a.type?QH(this,$.xr):!$.A(a)&&this.o||QH(this,$.tr);return $.n(a)?(this.o.X(a),this):this.o};$.g.PB=function(a){this.P||(this.P=new $.ur,$.S(this.P,this.WP,this),$.L(this,this.P));return $.n(a)?(this.P.X(a),this):this.P};var RH={};
PH.prototype.Si=function(a,b,c,d,e,f,h){f=!!(b&1);e=(f?this.Xb:this.ca).N(a);h?a=e:(h=c.get(f?"hovered":"normal"),a=$.so($.n(h)?h[a]:void 0,c.get($.dm(b,a)),e));$.n(a)&&(a=d(a));return a};PH.prototype.Ci=function(){return $.zc(this.PB().pd(this.la().sa())||"none")};PH.prototype.Tk=function(){var a=this.la();return{index:a.sa(),sourceHatchFill:this.Ci(),iterator:a}};PH.prototype.Yi=function(a){var b=this.la();return{index:b.sa(),sourceColor:a||this.Mq().pd(b.sa())||"blue",iterator:b}};
var AI=function(){var a={};$.Q(a,0,"overlapMode",$.Sk);$.Q(a,0,"radius",function(a){return $.Wn(a,"100%")});$.Q(a,0,"innerRadius",function(a){return $.D(a)?a:$.Wn(a)});$.Q(a,0,"startAngle",function(a){return $.Jb($.O(a)||0)});$.Q(a,0,"explode",function(a){return $.Wn(a,15)});$.Q(a,0,"sort",$.Mk);$.Q(a,0,"outsideLabelsSpace",function(a){return $.Wn(a,"30%")});$.Q(a,0,"insideLabelsOffset",$.Wn);$.Q(a,0,"connectorLength",function(a){return $.Wn(a,"20%")});$.Q(a,0,"outsideLabelsCriticalAngle",function(a){return $.Jb($.M(a))});
$.Q(a,0,"forceHoverLabels",$.Bp);$.Q(a,1,"connectorStroke",$.Np);$.Q(a,0,"mode3d",$.Cp);return a}();$.Gp(PH,AI);$.g=PH.prototype;$.g.labels=function(a){return $.n(a)?(this.ca.labels(a),this):this.ca.labels()};$.g.VP=function(a){return $.n(a)?($.D(a)&&a!=this.Oc?(this.Oc=a,OH(this)):$.to(a)&&(this.Oc=null,OH(this)),this):this.Oc};$.g.kT=function(){return{x:this.Fb,y:this.Ab}};$.g.d3=function(){return this.b};$.g.CT=function(){return this.Ua};$.g.H9=function(){return this.B};
$.g.oh=function(){return this.N("startAngle")+-90};$.g.iK=function(a,b){var c=this.la();c.select(a)&&!UH(c.get("value"))&&lI(this,$.n(b)?!!b:!0);return this};$.g.f9=function(a){var b=this.la().reset();if($.y(a))for(var c=0,d=a.length;c<d;c++)b.select(a[c])&&!UH(b.get("value"))&&lI(this,!0);else for(;b.advance();)b.select(b.sa())&&!UH(b.get("value"))&&lI(this,a);return this};
$.g.DI=function(a,b){var c;c=this.la().sa();var d;$.B(a)&&"aquastyle"==a?(c={aquaStyleObj:this.Ob,sourceColor:this.Mq().pd(c)},c=this.Ie.call(c)):$.D(a)?(d=1<arguments.length?this.DI.apply(this,$.pb(arguments,1)):this.Mq().pd(c),c={index:c,sourceColor:d,iterator:this.la()},c=a.call(c)):c=a;return c};$.g.xx=function(){this.o&&$.J(this.o,$.xr)&&this.o.count(this.la().Ib())};
$.g.$h=function(a){this.yc();$.bv(this.labels());var b=this.la(),c;b.Ib();var d=this.N("mode3d");this.W(4)&&(Eca(this,a),this.D(4112));if(this.W(16)){this.ha?this.ha.clear():(this.ha=new $.Sy(function(){return $.kk()},function(a){a.clear()}),this.ha.zIndex(30),this.ha.parent(this.Ta));this.Ba?this.Ba.clear():(this.Ba=new $.Sy(function(){return $.kk()},function(a){a.clear()}),this.Ba.parent(this.Ta),this.Ba.zIndex(31).Xc(!0));d&&(this.F.length=0);var e=this.oh(),f=0;for(b.reset();b.advance();)c=b.get("value"),
UH(c)?b.G("missing",!0):(c=+c,f=c/this.af("sum")*360,b.G("start",e).G("sweep",f),$.n(c=b.G("exploded"))||(c=!!b.get("exploded"),b.G("exploded",c),c&&this.state.o(2,b.sa())),d?Fca(this):YH(this),e+=f);d&&bI(this);e=this.N("connectorStroke");if(this.g)for(var h in this.g)this.g.hasOwnProperty(h)&&this.g[h].stroke(e);this.Rc&&this.Rc.stroke(e);this.U(16)}if(this.W(4096)){this.labels().$()||this.labels().$(this.Ta);this.labels().clear();this.Ja&&(this.Ja.clear(),d&&this.vc.clear());d=LH(this)?$.jm("pie.outsideLabels"):
$.jm("pie.insideLabels");this.labels().yl(d.autoColor);this.labels().disablePointerEvents(d.disablePointerEvents);if(LH(this))kI(this);else for(b.reset();b.advance();)UH(b.get("value"))||(d=this.state.j|$.zu(this.state,b.sa()),jI(this,d,!!(d&1)));this.labels().ea();this.labels().$e().clip(a);this.U(4096)}};$.g.Mc=function(a){return $.n(a)?(a=$.yk(a),a!=this.qa&&(this.qa=a),this):this.qa};$.g.c3=function(a){$.U(a,16)&&this.D(13104,17)};
$.g.sd=function(a){var b=0,c=0;$.U(a,1)&&(b|=4096,c|=1);$.U(a,8)&&(b|=4100,c|=9);this.D(b,c)};$.g.WP=function(a){$.U(a,2)&&this.D(528,1)};$.g.Um=function(){return[]};
$.g.If=function(a){a={type:a.type,target:this,relatedTarget:$.js(a.relatedTarget)||a.relatedTarget,domTarget:a.target,relatedDomTarget:a.relatedTarget,offsetX:a.offsetX,offsetY:a.offsetY,clientX:a.clientX,clientY:a.clientY,screenX:a.screenX,screenY:a.screenY,button:a.button,keyCode:a.keyCode,charCode:a.charCode,ctrlKey:a.ctrlKey,altKey:a.altKey,shiftKey:a.shiftKey,metaKey:a.metaKey,platformModifierKey:a.platformModifierKey,state:a.state};var b=$.Tn(a.domTarget).index;if(!$.n(b)&&$.yu(this.state,1)){var c=
$.Du(this.state,1);c.length&&(b=c[0])}b=$.O(b);(0,window.isNaN)(b)||(a.pointIndex=a.sliceIndex=b);return a};$.g.Ml=function(a){var b=this.lf(a);if(b){var c=b.pointIndex;this.dispatchEvent(b)&&this.la().select(c)&&(this.jg(c),a=this.th("selected",a,[{ia:this,qc:[c],fd:{index:c,Le:0}}]),a.currentPoint.selected=!!this.la().G("exploded"),this.dispatchEvent(a))}};$.g.dg=function(a){(a=this.lf(a))&&this.dispatchEvent(a)};
$.g.lf=function(a){var b;"pointIndex"in a?b=a.pointIndex:"labelIndex"in a?b=a.labelIndex:"markerIndex"in a&&(b=a.markerIndex);b=$.O(b);if((0,window.isNaN)(b))return null;a.pointIndex=b;var c=a.type;switch(c){case "mouseout":c="pointmouseout";break;case "mouseover":c="pointmouseover";break;case "mousemove":c="pointmousemove";break;case "mousedown":c="pointmousedown";break;case "mouseup":c="pointmouseup";break;case "click":c="pointclick";break;case "dblclick":c="pointdblclick";break;default:return null}var d=
this.data().la();d.select(b)||d.reset();return{type:c,actualTarget:a.target,pie:this,iterator:d,sliceIndex:b,pointIndex:b,target:this,originalEvent:a,point:this.$d(b)}};$.g.$d=function(a){var b=new NH(this,a),c;this.la().select(a)&&b.$s()&&!UH(c=b.get("value"))&&(a=c/this.af("sum")*100,b.Da("percentValue",a),b.Da("yPercentOfTotal",a));return b};
$.g.bj=function(a,b){var c=[],d=this.la().reset(),e,f="aquastyle"==this.ca.N("fill");if(f){var h=this.Ob;this.Ob={}}for(;d.advance();){e=d.sa();var k=d.get("legendItem")||{},l=null;$.D(b)&&(l=gI(this),l=b.call(l,l),$.C(l)&&(l=String(l)));$.B(l)||(l=d.G("groupedPoint")?"Other points":String($.n(d.get("name"))?d.get("name"):d.get("x")));var m=this.N("mode3d"),p=SH("fill",1,!1),q=SH("stroke",2,!1),r=SH("hatchFill",3,!1),l={enabled:!0,meta:{pointIndex:e,pointValue:d.get("value")},iconType:"square",text:l,
iconStroke:m?$.Ql(fI(this,$.cm),.2):q(this,$.cm,!1),iconFill:m?fI(this,$.cm):p(this,$.cm,!1),iconHatchFill:r(this,$.cm,!1)};$.Tc(l,k);l.sourceUid=$.ra(this);l.sourceKey=e;c.push(l)}f&&(this.Ob=h);return c};$.g.Zm=function(){return!0};$.g.em=function(a){a=a.Jf();var b=this.data().la();b.select(a)&&(b=!!b.G("exploded"),this.iK(a,!b))};$.g.il=function(a,b){var c=a.Jf();if(!a||null!=c||(0,window.isNaN)(c))if(c=$.Tn(b.domTarget))c.ia=this};
$.g.hl=function(a,b){var c=a.Jf();if(!a||null!=c||(0,window.isNaN)(c))if(c=$.Tn(b.domTarget))c.ia=this};$.g.Xg=function(a){$.n(a)?this.eg(a):this.hi();return this};$.g.Qc=function(a){if(($.yu(this.state,1)||$.Fu(this.state.Kg(),1))&&this.enabled()){var b;$.n(a)?b=a:b=this.state.j==$.cm?window.NaN:void 0;this.state.g(1,b);a=this.Db();this.tc(DH()||CH()?"touchstart":"mousemove",this.QB);a.cd()}};
$.g.eg=function(a,b){if(!this.enabled())return this;if($.y(a)){for(var c=$.Du(this.state,1),d=0;d<c.length;d++)$.eb(a,c[d])||this.state.g(1,c[d]);$.Bu(this.state,1,a);$.n(b)&&this.QB(b)}else $.C(a)&&(this.Qc(),$.Bu(this.state,1,a),$.n(b)&&this.QB(b));return this};$.g.hi=function(){if(!this.enabled())return this;this.state.o(1);return this};$.g.jg=function(a){if(!this.enabled())return this;var b=this.la();this.la().select(a[0]||a);b.G("exploded")?this.state.g(2,a):$.Bu(this.state,2,a);lI(this);return this};
$.g.jh=function(a){WH(this,a);jI(this,a,!0)};$.g.Hi=function(){this.labels().ea()};$.g.kh=function(a){WH(this,a)};$.g.XC=function(){var a=new $.Ht(0);$.L(this,a);a.ya(this);$.S(a,this.e3,this);return a};$.g.e3=function(){this.Db().ea()};$.g.QB=function(a){if(!a||a.target!=this.be()){var b=this.Db(),c=gI(this);a&&($.T(b),$.Wt(b,a.clientX,a.clientY,c),b.ka(!1),this.pa(DH()||CH()?"touchstart":"mousemove",this.QB))}};
$.g.yc=function(){if(this.W(8192)){this.jk={};for(var a=this.data().la(),b,c=0,d=Number.MAX_VALUE,e=-Number.MAX_VALUE,f=0;a.advance();)b=a.get("value"),UH(b)?c++:(b=+b,d=Math.min(b,d),e=Math.max(b,e),f+=b);var a=a.Ib()-c,h;a?h=f/a:d=e=f=h=void 0;this.Da("count",a);this.Da("min",d);this.Da("max",e);this.Da("sum",f);this.Da("average",h);this.U(8192)}};$.g.Dh=function(){return gI(this)};$.g.uj=function(){return!this.la().Ib()};
$.g.O=function(){var a=PH.I.O.call(this);a.type=this.Va();a.data=this.data().O();a.palette=this.Mq().O();a.hatchFillPalette=this.PB().O();a.tooltip=this.Db().O();$.Rp(this,AI,a,"Pie");a.normal=this.ca.O();a.hovered=this.Xb.O();return{chart:a}};$.g.ga=function(a,b){PH.I.ga.call(this,a,b);this.VP(a.group);this.data(a.data);this.Mq(a.palette);this.PB(a.hatchFillPalette);"tooltip"in a&&this.Db().va(!!b,a.tooltip);$.Ip(this,AI,a);this.ca.va(!!b,a);this.ca.va(!!b,a.normal);this.Xb.va(!!b,a.hovered)};
qI.prototype.sb=function(){if(!this.Bh){var a=this.labels[0]?nI(this.b,this.labels[0]).height:0;this.Bh=new $.I(this.x,this.y+a/2,this.width,this.height)}return this.Bh};qI.prototype.od=function(){this.Bh=null};PH.prototype.da=function(){$.od(this.fa,this.ca,this.Xb);PH.I.da.call(this)};var BI=PH.prototype;BI.group=BI.VP;BI.data=BI.data;BI.labels=BI.labels;BI.getCenterPoint=BI.kT;BI.getPixelRadius=BI.d3;BI.getPixelInnerRadius=BI.CT;BI.getPixelExplode=BI.H9;BI.palette=BI.Mq;BI.explodeSlice=BI.iK;
BI.explodeSlices=BI.f9;BI.tooltip=BI.Db;BI.hatchFillPalette=BI.PB;BI.getType=BI.Va;BI.hover=BI.Xg;BI.unhover=BI.Qc;BI.getPoint=BI.$d;BI.toCsv=BI.nk;BI.normal=BI.Za;BI.hovered=BI.Kb;$.Yo.pie=wI;$.Yo["pie-3d"]=xI;$.F("anychart.pie",wI);$.F("anychart.pie3d",xI);}).call(this,$)}
if(!_.theme_pie){_.theme_pie=1;(function($){$.wa($.fa.anychart.themes.defaultTheme,{pie:{animation:{duration:2E3},title:{text:"Pie Chart"},group:!1,sort:"none",radius:"45%",innerRadius:0,startAngle:0,explode:15,outsideLabelsCriticalAngle:60,outsideLabelsSpace:30,insideLabelsOffset:"50%",normal:{labels:{format:"{%PercentValue}{decimalsCount:1,zeroFillDecimals:true}%"}},a11y:{titleFormat:function(){var a=this.chart,b=$.LG.apply(this),b=b+(", with "+a.af("count")+" points. ");return b+="Min value is "+a.af("min")+", max value is "+a.af("max")+
"."}}},pie3d:{mode3d:!0,explode:"5%",connectorLength:"15%",legendItem:{iconStroke:null}}});}).call(this,$)}
$_=window.anychart;$_.$=$;$_._=_}); | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/utils/point_sample.py | import torch
from mmcv.ops import point_sample
def get_uncertainty(mask_pred, labels):
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
prediction in 'mask_pred' for the foreground class in `classes`.
Args:
mask_pred (Tensor): mask predication logits, shape (num_rois,
num_classes, mask_height, mask_width).
labels (list[Tensor]): Either predicted or ground truth label for
each predicted mask, of length num_rois.
Returns:
scores (Tensor): Uncertainty scores with the most uncertain
locations having the highest uncertainty score,
shape (num_rois, 1, mask_height, mask_width)
"""
if mask_pred.shape[1] == 1:
gt_class_logits = mask_pred.clone()
else:
inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
return -torch.abs(gt_class_logits)
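# Illustrative usage sketch (not part of the original module).  The tensor
# shapes are arbitrary assumptions chosen to match the docstring above: for a
# batch of 4 RoIs and 80 classes, the uncertainty map is just the negated
# absolute value of the foreground-class logits.
#   mask_pred = torch.randn(4, 80, 28, 28)
#   labels = torch.randint(0, 80, (4,))
#   scores = get_uncertainty(mask_pred, labels)   # shape (4, 1, 28, 28)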
def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points,
oversample_ratio,
importance_sample_ratio):
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'get_uncertainty()' function that takes point's logit prediction as
input.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (list): The ground truth class for each instance.
num_points (int): The number of points to sample.
oversample_ratio (int): Oversampling parameter.
importance_sample_ratio (float): Ratio of points that are sampled
            via importance sampling.
Returns:
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
that contains the coordinates sampled points.
"""
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = mask_pred.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=mask_pred.device)
point_logits = point_sample(mask_pred, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = get_uncertainty(point_logits, labels)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=mask_pred.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_roi_coords = torch.rand(
batch_size, num_random_points, 2, device=mask_pred.device)
point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
return point_coords | PypiClean |
/Flask-Migrate-4.0.4.tar.gz/Flask-Migrate-4.0.4/src/flask_migrate/cli.py | import click
from flask.cli import with_appcontext
from flask_migrate import list_templates as _list_templates
from flask_migrate import init as _init
from flask_migrate import revision as _revision
from flask_migrate import migrate as _migrate
from flask_migrate import edit as _edit
from flask_migrate import merge as _merge
from flask_migrate import upgrade as _upgrade
from flask_migrate import downgrade as _downgrade
from flask_migrate import show as _show
from flask_migrate import history as _history
from flask_migrate import heads as _heads
from flask_migrate import branches as _branches
from flask_migrate import current as _current
from flask_migrate import stamp as _stamp
from flask_migrate import check as _check
@click.group()
def db():
"""Perform database migrations."""
pass
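# Typical command-line usage of this group (illustrative; the revision message
# below is a placeholder):
#   flask db init                    # create the migrations directory
#   flask db migrate -m "add users"  # autogenerate a revision script
#   flask db upgrade                 # apply pending revisions to the database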
@db.command()
@with_appcontext
def list_templates():
"""List available templates."""
_list_templates()
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('--multidb', is_flag=True,
help=('Support multiple databases'))
@click.option('-t', '--template', default=None,
help=('Repository template to use (default is "flask")'))
@click.option('--package', is_flag=True,
help=('Write empty __init__.py files to the environment and '
'version locations'))
@with_appcontext
def init(directory, multidb, template, package):
"""Creates a new migration repository."""
_init(directory, multidb, template, package)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-m', '--message', default=None, help='Revision message')
@click.option('--autogenerate', is_flag=True,
help=('Populate revision script with candidate migration '
'operations, based on comparison of database to model'))
@click.option('--sql', is_flag=True,
help=('Don\'t emit SQL to database - dump to standard output '
'instead'))
@click.option('--head', default='head',
help=('Specify head revision or <branchname>@head to base new '
'revision on'))
@click.option('--splice', is_flag=True,
help=('Allow a non-head revision as the "head" to splice onto'))
@click.option('--branch-label', default=None,
help=('Specify a branch label to apply to the new revision'))
@click.option('--version-path', default=None,
help=('Specify specific path from config for version file'))
@click.option('--rev-id', default=None,
help=('Specify a hardcoded revision id instead of generating '
'one'))
@with_appcontext
def revision(directory, message, autogenerate, sql, head, splice, branch_label,
version_path, rev_id):
"""Create a new revision file."""
_revision(directory, message, autogenerate, sql, head, splice,
branch_label, version_path, rev_id)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-m', '--message', default=None, help='Revision message')
@click.option('--sql', is_flag=True,
help=('Don\'t emit SQL to database - dump to standard output '
'instead'))
@click.option('--head', default='head',
help=('Specify head revision or <branchname>@head to base new '
'revision on'))
@click.option('--splice', is_flag=True,
help=('Allow a non-head revision as the "head" to splice onto'))
@click.option('--branch-label', default=None,
help=('Specify a branch label to apply to the new revision'))
@click.option('--version-path', default=None,
help=('Specify specific path from config for version file'))
@click.option('--rev-id', default=None,
help=('Specify a hardcoded revision id instead of generating '
'one'))
@click.option('-x', '--x-arg', multiple=True,
help='Additional arguments consumed by custom env.py scripts')
@with_appcontext
def migrate(directory, message, sql, head, splice, branch_label, version_path,
rev_id, x_arg):
"""Autogenerate a new revision file (Alias for
'revision --autogenerate')"""
_migrate(directory, message, sql, head, splice, branch_label, version_path,
rev_id, x_arg)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.argument('revision', default='head')
@with_appcontext
def edit(directory, revision):
"""Edit a revision file"""
_edit(directory, revision)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-m', '--message', default=None, help='Merge revision message')
@click.option('--branch-label', default=None,
help=('Specify a branch label to apply to the new revision'))
@click.option('--rev-id', default=None,
help=('Specify a hardcoded revision id instead of generating '
'one'))
@click.argument('revisions', nargs=-1)
@with_appcontext
def merge(directory, message, branch_label, rev_id, revisions):
"""Merge two revisions together, creating a new revision file"""
_merge(directory, revisions, message, branch_label, rev_id)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('--sql', is_flag=True,
help=('Don\'t emit SQL to database - dump to standard output '
'instead'))
@click.option('--tag', default=None,
help=('Arbitrary "tag" name - can be used by custom env.py '
'scripts'))
@click.option('-x', '--x-arg', multiple=True,
help='Additional arguments consumed by custom env.py scripts')
@click.argument('revision', default='head')
@with_appcontext
def upgrade(directory, sql, tag, x_arg, revision):
"""Upgrade to a later version"""
_upgrade(directory, revision, sql, tag, x_arg)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('--sql', is_flag=True,
help=('Don\'t emit SQL to database - dump to standard output '
'instead'))
@click.option('--tag', default=None,
help=('Arbitrary "tag" name - can be used by custom env.py '
'scripts'))
@click.option('-x', '--x-arg', multiple=True,
help='Additional arguments consumed by custom env.py scripts')
@click.argument('revision', default='-1')
@with_appcontext
def downgrade(directory, sql, tag, x_arg, revision):
"""Revert to a previous version"""
_downgrade(directory, revision, sql, tag, x_arg)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.argument('revision', default='head')
@with_appcontext
def show(directory, revision):
"""Show the revision denoted by the given symbol."""
_show(directory, revision)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-r', '--rev-range', default=None,
help='Specify a revision range; format is [start]:[end]')
@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
@click.option('-i', '--indicate-current', is_flag=True,
help=('Indicate current version (Alembic 0.9.9 or greater is '
'required)'))
@with_appcontext
def history(directory, rev_range, verbose, indicate_current):
"""List changeset scripts in chronological order."""
_history(directory, rev_range, verbose, indicate_current)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
@click.option('--resolve-dependencies', is_flag=True,
help='Treat dependency versions as down revisions')
@with_appcontext
def heads(directory, verbose, resolve_dependencies):
"""Show current available heads in the script directory"""
_heads(directory, verbose, resolve_dependencies)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
@with_appcontext
def branches(directory, verbose):
"""Show current branch points"""
_branches(directory, verbose)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
@with_appcontext
def current(directory, verbose):
"""Display the current revision for each database."""
_current(directory, verbose)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@click.option('--sql', is_flag=True,
help=('Don\'t emit SQL to database - dump to standard output '
'instead'))
@click.option('--tag', default=None,
help=('Arbitrary "tag" name - can be used by custom env.py '
'scripts'))
@click.argument('revision', default='head')
@with_appcontext
def stamp(directory, sql, tag, revision):
"""'stamp' the revision table with the given revision; don't run any
migrations"""
_stamp(directory, revision, sql, tag)
@db.command()
@click.option('-d', '--directory', default=None,
help=('Migration script directory (default is "migrations")'))
@with_appcontext
def check(directory):
"""Check if there are any new operations to migrate"""
_check(directory) | PypiClean |
/Chiplotle-0.4.1.tar.gz/Chiplotle-0.4.1/chiplotle/documentation/chapters/fundamentals/index.rst | Chiplotle fundamentals
======================
.. warning:: This section needs fleshing out.
In addition to being an HPGL plotter driver, *Chiplotle* is a vector drawing library specifically designed to work with these HPGL plotters. While other computer drawing tools are designed to create art on the screen (or for ordinary raster printing), Chiplotle knows about and understands some of the mechanics of drawing with pen plotters.
One can think of Chiplotle as consisting of three layers:
#. A high abstraction layer consisting of platonic shapes, like `line`, `circle`, `label`, etc.
#. An interface / communication layer consisting of the HPGL language.
#. A plotter driver which manages communication between your hardware and software.
HPGL
****
How does Chiplotle communicate with a plotter?
During the 70s and 80s, a variety of languages were developed by different manufacturers to control different brands of pen plotters, but the one language that gained most popularity and eventually became sort of a standard is HPGL (Hewlett-Packard Graphics Language).
Chiplotle supports all the standard HPGL commands, giving you full control of these plotters.
Further, Chiplotle provides plotter interfaces that allow you to control the plotter as if through a control panel.
Chiplotle vector drawing
************************
In addition to being an HPGL plotter driver, Chiplotle is also a general-purpose vector drawing library.
With Chiplotle you can create generic shapes that can be sent to an HPGL plotter directly for drawing, without you knowing anything about the underlying HPGL language.
Chiplotle geometry
*****************************
Shapes
------
Chiplotle comes with a built-in set of common shapes, like `line`, `circle`, `rectangle`, `ellipse`, etc.
These shapes are agnostic of any particular drawing language, such as HPGL or g-code.
Transforms
-----------
Chiplotle allows you to apply your standard geometric transformations to any shapes you may create with it.
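A minimal sketch of this workflow (the shape constructors, the ``rotate`` transform and the ``instantiate_plotters`` helper shown here are quoted from memory and may differ slightly from the actual Chiplotle API)::
   chiplotle> r = rectangle(500, 300)
   chiplotle> rotate(r, 45)
   chiplotle> plotters = instantiate_plotters( )
   chiplotle> plotters[0].write(r)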
Chiplotle-HPGL commands
*****************************
In addition to the generic shape constructors, Chiplotle gives you access to specific HPGL command definitions.
All the standard HPGL commands are implemented in Chiplotle, and their class names correspond to the two-letter mnemonics used in HPGL.
Refer to the :doc:`Chiplotle API </chapters/api/hpgl>` for a list and documentation of all the HPGL commands.
Chiplotle HPGL commands can be instantiated as you would normally instantiate any other class. Some commands require arguments, others don't::
chiplotle> PD( )
PD(xy=[])
chiplotle> CI(10)
CI(chordangle=None, radius=10.0)
All Chiplotle HPGL commands have a ``format`` attribute. This attribute returns a string representation of the HPGL command as sent to the plotter.
::
chiplotle> t = PD( )
chiplotle> t.format
'PD;'
| PypiClean |
/GDAL-3.7.1.1.tar.gz/GDAL-3.7.1.1/gdal-utils/osgeo_utils/gdal_proximity.py |
import sys
from typing import Optional, Sequence
from osgeo import gdal
from osgeo_utils.auxiliary.util import GetOutputDriverFor
def Usage():
print(
"""
Usage: gdal_proximity.py srcfile dstfile [-srcband n] [-dstband n]
[-of format] [-co name=value]*
[-ot Byte/UInt16/UInt32/Float32/etc]
[-values n,n,n] [-distunits PIXEL/GEO]
[-maxdist n] [-nodata n] [-use_input_nodata YES/NO]
[-fixed-buf-val n] [-q] """
)
return 2
def main(argv=sys.argv):
driver_name = None
creation_options = []
alg_options = []
src_filename = None
src_band_n = 1
dst_filename = None
dst_band_n = 1
creation_type = "Float32"
quiet = False
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return 0
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == "-of" or arg == "-f":
i = i + 1
driver_name = argv[i]
elif arg == "-co":
i = i + 1
creation_options.append(argv[i])
elif arg == "-ot":
i = i + 1
creation_type = argv[i]
elif arg == "-maxdist":
i = i + 1
alg_options.append("MAXDIST=" + argv[i])
elif arg == "-values":
i = i + 1
alg_options.append("VALUES=" + argv[i])
elif arg == "-distunits":
i = i + 1
alg_options.append("DISTUNITS=" + argv[i])
elif arg == "-nodata":
i = i + 1
alg_options.append("NODATA=" + argv[i])
elif arg == "-use_input_nodata":
i = i + 1
alg_options.append("USE_INPUT_NODATA=" + argv[i])
elif arg == "-fixed-buf-val":
i = i + 1
alg_options.append("FIXED_BUF_VAL=" + argv[i])
elif arg == "-srcband":
i = i + 1
src_band_n = int(argv[i])
elif arg == "-dstband":
i = i + 1
dst_band_n = int(argv[i])
elif arg == "-q" or arg == "-quiet":
quiet = True
elif src_filename is None:
src_filename = argv[i]
elif dst_filename is None:
dst_filename = argv[i]
else:
return Usage()
i = i + 1
if src_filename is None or dst_filename is None:
return Usage()
return gdal_proximity(
src_filename=src_filename,
src_band_n=src_band_n,
dst_filename=dst_filename,
dst_band_n=dst_band_n,
driver_name=driver_name,
creation_type=creation_type,
creation_options=creation_options,
alg_options=alg_options,
quiet=quiet,
)
def gdal_proximity(
src_filename: Optional[str] = None,
src_band_n: int = 1,
dst_filename: Optional[str] = None,
dst_band_n: int = 1,
driver_name: Optional[str] = None,
creation_type: str = "Float32",
creation_options: Optional[Sequence[str]] = None,
alg_options: Optional[Sequence[str]] = None,
quiet: bool = False,
):
# =============================================================================
# Open source file
# =============================================================================
creation_options = creation_options or []
alg_options = alg_options or []
src_ds = gdal.Open(src_filename)
if src_ds is None:
print("Unable to open %s" % src_filename)
return 1
srcband = src_ds.GetRasterBand(src_band_n)
# =============================================================================
# Try opening the destination file as an existing file.
# =============================================================================
try:
        # Use a separate name here so a user-supplied output driver (-of) is
        # not clobbered when the destination file cannot be identified.
        dst_driver = gdal.IdentifyDriver(dst_filename)
        if dst_driver is not None:
dst_ds = gdal.Open(dst_filename, gdal.GA_Update)
dstband = dst_ds.GetRasterBand(dst_band_n)
else:
dst_ds = None
except Exception:
dst_ds = None
# =============================================================================
# Create output file.
# =============================================================================
if dst_ds is None:
if driver_name is None:
driver_name = GetOutputDriverFor(dst_filename)
drv = gdal.GetDriverByName(driver_name)
dst_ds = drv.Create(
dst_filename,
src_ds.RasterXSize,
src_ds.RasterYSize,
1,
gdal.GetDataTypeByName(creation_type),
creation_options,
)
dst_ds.SetGeoTransform(src_ds.GetGeoTransform())
dst_ds.SetProjection(src_ds.GetProjectionRef())
dstband = dst_ds.GetRasterBand(1)
# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet:
prog_func = None
else:
prog_func = gdal.TermProgress_nocb
gdal.ComputeProximity(srcband, dstband, alg_options, callback=prog_func)
srcband = None
dstband = None
src_ds = None
dst_ds = None
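# Illustrative usage sketch (not part of the original script).  The file names
# and option values are placeholder assumptions; the keyword arguments match
# the signature of gdal_proximity() above.
#   gdal_proximity(
#       src_filename="classes.tif",
#       dst_filename="distance.tif",
#       creation_type="Float32",
#       alg_options=["VALUES=1", "DISTUNITS=GEO", "MAXDIST=500"],
#   )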
if __name__ == "__main__":
sys.exit(main(sys.argv)) | PypiClean |
/MOM-Tapyr-1.6.2.tar.gz/MOM-Tapyr-1.6.2/_Meta/M_Prop_Type.py |
from _MOM import MOM
from _TFL import TFL
import _MOM._Meta
import _TFL._Meta.M_Auto_Combine_Nested_Classes
import _TFL._Meta.M_Auto_Update_Combined
import _TFL._Meta.Once_Property
import _TFL.normalized_indent
from _TFL.predicate import first
from _TFL.pyk import pyk
class _M_Doc_Map_ (TFL.Meta.M_Class) :
"""Meta class for `_Doc_Map_` classes."""
def __init__ (cls, name, bases, dct) :
cls.__m_super.__init__ (name, bases, dct)
_own_names = set (k for k in dct if not k.startswith ("__"))
_names = set (_own_names)
for b in cls.__bases__ :
_names.update (getattr (b, "_names", ()))
for k in _own_names :
v = dct [k]
if v :
v = TFL.normalized_indent (v)
setattr (cls, k, v)
setattr (cls, "_names", _names)
setattr (cls, "_own_names", _own_names)
if not cls.__doc__ :
setattr \
( cls, "__doc__"
, first (b.__doc__ for b in cls.__bases__ if b.__doc__)
)
cls._OWN = cls._ALL = None
# end def __init__
@property
def ALL (cls) :
"""All documented attributes of `cls` and its ancestors."""
result = cls._ALL
if result is None :
result = cls._ALL = cls._items (cls._names)
return result
# end def ALL
@property
def OWN (cls) :
"""Documented attributes of `cls` itself."""
result = cls._OWN
if result is None :
result = cls._OWN = cls._items (cls._own_names)
return result
# end def OWN
    def get (cls, key, default) :
try :
return getattr (cls, key)
except AttributeError :
return default
# end def get
def _items (cls, names) :
def _gen (cls, names) :
for k in sorted (names) :
v = getattr (cls, k)
if v :
yield k, v
return list (_gen (cls, names))
# end def _items
def __getitem__ (cls, key) :
try :
return getattr (cls, key)
except AttributeError :
raise KeyError (key)
# end def __getitem__
def __iter__ (cls) :
return iter (cls._names)
# end def __iter__
# end class _M_Doc_Map_
class M_Prop_Type \
( TFL.Meta.M_Auto_Update_Combined
, TFL.Meta.M_Auto_Combine_Nested_Classes
) :
"""Root of metaclasses for MOM.Attr.Type and MOM.Pred.Type"""
count = 0
_nested_classes_to_combine = ("_Doc_Map_", )
def __new__ (meta, name, bases, dct) :
doc = dct.get ("__doc__")
if not doc :
if "__doc__" in dct :
del dct ["__doc__"]
elif "description" not in dct :
dct ["description"] = doc
dct ["name"] = name
return meta.__mc_super.__new__ (meta, name, bases, dct)
# end def __new__
def __init__ (cls, name, bases, dct) :
M_Prop_Type.count += 1
cls._i_rank = M_Prop_Type.count
cls.__m_super.__init__ (name, bases, dct)
cls.dyn_doc_p = dyn_doc_p = dict (getattr (cls, "dyn_doc_p", {}))
for n in cls._doc_properties :
v = dct.get (n)
if v :
v = TFL.normalized_indent (v)
setattr (cls, n, v)
if "%(" in v :
dyn_doc_p [n] = v
if '"' in v :
### Double quotes in _doc_properties break generated HTML
### like::
### """<input title="%s" ···>""" % (v, )
raise TypeError \
( "Property `%s` of %s must not contain double quotes"
% (n, cls)
)
if not cls.__doc__ :
cls.__doc__ = cls.description
# end def __init__
# end class M_Prop_Type
### «text» ### start of documentation
__doc__ = """
`MOM.Meta.M_Prop_Type` provides the meta machinery for defining
:class:`attribute<_MOM._Meta.M_Attr_Type.Root>` and
:class:`predicate<_MOM._Meta.M_Pred_Type.M_Pred_Type>` types.
`M_Prop_Type` adds the class attributes:
.. attribute:: name
The name of the property.
`M_Prop_Type` normalizes the `__doc__`, `description` and `explanation`
attributes:
* It removes an empty `__doc__` attribute to allow inheritance (by
default, each Python class gets an empty `__doc__` attribute if the
class definition doesn't contain an explicit docstring).
* It sets `description` to the value of `__doc__`, if the class
definition contains an explicit docstring.
* It normalizes the indentation of `description` and `explanation` by
calling `TFL.normalized_indent`.
"""
if __name__ != "__main__" :
MOM.Meta._Export ("*","_M_Doc_Map_")
### __END__ MOM.Meta.M_Prop_Type | PypiClean |
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/trans/views/git.py |
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from weblate.trans.util import redirect_param
from weblate.utils import messages
from weblate.utils.errors import report_error
from weblate.utils.lock import WeblateLockTimeout
from weblate.utils.views import get_component, get_project, get_translation
def execute_locked(request, obj, message, call, *args, **kwargs):
"""Helper function to catch possible lock exception."""
try:
result = call(*args, **kwargs)
# With False the call is supposed to show errors on its own
if result is None or result:
messages.success(request, message)
except WeblateLockTimeout:
messages.error(
request,
_("Failed to lock the repository, another operation is in progress."),
)
report_error()
return redirect_param(obj, "#repository")
def perform_commit(request, obj):
"""Helper function to do the repository commit."""
if not request.user.has_perm("vcs.commit", obj):
raise PermissionDenied()
return execute_locked(
request,
obj,
_("All pending translations were committed."),
obj.commit_pending,
"commit",
request.user,
)
def perform_update(request, obj):
"""Helper function to do the repository update."""
if not request.user.has_perm("vcs.update", obj):
raise PermissionDenied()
return execute_locked(
request,
obj,
_("All repositories were updated."),
obj.do_update,
request,
method=request.GET.get("method"),
)
def perform_push(request, obj):
"""Helper function to do the repository push."""
if not request.user.has_perm("vcs.push", obj):
raise PermissionDenied()
return execute_locked(
request, obj, _("All repositories were pushed."), obj.do_push, request
)
def perform_reset(request, obj):
"""Helper function to do the repository reset."""
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return execute_locked(
request, obj, _("All repositories have been reset."), obj.do_reset, request
)
def perform_cleanup(request, obj):
"""Helper function to do the repository cleanup."""
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return execute_locked(
request,
obj,
_("All repositories have been cleaned up."),
obj.do_cleanup,
request,
)
def perform_file_sync(request, obj):
"""Helper function to do the repository file_sync."""
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return execute_locked(
request,
obj,
_("Translation files have been synchronized."),
obj.do_file_sync,
request,
)
@login_required
@require_POST
def commit_project(request, project):
obj = get_project(request, project)
return perform_commit(request, obj)
@login_required
@require_POST
def commit_component(request, project, component):
obj = get_component(request, project, component)
return perform_commit(request, obj)
@login_required
@require_POST
def commit_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
return perform_commit(request, obj)
@login_required
@require_POST
def update_project(request, project):
obj = get_project(request, project)
return perform_update(request, obj)
@login_required
@require_POST
def update_component(request, project, component):
obj = get_component(request, project, component)
return perform_update(request, obj)
@login_required
@require_POST
def update_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
return perform_update(request, obj)
@login_required
@require_POST
def push_project(request, project):
obj = get_project(request, project)
return perform_push(request, obj)
@login_required
@require_POST
def push_component(request, project, component):
obj = get_component(request, project, component)
return perform_push(request, obj)
@login_required
@require_POST
def push_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
return perform_push(request, obj)
@login_required
@require_POST
def reset_project(request, project):
obj = get_project(request, project)
return perform_reset(request, obj)
@login_required
@require_POST
def reset_component(request, project, component):
obj = get_component(request, project, component)
return perform_reset(request, obj)
@login_required
@require_POST
def reset_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
return perform_reset(request, obj)
@login_required
@require_POST
def cleanup_project(request, project):
obj = get_project(request, project)
return perform_cleanup(request, obj)
@login_required
@require_POST
def cleanup_component(request, project, component):
obj = get_component(request, project, component)
return perform_cleanup(request, obj)
@login_required
@require_POST
def cleanup_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
return perform_cleanup(request, obj)
@login_required
@require_POST
def file_sync_project(request, project):
obj = get_project(request, project)
return perform_file_sync(request, obj)
@login_required
@require_POST
def file_sync_component(request, project, component):
obj = get_component(request, project, component)
return perform_file_sync(request, obj)
@login_required
@require_POST
def file_sync_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
return perform_file_sync(request, obj) | PypiClean |
/FlaskFarm-4.0.104-py3-none-any.whl/flaskfarm/lib/framework/static/js/ff_common1.js | var tmp = window.location.pathname.split('/');
if (tmp.length == 2) {
var PACKAGE_NAME = tmp[1];
var MODULE_NAME = "";
var PAGE_NAME = "";
} else if (tmp.length == 3) {
var PACKAGE_NAME = tmp[1];
var MODULE_NAME = tmp[2];
var PAGE_NAME = "";
} else if (tmp.length > 3){
var PACKAGE_NAME = tmp[1];
var MODULE_NAME = tmp[2];
var PAGE_NAME = tmp[3];
}
var current_data = null;
var current_page = null;
console.log("NAME: [" + PACKAGE_NAME + '] [' + MODULE_NAME + '] [' + PAGE_NAME + ']');
$(window).on("load resize", function (event) {
var $navbar = $(".navbar");
var $body = $("body");
$body.css("padding-top", $navbar.outerHeight());
});
$('#command_modal').on('show.bs.modal', function (event) {
})
///////////////////////////////////////
// Usage unverified
///////////////////////////////////////
// Notifications
$.notify({
// options
icon: 'glyphicon glyphicon-ok',
title: 'APP',
message: '',
url: '',
target: '_blank'
},{
// settings
element: 'body',
position: null,
type: "info",
allow_dismiss: true,
newest_on_top: false,
showProgressbar: false,
placement: {
from: "top",
align: "right"
},
offset: 20,
spacing: 10,
z_index: 3000,
delay: 10000,
timer: 1000,
url_target: '_blank',
mouse_over: null,
animate: {
enter: 'animated fadeInDown',
exit: 'animated fadeOutUp'
},
onShow: null,
onShown: null,
onClose: null,
onClosed: null,
icon_type: 'class',
template: '<div data-notify="container" class="col-xs-11 col-sm-3 alert alert-{0}" role="alert">' +
'<button type="button" aria-hidden="true" class="close" data-notify="dismiss">×</button>' +
'<span data-notify="icon"></span> ' +
'<span data-notify="title" style="word-break:break-all;">{1}</span> ' +
'<span data-notify="message" style="word-break:break-all;">{2}</span>' +
'<div class="progress" data-notify="progressbar">' +
'<div class="progress-bar progress-bar-{0}" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="width: 0%;"></div>' +
'</div>' +
'<a href="{3}" target="{4}" data-notify="url"></a>' +
'</div>'
});
function notify(msg, type) {
$.notify('<strong>' + msg + '</strong>', {type: type, z_index: 3000});
}
// Remove menus
function hideMenu() {
$("#menu_div").html('');
hideMenuModule();
hideMenuPage();
}
function hideMenuModule() {
$("#menu_module_div").html('');
}
function hideMenuPage() {
$("#menu_page_div").html('');
}
// Wide layout
function setWide() {
$('#main_container').attr('class', 'container-fluid');
}
function showModal(data='EMPTY', title='JSON', json=true) {
document.getElementById("modal_title").innerHTML = title;
if (json) {
data = JSON.stringify(data, null, 2);
}
document.getElementById("modal_body").innerHTML = '<pre style="white-space: pre-wrap;">' +data + '</pre>';
$("#large_modal").modal();
}
function getFormdata(form_id) {
    // values may come in as on/off, so normalize them all to True/False;
    // unchecked boxes are not serialized below, so append them manually as False
var checkboxs = $(form_id + ' input[type=checkbox]');
//for (var i in checkboxs) {
for (var i =0 ; i < checkboxs.length; i++) {
if ( $(checkboxs[i]).is(':checked') ) {
$(checkboxs[i]).val('True');
} else {
$(checkboxs[i]).val('False');
}
}
var formData = $(form_id).serialize();
$.each($(form_id + ' input[type=checkbox]')
.filter(function(idx) {
return $(this).prop('checked') === false
}),
function(idx, el) {
var emptyVal = "False";
formData += '&' + $(el).attr('name') + '=' + emptyVal;
}
);
formData = formData.replace("&global_scheduler=True", "")
formData = formData.replace("&global_scheduler=False", "")
formData = formData.replace("global_scheduler=True&", "")
formData = formData.replace("global_scheduler=False&", "")
return formData;
}
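// Illustrative example (the '#setting_form' id is a placeholder assumption):
// getFormdata('#setting_form') returns a query string such as
// "interval=10&auto_start=True&use_proxy=False", with every checkbox value
// forced to the literal strings "True" / "False".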
///////////////////////////////////////
// camel
function use_collapse(div, reverse=false) {
var ret = $('#' + div).prop('checked');
if (reverse) {
if (ret) {
$('#' + div + '_div').collapse('hide');
} else {
$('#' + div + '_div').collapse('show');
}
} else {
if (ret) {
$('#' + div + '_div').collapse('show');
} else {
$('#' + div + '_div').collapse('hide');
}
}
}
// jquery extend function
// redirect while sending the data as a POST request
$.extend(
{
redirectPost: function(location, args)
{
var form = '';
$.each( args, function( key, value ) {
value = value.split('"').join('\"')
form += '<input type="hidden" name="'+key+'" value="'+value+'">';
});
$('<form action="' + location + '" method="POST">' + form + '</form>').appendTo($(document.body)).submit();
}
});
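// Usage sketch (hypothetical URL and fields): submit values to another page via
// POST instead of encoding them in the query string.
//   $.redirectPost('/target/page', {mode: 'edit', id: '42'});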
///////////////////////////////////////
// Utilities - not specific to this project
///////////////////////////////////////
function humanFileSize(bytes) {
var thresh = 1024;
if(Math.abs(bytes) < thresh) {
return bytes + ' B';
}
var units = ['KB','MB','GB','TB','PB','EB','ZB','YB']
var u = -1;
do {
bytes /= thresh;
++u;
} while(Math.abs(bytes) >= thresh && u < units.length - 1);
return bytes.toFixed(1)+' '+units[u];
}
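// Worked examples of the 1024-based conversion above:
//   humanFileSize(500)        -> "500 B"
//   humanFileSize(123456789)  -> "117.7 MB"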
function FormatNumberLength(num, length) {
var r = "" + num;
while (r.length < length) {
r = "0" + r;
}
return r;
}
function msToHMS( ms ) {
// 1- Convert to seconds:
var seconds = ms / 1000;
// 2- Extract hours:
var hours = parseInt( seconds / 3600 ); // 3,600 seconds in 1 hour
seconds = seconds % 3600; // seconds remaining after extracting hours
// 3- Extract minutes:
var minutes = parseInt( seconds / 60 ); // 60 seconds in 1 minute
// 4- Keep only seconds not extracted to minutes:
seconds = seconds % 60;
    return (''+hours).padStart(2, "0")+":"+(''+minutes).padStart(2, "0")+":"+(''+parseInt(seconds)).padStart(2, "0");
}
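// Worked example: msToHMS(3750000) -> "01:02:30" (1 hour, 2 minutes, 30 seconds)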
///////////////////////////////////////
// Usage not confirmed
///////////////////////////////////////
function duration_str(duration) {
duration = duration / 100;
var minutes = parseInt(duration / 60);
var hour = parseInt(minutes / 60);
var min = parseInt(minutes % 60);
var sec = parseInt((duration/60 - parseInt(duration/60)) * 60);
return pad(hour, 2) + ':' + pad(min, 2) + ':' + pad(sec,2);
}
// Zero padding
function pad(n, width) {
n = n + '';
return n.length >= width ? n : new Array(width - n.length + 1).join('0') + n;
} | PypiClean |
/KratosShapeOptimizationApplication-9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/ShapeOptimizationApplication/response_functions/mesh_based_packaging.py |
import KratosMultiphysics as KM
import KratosMultiphysics.ShapeOptimizationApplication as KSO
from .packaging_response_base import PackagingResponseBase
from ..custom_ios.wrl_io import WrlIO
class MeshBasedPackaging(PackagingResponseBase):
"""
    A class for the mesh-based packaging response function. The mesh should contain surface conditions only.
By default the normals of the conditions indicate the feasible side of the mesh (see setting 'feasible_in_normal_direction')
"""
def __init__(self, identifier, response_settings, model):
super().__init__(identifier, response_settings, model)
self.packaging_model_part = None
self.packaging_model_part_needs_to_be_imported = False
packaging_model_part_name = response_settings["packaging_model_part_name"].GetString()
self.packaging_input_type = response_settings["packaging_model_import_settings"]["input_type"].GetString()
if self.packaging_input_type in ["mdpa", "vrml", "wrl"]:
self.packaging_model_part = self.model.CreateModelPart(packaging_model_part_name)
domain_size = response_settings["packaging_domain_size"].GetInt()
if domain_size not in [2, 3]:
                raise Exception("MeshBasedPackaging: Invalid 'domain_size': {}".format(domain_size))
self.packaging_model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, domain_size)
elif self.packaging_input_type == "use_input_model_part":
self.packaging_model_part = self.model.GetModelPart(packaging_model_part_name)
else:
raise Exception("Other model part input options are not yet implemented.")
self.packaging_model_part.AddNodalSolutionStepVariable(KM.NORMAL)
self.packaging_model_part.AddNodalSolutionStepVariable(KSO.NORMALIZED_SURFACE_NORMAL)
@classmethod
def GetDefaultParameters(cls):
this_defaults = KM.Parameters("""{
"packaging_model_part_name" : "UNKNOWN_NAME",
"packaging_domain_size" : 3,
"packaging_model_import_settings" : {
"input_type" : "mdpa",
"input_filename" : "UNKNOWN_NAME"
}
}""")
this_defaults.AddMissingParameters(super().GetDefaultParameters())
return this_defaults
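    # A minimal settings sketch (hypothetical values) matching the defaults above;
    # further keys are merged in from PackagingResponseBase.GetDefaultParameters():
    #   {
    #       "packaging_model_part_name" : "packaging_surface",
    #       "packaging_domain_size" : 3,
    #       "packaging_model_import_settings" : {
    #           "input_type" : "mdpa",
    #           "input_filename" : "packaging_surface"
    #       }
    #   }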
def Initialize(self):
super().Initialize()
if self.packaging_input_type == "mdpa":
model_part_io = KM.ModelPartIO(self.response_settings["packaging_model_import_settings"]["input_filename"].GetString())
model_part_io.ReadModelPart(self.packaging_model_part)
elif self.packaging_input_type in ["vrml", "wrl"]:
model_part_io = WrlIO(self.response_settings["packaging_model_import_settings"]["input_filename"].GetString())
model_part_io.ReadModelPart(self.packaging_model_part)
def _CalculateDistances(self):
geometry_tools = KSO.GeometryUtilities(self.model_part)
self.signed_distances, self.directions = geometry_tools.ComputeDistancesToBoundingModelPart(self.packaging_model_part) | PypiClean |
/CocoRPy27-1.4.1.zip/CocoRPy27-1.4.1/testSuite/TestDel_parserBaseline.py |
import sys
from Scanner import Token
from Scanner import Scanner
from Scanner import Position
class ErrorRec( object ):
def __init__( self, l, c, s ):
self.line = l
self.col = c
self.num = 0
self.str = s
class Errors( object ):
errMsgFormat = "file %(file)s : (%(line)d, %(col)d) %(text)s\n"
eof = False
count = 0 # number of errors detected
fileName = ''
listName = ''
mergeErrors = False
mergedList = None # PrintWriter
errors = [ ]
minErrDist = 2
errDist = minErrDist
# A function with prototype: f( errorNum=None ) where errorNum is a
# predefined error number. f returns a tuple, ( line, column, message )
# such that line and column refer to the location in the
# source file most recently parsed. message is the error
   # message corresponding to errorNum.
@staticmethod
def Init( fn, dir, merge, getParsingPos, errorMessages ):
Errors.theErrors = [ ]
Errors.getParsingPos = getParsingPos
Errors.errorMessages = errorMessages
Errors.fileName = fn
listName = dir + 'listing.txt'
Errors.mergeErrors = merge
if Errors.mergeErrors:
try:
Errors.mergedList = open( listName, 'w' )
except IOError:
raise RuntimeError( '-- Compiler Error: could not open ' + listName )
@staticmethod
def storeError( line, col, s ):
if Errors.mergeErrors:
Errors.errors.append( ErrorRec( line, col, s ) )
else:
Errors.printMsg( Errors.fileName, line, col, s )
@staticmethod
def SynErr( errNum, errPos=None ):
line,col = errPos if errPos else Errors.getParsingPos( )
msg = Errors.errorMessages[ errNum ]
Errors.storeError( line, col, msg )
Errors.count += 1
@staticmethod
def SemErr( errMsg, errPos=None ):
line,col = errPos if errPos else Errors.getParsingPos( )
Errors.storeError( line, col, errMsg )
Errors.count += 1
@staticmethod
def Warn( errMsg, errPos=None ):
line,col = errPos if errPos else Errors.getParsingPos( )
Errors.storeError( line, col, errMsg )
@staticmethod
def Exception( errMsg ):
print errMsg
sys.exit( 1 )
@staticmethod
def printMsg( fileName, line, column, msg ):
vals = { 'file':fileName, 'line':line, 'col':column, 'text':msg }
sys.stdout.write( Errors.errMsgFormat % vals )
@staticmethod
def display( s, e ):
Errors.mergedList.write('**** ')
for c in xrange( 1, e.col ):
if s[c-1] == '\t':
Errors.mergedList.write( '\t' )
else:
Errors.mergedList.write( ' ' )
Errors.mergedList.write( '^ ' + e.str + '\n')
@staticmethod
def Summarize( sourceBuffer ):
if Errors.mergeErrors:
# Initialize the line iterator
srcLineIter = iter(sourceBuffer)
srcLineStr = srcLineIter.next( )
srcLineNum = 1
try:
# Initialize the error iterator
errIter = iter(Errors.errors)
errRec = errIter.next( )
# Advance to the source line of the next error
while srcLineNum < errRec.line:
Errors.mergedList.write( '%4d %s\n' % (srcLineNum, srcLineStr) )
srcLineStr = srcLineIter.next( )
srcLineNum += 1
# Write out all errors for the current source line
while errRec.line == srcLineNum:
Errors.display( srcLineStr, errRec )
errRec = errIter.next( )
except:
pass
# No more errors to report
try:
# Advance to end of source file
while True:
Errors.mergedList.write( '%4d %s\n' % (srcLineNum, srcLineStr) )
srcLineStr = srcLineIter.next( )
srcLineNum += 1
except:
pass
Errors.mergedList.write( '\n' )
Errors.mergedList.write( '%d errors detected\n' % Errors.count )
Errors.mergedList.close( )
sys.stdout.write( '%d errors detected\n' % Errors.count )
if (Errors.count > 0) and Errors.mergeErrors:
sys.stdout.write( 'see ' + Errors.listName + '\n' )
class Parser( object ):
_EOF = 0
_a = 1
_b = 2
_c = 3
_d = 4
_e = 5
_f = 6
_g = 7
_h = 8
_i = 9
maxT = 10
T = True
x = False
minErrDist = 2
def __init__( self ):
self.scanner = None
self.token = None # last recognized token
self.la = None # lookahead token
self.genScanner = False
self.tokenString = '' # used in declarations of literal tokens
self.noString = '-none-' # used in declarations of literal tokens
self.errDist = Parser.minErrDist
def getParsingPos( self ):
return self.la.line, self.la.col
def SynErr( self, errNum ):
if self.errDist >= Parser.minErrDist:
Errors.SynErr( errNum )
self.errDist = 0
def SemErr( self, msg ):
if self.errDist >= Parser.minErrDist:
Errors.SemErr( msg )
self.errDist = 0
def Warning( self, msg ):
if self.errDist >= Parser.minErrDist:
Errors.Warn( msg )
self.errDist = 0
def Successful( self ):
return Errors.count == 0;
def LexString( self ):
return self.token.val
def LookAheadString( self ):
return self.la.val
def Get( self ):
while True:
self.token = self.la
self.la = self.scanner.Scan( )
if self.la.kind <= Parser.maxT:
self.errDist += 1
break
self.la = self.token
def Expect( self, n ):
if self.la.kind == n:
self.Get( )
else:
self.SynErr( n )
def StartOf( self, s ):
return self.set[s][self.la.kind]
def ExpectWeak( self, n, follow ):
if self.la.kind == n:
self.Get( )
else:
self.SynErr( n )
while not self.StartOf(follow):
self.Get( )
def WeakSeparator( self, n, syFol, repFol ):
s = [ False for i in xrange( Parser.maxT+1 ) ]
if self.la.kind == n:
self.Get( )
return True
elif self.StartOf(repFol):
return False
else:
for i in xrange( Parser.maxT ):
s[i] = self.set[syFol][i] or self.set[repFol][i] or self.set[0][i]
self.SynErr( n )
while not s[self.la.kind]:
self.Get( )
return self.StartOf( syFol )
def Test( self ):
self.A()
self.B()
self.Expect(7)
self.C()
self.Expect(7)
self.D()
def A( self ):
if self.la.kind == 1:
self.Get( )
elif self.StartOf(1):
while self.la.kind == 5:
self.Get( )
if (self.la.kind == 6):
self.Get( )
else:
self.SynErr(11)
def B( self ):
while self.la.kind == 2:
self.Get( )
if (self.la.kind == 3):
self.Get( )
if self.la.kind == 4:
self.Get( )
elif self.la.kind == 0 or self.la.kind == 7:
pass
else:
self.SynErr(12)
def C( self ):
self.A()
self.B()
def D( self ):
if self.StartOf(2):
self.C()
elif self.la.kind == 8:
self.Get( )
else:
self.SynErr(13)
def Parse( self, scanner ):
self.scanner = scanner
self.la = Token( )
self.la.val = u''
self.Get( )
self.Test()
self.Expect(0)
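   # Rough driver sketch (hypothetical file name; the Scanner constructor and its
   # buffer attribute depend on the generated Scanner module shipped with Coco/R):
   #   scanner = Scanner(srcFileName)
   #   parser = Parser()
   #   Errors.Init(srcFileName, '', False, parser.getParsingPos, parser.errorMessages)
   #   parser.Parse(scanner)
   #   Errors.Summarize(scanner.buffer)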
set = [
[T,x,x,x, x,x,x,x, x,x,x,x],
[T,x,T,T, T,T,T,T, x,x,x,x],
[T,T,T,T, T,T,T,x, x,x,x,x]
]
errorMessages = {
0 : "EOF expected",
1 : "a expected",
2 : "b expected",
3 : "c expected",
4 : "d expected",
5 : "e expected",
6 : "f expected",
7 : "g expected",
8 : "h expected",
9 : "i expected",
10 : "??? expected",
11 : "invalid A",
12 : "invalid B",
13 : "invalid D",
} | PypiClean |
/Cooky-1.0.0.tar.gz/Cooky-1.0.0/cooky/payloads/generator.py | import os
from cooky.payloads.payload import Payload, Registry
class Generator(Payload):
def __init__(self, name, initial, end, step):
"""
        Base class for a generator payload; this type of payload iterates through a set of values, be they numbers or strings.
(The Generator base class implements all the features necessary for the Numbers payload)
:param name: name of the payload
:param initial: the initial value
:param end: the final value
:param step: the step that the values should increment in (if decreasing payload is desired use negative step)
"""
Payload.__init__(self, name)
self.initial = initial
self.count = initial
self.step = step
self.end = end
def next(self):
"""
Returns the next value for the payload by generating it and then incrementing the count by the defined step
:return: the next payload value
"""
payload = self.generate()
self.count += self.step
return payload
def reset(self):
"""
Rewinds the count for the generator back to the initial value so it can be restarted or looped
"""
self.count = self.initial
def done(self):
"""
Determines if the generator has any more values to generate (if the count is greater than the end)
:return: True if there are no more values and False if there are
"""
return self.count > self.end
def generate(self):
"""
Generates the current value for the payload
:return: the count
"""
return self.count
@classmethod
def setup(cls):
"""
Classmethod facilitating interactive setup of the Numbers payload, see Generator constructor for input values
:return: if all inputs are valid it returns a Generator or Numbers payload, otherwise None
"""
name = input("Choose a name for this payload: ")
initial = int(input("Choose a start value: "))
end = int(input("Choose an end value: "))
step = int(input("Choose a step value: "))
if isinstance(name, str) and all(map(lambda t: isinstance(t, int), (initial, end, step))):
Registry.register(name, cls(name=name, initial=initial, end=end, step=step))
return Registry.get(name)
else:
print("Incompatible types", type(name), type(initial), type(end), type(step))
return None
def __repr__(self):
return f"{type(self).__name__}(initial: {self.initial}, end: {self.end}, step: {self.step})"
class Numbers(Generator):
pass
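# A minimal sketch (hypothetical values) of driving a Numbers payload directly:
#   nums = Numbers(name="ids", initial=0, end=9, step=1)
#   while not nums.done():
#       value = nums.next()  # yields 0, 1, ..., 9
#       ...
#   nums.reset()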
class Strings(Generator):
def __init__(self, name, strings, file, initial, end, step):
"""
        This payload inherits Generator and generates string values from a
        list of strings loaded from a file.
        :param name: name of the payload
        :param strings: list of strings to iterate over
        :param file: path of the file the strings were loaded from
        :param initial: index of the first string to use
        :param end: index of the last string to use
        :param step: step between successive string indices
"""
Generator.__init__(self, name, initial, end, step)
self.strings = strings
self.file = file
def generate(self):
return self.strings[self.count]
@classmethod
def setup(cls):
name = input("Choose a name for this payload: ")
file = input("Choose a file path for strings: ")
if not isinstance(name, str) or not os.path.isfile(file):
print("File not found or name not valid")
return None
string_file = open(file, "r", newline="\n")
strings = [string for string in string_file.read().splitlines()]
string_file.close()
initial = 0
end = len(strings) - 1
step = 1
Registry.register(name, cls(name=name, strings=strings, file=file, initial=initial, end=end, step=step))
return Registry.get(name)
def __repr__(self):
return f"{type(self).__name__}(file: {self.file})" | PypiClean |
/HALEasy-0.4.3.tar.gz/HALEasy-0.4.3/LICENSE.md | The MIT License (MIT)
Copyright (c) 2014 Matthew Clark <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| PypiClean |
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/ms/index.js | var s = 1000;
var m = s * 60;
var h = m * 60;
var d = h * 24;
var w = d * 7;
var y = d * 365.25;
/**
* Parse or format the given `val`.
*
* Options:
*
* - `long` verbose formatting [false]
*
* @param {String|Number} val
* @param {Object} [options]
* @throws {Error} throw an error if val is not a non-empty string or a number
* @return {String|Number}
* @api public
*/
module.exports = function(val, options) {
options = options || {};
var type = typeof val;
if (type === 'string' && val.length > 0) {
return parse(val);
} else if (type === 'number' && isFinite(val)) {
return options.long ? fmtLong(val) : fmtShort(val);
}
throw new Error(
'val is not a non-empty string or a valid number. val=' +
JSON.stringify(val)
);
};
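// Usage examples (values follow from the conversions below):
//   ms('2 days')               // 172800000
//   ms('1h')                   // 3600000
//   ms(60000)                  // "1m"
//   ms(60000, { long: true })  // "1 minute"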
/**
* Parse the given `str` and return milliseconds.
*
* @param {String} str
* @return {Number}
* @api private
*/
function parse(str) {
str = String(str);
if (str.length > 100) {
return;
}
var match = /^(-?(?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|weeks?|w|years?|yrs?|y)?$/i.exec(
str
);
if (!match) {
return;
}
var n = parseFloat(match[1]);
var type = (match[2] || 'ms').toLowerCase();
switch (type) {
case 'years':
case 'year':
case 'yrs':
case 'yr':
case 'y':
return n * y;
case 'weeks':
case 'week':
case 'w':
return n * w;
case 'days':
case 'day':
case 'd':
return n * d;
case 'hours':
case 'hour':
case 'hrs':
case 'hr':
case 'h':
return n * h;
case 'minutes':
case 'minute':
case 'mins':
case 'min':
case 'm':
return n * m;
case 'seconds':
case 'second':
case 'secs':
case 'sec':
case 's':
return n * s;
case 'milliseconds':
case 'millisecond':
case 'msecs':
case 'msec':
case 'ms':
return n;
default:
return undefined;
}
}
/**
* Short format for `ms`.
*
* @param {Number} ms
* @return {String}
* @api private
*/
function fmtShort(ms) {
var msAbs = Math.abs(ms);
if (msAbs >= d) {
return Math.round(ms / d) + 'd';
}
if (msAbs >= h) {
return Math.round(ms / h) + 'h';
}
if (msAbs >= m) {
return Math.round(ms / m) + 'm';
}
if (msAbs >= s) {
return Math.round(ms / s) + 's';
}
return ms + 'ms';
}
/**
* Long format for `ms`.
*
* @param {Number} ms
* @return {String}
* @api private
*/
function fmtLong(ms) {
var msAbs = Math.abs(ms);
if (msAbs >= d) {
return plural(ms, msAbs, d, 'day');
}
if (msAbs >= h) {
return plural(ms, msAbs, h, 'hour');
}
if (msAbs >= m) {
return plural(ms, msAbs, m, 'minute');
}
if (msAbs >= s) {
return plural(ms, msAbs, s, 'second');
}
return ms + ' ms';
}
/**
* Pluralization helper.
*/
function plural(ms, msAbs, n, name) {
var isPlural = msAbs >= n * 1.5;
return Math.round(ms / n) + ' ' + name + (isPlural ? 's' : '');
} | PypiClean |
/K_AIKO-0.5.2-py3-none-any.whl/kaiko/tui/textboxes.py | import dataclasses
from typing import Tuple
import numpy
from ..utils import datanodes as dn
from ..utils import markups as mu
from ..devices import engines
from ..devices import clocks
@dataclasses.dataclass(frozen=True)
class Caret(mu.Pair):
name = "caret"
@dataclasses.dataclass
class TextBoxWidgetSettings:
r"""
Fields
------
caret_margins : tuple of int and int
The width of left/right margins of caret.
overflow_ellipses : tuple of str and str
Texts to display when overflowing left/right.
caret_normal_appearance : str
The markup template of the normal-style caret.
caret_blinking_appearance : str
The markup template of the blinking-style caret.
caret_highlighted_appearance : str
The markup template of the highlighted-style caret.
caret_blink_ratio : float
The ratio to blink.
"""
caret_margins: Tuple[int, int] = (3, 3)
overflow_ellipses: Tuple[str, str] = ("…", "…")
caret_normal_appearance: str = "[slot/]"
caret_blinking_appearance: str = "[weight=dim][invert][slot/][/][/]"
caret_highlighted_appearance: str = "[weight=bold][invert][slot/][/][/]"
caret_blink_ratio: float = 0.3
class TextBox:
def __init__(self, text_node, settings):
r"""Constructor.
Parameters
----------
text_node : dn.datanode
settings : TextBoxWidgetSettings
"""
self.text_node = text_node
self.settings = settings
self.text_offset = 0
self.left_overflow = False
self.right_overflow = False
def text_geometry(self, markup, text_width=0, caret_masks=(), *, rich):
if isinstance(markup, mu.Text):
w = rich.widthof(markup.string)
if w == -1:
raise TypeError(f"invalid text: {markup.string!r}")
text_width += w
return text_width, caret_masks
elif isinstance(markup, (mu.Group, mu.SGR)):
res = text_width, caret_masks
for child in markup.children:
res = self.text_geometry(child, *res, rich=rich)
return res
elif isinstance(markup, Caret):
start = text_width
res = text_width, caret_masks
for child in markup.children:
res = self.text_geometry(child, *res, rich=rich)
text_width, caret_masks = res
stop = text_width
caret_masks = (*caret_masks, slice(start, stop))
return text_width, caret_masks
elif isinstance(markup, (mu.CSI, mu.ControlCharacter)):
raise TypeError(f"invalid markup type: {type(markup)}")
else:
raise TypeError(f"unknown markup type: {type(markup)}")
def shift_text(
self, text_width, caret_masks, box_width, *, left_margin, right_margin
):
# trim empty spaces
if text_width - self.text_offset < box_width - right_margin:
# from: ......[....I... ]
# to: ...[.......I... ]
self.text_offset = max(0, text_width - box_width + right_margin)
# reveal the rightmost caret
caret_stop = max(
(caret_slice.stop for caret_slice in caret_masks), default=float("-inf")
)
if caret_stop - self.text_offset > box_width - right_margin:
# from: ...[............]..I....
# to: ........[..........I.]..
self.text_offset = caret_stop - box_width + right_margin
# reveal the leftmost caret
caret_start = min(
(caret_slice.start for caret_slice in caret_masks), default=float("inf")
)
if caret_start - self.text_offset < left_margin:
# from: .....I...[............]...
# to: ...[.I..........].........
self.text_offset = max(caret_start - left_margin, 0)
# determine overflow
self.left_overflow = self.text_offset > 0
self.right_overflow = text_width - self.text_offset > box_width
def draw_ellipses(
self, box_width, *, left_ellipsis, right_ellipsis, right_ellipsis_width
):
res = []
if self.left_overflow:
res.append((0, left_ellipsis))
if self.right_overflow:
res.append((box_width - right_ellipsis_width, right_ellipsis))
return res
@dn.datanode
def adjust_view(self, *, rich):
caret_margins = self.settings.caret_margins
overflow_ellipses = self.settings.overflow_ellipses
left_ellipsis = rich.parse(overflow_ellipses[0])
left_ellipsis_width = rich.widthof(left_ellipsis)
right_ellipsis = rich.parse(overflow_ellipses[1])
right_ellipsis_width = rich.widthof(right_ellipsis)
if left_ellipsis_width == -1 or right_ellipsis_width == -1:
raise ValueError(f"invalid ellipsis: {overflow_ellipses!r}")
left_margin = max(caret_margins[0], left_ellipsis_width)
right_margin = max(caret_margins[1], right_ellipsis_width)
text_geometry = dn.starcachemap(self.text_geometry, rich=rich)
shift_text = dn.starmap(
self.shift_text,
left_margin=left_margin,
right_margin=right_margin,
)
draw_ellipses = dn.map(
self.draw_ellipses,
left_ellipsis=left_ellipsis,
right_ellipsis=right_ellipsis,
right_ellipsis_width=right_ellipsis_width,
)
with text_geometry, shift_text, draw_ellipses:
markup, box_width = yield
while True:
text_width, caret_masks = text_geometry.send((markup,))
shift_text.send((text_width, caret_masks, box_width))
ellipses_res = draw_ellipses.send(box_width)
markup, box_width = yield ellipses_res
@dn.datanode
def get_caret_template(self, *, rich, metronome):
caret_blink_ratio = self.settings.caret_blink_ratio
normal_template = rich.parse(
self.settings.caret_normal_appearance, slotted=True
)
blinking_template = rich.parse(
self.settings.caret_blinking_appearance, slotted=True
)
highlighted_template = rich.parse(
self.settings.caret_highlighted_appearance, slotted=True
)
key_pressed_beat = 0
time, key_pressed = yield
while True:
beat = metronome.beat(time)
# don't blink while key pressing
if beat < key_pressed_beat or beat % 1 < caret_blink_ratio:
if beat % 4 < 1:
res = highlighted_template
else:
res = blinking_template
else:
res = normal_template
time, key_pressed = yield res
if key_pressed:
                key_pressed_beat = metronome.beat(time) // -1 * -1  # ceil(beat) via floor division by -1
@dn.datanode
def render_caret(self, *, rich, metronome):
def render_caret_cached(markup, caret_template):
if caret_template is not None:
markup = markup.traverse(
Caret,
lambda m: caret_template(mu.Group(m.children)),
strategy=mu.TraverseStrategy.TopDown,
)
else:
markup = markup.traverse(
Caret,
lambda m: mu.Group(m.children),
strategy=mu.TraverseStrategy.TopDown,
)
return markup.expand()
get_caret_template = self.get_caret_template(rich=rich, metronome=metronome)
render_caret_cached = dn.starcachemap(render_caret_cached)
with get_caret_template, render_caret_cached:
markup, time, key_pressed = yield
while True:
caret_template = get_caret_template.send((time, key_pressed))
markup = render_caret_cached.send((markup, caret_template))
markup, time, key_pressed = yield markup
@dn.datanode
def render_textbox(self, *, rich, metronome):
text_node = self.text_node
adjust_view = self.adjust_view(rich=rich)
render_caret = self.render_caret(rich=rich, metronome=metronome)
with text_node, adjust_view, render_caret:
time, ran = yield
while True:
markup, key_pressed = text_node.send()
ellipses = adjust_view.send((markup, len(ran)))
markup = render_caret.send((markup, time, key_pressed))
time, ran = yield [(-self.text_offset, markup), *ellipses]
def load(self, provider):
rich = provider.get(mu.RichParser)
metronome = provider.get(clocks.Metronome)
return self.render_textbox(rich=rich, metronome=metronome) | PypiClean |
/Adafruit_Blinka-8.20.1-py3-none-any.whl/adafruit_blinka/board/hardkernel/odroidn2.py | """Pin definitions for the Odroid N2."""
from adafruit_blinka.microcontroller.amlogic.s922x import pin
GPIOX_0 = pin.GPIO476
GPIOX_1 = pin.GPIO477
GPIOX_2 = pin.GPIO478
GPIOX_3 = pin.GPIO479
GPIOX_4 = pin.GPIO480
GPIOX_5 = pin.GPIO481
GPIOX_6 = pin.GPIO482
GPIOX_7 = pin.GPIO483
GPIOX_8 = pin.GPIO484
GPIOX_9 = pin.GPIO485
GPIOX_10 = pin.GPIO486
GPIOX_11 = pin.GPIO487
GPIOX_12 = pin.GPIO488
GPIOX_13 = pin.GPIO489
GPIOX_14 = pin.GPIO490
GPIOX_15 = pin.GPIO491
GPIOX_16 = pin.GPIO492
GPIOX_17 = pin.GPIO493
GPIOX_18 = pin.GPIO494
GPIOX_19 = pin.GPIO495
GPIODV_24 = pin.GPIO493
GPIODV_25 = pin.GPIO494
GPIODV_26 = pin.GPIO474
GPIODV_27 = pin.GPIO475
GPIOA_4 = pin.GPIO464
GPIOA_12 = pin.GPIO472
GPIOA_13 = pin.GPIO473
GPIOA_14 = pin.GPIO474
GPIOA_15 = pin.GPIO475
GPIOA0_0 = pin.GPIO496
GPIOA0_1 = pin.GPIO497
GPIOA0_2 = pin.GPIO498
GPIOA0_3 = pin.GPIO499
GPIOA0_4 = pin.GPIO500
GPIOA0_5 = pin.GPIO501
GPIOA0_6 = pin.GPIO502
GPIOA0_7 = pin.GPIO503
GPIOA0_8 = pin.GPIO504
GPIOA0_9 = pin.GPIO505
GPIOA0_10 = pin.GPIO506
GPIOA0_11 = pin.GPIO507
GPIOA0_12 = pin.GPIO508
GPIOA0_13 = pin.GPIO509
GPIOA0_14 = pin.GPIO510
GPIOA0_15 = pin.GPIO511
for it in pin.i2cPorts:
globals()["SCL" + str(it[0])] = it[1]
globals()["SDA" + str(it[0])] = it[2]
# Set second i2c bus as default for backward compatibility.
SCL = pin.i2cPorts[1][1]
SDA = pin.i2cPorts[1][2]
SCLK = pin.SPI0_SCLK
MOSI = pin.SPI0_MOSI
MISO = pin.SPI0_MISO
SPI_CS0 = pin.GPIO486
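# Typical Blinka usage with the aliases above (sketch; the available buses depend
# on the board's device-tree overlay configuration):
#   import board
#   import busio
#   i2c = busio.I2C(board.SCL, board.SDA)
#   spi = busio.SPI(board.SCLK, board.MOSI, board.MISO)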
D0 = GPIOX_3 # PIN_11
D1 = GPIOX_16 # PIN_12
D2 = GPIOX_4 # PIN_13
D3 = GPIOX_7 # PIN_15
D4 = GPIOX_0 # PIN_16
D5 = GPIOX_1 # PIN_18
D6 = GPIOX_2 # PIN_22
D7 = GPIOA_13 # PIN_7
D8 = GPIOX_17 # PIN_3
D9 = GPIOX_18 # PIN_5
D10 = GPIOX_10 # PIN_24
D11 = GPIOA_4 # PIN_26
D12 = GPIOX_8 # PIN_19
D13 = GPIOX_9 # PIN_21
D14 = GPIOX_11 # PIN_23
D15 = GPIOX_12 # PIN_8
D16 = GPIOX_13 # PIN_10
D21 = GPIOX_14 # PIN_29
D22 = GPIOX_15 # PIN_31
D23 = GPIOX_5 # PIN_33
D24 = GPIOX_6 # PIN_35
D26 = GPIOA_12 # PIN_32
D27 = GPIOX_19 # PIN_36
D30 = GPIOA_14 # PIN_27
D31 = GPIOA_15 # PIN_28 | PypiClean |
/GoogleAppEngineMapReduce-1.9.22.0.tar.gz/GoogleAppEngineMapReduce-1.9.22.0/mapreduce/third_party/crc32c.py | # TODO(user): We would really benefit from having this as a c extension.
import array
CRC_TABLE = (
0x00000000L, 0xf26b8303L, 0xe13b70f7L, 0x1350f3f4L,
0xc79a971fL, 0x35f1141cL, 0x26a1e7e8L, 0xd4ca64ebL,
0x8ad958cfL, 0x78b2dbccL, 0x6be22838L, 0x9989ab3bL,
0x4d43cfd0L, 0xbf284cd3L, 0xac78bf27L, 0x5e133c24L,
0x105ec76fL, 0xe235446cL, 0xf165b798L, 0x030e349bL,
0xd7c45070L, 0x25afd373L, 0x36ff2087L, 0xc494a384L,
0x9a879fa0L, 0x68ec1ca3L, 0x7bbcef57L, 0x89d76c54L,
0x5d1d08bfL, 0xaf768bbcL, 0xbc267848L, 0x4e4dfb4bL,
0x20bd8edeL, 0xd2d60dddL, 0xc186fe29L, 0x33ed7d2aL,
0xe72719c1L, 0x154c9ac2L, 0x061c6936L, 0xf477ea35L,
0xaa64d611L, 0x580f5512L, 0x4b5fa6e6L, 0xb93425e5L,
0x6dfe410eL, 0x9f95c20dL, 0x8cc531f9L, 0x7eaeb2faL,
0x30e349b1L, 0xc288cab2L, 0xd1d83946L, 0x23b3ba45L,
0xf779deaeL, 0x05125dadL, 0x1642ae59L, 0xe4292d5aL,
0xba3a117eL, 0x4851927dL, 0x5b016189L, 0xa96ae28aL,
0x7da08661L, 0x8fcb0562L, 0x9c9bf696L, 0x6ef07595L,
0x417b1dbcL, 0xb3109ebfL, 0xa0406d4bL, 0x522bee48L,
0x86e18aa3L, 0x748a09a0L, 0x67dafa54L, 0x95b17957L,
0xcba24573L, 0x39c9c670L, 0x2a993584L, 0xd8f2b687L,
0x0c38d26cL, 0xfe53516fL, 0xed03a29bL, 0x1f682198L,
0x5125dad3L, 0xa34e59d0L, 0xb01eaa24L, 0x42752927L,
0x96bf4dccL, 0x64d4cecfL, 0x77843d3bL, 0x85efbe38L,
0xdbfc821cL, 0x2997011fL, 0x3ac7f2ebL, 0xc8ac71e8L,
0x1c661503L, 0xee0d9600L, 0xfd5d65f4L, 0x0f36e6f7L,
0x61c69362L, 0x93ad1061L, 0x80fde395L, 0x72966096L,
0xa65c047dL, 0x5437877eL, 0x4767748aL, 0xb50cf789L,
0xeb1fcbadL, 0x197448aeL, 0x0a24bb5aL, 0xf84f3859L,
0x2c855cb2L, 0xdeeedfb1L, 0xcdbe2c45L, 0x3fd5af46L,
0x7198540dL, 0x83f3d70eL, 0x90a324faL, 0x62c8a7f9L,
0xb602c312L, 0x44694011L, 0x5739b3e5L, 0xa55230e6L,
0xfb410cc2L, 0x092a8fc1L, 0x1a7a7c35L, 0xe811ff36L,
0x3cdb9bddL, 0xceb018deL, 0xdde0eb2aL, 0x2f8b6829L,
0x82f63b78L, 0x709db87bL, 0x63cd4b8fL, 0x91a6c88cL,
0x456cac67L, 0xb7072f64L, 0xa457dc90L, 0x563c5f93L,
0x082f63b7L, 0xfa44e0b4L, 0xe9141340L, 0x1b7f9043L,
0xcfb5f4a8L, 0x3dde77abL, 0x2e8e845fL, 0xdce5075cL,
0x92a8fc17L, 0x60c37f14L, 0x73938ce0L, 0x81f80fe3L,
0x55326b08L, 0xa759e80bL, 0xb4091bffL, 0x466298fcL,
0x1871a4d8L, 0xea1a27dbL, 0xf94ad42fL, 0x0b21572cL,
0xdfeb33c7L, 0x2d80b0c4L, 0x3ed04330L, 0xccbbc033L,
0xa24bb5a6L, 0x502036a5L, 0x4370c551L, 0xb11b4652L,
0x65d122b9L, 0x97baa1baL, 0x84ea524eL, 0x7681d14dL,
0x2892ed69L, 0xdaf96e6aL, 0xc9a99d9eL, 0x3bc21e9dL,
0xef087a76L, 0x1d63f975L, 0x0e330a81L, 0xfc588982L,
0xb21572c9L, 0x407ef1caL, 0x532e023eL, 0xa145813dL,
0x758fe5d6L, 0x87e466d5L, 0x94b49521L, 0x66df1622L,
0x38cc2a06L, 0xcaa7a905L, 0xd9f75af1L, 0x2b9cd9f2L,
0xff56bd19L, 0x0d3d3e1aL, 0x1e6dcdeeL, 0xec064eedL,
0xc38d26c4L, 0x31e6a5c7L, 0x22b65633L, 0xd0ddd530L,
0x0417b1dbL, 0xf67c32d8L, 0xe52cc12cL, 0x1747422fL,
0x49547e0bL, 0xbb3ffd08L, 0xa86f0efcL, 0x5a048dffL,
0x8ecee914L, 0x7ca56a17L, 0x6ff599e3L, 0x9d9e1ae0L,
0xd3d3e1abL, 0x21b862a8L, 0x32e8915cL, 0xc083125fL,
0x144976b4L, 0xe622f5b7L, 0xf5720643L, 0x07198540L,
0x590ab964L, 0xab613a67L, 0xb831c993L, 0x4a5a4a90L,
0x9e902e7bL, 0x6cfbad78L, 0x7fab5e8cL, 0x8dc0dd8fL,
0xe330a81aL, 0x115b2b19L, 0x020bd8edL, 0xf0605beeL,
0x24aa3f05L, 0xd6c1bc06L, 0xc5914ff2L, 0x37faccf1L,
0x69e9f0d5L, 0x9b8273d6L, 0x88d28022L, 0x7ab90321L,
0xae7367caL, 0x5c18e4c9L, 0x4f48173dL, 0xbd23943eL,
0xf36e6f75L, 0x0105ec76L, 0x12551f82L, 0xe03e9c81L,
0x34f4f86aL, 0xc69f7b69L, 0xd5cf889dL, 0x27a40b9eL,
0x79b737baL, 0x8bdcb4b9L, 0x988c474dL, 0x6ae7c44eL,
0xbe2da0a5L, 0x4c4623a6L, 0x5f16d052L, 0xad7d5351L,
)
# initial CRC value
CRC_INIT = 0
_MASK = 0xFFFFFFFFL
def crc_update(crc, data):
"""Update CRC-32C checksum with data.
Args:
crc: 32-bit checksum to update as long.
data: byte array, string or iterable over bytes.
Returns:
32-bit updated CRC-32C as long.
"""
# Convert data to byte array if needed
if type(data) != array.array or data.itemsize != 1:
buf = array.array("B", data)
else:
buf = data
crc = crc ^ _MASK
for b in buf:
table_index = (crc ^ b) & 0xff
crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK
return crc ^ _MASK
def crc_finalize(crc):
"""Finalize CRC-32C checksum.
This function should be called as last step of crc calculation.
Args:
crc: 32-bit checksum as long.
Returns:
finalized 32-bit checksum as long
"""
return crc & _MASK
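# Example: checksum data incrementally and then finalize; this is equivalent to
# calling crc() on the concatenated input.
#   c = crc_update(CRC_INIT, "hello ")
#   c = crc_update(c, "world")
#   checksum = crc_finalize(c)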
def crc(data):
"""Compute CRC-32C checksum of the data.
Args:
data: byte array, string or iterable over bytes.
Returns:
32-bit CRC-32C checksum of data as long.
"""
return crc_finalize(crc_update(CRC_INIT, data)) | PypiClean |
/HoChiMinh-1.0.0-py3-none-any.whl/hochiminh/dev/font_to_image.py | import cv2
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from copy import deepcopy
from os import listdir
from os.path import isfile, join
from multiprocessing import Process
from numpy.random import randint, choice
class DatasetGenerator:
def __init__(self, in_path, out_path):
self.font_size = [11, 23]
self.font_path = 'data/fonts/'
self.fonts = ["1.ttf", "2.ttf", "3.ttf", "4.ttf", "5.ttf", "6.ttf", "7.ttf"]
self.letters = list(range(ord('А'), ord('Я') + 1)) + \
list(range(ord('а'), ord('я') + 1)) + \
list(range(ord('0'), ord('9') + 1)) + \
list(range(ord('a'), ord('z') + 1)) + \
list(range(ord('A'), ord('Z') + 1))
self.letters = [chr(letter) for letter in self.letters]
self.erode_kernel = [1, 5]
self.erode_iterate = [1, 5]
self.dilate_kernel = [1, 5]
self.dilate_iterate = [1, 5]
self.gauss_kernel = [1, 5]
self.gauss_sigma = [0, 4]
self.seq_len = [1, 8]
self.sep = [' ', '\n']
self.seqs = [1, 10]
self.intensity = [128, 255]
self.in_path = in_path
self.out_path = out_path
def sample(self, inds, id):
num = inds[0]
print('Process', id, 'was started')
i = 0
while num < inds[-1]:
image = Image.fromarray(np.zeros((160, 160), dtype=np.uint8))
draw = ImageDraw.Draw(image)
seq = ''
for _ in np.arange(randint(self.seqs[0], self.seqs[1])):
seq_len = randint(self.seq_len[0], self.seq_len[1])
seq += ''.join([choice(self.letters) for _ in np.arange(seq_len)])
seq += choice(self.sep)
font_type = self.font_path + choice(self.fonts)
font_size = randint(self.font_size[0], self.font_size[1])
font = ImageFont.truetype(font_type, font_size)
intensity = randint(self.intensity[0], self.intensity[1])
draw.text((0, 0), seq, intensity, font=font)
in_image = np.array(deepcopy(image))
in_image[in_image > 0] = 255
etalon_image = Image.fromarray(np.zeros((100, 100), dtype=np.uint8))
etalon_draw = ImageDraw.Draw(etalon_image)
etalon_font = ImageFont.truetype(font_type, font_size)
etalon_draw.text((0, 0), seq, 255, font=etalon_font)
cv2.imwrite(self.in_path + str(num) + '.tif', np.array(etalon_image))
noise_type = randint(0, 9)
if noise_type == 0:
pass
elif noise_type == 1:
sigma = randint(0, 3)
image = cv2.GaussianBlur(np.array(image), (3, 3), sigma)
elif noise_type == 2:
image = cv2.medianBlur(np.array(image), 3)
elif noise_type == 3:
image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
elif noise_type == 5:
if font_size > 20:
image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
else:
continue
elif noise_type == 6:
if font_size > 22:
image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
image = cv2.GaussianBlur(np.array(image), (3, 3), 0)
else:
continue
elif noise_type == 7:
if font_size > 22:
image = cv2.GaussianBlur(np.array(image), (3, 3), 0)
image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
else:
continue
elif noise_type == 8:
if font_size > 22:
image = cv2.erode(np.array(image), np.ones((2, 2), np.uint8), iterations=1)
else:
continue
cv2.imwrite(self.out_path + str(num) + '.tif', np.array(image))
if i > 0 and i % 500 == 0:
print('#', id, '. Step:', i)
num += 1
i += 1
def extract_non_zero_image(in_image, out_image, max_size, border=0):
vert = np.where(np.sum(out_image, axis=1) > 0)[0]
hor = np.where(np.sum(out_image, axis=0) > 0)[0]
min_y = max(0, np.min(vert) - border)
min_x = max(0, np.min(hor) - border)
in_empty_image = np.zeros(max_size, np.uint8)
out_empty_image = np.zeros(max_size, np.uint8)
max_y = min(min_y + max_size[0], len(in_image))
max_x = min(min_x + max_size[1], len(in_image[0]))
in_empty_image[:max_y - min_y, :max_x - min_x] = in_image[min_y:max_y, min_x:max_x]
out_empty_image[:max_y - min_y, :max_x - min_x] = out_image[min_y:max_y, min_x:max_x]
return in_empty_image, out_empty_image
if __name__ == "__main__":
in_path = '../rosatom_dataset/in/'
out_path = '../rosatom_dataset/out/'
n = 20000
i = 0
pr_count = 8
DS = DatasetGenerator(in_path, out_path)
step = n // pr_count + 15
process = []
for pr_num in range(pr_count):
inds = range(min(step * pr_num, n), min(step * (pr_num + 1), n))
p = Process(target=DS.sample, args=(inds, pr_num))
p.start()
process.append(p)
for p in process:
p.join() | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/rpc/OfflineRest.js.uncompressed.js | define("dojox/rpc/OfflineRest", ["dojo", "dojox", "dojox/data/ClientFilter", "dojox/rpc/Rest", "dojox/storage"], function(dojo, dojox) {
// summary:
// Makes the REST service be able to store changes in local
// storage so it can be used offline automatically.
var Rest = dojox.rpc.Rest;
var namespace = "dojox_rpc_OfflineRest";
var loaded;
var index = Rest._index;
dojox.storage.manager.addOnLoad(function(){
// now that we are loaded we need to save everything in the index
loaded = dojox.storage.manager.available;
for(var i in index){
saveObject(index[i], i);
}
});
var dontSave;
function getStorageKey(key){
// returns a key that is safe to use in storage
return key.replace(/[^0-9A-Za-z_]/g,'_');
}
function saveObject(object,id){
// save the object into local storage
if(loaded && !dontSave && (id || (object && object.__id))){
dojox.storage.put(
getStorageKey(id||object.__id),
typeof object=='object'?dojox.json.ref.toJson(object):object, // makeshift technique to determine if the object is json object or not
function(){},
namespace);
}
}
function isNetworkError(error){
// determine if the error was a network error and should be saved offline
// or if it was a server error and not a result of offline-ness
return error instanceof Error && (error.status == 503 || error.status > 12000 || !error.status); // TODO: Make the right error determination
}
function sendChanges(){
// periodical try to save our dirty data
if(loaded){
var dirty = dojox.storage.get("dirty",namespace);
if(dirty){
for (var dirtyId in dirty){
commitDirty(dirtyId,dirty);
}
}
}
}
var OfflineRest;
function sync(){
OfflineRest.sendChanges();
OfflineRest.downloadChanges();
}
var syncId = setInterval(sync,15000);
dojo.connect(document, "ononline", sync);
OfflineRest = dojox.rpc.OfflineRest = {
turnOffAutoSync: function(){
clearInterval(syncId);
},
sync: sync,
sendChanges: sendChanges,
downloadChanges: function(){
},
addStore: function(/*data-store*/store,/*query?*/baseQuery){
// summary:
// Adds a store to the monitored store for local storage
// store:
// Store to add
// baseQuery:
// This is the base query to should be used to load the items for
// the store. Generally you want to load all the items that should be
// available when offline.
OfflineRest.stores.push(store);
store.fetch({queryOptions:{cache:true},query:baseQuery,onComplete:function(results,args){
store._localBaseResults = results;
store._localBaseFetch = args;
}});
}
};
OfflineRest.stores = [];
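	// Usage sketch (hypothetical store): register a store so its base query is
	// cached locally and offline edits are synced when the connection returns.
	//   dojox.rpc.OfflineRest.addStore(myJsonRestStore, {});
	//   dojox.rpc.OfflineRest.sync();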
var defaultGet = Rest._get;
Rest._get = function(service, id){
// We specifically do NOT want the paging information to be used by the default handler,
// this is because online apps want to minimize the data transfer,
// but an offline app wants the opposite, as much data as possible transferred to
// the client side
try{
// if we are reloading the application with local dirty data in an online environment
// we want to make sure we save the changes first, so that we get up-to-date
// information from the server
sendChanges();
if(window.navigator && navigator.onLine===false){
// we force an error if we are offline in firefox, otherwise it will silently load it from the cache
throw new Error();
}
var dfd = defaultGet(service, id);
}catch(e){
dfd = new dojo.Deferred();
dfd.errback(e);
}
var sync = dojox.rpc._sync;
dfd.addCallback(function(result){
saveObject(result, service._getRequest(id).url);
return result;
});
dfd.addErrback(function(error){
if(loaded){
// if the storage is loaded, we can go ahead and get the object out of storage
if(isNetworkError(error)){
var loadedObjects = {};
// network error, load from local storage
var byId = function(id,backup){
if(loadedObjects[id]){
return backup;
}
var result = dojo.fromJson(dojox.storage.get(getStorageKey(id),namespace)) || backup;
loadedObjects[id] = result;
for(var i in result){
var val = result[i]; // resolve references if we can
id = val && val.$ref;
if (id){
if(id.substring && id.substring(0,4) == "cid:"){
// strip the cid scheme, we should be able to resolve it locally
id = id.substring(4);
}
result[i] = byId(id,val);
}
}
if (result instanceof Array){
//remove any deleted items
for (i = 0;i<result.length;i++){
if (result[i]===undefined){
result.splice(i--,1);
}
}
}
return result;
};
dontSave = true; // we don't want to be resaving objects when loading from local storage
//TODO: Should this reuse something from dojox.rpc.Rest
var result = byId(service._getRequest(id).url);
if(!result){// if it is not found we have to just return the error
return error;
}
dontSave = false;
return result;
}
else{
return error; // server error, let the error propagate
}
}
else{
if(sync){
return new Error("Storage manager not loaded, can not continue");
}
// we are not loaded, so we need to defer until we are loaded
dfd = new dojo.Deferred();
dfd.addCallback(arguments.callee);
dojox.storage.manager.addOnLoad(function(){
dfd.callback();
});
return dfd;
}
});
return dfd;
};
function changeOccurred(method, absoluteId, contentId, serializedContent, service){
if(method=='delete'){
dojox.storage.remove(getStorageKey(absoluteId),namespace);
}
else{
// both put and post should store the actual object
dojox.storage.put(getStorageKey(contentId), serializedContent, function(){
},namespace);
}
var store = service && service._store;
// record all the updated queries
if(store){
store.updateResultSet(store._localBaseResults, store._localBaseFetch);
dojox.storage.put(getStorageKey(service._getRequest(store._localBaseFetch.query).url),dojox.json.ref.toJson(store._localBaseResults),function(){
},namespace);
}
}
dojo.addOnLoad(function(){
dojo.connect(dojox.data, "restListener", function(message){
var channel = message.channel;
var method = message.event.toLowerCase();
var service = dojox.rpc.JsonRest && dojox.rpc.JsonRest.getServiceAndId(channel).service;
changeOccurred(
method,
channel,
method == "post" ? channel + message.result.id : channel,
dojo.toJson(message.result),
service
);
});
});
//FIXME: Should we make changes after a commit to see if the server rejected the change
// or should we come up with a revert mechanism?
var defaultChange = Rest._change;
Rest._change = function(method,service,id,serializedContent){
if(!loaded){
return defaultChange.apply(this,arguments);
}
var absoluteId = service._getRequest(id).url;
changeOccurred(method, absoluteId, dojox.rpc.JsonRest._contentId, serializedContent, service);
var dirty = dojox.storage.get("dirty",namespace) || {};
if (method=='put' || method=='delete'){
// these supersede so we can overwrite anything using this id
var dirtyId = absoluteId;
}
else{
dirtyId = 0;
for (var i in dirty){
if(!isNaN(parseInt(i))){
dirtyId = i;
}
} // get the last dirtyId to make a unique id for non-idempotent methods
dirtyId++;
}
dirty[dirtyId] = {method:method,id:absoluteId,content:serializedContent};
return commitDirty(dirtyId,dirty);
};
function commitDirty(dirtyId, dirty){
var dirtyItem = dirty[dirtyId];
var serviceAndId = dojox.rpc.JsonRest.getServiceAndId(dirtyItem.id);
var deferred = defaultChange(dirtyItem.method,serviceAndId.service,serviceAndId.id,dirtyItem.content);
// add it to our list of dirty objects
dirty[dirtyId] = dirtyItem;
dojox.storage.put("dirty",dirty,function(){},namespace);
deferred.addBoth(function(result){
if (isNetworkError(result)){
// if a network error (offlineness) was the problem, we leave it
// dirty, and return to indicate successfulness
return null;
}
// it was successful or the server rejected it, we remove it from the dirty list
var dirty = dojox.storage.get("dirty",namespace) || {};
delete dirty[dirtyId];
dojox.storage.put("dirty",dirty,function(){},namespace);
return result;
});
return deferred;
}
dojo.connect(index,"onLoad",saveObject);
dojo.connect(index,"onUpdate",saveObject);
return dojox.rpc.OfflineRest;
}); | PypiClean |
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/plugins/a11yhelp/dialogs/lang/fr-ca.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","fr-ca",{title:"Instructions d'accessibilité",contents:"Contenu de l'aide. Pour fermer cette fenêtre, appuyez sur ESC.",legend:[{name:"Général",items:[{name:"Barre d'outil de l'éditeur",legend:"Appuyer sur ${toolbarFocus} pour accéder à la barre d'outils. Se déplacer vers les groupes suivant ou précédent de la barre d'outil avec les touches TAB et SHIFT-TAB. Se déplacer vers les boutons suivant ou précédent de la barre d'outils avec les touches FLECHE DROITE et FLECHE GAUCHE. Appuyer sur la barre d'espace ou la touche ENTRER pour activer le bouton de barre d'outils."},
{name:"Dialogue de l'éditeur",legend:"A l'intérieur d'un dialogue, appuyer sur la touche TAB pour naviguer jusqu'au champ de dalogue suivant, appuyez sur les touches SHIFT + TAB pour revenir au champ précédent, appuyez sur la touche ENTRER pour soumettre le dialogue, appuyer sur la touche ESC pour annuler le dialogue. Pour les dialogues avec plusieurs pages d'onglets, appuyer sur ALT + F10 pour naviguer jusqu'à la liste des onglets. Puis se déplacer vers l'onglet suivant avec la touche TAB ou FLECHE DROITE. Se déplacer vers l'onglet précédent avec les touches SHIFT + TAB ou FLECHE GAUCHE. Appuyer sur la barre d'espace ou la touche ENTRER pour sélectionner la page de l'onglet."},
{name:"Menu contextuel de l'éditeur",legend:"Appuyer sur ${contextMenu} ou entrer le RACCOURCI CLAVIER pour ouvrir le menu contextuel. Puis se déplacer vers l'option suivante du menu avec les touches TAB ou FLECHE BAS. Se déplacer vers l'option précédente avec les touches SHIFT+TAB ou FLECHE HAUT. appuyer sur la BARRE D'ESPACE ou la touche ENTREE pour sélectionner l'option du menu. Oovrir le sous-menu de l'option courante avec la BARRE D'ESPACE ou les touches ENTREE ou FLECHE DROITE. Revenir à l'élément de menu parent avec les touches ESC ou FLECHE GAUCHE. Fermer le menu contextuel avec ESC."},
{name:"Menu déroulant de l'éditeur",legend:"A l'intérieur d'une liste en menu déroulant, se déplacer vers l'élément suivant de la liste avec les touches TAB ou FLECHE BAS. Se déplacer vers l'élément précédent de la liste avec les touches SHIFT + TAB ou FLECHE HAUT. Appuyer sur la BARRE D'ESPACE ou sur ENTREE pour sélectionner l'option dans la liste. Appuyer sur ESC pour fermer le menu déroulant."},{name:"Barre d'emplacement des éléments de l'éditeur",legend:"Appuyer sur ${elementsPathFocus} pour naviguer vers la barre d'emplacement des éléments de léditeur. Se déplacer vers le bouton d'élément suivant avec les touches TAB ou FLECHE DROITE. Se déplacer vers le bouton d'élément précédent avec les touches SHIFT+TAB ou FLECHE GAUCHE. Appuyer sur la BARRE D'ESPACE ou sur ENTREE pour sélectionner l'élément dans l'éditeur."}]},
{name:"Commandes",items:[{name:"Annuler",legend:"Appuyer sur ${undo}"},{name:"Refaire",legend:"Appuyer sur ${redo}"},{name:"Gras",legend:"Appuyer sur ${bold}"},{name:"Italique",legend:"Appuyer sur ${italic}"},{name:"Souligné",legend:"Appuyer sur ${underline}"},{name:"Lien",legend:"Appuyer sur ${link}"},{name:"Enrouler la barre d'outils",legend:"Appuyer sur ${toolbarCollapse}"},{name:"Accéder à l'objet de focus précédent",legend:"Appuyer ${accessPreviousSpace} pour accéder au prochain espace disponible avant le curseur, par exemple: deux éléments HR adjacents. Répéter la combinaison pour joindre les éléments d'espaces distantes."},
{name:"Accéder au prochain objet de focus",legend:"Appuyer ${accessNextSpace} pour accéder au prochain espace disponible après le curseur, par exemple: deux éléments HR adjacents. Répéter la combinaison pour joindre les éléments d'espaces distantes."},{name:"Aide d'accessibilité",legend:"Appuyer sur ${a11yHelp}"}]}]}); | PypiClean |
/ModEA-0.5.0.tar.gz/ModEA-0.5.0/README.md | [](https://badge.fury.io/py/ModEA)
[](https://zenodo.org/badge/latestdoi/157624013)
[](https://www.travis-ci.com/sjvrijn/ModEA)
[](https://coveralls.io/github/sjvrijn/ModEA?branch=master)
[](https://www.codacy.com/app/sjvrijn/ModEA?utm_source=github.com&utm_medium=referral&utm_content=sjvrijn/ModEA&utm_campaign=Badge_Grade)
[](https://modea.readthedocs.io/en/latest/?badge=latest)
[](https://gitter.im/pyModEA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
# Summary #
This repository contains the code for the Modular EA framework by Sander van Rijn.
# Documentation #
Some basic documentation is available at [modea.readthedocs.io](https://modea.readthedocs.io)
To see this framework in action, please refer to [this GitHub repository](https://github.com/sjvrijn/ConfiguringCMAES) which contains the code for all experiments on the Configurable CMA-ES.
# Citation #
To cite this framework, please use the following reference:
* [Evolving the Structure of Evolution Strategies. Sander van Rijn, Hao Wang, Matthijs van Leeuwen, Thomas Bäck. IEEE SSCI December 6-9 2016, Athens, Greece.](https://ieeexplore.ieee.org/document/7850138)
- Experiments and analysis code available [here](https://github.com/sjvrijn/ConfiguringCMAES)
```
@INPROCEEDINGS{vanrijn2016,
author={S. {van Rijn} and H. {Wang} and M. {van Leeuwen} and T. {Bäck}},
booktitle={2016 IEEE Symposium Series on Computational Intelligence (SSCI)},
title={Evolving the structure of Evolution Strategies},
year={2016},
doi={10.1109/SSCI.2016.7850138},
}
```
| PypiClean |
/Barak-0.3.2.tar.gz/Barak-0.3.2/barak/pyvpfit.py | import os
import numpy as np
from textwrap import wrap
from constants import c_kms
# calc_v90 below also needs these; calctau is assumed to come from this
# package's absorb module, and pylab is only used for the optional plotting.
from absorb import calctau
import pylab as pl
# the data types of the lines and regions numpy arrays:
len_filename = 150
dtype_lines = [('name', 'S6'),
('z', 'f8'),
('zpar', 'S2'),
('b', 'f8'),
('bpar', 'S2'),
('logN', 'f8'),
('logNpar', 'S2'),
('zsig', 'f8'),
('bsig', 'f8'),
('logNsig', 'f8')]
dtype_regions = [('filename', 'S%i' % len_filename),
('num', 'S2'),
('wmin', 'f8'),
('wmax', 'f8'),
('resolution', 'S100')]
def parse_entry(entry):
""" Separates an entry into a numeric value and a tied/fixed
parameter, if present.
"""
if entry.startswith('nan'):
val = float(entry[:3])
par = entry[3:]
else:
i = -1
while not entry[i].isdigit(): i -= 1
if i != -1:
val = float(entry[:i+1])
par = entry[i+1:]
else:
val = float(entry)
par = ''
return val,par
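# For example, parse_entry('14.5432a') gives (14.5432, 'a') and
# parse_entry('20.3') gives (20.3, '').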
def parse_lines(params):
""" Separates the parameters from their tied/fixed/special
characters.
"""
#print params
temp = []
for name,z,b,logN,zsig,bsig,logNsig in params:
z, zpar = parse_entry(z)
b, bpar = parse_entry(b)
logN, logNpar = parse_entry(logN)
try:
zsig = float(zsig)
except ValueError:
zsig = -1
try:
bsig = float(bsig)
except ValueError:
bsig = -1
try:
logNsig = float(logNsig)
except ValueError:
logNsig = -1
temp.append((name,z,zpar,b,bpar,logN,logNpar,
zsig,bsig,logNsig))
temp = np.rec.fromrecords(temp, dtype=dtype_lines)
return temp
def parse_regions(rows, res=None):
""" Parses the region information from a f26 or fort.13 file. """
if res is None:
res = ''
out = None
rinfo = []
for row in rows:
r = row.split('!')[0].lstrip().lstrip('%%').split()
nitems = len(r)
r[2] = float(r[2])
r[3] = float(r[3])
if nitems == 4:
rinfo.append(tuple(r + [res]))
elif nitems > 4:
r = r[:4] + [' '.join(r[4:])]
rinfo.append(tuple(r))
else:
raise Exception('bad format in fitting regions:\n %s' % row)
if rinfo:
out = np.rec.fromrecords(rinfo, dtype=dtype_regions)
return out
def sumlines(lines):
""" Given several lines (record array), returns them in the vpfit
summed format. """
summedlines = lines.copy()
logNtots = np.log10(np.sum(10**lines.logN))
for i,logNtot in enumerate(logNtots):
if i == 0:
summedlines[i].logN = logNtot
#summedlines[i].logNstr = '%7.4f' % logNtot
summedlines[i].logNpar = 'w'
return summedlines
class VpfitModel(object):
""" Holds all the info about a vpfit model. Can write out the
model as a fort.13 or fort.26 style file.
"""
def __init__(self, names=None, logN=None, z=None, b=None,
zpar=None, bpar=None, logNpar=None,
filenames=None, wmin=None, wmax=None, res=None, num=None):
if None in (names,logN,z,b):
self.lines = None # record array
else:
assert len(z) == len(logN) == len(b) == len(names)
ncomp = len(z)
if zpar is None: zpar = [''] * ncomp
if bpar is None: bpar = [''] * ncomp
if logNpar is None: logNpar = [''] * ncomp
zsig = [-1] * ncomp
bsig = [-1] * ncomp
logNsig = [-1] * ncomp
temp = np.rec.fromarrays([names,z,zpar,b,bpar,logN,logNpar,zsig,
bsig,logNsig], dtype=dtype_lines)
self.lines = temp
if None in (filenames, wmin, wmax):
self.regions = None # record array
else:
if res is None:
res = [''] * len(filenames)
if num is None:
num = ['1'] * len(filenames)
assert all((len(n) < len_filename) for n in filenames)
temp = np.rec.fromarrays([filenames,num,wmin,wmax,res],
dtype=dtype_regions)
self.regions = temp
self.stats = None
def __repr__(self):
temp = ', '.join(sorted(str(attr) for attr in self.__dict__ if not str(attr).startswith('_')))
return 'VpfitModel(%s)' % '\n '.join(wrap(temp, width=69))
def writef26(self,filename, write_regions=True):
""" Writes out a f26 style file."""
temp = []
if write_regions and self.regions is not None:
for r in self.regions:
temp.append('%%%% %(filename)s %(num)s %(wmin)7.2f '
'%(wmax)7.2f %(resolution)s\n' % r)
if self.lines is not None:
for line in self.lines:
temp.append(' %(name)s %(z)11.8f%(zpar)-2s '
'%(zsig)11.8f %(b)6.2f%(bpar)-2s %(bsig)6.2f '
'%(logN)7.4f%(logNpar)-2s %(logNsig)7.4f\n' % line)
open(filename,'w').writelines(temp)
def writef13(self, filename, write_regions=True):
""" Writes out a fort.13 style file. """
# The whitespace is important if the f13 files are to be read
# by vpguess - don't change it!
temp = []
if write_regions:
temp.append(' *\n')
if self.regions is not None:
for r in self.regions:
temp.append('%(filename)s %(num)s %(wmin)7.2f '
'%(wmax)7.2f %(resolution)s\n' % r)
temp.append(' *\n')
if self.lines is not None:
for line in self.lines:
temp.append(' %(name)s %(logN)7.4f%(logNpar)-2s '
'%(z)11.8f%(zpar)-2s %(b)6.2f%(bpar)-2s '
'0.00 0.00E+00 0\n' % line)
open(filename,'w').writelines(temp)
def copy(self):
from copy import deepcopy
return deepcopy(self)
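# A minimal sketch (hypothetical values): build a two-component model and write
# it out in f26 format.
#   vp = VpfitModel(names=['HI', 'HI'], logN=[14.2, 13.5],
#                   z=[2.1501, 2.1507], b=[25.0, 18.0])
#   vp.writef26('model.f26', write_regions=False)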
def readf26(fh, res=None):
""" Reads a f26 style file and returns a VpfitModel object. If the
keyword res is given, this string provides the resolution
information for the spectra fitted.
For example: res='vsig=69.0'
"""
if isinstance(fh, basestring):
fh = open(fh)
f = fh.readlines()
fh.close()
vp = VpfitModel()
if len(f) == 0:
#print filename, 'is empty'
return None
f = [r for r in f if
not r.lstrip().startswith('!') or 'Stats' in r]
regionrows = [r for r in f if r.lstrip().startswith('%%')]
ionrows = [r for r in f if '%%' not in r and
'Stats' not in r and r.lstrip()]
keys = 'iterations nchisq npts dof prob ndropped info'.split()
statrow = [row for row in f if 'Stats' in row]
if statrow:
if statrow[0].split()[-1] == 'BAD':
status = 'BAD'
else:
status = 'OK'
vals = statrow[0].split()[2:8] + [status]
vp.stats = dict(zip(keys,vals))
elif ionrows:
# older style f26 file
stat = ionrows[0]
status = ('BAD' if stat.split()[-1] == 'BAD' else 'OK')
vals = [stat[66:71], stat[71:85], stat[85:90], stat[90:95],
stat[95:102], stat[102:107], status]
vp.stats = dict(zip(keys,vals))
vp.regions = parse_regions(regionrows,res=res)
#print vp.regions,'\n\n\n'
#vp.filename = filename
if len(ionrows) == 0:
return vp
ionrows = [r.lstrip() for r in ionrows]
param = []
molecule_names = set(('H2J0 H2J1 H2J2 H2J3 H2J4 H2J5 H2J6 '
'COJ0 COJ1 COJ2 COJ3 COJ4 COJ5 COJ6 '
'HDJ0 HDJ1 HDJ2').split())
for r in ionrows:
if 'nan' in r:
i = r.index('nan')
param.append([r[:i]] + r[i:].split())
continue
if r[:4] in molecule_names:
i = 4
else:
i = 0
while not r[i].isdigit() and r[i] != '-':
i += 1
param.append([r[:i]] + r[i:].split())
param = [[p[0],p[1],p[3],p[5],p[2],p[4],p[6]] for p in param]
vp.lines = parse_lines(param)
return vp
def readf13(filename, read_regions=True, res=None):
""" Reads a fort.13 style file. """
fh = open(filename)
f = fh.readlines()
fh.close()
if len(f) == 0:
#print filename, 'is empty'
return None
f = [row.lstrip() for row in f[1:]] # skip past first line with *
isep = [row[0] for row in f].index('*') # find separating *
vp = VpfitModel()
if read_regions:
vp.regions = parse_regions([row for row in f[:isep]],res=res)
param = [[row[:5]] + row[5:].split() for row in f[isep+1:]]
param = [[p[0],p[2],p[3],p[1],-1,-1,-1] for p in param]
vp.lines = parse_lines(param)
vp.stats = None
vp.filename = filename
return vp
def calc_Ntot(f26name, trans=None):
""" Calculate the total column density in f26-style file
Parameters
----------
f26name : str
f26 filename.
trans : str (optional)
Transition name ('Mg' for example). By default all column
density entries are used.
Returns
-------
    logNtot, logNmin, logNmax : floats
      Log10 of the total column density, together with the totals implied
      by the -1 sigma and +1 sigma column densities of each component.
"""
f26 = readf26(f26name)
logN = f26.lines.logN
sig = f26.lines.logNsig
if trans is not None:
cond = f26.lines.name == trans
logN = f26.lines.logN[cond]
sig = f26.lines.logNsig[cond]
Ntot = np.sum(10**logN)
Nmin = np.sum(10**(logN - sig))
Nmax = np.sum(10**(logN + sig))
return np.log10(Ntot), np.log10(Nmin), np.log10(Nmax)
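# Example (hypothetical file name), following the docstring above:
#   logNtot, logNmin, logNmax = calc_Ntot('abc.f26', trans='Mg')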
def calc_v90(vp, plot=False, z0=None,
wav0=1215.6701, osc=0.4164, gam=6.265e8):
""" For a vp model, we want to calculate the velocity width that
    contains 90% of the total optical depth at the lya line (or
perhaps it is the same regardless of which transition I take?) v_90
is defined in Prochaska and Wolfe 1997.
At the moment it guesses how big a velocity range it has to
calculate the optical depth over - a bit dodgy"""
lines = vp.lines
#print 'calculating for %s' % lines
# work in velocity space
z = lines.z
if z0 is None: z0 = np.median(z)
vel = (z - z0) / (1 + z0) * c_kms
    # calculate the optical depth as a function of velocity, 300 km/s
# past the redmost and bluemost components - hopefully this is far
# enough (maybe not for DLAs?)
dv = 0.5
vhalf = (vel.max() - vel.min())/2. + 300
v = np.arange(-vhalf, vhalf + dv, dv)
tau = np.zeros(len(v))
for line,vline in zip(lines,vel):
if line['logN'] > 21.0:
print ('very (too?) high logN: %s' % line['logN'])
print ('returning width of -1')
return -1.
temptau = calctau(v - vline, wav0, osc, gam, line['logN'],
btemp=line['b'])
tau += temptau
#pl.plot(v,tau,'+-')
#raw_input('N %(logN)f b %(b)f enter to continue' % line)
# integrate over the entire v range to calculate integral of tau wrt v.
sumtaudv = np.trapz(tau,dx=dv)
lenv = len(v)
# starting from the left v edge, increase v until int from left
# edge to v gives 5% of total integral
sum5perc = sumtaudv / 20.
sumtau = 0.
i = 0
while (sumtau < sum5perc):
i += 1
sumtau = np.trapz(tau[:i])
if i == lenv:
raise Exception('Problem with velocity limits!')
vmin = v[i-1]
# Do the same starting from the right edge.
sumtau = 0
i = -1
while (sumtau < sum5perc):
sumtau = np.trapz(tau[i:])
i -= 1
if -i == lenv:
raise Exception('Problem with velocity limits!')
vmax = v[i+1]
# Difference between the two is v_90
v90 = vmax - vmin
if plot:
pl.plot(v,tau,'+-')
pl.vlines((vmin,vmax),0,tau.max())
#raw_input('Enter to continue...')
return v90
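# Usage sketch (illustrative; the filename is made up):
#
#   vp = readf26('fit.f26')
#   v90 = calc_v90(vp, z0=2.5)   # km/s; returns -1. if any component has logN > 21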
def make_rdgen_input(specfilename, filename, wmin=None, wmax=None):
temp = ('rd %(specfilename)s\n'
'ab\n'
'\n'
'\n'
'\n'
'%(wmin)s %(wmax)s\n'
'qu\n' % locals() )
fh = open(filename,'w')
fh.write(temp)
fh.close()
def make_autovpin_input(specfilename, filename):
temp = ('%(specfilename)s\n'
'\n'
'\n'
'\n'
'\n'
'\n' % locals() )
fh = open(filename,'w')
fh.write(temp)
fh.close() | PypiClean |
/Netzob-2.0.0.tar.gz/Netzob-2.0.0/src/netzob/Common/Utils/TypedList.py |
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import collections.abc
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
class TypedList(collections.abc.MutableSequence):
"""A strong typed list based on collections.abc.MutableSequence.
The idea is to verify members type when editing the list. By using this
class instead of the typical list, we enforce members type.
>>> typedList = TypedList(str)
>>> typedList.append("toto")
>>> typedList.extend(["titi", "tata"])
>>> len(typedList)
3
>>> typedList[1]
'titi'
>>> typedList.append(3)
Traceback (most recent call last):
    TypeError: Invalid type for argument, expecting: <class 'str'>, received : int
>>> typedList.extend(["tutu", 5])
Traceback (most recent call last):
    TypeError: Invalid type for argument, expecting: <class 'str'>, received : int
"""
def __init__(self, membersTypes, *args):
self.membersTypes = membersTypes
self.list = list()
self.extend(list(args))
def check(self, v):
if not isinstance(v, self.membersTypes):
raise TypeError(
"Invalid type for argument, expecting: {0}, received : {1}".
format(self.membersTypes, v.__class__.__name__))
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self.check(v)
self.list[i] = v
def insert(self, i, v):
self.check(v)
self.list.insert(i, v)
def __str__(self):
return str(',\n'.join([str(x) for x in self.list]))
def __repr__(self):
return repr(self.list) | PypiClean |
/Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/config.py |
import json
class Config(dict):
"""Alarmageddon configuration object.
A configuration object that both acts like a read-only dictionary and
provides some methods to access application specific settings
:param dictionary: A dictionary of the form {'env':{config options},...}
:param environment_name: The environment that this Config object belongs to
"""
ENVIRONMENT_KEY = 'environment'
def __init__(self, dictionary, environment_name):
super(Config, self).__init__(self, **dictionary)
self._environment_name = environment_name
try:
config = self[Config.ENVIRONMENT_KEY][environment_name]
self.environment_config = config
except KeyError:
raise ValueError(
"environment: '%s' was not found in configuration"
% environment_name)
@staticmethod
def from_file(config_path, environment_name):
"""Load a Config object from a file
An environment_name must be provided so that the resulting Config
object can provide access to environment specific settings.
"""
with open(config_path, 'r') as config_file:
return Config(json.load(config_file), environment_name)
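    # Illustrative sketch (not part of the original package): the JSON below
    # shows the minimal shape this class expects - an 'environment' section
    # keyed by environment name, plus top-level keys such as
    # 'test_results_file'. All names and URLs here are made up.
    #
    #   {
    #       "environment": {
    #           "prod": {"hosts": {"api": {"url": "https://api.example.com"}}}
    #       },
    #       "test_results_file": "results.xml"
    #   }
    #
    #   config = Config.from_file("alarmageddon.json", "prod")
    #   config.hostname("api")  # -> "https://api.example.com"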
def hostname(self, alias):
"""Returns an environment-specific hostname given its alias.
host names are pulled from the hosts dictionary under each of the
environment dictionaries.
"""
try:
return self.environment_config['hosts'][alias]['url']
except:
raise KeyError("No base URL defined for alias: %s" % alias)
def environment_name(self):
"""returns current environment name"""
return self._environment_name
def test_results_file(self):
"""returns the location of the test results file"""
return self['test_results_file']
def __str__(self):
"""Return a string representation of this Config object"""
return "Current Environment: %s Dictionary: %s" % (
self._environment_name, dict.__str__(self)) | PypiClean |
/HyperKitty-1.3.7.tar.gz/HyperKitty-1.3.7/hyperkitty/management/commands/attachments_to_file.py | import os
from django.core.management.base import BaseCommand, CommandError
from hyperkitty.management.utils import setup_logging
from hyperkitty.models.email import Attachment
class Command(BaseCommand):
help = """Move attachments from database to file system after setting
HYPERKITTY_ATTACHMENT_FOLDER."""
def add_arguments(self, parser):
for action in parser._actions:
for option in ('-v', '--verbosity'):
if vars(action)['option_strings'][0] == option:
parser._handle_conflict_resolve(
None, [(option, action)])
parser.add_argument(
'-v', '--verbosity', default=0,
type=int, choices=[0, 1],
help="""Verbosity = 1 will print a dot for each 100 attachments
moved."""
)
parser.add_argument(
'-c', '--chunk-size', default=100, type=int,
help="""Specify the number of attachments to retrieve at one time
from the database. Default is 100. Larger values use more memory."""
)
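    # Illustrative invocation (assuming the command is exposed under its module
    # name, as Django does for management commands; the values are made up):
    #
    #   python manage.py attachments_to_file --verbosity 1 --chunk-size 500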
def handle(self, *args, **options):
options["verbosity"] = int(options.get("verbosity", "0"))
options["chunk-size"] = int(options.get("chunk-size", 100))
setup_logging(self, options["verbosity"])
if args:
raise CommandError("no arguments allowed")
count = 0
for attachment in Attachment.objects.iterator(
chunk_size=options["chunk-size"]):
path = attachment._get_folder()
if path is None:
raise CommandError('HYPERKITTY_ATTACHMENT_FOLDER is not set')
if attachment.content is None:
continue
count += 1
if options['verbosity'] > 0:
if count % 100 == 0:
print('.', end='', flush=True)
if count % 7000 == 0:
print()
if not os.path.exists(path):
os.makedirs(path)
file = os.path.join(path, str(attachment.counter))
with open(file, 'wb') as fp:
fp.write(bytes(attachment.content))
attachment.content = None
attachment.save()
if options['verbosity'] > 0:
print()
print(f'{count} attachments moved.') | PypiClean |
/Binomial_Gaussian_practice-1.0.tar.gz/Binomial_Gaussian_practice-1.0/Binomial_Gaussian_practice/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
TODO: Fill out all functions below
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
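    # Worked example (illustrative): with the default prob=.5 and size=20,
    # mean = 0.5 * 20 = 10 and stdev = sqrt(20 * 0.5 * 0.5) = sqrt(5) ~ 2.236.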
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
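    # Worked example (illustrative): for n=20, p=0.5 and k=10,
    # pdf(10) = C(20, 10) * 0.5**10 * 0.5**10 = 184756 / 2**20 ~ 0.176.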
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
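    # Worked example (illustrative): Binomial(prob=0.5, size=20) + Binomial(prob=0.5, size=10)
    # gives p=0.5 and n=30, i.e. mean 15 and stdev sqrt(30 * 0.25) ~ 2.74.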
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
/Andy_mess_server-0.0.1-py3-none-any.whl/server/server.py | import sys
import logging
import argparse
import configparser
import os
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
import logs.server_log_config
from common.variables import *
from common.decorators import log
from server.database import ServerStorage
from server.core import MessageProcessor
from server.main_window import MainWindow
# Initialize the server logger:
SERVER_LOGGER = logging.getLogger('server')
@log
def arg_parser(default_port, default_address):
"""
    Command-line argument parser.
    :param default_port: port
    :param default_address: IP address
    :return: IP address, port, GUI flag
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p', default=default_port, type=int, nargs='?')
parser.add_argument('-a', default=default_address, nargs='?')
parser.add_argument('--no_gui', action='store_true')
namespace = parser.parse_args(sys.argv[1:])
listen_address = namespace.a
listen_port = namespace.p
gui_flag = namespace.no_gui
SERVER_LOGGER.debug('Аргументы успешно загружены.')
return listen_address, listen_port, gui_flag
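# Illustrative invocations (script name and values assumed, not taken from the package docs):
#   python server.py -p 7777 -a 127.0.0.1   # explicit port and listen address
#   python server.py --no_gui               # console handler instead of the GUI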
@log
def config_load():
"""
    Parser for the ini configuration file.
    :return: a dictionary containing the server configuration parameters
"""
config = configparser.ConfigParser()
dir_path = os.path.dirname(os.path.realpath(__file__))
config.read(f"{dir_path}/{'server.ini'}")
    # If the config file loaded correctly, run with it; otherwise use the default config.
if 'SETTINGS' in config:
return config
else:
config.add_section('SETTINGS')
config.set('SETTINGS', 'Default_port', str(DEFAULT_PORT))
config.set('SETTINGS', 'Listen_Address', '')
config.set('SETTINGS', 'Database_path', '')
config.set('SETTINGS', 'Database_file', 'server_database.db3')
return config
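# Illustrative server.ini matching the keys used above (the port value is made
# up; the database file name default comes from the code above):
#   [SETTINGS]
#   Default_port = 7777
#   Listen_Address =
#   Database_path =
#   Database_file = server_database.db3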
@log
def main():
"""
    Main function.
    :return: returns nothing
"""
    # Load the server configuration file
config = config_load()
    # Load command-line parameters; if none are given, use the default values.
listen_address, listen_port, gui_flag = arg_parser(
config['SETTINGS']['Default_port'], config['SETTINGS']['Listen_Address'])
    # Initialize the database
database = ServerStorage(os.path.join(config['SETTINGS']['Database_path'], config['SETTINGS']['Database_file']))
    # Create an instance of the server class and start it:
server = MessageProcessor(listen_address, listen_port, database)
server.daemon = True
server.start()
    # If the no-GUI parameter is given, run a simple handler
    # for console input
if gui_flag:
while True:
command = input('Введите exit для завершения работы сервера.')
if command == 'exit':
                # On 'exit', stop the server's main loop.
server.running = False
server.join()
break
    # If running without a GUI was not requested, start the GUI:
else:
        # Create the graphical environment for the server:
server_app = QApplication(sys.argv)
server_app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
main_window = MainWindow(database, server, config)
        # Start the GUI
server_app.exec_()
        # When the windows are closed, stop the message processor
server.running = False
if __name__ == '__main__':
main() | PypiClean |
/LinkPython-0.1.1.tar.gz/LinkPython-0.1.1/modules/link/GNU-GPL-v2.0.md | GNU General Public License
==========================
_Version 2, June 1991_
_Copyright © 1989, 1991 Free Software Foundation, Inc.,_
_51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA_
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
### Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: **(1)** copyright the software, and
**(2)** offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
### TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
**0.** This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The “Program”, below,
refers to any such program or work, and a “work based on the Program”
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term “modification”.) Each licensee is addressed as “you”.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
**1.** You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
**2.** You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
* **a)** You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
* **b)** You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
* **c)** If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
**3.** You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
* **a)** Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
* **b)** Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
* **c)** Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
**4.** You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
**5.** You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
**6.** Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
**7.** If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
**8.** If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
**9.** The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and “any
later version”, you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
**10.** If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
### NO WARRANTY
**11.** BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
**12.** IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
### How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the “copyright” line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w` and `show c` should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w` and `show c`; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a “copyright disclaimer” for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
| PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/a11yhelp/dialogs/lang/it.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","it",{title:"Istruzioni di Accessibilità",contents:"Contenuti di Aiuto. Per chiudere questa finestra premi ESC.",legend:[{name:"Generale",items:[{name:"Barra degli strumenti Editor",legend:"Premi ${toolbarFocus} per navigare fino alla barra degli strumenti. Muoviti tra i gruppi della barra degli strumenti con i tasti Tab e Maiusc-Tab. Spostati tra il successivo ed il precedente pulsante della barra degli strumenti usando le frecce direzionali Destra e Sinistra. Premi Spazio o Invio per attivare il pulsante della barra degli strumenti."},
{name:"Finestra Editor",legend:"All'interno di una finestra di dialogo, premi Tab per navigare fino al campo successivo della finestra di dialogo, premi Maiusc-Tab per tornare al campo precedente, premi Invio per inviare la finestra di dialogo, premi Esc per uscire. Per le finestre che hanno schede multiple, premi Alt+F10 per navigare nella lista delle schede. Quindi spostati alla scheda successiva con il tasto Tab oppure con la Freccia Destra. Torna alla scheda precedente con Maiusc+Tab oppure con la Freccia Sinistra. Premi Spazio o Invio per scegliere la scheda."},
{name:"Menù contestuale Editor",legend:"Premi ${contextMenu} o TASTO APPLICAZIONE per aprire il menu contestuale. Dunque muoviti all'opzione successiva del menu con il tasto TAB o con la Freccia Sotto. Muoviti all'opzione precedente con MAIUSC+TAB o con Freccia Sopra. Premi SPAZIO o INVIO per scegliere l'opzione di menu. Apri il sottomenu dell'opzione corrente con SPAZIO o INVIO oppure con la Freccia Destra. Torna indietro al menu superiore con ESC oppure Freccia Sinistra. Chiudi il menu contestuale con ESC."},
{name:"Box Lista Editor",legend:"Dentro un box-lista, muoviti al prossimo elemento della lista con TAB o con la Freccia direzionale giù. Spostati all'elemento precedente con MAIUSC+TAB oppure con Freccia direzionale sopra. Premi SPAZIO o INVIO per scegliere l'opzione della lista. Premi ESC per chiudere il box-lista."},{name:"Barra percorso elementi editor",legend:"Premi ${elementsPathFocus} per navigare tra gli elementi della barra percorso. Muoviti al prossimo pulsante di elemento con TAB o la Freccia direzionale destra. Muoviti al pulsante precedente con MAIUSC+TAB o la Freccia Direzionale Sinistra. Premi SPAZIO o INVIO per scegliere l'elemento nell'editor."}]},
{name:"Comandi",items:[{name:" Annulla comando",legend:"Premi ${undo}"},{name:" Ripeti comando",legend:"Premi ${redo}"},{name:" Comando Grassetto",legend:"Premi ${bold}"},{name:" Comando Corsivo",legend:"Premi ${italic}"},{name:" Comando Sottolineato",legend:"Premi ${underline}"},{name:" Comando Link",legend:"Premi ${link}"},{name:" Comando riduci barra degli strumenti",legend:"Premi ${toolbarCollapse}"},{name:"Comando di accesso al precedente spazio di focus",legend:"Premi ${accessPreviousSpace} per accedere il più vicino spazio di focus non raggiungibile prima del simbolo caret, per esempio due elementi HR adiacenti. Ripeti la combinazione di tasti per raggiungere spazi di focus distanti."},
{name:"Comando di accesso al prossimo spazio di focus",legend:"Premi ${accessNextSpace} per accedere il più vicino spazio di focus non raggiungibile dopo il simbolo caret, per esempio due elementi HR adiacenti. Ripeti la combinazione di tasti per raggiungere spazi di focus distanti."},{name:" Aiuto Accessibilità",legend:"Premi ${a11yHelp}"}]}]}); | PypiClean |
/KiwoomDE-0.5.1-py3-none-any.whl/kiwoomde/models/_devguide.py | import re
import pandas as pd
from idebug import *
from kiwoomde.database import *
from kiwoomde import config
from kiwoomde.base import BaseClass, BaseDataClass
from kiwoomde.models.base import KWModel
from kiwoomde.models._util import *
def DevGuideModel(modelName):
if modelName == 'TRList':
return TRList()
elif modelName == 'RTList':
return RTList()
elif modelName == 'ChejanFID':
return ChejanFID()
# Models that build their data from the DevGuide text files
class BaseDGModel(KWModel):
def __init__(self, modelName):
super().__init__(modelName)
self._read_file()
def _read_file(self):
        # Read the file
if self.modelName in dba.DevGuideModels():
            # Read in the text file
fpath = config.clean_path(f'{config.DevGuideTextPath}/{self.modelName}.txt')
f = open(fpath, mode='r', encoding='utf-8')
text = f.read()
f.close()
return text
else:
            # If there is no file matching the model name, raise an error
logger.critical(f'해당 모델({self.modelName})에 대한 텍스트 파일이 존재하지 않는다')
raise
def _create(self):
        # Read in the text file
text = self._read_file()
        # Structure the text into data --> implement this function differently for each model
data = self._structure_data(text)
        # Save to the DB
self.drop()
self.insert_many(data)
class TRList(BaseDGModel):
    # KOA StudioSA / TR list
@funcIdentity
def __init__(self):
super().__init__(self.__class__.__name__)
@funcIdentity
def create_collection(self):
        # PartGubun('Phase-1: structure the DevGuide text into data and store it')
self._create()
        # PartGubun('Phase-2: update additional columns using the collection data')
self._update_markettype()
logger.info('Done.')
def _split_whole_text(self, text):
# Split Whole-Text into Each TR-based Text
p = re.compile('(/[\*]+/)')
li = p.split(text)
li = [e.strip() for e in li if len(e.strip()) > 0]
        # The split pattern itself is also returned in the result, so it has to be removed --> honestly not sure why
return [e for e in li if p.search(e) is None]
def _structure_data(self, text):
txt_list = self._split_whole_text(text)
data = []
for txt in txt_list:
            # Parse
trcode, trname = self._get_trcodename(txt)
outputs = self._get_outputs(txt)
inputs = self._get_inputs(txt)
caution = self._get_caution(txt)
real_active, test_active = self._get_active(caution)
data.append({
'trcode':trcode, 'trname':trname,
'inputs':inputs, 'outputs':outputs,
'caution':caution, 'real_active':real_active, 'test_active':test_active
})
return data
def _get_trcodename(self, text):
m = re.search("\[\s*([a-zA-Z0-9]+)\s*:\s*([가-힝A-Z\s0-9\(\)]+)\s*\]", text)
return m.group(1).strip(), m.group(2).strip()
def _get_outputs(self, text):
m = re.search('OUTPUT=(.+)', text)
return None if m is None else m.group(1).strip().split(',')
def _get_inputs(self, text):
inputs = re.findall('SetInputValue\("(.+)"\s*,', text)
# print(inputs)
data = []
for input in inputs:
d = {'id':input}
m = re.search(f'{input}\s*=\s*(.+)\n', text)
value = None if m is None else m.group(1).strip()
# print(value)
d.update({'value':value})
data.append(d)
return data
def _get_caution(self, text):
p = re.compile('\[\s*주의\s*\]')
m = p.search(text)
if m is None:
return None
else:
lines = text.splitlines()
for i, line in enumerate(lines):
if p.search(line) is not None:
break
return lines[i+1]
def _get_active(self, caution):
if caution is None:
real, test = True, True
else:
m = re.search('(이 TR은)[.\s]+(모의투자)*', caution)
if m is None:
real, test = True, True
else:
real, test = (False, False) if m.group(2) is None else (True, False)
return real, test
@funcIdentity
def _update_markettype(self):
self.update_many({}, {'$set':{'markettype':'stock'}})
filter = {'trname':{'$regex':'선물|옵션|선옵'}}
update = {'$set':{'markettype':'fo'}}
self.update_many(filter, update)
filter = {'trname':{'$regex':'ETF'}}
update = {'$set':{'markettype':'ETF'}}
self.update_many(filter, update)
filter = {'trname':{'$regex':'ELW'}}
update = {'$set':{'markettype':'ELW'}}
self.update_many(filter, update)
pattern = '계좌|예수금|자산|위탁.+거래|비밀번호|잔고현황|인출가능|증거금|신용융자'
filter = {'trname':{'$regex':pattern}}
update = {'$set':{'markettype':'acct'}}
self.update_many(filter, update)
logger.info("Done.")
class RTList(BaseDGModel):
    # KOA StudioSA / realtime list
def __init__(self):
super().__init__(self.__class__.__name__)
@funcIdentity
def create_collection(self):
        # PartGubun('Phase-1: structure the DevGuide text into data and store it')
self._create()
        # PartGubun('Phase-2: define/update the dtype of colName')
assign_dtype(self.collName, 'name')
logger.info('Done.')
def _split_by_realtype(self, text):
# 전체 텍스트를 26개의 Realtype별로 나눈다
li = re.split('[\*]+', text)
return [e.strip() for e in li if len(e.strip()) > 0]
def _structure_data(self, text):
txt_list = self._split_by_realtype(text)
data = []
for txt in txt_list:
realtype = self._get_realtype(txt)
fid_data = self._get_fid_data(txt)
data.append({'realtype':realtype, 'fid_data':fid_data})
return data
def _get_realtype(self, text):
m = re.search("Real Type\s*:\s*([가-힝A-Z\s0-9\(\)]+)", text)
return m.group(1).strip()
def _get_fid_data(self, text):
li = re.findall('\[(\d+)\]\s*=\s*(.+)', text)
data = []
for t in li:
data.append({'fid':t[0], 'name':t[1].strip()})
return data
class ChejanFID(BaseDGModel):
    # Overlaps with RealtimeFID - is this really needed?
def __init__(self):
super().__init__(self.__class__.__name__)
@funcIdentity
def create_collection(self):
        # PartGubun('Phase-1: structure the DevGuide text into data and store it')
self._create()
        # PartGubun('Phase-2: define/update the dtype of colName')
assign_dtype(self.collName, 'name')
        # PartGubun('Phase-3: add FID info missing from the DevGuide, taken from RealtimeFID')
self._insert_omitted_FIDs()
        # PartGubun('Phase-4: store the remaining FID info manually')
self._insert_newly_found_FIDs()
logger.info('Done.')
def _structure_data(self, text):
        # Structure the text
data = []
pairs = re.findall(pattern='"(\d+)"\s*:\s*"(.+)"', string=text)
for p in pairs:
data.append({'fid':p[0].strip(), 'name':p[1].strip()})
return data
def _insert_omitted_FIDs(self):
        # Under development
# cursor = self.find(None, {'_id':0, 'fid':1, 'name':1})
# df = pd.DataFrame(list(cursor))
#
# projection = {fid:0 for fid in list(df.fid)}
# projection.update({'_id':0, 'dt':0})
# cursor = db['_Test_Chejan'].find(None, projection).limit(10)
# df = pd.DataFrame(list(cursor))
#
# cursor = db.RealtimeFID.find({'fid':{'$in':list(df.columns)}}, {'_id':0, 'fid':1, 'name':1, 'dtype':1})
# df_ = pd.DataFrame(list(cursor))
#
# for d in df_.to_dict('records'):
# filter = {'fid':d['fid']}
# update = {'$set':d}
# self.update_one(filter, update, True)
return
def _insert_newly_found_FIDs(self):
values = []
for fid in [819,949,969,970,10010]:
values.append([str(fid),None,'int'])
data = pd.DataFrame(data=values, columns=self.schema.get_columns()).to_dict('records')
self.insert_many(data) | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/iron-fit-behavior/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/iron-fit-behavior/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/Infomericaclass-1.0.0.tar.gz/Infomericaclass-1.0.0/inf/data/wiki/readme.md | ## Data from Wikipedia
The data were originally collected by a team lead by Steven Skiena as part of the project to build a classifier for race and ethnicity based on names. The team scraped Wikipedia to produce a novel database of over 140k name/race associations. For details of the how the data was collected, see [Name-ethnicity classification from open sources](http://dl.acm.org/citation.cfm?id=1557032) (for reference, see below).
The team has two papers (reference for one of the papers can be found below; the other paper is forthcoming) on novel ways of building a classifier. The team has also made it easy to use the classifiers they have built by providing public APIs. The classifier based on the methods discussed in the first paper can be accessed at: [http://www.textmap.com/ethnicity](http://www.textmap.com/ethnicity), and for the second paper at: [http://www.data-prism.com](http://www.data-prism.com).
If you use this data, please cite:
@inproceedings{ambekar2009name,
title={Name-ethnicity classification from open sources},
author={Ambekar, Anurag and Ward, Charles and Mohammed, Jahangir and Male, Swapna and Skiena, Steven},
booktitle={Proceedings of the 15th ACM SIGKDD international conference on Knowledge Discovery and Data Mining},
pages={49--58},
year={2009},
organization={ACM}
}
| PypiClean |
/Lentil-0.7.0.tar.gz/Lentil-0.7.0/README.rst | Lentil
======
|build status| |coverage| |docs status| |pypi version|
Lentil is a Python library for modeling the imaging chain of an optical system.
It was originally developed at NASA's Jet Propulsion Lab by the Wavefront Sensing and
Control group (383E) to provide an easy to use framework for simulating point spread
functions of segmented aperture telescopes.
Lentil provides classes for representing optical elements with a simple interface for
including effects like wavefront error, radiometric properties, and various noise and
aberration sources. Lentil also provides numerical methods for performing Fraunhofer
(far-field) diffraction calculations. The collection of classes provided by Lentil can
be used to simulate imagery for a wide variety of optical systems.
Lentil is still under active development and new features continue to be added. Until
Lentil reaches version 1.0, the API is not guaranteed to be stable, but changes breaking
backwards compatibility will be noted.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install lentil
Links
-----
* Documentation: https://lentil.readthedocs.io/
* Releases: https://pypi.org/project/lentil/
* Code: https://github.com/andykee/lentil/
* Issue tracker: https://github.com/andykee/lentil/issues/
.. _pip: https://pip.pypa.io/en/stable/quickstart/
.. |pypi version| image:: https://img.shields.io/pypi/v/lentil.svg
:target: https://pypi.python.org/pypi/lentil
.. |build status| image:: https://github.com/andykee/lentil/actions/workflows/test.yml/badge.svg
:target: https://github.com/andykee/lentil/actions/workflows/test.yml
.. |coverage| image:: https://coveralls.io/repos/github/andykee/lentil/badge.svg
:target: https://coveralls.io/github/andykee/lentil
.. |docs status| image:: https://readthedocs.org/projects/lentil/badge/?version=latest
:target: https://lentil.readthedocs.io/en/latest/?badge=latest
| PypiClean |
/OCAICM-0.0.2.tar.gz/OCAICM-0.0.2/script/dnn_torch_utils.py | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import datetime
import random
from sklearn.metrics import roc_auc_score, confusion_matrix, precision_recall_curve, auc, \
mean_absolute_error, r2_score
def statistical(y_true, y_pred, y_pro):
c_mat = confusion_matrix(y_true, y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
se = tp/(tp+fn)
sp = tn/(tn+fp)
acc = (tp+tn)/(tn+fp+fn+tp)
mcc = (tp*tn-fp*fn)/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)+1e-8)
auc_prc = auc(precision_recall_curve(y_true, y_pro, pos_label=1)[1],
precision_recall_curve(y_true, y_pro, pos_label=1)[0])
auc_roc = roc_auc_score(y_true, y_pro)
return tn, fp, fn, tp, se, sp, acc, mcc, auc_prc, auc_roc
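# Usage sketch (illustrative numbers): y_pred holds hard 0/1 labels and y_pro
# the positive-class probabilities used for the ranking metrics.
#
#   y_true = [0, 1, 1, 0, 1]
#   y_pro = [0.2, 0.9, 0.6, 0.4, 0.7]
#   y_pred = [1 if p >= 0.5 else 0 for p in y_pro]
#   tn, fp, fn, tp, se, sp, acc, mcc, auc_prc, auc_roc = statistical(y_true, y_pred, y_pro)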
class Meter(object):
"""Track and summarize model performance on a dataset for
    (multi-label) binary classification and regression tasks."""
def __init__(self):
self.mask = []
self.y_pred = []
self.y_true = []
def update(self, y_pred, y_true, mask):
"""Update for the result of an iteration
Parameters
----------
y_pred : float32 tensor
Predicted molecule labels with shape (B, T),
B for batch size and T for the number of tasks
y_true : float32 tensor
Ground truth molecule labels with shape (B, T)
mask : float32 tensor
Mask for indicating the existence of ground
truth labels with shape (B, T)
"""
self.y_pred.append(y_pred.detach().cpu())
self.y_true.append(y_true.detach().cpu())
self.mask.append(mask.detach().cpu())
def roc_precision_recall_score(self):
"""Compute AUC_PRC for each task.
Returns
-------
list of float
            AUC-PRC for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_pred = torch.sigmoid(y_pred)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
precision, recall, _thresholds = precision_recall_curve(task_y_true, task_y_pred, pos_label=1)
scores.append(auc(recall, precision))
return scores
def roc_auc_score(self):
"""Compute roc-auc score for each task.
Returns
-------
list of float
roc-auc score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
scores.append(roc_auc_score(task_y_true, task_y_pred))
return scores
def se(self):
"""Compute se score for each task.
Returns
-------
list of float
se score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >= 0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
se = tp / (tp + fn)
scores.append(se)
return scores
def precision(self):
"""Compute precision score for each task.
Returns
-------
list of float
precision score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
precision = tp / (tp + fp)
scores.append(precision)
return scores
def sp(self):
"""Compute sp score for each task.
Returns
-------
list of float
sp score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
sp = tn / (tn + fp)
scores.append(sp)
return scores
def acc(self):
"""Compute acc score for each task.
Returns
-------
list of float
acc score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
acc = (tp + tn) / (tn + fp + fn + tp)
scores.append(acc)
return scores
def mcc(self):
"""Compute mcc score for each task.
Returns
-------
list of float
mcc score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
mcc = (tp * tn - fp * fn) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) + 1e-8)
scores.append(mcc)
return scores
# def r2(self):
# """Compute r2 score for each task.
#
# Returns
# -------
# list of float
# r2 score for all tasks
# """
# mask = torch.cat(self.mask, dim=0)
# y_pred = torch.cat(self.y_pred, dim=0)
# y_true = torch.cat(self.y_true, dim=0)
# n_data, n_tasks = y_true.shape
# scores = []
# for task in range(n_tasks):
# task_w = mask[:, task]
# task_y_true = y_true[:, task][task_w != 0]
# task_y_pred = y_pred[:, task][task_w != 0]
# scores.append(r2_score(task_y_true, task_y_pred))
# return scores
def l1_loss(self, reduction):
"""Compute l1 loss for each task.
Returns
-------
list of float
l1 loss for all tasks
reduction : str
* 'mean': average the metric over all labeled data points for each task
* 'sum': sum the metric over all labeled data points for each task
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(F.l1_loss(task_y_true, task_y_pred, reduction=reduction).item())
return scores
def rmse(self):
"""Compute RMSE for each task.
Returns
-------
list of float
rmse for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(np.sqrt(F.mse_loss(task_y_pred, task_y_true).cpu().item()))
return scores
def mae(self):
"""Compute mae for each task.
Returns
-------
list of float
mae for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(mean_absolute_error(task_y_true, task_y_pred))
return scores
def r2(self):
"""Compute r2 score for each task.
Returns
-------
list of float
r2 score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(r2_score(task_y_true, task_y_pred))
return scores
def compute_metric(self, metric_name, reduction='mean'):
"""Compute metric for each task.
Parameters
----------
metric_name : str
Name for the metric to compute.
reduction : str
Only comes into effect when the metric_name is l1_loss.
* 'mean': average the metric over all labeled data points for each task
* 'sum': sum the metric over all labeled data points for each task
Returns
-------
list of float
Metric value for each task
"""
assert metric_name in ['roc_auc', 'l1', 'rmse', 'prc_auc', 'mae', 'r2', 'se', 'sp', 'acc', 'mcc', 'pred',
'precision'], \
'Expect metric name to be "roc_auc", "l1", "rmse", "prc_auc", "mae", "r2", "se", "sp", "acc", "mcc", "pred" or "precision", got {}'.format(
metric_name)  # assert raises an exception when the condition evaluates to false
assert reduction in ['mean', 'sum']
if metric_name == 'roc_auc':
return self.roc_auc_score()
if metric_name == 'l1':
return self.l1_loss(reduction)
if metric_name == 'rmse':
return self.rmse()
if metric_name == 'prc_auc':
return self.roc_precision_recall_score()
if metric_name == 'mae':
return self.mae()
if metric_name == 'r2':
return self.r2()
if metric_name == 'mcc':
return self.mcc()
if metric_name == 'se':
return self.se()
if metric_name == 'precision':
return self.precision()
if metric_name == 'sp':
return self.sp()
if metric_name == 'acc':
return self.acc()
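# --- Illustrative usage sketch (added for clarity, not part of the original file) ---
# The class name and the exact accumulation method below are assumptions; the sketch only
# shows how compute_metric() above is typically consumed once predictions, labels and
# masks have been accumulated batch by batch.
#
#     meter = Meter()                                    # hypothetical metrics accumulator
#     for batch_X, batch_y, batch_mask in data_loader:
#         logits = model(batch_X)
#         meter.update(logits, batch_y, batch_mask)      # assumed accumulation call
#     per_task_scores = meter.compute_metric('roc_auc')  # one score per task
#     mean_score = float(np.mean(per_task_scores))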
class MyDataset(object):
def __init__(self, Xs, Ys):
self.Xs = torch.tensor(Xs, dtype=torch.float32)
self.masks = torch.tensor(~np.isnan(Ys) * 1.0, dtype=torch.float32)
# convert np.nan to 0
self.Ys = torch.tensor(np.nan_to_num(Ys), dtype=torch.float32)
def __len__(self):
return len(self.Ys)
def __getitem__(self, idx):
X = self.Xs[idx]
Y = self.Ys[idx]
mask = self.masks[idx]
return X, Y, mask
class EarlyStopping(object):
"""Early stop performing
Parameters
----------
mode : str
* 'higher': Higher metric suggests a better model
* 'lower': Lower metric suggests a better model
patience : int
Number of epochs to wait before early stop
if the metric stops getting improved
filename : str or None
Filename for storing the model checkpoint
"""
def __init__(self, mode='higher', patience=10, filename=None):  # a tolerance threshold could be added here
if filename is None:
dt = datetime.datetime.now()
filename = '{}_early_stop_{}_{:02d}-{:02d}-{:02d}.pth'.format(
dt.date(), dt.hour, dt.minute, dt.second)
assert mode in ['higher', 'lower']
self.mode = mode
if self.mode == 'higher':
self._check = self._check_higher  # leading underscore marks this as private: intended for internal use only
else:
self._check = self._check_lower
self.patience = patience
self.counter = 0
self.filename = filename
self.best_score = None
self.early_stop = False
def _check_higher(self, score, prev_best_score):
return (score > prev_best_score)
def _check_lower(self, score, prev_best_score):
return (score < prev_best_score)
def step(self, score, model):
if self.best_score is None:
self.best_score = score
self.save_checkpoint(model)
elif self._check(score, self.best_score):  # if the current model is better, save it as the new best
self.best_score = score
self.save_checkpoint(model)
self.counter = 0
else:
self.counter += 1
# print(
# f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
return self.early_stop
def save_checkpoint(self, model):
'''Saves model when the metric on the validation set gets improved.'''
torch.save({'model_state_dict': model.state_dict()}, self.filename)
def load_checkpoint(self, model):
'''Load model saved with early stopping.'''
model.load_state_dict(torch.load(self.filename)['model_state_dict'])
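# --- Illustrative usage of EarlyStopping (added for clarity, not part of the original file) ---
# Minimal sketch: track a validation metric where higher is better and stop after
# `patience` epochs without improvement. The model and the score sequence below are
# placeholders, not the project's real training loop.
def _early_stopping_demo():
    demo_model = nn.Linear(4, 1)
    stopper = EarlyStopping(mode='higher', patience=3, filename='demo_checkpoint.pth')
    for epoch, val_score in enumerate([0.60, 0.62, 0.61, 0.61, 0.61]):
        if stopper.step(val_score, demo_model):
            # step() saved the best weights via save_checkpoint(); restore them here
            stopper.load_checkpoint(demo_model)
            break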
class MyDNN(nn.Module):
def __init__(self, inputs, hideen_units, outputs, dp_ratio, reg):
"""
:param inputs: number of inputs
:param hideen_units: [128, 256, 512]
:param out_puts: number of outputs
:param dp_ratio:
:param reg:
"""
super(MyDNN, self).__init__()
# parameters
self.reg = reg
# layers
self.hidden1 = nn.Linear(inputs, hideen_units[0])
self.dropout1 = nn.Dropout(dp_ratio)
self.hidden2 = nn.Linear(hideen_units[0], hideen_units[1])
self.dropout2 = nn.Dropout(dp_ratio)
self.hidden3 = nn.Linear(hideen_units[1], hideen_units[2])
self.dropout3 = nn.Dropout(dp_ratio)
if reg:
self.output = nn.Linear(hideen_units[2], 1)
else:
self.output = nn.Linear(hideen_units[2], outputs)
def forward(self, x):
x = self.hidden1(x)
x = F.relu(self.dropout1(x))
x = self.hidden2(x)
x = F.relu(self.dropout2(x))
x = self.hidden3(x)
x = F.relu(self.dropout3(x))
return self.output(x)
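# --- Illustrative usage of MyDNN (added for clarity, not part of the original file) ---
# Minimal sketch: a three-hidden-layer network for a 12-task classification problem
# with 1024-dimensional inputs. All hyperparameter values below are examples only.
def _mydnn_demo():
    demo_net = MyDNN(inputs=1024, hideen_units=[128, 256, 512], outputs=12,
                     dp_ratio=0.2, reg=False)
    logits = demo_net(torch.randn(8, 1024))  # raw logits of shape (8, 12)
    return logits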
def collate_fn(data_batch):
Xs, Ys, masks = map(list, zip(*data_batch))
Xs = torch.stack(Xs, dim=0)
Ys = torch.stack(Ys, dim=0)
masks = torch.stack(masks, dim=0)
return Xs, Ys, masks
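# --- Illustrative usage of MyDataset and collate_fn (added for clarity, not part of the original file) ---
# Minimal sketch assuming features of shape (n_samples, n_features) and labels of shape
# (n_samples, n_tasks) where missing labels are np.nan; the mask produced by MyDataset
# is 0 wherever a label was missing.
def _dataloader_demo():
    from torch.utils.data import DataLoader
    Xs = np.random.rand(16, 8)
    Ys = np.array([[1.0, np.nan]] * 16)  # second task unlabeled in this toy example
    demo_loader = DataLoader(MyDataset(Xs, Ys), batch_size=4,
                             shuffle=True, collate_fn=collate_fn)
    for batch_X, batch_Y, batch_mask in demo_loader:
        pass  # each batch: (4, 8) features, (4, 2) labels, (4, 2) mask
    return batch_X, batch_Y, batch_mask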
def set_random_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)  # seed the CPU random number generator
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)  # seed the current GPU's random number generator | PypiClean
/Jalapeno-0.1.4.tar.gz/Jalapeno_data/Sites/first/Pages/doc/writing.md | title: Start a blog post
pos: 4
###New
Are you ready now?
First, create an empty document 'test.md' inside the 'pages' folder. Here 'test' is just a name; you can name your article anything, and the name determines the link address of the resulting page:
yourwebsite.com/article/test
###Title
Next, edit the header at the top of the document. Note that a space is required after each colon:
title: your article title goes here
date: the publication date, in YYYY-MM-DD format
tag: the category/tag name of your article
###Content
Then write the body, separated from the header above by a blank line:
hello world!balabalabala....
balabalabala....
balabalabala....
###Image
As mentioned before, images are placed in a subfolder of the image folder named after the article. Now suppose our testimg.jpg is in the image/test folder, so its path is
Jalapeno/source/image/test/testimg.jpg
We use Markdown's image syntax:
\!\[\]\(image path)
and the image path is written as
\{\{image.subfolder_name.image_name}},
so the final reference looks like this:
hello world!balabalabala....
\{\{image.test.testimg}}
balabalabala....
balabalabala....
###Excerpt
If you want to show an excerpt in the article list, use <!\--More--> as a separator. Everything before <!\--More--> will appear as the excerpt in your article list:
hello world!balabalabala....
<!\--More-->
balabalabala....
balabalabala....
###Sidebar-content
If you want an index / table of contents in your article, use \[TOC\] as a marker: put \[TOC\] where you want it and Jalapeno will generate the table of contents at that position, provided you have used '\#' marks for your sub-headings.
\[TOC\]
hello world!balabalabala....
<!\--More-->
##First heading
balabalabala....
##Second heading
balabalabala....
If you want the table of contents in the sidebar instead of the body, mark it with <!\--Siderbar-->: everything above <!\--Siderbar--> goes into the sidebar table of contents. Note that it must be separated from \[TOC\] by a blank line:
\[TOC\]
<!\--Siderbar-->
hello world!balabalabala....
<!\--More-->
##First heading
balabalabala....
##Second heading
balabalabala....
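Putting it all together, a minimal complete post combining the pieces above might look like this (the title, date, tag and headings are just examples):
title: My first post
date: 2016-01-01
tag: notes
\[TOC\]
<!\--Siderbar-->
hello world!balabalabala....
<!\--More-->
##First heading
balabalabala....
##Second heading
balabalabala....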
###Syntax
To learn more Markdown syntax, see the [Markdown syntax guide (Chinese)](https://github.com/riku/Markdown-Syntax-CN/blob/master/syntax.md)
And that's it, our post is written. Before publishing it, we need to test it | PypiClean
/Mecoda_Orange-1.9.0-py3-none-any.whl/mecoda_orange/ictio.py | import pandas as pd
import os
import datetime
from ictiopy import ictiopy
from orangewidget.widget import OWBaseWidget, Output
from orangewidget.settings import Setting
from orangewidget import gui
from orangewidget.utils.widgetpreview import WidgetPreview
import Orange.data
from Orange.data.pandas_compat import table_from_frame
from PyQt5.QtWidgets import QFileDialog
from AnyQt.QtWidgets import QStyle, QSizePolicy as Policy
def clean_df(observations):
observations.weight = observations.weight.astype(float)
observations.price_local_currency = observations.price_local_currency.astype(
float)
observations.num_photos = observations.num_photos.astype(int)
observations.fishing_duration = observations.fishing_duration.astype(float)
observations.num_of_fishers = observations.num_of_fishers.astype(float)
observations.number_of_fish = observations.number_of_fish.astype(float)
observations.obs_year = observations.obs_year.astype(int)
observations.obs_month = observations.obs_month.astype(int)
observations.obs_day = observations.obs_day.astype(int)
return observations
def split_date(observations, init, end):
observations['obs_date'] = observations['obs_year'].astype(int).astype(str) + observations['obs_month'].astype(
int).astype(str).str.zfill(2) + observations['obs_day'].astype(int).astype(str).str.zfill(2)
observations['obs_date'] = pd.to_datetime(observations['obs_date'])
observations = observations[observations['obs_date'] >= init]
observations = observations[observations['obs_date'] <= end]
observations = observations.drop(['obs_date'], axis=1)
return observations
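# --- Illustrative usage of clean_df / split_date (added for clarity, not part of the original file) ---
# Sketch assuming `observations` is the DataFrame returned by ictiopy (as in
# IctioWidget.commit below); the date range used here is an example only.
def _filter_observations_demo(observations: pd.DataFrame) -> pd.DataFrame:
    observations = clean_df(observations)  # enforce numeric dtypes
    init = datetime.datetime(2020, 1, 1)
    end = datetime.datetime(2021, 12, 31)
    return split_date(observations, init, end)  # keep rows between init and end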
class IctioWidget(OWBaseWidget):
# Widget's name as displayed in the canvas
name = "Ictio Observations"
# Short widget description
description = "Get observations from Ictio, a mobile phone app created to register observations of caught fish in the Amazon basin"
# An icon resource file path for this widget
icon = "icons/ictio-circular.png"
# Priority in the section MECODA
priority = 8
# Basic (convenience) GUI definition:
# a simple 'single column' GUI layout
want_main_area = False
# with a fixed non resizable geometry.
resizing_enabled = False
# Defining settings
path_folder = Setting("", schema_only=True)
path_file = Setting("", schema_only=True)
date_init = Setting("1860-01-01", schema_only=True)
date_end = Setting(str(datetime.date.today()), schema_only=True)
# Widget's outputs; here, a single output named "Observations", of type Table
class Outputs:
observations = Output(
"Observations", Orange.data.Table, auto_summary=False)
def __init__(self):
# use the init method from the class OWBaseWidget
super().__init__()
# info area
info = gui.widgetBox(self.controlArea, "Info")
self.infoa = gui.widgetLabel(
info,
'Please specify the path to a <b>Ictio_Basic_xxxxxxxx.zip</b> file\
<br>that has been downloaded from the "Download" section<br>\
of <a href="https://ictio.org/">ictio.org</a> website.<br><br>\
You can also specify an XLSX file if you have already extracted\
<br> the file BDB_XXXXXXXX.xlsx that can be found inside\
<br>ICTIO_BASIC_XXXXXXXX.zip'
)
self.infob = gui.widgetLabel(
info, 'NOTE: Downloading the file requires user registration.')
gui.separator(self.controlArea)
# searchBox area
self.searchBox = gui.widgetBox(self.controlArea, "Source")
self.browsers = gui.widgetBox(self.searchBox, "", orientation=1)
zip_button = gui.button(
self.browsers,
self,
'Choose .zip',
callback=self.browse_zip,
autoDefault=False,
width=160,
)
zip_button.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))
zip_button.setSizePolicy(
Policy.Maximum,
Policy.Fixed
)
file_button = gui.button(
self.browsers,
self,
'Choose .xlsx',
callback=self.browse_file,
autoDefault=False,
width=160,
)
file_button.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))
file_button.setSizePolicy(
Policy.Maximum,
Policy.Fixed
)
# gui.separator(self.searchBox)
self.dateBox = gui.widgetBox(self.controlArea, "Filter by date")
self.date_init_line = gui.lineEdit(
self.dateBox,
self,
"date_init",
label="Initial Date:",
orientation=1,
controlWidth=140,
)
self.date_end_line = gui.lineEdit(
self.dateBox,
self,
"date_end",
label="End Date:",
orientation=1,
controlWidth=140
)
# commit area
self.commitBox = gui.widgetBox(self.controlArea, "", spacing=2)
gui.button(self.commitBox, self, "Load", callback=self.commit)
def info_searching(self):
self.infoa.setText('Loading...')
def browse_file(self):
dialog = QFileDialog()
home = os.path.expanduser("~")
path_string, __ = dialog.getOpenFileName(
self, 'Select a zip file', home, "xlsx files (*.xlsx)")
self.path_file = path_string
if self.path_file is not None:
try:
self.infoa.setText(f"<b>File selected:</b><br>{path_string}")
self.infob.setText("")
except ValueError:
self.infoa.setText(f'Nothing found.')
except Exception as error:
self.infoa.setText(f'ERROR: \n{error}')
self.infob.setText("")
print(error)
else:
self.infoa.setText(f'Choose some xlsx file to load data.')
def browse_zip(self):
dialog = QFileDialog()
home = os.path.expanduser("~")
path_string, __ = dialog.getOpenFileName(
self, 'Select a zip file', home, "Zip files (*.zip)")
self.path_folder = path_string
if self.path_folder is not None:
try:
self.infoa.setText(f"<b>File selected:</b><br>{path_string}")
self.infob.setText("")
except ValueError:
self.infoa.setText(f'Nothing found.')
except Exception as error:
self.infoa.setText(f'ERROR: \n{error}')
self.infob.setText("")
print(error)
else:
self.infoa.setText(f'Choose some zip file to load data.')
def commit(self):
self.infoa.setText(f'Loading...')
self.infob.setText(f'(This could take a while, be patient)')
try:
# convert date_init and date_end to datetime format
if type(self.date_init) == str:
init = datetime.datetime.strptime(self.date_init, '%Y-%m-%d')
else:
init = self.date_init
if type(self.date_end) == str:
end = datetime.datetime.strptime(self.date_end, '%Y-%m-%d')
else:
end = self.date_end
# show progress bar
progress = gui.ProgressBar(self, 2)
progress.advance()
if self.path_file != "":
directory, file = os.path.split(
os.path.abspath(self.path_file))
observations = ictiopy.sanitizedb(
ictiopy.load_ictio_bdb_file(
directory,
file
)
)
elif self.path_folder != "":
observations = ictiopy.load_zipdb(self.path_folder)
observations = clean_df(observations)
observations = split_date(observations, init, end)
if len(observations) > 0:
table_ictio = table_from_frame(observations)
self.infoa.setText(
f'{len(observations):,} observations gathered')
self.infob.setText("")
self.info.set_output_summary(len(observations))
self.Outputs.observations.send(table_ictio)
else:
self.infoa.setText(f'Nothing found.')
self.info.set_output_summary(self.info.NoOutput)
except ValueError:
self.infoa.setText(f'Nothing found.')
except Exception as error:
self.infoa.setText(f'ERROR: \n{error}')
self.infob.setText("")
print(error)
progress.finish()
# For developer purpose, allow running the widget directly with python
if __name__ == "__main__":
WidgetPreview(IctioWidget).run() | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/interfaces/storage/db.py | import os
import json
import datetime
from typing import Dict
import numpy as np
from sqlalchemy import create_engine, types, UniqueConstraint
from sqlalchemy.orm import scoped_session, sessionmaker, declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Index, text
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy import JSON
from sqlalchemy.exc import OperationalError
class Base:
__allow_unmapped__ = True
Base = declarative_base(cls=Base)
session, engine = None, None
def init(connection_str: str = None):
global Base, session, engine
if connection_str is None:
connection_str = os.environ['MINDSDB_DB_CON']
base_args = {
'pool_size': 30,
'max_overflow': 200
}
engine = create_engine(connection_str, echo=False, **base_args)
session = scoped_session(sessionmaker(bind=engine, autoflush=True))
Base.query = session.query_property()
def serializable_insert(record: Base, try_count: int = 100):
""" Do serializeble insert. If fail - repeat it {try_count} times.
Args:
record (Base): sqlalchey record to insert
try_count (int): count of tryes to insert record
"""
commited = False
while not commited:
session.connection(
execution_options={'isolation_level': 'SERIALIZABLE'}
)
if engine.name == 'postgresql':
session.execute(text('LOCK TABLE PREDICTOR IN EXCLUSIVE MODE'))
session.add(record)
try:
session.commit()
except OperationalError:
# catch 'SerializationFailure' (it should be in str(e), but it may depend on engine)
session.rollback()
try_count -= 1
if try_count == 0:
raise
else:
commited = True
# Source: https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
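# --- Illustrative usage of NumpyEncoder (added for clarity, not part of the original file) ---
# Minimal sketch: numpy scalars and arrays are converted to plain Python types so the
# standard json module can serialize them.
def _numpy_encoder_demo() -> str:
    payload = {'accuracy': np.float32(0.93), 'histogram': np.arange(3)}
    return json.dumps(payload, cls=NumpyEncoder)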
class Array(types.TypeDecorator):
''' String-backed column type that stores a list of strings joined by a delimiter '''
impl = types.String
def process_bind_param(self, value, dialect): # insert
if isinstance(value, str):
return value
elif value is None:
return value
else:
return ',|,|,'.join(value)
def process_result_value(self, value, dialect): # select
return value.split(',|,|,') if value is not None else None
class Json(types.TypeDecorator):
''' String-backed column type that serializes values to and from JSON '''
impl = types.String
def process_bind_param(self, value, dialect): # insert
return json.dumps(value, cls=NumpyEncoder) if value is not None else None
def process_result_value(self, value, dialect): # select
if isinstance(value, dict):
return value
return json.loads(value) if value is not None else None
class Semaphor(Base):
__tablename__ = 'semaphor'
id = Column(Integer, primary_key=True)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now)
entity_type = Column('entity_type', String)
entity_id = Column('entity_id', Integer)
action = Column(String)
company_id = Column(Integer)
__table_args__ = (
UniqueConstraint('entity_type', 'entity_id', name='uniq_const'),
)
class PREDICTOR_STATUS:
__slots__ = ()
COMPLETE = 'complete'
TRAINING = 'training'
FINETUNING = 'finetuning'
GENERATING = 'generating'
ERROR = 'error'
VALIDATION = 'validation'
DELETED = 'deleted' # TODO remove it?
PREDICTOR_STATUS = PREDICTOR_STATUS()
class Predictor(Base):
__tablename__ = 'predictor'
id = Column(Integer, primary_key=True)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now)
deleted_at = Column(DateTime)
name = Column(String)
data = Column(Json) # A JSON -- should be everything returned by `get_model_data`, I think
to_predict = Column(Array)
company_id = Column(Integer)
mindsdb_version = Column(String)
native_version = Column(String)
integration_id = Column(ForeignKey('integration.id', name='fk_integration_id'))
data_integration_ref = Column(Json)
fetch_data_query = Column(String)
is_custom = Column(Boolean)
learn_args = Column(Json)
update_status = Column(String, default='up_to_date')
status = Column(String)
active = Column(Boolean, default=True)
training_data_columns_count = Column(Integer)
training_data_rows_count = Column(Integer)
training_start_at = Column(DateTime)
training_stop_at = Column(DateTime)
label = Column(String, nullable=True)
version = Column(Integer, default=1)
code = Column(String, nullable=True)
lightwood_version = Column(String, nullable=True)
dtype_dict = Column(Json, nullable=True)
project_id = Column(Integer, ForeignKey('project.id', name='fk_project_id'), nullable=False)
training_phase_current = Column(Integer)
training_phase_total = Column(Integer)
training_phase_name = Column(String)
@staticmethod
def get_name_and_version(full_name):
name_no_version = full_name
version = None
parts = full_name.split('.')
if len(parts) > 1 and parts[-1].isdigit():
version = int(parts[-1])
name_no_version = '.'.join(parts[:-1])
return name_no_version, version
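    # --- Illustrative behaviour (added for clarity, not part of the original file) ---
    # Predictor.get_name_and_version('my_model.3') returns ('my_model', 3), while a name
    # without a numeric suffix, e.g. 'my_model', returns ('my_model', None).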
class Project(Base):
__tablename__ = 'project'
id = Column(Integer, primary_key=True)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
deleted_at = Column(DateTime)
name = Column(String, nullable=False)
company_id = Column(Integer)
__table_args__ = (
UniqueConstraint('name', 'company_id', name='unique_project_name_company_id'),
)
class Log(Base):
__tablename__ = 'log'
id = Column(Integer, primary_key=True)
created_at = Column(DateTime, default=datetime.datetime.now)
log_type = Column(String) # log, info, warning, traceback etc
source = Column(String) # file + line
company_id = Column(Integer)
payload = Column(String)
created_at_index = Index("some_index", "created_at_index")
class Integration(Base):
__tablename__ = 'integration'
id = Column(Integer, primary_key=True)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now)
name = Column(String, nullable=False)
engine = Column(String, nullable=False)
data = Column(Json)
company_id = Column(Integer)
__table_args__ = (
UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id'),
)
class File(Base):
__tablename__ = 'file'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
company_id = Column(Integer)
source_file_path = Column(String, nullable=False)
file_path = Column(String, nullable=False)
row_count = Column(Integer, nullable=False)
columns = Column(Json, nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
__table_args__ = (
UniqueConstraint('name', 'company_id', name='unique_file_name_company_id'),
)
class View(Base):
__tablename__ = 'view'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
company_id = Column(Integer)
query = Column(String, nullable=False)
project_id = Column(Integer, ForeignKey('project.id', name='fk_project_id'), nullable=False)
__table_args__ = (
UniqueConstraint('name', 'company_id', name='unique_view_name_company_id'),
)
class JsonStorage(Base):
__tablename__ = 'json_storage'
id = Column(Integer, primary_key=True)
resource_group = Column(String)
resource_id = Column(Integer)
name = Column(String)
content = Column(JSON)
company_id = Column(Integer)
class Jobs(Base):
__tablename__ = 'jobs'
id = Column(Integer, primary_key=True)
company_id = Column(Integer)
user_class = Column(Integer, nullable=True)
name = Column(String, nullable=False)
project_id = Column(Integer, nullable=False)
query_str = Column(String, nullable=False)
start_at = Column(DateTime, default=datetime.datetime.now)
end_at = Column(DateTime)
next_run_at = Column(DateTime)
schedule_str = Column(String)
deleted_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now)
class JobsHistory(Base):
__tablename__ = 'jobs_history'
id = Column(Integer, primary_key=True)
company_id = Column(Integer)
job_id = Column(Integer)
query_str = Column(String)
start_at = Column(DateTime)
end_at = Column(DateTime)
error = Column(String)
created_at = Column(DateTime, default=datetime.datetime.now)
updated_at = Column(DateTime, default=datetime.datetime.now)
__table_args__ = (
UniqueConstraint('job_id', 'start_at', name='uniq_job_history_job_id_start'),
)
class ChatBots(Base):
__tablename__ = 'chat_bots'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
project_id = Column(Integer, nullable=False)
model_name = Column(String, nullable=False)
database_id = Column(Integer)
params = Column(JSON)
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now)
def as_dict(self) -> Dict:
return {
'id': self.id,
'name': self.name,
'project_id': self.project_id,
'model_name': self.model_name,
'params': self.params,
'created_at': self.created_at,
'database_id': self.database_id,
}
class ChatBotsHistory(Base):
__tablename__ = 'chat_bots_history'
id = Column(Integer, primary_key=True)
chat_bot_id = Column(Integer)
type = Column(String) # TODO replace to enum
text = Column(String)
user = Column(String)
destination = Column(String)
sent_at = Column(DateTime, default=datetime.datetime.now)
error = Column(String)
class Triggers(Base):
__tablename__ = 'triggers'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
project_id = Column(Integer, nullable=False)
database_id = Column(Integer, nullable=False)
table_name = Column(String, nullable=False)
query_str = Column(String, nullable=False)
columns = Column(String) # list of columns separated by delimiter
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now)
class Tasks(Base):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True)
company_id = Column(Integer)
user_class = Column(Integer, nullable=True)
# trigger, chatbot
object_type = Column(String, nullable=False)
object_id = Column(Integer, nullable=False)
last_error = Column(String)
active = Column(Boolean, default=True)
reload = Column(Boolean, default=False)
# for running in concurrent processes
run_by = Column(String)
alive_time = Column(DateTime(timezone=True))
updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created_at = Column(DateTime, default=datetime.datetime.now) | PypiClean |